Compare commits


21 Commits

Author SHA1 Message Date
Alejandro Ranchal-Pedrosa
a12175dafc perf(relayer): add sequencer submission strategy with blob-fee history and target price (#1659)
Co-authored-by: jonastheis <4181434+jonastheis@users.noreply.github.com>
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
2025-05-19 22:32:28 +02:00
colin
fedfa04c2b refactor(coordinator): simplify logic post-Euclid (#1652) 2025-05-16 23:37:52 +08:00
colin
0d8b00c3de fix(gas-oracle): nil pointer error and allow update gas price when no diff (#1657) 2025-05-12 12:35:42 +08:00
colin
826357ab5d fix(rollup-relayer): update commit status logic (#1656) 2025-05-10 18:46:36 +08:00
georgehao
d26381cba3 upgrade cargo chef to 0.1.71 (#1655) 2025-05-08 21:40:25 +08:00
georgehao
0e65686ce4 upgrade intermediate rust to 1.86.0 (#1654) 2025-05-08 19:30:13 +08:00
georgehao
6dd878eaca upgrade intermediate rust version (#1653) 2025-05-08 17:57:15 +08:00
colin
a18fe06440 feat: openvm v0.3.0 (#1648)
Co-authored-by: Velaciela <git.rover@outlook.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-04-24 22:06:35 +08:00
colin
3ac69bec51 feat(rollup-relayer): add a tool to analyze chunk/batch/bundle proposing (#1645)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-04-23 13:41:03 +08:00
colin
e80f030246 feat(gas-oracle): add a warn log when entering fallback mode (#1646) 2025-04-16 18:05:14 +08:00
colin
a77f1413ee chore(gas-oracle): remove unused code (#1644) 2025-04-14 16:36:27 +08:00
colin
5b62692098 chore(gas-oracle): decommission L2 gas price oracle (#1643) 2025-04-14 15:35:23 +08:00
Morty
4f34e90f00 fix(zkvm-prover): update scroll-proving-sdk (#1642) 2025-04-14 13:38:52 +08:00
colin
6b1b822c81 fix(coordinator): set v4.4.56 as minimum prover version (#1641)
Co-authored-by: Morty <yiweichi1@gmail.com>
2025-04-11 16:35:39 +08:00
colin
a34c01d90b fix(coordinator): support darwin chunk provers (#1640)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-04-09 19:04:20 +08:00
colin
0578aab3ae feat: openvm euclid v2 (#1613)
Signed-off-by: noelwei <fan@scroll.io>
Co-authored-by: jonastheis <4181434+jonastheis@users.noreply.github.com>
Co-authored-by: Ömer Faruk Irmak <omerfirmak@gmail.com>
Co-authored-by: noelwei <fan@scroll.io>
Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Morty <70688412+yiweichi@users.noreply.github.com>
Co-authored-by: omerfirmak <omerfirmak@users.noreply.github.com>
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: kunxian xia <xiakunxian130@gmail.com>
Co-authored-by: Morty <yiweichi1@gmail.com>
Co-authored-by: Velaciela <git.rover@outlook.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-04-04 16:44:09 +08:00
colin
55dfbf6735 fix(rollup-relayer): rollup status overwrite (#1638) 2025-04-01 14:16:56 +08:00
Péter Garamvölgyi
f3ddf43439 feat: use specific go version for builder images (#1634) 2025-03-17 16:42:23 +01:00
goofylfg
a2582dcc3f fix(prover): fix clap deprecated warnings (#1632) 2025-03-14 16:42:14 +01:00
Jonas Theis
0a1868cec1 fix(relayer): fix rolling hash computation (#1628)
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
2025-03-13 13:34:35 +08:00
Jonas Theis
fcfbc53252 feat(chunk proposer): add config parameter to limit chunk proposing by L2 gas (#1622)
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Thegaram <Thegaram@users.noreply.github.com>
2025-03-12 17:38:19 +08:00
112 changed files with 3410 additions and 10137 deletions


@@ -42,6 +42,10 @@ jobs:
uses: Swatinem/rust-cache@v2
with:
workspaces: "common/libzkp/impl -> target"
- name: Setup SSH for private repos
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
- name: Lint
working-directory: 'common'
run: |


@@ -307,6 +307,13 @@ jobs:
REPOSITORY: coordinator-api
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Setup SSH for private repos
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
- name: Run custom script
run: |
./build/dockerfiles/coordinator-api/init-openvm.sh
- name: Build and push
uses: docker/build-push-action@v3
env:


@@ -9,9 +9,13 @@ on:
type: choice
options:
- "1.20"
- "1.20.14"
- "1.21"
- "1.21.13"
- "1.22"
- "1.22.12"
- "1.23"
- "1.23.7"
default: "1.21"
RUST_VERSION:
description: "Rust toolchain version"
@@ -20,6 +24,7 @@ on:
options:
- nightly-2023-12-03
- nightly-2022-12-10
- 1.86.0
default: "nightly-2023-12-03"
PYTHON_VERSION:
description: "Python version"
@@ -43,6 +48,7 @@ on:
type: choice
options:
- 0.1.41
- 0.1.71
BASE_IMAGE:
description: "which intermediate image you want to update"
required: true


@@ -1,99 +0,0 @@
name: Prover
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'prover/**'
- '.github/workflows/prover.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'prover/**'
- '.github/workflows/prover.yml'
defaults:
run:
working-directory: 'prover'
jobs:
skip_check:
runs-on: ubuntu-latest
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- id: skip_check
uses: fkirc/skip-duplicate-actions@v5
with:
cancel_others: 'true'
concurrent_skipping: 'same_content_newer'
paths_ignore: '["**/README.md"]'
fmt:
needs: [skip_check]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
components: rustfmt
- name: Cargo cache
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Cargo check
run: cargo check --all-features
- name: Cargo fmt
run: cargo fmt --all -- --check
clippy:
needs: [skip_check, fmt]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
components: clippy
- name: Cargo cache
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Run clippy
run: cargo clippy --all-features --all-targets -- -D warnings
compile:
needs: [skip_check, clippy]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Test
run: |
make prover


@@ -11,7 +11,7 @@ Please note that this project is released with a [Contributor Code of Conduct][c
## Contribute to Scroll
Did you know there are many ways of contributing to Scroll? If you are looking to contribute by adding Scroll to existing Dev Tools or by doing integrations, please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. If you are looking to contribute to Scroll's Halo2 zkEVM circuits, please refer to the [zkEVM circuits](https://github.com/scroll-tech/zkevm-circuits) repo. This repository covers the Scroll infrastructure and smart contracts; if you want to contribute to these areas, continue reading this document.
Did you know there are many ways of contributing to Scroll? If you are looking to contribute by adding Scroll to existing Dev Tools or by doing integrations, please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. This repository covers the Scroll infrastructure and smart contracts; if you want to contribute to these areas, continue reading this document.
## Issues and PRs


@@ -9,6 +9,10 @@ RUN cargo chef prepare --recipe-path recipe.json
FROM chef as zkp-builder
COPY ./common/libzkp/impl/rust-toolchain ./
COPY --from=planner /app/recipe.json recipe.json
# run ./build/dockerfiles/coordinator-api/init-openvm.sh to get openvm-gpu
COPY ./build/dockerfiles/coordinator-api/openvm-gpu /openvm-gpu
COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
RUN cargo chef cook --release --recipe-path recipe.json
COPY ./common/libzkp/impl .


@@ -0,0 +1,24 @@
# openvm
# same order and features as zkvm-prover/Cargo.toml.gpu
[patch."ssh://git@github.com/scroll-tech/openvm-gpu.git"]
openvm = { path = "/openvm-gpu/crates/toolchain/openvm", default-features = false }
openvm-algebra-complex-macros = { path = "/openvm-gpu/extensions/algebra/complex-macros", default-features = false }
openvm-algebra-guest = { path = "/openvm-gpu/extensions/algebra/guest", default-features = false }
openvm-bigint-guest = { path = "/openvm-gpu/extensions/bigint/guest", default-features = false }
openvm-build = { path = "/openvm-gpu/crates/toolchain/build", default-features = false }
openvm-circuit = { path = "/openvm-gpu/crates/vm", default-features = false }
openvm-custom-insn = { path = "/openvm-gpu/crates/toolchain/custom_insn", default-features = false }
openvm-continuations = { path = "/openvm-gpu/crates/continuations", default-features = false }
openvm-ecc-guest = { path = "/openvm-gpu/extensions/ecc/guest", default-features = false }
openvm-instructions ={ path = "/openvm-gpu/crates/toolchain/instructions", default-features = false }
openvm-keccak256-guest = { path = "/openvm-gpu/extensions/keccak256/guest", default-features = false }
openvm-native-circuit = { path = "/openvm-gpu/extensions/native/circuit", default-features = false }
openvm-native-compiler = { path = "/openvm-gpu/extensions/native/compiler", default-features = false }
openvm-native-recursion = { path = "/openvm-gpu/extensions/native/recursion", default-features = false }
openvm-native-transpiler = { path = "/openvm-gpu/extensions/native/transpiler", default-features = false }
openvm-pairing-guest = { path = "/openvm-gpu/extensions/pairing/guest", default-features = false }
openvm-rv32im-guest = { path = "/openvm-gpu/extensions/rv32im/guest", default-features = false }
openvm-rv32im-transpiler = { path = "/openvm-gpu/extensions/rv32im/transpiler", default-features = false }
openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics"] }
openvm-sha256-guest = { path = "/openvm-gpu/extensions/sha256/guest", default-features = false }
openvm-transpiler = { path = "/openvm-gpu/crates/toolchain/transpiler", default-features = false }


@@ -0,0 +1,2 @@
[url "https://github.com/"]
insteadOf = ssh://git@github.com/


@@ -0,0 +1,12 @@
#!/bin/bash
set -uex
OPENVM_GPU_COMMIT=dfa10b4
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# checkout openvm-gpu
if [ ! -d $DIR/openvm-gpu ]; then
git clone git@github.com:scroll-tech/openvm-gpu.git $DIR/openvm-gpu
fi
cd $DIR/openvm-gpu && git fetch && git checkout ${OPENVM_GPU_COMMIT}


@@ -1,5 +1,5 @@
ARG CUDA_VERSION=11.7.1
ARG GO_VERSION=1.21
ARG GO_VERSION=1.22.12
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
@@ -36,7 +36,7 @@ RUN if [ "$(uname -m)" = "x86_64" ]; then \
else \
echo "Unsupported architecture"; exit 1; \
fi
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
RUN wget https://go.dev/dl/go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
ENV PATH="/usr/local/go/bin:${PATH}"


@@ -1,4 +1,4 @@
ARG GO_VERSION=1.21
ARG GO_VERSION=1.22.12
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
@@ -32,7 +32,7 @@ RUN if [ "$(uname -m)" = "x86_64" ]; then \
else \
echo "Unsupported architecture"; exit 1; \
fi
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
RUN wget https://go.dev/dl/go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
ENV PATH="/usr/local/go/bin:${PATH}"

File diff suppressed because it is too large


@@ -14,8 +14,8 @@ ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-openvm-v1.0.0-rc.1" }
[dependencies]
euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.1.0-rc.6", package = "scroll-zkvm-prover" }
euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.1.0-rc.6", package = "scroll-zkvm-verifier" }
euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.3.0", package = "scroll-zkvm-prover" }
euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.3.0", package = "scroll-zkvm-verifier" }
base64 = "0.13.0"
env_logger = "0.9.0"


@@ -26,12 +26,6 @@ pub unsafe extern "C" fn verify_chunk_proof(
fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
let fork_name_str = c_char_to_str(fork_name);
// Skip verification for darwinV2 as we can't host darwinV2 and euclid verifiers on the same
// binary.
if fork_name_str == "darwinV2" {
return true as c_char;
}
let proof = c_char_to_vec(proof);
let verifier = verifier::get_verifier(fork_name_str);


@@ -1,9 +1,11 @@
#![allow(static_mut_refs)]
mod euclid;
mod euclidv2;
use anyhow::{bail, Result};
use euclid::EuclidVerifier;
use euclidv2::EuclidV2Verifier;
use serde::{Deserialize, Serialize};
use std::{cell::OnceCell, path::Path, rc::Rc};
@@ -51,7 +53,17 @@ pub fn init(config: VerifierConfig) {
unsafe {
VERIFIER_LOW
.set(VerifierPair(
config.high_version_circuit.fork_name,
"euclid".to_string(),
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();
}
let verifier = EuclidV2Verifier::new(&config.high_version_circuit.assets_path);
unsafe {
VERIFIER_HIGH
.set(VerifierPair(
"euclidV2".to_string(),
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();


@@ -4,13 +4,13 @@ use anyhow::Result;
use crate::utils::panic_catch;
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifier, ChunkVerifier};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV1, ChunkVerifier};
use std::{fs::File, path::Path};
pub struct EuclidVerifier {
chunk_verifier: ChunkVerifier,
batch_verifier: BatchVerifier,
bundle_verifier: BundleVerifier,
bundle_verifier: BundleVerifierEuclidV1,
}
impl EuclidVerifier {
@@ -24,7 +24,7 @@ impl EuclidVerifier {
.expect("Setting up chunk verifier"),
batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up batch verifier"),
bundle_verifier: BundleVerifier::setup(&config, &exe, &verifier_bin)
bundle_verifier: BundleVerifierEuclidV1::setup(&config, &exe, &verifier_bin)
.expect("Setting up bundle verifier"),
}
}


@@ -0,0 +1,65 @@
use super::{ProofVerifier, TaskType, VKDump};
use anyhow::Result;
use crate::utils::panic_catch;
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
use std::{fs::File, path::Path};
pub struct EuclidV2Verifier {
chunk_verifier: ChunkVerifier,
batch_verifier: BatchVerifier,
bundle_verifier: BundleVerifierEuclidV2,
}
impl EuclidV2Verifier {
pub fn new(assets_dir: &str) -> Self {
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
let config = Path::new(assets_dir).join("root-verifier-vm-config");
let exe = Path::new(assets_dir).join("root-verifier-committed-exe");
Self {
chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up chunk verifier"),
batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up batch verifier"),
bundle_verifier: BundleVerifierEuclidV2::setup(&config, &exe, &verifier_bin)
.expect("Setting up bundle verifier"),
}
}
}
impl ProofVerifier for EuclidV2Verifier {
fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
self.chunk_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
self.batch_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
self.bundle_verifier
.verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
}
})
.map_err(|err_str: String| anyhow::anyhow!(err_str))
}
fn dump_vk(&self, file: &Path) {
let f = File::create(file).expect("Failed to open file to dump VK");
let dump = VKDump {
chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
};
serde_json::to_writer(f, &dump).expect("Failed to dump VK");
}
}


@@ -276,8 +276,8 @@ const (
SenderTypeFinalizeBatch
// SenderTypeL1GasOracle indicates a sender from L2 responsible for updating L1 gas prices.
SenderTypeL1GasOracle
// SenderTypeL2GasOracle indicates a sender from L1 responsible for updating L2 gas prices.
SenderTypeL2GasOracle
// SenderTypeL2GasOracleDeprecated indicates a sender from L1 responsible for updating L2 gas prices, which is deprecated.
SenderTypeL2GasOracleDeprecated
)
// String returns a string representation of the SenderType.
@@ -289,8 +289,8 @@ func (t SenderType) String() string {
return "SenderTypeFinalizeBatch"
case SenderTypeL1GasOracle:
return "SenderTypeL1GasOracle"
case SenderTypeL2GasOracle:
return "SenderTypeL2GasOracle"
case SenderTypeL2GasOracleDeprecated:
return "SenderTypeL2GasOracleDeprecated"
default:
return fmt.Sprintf("Unknown SenderType (%d)", int32(t))
}


@@ -173,9 +173,9 @@ func TestSenderType(t *testing.T) {
"SenderTypeL1GasOracle",
},
{
"SenderTypeL2GasOracle",
SenderTypeL2GasOracle,
"SenderTypeL2GasOracle",
"SenderTypeL2GasOracleDeprecated",
SenderTypeL2GasOracleDeprecated,
"SenderTypeL2GasOracleDeprecated",
},
{
"Invalid Value",


@@ -4,12 +4,17 @@ import (
"encoding/json"
"errors"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
)
const (
euclidFork = "euclid"
EuclidFork = "euclid"
EuclidV2Fork = "euclidV2"
EuclidV2ForkNameForProver = "euclidv2"
)
// ProofType represents the type of task.
@@ -39,189 +44,102 @@ const (
ProofTypeBundle
)
// ChunkTaskDetail is a type containing ChunkTask detail.
// ChunkTaskDetail is a type containing the details of a chunk task.
type ChunkTaskDetail struct {
BlockHashes []common.Hash `json:"block_hashes"`
// use one of the strings EuclidFork / EuclidV2Fork
ForkName string `json:"fork_name"`
BlockHashes []common.Hash `json:"block_hashes"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
}
// Byte48 is a hex-encoded big integer with a fixed length of 48 bytes
type Byte48 struct {
hexutil.Big
}
func (e Byte48) MarshalText() ([]byte, error) {
i := e.ToInt()
// override the default big-integer encoding
if sign := i.Sign(); sign < 0 {
// sanity check
return nil, errors.New("Byte48 must be positive integer")
} else {
s := i.Text(16)
if len(s) > 96 {
return nil, errors.New("integer Exceed 384bit")
}
return []byte(fmt.Sprintf("0x%0*s", 96, s)), nil
}
}
func isString(input []byte) bool {
return len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"'
}
// hexutil.Big is limited to 256 bits, so we have to override it ...
func (e *Byte48) UnmarshalJSON(input []byte) error {
if !isString(input) {
return errors.New("not hex string")
}
b, err := hexutil.Decode(string(input[1 : len(input)-1]))
if err != nil {
return err
}
if len(b) != 48 {
return fmt.Errorf("not a 48 bytes hex string: %d", len(b))
}
var dec big.Int
dec.SetBytes(b)
*e = Byte48{(hexutil.Big)(dec)}
return nil
}
// BatchTaskDetail is a type containing BatchTask detail.
type BatchTaskDetail struct {
ChunkInfos []*ChunkInfo `json:"chunk_infos"`
ChunkProofs []ChunkProof `json:"chunk_proofs"`
BatchHeader interface{} `json:"batch_header"`
BlobBytes []byte `json:"blob_bytes"`
KzgProof []byte `json:"kzg_proof"`
KzgCommitment []byte `json:"kzg_commitment"`
Challenge common.Hash `json:"challenge"`
// use one of the strings EuclidFork / EuclidV2Fork
ForkName string `json:"fork_name"`
ChunkInfos []*ChunkInfo `json:"chunk_infos"`
ChunkProofs []*OpenVMChunkProof `json:"chunk_proofs"`
BatchHeader interface{} `json:"batch_header"`
BlobBytes []byte `json:"blob_bytes"`
KzgProof Byte48 `json:"kzg_proof,omitempty"`
KzgCommitment Byte48 `json:"kzg_commitment,omitempty"`
ChallengeDigest common.Hash `json:"challenge_digest,omitempty"`
}
// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
type BundleTaskDetail struct {
BatchProofs []BatchProof `json:"batch_proofs"`
// use one of the strings EuclidFork / EuclidV2Fork
ForkName string `json:"fork_name"`
BatchProofs []*OpenVMBatchProof `json:"batch_proofs"`
BundleInfo *OpenVMBundleInfo `json:"bundle_info,omitempty"`
}
// ChunkInfo is for calculating pi_hash for chunk
type ChunkInfo struct {
ChainID uint64 `json:"chain_id"`
PrevStateRoot common.Hash `json:"prev_state_root"`
PostStateRoot common.Hash `json:"post_state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
DataHash common.Hash `json:"data_hash"`
IsPadding bool `json:"is_padding"`
TxBytes []byte `json:"tx_bytes"`
TxBytesHash common.Hash `json:"tx_data_digest"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
ChainID uint64 `json:"chain_id"`
PrevStateRoot common.Hash `json:"prev_state_root"`
PostStateRoot common.Hash `json:"post_state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
DataHash common.Hash `json:"data_hash"`
IsPadding bool `json:"is_padding"`
TxBytes []byte `json:"tx_bytes"`
TxBytesHash common.Hash `json:"tx_data_digest"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
TxDataLength uint64 `json:"tx_data_length"`
InitialBlockNumber uint64 `json:"initial_block_number"`
BlockCtxs []BlockContextV2 `json:"block_ctxs"`
}
// SubCircuitRowUsage tracing info added in v0.11.0rc8
type SubCircuitRowUsage struct {
Name string `json:"name"`
RowNumber uint64 `json:"row_number"`
}
// ChunkProof
type ChunkProof interface {
Proof() []byte
}
// NewChunkProof creates a new ChunkProof instance.
func NewChunkProof(hardForkName string) ChunkProof {
switch hardForkName {
case euclidFork:
return &OpenVMChunkProof{}
default:
return &Halo2ChunkProof{}
}
}
// Halo2ChunkProof includes the proof info that are required for chunk verification and rollup.
type Halo2ChunkProof struct {
StorageTrace []byte `json:"storage_trace,omitempty"`
Protocol []byte `json:"protocol"`
RawProof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between coordinator computation and prover computation
ChunkInfo *ChunkInfo `json:"chunk_info,omitempty"`
GitVersion string `json:"git_version,omitempty"`
RowUsages []SubCircuitRowUsage `json:"row_usages,omitempty"`
}
// Proof returns the proof bytes of a ChunkProof
func (ap *Halo2ChunkProof) Proof() []byte {
return ap.RawProof
}
// BatchProof
type BatchProof interface {
SanityCheck() error
Proof() []byte
}
// NewBatchProof creates a new BatchProof instance.
func NewBatchProof(hardForkName string) BatchProof {
switch hardForkName {
case euclidFork:
return &OpenVMBatchProof{}
default:
return &Halo2BatchProof{}
}
}
// Halo2BatchProof includes the proof info that are required for batch verification and rollup.
type Halo2BatchProof struct {
Protocol []byte `json:"protocol"`
RawProof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between coordinator computation and prover computation
BatchHash common.Hash `json:"batch_hash"`
GitVersion string `json:"git_version,omitempty"`
}
// Proof returns the proof bytes of a BatchProof
func (ap *Halo2BatchProof) Proof() []byte {
return ap.RawProof
}
// SanityCheck checks whether a BatchProof is in a legal format
func (ap *Halo2BatchProof) SanityCheck() error {
if ap == nil {
return errors.New("agg_proof is nil")
}
if len(ap.RawProof) == 0 {
return errors.New("proof not ready")
}
if len(ap.RawProof)%32 != 0 {
return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.RawProof))
}
if len(ap.Instances) == 0 {
return errors.New("instance not ready")
}
if len(ap.Vk) == 0 {
return errors.New("vk not ready")
}
return nil
}
// BundleProof
type BundleProof interface {
SanityCheck() error
Proof() []byte
}
// NewBundleProof creates a new BundleProof instance.
func NewBundleProof(hardForkName string) BundleProof {
switch hardForkName {
case euclidFork:
return &OpenVMBundleProof{}
default:
return &Halo2BundleProof{}
}
}
// BundleProof includes the proof info that are required for verification of a bundle of batch proofs.
type Halo2BundleProof struct {
RawProof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between coordinator computation and prover computation
GitVersion string `json:"git_version,omitempty"`
}
// Proof returns the proof bytes of a BundleProof
func (ap *Halo2BundleProof) Proof() []byte {
return ap.RawProof
}
// SanityCheck checks whether a BundleProof is in a legal format
func (ap *Halo2BundleProof) SanityCheck() error {
if ap == nil {
return errors.New("agg_proof is nil")
}
if len(ap.RawProof) == 0 {
return errors.New("proof not ready")
}
if len(ap.RawProof)%32 != 0 {
return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.RawProof))
}
if len(ap.Instances) == 0 {
return errors.New("instance not ready")
}
if len(ap.Vk) == 0 {
return errors.New("vk not ready")
}
return nil
// BlockContextV2 is the block context for euclid v2
type BlockContextV2 struct {
Timestamp uint64 `json:"timestamp"`
BaseFee hexutil.Big `json:"base_fee"`
GasLimit uint64 `json:"gas_limit"`
NumTxs uint16 `json:"num_txs"`
NumL1Msgs uint16 `json:"num_l1_msgs"`
}
// Proof for the flattened VM proof
@@ -258,12 +176,14 @@ func (p *OpenVMChunkProof) Proof() []byte {
// OpenVMBatchInfo is for calculating pi_hash for batch header
type OpenVMBatchInfo struct {
ParentBatchHash common.Hash `json:"parent_batch_hash"`
ParentStateRoot common.Hash `json:"parent_state_root"`
StateRoot common.Hash `json:"state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
BatchHash common.Hash `json:"batch_hash"`
ChainID uint64 `json:"chain_id"`
ParentBatchHash common.Hash `json:"parent_batch_hash"`
ParentStateRoot common.Hash `json:"parent_state_root"`
StateRoot common.Hash `json:"state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
BatchHash common.Hash `json:"batch_hash"`
ChainID uint64 `json:"chain_id"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
}
// BatchProof includes the proof info that are required for batch verification and rollup.
@@ -323,6 +243,7 @@ type OpenVMBundleInfo struct {
NumBatches uint32 `json:"num_batches"`
PrevBatchHash common.Hash `json:"prev_batch_hash"`
BatchHash common.Hash `json:"batch_hash"`
MsgQueueHash common.Hash `json:"msg_queue_hash"`
}
// OpenVMBundleProof includes the proof info that are required for verification of a bundle of batch proofs.
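
To make the fixed-width Byte48 encoding concrete: a minimal, hypothetical sketch (not part of this diff) that round-trips a 48-byte KZG value through JSON. The import path follows the `scroll-tech/common/types/message` package used elsewhere in this changeset; a 48-byte value always serializes to "0x" plus 96 zero-padded hex characters, the 98-character length asserted in the new test below.

package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common/hexutil"

	"scroll-tech/common/types/message"
)

func main() {
	// Hypothetical 48-byte commitment: zero padding with a final byte of 1.
	commitment := make([]byte, 48)
	commitment[47] = 0x01

	b48 := message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(commitment))}

	out, err := json.Marshal(b48) // encoding/json picks up Byte48.MarshalText
	if err != nil {
		panic(err)
	}
	fmt.Println(len(out) - 2) // 98: "0x" followed by 96 zero-padded hex characters

	var back message.Byte48
	if err := json.Unmarshal(out, &back); err != nil { // uses Byte48.UnmarshalJSON
		panic(err)
	}
	fmt.Println(back.ToInt().Int64()) // 1
}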


@@ -0,0 +1,22 @@
package message
import (
"fmt"
"testing"
)
func TestBytes48(t *testing.T) {
ti := &Byte48{}
ti.UnmarshalText([]byte("0x1"))
if s, err := ti.MarshalText(); err == nil {
if len(s) != 98 {
panic(fmt.Sprintf("wrong str: %s", s))
}
}
ti.UnmarshalText([]byte("0x0"))
if s, err := ti.MarshalText(); err == nil {
if len(s) != 98 {
panic(fmt.Sprintf("wrong str: %s", s))
}
}
}


@@ -20,9 +20,12 @@ var (
}
// RollupRelayerFlags contains flags only used in rollup-relayer
RollupRelayerFlags = []cli.Flag{
&ImportGenesisFlag,
&MinCodecVersionFlag,
}
// ProposerToolFlags contains flags only used in proposer tool
ProposerToolFlags = []cli.Flag{
&StartL2BlockFlag,
}
// ConfigFileFlag load json type config file.
ConfigFileFlag = cli.StringFlag{
Name: "config",
@@ -73,12 +76,6 @@ var (
Category: "METRICS",
Value: 6060,
}
// ImportGenesisFlag import genesis batch during startup
ImportGenesisFlag = cli.BoolFlag{
Name: "import-genesis",
Usage: "Import genesis batch into L1 contract during startup",
Value: false,
}
// ServicePortFlag is the port the service will listen on
ServicePortFlag = cli.IntFlag{
Name: "service.port",
@@ -97,4 +94,10 @@ var (
Usage: "Minimum required codec version for the chunk/batch/bundle proposers",
Required: true,
}
// StartL2BlockFlag indicates the start L2 block number for proposer tool
StartL2BlockFlag = cli.Uint64Flag{
Name: "start-l2-block",
Usage: "Start L2 block number for proposer tool",
Value: 0,
}
)
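
As a rough sketch of how the new proposer-tool flag would be wired into a urfave/cli v2 app (the app name and action body are assumptions for illustration, not the actual tool's entry point):

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

// startL2BlockFlag mirrors the StartL2BlockFlag declaration above.
var startL2BlockFlag = cli.Uint64Flag{
	Name:  "start-l2-block",
	Usage: "Start L2 block number for proposer tool",
	Value: 0,
}

func main() {
	app := &cli.App{
		Name:  "proposer-tool", // hypothetical name
		Flags: []cli.Flag{&startL2BlockFlag},
		Action: func(ctx *cli.Context) error {
			fmt.Println("proposing from L2 block", ctx.Uint64("start-l2-block"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}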


@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.4.96"
var tag = "v4.5.11"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {


@@ -90,18 +90,10 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
cfg.ProverManager = &coordinatorConfig.ProverManager{
ProversPerSession: 1,
Verifier: &coordinatorConfig.VerifierConfig{
MockMode: true,
LowVersionCircuit: &coordinatorConfig.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "darwin",
MinProverVersion: "v4.2.0",
},
HighVersionCircuit: &coordinatorConfig.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "darwinV2",
MinProverVersion: "v4.3.0",
ForkName: "euclidV2",
MinProverVersion: "v4.4.89",
},
},
BatchCollectionTimeSec: 60,


@@ -62,14 +62,14 @@ func action(ctx *cli.Context) error {
return fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", taskID)
}
var batchProofs []message.BatchProof
var batchProofs []*message.OpenVMBatchProof
for _, batch := range batches {
proof := message.NewBatchProof("darwinV2")
var proof message.OpenVMBatchProof
if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
log.Error("failed to unmarshal batch proof")
return fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, taskID, batch.Hash)
}
batchProofs = append(batchProofs, proof)
batchProofs = append(batchProofs, &proof)
}
taskDetail := message.BundleTaskDetail{


@@ -7,17 +7,9 @@
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"verifier": {
"mock_mode": true,
"low_version_circuit": {
"params_path": "params",
"assets_path": "assets",
"fork_name": "darwin",
"min_prover_version": "v4.4.43"
},
"high_version_circuit": {
"params_path": "params",
"assets_path": "assets",
"fork_name": "darwinV2",
"fork_name": "euclidV2",
"min_prover_version": "v4.4.45"
}
}


@@ -2,8 +2,6 @@ module scroll-tech/coordinator
go 1.22
toolchain go1.22.2
require (
github.com/appleboy/gin-jwt/v2 v2.9.1
github.com/gin-gonic/gin v1.9.1
@@ -11,7 +9,7 @@ require (
github.com/google/uuid v1.6.0
github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
github.com/shopspring/decimal v1.3.1
github.com/stretchr/testify v1.10.0


@@ -177,8 +177,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=


@@ -51,7 +51,6 @@ type Config struct {
// CircuitConfig circuit items.
type CircuitConfig struct {
ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`
ForkName string `json:"fork_name"`
MinProverVersion string `json:"min_prover_version"`
@@ -59,8 +58,6 @@ type CircuitConfig struct {
// VerifierConfig load zk verifier config.
type VerifierConfig struct {
MockMode bool `json:"mock_mode"`
LowVersionCircuit *CircuitConfig `json:"low_version_circuit"`
HighVersionCircuit *CircuitConfig `json:"high_version_circuit"`
}
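
A minimal sketch of loading the trimmed verifier config: the structs are restated from the diff above so the snippet is self-contained, and the JSON mirrors the sample config with `low_version_circuit` removed.

package main

import (
	"encoding/json"
	"fmt"
)

// CircuitConfig and VerifierConfig restate the post-change structs above.
type CircuitConfig struct {
	AssetsPath       string `json:"assets_path"`
	ForkName         string `json:"fork_name"`
	MinProverVersion string `json:"min_prover_version"`
}

type VerifierConfig struct {
	MockMode           bool           `json:"mock_mode"`
	HighVersionCircuit *CircuitConfig `json:"high_version_circuit"`
}

func main() {
	raw := `{
		"mock_mode": true,
		"high_version_circuit": {
			"assets_path": "assets",
			"fork_name": "euclidV2",
			"min_prover_version": "v4.4.45"
		}
	}`
	var cfg VerifierConfig
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.HighVersionCircuit.ForkName) // euclidV2
}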


@@ -15,15 +15,18 @@ func TestConfig(t *testing.T) {
"prover_manager": {
"provers_per_session": 1,
"session_attempts": 5,
"external_prover_threshold": 32,
"bundle_collection_time_sec": 180,
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"verifier": {
"mock_mode": true,
"params_path": "",
"agg_vk_path": ""
"high_version_circuit": {
"assets_path": "assets",
"fork_name": "euclidV2",
"min_prover_version": "v4.4.45"
}
},
"max_verifier_workers": 4,
"min_prover_version": "v1.0.0"
"max_verifier_workers": 4
},
"db": {
"driver_name": "postgres",


@@ -26,7 +26,7 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
panic("proof receiver new verifier failure")
}
log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap, "bundleVerifier", vf.BundleVkMap, "openVmVerifier", vf.OpenVMVkMap)
log.Info("verifier created", "openVmVerifier", vf.OpenVMVkMap)
Auth = NewAuthController(db, cfg, vf)
GetTask = NewGetTaskController(cfg, chainCfg, db, reg)


@@ -9,6 +9,7 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
@@ -21,9 +22,6 @@ import (
type LoginLogic struct {
cfg *config.Config
challengeOrm *orm.Challenge
chunkVks map[string]struct{}
batchVKs map[string]struct{}
bundleVks map[string]struct{}
openVmVks map[string]struct{}
@@ -33,27 +31,14 @@ type LoginLogic struct {
// NewLoginLogic new a LoginLogic
func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
proverVersionHardForkMap := make(map[string][]string)
if version.CheckScrollRepoVersion(cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
log.Error("config file error, low verifier min_prover_version should not more than high verifier min_prover_version",
"low verifier min_prover_version", cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion,
"high verifier min_prover_version", cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion)
panic("verifier config file error")
}
var highHardForks []string
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.HighVersionCircuit.ForkName)
if cfg.ProverManager.Verifier.HighVersionCircuit.ForkName != "euclid" {
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.LowVersionCircuit.ForkName)
}
highHardForks = append(highHardForks, message.EuclidFork, message.EuclidV2Fork)
proverVersionHardForkMap[cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion] = highHardForks
proverVersionHardForkMap[cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion] = []string{cfg.ProverManager.Verifier.LowVersionCircuit.ForkName}
return &LoginLogic{
cfg: cfg,
chunkVks: vf.ChunkVKMap,
batchVKs: vf.BatchVKMap,
bundleVks: vf.BundleVkMap,
openVmVks: vf.OpenVMVkMap,
challengeOrm: orm.NewChallenge(db),
proverVersionHardForkMap: proverVersionHardForkMap,
@@ -73,46 +58,25 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
return errors.New("auth message verify failure")
}
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, login.Message.ProverVersion)
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion, login.Message.ProverVersion)
}
if len(login.Message.ProverTypes) > 0 {
vks := make(map[string]struct{})
for _, proverType := range login.Message.ProverTypes {
switch proverType {
case types.ProverTypeChunk:
for vk := range l.chunkVks {
vks[vk] = struct{}{}
}
case types.ProverTypeBatch:
for vk := range l.batchVKs {
vks[vk] = struct{}{}
}
for vk := range l.bundleVks {
vks[vk] = struct{}{}
}
case types.ProverTypeOpenVM:
for vk := range l.openVmVks {
vks[vk] = struct{}{}
}
default:
log.Error("invalid prover_type", "value", proverType, "prover name", login.Message.ProverName, "prover_version", login.Message.ProverVersion)
}
}
vks := make(map[string]struct{})
for vk := range l.openVmVks {
vks[vk] = struct{}{}
}
for _, vk := range login.Message.VKs {
if _, ok := vks[vk]; !ok {
log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
"prover_version", login.Message.ProverVersion, "message", login.Message)
if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
version.Version, login.Message.ProverVersion)
}
// if the prover reports the same prover version
return errors.New("incompatible vk. please check your params files or config files")
for _, vk := range login.Message.VKs {
if _, ok := vks[vk]; !ok {
log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
"prover_version", login.Message.ProverVersion, "message", login.Message)
if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
version.Version, login.Message.ProverVersion)
}
// if the prover reports the same prover version
return errors.New("incompatible vk. please check your params files or config files")
}
}
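
The net effect of the login simplification above is that verifying-key validation reduces to a set-membership check against the OpenVM keys (the prover-version fallback on mismatch is omitted here); a hypothetical distillation, with illustrative names rather than the coordinator's API:

package main

import (
	"errors"
	"fmt"
)

// checkVKs mirrors the simplified loop: every key the prover reports must be
// one of the verifier's OpenVM verifying keys.
func checkVKs(openVmVks map[string]struct{}, reported []string) error {
	for _, vk := range reported {
		if _, ok := openVmVks[vk]; !ok {
			return errors.New("incompatible vk. please check your params files or config files")
		}
	}
	return nil
}

func main() {
	known := map[string]struct{}{"vk-chunk": {}, "vk-batch": {}} // placeholder keys
	fmt.Println(checkVKs(known, []string{"vk-chunk"}))   // <nil>
	fmt.Println(checkVKs(known, []string{"vk-unknown"})) // incompatible vk. ...
}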


@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"math/big"
"time"
"github.com/gin-gonic/gin"
@@ -11,6 +12,7 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
@@ -121,7 +123,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight)
log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBatchTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
}
}
@@ -195,33 +197,32 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
return nil, fmt.Errorf("no chunk found for batch task id:%s", task.TaskID)
}
var chunkProofs []message.ChunkProof
var chunkProofs []*message.OpenVMChunkProof
var chunkInfos []*message.ChunkInfo
for _, chunk := range chunks {
proof := message.NewChunkProof(hardForkName)
var proof message.OpenVMChunkProof
if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, task.TaskID, chunk.Hash)
}
chunkProofs = append(chunkProofs, proof)
chunkProofs = append(chunkProofs, &proof)
chunkInfo := message.ChunkInfo{
ChainID: bp.cfg.L2.ChainID,
PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
PostStateRoot: common.HexToHash(chunk.StateRoot),
WithdrawRoot: common.HexToHash(chunk.WithdrawRoot),
DataHash: common.HexToHash(chunk.Hash),
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
IsPadding: false,
}
if haloProot, ok := proof.(*message.Halo2ChunkProof); ok {
if haloProot.ChunkInfo != nil {
chunkInfo.TxBytes = haloProot.ChunkInfo.TxBytes
}
ChainID: bp.cfg.L2.ChainID,
PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
PostStateRoot: common.HexToHash(chunk.StateRoot),
WithdrawRoot: common.HexToHash(chunk.WithdrawRoot),
DataHash: common.HexToHash(chunk.Hash),
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
PostMsgQueueHash: common.HexToHash(chunk.PostL1MessageQueueHash),
IsPadding: false,
InitialBlockNumber: proof.MetaData.ChunkInfo.InitialBlockNumber,
BlockCtxs: proof.MetaData.ChunkInfo.BlockCtxs,
TxDataLength: proof.MetaData.ChunkInfo.TxDataLength,
}
chunkInfos = append(chunkInfos, &chunkInfo)
}
taskDetail, err := bp.getBatchTaskDetail(batch, chunkInfos, chunkProofs)
taskDetail, err := bp.getBatchTaskDetail(batch, chunkInfos, chunkProofs, hardForkName)
if err != nil {
return nil, fmt.Errorf("failed to get batch task detail, taskID:%s err:%w", task.TaskID, err)
}
@@ -238,6 +239,9 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
TaskData: string(chunkProofsBytes),
HardForkName: hardForkName,
}
log.Debug("TaskData", "task_id", task.TaskID, "task_type", message.ProofTypeBatch.String(), "hard_fork_name", hardForkName, "task_data", taskMsg.TaskData)
return taskMsg, nil
}
@@ -247,12 +251,19 @@ func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *or
}
}
func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []message.ChunkProof) (*message.BatchTaskDetail, error) {
func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []*message.OpenVMChunkProof, hardForkName string) (*message.BatchTaskDetail, error) {
taskDetail := &message.BatchTaskDetail{
ChunkInfos: chunkInfos,
ChunkProofs: chunkProofs,
}
if hardForkName == message.EuclidV2Fork {
taskDetail.ForkName = message.EuclidV2ForkNameForProver
} else {
log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
}
dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
switch dbBatchCodecVersion {
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7:
@@ -271,17 +282,13 @@ func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*
}
taskDetail.BatchHeader = batchHeader
taskDetail.BlobBytes = dbBatch.BlobBytes
if len(dbBatch.BlobDataProof) < 160 {
return nil, fmt.Errorf("blob data proof length is less than 160 bytes = %d, taskID: %s: %s", len(dbBatch.BlobDataProof), dbBatch.Hash, common.Bytes2Hex(dbBatch.BlobDataProof))
}
taskDetail.ChallengeDigest = common.HexToHash(dbBatch.ChallengeDigest)
// Memory layout of `BlobDataProof`: used in Codec.BlobDataProofForPointEvaluation()
// | z | y | kzg_commitment | kzg_proof |
// |---------|---------|----------------|-----------|
// | bytes32 | bytes32 | bytes48 | bytes48 |
taskDetail.KzgProof = dbBatch.BlobDataProof[112:160]
taskDetail.KzgCommitment = dbBatch.BlobDataProof[64:112]
taskDetail.Challenge = common.Hash(dbBatch.BlobDataProof[0:32])
taskDetail.KzgProof = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[112:160]))}
taskDetail.KzgCommitment = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[64:112]))}
return taskDetail, nil
}
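
For reference, the 160-byte `BlobDataProof` layout in the comment above can be sliced as follows; a hypothetical, self-contained sketch of the offsets rather than coordinator code:

package main

import "fmt"

// splitBlobDataProof slices the layout described above:
// | z | y | kzg_commitment | kzg_proof | = 32 + 32 + 48 + 48 = 160 bytes.
func splitBlobDataProof(p []byte) (z, y, commitment, proof []byte, err error) {
	if len(p) < 160 {
		return nil, nil, nil, nil, fmt.Errorf("blob data proof too short: %d bytes", len(p))
	}
	return p[0:32], p[32:64], p[64:112], p[112:160], nil
}

func main() {
	buf := make([]byte, 160)
	z, y, c, p, _ := splitBlobDataProof(buf)
	fmt.Println(len(z), len(y), len(c), len(p)) // 32 32 48 48
}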


@@ -9,6 +9,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
@@ -120,7 +121,7 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty bundle, the prover already failed this task", "height", getTaskParameter.ProverHeight)
log.Debug("get empty bundle, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBundleTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
}
}
@@ -194,19 +195,42 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
return nil, fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", task.TaskID)
}
var batchProofs []message.BatchProof
parentBatch, err := bp.batchOrm.GetBatchByHash(ctx, batches[0].ParentBatchHash)
if err != nil {
return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
}
var batchProofs []*message.OpenVMBatchProof
for _, batch := range batches {
proof := message.NewBatchProof(hardForkName)
var proof message.OpenVMBatchProof
if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
return nil, fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, task.TaskID, batch.Hash)
}
batchProofs = append(batchProofs, proof)
batchProofs = append(batchProofs, &proof)
}
taskDetail := message.BundleTaskDetail{
BatchProofs: batchProofs,
}
if hardForkName == message.EuclidV2Fork {
taskDetail.ForkName = message.EuclidV2ForkNameForProver
} else {
log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
}
taskDetail.BundleInfo = &message.OpenVMBundleInfo{
ChainID: bp.cfg.L2.ChainID,
PrevStateRoot: common.HexToHash(parentBatch.StateRoot),
PostStateRoot: common.HexToHash(batches[len(batches)-1].StateRoot),
WithdrawRoot: common.HexToHash(batches[len(batches)-1].WithdrawRoot),
NumBatches: uint32(len(batches)),
PrevBatchHash: common.HexToHash(batches[0].ParentBatchHash),
BatchHash: common.HexToHash(batches[len(batches)-1].Hash),
MsgQueueHash: common.HexToHash(batches[len(batches)-1].PostL1MessageQueueHash),
}
batchProofsBytes, err := json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal batch proofs, taskID:%s err:%w", task.TaskID, err)
@@ -219,6 +243,9 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
TaskData: string(batchProofsBytes),
HardForkName: hardForkName,
}
log.Debug("TaskData", "task_id", task.TaskID, "task_type", message.ProofTypeBundle.String(), "hard_fork_name", hardForkName, "task_data", taskMsg.TaskData)
return taskMsg, nil
}


@@ -9,6 +9,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
@@ -118,7 +119,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty chunk, the prover already failed this task", "height", getTaskParameter.ProverHeight)
log.Debug("get empty chunk, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpChunkTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
}
}
@@ -162,7 +163,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, hardForkName)
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, chunkTask, hardForkName)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("format prover task failure", "task_id", chunkTask.Hash, "err", err)
@@ -179,17 +180,28 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return taskMsg, nil
}
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, chunk *orm.Chunk, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
// Get block hashes.
blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)
if dbErr != nil || len(blockHashes) == 0 {
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%w", task.TaskID, dbErr)
}
var taskDetailBytes []byte
taskDetail := message.ChunkTaskDetail{
BlockHashes: blockHashes,
BlockHashes: blockHashes,
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
}
blockHashesBytes, err := json.Marshal(taskDetail)
if hardForkName == message.EuclidV2Fork {
taskDetail.ForkName = message.EuclidV2ForkNameForProver
} else {
log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
}
var err error
taskDetailBytes, err = json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal block hashes hash:%s, err:%w", task.TaskID, err)
}
@@ -198,10 +210,12 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeChunk),
TaskData: string(blockHashesBytes),
TaskData: string(taskDetailBytes),
HardForkName: hardForkName,
}
log.Debug("TaskData", "task_id", task.TaskID, "task_type", message.ProofTypeChunk.String(), "hard_fork_name", hardForkName, "task_data", proverTaskSchema.TaskData)
return proverTaskSchema, nil
}
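
To illustrate the chunk `TaskData` payload the code above now produces, a hypothetical sketch marshaling a `ChunkTaskDetail` with the euclidV2 fork name (hash values are placeholders):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"

	"scroll-tech/common/types/message"
)

func main() {
	detail := message.ChunkTaskDetail{
		ForkName:         message.EuclidV2ForkNameForProver,        // "euclidv2"
		BlockHashes:      []common.Hash{common.HexToHash("0x01")}, // placeholder hash
		PrevMsgQueueHash: common.HexToHash("0x02"),                // placeholder hash
	}
	out, err := json.Marshal(&detail)
	if err != nil {
		panic(err)
	}
	// {"fork_name":"euclidv2","block_hashes":[...],"prev_msg_queue_hash":"0x..."}
	fmt.Println(string(out))
}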


@@ -122,7 +122,7 @@ func (b *BaseProverTask) hardForkSanityCheck(ctx *gin.Context, taskCtx *proverTa
}
if _, ok := taskCtx.HardForkNames[hardForkName]; !ok {
return "", errors.New("to be assigned prover task's hard-fork name is not the same as prover")
return "", fmt.Errorf("to be assigned prover task's hard-fork name is not the same as prover, proverName: %s, proverVersion: %s, proverSupportHardForkNames: %s, taskHardForkName: %v", taskCtx.ProverName, taskCtx.ProverVersion, taskCtx.HardForkNames, hardForkName)
}
return hardForkName, nil
}


@@ -171,19 +171,19 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
switch message.ProofType(proofParameter.TaskType) {
case message.ProofTypeChunk:
chunkProof := message.NewChunkProof(hardForkName)
chunkProof := &message.OpenVMChunkProof{}
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &chunkProof); unmarshalErr != nil {
return unmarshalErr
}
success, verifyErr = m.verifier.VerifyChunkProof(chunkProof, hardForkName)
case message.ProofTypeBatch:
batchProof := message.NewBatchProof(hardForkName)
batchProof := &message.OpenVMBatchProof{}
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &batchProof); unmarshalErr != nil {
return unmarshalErr
}
success, verifyErr = m.verifier.VerifyBatchProof(batchProof, hardForkName)
case message.ProofTypeBundle:
bundleProof := message.NewBundleProof(hardForkName)
bundleProof := &message.OpenVMBundleProof{}
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &bundleProof); unmarshalErr != nil {
return unmarshalErr
}


@@ -10,31 +10,26 @@ import (
// NewVerifier Sets up a mock verifier.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
batchVKMap := map[string]struct{}{"mock_vk": {}}
chunkVKMap := map[string]struct{}{"mock_vk": {}}
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
return &Verifier{cfg: cfg, OpenVMVkMap: map[string]struct{}{"mock_vk": {}}}, nil
}
// VerifyChunkProof return a mock verification result for a ChunkProof.
func (v *Verifier) VerifyChunkProof(proof message.ChunkProof, forkName string) (bool, error) {
if string(proof.Proof()) == InvalidTestProof {
func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName string) (bool, error) {
if proof.VmProof != nil && string(proof.VmProof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil
}
// VerifyBatchProof return a mock verification result for a BatchProof.
func (v *Verifier) VerifyBatchProof(proof message.BatchProof, forkName string) (bool, error) {
if string(proof.Proof()) == InvalidTestProof {
func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName string) (bool, error) {
if proof.VmProof != nil && string(proof.VmProof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil
}
// VerifyBundleProof return a mock verification result for a BundleProof.
func (v *Verifier) VerifyBundleProof(proof message.BundleProof, forkName string) (bool, error) {
if string(proof.Proof()) == InvalidTestProof {
return false, nil
}
func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName string) (bool, error) {
return true, nil
}


@@ -7,11 +7,8 @@ import (
// InvalidTestProof invalid proof used in tests
const InvalidTestProof = "this is a invalid proof"
// Verifier represents a rust ffi to a halo2 verifier.
// Verifier represents a rust ffi to a verifier.
type Verifier struct {
cfg *config.VerifierConfig
ChunkVKMap map[string]struct{}
BatchVKMap map[string]struct{}
BundleVkMap map[string]struct{}
OpenVMVkMap map[string]struct{}
}


@@ -30,14 +30,12 @@ import (
// in `*config.CircuitConfig` being changed
type rustCircuitConfig struct {
ForkName string `json:"fork_name"`
ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`
}
func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
return &rustCircuitConfig{
ForkName: cfg.ForkName,
ParamsPath: cfg.ParamsPath,
AssetsPath: cfg.AssetsPath,
}
}
@@ -46,13 +44,11 @@ func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
// Define a brand new struct here is to eliminate side effects in case fields
// in `*config.VerifierConfig` being changed
type rustVerifierConfig struct {
LowVersionCircuit *rustCircuitConfig `json:"low_version_circuit"`
HighVersionCircuit *rustCircuitConfig `json:"high_version_circuit"`
}
func newRustVerifierConfig(cfg *config.VerifierConfig) *rustVerifierConfig {
return &rustVerifierConfig{
LowVersionCircuit: newRustCircuitConfig(cfg.LowVersionCircuit),
HighVersionCircuit: newRustCircuitConfig(cfg.HighVersionCircuit),
}
}
@@ -65,19 +61,6 @@ type rustVkDump struct {
// NewVerifier Sets up a rust ffi to call verify.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
if cfg.MockMode {
chunkVKMap := map[string]struct{}{"mock_vk": {}}
batchVKMap := map[string]struct{}{"mock_vk": {}}
bundleVKMap := map[string]struct{}{"mock_vk": {}}
openVMVkMap := map[string]struct{}{"mock_vk": {}}
return &Verifier{
cfg: cfg,
ChunkVKMap: chunkVKMap,
BatchVKMap: batchVKMap,
BundleVkMap: bundleVKMap,
OpenVMVkMap: openVMVkMap,
}, nil
}
verifierConfig := newRustVerifierConfig(cfg)
configBytes, err := json.Marshal(verifierConfig)
if err != nil {
@@ -93,34 +76,22 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
v := &Verifier{
cfg: cfg,
ChunkVKMap: make(map[string]struct{}),
BatchVKMap: make(map[string]struct{}),
BundleVkMap: make(map[string]struct{}),
OpenVMVkMap: make(map[string]struct{}),
}
if err := v.loadLowVersionVKs(cfg); err != nil {
if err := v.loadOpenVMVks(message.EuclidFork); err != nil {
return nil, err
}
if err := v.loadOpenVMVks(cfg.HighVersionCircuit.ForkName); err != nil {
if err := v.loadOpenVMVks(message.EuclidV2Fork); err != nil {
return nil, err
}
v.loadCurieVersionVKs()
return v, nil
}
// VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
func (v *Verifier) VerifyBatchProof(proof message.BatchProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, batch verifier disabled")
if string(proof.Proof()) == InvalidTestProof {
return false, nil
}
return true, nil
}
// VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Verifier.
func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName string) (bool, error) {
buf, err := json.Marshal(proof)
if err != nil {
return false, err
@@ -138,16 +109,8 @@ func (v *Verifier) VerifyBatchProof(proof message.BatchProof, forkName string) (
return verified != 0, nil
}
// VerifyChunkProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
func (v *Verifier) VerifyChunkProof(proof message.ChunkProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, verifier disabled")
if string(proof.Proof()) == InvalidTestProof {
return false, nil
}
return true, nil
}
// VerifyChunkProof Verify a ZkProof by marshaling it and sending it to the Verifier.
func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName string) (bool, error) {
buf, err := json.Marshal(proof)
if err != nil {
return false, err
@@ -166,15 +129,7 @@ func (v *Verifier) VerifyChunkProof(proof message.ChunkProof, forkName string) (
}
// VerifyBundleProof Verify a ZkProof for a bundle of batches, by marshaling it and verifying it via the EVM verifier.
func (v *Verifier) VerifyBundleProof(proof message.BundleProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, verifier disabled")
if string(proof.Proof()) == InvalidTestProof {
return false, nil
}
return true, nil
}
func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName string) (bool, error) {
buf, err := json.Marshal(proof)
if err != nil {
return false, err
@@ -204,31 +159,6 @@ func (v *Verifier) readVK(filePat string) (string, error) {
return base64.StdEncoding.EncodeToString(byt), nil
}
// load low version vks, current is darwin
func (v *Verifier) loadLowVersionVKs(cfg *config.VerifierConfig) error {
bundleVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_bundle.vkey"))
if err != nil {
return err
}
batchVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_batch.vkey"))
if err != nil {
return err
}
chunkVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_chunk.vkey"))
if err != nil {
return err
}
v.BundleVkMap[bundleVK] = struct{}{}
v.BatchVKMap[batchVK] = struct{}{}
v.ChunkVKMap[chunkVK] = struct{}{}
return nil
}
func (v *Verifier) loadCurieVersionVKs() {
v.BatchVKMap["AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD9jfGkei+f0wNYpkjW7JO12EfU7CjYVBo+PGku3zaQJI64lbn6BwyTBa4RfrPFpV5mP47ix0sXZ+Wt5wklMLRW7OIJb1yfCDm+gkSsp3/Zqrxt4SY4rQ4WtHfynTCQ0KDi78jNuiFvwxO3ub3DkgGVaxMkGxTRP/Vz6E7MCZMUBR5wZFcMzJn+73f0wYjDxfj00krg9O1VrwVxbVV1ycLR6oQLcOgm/l+xwth8io0vDpF9OY21gD5DgJn9GgcYe8KoRVEbEqApLZPdBibpcSMTY9czZI2LnFcqrDDmYvhEwgjhZrsTog2xLXOODoOupZ/is5ekQ9Gi0y871b1mLlCGA="] = struct{}{}
v.ChunkVKMap["AAAAGQAAAATyWEABRbJ6hQQ5/zLX1gTasr7349minA9rSgMS6gDeHwZKqikRiO3md+pXjjxMHnKQtmXYgMXhJSvlmZ+Ws+cheuly2X1RuNQzcZuRImaKPR9LJsVZYsXfJbuqdKX8p0Gj8G83wMJOmTzNVUyUol0w0lTU+CEiTpHOnxBsTF3EWaW3s1u4ycOgWt1c9M6s7WmaBZLYgAWYCunO5CLCLApNGbCASeck/LuSoedEri5u6HccCKU2khG6zl6W07jvYSbDVLJktbjRiHv+/HQix+K14j8boo8Z/unhpwXCsPxkQA=="] = struct{}{}
}
func (v *Verifier) loadOpenVMVks(forkName string) error {
tempFile := path.Join(os.TempDir(), "openVmVk.json")
defer func() {
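The hunk is cut off inside loadOpenVMVks. For orientation only, a sketch of how such a loader plausibly finishes: the rust library dumps the fork's verifying keys to the temp file as JSON, which is then read into OpenVMVkMap. The FFI dump call and the rustVkDump field name are assumptions (rustVkDump itself is declared earlier in this file, but its fields are not shown):

// Sketch only; the real body is truncated in this diff.
func (v *Verifier) loadOpenVMVksSketch(forkName string) error {
	tempFile := path.Join(os.TempDir(), "openVmVk.json")
	defer func() {
		_ = os.Remove(tempFile) // best-effort cleanup of the dump file
	}()
	// ... FFI call writing the VK dump for forkName into tempFile ...
	byt, err := os.ReadFile(tempFile)
	if err != nil {
		return err
	}
	var dump rustVkDump
	if err := json.Unmarshal(byt, &dump); err != nil {
		return err
	}
	for _, vk := range dump.Vks { // hypothetical field name
		v.OpenVMVkMap[vk] = struct{}{}
	}
	return nil
}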

View File

@@ -29,17 +29,9 @@ func TestFFI(t *testing.T) {
as := assert.New(t)
cfg := &config.VerifierConfig{
MockMode: false,
LowVersionCircuit: &config.CircuitConfig{
ParamsPath: *paramsPath,
AssetsPath: *assetsPathLo,
ForkName: "darwin",
MinProverVersion: "",
},
HighVersionCircuit: &config.CircuitConfig{
ParamsPath: *paramsPath,
AssetsPath: *assetsPathHi,
ForkName: "darwinV2",
ForkName: "euclidV2",
MinProverVersion: "",
},
}
@@ -48,43 +40,43 @@ func TestFFI(t *testing.T) {
as.NoError(err)
chunkProof1 := readChunkProof(*chunkProofPath1, as)
chunkOk1, err := v.VerifyChunkProof(chunkProof1, "darwinV2")
chunkOk1, err := v.VerifyChunkProof(chunkProof1, "euclidV2")
as.NoError(err)
as.True(chunkOk1)
t.Log("Verified chunk proof 1")
chunkProof2 := readChunkProof(*chunkProofPath2, as)
chunkOk2, err := v.VerifyChunkProof(chunkProof2, "darwinV2")
chunkOk2, err := v.VerifyChunkProof(chunkProof2, "euclidV2")
as.NoError(err)
as.True(chunkOk2)
t.Log("Verified chunk proof 2")
batchProof := readBatchProof(*batchProofPath, as)
batchOk, err := v.VerifyBatchProof(batchProof, "darwinV2")
batchOk, err := v.VerifyBatchProof(batchProof, "euclidV2")
as.NoError(err)
as.True(batchOk)
t.Log("Verified batch proof")
}
func readBatchProof(filePat string, as *assert.Assertions) types.BatchProof {
func readBatchProof(filePat string, as *assert.Assertions) *types.OpenVMBatchProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)
proof := &types.Halo2BatchProof{}
proof := &types.OpenVMBatchProof{}
as.NoError(json.Unmarshal(byt, proof))
return proof
}
func readChunkProof(filePat string, as *assert.Assertions) types.ChunkProof {
func readChunkProof(filePat string, as *assert.Assertions) *types.OpenVMChunkProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)
proof := &types.Halo2ChunkProof{}
proof := &types.OpenVMChunkProof{}
as.NoError(json.Unmarshal(byt, proof))
return proof

View File

@@ -19,20 +19,23 @@ type Batch struct {
db *gorm.DB `gorm:"column:-"`
// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
CodecVersion int16 `json:"codec_version" gorm:"column:codec_version"`
EnableCompress bool `json:"enable_compress" gorm:"column:enable_compress"`
BlobBytes []byte `json:"blob_bytes" gorm:"column:blob_bytes"`
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
CodecVersion int16 `json:"codec_version" gorm:"column:codec_version"`
PrevL1MessageQueueHash string `json:"prev_l1_message_queue_hash" gorm:"column:prev_l1_message_queue_hash"`
PostL1MessageQueueHash string `json:"post_l1_message_queue_hash" gorm:"column:post_l1_message_queue_hash"`
EnableCompress bool `json:"enable_compress" gorm:"column:enable_compress"`
BlobBytes []byte `json:"blob_bytes" gorm:"column:blob_bytes"`
ChallengeDigest string `json:"challenge_digest" gorm:"column:challenge_digest"`
// proof
ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"`
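The EuclidV2 additions (the message-queue hash chain endpoints and the challenge digest) ride along with the existing columns as ordinary string fields. A minimal sketch of writing them through GORM (session setup elided; all values are placeholders):

// Sketch: inserting a batch row with the new EuclidV2 columns populated.
func insertBatchSketch(db *gorm.DB) error {
	batch := &Batch{
		Index:                  42,
		Hash:                   "0xabc...", // placeholder values throughout
		PrevL1MessageQueueHash: "0x111...", // queue hash before this batch
		PostL1MessageQueueHash: "0x222...", // queue hash after this batch
		ChallengeDigest:        "0x333...", // blob challenge digest
	}
	return db.Create(batch).Error
}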

View File

@@ -22,7 +22,7 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
ProverVersion: "v0.0.1",
Challenge: "abcdef",
ProverProviderType: ProverProviderTypeInternal,
ProverTypes: []ProverType{ProverTypeBatch},
ProverTypes: []ProverType{ProverTypeOpenVM},
VKs: []string{"vk1", "vk2"},
},
PublicKey: publicKeyHex,
@@ -64,7 +64,7 @@ func TestGenerateSignature(t *testing.T) {
ProverVersion: "v4.4.45-37af5ef5-38a68e2-1c5093c",
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjQ4Mzg0ODUsIm9yaWdfaWF0IjoxNzI0ODM0ODg1LCJyYW5kb20iOiJ6QmdNZGstNGc4UzNUNTFrVEFsYk1RTXg2TGJ4SUs4czY3ejM2SlNuSFlJPSJ9.x9PvihhNx2w4_OX5uCrv8QJCNYVQkIi-K2k8XFXYmik",
ProverProviderType: ProverProviderTypeInternal,
ProverTypes: []ProverType{ProverTypeChunk},
ProverTypes: []ProverType{ProverTypeOpenVM},
VKs: []string{"mock_vk"},
},
PublicKey: publicKeyHex,

View File

@@ -2,7 +2,6 @@ package types
import (
"fmt"
"scroll-tech/common/types/message"
)
@@ -21,10 +20,10 @@ type ProverType uint8
func (r ProverType) String() string {
switch r {
case ProverTypeChunk:
return "prover type chunk"
case ProverTypeBatch:
return "prover type batch"
case ProverTypeChunkDeprecated:
return "prover type chunk (deprecated)"
case ProverTypeBatchDeprecated:
return "prover type batch (deprecated)"
case ProverTypeOpenVM:
return "prover type openvm"
default:
@@ -35,10 +34,10 @@ func (r ProverType) String() string {
const (
// ProverTypeUndefined is an unknown prover type
ProverTypeUndefined ProverType = iota
// ProverTypeChunk signals it's a chunk prover, which can prove chunk_tasks
ProverTypeChunk
// ProverTypeBatch signals it's a batch prover, which can prove batch_tasks and bundle_tasks
ProverTypeBatch
// ProverTypeChunkDeprecated signals it's a chunk prover, which can prove chunk_tasks (deprecated)
ProverTypeChunkDeprecated
// ProverTypeBatchDeprecated signals it's a batch prover, which can prove batch_tasks and bundle_tasks (deprecated)
ProverTypeBatchDeprecated
// ProverTypeOpenVM signals it's an openvm prover
ProverTypeOpenVM
)
@@ -47,9 +46,9 @@ const (
func MakeProverType(proofType message.ProofType) ProverType {
switch proofType {
case message.ProofTypeChunk:
return ProverTypeChunk
return ProverTypeChunkDeprecated
case message.ProofTypeBatch, message.ProofTypeBundle:
return ProverTypeBatch
return ProverTypeBatchDeprecated
default:
return ProverTypeUndefined
}
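The legacy mapping is preserved so that older proof types still resolve to a (deprecated) prover type rather than ProverTypeUndefined. As a quick check, a sketch in the style of the surrounding tests:

// Sketch: legacy proof types resolve to the deprecated prover types.
assert.Equal(t, ProverTypeChunkDeprecated, MakeProverType(message.ProofTypeChunk))
assert.Equal(t, ProverTypeBatchDeprecated, MakeProverType(message.ProofTypeBatch))
assert.Equal(t, ProverTypeBatchDeprecated, MakeProverType(message.ProofTypeBundle))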

View File

@@ -67,7 +67,7 @@ func randomURL() string {
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string, forks []string) (*cron.Collector, *http.Server) {
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
var err error
db, err = testApps.GetGormDBClient()
@@ -84,18 +84,10 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
ProverManager: &config.ProverManager{
ProversPerSession: proversPerSession,
Verifier: &config.VerifierConfig{
MockMode: true,
LowVersionCircuit: &config.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "homestead",
MinProverVersion: "v4.2.0",
},
HighVersionCircuit: &config.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "bernoulli",
MinProverVersion: "v4.3.0",
ForkName: "euclidV2",
MinProverVersion: "v4.4.89",
},
},
BatchCollectionTimeSec: 10,
@@ -109,20 +101,17 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
},
}
var chainConf params.ChainConfig
for _, forkName := range forks {
switch forkName {
case "bernoulli":
chainConf.BernoulliBlock = big.NewInt(100)
case "homestead":
chainConf.HomesteadBlock = big.NewInt(0)
}
}
proofCollector := cron.NewCollector(context.Background(), db, conf, nil)
router := gin.New()
api.InitController(conf, &chainConf, db, nil)
api.InitController(conf, &params.ChainConfig{
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
DarwinTime: new(uint64),
DarwinV2Time: new(uint64),
EuclidTime: new(uint64),
EuclidV2Time: new(uint64),
}, db, nil)
route.Route(router, conf, nil)
srv := &http.Server{
Addr: coordinatorURL,
@@ -142,7 +131,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
func setEnv(t *testing.T) {
var err error
version.Version = "v4.2.0"
version.Version = "v4.4.89"
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
glogger.Verbosity(log.LvlInfo)
@@ -198,7 +187,7 @@ func TestApis(t *testing.T) {
func testHandshake(t *testing.T) {
// Setup coordinator and http server.
coordinatorURL := randomURL()
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"homestead"})
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
proofCollector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -211,7 +200,7 @@ func testHandshake(t *testing.T) {
func testFailedHandshake(t *testing.T) {
// Setup coordinator and http server.
coordinatorURL := randomURL()
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"homestead"})
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
proofCollector.Stop()
}()
@@ -229,7 +218,7 @@ func testFailedHandshake(t *testing.T) {
func testGetTaskBlocked(t *testing.T) {
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -273,7 +262,7 @@ func testGetTaskBlocked(t *testing.T) {
func testOutdatedProverVersion(t *testing.T) {
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -285,14 +274,12 @@ func testOutdatedProverVersion(t *testing.T) {
batchProver := newMockProver(t, "prover_batch_test", coordinatorURL, message.ProofTypeBatch, "v1.999.999")
assert.True(t, chunkProver.healthCheckSuccess(t))
expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
conf.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, chunkProver.proverVersion)
expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.89, actual version: %s", chunkProver.proverVersion)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
assert.Equal(t, types.ErrJWTCommonErr, code)
assert.Equal(t, expectedErr, errors.New(errMsg))
expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
conf.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, batchProver.proverVersion)
expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.89, actual version: %s", batchProver.proverVersion)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
assert.Equal(t, types.ErrJWTCommonErr, code)
assert.Equal(t, expectedErr, errors.New(errMsg))
@@ -300,7 +287,7 @@ func testOutdatedProverVersion(t *testing.T) {
func testValidProof(t *testing.T) {
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -383,7 +370,7 @@ func testValidProof(t *testing.T) {
func testInvalidProof(t *testing.T) {
// Setup coordinator and ws server.
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"darwinV2"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -471,7 +458,7 @@ func testInvalidProof(t *testing.T) {
func testProofGeneratedFailed(t *testing.T) {
// Setup coordinator and ws server.
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"darwinV2"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -572,7 +559,7 @@ func testProofGeneratedFailed(t *testing.T) {
func testTimeoutProof(t *testing.T) {
// Setup coordinator and ws server.
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"darwinV2"})
collector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -595,7 +582,9 @@ func testTimeoutProof(t *testing.T) {
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 100, batch.Hash)
assert.NoError(t, err)
encodeData, err := json.Marshal(message.Halo2ChunkProof{})
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{}, MetaData: struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbChunk.Hash, encodeData, types.ProvingTaskUnassigned, 1)

View File

@@ -207,14 +207,16 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
}
var proof []byte
switch proverTaskSchema.TaskType {
case int(message.ProofTypeChunk):
encodeData, err := json.Marshal(message.Halo2ChunkProof{})
switch message.ProofType(proverTaskSchema.TaskType) {
case message.ProofTypeChunk:
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{}, MetaData: struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case int(message.ProofTypeBatch):
encodeData, err := json.Marshal(message.Halo2BatchProof{})
case message.ProofTypeBatch:
encodeData, err := json.Marshal(message.OpenVMBatchProof{VmProof: &message.OpenVMProof{}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
@@ -223,16 +225,14 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
if proofStatus == verifiedFailed {
switch proverTaskSchema.TaskType {
case int(message.ProofTypeChunk):
chunkProof := message.Halo2ChunkProof{}
chunkProof.RawProof = []byte(verifier.InvalidTestProof)
encodeData, err := json.Marshal(&chunkProof)
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)}, MetaData: struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case int(message.ProofTypeBatch):
batchProof := message.Halo2BatchProof{}
batchProof.RawProof = []byte(verifier.InvalidTestProof)
encodeData, err := json.Marshal(&batchProof)
encodeData, err := json.Marshal(&message.OpenVMBatchProof{VmProof: &message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData

View File

@@ -59,20 +59,20 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, int64(25), cur)
assert.Equal(t, int64(26), cur)
}
func testMigrate(t *testing.T) {
assert.NoError(t, Migrate(pgDB))
cur, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(25), cur)
assert.Equal(t, int64(26), cur)
}
func testRollback(t *testing.T) {
version, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(25), version)
assert.Equal(t, int64(26), version)
assert.NoError(t, Rollback(pgDB, nil))

View File

@@ -0,0 +1,15 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE batch
ADD COLUMN challenge_digest VARCHAR DEFAULT '';
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE IF EXISTS batch
DROP COLUMN IF EXISTS challenge_digest;
-- +goose StatementEnd
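The column ships with DEFAULT '', so pre-existing batch rows are backfilled with an empty string and the ChallengeDigest field on the ORM struct scans without NULL handling. A sketch of checking that after migrating, reusing the helpers from the migration tests above (pgDB as defined there; assumes at least one pre-existing row):

// Sketch: after migrating, old rows report an empty challenge_digest.
assert.NoError(t, Migrate(pgDB))
var digest string
assert.NoError(t, pgDB.QueryRow("SELECT challenge_digest FROM batch LIMIT 1").Scan(&digest))
assert.Equal(t, "", digest)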

View File

@@ -1357,13 +1357,13 @@ github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b h1:5H6V6yba
github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b/go.mod h1:48uxaqVgpD8ulH8p+nrBtfeLHZ9tX82bVVdPNkW3rPE=
github.com/scroll-tech/da-codec v0.1.3-0.20250227072756-a1482833595f h1:YYbhuUwjowqI4oyXtECRofck7Fyj18e1tcRjuQlZpJE=
github.com/scroll-tech/da-codec v0.1.3-0.20250227072756-a1482833595f/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod h1:swB5NSp8pKNDuYsTxfR08bHS6L56i119PBx8fxvV8Cs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250206083728-ea43834c198f/go.mod h1:Ik3OBLl7cJxPC+CFyCBYNXBPek4wpdzkWehn/y5qLM8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250225152658-bcfdb48dd939/go.mod h1:AgU8JJxC7+nfs7R7ma35AU7dMAGW7wCw3dRZRefIKyQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 h1:qfOaRflvH1vtnFWloB7BveKlP/VqYgMqLJ6e9TlBJ/8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=

prover/Cargo.lock (generated): 5854 lines

File diff suppressed because it is too large

View File

@@ -1,50 +0,0 @@
[package]
name = "prover"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[patch.crates-io]
ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }
[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }
[dependencies]
anyhow = "1.0"
log = "0.4"
env_logger = "0.11.3"
serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116"
futures = "0.3.30"
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover_darwin = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.12.2", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
prover_darwin_v2 = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.13.1", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "160db6c"}
base64 = "0.13.1"
reqwest = { version = "0.12.4", features = ["gzip"] }
reqwest-middleware = "0.3"
reqwest-retry = "0.5"
once_cell = "1.19.0"
hex = "0.4.3"
tiny-keccak = { version = "2.0.0", features = ["sha3", "keccak"] }
rand = "0.8.5"
eth-keystore = "0.5.0"
rlp = "0.5.2"
tokio = "1.37.0"
async-trait = "0.1"
sled = "0.34.7"
http = "1.1.0"
clap = { version = "4.5", features = ["derive"] }
ctor = "0.2.8"

View File

@@ -1,48 +0,0 @@
.PHONY: prover lint tests_binary
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif
ZKEVM_VERSION=$(shell ./print_high_zkevm_version.sh)
ifeq (${ZKEVM_VERSION},)
$(error ZKEVM_VERSION not set)
else
$(info ZKEVM_VERSION is ${ZKEVM_VERSION})
endif
ZKEVM_COMMIT=$(shell echo ${ZKEVM_VERSION} | cut -d " " -f2)
$(info ZKEVM_COMMIT is ${ZKEVM_COMMIT})
HALO2_GPU_VERSION=$(shell ./print_halo2gpu_version.sh | sed -n '2p')
GIT_REV=$(shell git rev-parse --short HEAD)
GO_TAG=$(shell grep "var tag = " ../common/version/version.go | cut -d "\"" -f2)
ifeq (${GO_TAG},)
$(error GO_TAG not set)
else
$(info GO_TAG is ${GO_TAG})
endif
ifeq (${HALO2_GPU_VERSION},)
# use halo2_proofs with CPU
ZK_VERSION=${ZKEVM_COMMIT}-${HALO2_VERSION}
else
# use halo2_gpu
ZK_VERSION=${ZKEVM_COMMIT}-${HALO2_GPU_VERSION}
endif
prover:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --release
tests_binary:
cargo clean && cargo test --release --no-run
ls target/release/deps/prover* | grep -v "\.d" | xargs -I{} ln -sf {} ./prover.test
lint:
cargo check --all-features
cargo clippy --all-features --all-targets -- -D warnings
cargo fmt --all

View File

@@ -1,30 +0,0 @@
{
"sdk_config": {
"prover_name_prefix": "prover-1",
"keys_dir": "keys",
"coordinator": {
"base_url": "http://localhost:8555",
"retry_count": 10,
"retry_wait_time_sec": 10,
"connection_timeout_sec": 30
},
"l2geth": {
"endpoint": "http://localhost:9999"
},
"prover": {
"circuit_types": [1,2,3],
"circuit_version": "v0.13.1"
},
"db_path": "unique-db-path-for-prover-1"
},
"low_version_circuit": {
"hard_fork_name": "darwin",
"params_path": "params",
"assets_path": "assets"
},
"high_version_circuit": {
"hard_fork_name": "darwinV2",
"params_path": "params",
"assets_path": "assets"
}
}

View File

@@ -1,21 +0,0 @@
#!/bin/bash
config_file="$HOME/.cargo/config"
if [ ! -e "$config_file" ]; then
exit 0
fi
if [[ $(head -n 1 "$config_file") == "#"* ]]; then
exit 0
fi
halo2gpu_path=$(grep -Po '(?<=paths = \[")([^"]*)' $config_file)
pushd $halo2gpu_path
commit_hash=$(git log --pretty=format:%h -n 1)
echo "${commit_hash:0:7}"
popd

View File

@@ -1,10 +0,0 @@
#!/bin/bash
set -ue
higher_zkevm_item=`grep "zkevm-circuits.git" ./Cargo.lock | sort | uniq | awk -F "[#=]" '{print $3" "$4}' | sort -k 1 | tail -n 1`
higher_version=`echo $higher_zkevm_item | awk '{print $1}'`
higher_commit=`echo $higher_zkevm_item | cut -d ' ' -f2 | cut -c-7`
echo "$higher_version $higher_commit"

View File

@@ -1 +0,0 @@
nightly-2023-12-03

View File

@@ -1,9 +0,0 @@
edition = "2021"
comment_width = 100
imports_granularity = "Crate"
max_width = 100
newline_style = "Unix"
# normalize_comments = true
reorder_imports = true
wrap_comments = true

View File

@@ -1,51 +0,0 @@
use anyhow::{bail, Result};
static SCROLL_PROVER_ASSETS_DIR_ENV_NAME: &str = "SCROLL_PROVER_ASSETS_DIR";
static mut SCROLL_PROVER_ASSETS_DIRS: Vec<String> = vec![];
#[derive(Debug)]
pub struct AssetsDirEnvConfig {}
impl AssetsDirEnvConfig {
pub fn init() -> Result<()> {
let value = std::env::var(SCROLL_PROVER_ASSETS_DIR_ENV_NAME)?;
let dirs: Vec<&str> = value.split(',').collect();
if dirs.len() != 2 {
bail!("env variable SCROLL_PROVER_ASSETS_DIR value must be 2 parts seperated by comma.")
}
unsafe {
SCROLL_PROVER_ASSETS_DIRS = dirs.into_iter().map(|s| s.to_string()).collect();
log::info!(
"init SCROLL_PROVER_ASSETS_DIRS: {:?}",
SCROLL_PROVER_ASSETS_DIRS
);
}
Ok(())
}
pub fn enable_first() {
unsafe {
log::info!(
"set env {SCROLL_PROVER_ASSETS_DIR_ENV_NAME} to {}",
&SCROLL_PROVER_ASSETS_DIRS[0]
);
std::env::set_var(
SCROLL_PROVER_ASSETS_DIR_ENV_NAME,
&SCROLL_PROVER_ASSETS_DIRS[0],
);
}
}
pub fn enable_second() {
unsafe {
log::info!(
"set env {SCROLL_PROVER_ASSETS_DIR_ENV_NAME} to {}",
&SCROLL_PROVER_ASSETS_DIRS[1]
);
std::env::set_var(
SCROLL_PROVER_ASSETS_DIR_ENV_NAME,
&SCROLL_PROVER_ASSETS_DIRS[1],
);
}
}
}

View File

@@ -1,76 +0,0 @@
#![feature(lazy_cell)]
#![feature(core_intrinsics)]
mod config;
mod prover;
mod types;
mod utils;
mod zk_circuits_handler;
use clap::{ArgAction, Parser};
use prover::{LocalProver, LocalProverConfig};
use scroll_proving_sdk::{
prover::ProverBuilder,
utils::{get_version, init_tracing},
};
use tokio::runtime;
use utils::get_prover_type;
#[derive(Parser, Debug)]
#[clap(disable_version_flag = true)]
struct Args {
/// Path of config file
#[arg(long = "config", default_value = "conf/config.json")]
config_file: String,
/// Version of this prover
#[arg(short, long, action = ArgAction::SetTrue)]
version: bool,
/// Path of log file
#[arg(long = "log.file")]
log_file: Option<String>,
}
fn main() -> anyhow::Result<()> {
let rt = runtime::Builder::new_multi_thread()
.thread_stack_size(16 * 1024 * 1024) // Set stack size to 16MB
.enable_all()
.build()
.expect("Failed to create Tokio runtime");
rt.block_on(async {
init_tracing();
let args = Args::parse();
if args.version {
println!("version is {}", get_version());
std::process::exit(0);
}
let cfg = LocalProverConfig::from_file(args.config_file)?;
let sdk_config = cfg.sdk_config.clone();
let mut prover_types = vec![];
sdk_config
.prover
.circuit_types
.iter()
.for_each(|circuit_type| {
if let Some(pt) = get_prover_type(*circuit_type) {
if !prover_types.contains(&pt) {
prover_types.push(pt);
}
}
});
let local_prover = LocalProver::new(cfg, prover_types);
let prover = ProverBuilder::new(sdk_config)
.with_proving_service(Box::new(local_prover))
.build()
.await?;
prover.run().await;
Ok(())
})
}

View File

@@ -1,192 +0,0 @@
use crate::{
types::ProverType,
utils::get_prover_type,
zk_circuits_handler::{CircuitsHandler, CircuitsHandlerProvider},
};
use anyhow::{anyhow, Result};
use async_trait::async_trait;
use scroll_proving_sdk::{
config::Config as SdkConfig,
prover::{
proving_service::{
GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
QueryTaskResponse, TaskStatus,
},
ProvingService,
},
};
use serde::{Deserialize, Serialize};
use std::{
fs::File,
sync::{Arc, Mutex},
time::{SystemTime, UNIX_EPOCH},
};
use tokio::{runtime::Handle, sync::RwLock, task::JoinHandle};
#[derive(Clone, Serialize, Deserialize)]
pub struct LocalProverConfig {
pub sdk_config: SdkConfig,
pub high_version_circuit: CircuitConfig,
pub low_version_circuit: CircuitConfig,
}
impl LocalProverConfig {
pub fn from_reader<R>(reader: R) -> Result<Self>
where
R: std::io::Read,
{
serde_json::from_reader(reader).map_err(|e| anyhow!(e))
}
pub fn from_file(file_name: String) -> Result<Self> {
let file = File::open(file_name)?;
Self::from_reader(&file)
}
}
#[derive(Clone, Serialize, Deserialize)]
pub struct CircuitConfig {
pub hard_fork_name: String,
pub params_path: String,
pub assets_path: String,
}
pub struct LocalProver {
config: LocalProverConfig,
prover_types: Vec<ProverType>,
circuits_handler_provider: RwLock<CircuitsHandlerProvider>,
next_task_id: Arc<Mutex<u64>>,
current_task: Arc<Mutex<Option<JoinHandle<Result<String>>>>>,
}
#[async_trait]
impl ProvingService for LocalProver {
fn is_local(&self) -> bool {
true
}
async fn get_vks(&self, req: GetVkRequest) -> GetVkResponse {
let mut prover_types = vec![];
req.circuit_types.iter().for_each(|circuit_type| {
if let Some(pt) = get_prover_type(*circuit_type) {
if !prover_types.contains(&pt) {
prover_types.push(pt);
}
}
});
let vks = self
.circuits_handler_provider
.read()
.await
.init_vks(&self.config, prover_types)
.await;
GetVkResponse { vks, error: None }
}
async fn prove(&self, req: ProveRequest) -> ProveResponse {
let handler = self
.circuits_handler_provider
.write()
.await
.get_circuits_handler(&req.hard_fork_name, self.prover_types.clone())
.expect("failed to get circuit handler");
match self.do_prove(req, handler).await {
Ok(resp) => resp,
Err(e) => ProveResponse {
status: TaskStatus::Failed,
error: Some(format!("failed to request proof: {}", e)),
..Default::default()
},
}
}
async fn query_task(&self, req: QueryTaskRequest) -> QueryTaskResponse {
let handle = self.current_task.lock().unwrap().take();
if let Some(handle) = handle {
if handle.is_finished() {
return match handle.await {
Ok(Ok(proof)) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Success,
proof: Some(proof),
..Default::default()
},
Ok(Err(e)) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some(format!("proving task failed: {}", e)),
..Default::default()
},
Err(e) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some(format!("proving task panicked: {}", e)),
..Default::default()
},
};
} else {
*self.current_task.lock().unwrap() = Some(handle);
return QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Proving,
..Default::default()
};
}
}
// If no handle is found
QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some("no proving task is running".to_string()),
..Default::default()
}
}
}
impl LocalProver {
pub fn new(config: LocalProverConfig, prover_types: Vec<ProverType>) -> Self {
let circuits_handler_provider = CircuitsHandlerProvider::new(config.clone())
.expect("failed to create circuits handler provider");
Self {
config,
prover_types,
circuits_handler_provider: RwLock::new(circuits_handler_provider),
next_task_id: Arc::new(Mutex::new(0)),
current_task: Arc::new(Mutex::new(None)),
}
}
async fn do_prove(
&self,
req: ProveRequest,
handler: Arc<Box<dyn CircuitsHandler>>,
) -> Result<ProveResponse> {
let task_id = {
let mut next_task_id = self.next_task_id.lock().unwrap();
*next_task_id += 1;
*next_task_id
};
let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
let created_at = duration.as_secs() as f64 + duration.subsec_nanos() as f64 * 1e-9;
let req_clone = req.clone();
let handle = Handle::current();
let task_handle =
tokio::task::spawn_blocking(move || handle.block_on(handler.get_proof_data(req_clone)));
*self.current_task.lock().unwrap() = Some(task_handle);
Ok(ProveResponse {
task_id: task_id.to_string(),
circuit_type: req.circuit_type,
circuit_version: req.circuit_version,
hard_fork_name: req.hard_fork_name,
status: TaskStatus::Proving,
created_at,
input: Some(req.input),
..Default::default()
})
}
}

View File

@@ -1,153 +0,0 @@
use ethers_core::types::H256;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use scroll_proving_sdk::prover::types::CircuitType;
pub type CommonHash = H256;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ProverType {
Chunk,
Batch,
}
impl ProverType {
fn from_u8(v: u8) -> Self {
match v {
1 => ProverType::Chunk,
2 => ProverType::Batch,
_ => {
panic!("invalid prover_type")
}
}
}
}
impl Serialize for ProverType {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
ProverType::Chunk => serializer.serialize_u8(1),
ProverType::Batch => serializer.serialize_u8(2),
}
}
}
impl<'de> Deserialize<'de> for ProverType {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let v: u8 = u8::deserialize(deserializer)?;
Ok(ProverType::from_u8(v))
}
}
#[derive(Serialize, Deserialize, Default)]
pub struct Task {
#[serde(rename = "type", default)]
pub task_type: CircuitType,
pub task_data: String,
#[serde(default)]
pub hard_fork_name: String,
}
#[derive(Serialize, Deserialize, Default)]
pub struct ProofDetail {
pub id: String,
#[serde(rename = "type", default)]
pub proof_type: CircuitType,
pub proof_data: String,
pub error: String,
}
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofFailureType {
Undefined,
Panic,
NoPanic,
}
impl ProofFailureType {
fn from_u8(v: u8) -> Self {
match v {
1 => ProofFailureType::Panic,
2 => ProofFailureType::NoPanic,
_ => ProofFailureType::Undefined,
}
}
}
impl Serialize for ProofFailureType {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
ProofFailureType::Undefined => serializer.serialize_u8(0),
ProofFailureType::Panic => serializer.serialize_u8(1),
ProofFailureType::NoPanic => serializer.serialize_u8(2),
}
}
}
impl<'de> Deserialize<'de> for ProofFailureType {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let v: u8 = u8::deserialize(deserializer)?;
Ok(ProofFailureType::from_u8(v))
}
}
impl Default for ProofFailureType {
fn default() -> Self {
Self::Undefined
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofStatus {
Ok,
Error,
}
impl ProofStatus {
fn from_u8(v: u8) -> Self {
match v {
0 => ProofStatus::Ok,
_ => ProofStatus::Error,
}
}
}
impl Serialize for ProofStatus {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
ProofStatus::Ok => serializer.serialize_u8(0),
ProofStatus::Error => serializer.serialize_u8(1),
}
}
}
impl<'de> Deserialize<'de> for ProofStatus {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let v: u8 = u8::deserialize(deserializer)?;
Ok(ProofStatus::from_u8(v))
}
}
impl Default for ProofStatus {
fn default() -> Self {
Self::Ok
}
}

View File

@@ -1,18 +0,0 @@
use crate::types::ProverType;
use scroll_proving_sdk::prover::types::CircuitType;
pub fn get_circuit_types(prover_type: ProverType) -> Vec<CircuitType> {
match prover_type {
ProverType::Chunk => vec![CircuitType::Chunk],
ProverType::Batch => vec![CircuitType::Batch, CircuitType::Bundle],
}
}
pub fn get_prover_type(task_type: CircuitType) -> Option<ProverType> {
match task_type {
CircuitType::Undefined => None,
CircuitType::Chunk => Some(ProverType::Chunk),
CircuitType::Batch => Some(ProverType::Batch),
CircuitType::Bundle => Some(ProverType::Batch),
}
}

View File

@@ -1,165 +0,0 @@
mod common;
mod darwin;
mod darwin_v2;
use crate::{
config::AssetsDirEnvConfig, prover::LocalProverConfig, types::ProverType,
utils::get_circuit_types,
};
use anyhow::{bail, Result};
use async_trait::async_trait;
use darwin::DarwinHandler;
use darwin_v2::DarwinV2Handler;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, CircuitType};
use std::{collections::HashMap, sync::Arc};
type HardForkName = String;
pub mod utils {
pub fn encode_vk(vk: Vec<u8>) -> String {
base64::encode(vk)
}
}
#[async_trait]
pub trait CircuitsHandler: Send + Sync {
async fn get_vk(&self, task_type: CircuitType) -> Option<Vec<u8>>;
async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String>;
}
type CircuitsHandlerBuilder = fn(
prover_types: Vec<ProverType>,
config: &LocalProverConfig,
) -> Result<Box<dyn CircuitsHandler>>;
pub struct CircuitsHandlerProvider {
config: LocalProverConfig,
circuits_handler_builder_map: HashMap<HardForkName, CircuitsHandlerBuilder>,
current_fork_name: Option<HardForkName>,
current_circuit: Option<Arc<Box<dyn CircuitsHandler>>>,
}
impl CircuitsHandlerProvider {
pub fn new(config: LocalProverConfig) -> Result<Self> {
let mut m: HashMap<HardForkName, CircuitsHandlerBuilder> = HashMap::new();
if let Err(e) = AssetsDirEnvConfig::init() {
panic!("AssetsDirEnvConfig init failed: {:#}", e);
}
fn handler_builder(
prover_types: Vec<ProverType>,
config: &LocalProverConfig,
) -> Result<Box<dyn CircuitsHandler>> {
log::info!(
"now init zk circuits handler, hard_fork_name: {}",
&config.low_version_circuit.hard_fork_name
);
AssetsDirEnvConfig::enable_first();
DarwinHandler::new(
prover_types,
&config.low_version_circuit.params_path,
&config.low_version_circuit.assets_path,
)
.map(|handler| Box::new(handler) as Box<dyn CircuitsHandler>)
}
m.insert(
config.low_version_circuit.hard_fork_name.clone(),
handler_builder,
);
fn next_handler_builder(
prover_types: Vec<ProverType>,
config: &LocalProverConfig,
) -> Result<Box<dyn CircuitsHandler>> {
log::info!(
"now init zk circuits handler, hard_fork_name: {}",
&config.high_version_circuit.hard_fork_name
);
AssetsDirEnvConfig::enable_second();
DarwinV2Handler::new(
prover_types,
&config.high_version_circuit.params_path,
&config.high_version_circuit.assets_path,
)
.map(|handler| Box::new(handler) as Box<dyn CircuitsHandler>)
}
m.insert(
config.high_version_circuit.hard_fork_name.clone(),
next_handler_builder,
);
let provider = CircuitsHandlerProvider {
config,
circuits_handler_builder_map: m,
current_fork_name: None,
current_circuit: None,
};
Ok(provider)
}
pub fn get_circuits_handler(
&mut self,
hard_fork_name: &String,
prover_types: Vec<ProverType>,
) -> Result<Arc<Box<dyn CircuitsHandler>>> {
match &self.current_fork_name {
Some(fork_name) if fork_name == hard_fork_name => {
log::info!("get circuits handler from cache");
if let Some(handler) = &self.current_circuit {
Ok(handler.clone())
} else {
bail!("missing cached handler, there must be something wrong.")
}
}
_ => {
log::info!(
"failed to get circuits handler from cache, create a new one: {hard_fork_name}"
);
if let Some(builder) = self.circuits_handler_builder_map.get(hard_fork_name) {
log::info!("building circuits handler for {hard_fork_name}");
let handler = builder(prover_types, &self.config)
.expect("failed to build circuits handler");
self.current_fork_name = Some(hard_fork_name.clone());
let arc_handler = Arc::new(handler);
self.current_circuit = Some(arc_handler.clone());
Ok(arc_handler)
} else {
bail!("missing builder, there must be something wrong.")
}
}
}
}
pub async fn init_vks(
&self,
config: &LocalProverConfig,
prover_types: Vec<ProverType>,
) -> Vec<String> {
let mut vks = Vec::new();
for (hard_fork_name, build) in self.circuits_handler_builder_map.iter() {
let handler =
build(prover_types.clone(), config).expect("failed to build circuits handler");
for prover_type in prover_types.iter() {
for task_type in get_circuit_types(*prover_type).into_iter() {
let vk = handler
.get_vk(task_type)
.await
.map_or("".to_string(), utils::encode_vk);
log::info!(
"vk for {hard_fork_name}, is {vk}, task_type: {:?}",
task_type
);
if !vk.is_empty() {
vks.push(vk)
}
}
}
}
vks
}
}

View File

@@ -1,33 +0,0 @@
use std::{collections::BTreeMap, rc::Rc};
use crate::types::ProverType;
use once_cell::sync::OnceCell;
use halo2_proofs::{halo2curves::bn256::Bn256, poly::kzg::commitment::ParamsKZG};
static mut PARAMS_MAP: OnceCell<Rc<BTreeMap<u32, ParamsKZG<Bn256>>>> = OnceCell::new();
pub fn get_params_map_instance<'a, F>(load_params_func: F) -> &'a BTreeMap<u32, ParamsKZG<Bn256>>
where
F: FnOnce() -> BTreeMap<u32, ParamsKZG<Bn256>>,
{
unsafe {
PARAMS_MAP.get_or_init(|| {
let params_map = load_params_func();
Rc::new(params_map)
})
}
}
pub fn get_degrees<F>(prover_types: &std::collections::HashSet<ProverType>, f: F) -> Vec<u32>
where
F: FnMut(&ProverType) -> Vec<u32>,
{
prover_types
.iter()
.flat_map(f)
.collect::<std::collections::HashSet<u32>>()
.into_iter()
.collect()
}

View File

@@ -1,401 +0,0 @@
use super::{common::*, CircuitsHandler};
use crate::types::ProverType;
use anyhow::{bail, Context, Ok, Result};
use async_trait::async_trait;
use once_cell::sync::Lazy;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, CircuitType};
use serde::Deserialize;
use tokio::sync::RwLock;
use crate::types::CommonHash;
use std::env;
use prover_darwin::{
aggregator::Prover as BatchProver,
check_chunk_hashes,
common::Prover as CommonProver,
config::{AGG_DEGREES, ZKEVM_DEGREES},
zkevm::Prover as ChunkProver,
BatchProof, BatchProvingTask, BlockTrace, BundleProof, BundleProvingTask, ChunkInfo,
ChunkProof, ChunkProvingTask,
};
// Only used for debugging.
static OUTPUT_DIR: Lazy<Option<String>> = Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());
#[derive(Debug, Clone, Deserialize)]
pub struct BatchTaskDetail {
pub chunk_infos: Vec<ChunkInfo>,
#[serde(flatten)]
pub batch_proving_task: BatchProvingTask,
}
type BundleTaskDetail = BundleProvingTask;
#[derive(Debug, Clone, Deserialize)]
pub struct ChunkTaskDetail {
pub block_hashes: Vec<CommonHash>,
}
#[derive(Default)]
pub struct DarwinHandler {
chunk_prover: Option<RwLock<ChunkProver<'static>>>,
batch_prover: Option<RwLock<BatchProver<'static>>>,
}
impl DarwinHandler {
pub fn new_multi(
prover_types: Vec<ProverType>,
params_dir: &str,
assets_dir: &str,
) -> Result<Self> {
let class_name = std::intrinsics::type_name::<Self>();
let prover_types_set = prover_types
.into_iter()
.collect::<std::collections::HashSet<ProverType>>();
let mut handler = Self {
batch_prover: None,
chunk_prover: None,
};
let degrees: Vec<u32> = get_degrees(&prover_types_set, |prover_type| match prover_type {
ProverType::Chunk => ZKEVM_DEGREES.clone(),
ProverType::Batch => AGG_DEGREES.clone(),
});
let params_map = get_params_map_instance(|| {
log::info!(
"calling get_params_map from {}, prover_types: {:?}, degrees: {:?}",
class_name,
prover_types_set,
degrees
);
CommonProver::load_params_map(params_dir, &degrees)
});
for prover_type in prover_types_set {
match prover_type {
ProverType::Chunk => {
handler.chunk_prover = Some(RwLock::new(ChunkProver::from_params_and_assets(
params_map, assets_dir,
)));
}
ProverType::Batch => {
handler.batch_prover = Some(RwLock::new(BatchProver::from_params_and_assets(
params_map, assets_dir,
)))
}
}
}
Ok(handler)
}
pub fn new(prover_types: Vec<ProverType>, params_dir: &str, assets_dir: &str) -> Result<Self> {
Self::new_multi(prover_types, params_dir, assets_dir)
}
async fn gen_chunk_proof_raw(&self, chunk_trace: Vec<BlockTrace>) -> Result<ChunkProof> {
if let Some(prover) = self.chunk_prover.as_ref() {
let chunk = ChunkProvingTask::from(chunk_trace);
let chunk_proof =
prover
.write()
.await
.gen_chunk_proof(chunk, None, None, self.get_output_dir())?;
return Ok(chunk_proof);
}
unreachable!("please check errors in proof_type logic")
}
async fn gen_chunk_proof(&self, prove_request: ProveRequest) -> Result<String> {
let chunk_traces: Vec<BlockTrace> = serde_json::from_str(&prove_request.input)?;
let chunk_proof = self.gen_chunk_proof_raw(chunk_traces).await?;
Ok(serde_json::to_string(&chunk_proof)?)
}
async fn gen_batch_proof_raw(&self, batch_task_detail: BatchTaskDetail) -> Result<BatchProof> {
if let Some(prover) = self.batch_prover.as_ref() {
let chunk_hashes_proofs: Vec<(ChunkInfo, ChunkProof)> = batch_task_detail
.chunk_infos
.clone()
.into_iter()
.zip(batch_task_detail.batch_proving_task.chunk_proofs.clone())
.collect();
let chunk_proofs: Vec<ChunkProof> =
chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();
let is_valid = prover.read().await.check_protocol_of_chunks(&chunk_proofs);
if !is_valid {
bail!("non-match chunk protocol")
}
check_chunk_hashes("", &chunk_hashes_proofs).context("failed to check chunk info")?;
let batch_proof = prover.write().await.gen_batch_proof(
batch_task_detail.batch_proving_task,
None,
self.get_output_dir(),
)?;
return Ok(batch_proof);
}
unreachable!("please check errors in proof_type logic")
}
async fn gen_batch_proof(&self, prove_request: ProveRequest) -> Result<String> {
let batch_task_detail: BatchTaskDetail = serde_json::from_str(&prove_request.input)?;
let batch_proof = self.gen_batch_proof_raw(batch_task_detail).await?;
Ok(serde_json::to_string(&batch_proof)?)
}
async fn gen_bundle_proof_raw(
&self,
bundle_task_detail: BundleTaskDetail,
) -> Result<BundleProof> {
if let Some(prover) = self.batch_prover.as_ref() {
let bundle_proof = prover.write().await.gen_bundle_proof(
bundle_task_detail,
None,
self.get_output_dir(),
)?;
return Ok(bundle_proof);
}
unreachable!("please check errors in proof_type logic")
}
async fn gen_bundle_proof(&self, prove_request: ProveRequest) -> Result<String> {
let bundle_task_detail: BundleTaskDetail = serde_json::from_str(&prove_request.input)?;
let bundle_proof = self.gen_bundle_proof_raw(bundle_task_detail).await?;
Ok(serde_json::to_string(&bundle_proof)?)
}
fn get_output_dir(&self) -> Option<&str> {
OUTPUT_DIR.as_deref()
}
}
#[async_trait]
impl CircuitsHandler for DarwinHandler {
async fn get_vk(&self, task_type: CircuitType) -> Option<Vec<u8>> {
match task_type {
CircuitType::Chunk => self.chunk_prover.as_ref().unwrap().read().await.get_vk(),
CircuitType::Batch => self
.batch_prover
.as_ref()
.unwrap()
.read()
.await
.get_batch_vk(),
CircuitType::Bundle => self
.batch_prover
.as_ref()
.unwrap()
.read()
.await
.get_bundle_vk(),
_ => unreachable!(),
}
}
async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
match prove_request.circuit_type {
CircuitType::Chunk => self.gen_chunk_proof(prove_request).await,
CircuitType::Batch => self.gen_batch_proof(prove_request).await,
CircuitType::Bundle => self.gen_bundle_proof(prove_request).await,
_ => unreachable!(),
}
}
}
// =================================== tests module ========================================
#[cfg(test)]
mod tests {
use super::*;
use crate::zk_circuits_handler::utils::encode_vk;
use prover_darwin::utils::chunk_trace_to_witness_block;
use scroll_proving_sdk::utils::init_tracing;
use std::{path::PathBuf, sync::LazyLock};
#[ctor::ctor]
fn init() {
init_tracing();
log::info!("logger initialized");
}
static DEFAULT_WORK_DIR: &str = "/assets";
static WORK_DIR: LazyLock<String> = LazyLock::new(|| {
std::env::var("DARWIN_TEST_DIR")
.unwrap_or(String::from(DEFAULT_WORK_DIR))
.trim_end_matches('/')
.to_string()
});
static PARAMS_PATH: LazyLock<String> = LazyLock::new(|| format!("{}/test_params", *WORK_DIR));
static ASSETS_PATH: LazyLock<String> = LazyLock::new(|| format!("{}/test_assets", *WORK_DIR));
static PROOF_DUMP_PATH: LazyLock<String> =
LazyLock::new(|| format!("{}/proof_data", *WORK_DIR));
static BATCH_DIR_PATH: LazyLock<String> =
LazyLock::new(|| format!("{}/traces/batch_24", *WORK_DIR));
static BATCH_VK_PATH: LazyLock<String> =
LazyLock::new(|| format!("{}/test_assets/vk_batch.vkey", *WORK_DIR));
static CHUNK_VK_PATH: LazyLock<String> =
LazyLock::new(|| format!("{}/test_assets/vk_chunk.vkey", *WORK_DIR));
#[test]
fn it_works() {
let result = true;
assert!(result);
}
#[tokio::test]
async fn test_circuits() -> Result<()> {
let bi_handler = DarwinHandler::new_multi(
vec![ProverType::Chunk, ProverType::Batch],
&PARAMS_PATH,
&ASSETS_PATH,
)?;
let chunk_handler = bi_handler;
let chunk_vk = chunk_handler.get_vk(CircuitType::Chunk).await.unwrap();
check_vk(CircuitType::Chunk, chunk_vk, "chunk vk must be available");
let chunk_dir_paths = get_chunk_dir_paths()?;
log::info!("chunk_dir_paths, {:?}", chunk_dir_paths);
let mut chunk_infos = vec![];
let mut chunk_proofs = vec![];
for (id, chunk_path) in chunk_dir_paths.into_iter().enumerate() {
let chunk_id = format!("chunk_proof{}", id + 1);
log::info!("start to process {chunk_id}");
let chunk_trace = read_chunk_trace(chunk_path)?;
let chunk_info = traces_to_chunk_info(chunk_trace.clone())?;
chunk_infos.push(chunk_info);
log::info!("start to prove {chunk_id}");
let chunk_proof = chunk_handler.gen_chunk_proof_raw(chunk_trace).await?;
let proof_data = serde_json::to_string(&chunk_proof)?;
dump_proof(chunk_id, proof_data)?;
chunk_proofs.push(chunk_proof);
}
let batch_handler = chunk_handler;
let batch_vk = batch_handler.get_vk(CircuitType::Batch).await.unwrap();
check_vk(CircuitType::Batch, batch_vk, "batch vk must be available");
let batch_task_detail = make_batch_task_detail(chunk_infos, chunk_proofs);
log::info!("start to prove batch");
let batch_proof = batch_handler.gen_batch_proof_raw(batch_task_detail).await?;
let proof_data = serde_json::to_string(&batch_proof)?;
dump_proof("batch_proof".to_string(), proof_data)?;
Ok(())
}
fn make_batch_task_detail(_: Vec<ChunkInfo>, _: Vec<ChunkProof>) -> BatchTaskDetail {
todo!();
// BatchTaskDetail {
// chunk_infos,
// batch_proving_task: BatchProvingTask {
// parent_batch_hash: todo!(),
// parent_state_root: todo!(),
// batch_header: todo!(),
// chunk_proofs,
// },
// }
}
fn check_vk(proof_type: CircuitType, vk: Vec<u8>, info: &str) {
log::info!("check_vk, {:?}", proof_type);
let vk_from_file = read_vk(proof_type).unwrap();
assert_eq!(vk_from_file, encode_vk(vk), "{info}")
}
fn read_vk(proof_type: CircuitType) -> Result<String> {
log::info!("read_vk, {:?}", proof_type);
let vk_file = match proof_type {
CircuitType::Chunk => CHUNK_VK_PATH.clone(),
CircuitType::Batch => BATCH_VK_PATH.clone(),
CircuitType::Bundle => todo!(),
CircuitType::Undefined => unreachable!(),
};
let data = std::fs::read(vk_file)?;
Ok(encode_vk(data))
}
fn read_chunk_trace(path: PathBuf) -> Result<Vec<BlockTrace>> {
log::info!("read_chunk_trace, {:?}", path);
let mut chunk_trace: Vec<BlockTrace> = vec![];
fn read_block_trace(file: &PathBuf) -> Result<BlockTrace> {
let f = std::fs::File::open(file)?;
Ok(serde_json::from_reader(&f)?)
}
if path.is_dir() {
let entries = std::fs::read_dir(&path)?;
let mut files: Vec<String> = entries
.into_iter()
.filter_map(|e| {
if e.is_err() {
return None;
}
let entry = e.unwrap();
if entry.path().is_dir() {
return None;
}
if let Result::Ok(file_name) = entry.file_name().into_string() {
Some(file_name)
} else {
None
}
})
.collect();
files.sort();
log::info!("files in chunk {:?} is {:?}", path, files);
for file in files {
let block_trace = read_block_trace(&path.join(file))?;
chunk_trace.push(block_trace);
}
} else {
let block_trace = read_block_trace(&path)?;
chunk_trace.push(block_trace);
}
Ok(chunk_trace)
}
fn get_chunk_dir_paths() -> Result<Vec<PathBuf>> {
let batch_path = PathBuf::from(BATCH_DIR_PATH.clone());
let entries = std::fs::read_dir(&batch_path)?;
let mut files: Vec<String> = entries
.filter_map(|e| {
if e.is_err() {
return None;
}
let entry = e.unwrap();
if entry.path().is_dir() {
if let Result::Ok(file_name) = entry.file_name().into_string() {
Some(file_name)
} else {
None
}
} else {
None
}
})
.collect();
files.sort();
log::info!("files in batch {:?} is {:?}", batch_path, files);
Ok(files.into_iter().map(|f| batch_path.join(f)).collect())
}
fn traces_to_chunk_info(chunk_trace: Vec<BlockTrace>) -> Result<ChunkInfo> {
let witness_block = chunk_trace_to_witness_block(chunk_trace)?;
Ok(ChunkInfo::from_witness_block(&witness_block, false))
}
fn dump_proof(id: String, proof_data: String) -> Result<()> {
let dump_path = PathBuf::from(PROOF_DUMP_PATH.clone());
Ok(std::fs::write(dump_path.join(id), proof_data)?)
}
}

View File

@@ -1,459 +0,0 @@
use super::{common::*, CircuitsHandler};
use crate::types::ProverType;
use anyhow::{bail, Context, Ok, Result};
use async_trait::async_trait;
use once_cell::sync::Lazy;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, CircuitType};
use serde::Deserialize;
use tokio::sync::RwLock;
use crate::types::CommonHash;
use std::env;
use prover_darwin_v2::{
aggregator::Prover as BatchProver,
check_chunk_hashes,
common::Prover as CommonProver,
config::{AGG_DEGREES, ZKEVM_DEGREES},
zkevm::Prover as ChunkProver,
BatchProof, BatchProvingTask, BlockTrace, BundleProof, BundleProvingTask, ChunkInfo,
ChunkProof, ChunkProvingTask,
};
// Only used for debugging.
static OUTPUT_DIR: Lazy<Option<String>> = Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());
#[derive(Debug, Clone, Deserialize)]
pub struct BatchTaskDetail {
pub chunk_infos: Vec<ChunkInfo>,
#[serde(flatten)]
pub batch_proving_task: BatchProvingTask,
}
type BundleTaskDetail = BundleProvingTask;
#[derive(Debug, Clone, Deserialize)]
pub struct ChunkTaskDetail {
pub block_hashes: Vec<CommonHash>,
}
#[derive(Default)]
pub struct DarwinV2Handler {
chunk_prover: Option<RwLock<ChunkProver<'static>>>,
batch_prover: Option<RwLock<BatchProver<'static>>>,
}
impl DarwinV2Handler {
pub fn new_multi(
prover_types: Vec<ProverType>,
params_dir: &str,
assets_dir: &str,
) -> Result<Self> {
let class_name = std::intrinsics::type_name::<Self>();
let prover_types_set = prover_types
.into_iter()
.collect::<std::collections::HashSet<ProverType>>();
let mut handler = Self {
batch_prover: None,
chunk_prover: None,
};
let degrees: Vec<u32> = get_degrees(&prover_types_set, |prover_type| match prover_type {
ProverType::Chunk => ZKEVM_DEGREES.clone(),
ProverType::Batch => AGG_DEGREES.clone(),
});
let params_map = get_params_map_instance(|| {
log::info!(
"calling get_params_map from {}, prover_types: {:?}, degrees: {:?}",
class_name,
prover_types_set,
degrees
);
CommonProver::load_params_map(params_dir, &degrees)
});
for prover_type in prover_types_set {
match prover_type {
ProverType::Chunk => {
handler.chunk_prover = Some(RwLock::new(ChunkProver::from_params_and_assets(
params_map, assets_dir,
)));
}
ProverType::Batch => {
handler.batch_prover = Some(RwLock::new(BatchProver::from_params_and_assets(
params_map, assets_dir,
)))
}
}
}
Ok(handler)
}
pub fn new(prover_types: Vec<ProverType>, params_dir: &str, assets_dir: &str) -> Result<Self> {
Self::new_multi(prover_types, params_dir, assets_dir)
}
async fn gen_chunk_proof_raw(&self, chunk_trace: Vec<BlockTrace>) -> Result<ChunkProof> {
if let Some(prover) = self.chunk_prover.as_ref() {
let chunk = ChunkProvingTask::from(chunk_trace);
let chunk_proof =
prover
.write()
.await
.gen_chunk_proof(chunk, None, None, self.get_output_dir())?;
return Ok(chunk_proof);
}
unreachable!("please check errors in proof_type logic")
}
async fn gen_chunk_proof(&self, prove_request: ProveRequest) -> Result<String> {
let chunk_traces: Vec<BlockTrace> = serde_json::from_str(&prove_request.input)?;
let chunk_proof = self.gen_chunk_proof_raw(chunk_traces).await?;
Ok(serde_json::to_string(&chunk_proof)?)
}
async fn gen_batch_proof_raw(&self, batch_task_detail: BatchTaskDetail) -> Result<BatchProof> {
if let Some(prover) = self.batch_prover.as_ref() {
let chunk_hashes_proofs: Vec<(ChunkInfo, ChunkProof)> = batch_task_detail
.chunk_infos
.clone()
.into_iter()
.zip(batch_task_detail.batch_proving_task.chunk_proofs.clone())
.collect();
let chunk_proofs: Vec<ChunkProof> =
chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();
let is_valid = prover.write().await.check_protocol_of_chunks(&chunk_proofs);
if !is_valid {
bail!("non-match chunk protocol")
}
check_chunk_hashes("", &chunk_hashes_proofs).context("failed to check chunk info")?;
let batch_proof = prover.write().await.gen_batch_proof(
batch_task_detail.batch_proving_task,
None,
self.get_output_dir(),
)?;
return Ok(batch_proof);
}
unreachable!("please check errors in proof_type logic")
}
async fn gen_batch_proof(&self, prove_request: ProveRequest) -> Result<String> {
let batch_task_detail: BatchTaskDetail = serde_json::from_str(&prove_request.input)?;
let batch_proof = self.gen_batch_proof_raw(batch_task_detail).await?;
Ok(serde_json::to_string(&batch_proof)?)
}
async fn gen_bundle_proof_raw(
&self,
bundle_task_detail: BundleTaskDetail,
) -> Result<BundleProof> {
if let Some(prover) = self.batch_prover.as_ref() {
let bundle_proof = prover.write().await.gen_bundle_proof(
bundle_task_detail,
None,
self.get_output_dir(),
)?;
return Ok(bundle_proof);
}
unreachable!("please check errors in proof_type logic")
}
async fn gen_bundle_proof(&self, prove_request: ProveRequest) -> Result<String> {
let bundle_task_detail: BundleTaskDetail = serde_json::from_str(&prove_request.input)?;
let bundle_proof = self.gen_bundle_proof_raw(bundle_task_detail).await?;
Ok(serde_json::to_string(&bundle_proof)?)
}
fn get_output_dir(&self) -> Option<&str> {
OUTPUT_DIR.as_deref()
}
}
#[async_trait]
impl CircuitsHandler for DarwinV2Handler {
async fn get_vk(&self, task_type: CircuitType) -> Option<Vec<u8>> {
match task_type {
CircuitType::Chunk => self.chunk_prover.as_ref().unwrap().read().await.get_vk(),
CircuitType::Batch => self
.batch_prover
.as_ref()
.unwrap()
.read()
.await
.get_batch_vk(),
CircuitType::Bundle => self
.batch_prover
.as_ref()
.unwrap()
.read()
.await
.get_bundle_vk(),
_ => unreachable!(),
}
}
async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
match prove_request.circuit_type {
CircuitType::Chunk => self.gen_chunk_proof(prove_request).await,
CircuitType::Batch => self.gen_batch_proof(prove_request).await,
CircuitType::Bundle => self.gen_bundle_proof(prove_request).await,
_ => unreachable!(),
}
}
}
// =================================== tests module ========================================
#[cfg(test)]
mod tests {
use super::*;
use crate::zk_circuits_handler::utils::encode_vk;
use ethers_core::types::H256;
use prover_darwin_v2::{
aggregator::eip4844, utils::chunk_trace_to_witness_block, BatchData, BatchHeader,
MAX_AGG_SNARKS,
};
use scroll_proving_sdk::utils::init_tracing;
use std::{path::PathBuf, sync::LazyLock};
#[ctor::ctor]
fn init() {
init_tracing();
log::info!("logger initialized");
}
static DEFAULT_WORK_DIR: &str = "/assets";
static WORK_DIR: LazyLock<String> = LazyLock::new(|| {
std::env::var("DARWIN_V2_TEST_DIR")
.unwrap_or(String::from(DEFAULT_WORK_DIR))
.trim_end_matches('/')
.to_string()
});
static PARAMS_PATH: LazyLock<String> = LazyLock::new(|| format!("{}/test_params", *WORK_DIR));
static ASSETS_PATH: LazyLock<String> = LazyLock::new(|| format!("{}/test_assets", *WORK_DIR));
static PROOF_DUMP_PATH: LazyLock<String> =
LazyLock::new(|| format!("{}/proof_data", *WORK_DIR));
static BATCH_DIR_PATH: LazyLock<String> =
LazyLock::new(|| format!("{}/traces/batch_24", *WORK_DIR));
static BATCH_VK_PATH: LazyLock<String> =
LazyLock::new(|| format!("{}/test_assets/vk_batch.vkey", *WORK_DIR));
static CHUNK_VK_PATH: LazyLock<String> =
LazyLock::new(|| format!("{}/test_assets/vk_chunk.vkey", *WORK_DIR));
#[test]
fn it_works() {
let result = true;
assert!(result);
}
#[tokio::test]
async fn test_circuits() -> Result<()> {
let bi_handler = DarwinV2Handler::new_multi(
vec![ProverType::Chunk, ProverType::Batch],
&PARAMS_PATH,
&ASSETS_PATH,
)?;
let chunk_handler = bi_handler;
let chunk_vk = chunk_handler.get_vk(CircuitType::Chunk).await.unwrap();
check_vk(CircuitType::Chunk, chunk_vk, "chunk vk must be available");
let chunk_dir_paths = get_chunk_dir_paths()?;
log::info!("chunk_dir_paths, {:?}", chunk_dir_paths);
let mut chunk_traces = vec![];
let mut chunk_infos = vec![];
let mut chunk_proofs = vec![];
for (id, chunk_path) in chunk_dir_paths.into_iter().enumerate() {
let chunk_id = format!("chunk_proof{}", id + 1);
log::info!("start to process {chunk_id}");
let chunk_trace = read_chunk_trace(chunk_path)?;
chunk_traces.push(chunk_trace.clone());
let chunk_info = traces_to_chunk_info(chunk_trace.clone())?;
chunk_infos.push(chunk_info);
log::info!("start to prove {chunk_id}");
let chunk_proof = chunk_handler.gen_chunk_proof_raw(chunk_trace).await?;
let proof_data = serde_json::to_string(&chunk_proof)?;
dump_proof(chunk_id, proof_data)?;
chunk_proofs.push(chunk_proof);
}
let batch_handler = chunk_handler;
let batch_vk = batch_handler.get_vk(CircuitType::Batch).await.unwrap();
check_vk(CircuitType::Batch, batch_vk, "batch vk must be available");
let batch_task_detail = make_batch_task_detail(chunk_traces, chunk_proofs, None);
log::info!("start to prove batch");
let batch_proof = batch_handler.gen_batch_proof_raw(batch_task_detail).await?;
let proof_data = serde_json::to_string(&batch_proof)?;
dump_proof("batch_proof".to_string(), proof_data)?;
Ok(())
}
// copied from https://github.com/scroll-tech/scroll-prover/blob/main/integration/src/prove.rs
fn get_blob_from_chunks(chunks: &[ChunkInfo]) -> Vec<u8> {
let num_chunks = chunks.len();
let padded_chunk =
ChunkInfo::mock_padded_chunk_info_for_testing(chunks.last().as_ref().unwrap());
let chunks_with_padding = [
chunks.to_vec(),
vec![padded_chunk; MAX_AGG_SNARKS - num_chunks],
]
.concat();
let batch_data = BatchData::<{ MAX_AGG_SNARKS }>::new(chunks.len(), &chunks_with_padding);
let batch_bytes = batch_data.get_batch_data_bytes();
let blob_bytes = eip4844::get_blob_bytes(&batch_bytes);
log::info!("blob_bytes len {}", blob_bytes.len());
blob_bytes
}
// TODO: chunk_infos can be extracted from chunk_proofs.
// Still needed?
fn make_batch_task_detail(
chunk_traces: Vec<Vec<BlockTrace>>,
chunk_proofs: Vec<ChunkProof>,
last_batcher_header: Option<BatchHeader<{ MAX_AGG_SNARKS }>>,
) -> BatchTaskDetail {
// dummy parent batch hash
let dummy_parent_batch_hash = H256([
0xab, 0xac, 0xad, 0xae, 0xaf, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
]);
let chunk_infos: Vec<_> = chunk_proofs.iter().map(|p| p.chunk_info.clone()).collect();
let l1_message_popped = chunk_traces
.iter()
.flatten()
.map(|chunk| chunk.num_l1_txs())
.sum();
let last_block_timestamp = chunk_traces.last().map_or(0, |block_traces| {
block_traces
.last()
.map_or(0, |block_trace| block_trace.header.timestamp.as_u64())
});
let blob_bytes = get_blob_from_chunks(&chunk_infos);
let batch_header = BatchHeader::construct_from_chunks(
last_batcher_header.map_or(4, |header| header.version),
last_batcher_header.map_or(123, |header| header.batch_index + 1),
l1_message_popped,
last_batcher_header.map_or(l1_message_popped, |header| {
header.total_l1_message_popped + l1_message_popped
}),
last_batcher_header.map_or(dummy_parent_batch_hash, |header| header.batch_hash()),
last_block_timestamp,
&chunk_infos,
&blob_bytes,
);
BatchTaskDetail {
chunk_infos,
batch_proving_task: BatchProvingTask {
chunk_proofs,
batch_header,
blob_bytes,
},
}
}
fn check_vk(proof_type: CircuitType, vk: Vec<u8>, info: &str) {
log::info!("check_vk, {:?}", proof_type);
let vk_from_file = read_vk(proof_type).unwrap();
assert_eq!(vk_from_file, encode_vk(vk), "{info}")
}
fn read_vk(proof_type: CircuitType) -> Result<String> {
log::info!("read_vk, {:?}", proof_type);
let vk_file = match proof_type {
CircuitType::Chunk => CHUNK_VK_PATH.clone(),
CircuitType::Batch => BATCH_VK_PATH.clone(),
CircuitType::Bundle => todo!(),
CircuitType::Undefined => unreachable!(),
};
let data = std::fs::read(vk_file)?;
Ok(encode_vk(data))
}
fn read_chunk_trace(path: PathBuf) -> Result<Vec<BlockTrace>> {
log::info!("read_chunk_trace, {:?}", path);
let mut chunk_trace: Vec<BlockTrace> = vec![];
fn read_block_trace(file: &PathBuf) -> Result<BlockTrace> {
let f = std::fs::File::open(file)?;
Ok(serde_json::from_reader(&f)?)
}
if path.is_dir() {
let entries = std::fs::read_dir(&path)?;
let mut files: Vec<String> = entries
.into_iter()
.filter_map(|e| {
if e.is_err() {
return None;
}
let entry = e.unwrap();
if entry.path().is_dir() {
return None;
}
if let Result::Ok(file_name) = entry.file_name().into_string() {
Some(file_name)
} else {
None
}
})
.collect();
files.sort();
log::info!("files in chunk {:?} is {:?}", path, files);
for file in files {
let block_trace = read_block_trace(&path.join(file))?;
chunk_trace.push(block_trace);
}
} else {
let block_trace = read_block_trace(&path)?;
chunk_trace.push(block_trace);
}
Ok(chunk_trace)
}
fn get_chunk_dir_paths() -> Result<Vec<PathBuf>> {
let batch_path = PathBuf::from(BATCH_DIR_PATH.clone());
let entries = std::fs::read_dir(&batch_path)?;
let mut files: Vec<String> = entries
.filter_map(|e| {
if e.is_err() {
return None;
}
let entry = e.unwrap();
if entry.path().is_dir() {
if let Result::Ok(file_name) = entry.file_name().into_string() {
Some(file_name)
} else {
None
}
} else {
None
}
})
.collect();
files.sort();
log::info!("files in batch {:?} is {:?}", batch_path, files);
Ok(files.into_iter().map(|f| batch_path.join(f)).collect())
}
fn traces_to_chunk_info(chunk_trace: Vec<BlockTrace>) -> Result<ChunkInfo> {
let witness_block = chunk_trace_to_witness_block(chunk_trace)?;
Ok(ChunkInfo::from_witness_block(&witness_block, false))
}
fn dump_proof(id: String, proof_data: String) -> Result<()> {
let dump_path = PathBuf::from(PROOF_DUMP_PATH.clone());
Ok(std::fs::write(dump_path.join(id), proof_data)?)
}
}

View File

@@ -33,3 +33,45 @@ make rollup_bins
./build/bin/gas_oracle --config ./conf/config.json
./build/bin/rollup_relayer --config ./conf/config.json
```
## Proposer Tool
The Proposer Tool replays historical blocks under custom configurations (e.g., future hardfork configs, custom chunk/batch/bundle proposer configs) to generate chunks/batches/bundles, helping you test parameter changes before a protocol upgrade.
You can:
1. Enable different hardforks in the genesis configuration.
2. Set custom chunk-proposer, batch-proposer, and bundle-proposer parameters.
3. Analyze resulting metrics (blob size, block count, transaction count, gas usage).
## How to run the proposer tool?
### Set the configs
1. Set genesis config to enable desired hardforks in [`proposer-tool-genesis.json`](./proposer-tool-genesis.json).
2. Set proposer config in [`proposer-tool-config.json`](./proposer-tool-config.json) for data analysis.
3. Set `start-l2-block` in the launch command of the proposer tool in [`docker-compose-proposer-tool.yml`](./docker-compose-proposer-tool.yml) to the block number you want to start from. The default is `0`, which means starting from the genesis block.
### Start the proposer tool using docker-compose
Prerequisite: an RPC URL to an archive L2 node. The default URL in [`proposer-tool-config.json`](./proposer-tool-config.json) is `https://rpc.scroll.io`.
```
cd rollup
DOCKER_BUILDKIT=1 docker-compose -f docker-compose-proposer-tool.yml up -d
```
> Note: Port 5432 of the database container is mapped to the host machine, so you can use `psql` or any other DB client to connect to the database.
> The DSN for the database is `postgres://postgres:postgres@db:5432/scroll?sslmode=disable`.
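For example, assuming the default compose setup and the port mapping above, you can connect from the host machine with (inside the compose network the host is `db`, as in the DSN above; from the host it is `localhost`):
```
psql "postgres://postgres:postgres@localhost:5432/scroll?sslmode=disable"
```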
### Reset env
```
docker-compose -f docker-compose-proposer-tool.yml down -v
```
If you need to rebuild the images, you must first remove the old ones:
```
docker images | grep rollup | awk '{print $3}' | xargs docker rmi -f
```

View File

@@ -10,7 +10,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"
@@ -72,22 +71,12 @@ func action(ctx *cli.Context) error {
log.Crit("failed to connect l1 geth", "config file", cfgFile, "error", err)
}
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, db, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, relayer.ServiceTypeL1GasOracle, registry)
if err != nil {
log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err)
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, false /* initGenesis */, relayer.ServiceTypeL2GasOracle, registry)
if err != nil {
log.Crit("failed to create new l2 relayer", "config file", cfgFile, "error", err)
}
// Start l1 watcher process
go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
// Fetch the latest block number to decrease the delay when fetching gas prices
@@ -106,7 +95,6 @@ func action(ctx *cli.Context) error {
// Start l1relayer process
go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
// Finished starting all message relayer functions
log.Info("Start gas-oracle successfully", "version", version.Version)

View File

@@ -0,0 +1,93 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/watcher"
)
var app *cli.App
func init() {
// Set up proposer-tool app info.
app = cli.NewApp()
app.Action = action
app.Name = "proposer-tool"
app.Usage = "The Scroll Proposer Tool"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, utils.RollupRelayerFlags...)
app.Flags = append(app.Flags, utils.ProposerToolFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfigForReplay(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
startL2BlockHeight := ctx.Uint64(utils.StartL2BlockFlag.Name)
genesisPath := ctx.String(utils.Genesis.Name)
genesis, err := utils.ReadGenesis(genesisPath)
if err != nil {
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
}
minCodecVersion := encoding.CodecVersion(ctx.Uint(utils.MinCodecVersionFlag.Name))
// sanity check config
if cfg.L2Config.BatchProposerConfig.MaxChunksPerBatch <= 0 {
log.Crit("cfg.L2Config.BatchProposerConfig.MaxChunksPerBatch must be greater than 0")
}
if cfg.L2Config.ChunkProposerConfig.MaxL2GasPerChunk <= 0 {
log.Crit("cfg.L2Config.ChunkProposerConfig.MaxL2GasPerChunk must be greater than 0")
}
proposerTool, err := watcher.NewProposerTool(subCtx, cancel, cfg, startL2BlockHeight, minCodecVersion, genesis.Config)
if err != nil {
log.Crit("failed to create proposer tool", "startL2BlockHeight", startL2BlockHeight, "minCodecVersion", minCodecVersion, "error", err)
}
proposerTool.Start()
log.Info("Start proposer-tool successfully", "version", version.Version)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
cancel()
proposerTool.Stop()
return nil
}
// Run proposer tool cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -0,0 +1,7 @@
package main
import "scroll-tech/rollup/cmd/proposer_tool/app"
func main() {
app.Run()
}
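For local runs outside Docker, the same flags from the compose file can be passed directly. This is a hedged sketch: the `cmd/proposer_tool` path follows from the import above, while the config paths mirror the compose setup and may differ in your checkout:
```
cd rollup
go run ./cmd/proposer_tool \
  --config ./proposer-tool-config.json \
  --genesis ./proposer-tool-genesis.json \
  --min-codec-version 4 \
  --start-l2-block 10000
```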

View File

@@ -79,8 +79,6 @@ func action(ctx *cli.Context) error {
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
}
initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
// sanity check config
if cfg.L2Config.RelayerConfig.BatchSubmission == nil {
log.Crit("cfg.L2Config.RelayerConfig.BatchSubmission must not be nil")
@@ -94,8 +92,11 @@ func action(ctx *cli.Context) error {
if cfg.L2Config.BatchProposerConfig.MaxChunksPerBatch <= 0 {
log.Crit("cfg.L2Config.BatchProposerConfig.MaxChunksPerBatch must be greater than 0")
}
if cfg.L2Config.ChunkProposerConfig.MaxL2GasPerChunk <= 0 {
log.Crit("cfg.L2Config.ChunkProposerConfig.MaxL2GasPerChunk must be greater than 0")
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, genesis.Config, initGenesis, relayer.ServiceTypeL2RollupRelayer, registry)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, genesis.Config, relayer.ServiceTypeL2RollupRelayer, registry)
if err != nil {
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
}

View File

@@ -54,7 +54,8 @@
"batch_submission": {
"min_batches": 1,
"max_batches": 6,
"timeout": 300
"timeout": 7200,
"backlog_max": 75
},
"gas_oracle_config": {
"min_gas_price": 0,
@@ -94,6 +95,7 @@
"propose_interval_milliseconds": 100,
"max_block_num_per_chunk": 100,
"max_tx_num_per_chunk": 100,
"max_l2_gas_per_chunk": 20000000,
"max_l1_commit_gas_per_chunk": 11234567,
"max_l1_commit_calldata_size_per_chunk": 112345,
"chunk_timeout_sec": 300,

View File

@@ -0,0 +1,40 @@
version: '3'
services:
db:
image: postgres:14
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
- POSTGRES_DB=scroll
ports:
- "5432:5432"
volumes:
- postgres_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 5s
timeout: 5s
retries: 5
proposer-tool:
build:
context: ..
dockerfile: ./rollup/proposer_tool.Dockerfile
depends_on:
db:
condition: service_healthy
command: [
"--config", "/app/conf/proposer-tool-config.json",
"--genesis", "/app/conf/proposer-tool-genesis.json",
"--min-codec-version", "4",
"--start-l2-block", "10000",
"--log.debug", "--verbosity", "3"
]
volumes:
- ./proposer-tool-config.json:/app/conf/proposer-tool-config.json
- ./proposer-tool-genesis.json:/app/conf/proposer-tool-genesis.json
restart: unless-stopped
volumes:
postgres_data:

View File

@@ -2,8 +2,6 @@ module scroll-tech/rollup
go 1.22
toolchain go1.22.2
require (
github.com/agiledragon/gomonkey/v2 v2.12.0
github.com/consensys/gnark-crypto v0.16.0
@@ -13,7 +11,7 @@ require (
github.com/holiman/uint256 v1.3.2
github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
github.com/smartystreets/goconvey v1.8.0
github.com/spf13/viper v1.19.0

View File

@@ -249,8 +249,8 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=

View File

@@ -1,7 +1,10 @@
package config
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"reflect"
"strings"
@@ -20,6 +23,11 @@ type Config struct {
DBConfig *database.Config `json:"db_config"`
}
type ConfigForReplay struct {
Config
DBConfigForReplay *database.Config `json:"db_config_for_replay"`
}
// NewConfig returns a new instance of Config.
func NewConfig(file string) (*Config, error) {
v := viper.New()
@@ -87,3 +95,19 @@ func NewConfig(file string) (*Config, error) {
return cfg, nil
}
// NewConfigForReplay returns a new instance of ConfigForReplay.
func NewConfigForReplay(file string) (*ConfigForReplay, error) {
buf, err := os.ReadFile(filepath.Clean(file))
if err != nil {
return nil, err
}
cfg := &ConfigForReplay{}
err = json.Unmarshal(buf, cfg)
if err != nil {
return nil, err
}
return cfg, nil
}
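A minimal usage sketch of the new constructor, assuming access to the `config` package above; the fallback to the primary `db_config` is an illustration, not behavior from this diff:
```go
package main

import (
	"log"

	"scroll-tech/rollup/internal/config"
)

func main() {
	// Sketch: load the replay config (the path is an assumption).
	cfg, err := config.NewConfigForReplay("./conf/proposer-tool-config.json")
	if err != nil {
		log.Fatalf("failed to load replay config: %v", err)
	}
	dbCfg := cfg.DBConfigForReplay
	if dbCfg == nil {
		dbCfg = cfg.DBConfig // assumed fallback to the primary database config
	}
	_ = dbCfg
}
```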

View File

@@ -31,6 +31,7 @@ type ChunkProposerConfig struct {
ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
MaxBlockNumPerChunk uint64 `json:"max_block_num_per_chunk"`
MaxTxNumPerChunk uint64 `json:"max_tx_num_per_chunk"`
MaxL2GasPerChunk uint64 `json:"max_l2_gas_per_chunk"`
MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`

View File

@@ -38,6 +38,8 @@ type BatchSubmission struct {
MaxBatches int `json:"max_batches"`
// The time in seconds after which a batch is considered stale and should be submitted ignoring the min batch count.
TimeoutSec int64 `json:"timeout"`
// The maximum number of pending batches to keep in the backlog.
BacklogMax int64 `json:"backlog_max"`
}
// ChainMonitor this config is used to get batch status from chain_monitor API.
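As used in the relayer changes further down, `TimeoutSec` doubles as the fee-strategy window: the relayer indexes `bestParams[uint64(cfg.BatchSubmission.TimeoutSec)]`, so the sample value `7200` selects the 2h rule set. A runnable sketch of a guarded lookup follows; the values are reduced to labels, and the fallback is an assumption (the diff indexes the map directly):
```go
package main

import "fmt"

// Mirror of the relayer's strategy lookup, with the map values elided.
var bestParams = map[uint64]string{
	2 * 3600:  "2h rules",
	5 * 3600:  "5h rules",
	12 * 3600: "12h rules",
}

func main() {
	timeoutSec := uint64(7200) // the sample config value above
	strategy, ok := bestParams[timeoutSec]
	if !ok {
		strategy = bestParams[2*3600] // assumed fallback; the diff indexes directly
	}
	fmt.Println(strategy) // "2h rules"
}
```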

View File

@@ -25,6 +25,6 @@ const (
ServiceTypeL2RollupRelayer
// ServiceTypeL1GasOracle indicates the service is a Layer 1 gas oracle.
ServiceTypeL1GasOracle
// ServiceTypeL2GasOracle indicates the service is a Layer 2 gas oracle.
ServiceTypeL2GasOracle
// ServiceTypeL2GasOracleDeprecated indicates the service is a Layer 2 gas oracle, which is deprecated.
ServiceTypeL2GasOracleDeprecated
)

View File

@@ -167,6 +167,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
if r.lastBaseFee == r.cfg.GasOracleConfig.L1BaseFeeDefault && r.lastBlobBaseFee == r.cfg.GasOracleConfig.L1BlobBaseFeeDefault {
return
}
log.Warn("The committing batch has been stuck for a long time, it's likely that the L1 gas fee spiked, set fees to default values", "currentBaseFee", baseFee, "currentBlobBaseFee", blobBaseFee, "threshold (min)", r.cfg.GasOracleConfig.L1BlobBaseFeeThreshold, "defaultBaseFee", r.cfg.GasOracleConfig.L1BaseFeeDefault, "defaultBlobBaseFee", r.cfg.GasOracleConfig.L1BlobBaseFeeDefault)
baseFee = r.cfg.GasOracleConfig.L1BaseFeeDefault
blobBaseFee = r.cfg.GasOracleConfig.L1BlobBaseFeeDefault
} else if err != nil {
@@ -250,7 +251,11 @@ func (r *Layer1Relayer) shouldUpdateGasOracle(baseFee uint64, blobBaseFee uint64
return true
}
expectedBaseFeeDelta := r.lastBaseFee*r.gasPriceDiff/gasPriceDiffPrecision + 1
expectedBaseFeeDelta := r.lastBaseFee * r.gasPriceDiff / gasPriceDiffPrecision
// Allow a minimum delta of 0 wei if the gas price diff config is 0; this lets the gas oracle send transactions continuously.
if r.gasPriceDiff > 0 {
expectedBaseFeeDelta += 1
}
if baseFee >= r.minGasPrice && math.Abs(float64(baseFee)-float64(r.lastBaseFee)) >= float64(expectedBaseFeeDelta) {
return true
}
@@ -278,5 +283,7 @@ func (r *Layer1Relayer) commitBatchReachTimeout() (bool, error) {
}
// len(batches) == 0 probably shouldn't ever happen, but we need to check this
// Also, we should check if it's a genesis batch. If so, skip the timeout check.
return len(batches) == 0 || (batches[0].Index != 0 && utils.NowUTC().Sub(*batches[0].CommittedAt) > time.Duration(r.cfg.GasOracleConfig.CheckCommittedBatchesWindowMinutes)*time.Minute), nil
// If finalizing/finalized status is updated before committed status, skip the timeout check of this round.
// Because batches[0].CommittedAt is nil in this case, this will only continue for a short time window.
return len(batches) == 0 || (batches[0].Index != 0 && batches[0].CommittedAt != nil && utils.NowUTC().Sub(*batches[0].CommittedAt) > time.Duration(r.cfg.GasOracleConfig.CheckCommittedBatchesWindowMinutes)*time.Minute), nil
}
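To make the new delta rule concrete, here is a minimal, self-contained sketch of the base-fee branch; the precision constant is a placeholder (the real `gasPriceDiffPrecision` lives in the relayer package and is not shown in this diff), and the real method also checks blob base fees:
```go
package main

import (
	"fmt"
	"math"
)

// Placeholder value; not taken from this diff.
const gasPriceDiffPrecision = 1_000_000

// shouldUpdate mirrors the base-fee branch of shouldUpdateGasOracle: with
// gasPriceDiff == 0 the expected delta stays 0, so every sample at or above
// minGasPrice triggers an update and the oracle keeps sending transactions.
func shouldUpdate(lastBaseFee, baseFee, minGasPrice, gasPriceDiff uint64) bool {
	expectedDelta := lastBaseFee * gasPriceDiff / gasPriceDiffPrecision
	if gasPriceDiff > 0 {
		expectedDelta += 1
	}
	return baseFee >= minGasPrice &&
		math.Abs(float64(baseFee)-float64(lastBaseFee)) >= float64(expectedDelta)
}

func main() {
	fmt.Println(shouldUpdate(100, 100, 0, 0))      // true: zero diff config always updates
	fmt.Println(shouldUpdate(100, 102, 0, 50_000)) // false: a 5% diff needs a move of at least 6 wei
}
```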

View File

@@ -34,6 +34,32 @@ import (
rutils "scroll-tech/rollup/internal/utils"
)
// RelaxType enumerates the relaxation functions we support when
// turning a baseline fee into a “target” fee.
type RelaxType int
const (
// NoRelaxation means “don't touch the baseline” (i.e. fallback/default).
NoRelaxation RelaxType = iota
Exponential
Sigmoid
)
const secondsPerBlock = 12
// BaselineType enumerates the baseline types we support when
// turning a baseline fee into a “target” fee.
type BaselineType int
const (
// PctMin means “take the minimum of the last N blocks' fees, then
// take the PCT of that”.
PctMin BaselineType = iota
// EWMA means “take the exponentially-weighted moving average of
// the last N blocks' fees”.
EWMA
)
// Layer2Relayer is responsible for:
// i. committing and finalizing L2 blocks on L1.
// ii. updating L2 gas price oracle contract on L1.
@@ -47,6 +73,7 @@ type Layer2Relayer struct {
batchOrm *orm.Batch
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
l1BlockOrm *orm.L1Block
cfg *config.RelayerConfig
@@ -54,12 +81,7 @@ type Layer2Relayer struct {
finalizeSender *sender.Sender
l1RollupABI *abi.ABI
gasOracleSender *sender.Sender
l2GasOracleABI *abi.ABI
lastGasPrice uint64
minGasPrice uint64
gasPriceDiff uint64
l2GasOracleABI *abi.ABI
// Used to get batch status from chain_monitor api.
chainMonitorClient *resty.Client
@@ -67,25 +89,33 @@ type Layer2Relayer struct {
metrics *l2RelayerMetrics
chainCfg *params.ChainConfig
lastFetchedBlock uint64 // highest block number ever pulled
feeHistory []*big.Int // sliding window of blob fees
batchStrategy StrategyParams
}
// StrategyParams holds the per-window fee-submission rules.
type StrategyParams struct {
BaselineType BaselineType // "pct_min" or "ewma"
BaselineParam float64 // percentile (0–1) or α for EWMA
Gamma float64 // relaxation γ
Beta float64 // relaxation β
RelaxType RelaxType // Exponential or Sigmoid
}
// bestParams maps the 2h/5h/12h submission windows to their best rules.
var bestParams = map[uint64]StrategyParams{
2 * 3600: {BaselineType: PctMin, BaselineParam: 0.10, Gamma: 0.4, Beta: 8, RelaxType: Exponential},
5 * 3600: {BaselineType: PctMin, BaselineParam: 0.30, Gamma: 0.6, Beta: 20, RelaxType: Sigmoid},
12 * 3600: {BaselineType: PctMin, BaselineParam: 0.50, Gamma: 0.5, Beta: 20, RelaxType: Sigmoid},
}
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, initGenesis bool, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
var gasOracleSender, commitSender, finalizeSender *sender.Sender
var err error
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
var commitSender, finalizeSender *sender.Sender
switch serviceType {
case ServiceTypeL2GasOracle:
gasOracleSender, err = sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderSignerConfig, "l2_relayer", "gas_oracle_sender", types.SenderTypeL2GasOracle, db, reg)
if err != nil {
return nil, fmt.Errorf("new gas oracle sender failed, err: %w", err)
}
// Ensure test features aren't enabled on the ethereum mainnet.
if gasOracleSender.GetChainID().Cmp(big.NewInt(1)) == 0 && cfg.EnableTestEnvBypassFeatures {
return nil, errors.New("cannot enable test env features in mainnet")
}
case ServiceTypeL2RollupRelayer:
commitSenderAddr, err := addrFromSignerConfig(cfg.CommitSenderSignerConfig)
if err != nil {
@@ -118,22 +148,13 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
return nil, fmt.Errorf("invalid service type for l2_relayer: %v", serviceType)
}
var minGasPrice uint64
var gasPriceDiff uint64
if cfg.GasOracleConfig != nil {
minGasPrice = cfg.GasOracleConfig.MinGasPrice
gasPriceDiff = cfg.GasOracleConfig.GasPriceDiff
} else {
minGasPrice = 0
gasPriceDiff = defaultGasPriceDiff
}
layer2Relayer := &Layer2Relayer{
ctx: ctx,
db: db,
bundleOrm: orm.NewBundle(db),
batchOrm: orm.NewBatch(db),
l1BlockOrm: orm.NewL1Block(db),
l2BlockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
@@ -143,14 +164,10 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
finalizeSender: finalizeSender,
l1RollupABI: bridgeAbi.ScrollChainABI,
gasOracleSender: gasOracleSender,
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
cfg: cfg,
chainCfg: chainCfg,
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
batchStrategy: bestParams[uint64(cfg.BatchSubmission.TimeoutSec)],
cfg: cfg,
chainCfg: chainCfg,
}
// chain_monitor client
@@ -161,16 +178,12 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
}
// Initialize genesis before we do anything else
if initGenesis {
if err := layer2Relayer.initializeGenesis(); err != nil {
return nil, fmt.Errorf("failed to initialize and commit genesis batch, err: %v", err)
}
if err := layer2Relayer.initializeGenesis(); err != nil {
return nil, fmt.Errorf("failed to initialize and commit genesis batch, err: %v", err)
}
layer2Relayer.metrics = initL2RelayerMetrics(reg)
switch serviceType {
case ServiceTypeL2GasOracle:
go layer2Relayer.handleL2GasOracleConfirmLoop(ctx)
case ServiceTypeL2RollupRelayer:
go layer2Relayer.handleL2RollupRelayerConfirmLoop(ctx)
default:
@@ -301,81 +314,11 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
}
}
// ProcessGasPriceOracle imports gas price to layer1
func (r *Layer2Relayer) ProcessGasPriceOracle() {
r.metrics.rollupL2RelayerGasPriceOraclerRunTotal.Inc()
batch, err := r.batchOrm.GetLatestBatch(r.ctx)
if err != nil {
log.Error("Failed to GetLatestBatch", "err", err)
return
}
if types.GasOracleStatus(batch.OracleStatus) == types.GasOraclePending {
suggestGasPrice, err := r.l2Client.SuggestGasPrice(r.ctx)
if err != nil {
log.Error("Failed to fetch SuggestGasPrice from l2geth", "err", err)
return
}
suggestGasPriceUint64 := uint64(suggestGasPrice.Int64())
// include the token exchange rate in the fee data if alternative gas token enabled
if r.cfg.GasOracleConfig.AlternativeGasTokenConfig != nil && r.cfg.GasOracleConfig.AlternativeGasTokenConfig.Enabled {
// The exchange rate represent the number of native token on L1 required to exchange for 1 native token on L2.
var exchangeRate float64
switch r.cfg.GasOracleConfig.AlternativeGasTokenConfig.Mode {
case "Fixed":
exchangeRate = r.cfg.GasOracleConfig.AlternativeGasTokenConfig.FixedExchangeRate
case "BinanceApi":
exchangeRate, err = rutils.GetExchangeRateFromBinanceApi(r.cfg.GasOracleConfig.AlternativeGasTokenConfig.TokenSymbolPair, 5)
if err != nil {
log.Error("Failed to get gas token exchange rate from Binance api", "tokenSymbolPair", r.cfg.GasOracleConfig.AlternativeGasTokenConfig.TokenSymbolPair, "err", err)
return
}
default:
log.Error("Invalid alternative gas token mode", "mode", r.cfg.GasOracleConfig.AlternativeGasTokenConfig.Mode)
return
}
if exchangeRate == 0 {
log.Error("Invalid exchange rate", "exchangeRate", exchangeRate)
return
}
suggestGasPriceUint64 = uint64(math.Ceil(float64(suggestGasPriceUint64) * exchangeRate))
suggestGasPrice = new(big.Int).SetUint64(suggestGasPriceUint64)
}
expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
if r.lastGasPrice > 0 && expectedDelta == 0 {
expectedDelta = 1
}
// last is undefined or (suggestGasPriceUint64 >= minGasPrice && exceed diff)
if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice &&
(math.Abs(float64(suggestGasPriceUint64)-float64(r.lastGasPrice)) >= float64(expectedDelta))) {
data, err := r.l2GasOracleABI.Pack("setL2BaseFee", suggestGasPrice)
if err != nil {
log.Error("Failed to pack setL2BaseFee", "batch.Hash", batch.Hash, "GasPrice", suggestGasPrice.Uint64(), "err", err)
return
}
hash, err := r.gasOracleSender.SendTransaction(batch.Hash, &r.cfg.GasPriceOracleContractAddress, data, nil, 0)
if err != nil {
log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
return
}
err = r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batch.Hash, types.GasOracleImporting, hash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "batch.Hash", batch.Hash, "err", err)
return
}
r.lastGasPrice = suggestGasPriceUint64
r.metrics.rollupL2RelayerLastGasPrice.Set(float64(r.lastGasPrice))
log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
}
}
}
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
// Pending batches are submitted if one of the following conditions is met:
// - the first batch is too old -> forceSubmit
// - backlogCount > r.cfg.BatchSubmission.BacklogMax -> forceSubmit
// - we have at least minBatches AND the current blob fee hits the desired target price
func (r *Layer2Relayer) ProcessPendingBatches() {
// get pending batches from database in ascending order by their index.
dbBatches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, r.cfg.BatchSubmission.MaxBatches)
@@ -384,8 +327,40 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}
var batchesToSubmit []*dbBatchWithChunksAndParent
// if the backlog outgrows the max size, force-submit the oldest batches
backlogCount, err := r.batchOrm.GetFailedAndPendingBatchesCount(r.ctx)
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
}
var forceSubmit bool
oldestBatchTimestamp := dbBatches[0].CreatedAt
// if the batch with the oldest index is too old, we force submit all batches that we have so far in the next step
if r.cfg.BatchSubmission.TimeoutSec > 0 && time.Since(oldestBatchTimestamp) > time.Duration(r.cfg.BatchSubmission.TimeoutSec)*time.Second {
forceSubmit = true
}
// force submit if backlog is too big
if backlogCount > r.cfg.BatchSubmission.BacklogMax {
forceSubmit = true
}
if !forceSubmit {
// check if we should skip submitting the batch based on the fee target
skip, err := r.skipSubmitByFee(oldestBatchTimestamp)
// return if not hitting target price
if skip {
log.Debug("Skipping batch submission", "reason", err)
return
}
if err != nil {
log.Warn("Failed to check if we should skip batch submission, fallback to immediate submission", "err", err)
}
}
var batchesToSubmit []*dbBatchWithChunksAndParent
for i, dbBatch := range dbBatches {
if i == 0 && encoding.CodecVersion(dbBatch.CodecVersion) < encoding.CodecV7 {
// if the first batch is not >= V7 then we need to submit batches one by one
@@ -446,11 +421,6 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
break
}
// if one of the batches is too old, we force submit all batches that we have so far in the next step
if r.cfg.BatchSubmission.TimeoutSec > 0 && !forceSubmit && time.Since(dbBatch.CreatedAt) > time.Duration(r.cfg.BatchSubmission.TimeoutSec)*time.Second {
forceSubmit = true
}
if batchesToSubmitLen < r.cfg.BatchSubmission.MaxBatches {
batchesToSubmit = append(batchesToSubmit, &dbBatchWithChunksAndParent{
Batch: dbBatch,
@@ -834,11 +804,9 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
return fmt.Errorf("failed to get end chunk of batch: %w", err)
}
hardForkName := encoding.GetHardforkName(r.chainCfg, firstChunk.StartBlockNumber, firstChunk.StartBlockTime)
var aggProof message.BundleProof
var aggProof *message.OpenVMBundleProof
if withProof {
aggProof, err = r.bundleOrm.GetVerifiedProofByHash(r.ctx, bundle.Hash, hardForkName)
aggProof, err = r.bundleOrm.GetVerifiedProofByHash(r.ctx, bundle.Hash)
if err != nil {
return fmt.Errorf("failed to get verified proof by bundle index: %d, err: %w", bundle.Index, err)
}
@@ -1040,22 +1008,6 @@ func (r *Layer2Relayer) handleConfirmation(cfm *sender.Confirmation) {
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "confirmation", cfm, "err", err)
}
case types.SenderTypeL2GasOracle:
batchHash := cfm.ContextID
var status types.GasOracleStatus
if cfm.IsSuccessful {
status = types.GasOracleImported
r.metrics.rollupL2UpdateGasOracleConfirmedTotal.Inc()
} else {
status = types.GasOracleImportedFailed
r.metrics.rollupL2UpdateGasOracleConfirmedFailedTotal.Inc()
log.Warn("UpdateGasOracleTxType transaction confirmed but failed in layer1", "confirmation", cfm)
}
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batchHash, status, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "confirmation", cfm, "err", err)
}
default:
log.Warn("Unknown transaction type", "confirmation", cfm)
}
@@ -1063,17 +1015,6 @@ func (r *Layer2Relayer) handleConfirmation(cfm *sender.Confirmation) {
log.Info("Transaction confirmed in layer1", "confirmation", cfm)
}
func (r *Layer2Relayer) handleL2GasOracleConfirmLoop(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case cfm := <-r.gasOracleSender.ConfirmChan():
r.handleConfirmation(cfm)
}
}
}
func (r *Layer2Relayer) handleL2RollupRelayerConfirmLoop(ctx context.Context) {
for {
select {
@@ -1186,7 +1127,7 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*db
return calldata, blobs, maxBlockHeight, totalGasUsed, nil
}
func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV4(dbBatch *orm.Batch, aggProof message.BundleProof) ([]byte, error) {
func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV4(dbBatch *orm.Batch, aggProof *message.OpenVMBundleProof) ([]byte, error) {
if aggProof != nil { // finalizeBundle with proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBundleWithProof",
@@ -1214,7 +1155,7 @@ func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV4(dbBatch *orm.Batch
return calldata, nil
}
func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof message.BundleProof) ([]byte, error) {
func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, error) {
if aggProof != nil { // finalizeBundle with proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBundlePostEuclidV2",
@@ -1248,10 +1189,6 @@ func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch
// StopSenders stops the senders of the rollup-relayer to prevent querying the removed pending_transaction table in unit tests.
// for unit test
func (r *Layer2Relayer) StopSenders() {
if r.gasOracleSender != nil {
r.gasOracleSender.Stop()
}
if r.commitSender != nil {
r.commitSender.Stop()
}
@@ -1261,6 +1198,136 @@ func (r *Layer2Relayer) StopSenders() {
}
}
// fetchBlobFeeHistory returns the last windowSec seconds of blob-fee samples
// by reading the L1Block table's BlobBaseFee column.
func (r *Layer2Relayer) fetchBlobFeeHistory(windowSec uint64) ([]*big.Int, error) {
latest, err := r.l1BlockOrm.GetLatestL1BlockHeight(r.ctx)
if err != nil {
return nil, fmt.Errorf("GetLatestL1BlockHeight: %w", err)
}
// bootstrap on first call
if r.lastFetchedBlock == 0 {
// start the window roughly windowSec seconds before the latest block
r.lastFetchedBlock = latest - windowSec/secondsPerBlock
}
from := r.lastFetchedBlock + 1
// if there are new blocks since the last fetch
if from <= latest {
raw, err := r.l1BlockOrm.GetBlobFeesInRange(r.ctx, from, latest)
if err != nil {
return nil, fmt.Errorf("GetBlobFeesInRange: %w", err)
}
// append them
for _, v := range raw {
r.feeHistory = append(r.feeHistory, new(big.Int).SetUint64(v))
r.lastFetchedBlock++
}
}
maxLen := int(windowSec / secondsPerBlock)
if len(r.feeHistory) > maxLen {
r.feeHistory = r.feeHistory[len(r.feeHistory)-maxLen:]
}
return r.feeHistory, nil
}
// calculateTargetPrice applies pct_min/ewma + relaxation to get a BigInt target
func calculateTargetPrice(windowSec uint64, strategy StrategyParams, firstTime time.Time, history []*big.Int) *big.Int {
var baseline float64 // baseline in Gwei (converting to float, small loss of precision)
n := len(history)
if n == 0 {
return big.NewInt(0)
}
switch strategy.BaselineType {
case PctMin:
// make a copy, sort by big.Int.Cmp, then pick the percentile element
sorted := make([]*big.Int, n)
copy(sorted, history)
sort.Slice(sorted, func(i, j int) bool {
return sorted[i].Cmp(sorted[j]) < 0
})
idx := int(strategy.BaselineParam * float64(n-1))
if idx < 0 {
idx = 0
}
baseline, _ = new(big.Float).
Quo(new(big.Float).SetInt(sorted[idx]), big.NewFloat(1e9)).
Float64()
case EWMA:
one := big.NewFloat(1)
alpha := big.NewFloat(strategy.BaselineParam)
oneMinusAlpha := new(big.Float).Sub(one, alpha)
// start from first history point
ewma := new(big.Float).
Quo(new(big.Float).SetInt(history[0]), big.NewFloat(1e9))
for i := 1; i < n; i++ {
curr := new(big.Float).
Quo(new(big.Float).SetInt(history[i]), big.NewFloat(1e9))
term1 := new(big.Float).Mul(alpha, curr)
term2 := new(big.Float).Mul(oneMinusAlpha, ewma)
ewma = new(big.Float).Add(term1, term2)
}
baseline, _ = ewma.Float64()
default:
// fallback to last element
baseline, _ = new(big.Float).
Quo(new(big.Float).SetInt(history[n-1]), big.NewFloat(1e9)).
Float64()
} // baseline now holds the chosen value in float64 Gwei
// relaxation
age := time.Since(firstTime).Seconds()
frac := age / float64(windowSec)
var adjusted float64
switch strategy.RelaxType {
case Exponential:
adjusted = baseline * (1 + strategy.Gamma*math.Exp(strategy.Beta*(frac-1)))
case Sigmoid:
adjusted = baseline * (1 + strategy.Gamma/(1+math.Exp(-strategy.Beta*(frac-0.5))))
default:
adjusted = baseline
}
// back to wei
f := new(big.Float).Mul(big.NewFloat(adjusted), big.NewFloat(1e9))
out, _ := f.Int(nil)
return out
}
// skipSubmitByFee returns (true, reason) when submission should be skipped right now
// because the blob fee is above target and the timeout window hasn't yet elapsed.
// Otherwise it returns (false, nil), or (false, err) if the fee history is unavailable.
func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time) (bool, error) {
windowSec := uint64(r.cfg.BatchSubmission.TimeoutSec)
hist, err := r.fetchBlobFeeHistory(windowSec)
if err != nil || len(hist) == 0 {
return false, fmt.Errorf(
"blob-fee history unavailable or empty: %w (history_length=%d)",
err, len(hist),
)
}
// calculate target & get current (in wei)
target := calculateTargetPrice(windowSec, r.batchStrategy, oldest, hist)
current := hist[len(hist)-1]
// if current fee > target and still inside the timeout window, skip
if current.Cmp(target) > 0 && time.Since(oldest) < time.Duration(windowSec)*time.Second {
return true, fmt.Errorf(
"blob-fee above target & window not yet passed; current=%s target=%s age=%s",
current.String(), target.String(), time.Since(oldest),
)
}
// otherwise proceed with submission
return false, nil
}
func addrFromSignerConfig(config *config.SignerConfig) (common.Address, error) {
switch config.SignerType {
case sender.PrivateKeySignerType:
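To see how the relaxation behaves over a window, here is a minimal float64 sketch of the 2h PctMin strategy from `bestParams` (γ=0.4, β=8, exponential relaxation); the real `calculateTargetPrice` operates on `*big.Int` wei with `big.Float` intermediates:
```go
package main

import (
	"fmt"
	"math"
)

// target applies the exponential relaxation from calculateTargetPrice to a
// baseline expressed in Gwei: baseline * (1 + gamma*exp(beta*(frac-1))),
// where frac is the batch age as a fraction of the submission window.
func target(baselineGwei, gamma, beta, ageSec, windowSec float64) float64 {
	frac := ageSec / windowSec
	return baselineGwei * (1 + gamma*math.Exp(beta*(frac-1)))
}

func main() {
	const windowSec = 2 * 3600 // the 2h window from bestParams
	baseline := 1.0            // Gwei, e.g. the 10th-percentile of recent blob fees

	// Fresh batch: exp(-8) is tiny, so the target sits barely above baseline
	// and a spiky current blob fee makes skipSubmitByFee skip submission.
	fmt.Printf("age 0h: %.4f Gwei\n", target(baseline, 0.4, 8, 0, windowSec))
	// At half the window the target has relaxed only slightly (~1.007x).
	fmt.Printf("age 1h: %.4f Gwei\n", target(baseline, 0.4, 8, windowSec/2, windowSec))
	// At the full window the target reaches 1.4x baseline; past this point
	// the timeout in ProcessPendingBatches force-submits regardless of fee.
	fmt.Printf("age 2h: %.4f Gwei\n", target(baseline, 0.4, 8, windowSec, windowSec))
}
```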

View File

@@ -12,14 +12,10 @@ type l2RelayerMetrics struct {
rollupL2RelayerProcessPendingBatchTotal prometheus.Counter
rollupL2RelayerProcessPendingBatchSuccessTotal prometheus.Counter
rollupL2RelayerProcessPendingBatchErrTooManyPendingBlobTxsTotal prometheus.Counter
rollupL2RelayerGasPriceOraclerRunTotal prometheus.Counter
rollupL2RelayerLastGasPrice prometheus.Gauge
rollupL2BatchesCommittedConfirmedTotal prometheus.Counter
rollupL2BatchesCommittedConfirmedFailedTotal prometheus.Counter
rollupL2BatchesFinalizedConfirmedTotal prometheus.Counter
rollupL2BatchesFinalizedConfirmedFailedTotal prometheus.Counter
rollupL2UpdateGasOracleConfirmedTotal prometheus.Counter
rollupL2UpdateGasOracleConfirmedFailedTotal prometheus.Counter
rollupL2ChainMonitorLatestFailedCall prometheus.Counter
rollupL2ChainMonitorLatestFailedBatchStatus prometheus.Counter
rollupL2RelayerProcessPendingBundlesTotal prometheus.Counter
@@ -56,14 +52,6 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
Name: "rollup_layer2_process_pending_batch_err_too_many_pending_blob_txs_total",
Help: "The total number of layer2 process pending batch failed on too many pending blob txs",
}),
rollupL2RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_gas_price_oracler_total",
Help: "The total number of layer2 gas price oracler run total",
}),
rollupL2RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_layer2_gas_price_latest_gas_price",
Help: "The latest gas price of rollup relayer l2",
}),
rollupL2BatchesCommittedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_process_committed_batches_confirmed_total",
Help: "The total number of layer2 process committed batches confirmed total",
@@ -80,14 +68,6 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
Name: "rollup_layer2_process_finalized_batches_confirmed_failed_total",
Help: "The total number of layer2 process finalized batches confirmed failed total",
}),
rollupL2UpdateGasOracleConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_update_layer1_gas_oracle_confirmed_total",
Help: "The total number of updating layer2 gas oracle confirmed",
}),
rollupL2UpdateGasOracleConfirmedFailedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_update_layer1_gas_oracle_confirmed_failed_total",
Help: "The total number of updating layer2 gas oracle confirmed failed",
}),
rollupL2ChainMonitorLatestFailedCall: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_chain_monitor_latest_failed_batch_call",
Help: "The total number of failed call chain_monitor api",

View File

@@ -2,7 +2,6 @@ package relayer
import (
"context"
"errors"
"math/big"
"net/http"
"strings"
@@ -14,9 +13,7 @@ import (
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/params"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
@@ -44,7 +41,7 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
func testCreateNewRelayer(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()
@@ -64,7 +61,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
assert.Fail(t, "unsupported codec version, expected CodecV4")
}
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
patchGuard := gomonkey.ApplyMethodFunc(l2Cli, "SendTransaction", func(_ context.Context, _ *gethTypes.Transaction) error {
@@ -113,7 +110,7 @@ func testL2RelayerProcessPendingBundles(t *testing.T) {
if codecVersion == encoding.CodecV4 {
chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
}
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
batch := &encoding.Batch{
@@ -151,11 +148,12 @@ func testL2RelayerProcessPendingBundles(t *testing.T) {
// no valid proof, rollup status remains the same
assert.Equal(t, types.RollupPending, types.RollupStatus(bundles[0].RollupStatus))
proof := &message.Halo2BundleProof{
RawProof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
patchGuard := gomonkey.ApplyMethodFunc((*message.OpenVMBundleProof)(nil), "SanityCheck", func() error {
return nil
})
defer patchGuard.Reset()
proof := &message.OpenVMBundleProof{EvmProof: &message.OpenVMEvmProof{Instances: make([]byte, 384)}}
err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, proof, types.ProvingTaskVerified, 600)
assert.NoError(t, err)
@@ -181,7 +179,7 @@ func testL2RelayerFinalizeTimeoutBundles(t *testing.T) {
if codecVersion == encoding.CodecV4 {
chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
}
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
@@ -257,7 +255,7 @@ func testL2RelayerCommitConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
@@ -313,7 +311,7 @@ func testL2RelayerFinalizeBundleConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
@@ -374,149 +372,6 @@ func testL2RelayerFinalizeBundleConfirm(t *testing.T) {
}, 5*time.Second, 100*time.Millisecond, "Bundle or Batch status did not update as expected")
}
func testL2RelayerGasOracleConfirm(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
batch1 := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1},
}
batchOrm := orm.NewBatch(db)
dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, encoding.CodecV0, rutils.BatchMetrics{})
assert.NoError(t, err)
batch2 := &encoding.Batch{
Index: batch1.Index + 1,
TotalL1MessagePoppedBefore: batch1.TotalL1MessagePoppedBefore,
ParentBatchHash: common.HexToHash(dbBatch1.Hash),
Chunks: []*encoding.Chunk{chunk2},
}
dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, encoding.CodecV0, rutils.BatchMetrics{})
assert.NoError(t, err)
// Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, false, ServiceTypeL2GasOracle, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
// Simulate message confirmations.
type BatchConfirmation struct {
batchHash string
isSuccessful bool
}
confirmations := []BatchConfirmation{
{batchHash: dbBatch1.Hash, isSuccessful: true},
{batchHash: dbBatch2.Hash, isSuccessful: false},
}
for _, confirmation := range confirmations {
l2Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
ContextID: confirmation.batchHash,
IsSuccessful: confirmation.isSuccessful,
SenderType: types.SenderTypeL2GasOracle,
})
}
// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleImportedFailed}
for i, confirmation := range confirmations {
gasOracle, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": confirmation.batchHash}, nil, 0)
if err != nil || len(gasOracle) != 1 || types.GasOracleStatus(gasOracle[0].OracleStatus) != expectedStatuses[i] {
return false
}
}
return true
})
assert.True(t, ok)
}
func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, false, ServiceTypeL2GasOracle, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()
var batchOrm *orm.Batch
convey.Convey("Failed to GetLatestBatch", t, func() {
targetErr := errors.New("GetLatestBatch error")
patchGuard := gomonkey.ApplyMethodFunc(batchOrm, "GetLatestBatch", func(context.Context) (*orm.Batch, error) {
return nil, targetErr
})
defer patchGuard.Reset()
relayer.ProcessGasPriceOracle()
})
patchGuard := gomonkey.ApplyMethodFunc(batchOrm, "GetLatestBatch", func(context.Context) (*orm.Batch, error) {
batch := orm.Batch{
OracleStatus: int16(types.GasOraclePending),
Hash: "0x0000000000000000000000000000000000000000",
}
return &batch, nil
})
defer patchGuard.Reset()
convey.Convey("Failed to fetch SuggestGasPrice from l2geth", t, func() {
targetErr := errors.New("SuggestGasPrice error")
patchGuard.ApplyMethodFunc(relayer.l2Client, "SuggestGasPrice", func(ctx context.Context) (*big.Int, error) {
return nil, targetErr
})
relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(relayer.l2Client, "SuggestGasPrice", func(ctx context.Context) (*big.Int, error) {
return big.NewInt(100), nil
})
convey.Convey("Failed to pack setL2BaseFee", t, func() {
targetErr := errors.New("setL2BaseFee error")
patchGuard.ApplyMethodFunc(relayer.l2GasOracleABI, "Pack", func(name string, args ...interface{}) ([]byte, error) {
return nil, targetErr
})
relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(relayer.l2GasOracleABI, "Pack", func(name string, args ...interface{}) ([]byte, error) {
return nil, nil
})
convey.Convey("Failed to send setL2BaseFee tx to layer2", t, func() {
targetErr := errors.New("failed to send setL2BaseFee tx to layer2 error")
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ContextID string, target *common.Address, data []byte, blob *kzg4844.Blob, fallbackGasLimit uint64) (hash common.Hash, err error) {
return common.Hash{}, targetErr
})
relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ContextID string, target *common.Address, data []byte, blob *kzg4844.Blob, fallbackGasLimit uint64) (hash common.Hash, err error) {
return common.HexToHash("0x56789abcdef1234"), nil
})
convey.Convey("UpdateGasOracleStatusAndOracleTxHash failed", t, func() {
targetErr := errors.New("UpdateL2GasOracleStatusAndOracleTxHash error")
patchGuard.ApplyMethodFunc(batchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
return targetErr
})
relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(batchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
return nil
})
relayer.ProcessGasPriceOracle()
}
func mockChainMonitorServer(baseURL string) (*http.Server, error) {
router := gin.New()
r := router.Group("/v1")
@@ -539,7 +394,7 @@ func testGetBatchStatusByIndex(t *testing.T) {
defer database.CloseDB(db)
cfg.L2Config.RelayerConfig.ChainMonitor.Enabled = true
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()

View File

@@ -128,8 +128,6 @@ func TestFunctions(t *testing.T) {
t.Run("TestL2RelayerFinalizeTimeoutBundles", testL2RelayerFinalizeTimeoutBundles)
t.Run("TestL2RelayerCommitConfirm", testL2RelayerCommitConfirm)
t.Run("TestL2RelayerFinalizeBundleConfirm", testL2RelayerFinalizeBundleConfirm)
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)
// test getBatchStatusByIndex
t.Run("TestGetBatchStatusByIndex", testGetBatchStatusByIndex)

View File

@@ -13,6 +13,8 @@ import (
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
@@ -34,6 +36,7 @@ type BatchProposer struct {
maxUncompressedBatchBytesSize uint64
maxChunksPerBatch int
replayMode bool
minCodecVersion encoding.CodecVersion
chainCfg *params.ChainConfig
@@ -80,6 +83,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minC
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
maxChunksPerBatch: cfg.MaxChunksPerBatch,
replayMode: false,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,
@@ -152,6 +156,14 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minC
return p
}
// SetReplayDB sets the replay database for the BatchProposer.
// This is used for the proposer tool only, to change the l2_block data source.
// This function is not thread-safe and should be called after initializing the BatchProposer and before starting to propose batches.
func (p *BatchProposer) SetReplayDB(replayDB *gorm.DB) {
p.l2BlockOrm = orm.NewL2Block(replayDB)
p.replayMode = true
}
// TryProposeBatch tries to propose a new batch.
func (p *BatchProposer) TryProposeBatch() {
p.batchProposerCircleTotal.Inc()
@@ -226,6 +238,15 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en
log.Warn("BatchProposer.UpdateBatchHashInRange update the chunk's batch hash failure", "hash", dbBatch.Hash, "error", dbErr)
return dbErr
}
if p.replayMode {
// If replayMode is true (i.e., the batch was proposed by the proposer tool),
// set batch status to types.RollupCommitted and assign a unique commit tx hash to enable new bundle proposals.
if dbErr = p.batchOrm.UpdateCommitTxHashAndRollupStatus(p.ctx, dbBatch.Hash, dbBatch.Hash, types.RollupCommitted, dbTX); dbErr != nil {
log.Warn("BatchProposer.UpdateCommitTxHashAndRollupStatus update the batch's commit tx hash failure", "hash", dbBatch.Hash, "error", dbErr)
return dbErr
}
}
return nil
})
if err != nil {

View File

@@ -111,6 +111,7 @@ func testBatchProposerLimitsCodecV4(t *testing.T) {
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: 10000,
MaxL2GasPerChunk: 20000000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1000000,
@@ -206,6 +207,7 @@ func testBatchCommitGasAndCalldataSizeEstimationCodecV4(t *testing.T) {
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: 10000,
MaxL2GasPerChunk: 20_000_000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1000000,
@@ -292,6 +294,7 @@ func testBatchProposerBlobSizeLimitCodecV4(t *testing.T) {
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,
@@ -387,6 +390,7 @@ func testBatchProposerMaxChunkNumPerBatchLimitCodecV4(t *testing.T) {
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,

View File

@@ -199,7 +199,7 @@ func (p *BundleProposer) proposeBundle() error {
currentTimeSec := uint64(time.Now().Unix())
if firstChunk.StartBlockTime+p.bundleTimeoutSec < currentTimeSec {
log.Info("first block timeout", "batch count", len(batches), "start block number", firstChunk.StartBlockNumber, "start block timestamp", firstChunk.StartBlockTime, "current time", currentTimeSec)
log.Info("first block timeout", "batch count", len(batches), "start block number", firstChunk.StartBlockNumber, "start block timestamp", firstChunk.StartBlockTime, "bundle timeout", p.bundleTimeoutSec, "current time", currentTimeSec)
batches, err = p.allBatchesCommittedInSameTXIncluded(batches)
if err != nil {

View File

@@ -96,6 +96,7 @@ func testBundleProposerLimitsCodecV4(t *testing.T) {
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,

View File

@@ -28,6 +28,7 @@ type ChunkProposer struct {
maxBlockNumPerChunk uint64
maxTxNumPerChunk uint64
maxL2GasPerChunk uint64
maxL1CommitGasPerChunk uint64
maxL1CommitCalldataSizePerChunk uint64
maxRowConsumptionPerChunk uint64
@@ -35,6 +36,7 @@ type ChunkProposer struct {
gasCostIncreaseMultiplier float64
maxUncompressedBatchBytesSize uint64
replayMode bool
minCodecVersion encoding.CodecVersion
chainCfg *params.ChainConfig
@@ -43,6 +45,7 @@ type ChunkProposer struct {
proposeChunkUpdateInfoTotal prometheus.Counter
proposeChunkUpdateInfoFailureTotal prometheus.Counter
chunkTxNum prometheus.Gauge
chunkL2Gas prometheus.Gauge
chunkEstimateL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
totalL1CommitBlobSize prometheus.Gauge
@@ -66,6 +69,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minC
log.Info("new chunk proposer",
"maxBlockNumPerChunk", cfg.MaxBlockNumPerChunk,
"maxTxNumPerChunk", cfg.MaxTxNumPerChunk,
"maxL2GasPerChunk", cfg.MaxL2GasPerChunk,
"maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk,
"maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk,
"maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
@@ -81,12 +85,14 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minC
l2BlockOrm: orm.NewL2Block(db),
maxBlockNumPerChunk: cfg.MaxBlockNumPerChunk,
maxTxNumPerChunk: cfg.MaxTxNumPerChunk,
maxL2GasPerChunk: cfg.MaxL2GasPerChunk,
maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk,
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
chunkTimeoutSec: cfg.ChunkTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
replayMode: false,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,
@@ -114,6 +120,10 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minC
Name: "rollup_propose_chunk_tx_num",
Help: "The chunk tx num",
}),
chunkL2Gas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_l2_gas",
Help: "The chunk l2 gas",
}),
chunkEstimateL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_estimate_l1_commit_gas",
Help: "The chunk estimate l1 commit gas",
@@ -167,6 +177,14 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minC
return p
}
// SetReplayDB sets the replay database for the ChunkProposer.
// This is used for the proposer tool only, to change the l2_block data source.
// This function is not thread-safe and should be called after initializing the ChunkProposer and before starting to propose chunks.
func (p *ChunkProposer) SetReplayDB(replayDB *gorm.DB) {
p.l2BlockOrm = orm.NewL2Block(replayDB)
p.replayMode = true
}
// TryProposeChunk tries to propose a new chunk.
func (p *ChunkProposer) TryProposeChunk() {
p.chunkProposerCircleTotal.Inc()
@@ -233,9 +251,12 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en
log.Warn("ChunkProposer.InsertChunk failed", "codec version", codecVersion, "err", err)
return err
}
if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
log.Error("failed to update chunk_hash for l2_blocks", "chunk hash", dbChunk.Hash, "start block", dbChunk.StartBlockNumber, "end block", dbChunk.EndBlockNumber, "err", err)
return err
// In replayMode we don't need to update chunk_hash in the l2_block table.
if !p.replayMode {
if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
log.Error("failed to update chunk_hash for l2_block", "chunk hash", dbChunk.Hash, "start block", dbChunk.StartBlockNumber, "end block", dbChunk.EndBlockNumber, "err", err)
return err
}
}
return nil
})
@@ -342,6 +363,7 @@ func (p *ChunkProposer) proposeChunk() error {
overEstimatedL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(metrics.L1CommitGas))
if metrics.TxNum > p.maxTxNumPerChunk ||
metrics.L2Gas > p.maxL2GasPerChunk ||
metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
overEstimatedL1CommitGas > p.maxL1CommitGasPerChunk ||
metrics.CrcMax > p.maxRowConsumptionPerChunk ||
@@ -356,6 +378,8 @@ func (p *ChunkProposer) proposeChunk() error {
log.Debug("breaking limit condition in chunking",
"txNum", metrics.TxNum,
"maxTxNum", p.maxTxNumPerChunk,
"l2Gas", metrics.L2Gas,
"maxL2Gas", p.maxL2GasPerChunk,
"l1CommitCalldataSize", metrics.L1CommitCalldataSize,
"maxL1CommitCalldataSize", p.maxL1CommitCalldataSizePerChunk,
"l1CommitGas", metrics.L1CommitGas,
@@ -409,6 +433,7 @@ func (p *ChunkProposer) recordAllChunkMetrics(metrics *utils.ChunkMetrics) {
p.chunkTxNum.Set(float64(metrics.TxNum))
p.maxTxConsumption.Set(float64(metrics.CrcMax))
p.chunkBlocksNum.Set(float64(metrics.NumBlocks))
p.chunkL2Gas.Set(float64(metrics.L2Gas))
p.totalL1CommitCalldataSize.Set(float64(metrics.L1CommitCalldataSize))
p.chunkEstimateL1CommitGas.Set(float64(metrics.L1CommitGas))
p.totalL1CommitBlobSize.Set(float64(metrics.L1CommitBlobSize))
@@ -424,6 +449,12 @@ func (p *ChunkProposer) recordTimerChunkMetrics(metrics *utils.ChunkMetrics) {
}
func (p *ChunkProposer) tryProposeEuclidTransitionChunk(blocks []*encoding.Block) (bool, error) {
// If we are in replay mode, there is a corner case: when StartL2Block is set to 0, this check
// needs to fetch the genesis block, but the mainnet DB has no genesis block, so we bypass the check.
if p.replayMode {
return false, nil
}
if !p.chainCfg.IsEuclid(blocks[0].Header.Time) {
return false, nil
}
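
The chunk proposer above seals a chunk one block before a cap would be crossed: blocks accumulate until appending the next one breaks a limit such as maxTxNumPerChunk or the new maxL2GasPerChunk. A minimal standalone sketch of that decision, under the assumption that only the tx-count and L2-gas caps apply (names and structure are illustrative, not the actual proposer code):

package sketch

import "github.com/scroll-tech/da-codec/encoding"

// chunkPrefixUnderCaps returns how many leading blocks fit under the
// per-chunk tx-count and L2-gas caps; blocks[:n] would form the chunk.
func chunkPrefixUnderCaps(blocks []*encoding.Block, maxTxNum, maxL2Gas uint64) int {
	var txNum, l2Gas uint64
	for i, block := range blocks {
		txNum += uint64(len(block.Transactions))
		l2Gas += block.Header.GasUsed
		if txNum > maxTxNum || l2Gas > maxL2Gas {
			return i // the i-th block would break a limit; seal before it
		}
	}
	return len(blocks) // all candidate blocks fit in one chunk
}

This mirrors the "MaxL2GasPerChunkIsSecondBlock" test case below: with a cap just under two blocks' combined gas, only the first block lands in the chunk.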

View File

@@ -21,6 +21,7 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
name string
maxBlockNum uint64
maxTxNum uint64
maxL2Gas uint64
maxL1CommitGas uint64
maxL1CommitCalldataSize uint64
maxRowConsumption uint64
@@ -32,6 +33,7 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
name: "NoLimitReached",
maxBlockNum: 100,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
@@ -42,6 +44,7 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
name: "Timeout",
maxBlockNum: 100,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
@@ -53,6 +56,18 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
name: "MaxTxNumPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 0,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxL2GasPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 10,
maxL2Gas: 0,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
@@ -63,6 +78,7 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
name: "MaxL1CommitGasPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 0,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
@@ -73,6 +89,7 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
name: "MaxL1CommitCalldataSizePerChunkIs0",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 0,
maxRowConsumption: 1000000,
@@ -83,6 +100,7 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
name: "MaxRowConsumptionPerChunkIs0",
maxBlockNum: 100,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 0,
@@ -93,6 +111,7 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
name: "MaxBlockNumPerChunkIs1",
maxBlockNum: 1,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
@@ -104,6 +123,7 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
name: "MaxTxNumPerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 2,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
@@ -111,10 +131,25 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
// In this test the second block is not included in the chunk because together
// with the first block it exceeds the maxL2GasPerChunk limit.
name: "MaxL2GasPerChunkIsSecondBlock",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 1_153_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxL1CommitGasPerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 62500,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
@@ -126,6 +161,7 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
name: "MaxL1CommitCalldataSizePerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 60,
maxRowConsumption: 1000000,
@@ -137,6 +173,7 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
name: "MaxRowConsumptionPerChunkIs1",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1,
@@ -158,6 +195,7 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: tt.maxBlockNum,
MaxTxNumPerChunk: tt.maxTxNum,
MaxL2GasPerChunk: tt.maxL2Gas,
MaxL1CommitGasPerChunk: tt.maxL1CommitGas,
MaxL1CommitCalldataSizePerChunk: tt.maxL1CommitCalldataSize,
MaxRowConsumptionPerChunk: tt.maxRowConsumption,
@@ -208,6 +246,7 @@ func testChunkProposerBlobSizeLimitCodecV4(t *testing.T) {
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 255,
MaxTxNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,

View File

@@ -8,8 +8,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/event"
"github.com/scroll-tech/go-ethereum/log"
@@ -86,43 +84,6 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
}
}
func txsToTxsData(txs gethTypes.Transactions) []*gethTypes.TransactionData {
txsData := make([]*gethTypes.TransactionData, len(txs))
for i, tx := range txs {
v, r, s := tx.RawSignatureValues()
nonce := tx.Nonce()
// We need QueueIndex in `NewBatchHeader`. However, `TransactionData`
// does not have this field. Since an `L1MessageTx` does not have a nonce,
// we reuse this field for storing the queue index.
if msg := tx.AsL1MessageTx(); msg != nil {
nonce = msg.QueueIndex
}
txsData[i] = &gethTypes.TransactionData{
Type: tx.Type(),
TxHash: tx.Hash().String(),
Nonce: nonce,
ChainId: (*hexutil.Big)(tx.ChainId()),
Gas: tx.Gas(),
GasPrice: (*hexutil.Big)(tx.GasPrice()),
GasTipCap: (*hexutil.Big)(tx.GasTipCap()),
GasFeeCap: (*hexutil.Big)(tx.GasFeeCap()),
To: tx.To(),
Value: (*hexutil.Big)(tx.Value()),
Data: hexutil.Encode(tx.Data()),
IsCreate: tx.To() == nil,
AccessList: tx.AccessList(),
AuthorizationList: tx.SetCodeAuthorizations(),
V: (*hexutil.Big)(v),
R: (*hexutil.Big)(r),
S: (*hexutil.Big)(s),
}
}
return txsData
}
func (w *L2WatcherClient) getAndStoreBlocks(ctx context.Context, from, to uint64) error {
var blocks []*encoding.Block
for number := from; number <= to; number++ {
@@ -150,7 +111,7 @@ func (w *L2WatcherClient) getAndStoreBlocks(ctx context.Context, from, to uint64
}
blocks = append(blocks, &encoding.Block{
Header: block.Header(),
Transactions: txsToTxsData(block.Transactions()),
Transactions: encoding.TxsToTxsData(block.Transactions()),
WithdrawRoot: common.BytesToHash(withdrawRoot),
RowConsumption: block.RowConsumption,
})

View File

@@ -0,0 +1,160 @@
package watcher
import (
"context"
"fmt"
"math/big"
"time"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/utils"
"scroll-tech/database/migrate"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
rutils "scroll-tech/rollup/internal/utils"
)
// ProposerTool is a tool for replaying chunk, batch, and bundle proposing for analysis.
type ProposerTool struct {
ctx context.Context
cancel context.CancelFunc
db *gorm.DB
dbForReplay *gorm.DB
client *ethclient.Client
chunkProposer *ChunkProposer
batchProposer *BatchProposer
bundleProposer *BundleProposer
}
// NewProposerTool creates a new ProposerTool instance.
func NewProposerTool(ctx context.Context, cancel context.CancelFunc, cfg *config.ConfigForReplay, startL2BlockHeight uint64, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig) (*ProposerTool, error) {
// Init db connection
db, err := database.InitDB(cfg.DBConfig)
if err != nil {
return nil, fmt.Errorf("failed to init db connection: %w", err)
}
sqlDB, err := db.DB()
if err != nil {
return nil, fmt.Errorf("failed to get db connection: %w", err)
}
if err = migrate.ResetDB(sqlDB); err != nil {
return nil, fmt.Errorf("failed to reset db: %w", err)
}
log.Info("successfully reset db")
// Init dbForReplay connection
dbForReplay, err := database.InitDB(cfg.DBConfigForReplay)
if err != nil {
return nil, fmt.Errorf("failed to init dbForReplay connection: %w", err)
}
client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
return nil, fmt.Errorf("failed to connect to L2 geth, endpoint: %s, err: %w", cfg.L2Config.Endpoint, err)
}
prevChunk, err := orm.NewChunk(dbForReplay).GetParentChunkByBlockNumber(ctx, startL2BlockHeight)
if err != nil {
return nil, fmt.Errorf("failed to get previous chunk: %w", err)
}
var startQueueIndex uint64
if prevChunk != nil {
startQueueIndex = prevChunk.TotalL1MessagesPoppedBefore + prevChunk.TotalL1MessagesPoppedInChunk
}
startBlock := uint64(0)
if prevChunk != nil {
startBlock = prevChunk.EndBlockNumber + 1
}
var chunk *encoding.Chunk
for blockNum := startBlock; blockNum <= startL2BlockHeight; blockNum++ {
block, err := client.BlockByNumber(ctx, new(big.Int).SetUint64(blockNum))
if err != nil {
return nil, fmt.Errorf("failed to get block %d: %w", blockNum, err)
}
for _, tx := range block.Transactions() {
if tx.Type() == gethTypes.L1MessageTxType {
startQueueIndex++
}
}
if blockNum == startL2BlockHeight {
chunk = &encoding.Chunk{Blocks: []*encoding.Block{{Header: block.Header()}}}
}
}
// Set the empty hash as the post_l1_message_queue_hash of the first chunk,
// i.e., treat the first L1 message after this chunk as the first L1 message in message queue v2.
// Though this differs from mainnet, it is simple yet sufficient for data analysis.
_, err = orm.NewChunk(db).InsertTestChunkForProposerTool(ctx, chunk, minCodecVersion, startQueueIndex)
if err != nil {
return nil, fmt.Errorf("failed to insert chunk, minCodecVersion: %d, startQueueIndex: %d, err: %w", minCodecVersion, startQueueIndex, err)
}
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
var dbBatch *orm.Batch
dbBatch, err = orm.NewBatch(db).InsertBatch(ctx, batch, encoding.CodecV0, rutils.BatchMetrics{})
if err != nil {
return nil, fmt.Errorf("failed to insert batch: %w", err)
}
if err = orm.NewChunk(db).UpdateBatchHashInRange(ctx, 0, 0, dbBatch.Hash); err != nil {
return nil, fmt.Errorf("failed to update batch hash for chunks: %w", err)
}
chunkProposer := NewChunkProposer(ctx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, chainCfg, db, nil)
chunkProposer.SetReplayDB(dbForReplay)
batchProposer := NewBatchProposer(ctx, cfg.L2Config.BatchProposerConfig, minCodecVersion, chainCfg, db, nil)
batchProposer.SetReplayDB(dbForReplay)
bundleProposer := NewBundleProposer(ctx, cfg.L2Config.BundleProposerConfig, minCodecVersion, chainCfg, db, nil)
return &ProposerTool{
ctx: ctx,
cancel: cancel,
db: db,
dbForReplay: dbForReplay,
client: client,
chunkProposer: chunkProposer,
batchProposer: batchProposer,
bundleProposer: bundleProposer,
}, nil
}
func (p *ProposerTool) Start() {
go utils.Loop(p.ctx, 100*time.Millisecond, p.chunkProposer.TryProposeChunk)
go utils.Loop(p.ctx, 100*time.Millisecond, p.batchProposer.TryProposeBatch)
go utils.Loop(p.ctx, 100*time.Millisecond, p.bundleProposer.TryProposeBundle)
}
func (p *ProposerTool) Stop() {
p.cancel()
if err := database.CloseDB(p.db); err != nil {
log.Error("failed to close db connection", "error", err)
}
if err := database.CloseDB(p.dbForReplay); err != nil {
log.Error("failed to close dbForReplay connection", "error", err)
}
p.client.Close()
}
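
A minimal sketch of wiring the tool end to end. The package path, the config-loading boilerplate, and the start height/codec/chain-config values are assumptions for illustration; NewProposerTool, Start, and Stop are the functions above:

package main

import (
	"context"
	"encoding/json"
	"os"
	"os/signal"

	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/params"

	"scroll-tech/rollup/internal/config"
	"scroll-tech/rollup/internal/controller/watcher" // assumed package path for ProposerTool
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	// Load the replay config (the JSON shown later in this diff); the field
	// mapping onto config.ConfigForReplay is assumed to follow its struct tags.
	data, err := os.ReadFile("./proposer-tool-config.json")
	if err != nil {
		log.Crit("failed to read config", "err", err)
	}
	var cfg config.ConfigForReplay
	if err := json.Unmarshal(data, &cfg); err != nil {
		log.Crit("failed to parse config", "err", err)
	}

	// Start block height, codec version, and chain config are example values.
	// Note that NewProposerTool resets the analysis DB before replaying.
	tool, err := watcher.NewProposerTool(ctx, cancel, &cfg, 10_000_000, encoding.CodecV4, &params.ChainConfig{})
	if err != nil {
		log.Crit("failed to create proposer tool", "err", err)
	}
	tool.Start()

	// Run until interrupted, then stop the proposer loops and close DB handles.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)
	<-sig
	tool.Stop()
}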

View File

@@ -39,6 +39,7 @@ type Batch struct {
PostL1MessageQueueHash string `json:"post_l1_message_queue_hash" gorm:"column:post_l1_message_queue_hash"`
EnableCompress bool `json:"enable_compress" gorm:"column:enable_compress"` // use for debug
BlobBytes []byte `json:"blob_bytes" gorm:"column:blob_bytes"`
ChallengeDigest string `json:"challenge_digest" gorm:"column:challenge_digest"`
// proof
ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"`
@@ -55,10 +56,6 @@ type Batch struct {
FinalizeTxHash string `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"`
FinalizedAt *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`
// gas oracle
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// blob
BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
@@ -124,7 +121,7 @@ func (o *Batch) GetBatchCount(ctx context.Context) (uint64, error) {
}
// GetVerifiedProofByHash retrieves the verified aggregate proof for a batch with the given hash.
func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash, hardForkName string) (message.BatchProof, error) {
func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.OpenVMBatchProof, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Select("proof")
@@ -135,11 +132,11 @@ func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash, hardForkName s
return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
}
proof := message.NewBatchProof(hardForkName)
var proof message.OpenVMBatchProof
if err := json.Unmarshal(batch.Proof, &proof); err != nil {
return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
}
return proof, nil
return &proof, nil
}
// GetLatestBatch retrieves the latest batch from the database.
@@ -165,7 +162,8 @@ func (o *Batch) GetFirstUnbatchedChunkIndex(ctx context.Context) (uint64, error)
return latestBatch.EndChunkIndex + 1, nil
}
// GetCommittedBatchesGEIndexGECodecVersion retrieves batches that have been committed (commit_tx_hash is set) and have a batch index greater than or equal to the given index and codec version.
// GetCommittedBatchesGEIndexGECodecVersion retrieves batches that have been committed (commit_tx_hash is set) and not finalized (finalize_tx_hash is NULL).
// It returns batches that have an index greater than or equal to the given index and codec version.
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetCommittedBatchesGEIndexGECodecVersion(ctx context.Context, index uint64, codecv encoding.CodecVersion, limit int) ([]*Batch, error) {
db := o.db.WithContext(ctx)
@@ -173,6 +171,7 @@ func (o *Batch) GetCommittedBatchesGEIndexGECodecVersion(ctx context.Context, in
db = db.Where("index >= ?", index)
db = db.Where("codec_version >= ?", codecv)
db = db.Where("commit_tx_hash IS NOT NULL") // only include committed batches
db = db.Where("finalize_tx_hash IS NULL") // exclude finalized batches
db = db.Order("index ASC")
if limit > 0 {
@@ -219,6 +218,18 @@ func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string)
return statuses, nil
}
func (o *Batch) GetFailedAndPendingBatchesCount(ctx context.Context) (int64, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("rollup_status = ? OR rollup_status = ?", types.RollupCommitFailed, types.RollupPending)
var count int64
if err := db.Count(&count).Error; err != nil {
return 0, fmt.Errorf("Batch.GetFailedAndPendingBatchesCount error: %w", err)
}
return count, nil
}
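One plausible consumer of the new count method (hypothetical, not part of this diff): gating submission of further commit transactions on the backlog of failed or still-pending batches.

package sketch

import (
	"context"

	"scroll-tech/rollup/internal/orm"
)

// maybeSubmitMore is an illustrative sketch: it reports whether the relayer
// should submit another commit tx, capping the unconfirmed-batch backlog.
func maybeSubmitMore(ctx context.Context, batchOrm *orm.Batch, maxBacklog int64) (bool, error) {
	count, err := batchOrm.GetFailedAndPendingBatchesCount(ctx)
	if err != nil {
		return false, err
	}
	return count < maxBacklog, nil
}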
// GetFailedAndPendingBatches retrieves batches with failed or pending status up to the specified limit.
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetFailedAndPendingBatches(ctx context.Context, limit int) ([]*Batch, error) {
@@ -305,10 +316,10 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
PostL1MessageQueueHash: batch.PostL1MessageQueueHash.Hex(),
EnableCompress: enableCompress,
BlobBytes: batchMeta.BlobBytes,
ChallengeDigest: batchMeta.ChallengeDigest.Hex(),
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
TotalL1CommitGas: metrics.L1CommitGas,
TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
BlobDataProof: batchMeta.BatchBlobDataProof,
@@ -329,22 +340,6 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
return &newBatch, nil
}
// UpdateL2GasOracleStatusAndOracleTxHash updates the L2 gas oracle status and transaction hash for a batch.
func (o *Batch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
updateFields := make(map[string]interface{})
updateFields["oracle_status"] = int(status)
updateFields["oracle_tx_hash"] = txHash
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateL2GasOracleStatusAndOracleTxHash error: %w, batch hash: %v, status: %v, txHash: %v", err, hash, status.String(), txHash)
}
return nil
}
// UpdateProvingStatus updates the proving status of a batch.
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
@@ -403,7 +398,12 @@ func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status type
func (o *Batch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
updateFields["commit_tx_hash"] = commitTxHash
updateFields["rollup_status"] = int(status)
updateFields["rollup_status"] = gorm.Expr(
`CASE
WHEN rollup_status NOT IN (?, ?) THEN ?
ELSE rollup_status
END`,
types.RollupFinalizing, types.RollupFinalized, int(status))
if status == types.RollupCommitted {
updateFields["committed_at"] = utils.NowUTC()
}
@@ -413,6 +413,7 @@ func (o *Batch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash stri
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
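The gorm.Expr above keeps a late commit confirmation from overwriting a batch that has already advanced to RollupFinalizing or RollupFinalized. If in doubt about what it emits, GORM's DryRun session renders the statement without executing it; a sketch, assuming it sits next to the Batch model above (exact SQL text depends on the dialect):

// Illustrative drop-in next to the Batch model above.
func renderGuardedUpdate(db *gorm.DB, hash string, updateFields map[string]interface{}) (string, []interface{}) {
	stmt := db.Session(&gorm.Session{DryRun: true}).
		Model(&Batch{}).
		Where("hash = ?", hash).
		Updates(updateFields).Statement
	// Roughly: UPDATE "batch" SET "commit_tx_hash"=$1, "rollup_status"=CASE WHEN
	// rollup_status NOT IN ($2,$3) THEN $4 ELSE rollup_status END WHERE hash = $5
	return stmt.SQL.String(), stmt.Vars
}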
@@ -443,7 +444,7 @@ func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash st
// UpdateProofByHash updates the batch proof by hash.
// only used in unit tests.
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof message.BatchProof, proofTimeSec uint64) error {
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.OpenVMBatchProof, proofTimeSec uint64) error {
proofBytes, err := json.Marshal(proof)
if err != nil {
return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)

View File

@@ -134,7 +134,7 @@ func (o *Bundle) GetFirstPendingBundle(ctx context.Context) (*Bundle, error) {
}
// GetVerifiedProofByHash retrieves the verified aggregate proof for a bundle with the given hash.
func (o *Bundle) GetVerifiedProofByHash(ctx context.Context, hash, hardForkName string) (message.BundleProof, error) {
func (o *Bundle) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.OpenVMBundleProof, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Select("proof")
@@ -145,11 +145,11 @@ func (o *Bundle) GetVerifiedProofByHash(ctx context.Context, hash, hardForkName
return nil, fmt.Errorf("Bundle.GetVerifiedProofByHash error: %w, bundle hash: %v", err, hash)
}
proof := message.NewBundleProof(hardForkName)
var proof message.OpenVMBundleProof
if err := json.Unmarshal(bundle.Proof, &proof); err != nil {
return nil, fmt.Errorf("Bundle.GetVerifiedProofByHash error: %w, bundle hash: %v", err, hash)
}
return proof, nil
return &proof, nil
}
// InsertBundle inserts a new bundle into the database.
@@ -256,7 +256,7 @@ func (o *Bundle) UpdateRollupStatus(ctx context.Context, hash string, status typ
// UpdateProofAndProvingStatusByHash updates the bundle proof and proving status by hash.
// only used in unit tests.
func (o *Bundle) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof message.BundleProof, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
func (o *Bundle) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof *message.OpenVMBundleProof, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]

View File

@@ -179,6 +179,25 @@ func (o *Chunk) GetChunksByBatchHash(ctx context.Context, batchHash string) ([]*
return chunks, nil
}
// GetParentChunkByBlockNumber retrieves the parent chunk, i.e., the latest chunk that ends before the given block number.
// It is only used by the proposer tool for analysis.
func (o *Chunk) GetParentChunkByBlockNumber(ctx context.Context, blockNumber uint64) (*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("end_block_number < ?", blockNumber)
db = db.Order("end_block_number DESC")
db = db.Limit(1)
var chunk Chunk
if err := db.First(&chunk).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, fmt.Errorf("Chunk.GetParentChunkByBlockNumber error: %w", err)
}
return &chunk, nil
}
// InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, metrics rutils.ChunkMetrics, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 {
@@ -259,6 +278,51 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
return &newChunk, nil
}
// InsertTestChunkForProposerTool inserts a new chunk into the database; it is only used by the proposer tool for analysis.
func (o *Chunk) InsertTestChunkForProposerTool(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, totalL1MessagePoppedBefore uint64, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 {
return nil, errors.New("invalid args")
}
chunkHash, err := rutils.GetChunkHash(chunk, totalL1MessagePoppedBefore, codecVersion)
if err != nil {
log.Error("failed to get chunk hash", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
numBlocks := len(chunk.Blocks)
firstBlock := chunk.Blocks[0]
lastBlock := chunk.Blocks[numBlocks-1]
newChunk := Chunk{
Index: 0,
Hash: chunkHash.Hex(),
StartBlockNumber: firstBlock.Header.Number.Uint64(),
StartBlockHash: firstBlock.Header.Hash().Hex(),
EndBlockNumber: lastBlock.Header.Number.Uint64(),
EndBlockHash: lastBlock.Header.Hash().Hex(),
TotalL2TxGas: chunk.TotalGasUsed(),
TotalL2TxNum: chunk.NumL2Transactions(),
StartBlockTime: firstBlock.Header.Time,
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
StateRoot: lastBlock.Header.Root.Hex(),
WithdrawRoot: lastBlock.WithdrawRoot.Hex(),
CodecVersion: int16(codecVersion),
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
if err := db.Create(&newChunk).Error; err != nil {
return nil, fmt.Errorf("Chunk.InsertChunk error: %w, chunk hash: %v", err, newChunk.Hash)
}
return &newChunk, nil
}
// UpdateProvingStatus updates the proving status of a chunk.
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})

View File

@@ -71,6 +71,20 @@ func (o *L1Block) GetL1Blocks(ctx context.Context, fields map[string]interface{}
return l1Blocks, nil
}
// GetBlobFeesInRange returns all blob_base_fee values for blocks
// with number ∈ [startBlock..endBlock], ordered by block number ascending.
func (o *L1Block) GetBlobFeesInRange(ctx context.Context, startBlock, endBlock uint64) ([]uint64, error) {
var fees []uint64
db := o.db.WithContext(ctx).
Model(&L1Block{}).
Where("number >= ? AND number <= ?", startBlock, endBlock).
Order("number ASC")
if err := db.Pluck("blob_base_fee", &fees).Error; err != nil {
return nil, fmt.Errorf("L1Block.GetBlobFeesInRange error: %w", err)
}
return fees, nil
}
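GetBlobFeesInRange supplies the blob-fee history that the relayer's submission strategy consumes. One simple way a caller could turn that window into a target price is a percentile; the percentile approach below is an illustrative assumption, not the relayer's actual algorithm:

package sketch

import "sort"

// targetBlobFee returns the p-th percentile (0 <= p <= 1) of the
// blob_base_fee history returned by L1Block.GetBlobFeesInRange.
func targetBlobFee(fees []uint64, p float64) uint64 {
	if len(fees) == 0 {
		return 0
	}
	sorted := append([]uint64(nil), fees...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	idx := int(p * float64(len(sorted)-1)) // p = 0.5 gives the median
	return sorted[idx]
}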
// InsertL1Blocks batch inserts l1 blocks.
// If there's a block number conflict (e.g., due to reorg), soft deletes the existing block and inserts the new one.
func (o *L1Block) InsertL1Blocks(ctx context.Context, blocks []L1Block) error {

View File

@@ -292,7 +292,7 @@ func TestBatchOrm(t *testing.T) {
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
assert.NoError(t, err)
dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1, "darwinV2")
dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1)
assert.Error(t, err)
assert.Nil(t, dbProof)
@@ -300,16 +300,12 @@ func TestBatchOrm(t *testing.T) {
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupFinalized)
assert.NoError(t, err)
err = batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(context.Background(), batchHash2, types.GasOracleImported, "oracleTxHash")
assert.NoError(t, err)
updatedBatch, err := batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(updatedBatch.ProvingStatus))
assert.Equal(t, types.RollupFinalized, types.RollupStatus(updatedBatch.RollupStatus))
assert.Equal(t, types.GasOracleImported, types.GasOracleStatus(updatedBatch.OracleStatus))
assert.Equal(t, "oracleTxHash", updatedBatch.OracleTxHash)
err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), batchHash1, "commitTxHash", types.RollupCommitted)
assert.NoError(t, err)
@@ -319,7 +315,7 @@ func TestBatchOrm(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, "commitTxHash", updatedBatch.CommitTxHash)
assert.Equal(t, types.RollupCommitted, types.RollupStatus(updatedBatch.RollupStatus))
assert.Equal(t, types.RollupFinalized, types.RollupStatus(updatedBatch.RollupStatus))
err = batchOrm.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchHash2, "finalizeTxHash", types.RollupFinalizeFailed)
assert.NoError(t, err)
@@ -332,9 +328,8 @@ func TestBatchOrm(t *testing.T) {
batches, err := batchOrm.GetCommittedBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion, 0)
assert.NoError(t, err)
assert.Equal(t, 2, len(batches))
assert.Equal(t, 1, len(batches))
assert.Equal(t, batchHash1, batches[0].Hash)
assert.Equal(t, batchHash2, batches[1].Hash)
batches, err = batchOrm.GetCommittedBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion, 1)
assert.NoError(t, err)
@@ -343,8 +338,7 @@ func TestBatchOrm(t *testing.T) {
batches, err = batchOrm.GetCommittedBatchesGEIndexGECodecVersion(context.Background(), 1, codecVersion, 0)
assert.NoError(t, err)
assert.Equal(t, 1, len(batches))
assert.Equal(t, batchHash2, batches[0].Hash)
assert.Equal(t, 0, len(batches))
batches, err = batchOrm.GetCommittedBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion+1, 0)
assert.NoError(t, err)
@@ -361,13 +355,7 @@ func TestBatchOrm(t *testing.T) {
batches, err = batchOrm.GetCommittedBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion, 0)
assert.NoError(t, err)
assert.Equal(t, 2, len(batches))
assert.Equal(t, batchHash1, batches[0].Hash)
assert.Equal(t, batchHash2, batches[1].Hash)
assert.Equal(t, types.ProvingTaskFailed, types.ProvingStatus(batches[0].ProvingStatus))
assert.Equal(t, types.RollupCommitFailed, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(batches[1].ProvingStatus))
assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(batches[1].RollupStatus))
assert.Equal(t, 0, len(batches))
}
}
@@ -463,18 +451,16 @@ func TestBundleOrm(t *testing.T) {
})
t.Run("GetVerifiedProofByHash", func(t *testing.T) {
proof := &message.Halo2BundleProof{
RawProof: []byte("test proof"),
}
proof := &message.OpenVMBundleProof{EvmProof: &message.OpenVMEvmProof{Instances: make([]byte, 384)}}
proofBytes, err := json.Marshal(proof)
assert.NoError(t, err)
err = db.Model(&Bundle{}).Where("hash = ?", bundle1.Hash).Update("proof", proofBytes).Error
assert.NoError(t, err)
retrievedProof, err := bundleOrm.GetVerifiedProofByHash(context.Background(), bundle1.Hash, "darwinV2")
retrievedProof, err := bundleOrm.GetVerifiedProofByHash(context.Background(), bundle1.Hash)
assert.NoError(t, err)
assert.Equal(t, proof.RawProof, retrievedProof.Proof())
assert.Equal(t, proof.Proof(), retrievedProof.Proof())
})
t.Run("GetBundles", func(t *testing.T) {
@@ -486,9 +472,7 @@ func TestBundleOrm(t *testing.T) {
})
t.Run("UpdateProofAndProvingStatusByHash", func(t *testing.T) {
proof := &message.Halo2BundleProof{
RawProof: []byte("new test proof"),
}
proof := &message.OpenVMBundleProof{EvmProof: &message.OpenVMEvmProof{Instances: make([]byte, 384)}}
err := bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle2.Hash, proof, types.ProvingTaskVerified, 600)
assert.NoError(t, err)
@@ -499,10 +483,10 @@ func TestBundleOrm(t *testing.T) {
assert.Equal(t, int32(600), bundle.ProofTimeSec)
assert.NotNil(t, bundle.ProvedAt)
retrievedProof := message.Halo2BundleProof{}
retrievedProof := &message.OpenVMBundleProof{}
err = json.Unmarshal(bundle.Proof, &retrievedProof)
assert.NoError(t, err)
assert.Equal(t, proof.RawProof, retrievedProof.Proof())
assert.Equal(t, proof.Proof(), retrievedProof.Proof())
})
t.Run("UpdateRollupStatus", func(t *testing.T) {

View File

@@ -12,6 +12,7 @@ import (
type ChunkMetrics struct {
NumBlocks uint64
TxNum uint64
L2Gas uint64
CrcMax uint64
FirstBlockTimestamp uint64
@@ -35,6 +36,11 @@ func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVer
FirstBlockTimestamp: chunk.Blocks[0].Header.Time,
}
// Get total L2 gas for chunk
for _, block := range chunk.Blocks {
metrics.L2Gas += block.Header.GasUsed
}
var err error
metrics.CrcMax, err = chunk.CrcMax()
if err != nil {
@@ -158,6 +164,7 @@ type BatchMetadata struct {
StartChunkHash common.Hash
EndChunkHash common.Hash
BlobBytes []byte
ChallengeDigest common.Hash
}
// GetBatchMetadata retrieves the metadata of a batch.
@@ -173,10 +180,11 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
}
batchMeta := &BatchMetadata{
BatchHash: daBatch.Hash(),
BatchDataHash: daBatch.DataHash(),
BatchBytes: daBatch.Encode(),
BlobBytes: daBatch.BlobBytes(),
BatchHash: daBatch.Hash(),
BatchDataHash: daBatch.DataHash(),
BatchBytes: daBatch.Encode(),
BlobBytes: daBatch.BlobBytes(),
ChallengeDigest: daBatch.ChallengeDigest(),
}
batchMeta.BatchBlobDataProof, err = daBatch.BlobDataProofForPointEvaluation()

View File

@@ -0,0 +1,40 @@
{
"l2_config": {
"endpoint": "https://rpc.scroll.io",
"chunk_proposer_config": {
"max_block_num_per_chunk": 100,
"max_tx_num_per_chunk": 100,
"max_l2_gas_per_chunk": 20000000,
"max_l1_commit_gas_per_chunk": 5000000,
"max_l1_commit_calldata_size_per_chunk": 123740,
"chunk_timeout_sec": 72000000000,
"max_row_consumption_per_chunk": 10000000000,
"gas_cost_increase_multiplier": 1.2,
"max_uncompressed_batch_bytes_size": 634693
},
"batch_proposer_config": {
"max_l1_commit_gas_per_batch": 5000000,
"max_l1_commit_calldata_size_per_batch": 123740,
"batch_timeout_sec": 72000000000,
"gas_cost_increase_multiplier": 1.2,
"max_uncompressed_batch_bytes_size": 634693,
"max_chunks_per_batch": 45
},
"bundle_proposer_config": {
"max_batch_num_per_bundle": 45,
"bundle_timeout_sec": 36000000000
}
},
"db_config": {
"driver_name": "postgres",
"dsn": "postgres://postgres:postgres@db:5432/scroll?sslmode=disable",
"maxOpenNum": 200,
"maxIdleNum": 20
},
"db_config_for_replay": {
"driver_name": "postgres",
"dsn": "<mainnet read db config>",
"maxOpenNum": 200,
"maxIdleNum": 20
}
}

View File

@@ -0,0 +1,19 @@
{
"config": {
"chainId": 534352,
"bernoulliBlock": 0,
"curieBlock": 0,
"darwinTime": 0,
"darwinV2Time": 0,
"euclidTime": 0,
"euclidV2Time": 0
},
"nonce": "0x0000000000000033",
"timestamp": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"gasLimit": "0x8000000",
"difficulty": "0x100",
"mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"coinbase": "0x3333333333333333333333333333333333333333",
"alloc": {}
}

Some files were not shown because too many files have changed in this diff.