Compare commits

..

9 Commits

Author SHA1 Message Date
colin
a55c7bdc77 refactor(coordinator): remove outdated logic (#1668)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-05-26 14:32:39 +08:00
Alejandro Ranchal-Pedrosa
47b1a037a9 Fix bug sequencer submission strategy and log commit price (#1664)
Co-authored-by: ranchalp <ranchalp@users.noreply.github.com>
2025-05-23 10:02:49 +01:00
Alejandro Ranchal-Pedrosa
ae34020c34 perf(relayer): submission strategy fix logs, use blocktime for submission strategy and log metrics. (#1663)
Co-authored-by: ranchalp <ranchalp@users.noreply.github.com>
Co-authored-by: Jonas Theis <4181434+jonastheis@users.noreply.github.com>
2025-05-22 20:38:10 +08:00
Jonas Theis
fa9fab6e98 fix(relayer): ProcessPendingBatches (#1661)
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2025-05-21 18:23:04 +02:00
colin
c4f869a33a refactor(sender): remove fallback gas limit (#1662) 2025-05-21 22:02:27 +08:00
colin
0cee9a51e6 refactor(rollup-relayer): simplify logic post-Euclid (#1658)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-05-21 17:59:49 +08:00
colin
97de988228 feat: coordinator and prover support v0.4.2 (#1660)
Co-authored-by: Velaciela <git.rover@outlook.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: Zhang Zhuo <mycinbrin@gmail.com>
2025-05-20 14:10:52 +08:00
Alejandro Ranchal-Pedrosa
a12175dafc perf(relayer): add sequencer submission strategy with blob-fee history and target price (#1659)
Co-authored-by: jonastheis <4181434+jonastheis@users.noreply.github.com>
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
2025-05-19 22:32:28 +02:00
colin
fedfa04c2b refactor(coordinator): simplify logic post-Euclid (#1652) 2025-05-16 23:37:52 +08:00
97 changed files with 2860 additions and 10629 deletions

View File

@@ -42,15 +42,11 @@ jobs:
uses: Swatinem/rust-cache@v2
with:
workspaces: "common/libzkp/impl -> target"
- name: Setup SSH for private repos
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
- name: Lint
working-directory: 'common'
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
# - name: Lint
# working-directory: 'common'
# run: |
# rm -rf $HOME/.cache/golangci-lint
# make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest

View File

@@ -307,13 +307,48 @@ jobs:
REPOSITORY: coordinator-api
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Setup SSH for private repos
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
- name: Run custom script
- name: Setup SSH for repositories and clone them
run: |
./build/dockerfiles/coordinator-api/init-openvm.sh
mkdir -p ~/.ssh
chmod 700 ~/.ssh
# Setup for plonky3-gpu
echo "${{ secrets.PLONKY3_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/plonky3_gpu_key
chmod 600 ~/.ssh/plonky3_gpu_key
eval "$(ssh-agent -s)" > /dev/null
ssh-add ~/.ssh/plonky3_gpu_key 2>/dev/null
ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts 2>/dev/null
echo "Loaded plonky3-gpu key"
# Clone plonky3-gpu repository
./build/dockerfiles/coordinator-api/clone_plonky3_gpu.sh
# Setup for openvm-stark-gpu
echo "${{ secrets.OPENVM_STARK_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/openvm_stark_gpu_key
chmod 600 ~/.ssh/openvm_stark_gpu_key
eval "$(ssh-agent -s)" > /dev/null
ssh-add ~/.ssh/openvm_stark_gpu_key 2>/dev/null
echo "Loaded openvm-stark-gpu key"
# Clone openvm-stark-gpu repository
./build/dockerfiles/coordinator-api/clone_openvm_stark_gpu.sh
# Setup for openvm-gpu
echo "${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/openvm_gpu_key
chmod 600 ~/.ssh/openvm_gpu_key
eval "$(ssh-agent -s)" > /dev/null
ssh-add ~/.ssh/openvm_gpu_key 2>/dev/null
echo "Loaded openvm-gpu key"
# Clone openvm-gpu repository
./build/dockerfiles/coordinator-api/clone_openvm_gpu.sh
# Show number of loaded keys
echo "Number of loaded keys: $(ssh-add -l | wc -l)"
- name: Checkout specific commits
run: |
./build/dockerfiles/coordinator-api/checkout_all.sh
- name: Build and push
uses: docker/build-push-action@v3
env:

View File

@@ -1,99 +0,0 @@
name: Prover
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'prover/**'
- '.github/workflows/prover.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'prover/**'
- '.github/workflows/prover.yml'
defaults:
run:
working-directory: 'prover'
jobs:
skip_check:
runs-on: ubuntu-latest
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- id: skip_check
uses: fkirc/skip-duplicate-actions@v5
with:
cancel_others: 'true'
concurrent_skipping: 'same_content_newer'
paths_ignore: '["**/README.md"]'
fmt:
needs: [skip_check]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
components: rustfmt
- name: Cargo cache
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Cargo check
run: cargo check --all-features
- name: Cargo fmt
run: cargo fmt --all -- --check
clippy:
needs: [skip_check, fmt]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
components: clippy
- name: Cargo cache
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Run clippy
run: cargo clippy --all-features --all-targets -- -D warnings
compile:
needs: [skip_check, clippy]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Test
run: |
make prover

View File

@@ -11,7 +11,7 @@ Please note that this project is released with a [Contributor Code of Conduct][c
## Contribute to Scroll
Did you know there are many ways of contributing to Scroll? If you are looking to contribute by adding Scroll to existing Dev Tools or by doing integrations, please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. If you are looking to contribute to Scroll's Halo2 zkEVM circuits, please refer to the [zkEVM circuits](https://github.com/scroll-tech/zkevm-circuits) repo. This repository covers the Scroll infrastructure and smart contracts; if you want to contribute to these areas, continue reading this document.
Did you know there are many ways of contributing to Scroll? If you are looking to contribute by adding Scroll to existing Dev Tools or by doing integrations, please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. This repository covers the Scroll infrastructure and smart contracts; if you want to contribute to these areas, continue reading this document.
## Issues and PRs

View File

@@ -1,5 +1,5 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as chef
FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.21-rust-nightly-2023-12-03 as chef
WORKDIR app
FROM chef as planner
@@ -9,7 +9,9 @@ RUN cargo chef prepare --recipe-path recipe.json
FROM chef as zkp-builder
COPY ./common/libzkp/impl/rust-toolchain ./
COPY --from=planner /app/recipe.json recipe.json
# run ./build/dockerfiles/coordinator-api/init-openvm.sh to get openvm-gpu
# run scripts to get openvm-gpu
COPY ./build/dockerfiles/coordinator-api/plonky3-gpu /plonky3-gpu
COPY ./build/dockerfiles/coordinator-api/openvm-stark-gpu /openvm-stark-gpu
COPY ./build/dockerfiles/coordinator-api/openvm-gpu /openvm-gpu
COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
@@ -20,7 +22,7 @@ RUN cargo build --release
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.21-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
@@ -40,7 +42,7 @@ COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/log
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/
# Pull coordinator into a second stage deploy ubuntu container
FROM ubuntu:20.04
FROM nvidia/cuda:11.7.1-runtime-ubuntu22.04
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/lib
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
# ENV CHAIN_ID=534353

View File

@@ -0,0 +1,17 @@
#!/bin/bash
set -uex
PLONKY3_GPU_COMMIT=261b322 # v0.2.0
OPENVM_STARK_GPU_COMMIT=3082234 # PR#48
OPENVM_GPU_COMMIT=8094b4f # branch: patch-v1.2.0
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# checkout plonky3-gpu
cd $DIR/plonky3-gpu && git checkout ${PLONKY3_GPU_COMMIT}
# checkout openvm-stark-gpu
cd $DIR/openvm-stark-gpu && git checkout ${OPENVM_STARK_GPU_COMMIT}
# checkout openvm-gpu
cd $DIR/openvm-gpu && git checkout ${OPENVM_GPU_COMMIT}

View File

@@ -1,12 +1,10 @@
#!/bin/bash
set -uex
OPENVM_GPU_COMMIT=dfa10b4
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# checkout openvm-gpu
# clone openvm-gpu if not exists
if [ ! -d $DIR/openvm-gpu ]; then
git clone git@github.com:scroll-tech/openvm-gpu.git $DIR/openvm-gpu
fi
cd $DIR/openvm-gpu && git fetch && git checkout ${OPENVM_GPU_COMMIT}
cd $DIR/openvm-gpu && git fetch --all --force

View File

@@ -0,0 +1,10 @@
#!/bin/bash
set -uex
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# clone openvm-stark-gpu if not exists
if [ ! -d $DIR/openvm-stark-gpu ]; then
git clone git@github.com:scroll-tech/openvm-stark-gpu.git $DIR/openvm-stark-gpu
fi
cd $DIR/openvm-stark-gpu && git fetch --all --force

View File

@@ -0,0 +1,10 @@
#!/bin/bash
set -uex
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# clone plonky3-gpu if not exists
if [ ! -d $DIR/plonky3-gpu ]; then
git clone git@github.com:scroll-tech/plonky3-gpu.git $DIR/plonky3-gpu
fi
cd $DIR/plonky3-gpu && git fetch --all --force

View File

@@ -19,6 +19,74 @@ openvm-native-transpiler = { path = "/openvm-gpu/extensions/native/transpiler",
openvm-pairing-guest = { path = "/openvm-gpu/extensions/pairing/guest", default-features = false }
openvm-rv32im-guest = { path = "/openvm-gpu/extensions/rv32im/guest", default-features = false }
openvm-rv32im-transpiler = { path = "/openvm-gpu/extensions/rv32im/transpiler", default-features = false }
openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics"] }
openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
openvm-sha256-guest = { path = "/openvm-gpu/extensions/sha256/guest", default-features = false }
openvm-transpiler = { path = "/openvm-gpu/crates/toolchain/transpiler", default-features = false }
# stark-backend
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { path = "/openvm-stark-gpu/crates/stark-backend", features = ["gpu"] }
openvm-stark-sdk = { path = "/openvm-stark-gpu/crates/stark-sdk", features = ["gpu"] }
[patch."ssh://git@github.com/scroll-tech/openvm-stark-gpu.git"]
openvm-stark-backend = { path = "/openvm-stark-gpu/crates/stark-backend", features = ["gpu"] }
openvm-stark-sdk = { path = "/openvm-stark-gpu/crates/stark-sdk", features = ["gpu"] }
# plonky3
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { path = "/plonky3-gpu/air" }
p3-field = { path = "/plonky3-gpu/field" }
p3-commit = { path = "/plonky3-gpu/commit" }
p3-matrix = { path = "/plonky3-gpu/matrix" }
p3-baby-bear = { path = "/plonky3-gpu/baby-bear" }
p3-koala-bear = { path = "/plonky3-gpu/koala-bear" }
p3-util = { path = "/plonky3-gpu/util" }
p3-challenger = { path = "/plonky3-gpu/challenger" }
p3-dft = { path = "/plonky3-gpu/dft" }
p3-fri = { path = "/plonky3-gpu/fri" }
p3-goldilocks = { path = "/plonky3-gpu/goldilocks" }
p3-keccak = { path = "/plonky3-gpu/keccak" }
p3-keccak-air = { path = "/plonky3-gpu/keccak-air" }
p3-blake3 = { path = "/plonky3-gpu/blake3" }
p3-mds = { path = "/plonky3-gpu/mds" }
p3-monty-31 = { path = "/plonky3-gpu/monty-31" }
p3-merkle-tree = { path = "/plonky3-gpu/merkle-tree" }
p3-poseidon = { path = "/plonky3-gpu/poseidon" }
p3-poseidon2 = { path = "/plonky3-gpu/poseidon2" }
p3-poseidon2-air = { path = "/plonky3-gpu/poseidon2-air" }
p3-symmetric = { path = "/plonky3-gpu/symmetric" }
p3-uni-stark = { path = "/plonky3-gpu/uni-stark" }
p3-maybe-rayon = { path = "/plonky3-gpu/maybe-rayon" }
p3-bn254-fr = { path = "/plonky3-gpu/bn254-fr" }
# gpu crates
[patch."ssh://git@github.com/scroll-tech/plonky3-gpu.git"]
p3-gpu-base = { path = "/plonky3-gpu/gpu-base" }
p3-gpu-build = { path = "/plonky3-gpu/gpu-build" }
p3-gpu-field = { path = "/plonky3-gpu/gpu-field" }
p3-gpu-backend = { path = "/plonky3-gpu/gpu-backend" }
p3-gpu-module = { path = "/plonky3-gpu/gpu-module" }
p3-air = { path = "/plonky3-gpu/air" }
p3-field = { path = "/plonky3-gpu/field" }
p3-commit = { path = "/plonky3-gpu/commit" }
p3-matrix = { path = "/plonky3-gpu/matrix" }
p3-baby-bear = { path = "/plonky3-gpu/baby-bear" }
p3-koala-bear = { path = "/plonky3-gpu/koala-bear" }
p3-util = { path = "/plonky3-gpu/util" }
p3-challenger = { path = "/plonky3-gpu/challenger" }
p3-dft = { path = "/plonky3-gpu/dft" }
p3-fri = { path = "/plonky3-gpu/fri" }
p3-goldilocks = { path = "/plonky3-gpu/goldilocks" }
p3-keccak = { path = "/plonky3-gpu/keccak" }
p3-keccak-air = { path = "/plonky3-gpu/keccak-air" }
p3-blake3 = { path = "/plonky3-gpu/blake3" }
p3-mds = { path = "/plonky3-gpu/mds" }
p3-monty-31 = { path = "/plonky3-gpu/monty-31" }
p3-merkle-tree = { path = "/plonky3-gpu/merkle-tree" }
p3-poseidon = { path = "/plonky3-gpu/poseidon" }
p3-poseidon2 = { path = "/plonky3-gpu/poseidon2" }
p3-poseidon2-air = { path = "/plonky3-gpu/poseidon2-air" }
p3-symmetric = { path = "/plonky3-gpu/symmetric" }
p3-uni-stark = { path = "/plonky3-gpu/uni-stark" }
p3-maybe-rayon = { path = "/plonky3-gpu/maybe-rayon" }
p3-bn254-fr = { path = "/plonky3-gpu/bn254-fr" }

View File

@@ -41,7 +41,7 @@ func (g *gormLogger) Error(_ context.Context, msg string, data ...interface{}) {
func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
elapsed := time.Since(begin)
sql, rowsAffected := fc()
g.gethLogger.Debug("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
g.gethLogger.Trace("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
}
// InitDB initializes the db handler
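
The only functional change in this hunk is the log level: per-query SQL tracing now goes out at Trace instead of Debug. As a rough sketch of what flows through that hook (the closure signature follows gorm's logger interface as shown above; the wrapper interface and wiring here are assumptions for illustration, not part of this diff):

package main

import (
	"context"
	"time"
)

// traceLogger mirrors the Trace method implemented by gormLogger above.
type traceLogger interface {
	Trace(ctx context.Context, begin time.Time, fc func() (string, int64), err error)
}

// logOneQuery shows the values gorm hands to Trace: a start timestamp and
// a closure yielding the SQL text plus the affected row count.
func logOneQuery(g traceLogger) {
	begin := time.Now()
	g.Trace(context.Background(), begin, func() (string, int64) {
		return "SELECT 1", 1 // sql, rowsAffected
	}, nil)
}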

File diff suppressed because it is too large.

View File

@@ -14,8 +14,8 @@ ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-openvm-v1.0.0-rc.1" }
[dependencies]
euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.3.0", package = "scroll-zkvm-prover" }
euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.3.0", package = "scroll-zkvm-verifier" }
euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.4.2", package = "scroll-zkvm-prover" }
euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.4.2", package = "scroll-zkvm-verifier" }
base64 = "0.13.0"
env_logger = "0.9.0"
@@ -32,3 +32,35 @@ opt-level = 3
[profile.release]
opt-level = 3
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
"nightly-features",
], tag = "v0.2.0" }
p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }

View File

@@ -26,12 +26,6 @@ pub unsafe extern "C" fn verify_chunk_proof(
fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
let fork_name_str = c_char_to_str(fork_name);
// Skip verification for darwinV2 as we can't host darwinV2 and euclid verifiers on the same
// binary.
if fork_name_str == "darwinV2" {
return true as c_char;
}
let proof = c_char_to_vec(proof);
let verifier = verifier::get_verifier(fork_name_str);

View File

@@ -1,10 +1,7 @@
#![allow(static_mut_refs)]
mod euclid;
mod euclidv2;
use anyhow::{bail, Result};
use euclid::EuclidVerifier;
use euclidv2::EuclidV2Verifier;
use serde::{Deserialize, Serialize};
use std::{cell::OnceCell, path::Path, rc::Rc};
@@ -31,39 +28,25 @@ pub trait ProofVerifier {
#[derive(Debug, Serialize, Deserialize)]
pub struct CircuitConfig {
pub fork_name: String,
pub params_path: String,
pub assets_path: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct VerifierConfig {
pub low_version_circuit: CircuitConfig,
pub high_version_circuit: CircuitConfig,
}
type HardForkName = String;
struct VerifierPair(HardForkName, Rc<Box<dyn ProofVerifier>>);
static mut VERIFIER_LOW: OnceCell<VerifierPair> = OnceCell::new();
static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();
pub fn init(config: VerifierConfig) {
let verifier = EuclidVerifier::new(&config.high_version_circuit.assets_path);
unsafe {
VERIFIER_LOW
.set(VerifierPair(
"euclid".to_string(),
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();
}
let verifier = EuclidV2Verifier::new(&config.high_version_circuit.assets_path);
unsafe {
VERIFIER_HIGH
.set(VerifierPair(
"euclidV2".to_string(),
config.high_version_circuit.fork_name,
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();
@@ -72,12 +55,6 @@ pub fn init(config: VerifierConfig) {
pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
unsafe {
if let Some(verifier) = VERIFIER_LOW.get() {
if verifier.0 == fork_name {
return Ok(verifier.1.clone());
}
}
if let Some(verifier) = VERIFIER_HIGH.get() {
if verifier.0 == fork_name {
return Ok(verifier.1.clone());

View File

@@ -1,65 +0,0 @@
use super::{ProofVerifier, TaskType, VKDump};
use anyhow::Result;
use crate::utils::panic_catch;
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV1, ChunkVerifier};
use std::{fs::File, path::Path};
pub struct EuclidVerifier {
chunk_verifier: ChunkVerifier,
batch_verifier: BatchVerifier,
bundle_verifier: BundleVerifierEuclidV1,
}
impl EuclidVerifier {
pub fn new(assets_dir: &str) -> Self {
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
let config = Path::new(assets_dir).join("root-verifier-vm-config");
let exe = Path::new(assets_dir).join("root-verifier-committed-exe");
Self {
chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up chunk verifier"),
batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up batch verifier"),
bundle_verifier: BundleVerifierEuclidV1::setup(&config, &exe, &verifier_bin)
.expect("Setting up bundle verifier"),
}
}
}
impl ProofVerifier for EuclidVerifier {
fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
self.chunk_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
self.batch_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
self.bundle_verifier
.verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
}
})
.map_err(|err_str: String| anyhow::anyhow!(err_str))
}
fn dump_vk(&self, file: &Path) {
let f = File::create(file).expect("Failed to open file to dump VK");
let dump = VKDump {
chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
};
serde_json::to_writer(f, &dump).expect("Failed to dump VK");
}
}

View File

@@ -14,7 +14,6 @@ const (
EuclidFork = "euclid"
EuclidV2Fork = "euclidV2"
EuclidForkNameForProver = "euclidv1"
EuclidV2ForkNameForProver = "euclidv2"
)
@@ -99,22 +98,22 @@ func (e *Byte48) UnmarshalJSON(input []byte) error {
// BatchTaskDetail is a type containing BatchTask detail.
type BatchTaskDetail struct {
// use one of the strings EuclidFork / EuclidV2Fork
ForkName string `json:"fork_name"`
ChunkInfos []*ChunkInfo `json:"chunk_infos"`
ChunkProofs []ChunkProof `json:"chunk_proofs"`
BatchHeader interface{} `json:"batch_header"`
BlobBytes []byte `json:"blob_bytes"`
KzgProof Byte48 `json:"kzg_proof,omitempty"`
KzgCommitment Byte48 `json:"kzg_commitment,omitempty"`
ChallengeDigest common.Hash `json:"challenge_digest,omitempty"`
ForkName string `json:"fork_name"`
ChunkInfos []*ChunkInfo `json:"chunk_infos"`
ChunkProofs []*OpenVMChunkProof `json:"chunk_proofs"`
BatchHeader interface{} `json:"batch_header"`
BlobBytes []byte `json:"blob_bytes"`
KzgProof Byte48 `json:"kzg_proof,omitempty"`
KzgCommitment Byte48 `json:"kzg_commitment,omitempty"`
ChallengeDigest common.Hash `json:"challenge_digest,omitempty"`
}
// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
type BundleTaskDetail struct {
// use one of the strings EuclidFork / EuclidV2Fork
ForkName string `json:"fork_name"`
BatchProofs []BatchProof `json:"batch_proofs"`
BundleInfo *OpenVMBundleInfo `json:"bundle_info,omitempty"`
ForkName string `json:"fork_name"`
BatchProofs []*OpenVMBatchProof `json:"batch_proofs"`
BundleInfo *OpenVMBundleInfo `json:"bundle_info,omitempty"`
}
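
With the Halo2 proof interfaces removed further down in this file's diff, both task-detail structs now carry the concrete OpenVM types directly. A minimal sketch of the new call pattern, assuming the scroll-tech/common/types/message import path shown elsewhere in this diff; the raw bytes and helper function are illustrative, not from the change itself:

package main

import (
	"encoding/json"

	"scroll-tech/common/types/message"
)

// buildBundleDetail unmarshals a stored batch proof into the concrete
// OpenVM type and embeds it in a BundleTaskDetail, mirroring what the
// relayer and coordinator code in this diff now do.
func buildBundleDetail(raw []byte) ([]byte, error) {
	var proof message.OpenVMBatchProof
	if err := json.Unmarshal(raw, &proof); err != nil {
		return nil, err
	}
	detail := message.BundleTaskDetail{
		ForkName:    message.EuclidV2ForkNameForProver, // "euclidv2"
		BatchProofs: []*message.OpenVMBatchProof{&proof},
	}
	return json.Marshal(detail)
}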
// ChunkInfo is for calculating pi_hash for chunk
@@ -143,157 +142,6 @@ type BlockContextV2 struct {
NumL1Msgs uint16 `json:"num_l1_msgs"`
}
// SubCircuitRowUsage tracing info added in v0.11.0rc8
type SubCircuitRowUsage struct {
Name string `json:"name"`
RowNumber uint64 `json:"row_number"`
}
// ChunkProof
type ChunkProof interface {
Proof() []byte
}
// NewChunkProof creates a new ChunkProof instance.
func NewChunkProof(hardForkName string) ChunkProof {
switch hardForkName {
case EuclidFork, EuclidV2Fork:
return &OpenVMChunkProof{}
default:
return &Halo2ChunkProof{}
}
}
// Halo2ChunkProof includes the proof info that is required for chunk verification and rollup.
type Halo2ChunkProof struct {
StorageTrace []byte `json:"storage_trace,omitempty"`
Protocol []byte `json:"protocol"`
RawProof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between coordinator computation and prover computation
ChunkInfo *ChunkInfo `json:"chunk_info,omitempty"`
GitVersion string `json:"git_version,omitempty"`
RowUsages []SubCircuitRowUsage `json:"row_usages,omitempty"`
}
// Proof returns the proof bytes of a ChunkProof
func (ap *Halo2ChunkProof) Proof() []byte {
return ap.RawProof
}
// BatchProof
type BatchProof interface {
SanityCheck() error
Proof() []byte
}
// NewBatchProof creates a new BatchProof instance.
func NewBatchProof(hardForkName string) BatchProof {
switch hardForkName {
case EuclidFork, EuclidV2Fork:
return &OpenVMBatchProof{}
default:
return &Halo2BatchProof{}
}
}
// Halo2BatchProof includes the proof info that is required for batch verification and rollup.
type Halo2BatchProof struct {
Protocol []byte `json:"protocol"`
RawProof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between coordinator computation and prover computation
BatchHash common.Hash `json:"batch_hash"`
GitVersion string `json:"git_version,omitempty"`
}
// Proof returns the proof bytes of a BatchProof
func (ap *Halo2BatchProof) Proof() []byte {
return ap.RawProof
}
// SanityCheck checks whether a BatchProof is in a legal format
func (ap *Halo2BatchProof) SanityCheck() error {
if ap == nil {
return errors.New("agg_proof is nil")
}
if len(ap.RawProof) == 0 {
return errors.New("proof not ready")
}
if len(ap.RawProof)%32 != 0 {
return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.RawProof))
}
if len(ap.Instances) == 0 {
return errors.New("instance not ready")
}
if len(ap.Vk) == 0 {
return errors.New("vk not ready")
}
return nil
}
// BundleProof
type BundleProof interface {
SanityCheck() error
Proof() []byte
}
// NewBundleProof creates a new BundleProof instance.
func NewBundleProof(hardForkName string) BundleProof {
switch hardForkName {
case EuclidFork, EuclidV2Fork:
return &OpenVMBundleProof{}
default:
return &Halo2BundleProof{}
}
}
// Halo2BundleProof includes the proof info that is required for verification of a bundle of batch proofs.
type Halo2BundleProof struct {
RawProof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between coordinator computation and prover computation
GitVersion string `json:"git_version,omitempty"`
}
// Proof returns the proof bytes of a BundleProof
func (ap *Halo2BundleProof) Proof() []byte {
return ap.RawProof
}
// SanityCheck checks whether a BundleProof is in a legal format
func (ap *Halo2BundleProof) SanityCheck() error {
if ap == nil {
return errors.New("agg_proof is nil")
}
if len(ap.RawProof) == 0 {
return errors.New("proof not ready")
}
if len(ap.RawProof)%32 != 0 {
return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.RawProof))
}
if len(ap.Instances) == 0 {
return errors.New("instance not ready")
}
if len(ap.Vk) == 0 {
return errors.New("vk not ready")
}
return nil
}
// OpenVMProof is a flattened VM proof
type OpenVMProof struct {
Proof []byte `json:"proofs"`

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.5.9"
var tag = "v4.5.18"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -90,17 +90,9 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
cfg.ProverManager = &coordinatorConfig.ProverManager{
ProversPerSession: 1,
Verifier: &coordinatorConfig.VerifierConfig{
MockMode: true,
LowVersionCircuit: &coordinatorConfig.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "darwin",
MinProverVersion: "v4.4.57",
},
HighVersionCircuit: &coordinatorConfig.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "darwinV2",
ForkName: "euclidV2",
MinProverVersion: "v4.4.89",
},
},

View File

@@ -62,14 +62,14 @@ func action(ctx *cli.Context) error {
return fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", taskID)
}
var batchProofs []message.BatchProof
var batchProofs []*message.OpenVMBatchProof
for _, batch := range batches {
proof := message.NewBatchProof("darwinV2")
var proof message.OpenVMBatchProof
if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
log.Error("failed to unmarshal batch proof")
return fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, taskID, batch.Hash)
}
batchProofs = append(batchProofs, proof)
batchProofs = append(batchProofs, &proof)
}
taskDetail := message.BundleTaskDetail{

View File

@@ -7,17 +7,9 @@
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"verifier": {
"mock_mode": true,
"low_version_circuit": {
"params_path": "params",
"assets_path": "assets",
"fork_name": "darwin",
"min_prover_version": "v4.4.43"
},
"high_version_circuit": {
"params_path": "params",
"assets_path": "assets",
"fork_name": "darwinV2",
"fork_name": "euclidV2",
"min_prover_version": "v4.4.45"
}
}

View File

@@ -51,7 +51,6 @@ type Config struct {
// CircuitConfig circuit items.
type CircuitConfig struct {
ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`
ForkName string `json:"fork_name"`
MinProverVersion string `json:"min_prover_version"`
@@ -59,8 +58,6 @@ type CircuitConfig struct {
// VerifierConfig load zk verifier config.
type VerifierConfig struct {
MockMode bool `json:"mock_mode"`
LowVersionCircuit *CircuitConfig `json:"low_version_circuit"`
HighVersionCircuit *CircuitConfig `json:"high_version_circuit"`
}
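
After this change the verifier config reduces to mock_mode plus a single high-version circuit. A sketch of the matching struct literal, with the field set taken from this diff and the values as placeholders:

package main

import "scroll-tech/coordinator/internal/config"

// newVerifierCfg builds the trimmed-down config: no LowVersionCircuit,
// no ParamsPath, just the single post-Euclid circuit entry.
func newVerifierCfg() *config.VerifierConfig {
	return &config.VerifierConfig{
		MockMode: false,
		HighVersionCircuit: &config.CircuitConfig{
			AssetsPath:       "assets",
			ForkName:         "euclidV2",
			MinProverVersion: "v4.4.89",
		},
	}
}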

View File

@@ -15,15 +15,18 @@ func TestConfig(t *testing.T) {
"prover_manager": {
"provers_per_session": 1,
"session_attempts": 5,
"external_prover_threshold": 32,
"bundle_collection_time_sec": 180,
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"verifier": {
"mock_mode": true,
"params_path": "",
"agg_vk_path": ""
"high_version_circuit": {
"assets_path": "assets",
"fork_name": "euclidV2",
"min_prover_version": "v4.4.45"
}
},
"max_verifier_workers": 4,
"min_prover_version": "v1.0.0"
"max_verifier_workers": 4
},
"db": {
"driver_name": "postgres",

View File

@@ -26,7 +26,7 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
panic("proof receiver new verifier failure")
}
log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap, "bundleVerifier", vf.BundleVkMap, "openVmVerifier", vf.OpenVMVkMap)
log.Info("verifier created", "openVmVerifier", vf.OpenVMVkMap)
Auth = NewAuthController(db, cfg, vf)
GetTask = NewGetTaskController(cfg, chainCfg, db, reg)

View File

@@ -9,7 +9,6 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
@@ -22,9 +21,6 @@ import (
type LoginLogic struct {
cfg *config.Config
challengeOrm *orm.Challenge
chunkVks map[string]struct{}
batchVKs map[string]struct{}
bundleVks map[string]struct{}
openVmVks map[string]struct{}
@@ -34,28 +30,13 @@ type LoginLogic struct {
// NewLoginLogic new a LoginLogic
func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
proverVersionHardForkMap := make(map[string][]string)
if version.CheckScrollRepoVersion(cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
log.Error("config file error, low verifier min_prover_version should not more than high verifier min_prover_version",
"low verifier min_prover_version", cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion,
"high verifier min_prover_version", cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion)
panic("verifier config file error")
}
var highHardForks []string
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.HighVersionCircuit.ForkName)
if cfg.ProverManager.Verifier.HighVersionCircuit.ForkName != message.EuclidFork && cfg.ProverManager.Verifier.HighVersionCircuit.ForkName != message.EuclidV2Fork {
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.LowVersionCircuit.ForkName)
}
highHardForks = append(highHardForks, message.EuclidFork, message.EuclidV2Fork)
proverVersionHardForkMap[cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion] = highHardForks
proverVersionHardForkMap[cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion] = []string{cfg.ProverManager.Verifier.LowVersionCircuit.ForkName}
return &LoginLogic{
cfg: cfg,
chunkVks: vf.ChunkVKMap,
batchVKs: vf.BatchVKMap,
bundleVks: vf.BundleVkMap,
openVmVks: vf.OpenVMVkMap,
challengeOrm: orm.NewChallenge(db),
proverVersionHardForkMap: proverVersionHardForkMap,
@@ -75,49 +56,25 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
return errors.New("auth message verify failure")
}
// FIXME: for backward compatibility, set prover version as darwin prover version,
// change v4.4.56 to l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion after Euclid upgrade, including the log.
// hardcode the prover version because l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion is used in another check and should be set as v4.4.89 for darwinV2 provers.
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, "v4.4.56") {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
"v4.4.56", login.Message.ProverVersion)
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion, login.Message.ProverVersion)
}
if len(login.Message.ProverTypes) > 0 {
vks := make(map[string]struct{})
for _, proverType := range login.Message.ProverTypes {
switch proverType {
case types.ProverTypeChunk:
for vk := range l.chunkVks {
vks[vk] = struct{}{}
}
case types.ProverTypeBatch:
for vk := range l.batchVKs {
vks[vk] = struct{}{}
}
for vk := range l.bundleVks {
vks[vk] = struct{}{}
}
case types.ProverTypeOpenVM:
for vk := range l.openVmVks {
vks[vk] = struct{}{}
}
default:
log.Error("invalid prover_type", "value", proverType, "prover name", login.Message.ProverName, "prover_version", login.Message.ProverVersion)
}
}
vks := make(map[string]struct{})
for vk := range l.openVmVks {
vks[vk] = struct{}{}
}
for _, vk := range login.Message.VKs {
if _, ok := vks[vk]; !ok {
log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
"prover_version", login.Message.ProverVersion, "message", login.Message)
if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
version.Version, login.Message.ProverVersion)
}
// if the prover reports the same prover version
return errors.New("incompatible vk. please check your params files or config files")
for _, vk := range login.Message.VKs {
if _, ok := vks[vk]; !ok {
log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
"prover_version", login.Message.ProverVersion, "message", login.Message)
if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
version.Version, login.Message.ProverVersion)
}
// if the prover reports the same prover version
return errors.New("incompatible vk. please check your params files or config files")
}
}
@@ -142,12 +99,6 @@ func (l *LoginLogic) ProverHardForkName(login *types.LoginParameter) (string, er
}
proverVersion := proverVersionSplits[0]
// allowing darwin provers to login, because darwin provers can prove darwinV2 chunk tasks
if proverVersion == "v4.4.56" {
return "darwin", nil
}
if hardForkNames, ok := l.proverVersionHardForkMap[proverVersion]; ok {
return strings.Join(hardForkNames, ","), nil
}

View File

@@ -197,34 +197,27 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
return nil, fmt.Errorf("no chunk found for batch task id:%s", task.TaskID)
}
var chunkProofs []message.ChunkProof
var chunkProofs []*message.OpenVMChunkProof
var chunkInfos []*message.ChunkInfo
for _, chunk := range chunks {
proof := message.NewChunkProof(hardForkName)
var proof message.OpenVMChunkProof
if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, task.TaskID, chunk.Hash)
}
chunkProofs = append(chunkProofs, proof)
chunkProofs = append(chunkProofs, &proof)
chunkInfo := message.ChunkInfo{
ChainID: bp.cfg.L2.ChainID,
PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
PostStateRoot: common.HexToHash(chunk.StateRoot),
WithdrawRoot: common.HexToHash(chunk.WithdrawRoot),
DataHash: common.HexToHash(chunk.Hash),
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
PostMsgQueueHash: common.HexToHash(chunk.PostL1MessageQueueHash),
IsPadding: false,
}
if halo2Proof, ok := proof.(*message.Halo2ChunkProof); ok {
if halo2Proof.ChunkInfo != nil {
chunkInfo.TxBytes = halo2Proof.ChunkInfo.TxBytes
}
}
if openvmProof, ok := proof.(*message.OpenVMChunkProof); ok {
chunkInfo.InitialBlockNumber = openvmProof.MetaData.ChunkInfo.InitialBlockNumber
chunkInfo.BlockCtxs = openvmProof.MetaData.ChunkInfo.BlockCtxs
chunkInfo.TxDataLength = openvmProof.MetaData.ChunkInfo.TxDataLength
ChainID: bp.cfg.L2.ChainID,
PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
PostStateRoot: common.HexToHash(chunk.StateRoot),
WithdrawRoot: common.HexToHash(chunk.WithdrawRoot),
DataHash: common.HexToHash(chunk.Hash),
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
PostMsgQueueHash: common.HexToHash(chunk.PostL1MessageQueueHash),
IsPadding: false,
InitialBlockNumber: proof.MetaData.ChunkInfo.InitialBlockNumber,
BlockCtxs: proof.MetaData.ChunkInfo.BlockCtxs,
TxDataLength: proof.MetaData.ChunkInfo.TxDataLength,
}
chunkInfos = append(chunkInfos, &chunkInfo)
}
@@ -258,7 +251,7 @@ func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *or
}
}
func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []message.ChunkProof, hardForkName string) (*message.BatchTaskDetail, error) {
func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []*message.OpenVMChunkProof, hardForkName string) (*message.BatchTaskDetail, error) {
taskDetail := &message.BatchTaskDetail{
ChunkInfos: chunkInfos,
ChunkProofs: chunkProofs,
@@ -266,8 +259,9 @@ func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*
if hardForkName == message.EuclidV2Fork {
taskDetail.ForkName = message.EuclidV2ForkNameForProver
} else if hardForkName == message.EuclidFork {
taskDetail.ForkName = message.EuclidForkNameForProver
} else {
log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
}
dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)

View File

@@ -200,13 +200,13 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
}
var batchProofs []message.BatchProof
var batchProofs []*message.OpenVMBatchProof
for _, batch := range batches {
proof := message.NewBatchProof(hardForkName)
var proof message.OpenVMBatchProof
if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
return nil, fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, task.TaskID, batch.Hash)
}
batchProofs = append(batchProofs, proof)
batchProofs = append(batchProofs, &proof)
}
taskDetail := message.BundleTaskDetail{
@@ -215,8 +215,9 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
if hardForkName == message.EuclidV2Fork {
taskDetail.ForkName = message.EuclidV2ForkNameForProver
} else if hardForkName == message.EuclidFork {
taskDetail.ForkName = message.EuclidForkNameForProver
} else {
log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
}
taskDetail.BundleInfo = &message.OpenVMBundleInfo{
@@ -227,10 +228,7 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
NumBatches: uint32(len(batches)),
PrevBatchHash: common.HexToHash(batches[0].ParentBatchHash),
BatchHash: common.HexToHash(batches[len(batches)-1].Hash),
}
if hardForkName == message.EuclidV2Fork {
taskDetail.BundleInfo.MsgQueueHash = common.HexToHash(batches[len(batches)-1].PostL1MessageQueueHash)
MsgQueueHash: common.HexToHash(batches[len(batches)-1].PostL1MessageQueueHash),
}
batchProofsBytes, err := json.Marshal(taskDetail)

View File

@@ -195,8 +195,9 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
if hardForkName == message.EuclidV2Fork {
taskDetail.ForkName = message.EuclidV2ForkNameForProver
} else if hardForkName == message.EuclidFork {
taskDetail.ForkName = message.EuclidForkNameForProver
} else {
log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
}
var err error

View File

@@ -121,11 +121,6 @@ func (b *BaseProverTask) hardForkSanityCheck(ctx *gin.Context, taskCtx *proverTa
return "", getHardForkErr
}
// for backward compatibility, darwin chunk prover can still prove darwinV2 chunk tasks
if taskCtx.taskType == message.ProofTypeChunk && hardForkName == "darwinV2" && strings.HasPrefix(taskCtx.ProverVersion, "v4.4.56") {
return hardForkName, nil
}
if _, ok := taskCtx.HardForkNames[hardForkName]; !ok {
return "", fmt.Errorf("to be assigned prover task's hard-fork name is not the same as prover, proverName: %s, proverVersion: %s, proverSupportHardForkNames: %s, taskHardForkName: %v", taskCtx.ProverName, taskCtx.ProverVersion, taskCtx.HardForkNames, hardForkName)
}

View File

@@ -171,19 +171,19 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
switch message.ProofType(proofParameter.TaskType) {
case message.ProofTypeChunk:
chunkProof := message.NewChunkProof(hardForkName)
chunkProof := &message.OpenVMChunkProof{}
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &chunkProof); unmarshalErr != nil {
return unmarshalErr
}
success, verifyErr = m.verifier.VerifyChunkProof(chunkProof, hardForkName)
case message.ProofTypeBatch:
batchProof := message.NewBatchProof(hardForkName)
batchProof := &message.OpenVMBatchProof{}
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &batchProof); unmarshalErr != nil {
return unmarshalErr
}
success, verifyErr = m.verifier.VerifyBatchProof(batchProof, hardForkName)
case message.ProofTypeBundle:
bundleProof := message.NewBundleProof(hardForkName)
bundleProof := &message.OpenVMBundleProof{}
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &bundleProof); unmarshalErr != nil {
return unmarshalErr
}

View File

@@ -10,31 +10,26 @@ import (
// NewVerifier sets up a mock verifier.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
batchVKMap := map[string]struct{}{"mock_vk": {}}
chunkVKMap := map[string]struct{}{"mock_vk": {}}
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
return &Verifier{cfg: cfg, OpenVMVkMap: map[string]struct{}{"mock_vk": {}}}, nil
}
// VerifyChunkProof returns a mock verification result for a ChunkProof.
func (v *Verifier) VerifyChunkProof(proof message.ChunkProof, forkName string) (bool, error) {
if string(proof.Proof()) == InvalidTestProof {
func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName string) (bool, error) {
if proof.VmProof != nil && string(proof.VmProof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil
}
// VerifyBatchProof returns a mock verification result for a BatchProof.
func (v *Verifier) VerifyBatchProof(proof message.BatchProof, forkName string) (bool, error) {
if string(proof.Proof()) == InvalidTestProof {
func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName string) (bool, error) {
if proof.VmProof != nil && string(proof.VmProof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil
}
// VerifyBundleProof returns a mock verification result for a BundleProof.
func (v *Verifier) VerifyBundleProof(proof message.BundleProof, forkName string) (bool, error) {
if string(proof.Proof()) == InvalidTestProof {
return false, nil
}
func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName string) (bool, error) {
return true, nil
}
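
A small sketch of exercising this mock in a test. The import paths are assumptions based on the repository layout seen elsewhere in this diff; the behavior follows the code above, where a nil VmProof (or any proof bytes other than InvalidTestProof) verifies as true:

package verifier_test

import (
	"testing"

	"scroll-tech/common/types/message"
	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/logic/verifier"
)

func TestMockVerifyChunk(t *testing.T) {
	v, err := verifier.NewVerifier(&config.VerifierConfig{MockMode: true})
	if err != nil {
		t.Fatal(err)
	}
	// VmProof is nil, so the InvalidTestProof short-circuit is skipped
	// and the mock reports success.
	ok, err := v.VerifyChunkProof(&message.OpenVMChunkProof{}, "euclidV2")
	if err != nil || !ok {
		t.Fatalf("expected mock verification to pass, got ok=%v err=%v", ok, err)
	}
}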

View File

@@ -7,11 +7,8 @@ import (
// InvalidTestProof invalid proof used in tests
const InvalidTestProof = "this is a invalid proof"
// Verifier represents a rust ffi to a halo2 verifier.
// Verifier represents a rust ffi to a verifier.
type Verifier struct {
cfg *config.VerifierConfig
ChunkVKMap map[string]struct{}
BatchVKMap map[string]struct{}
BundleVkMap map[string]struct{}
OpenVMVkMap map[string]struct{}
}

View File

@@ -30,14 +30,12 @@ import (
// in `*config.CircuitConfig` being changed
type rustCircuitConfig struct {
ForkName string `json:"fork_name"`
ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`
}
func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
return &rustCircuitConfig{
ForkName: cfg.ForkName,
ParamsPath: cfg.ParamsPath,
AssetsPath: cfg.AssetsPath,
}
}
@@ -46,13 +44,11 @@ func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
// Defining a brand new struct here eliminates side effects in case fields
// in `*config.VerifierConfig` are changed
type rustVerifierConfig struct {
LowVersionCircuit *rustCircuitConfig `json:"low_version_circuit"`
HighVersionCircuit *rustCircuitConfig `json:"high_version_circuit"`
}
func newRustVerifierConfig(cfg *config.VerifierConfig) *rustVerifierConfig {
return &rustVerifierConfig{
LowVersionCircuit: newRustCircuitConfig(cfg.LowVersionCircuit),
HighVersionCircuit: newRustCircuitConfig(cfg.HighVersionCircuit),
}
}
@@ -65,19 +61,6 @@ type rustVkDump struct {
// NewVerifier sets up a rust ffi to call verify.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
if cfg.MockMode {
chunkVKMap := map[string]struct{}{"mock_vk": {}}
batchVKMap := map[string]struct{}{"mock_vk": {}}
bundleVKMap := map[string]struct{}{"mock_vk": {}}
openVMVkMap := map[string]struct{}{"mock_vk": {}}
return &Verifier{
cfg: cfg,
ChunkVKMap: chunkVKMap,
BatchVKMap: batchVKMap,
BundleVkMap: bundleVKMap,
OpenVMVkMap: openVMVkMap,
}, nil
}
verifierConfig := newRustVerifierConfig(cfg)
configBytes, err := json.Marshal(verifierConfig)
if err != nil {
@@ -93,16 +76,9 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
v := &Verifier{
cfg: cfg,
ChunkVKMap: make(map[string]struct{}),
BatchVKMap: make(map[string]struct{}),
BundleVkMap: make(map[string]struct{}),
OpenVMVkMap: make(map[string]struct{}),
}
if err := v.loadLowVersionVKs(cfg); err != nil {
return nil, err
}
if err := v.loadOpenVMVks(message.EuclidFork); err != nil {
return nil, err
}
@@ -111,21 +87,11 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
return nil, err
}
v.loadDarwinVKs()
return v, nil
}
// VerifyBatchProof verifies a ZkProof by marshaling it and sending it to the Halo2 Verifier.
func (v *Verifier) VerifyBatchProof(proof message.BatchProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, batch verifier disabled")
if string(proof.Proof()) == InvalidTestProof {
return false, nil
}
return true, nil
}
// VerifyBatchProof verifies a ZkProof by marshaling it and sending it to the Verifier.
func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName string) (bool, error) {
buf, err := json.Marshal(proof)
if err != nil {
return false, err
@@ -143,16 +109,8 @@ func (v *Verifier) VerifyBatchProof(proof message.BatchProof, forkName string) (
return verified != 0, nil
}
// VerifyChunkProof verifies a ZkProof by marshaling it and sending it to the Halo2 Verifier.
func (v *Verifier) VerifyChunkProof(proof message.ChunkProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, verifier disabled")
if string(proof.Proof()) == InvalidTestProof {
return false, nil
}
return true, nil
}
// VerifyChunkProof verifies a ZkProof by marshaling it and sending it to the Verifier.
func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName string) (bool, error) {
buf, err := json.Marshal(proof)
if err != nil {
return false, err
@@ -171,15 +129,7 @@ func (v *Verifier) VerifyChunkProof(proof message.ChunkProof, forkName string) (
}
// VerifyBundleProof verifies a ZkProof for a bundle of batches, by marshaling it and verifying it via the EVM verifier.
func (v *Verifier) VerifyBundleProof(proof message.BundleProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, verifier disabled")
if string(proof.Proof()) == InvalidTestProof {
return false, nil
}
return true, nil
}
func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName string) (bool, error) {
buf, err := json.Marshal(proof)
if err != nil {
return false, err
@@ -209,32 +159,6 @@ func (v *Verifier) readVK(filePat string) (string, error) {
return base64.StdEncoding.EncodeToString(byt), nil
}
// load low version vks, current is darwin
func (v *Verifier) loadLowVersionVKs(cfg *config.VerifierConfig) error {
bundleVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_bundle.vkey"))
if err != nil {
return err
}
batchVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_batch.vkey"))
if err != nil {
return err
}
chunkVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_chunk.vkey"))
if err != nil {
return err
}
v.BundleVkMap[bundleVK] = struct{}{}
v.BatchVKMap[batchVK] = struct{}{}
v.ChunkVKMap[chunkVK] = struct{}{}
return nil
}
func (v *Verifier) loadDarwinVKs() {
v.BundleVkMap["AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD5dsp1rEy7PSqiIFikkkOPqKokLW2mZSwCbtKdkfLQcvTxARUwHSe4iZe27PRJ5WWaLqtRV1+x6+pSVKtcPtaV4kE7v2YJRf0582hxiAF0IBaOoREdpyNfA2a9cvhWb2TMaPrUYP9EDQ7CUiW1FQzxbjGc95ua2htscnpU7d9S5stHWzKb7okkCG7bTIL9aG6qTQo2YXW7n3H3Ir47oVJB7IKrUzKGvI5Wmanh2zpZOJ9Qm4/wY24cT7cJz+Ux6wAg=="] = struct{}{}
v.BatchVKMap["AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD1DEjW4Kell67H07wazT5DdzrSh4+amh+cmosQHp9p9snFypyoBGt3UHtoJGQBZlywZWDS9ht5pnaEoGBdaKcQk+lFb+WxTiId0KOAa0mafTZTQw8yToy57Jple64qzlRu1dux30tZZGuerLN1CKzg5Xl2iOpMK+l87jCINwVp5cUtF/XrvhBbU7onKh3KBiy99iUqVyA3Y6iiIZhGKWBSuSA4bNgDYIoVkqjHpdL35aEShoRO6pNXt7rDzxFoPzH0JuPI54nE4OhVrzZXwtkAEosxVa/fszcE092FH+HhhtxZBYe/KEzwdISU9TOPdId3UF/UMYC0MiYOlqffVTgAg="] = struct{}{}
v.ChunkVKMap["AAAAGQAAAATyWEABRbJ6hQQ5/zLX1gTasr7349minA9rSgMS6gDeHwZKqikRiO3md+pXjjxMHnKQtmXYgMXhJSvlmZ+Ws+cheuly2X1RuNQzcZuRImaKPR9LJsVZYsXfJbuqdKX8p0Gj8G83wMJOmTzNVUyUol0w0lTU+CEiTpHOnxBsTF3EWaW3s1u4ycOgWt1c9M6s7WmaBZLYgAWYCunO5CLCLApNGbCASeck/LuSoedEri5u6HccCKU2khG6zl6W07jvYSbDVLJktbjRiHv+/HQix+K14j8boo8Z/unhpwXCsPxkQA=="] = struct{}{}
}
func (v *Verifier) loadOpenVMVks(forkName string) error {
tempFile := path.Join(os.TempDir(), "openVmVk.json")
defer func() {

View File

@@ -29,17 +29,9 @@ func TestFFI(t *testing.T) {
as := assert.New(t)
cfg := &config.VerifierConfig{
MockMode: false,
LowVersionCircuit: &config.CircuitConfig{
ParamsPath: *paramsPath,
AssetsPath: *assetsPathLo,
ForkName: "darwin",
MinProverVersion: "",
},
HighVersionCircuit: &config.CircuitConfig{
ParamsPath: *paramsPath,
AssetsPath: *assetsPathHi,
ForkName: "darwinV2",
ForkName: "euclidV2",
MinProverVersion: "",
},
}
@@ -48,43 +40,43 @@ func TestFFI(t *testing.T) {
as.NoError(err)
chunkProof1 := readChunkProof(*chunkProofPath1, as)
chunkOk1, err := v.VerifyChunkProof(chunkProof1, "darwinV2")
chunkOk1, err := v.VerifyChunkProof(chunkProof1, "euclidV2")
as.NoError(err)
as.True(chunkOk1)
t.Log("Verified chunk proof 1")
chunkProof2 := readChunkProof(*chunkProofPath2, as)
chunkOk2, err := v.VerifyChunkProof(chunkProof2, "darwinV2")
chunkOk2, err := v.VerifyChunkProof(chunkProof2, "euclidV2")
as.NoError(err)
as.True(chunkOk2)
t.Log("Verified chunk proof 2")
batchProof := readBatchProof(*batchProofPath, as)
batchOk, err := v.VerifyBatchProof(batchProof, "darwinV2")
batchOk, err := v.VerifyBatchProof(batchProof, "euclidV2")
as.NoError(err)
as.True(batchOk)
t.Log("Verified batch proof")
}
func readBatchProof(filePat string, as *assert.Assertions) types.BatchProof {
func readBatchProof(filePat string, as *assert.Assertions) *types.OpenVMBatchProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)
proof := &types.Halo2BatchProof{}
proof := &types.OpenVMBatchProof{}
as.NoError(json.Unmarshal(byt, proof))
return proof
}
func readChunkProof(filePat string, as *assert.Assertions) types.ChunkProof {
func readChunkProof(filePat string, as *assert.Assertions) *types.OpenVMChunkProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)
proof := &types.Halo2ChunkProof{}
proof := &types.OpenVMChunkProof{}
as.NoError(json.Unmarshal(byt, proof))
return proof

View File

@@ -22,7 +22,7 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
ProverVersion: "v0.0.1",
Challenge: "abcdef",
ProverProviderType: ProverProviderTypeInternal,
ProverTypes: []ProverType{ProverTypeBatch},
ProverTypes: []ProverType{ProverTypeOpenVM},
VKs: []string{"vk1", "vk2"},
},
PublicKey: publicKeyHex,
@@ -64,7 +64,7 @@ func TestGenerateSignature(t *testing.T) {
ProverVersion: "v4.4.45-37af5ef5-38a68e2-1c5093c",
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjQ4Mzg0ODUsIm9yaWdfaWF0IjoxNzI0ODM0ODg1LCJyYW5kb20iOiJ6QmdNZGstNGc4UzNUNTFrVEFsYk1RTXg2TGJ4SUs4czY3ejM2SlNuSFlJPSJ9.x9PvihhNx2w4_OX5uCrv8QJCNYVQkIi-K2k8XFXYmik",
ProverProviderType: ProverProviderTypeInternal,
ProverTypes: []ProverType{ProverTypeChunk},
ProverTypes: []ProverType{ProverTypeOpenVM},
VKs: []string{"mock_vk"},
},
PublicKey: publicKeyHex,

View File

@@ -2,7 +2,6 @@ package types
import (
"fmt"
"scroll-tech/common/types/message"
)
@@ -21,10 +20,10 @@ type ProverType uint8
func (r ProverType) String() string {
switch r {
case ProverTypeChunk:
return "prover type chunk"
case ProverTypeBatch:
return "prover type batch"
case ProverTypeChunkDeprecated:
return "prover type chunk (deprecated)"
case ProverTypeBatchDeprecated:
return "prover type batch (deprecated)"
case ProverTypeOpenVM:
return "prover type openvm"
default:
@@ -35,10 +34,10 @@ func (r ProverType) String() string {
const (
// ProverTypeUndefined is an unknown prover type
ProverTypeUndefined ProverType = iota
// ProverTypeChunk signals it's a chunk prover, which can prove chunk_tasks
ProverTypeChunk
// ProverTypeBatch signals it's a batch prover, which can prove batch_tasks and bundle_tasks
ProverTypeBatch
// ProverTypeChunk signals it's a chunk prover, which can prove chunk_tasks, which is deprecated
ProverTypeChunkDeprecated
// ProverTypeBatch signals it's a batch prover, which can prove batch_tasks and bundle_tasks, which is deprecated
ProverTypeBatchDeprecated
// ProverTypeOpenVM
ProverTypeOpenVM
)
@@ -47,9 +46,9 @@ const (
func MakeProverType(proofType message.ProofType) ProverType {
switch proofType {
case message.ProofTypeChunk:
return ProverTypeChunk
return ProverTypeChunkDeprecated
case message.ProofTypeBatch, message.ProofTypeBundle:
return ProverTypeBatch
return ProverTypeBatchDeprecated
default:
return ProverTypeUndefined
}
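A quick sanity sketch of the mapping above (assumes the testify assert package used elsewhere in this diff). Note that MakeProverType never yields ProverTypeOpenVM; that value is only ever declared directly by post-Euclid provers at login, as in the auth-message tests earlier:

func TestMakeProverTypeMapping(t *testing.T) {
	as := assert.New(t)
	// Legacy proof types funnel into the deprecated prover types.
	as.Equal(ProverTypeChunkDeprecated, MakeProverType(message.ProofTypeChunk))
	as.Equal(ProverTypeBatchDeprecated, MakeProverType(message.ProofTypeBatch))
	as.Equal(ProverTypeBatchDeprecated, MakeProverType(message.ProofTypeBundle))
	// Anything else falls through to undefined.
	as.Equal(ProverTypeUndefined, MakeProverType(message.ProofType(255)))
}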

View File

@@ -67,7 +67,7 @@ func randomURL() string {
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string, forks []string) (*cron.Collector, *http.Server) {
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
var err error
db, err = testApps.GetGormDBClient()
@@ -84,17 +84,9 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
ProverManager: &config.ProverManager{
ProversPerSession: proversPerSession,
Verifier: &config.VerifierConfig{
MockMode: true,
LowVersionCircuit: &config.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "homestead",
MinProverVersion: "v4.4.57",
},
HighVersionCircuit: &config.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "bernoulli",
ForkName: "euclidV2",
MinProverVersion: "v4.4.89",
},
},
@@ -109,20 +101,17 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
},
}
var chainConf params.ChainConfig
for _, forkName := range forks {
switch forkName {
case "bernoulli":
chainConf.BernoulliBlock = big.NewInt(100)
case "homestead":
chainConf.HomesteadBlock = big.NewInt(0)
}
}
proofCollector := cron.NewCollector(context.Background(), db, conf, nil)
router := gin.New()
api.InitController(conf, &chainConf, db, nil)
api.InitController(conf, &params.ChainConfig{
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
DarwinTime: new(uint64),
DarwinV2Time: new(uint64),
EuclidTime: new(uint64),
EuclidV2Time: new(uint64),
}, db, nil)
route.Route(router, conf, nil)
srv := &http.Server{
Addr: coordinatorURL,
@@ -142,7 +131,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
func setEnv(t *testing.T) {
var err error
version.Version = "v4.4.57"
version.Version = "v4.4.89"
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
glogger.Verbosity(log.LvlInfo)
@@ -198,7 +187,7 @@ func TestApis(t *testing.T) {
func testHandshake(t *testing.T) {
// Setup coordinator and http server.
coordinatorURL := randomURL()
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"homestead"})
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
proofCollector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -211,7 +200,7 @@ func testHandshake(t *testing.T) {
func testFailedHandshake(t *testing.T) {
// Setup coordinator and http server.
coordinatorURL := randomURL()
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"homestead"})
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
proofCollector.Stop()
}()
@@ -229,7 +218,7 @@ func testFailedHandshake(t *testing.T) {
func testGetTaskBlocked(t *testing.T) {
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -273,7 +262,7 @@ func testGetTaskBlocked(t *testing.T) {
func testOutdatedProverVersion(t *testing.T) {
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -285,12 +274,12 @@ func testOutdatedProverVersion(t *testing.T) {
batchProver := newMockProver(t, "prover_batch_test", coordinatorURL, message.ProofTypeBatch, "v1.999.999")
assert.True(t, chunkProver.healthCheckSuccess(t))
expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.56, actual version: %s", chunkProver.proverVersion)
expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.89, actual version: %s", chunkProver.proverVersion)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
assert.Equal(t, types.ErrJWTCommonErr, code)
assert.Equal(t, expectedErr, errors.New(errMsg))
expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.56, actual version: %s", batchProver.proverVersion)
expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.89, actual version: %s", batchProver.proverVersion)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
assert.Equal(t, types.ErrJWTCommonErr, code)
assert.Equal(t, expectedErr, errors.New(errMsg))
@@ -298,7 +287,7 @@ func testOutdatedProverVersion(t *testing.T) {
func testValidProof(t *testing.T) {
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -381,7 +370,7 @@ func testValidProof(t *testing.T) {
func testInvalidProof(t *testing.T) {
// Setup coordinator and ws server.
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"darwinV2"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -469,7 +458,7 @@ func testInvalidProof(t *testing.T) {
func testProofGeneratedFailed(t *testing.T) {
// Setup coordinator and ws server.
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"darwinV2"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -570,7 +559,7 @@ func testProofGeneratedFailed(t *testing.T) {
func testTimeoutProof(t *testing.T) {
// Setup coordinator and ws server.
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"darwinV2"})
collector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -593,7 +582,9 @@ func testTimeoutProof(t *testing.T) {
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 100, batch.Hash)
assert.NoError(t, err)
encodeData, err := json.Marshal(message.Halo2ChunkProof{})
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{}, MetaData: struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbChunk.Hash, encodeData, types.ProvingTaskUnassigned, 1)
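The OpenVMChunkProof placeholder with its inline MetaData struct literal recurs here and in the mock prover below. A hypothetical helper (name illustrative, not part of the diff) would keep the construction in one place; passing a non-nil raw proof such as verifier.InvalidTestProof yields the invalid variant used on the failure paths:

func mockChunkProofJSON(t *testing.T, raw []byte) []byte {
	encodeData, err := json.Marshal(message.OpenVMChunkProof{
		VmProof: &message.OpenVMProof{Proof: raw},
		MetaData: struct {
			ChunkInfo *message.ChunkInfo `json:"chunk_info"`
		}{ChunkInfo: &message.ChunkInfo{}},
	})
	assert.NoError(t, err)
	assert.NotEmpty(t, encodeData)
	return encodeData
}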

View File

@@ -207,14 +207,16 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
}
var proof []byte
switch proverTaskSchema.TaskType {
case int(message.ProofTypeChunk):
encodeData, err := json.Marshal(message.Halo2ChunkProof{})
switch message.ProofType(proverTaskSchema.TaskType) {
case message.ProofTypeChunk:
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{}, MetaData: struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case int(message.ProofTypeBatch):
encodeData, err := json.Marshal(message.Halo2BatchProof{})
case message.ProofTypeBatch:
encodeData, err := json.Marshal(message.OpenVMBatchProof{VmProof: &message.OpenVMProof{}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
@@ -223,16 +225,14 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
if proofStatus == verifiedFailed {
switch proverTaskSchema.TaskType {
case int(message.ProofTypeChunk):
chunkProof := message.Halo2ChunkProof{}
chunkProof.RawProof = []byte(verifier.InvalidTestProof)
encodeData, err := json.Marshal(&chunkProof)
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)}, MetaData: struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case int(message.ProofTypeBatch):
batchProof := message.Halo2BatchProof{}
batchProof.RawProof = []byte(verifier.InvalidTestProof)
encodeData, err := json.Marshal(&batchProof)
encodeData, err := json.Marshal(&message.OpenVMBatchProof{VmProof: &message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData

View File

@@ -1357,8 +1357,7 @@ github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b h1:5H6V6yba
github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b/go.mod h1:48uxaqVgpD8ulH8p+nrBtfeLHZ9tX82bVVdPNkW3rPE=
github.com/scroll-tech/da-codec v0.1.3-0.20250227072756-a1482833595f h1:YYbhuUwjowqI4oyXtECRofck7Fyj18e1tcRjuQlZpJE=
github.com/scroll-tech/da-codec v0.1.3-0.20250227072756-a1482833595f/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/da-codec v0.1.3-0.20250519114140-bfa7133d4ad1/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod h1:swB5NSp8pKNDuYsTxfR08bHS6L56i119PBx8fxvV8Cs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=

prover/Cargo.lock (generated, 5854 lines): file diff suppressed because it is too large

View File

@@ -1,50 +0,0 @@
[package]
name = "prover"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[patch.crates-io]
ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }
[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }
[dependencies]
anyhow = "1.0"
log = "0.4"
env_logger = "0.11.3"
serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116"
futures = "0.3.30"
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover_darwin = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.12.2", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
prover_darwin_v2 = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.13.1", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "160db6c"}
base64 = "0.13.1"
reqwest = { version = "0.12.4", features = ["gzip"] }
reqwest-middleware = "0.3"
reqwest-retry = "0.5"
once_cell = "1.19.0"
hex = "0.4.3"
tiny-keccak = { version = "2.0.0", features = ["sha3", "keccak"] }
rand = "0.8.5"
eth-keystore = "0.5.0"
rlp = "0.5.2"
tokio = "1.37.0"
async-trait = "0.1"
sled = "0.34.7"
http = "1.1.0"
clap = { version = "4.5", features = ["derive"] }
ctor = "0.2.8"

View File

@@ -1,48 +0,0 @@
.PHONY: prover lint tests_binary
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif
ZKEVM_VERSION=$(shell ./print_high_zkevm_version.sh)
ifeq (${ZKEVM_VERSION},)
$(error ZKEVM_VERSION not set)
else
$(info ZKEVM_VERSION is ${ZKEVM_VERSION})
endif
ZKEVM_COMMIT=$(shell echo ${ZKEVM_VERSION} | cut -d " " -f2)
$(info ZKEVM_COMMIT is ${ZKEVM_COMMIT})
HALO2_GPU_VERSION=$(shell ./print_halo2gpu_version.sh | sed -n '2p')
GIT_REV=$(shell git rev-parse --short HEAD)
GO_TAG=$(shell grep "var tag = " ../common/version/version.go | cut -d "\"" -f2)
ifeq (${GO_TAG},)
$(error GO_TAG not set)
else
$(info GO_TAG is ${GO_TAG})
endif
ifeq (${HALO2_GPU_VERSION},)
# use halo2_proofs with CPU
ZK_VERSION=${ZKEVM_COMMIT}-${HALO2_VERSION}
else
# use halo2_gpu
ZK_VERSION=${ZKEVM_COMMIT}-${HALO2_GPU_VERSION}
endif
prover:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --release
tests_binary:
cargo clean && cargo test --release --no-run
ls target/release/deps/prover* | grep -v "\.d" | xargs -I{} ln -sf {} ./prover.test
lint:
cargo check --all-features
cargo clippy --all-features --all-targets -- -D warnings
cargo fmt --all

View File

@@ -1,30 +0,0 @@
{
"sdk_config": {
"prover_name_prefix": "prover-1",
"keys_dir": "keys",
"coordinator": {
"base_url": "http://localhost:8555",
"retry_count": 10,
"retry_wait_time_sec": 10,
"connection_timeout_sec": 30
},
"l2geth": {
"endpoint": "http://localhost:9999"
},
"prover": {
"circuit_types": [1,2,3],
"circuit_version": "v0.13.1"
},
"db_path": "unique-db-path-for-prover-1"
},
"low_version_circuit": {
"hard_fork_name": "darwin",
"params_path": "params",
"assets_path": "assets"
},
"high_version_circuit": {
"hard_fork_name": "darwinV2",
"params_path": "params",
"assets_path": "assets"
}
}

View File

@@ -1,21 +0,0 @@
#!/bin/bash
config_file="$HOME/.cargo/config"
if [ ! -e "$config_file" ]; then
exit 0
fi
if [[ $(head -n 1 "$config_file") == "#"* ]]; then
exit 0
fi
halo2gpu_path=$(grep -Po '(?<=paths = \[")([^"]*)' $config_file)
pushd $halo2gpu_path
commit_hash=$(git log --pretty=format:%h -n 1)
echo "${commit_hash:0:7}"
popd

View File

@@ -1,10 +0,0 @@
#!/bin/bash
set -ue
higher_zkevm_item=`grep "zkevm-circuits.git" ./Cargo.lock | sort | uniq | awk -F "[#=]" '{print $3" "$4}' | sort -k 1 | tail -n 1`
higher_version=`echo $higher_zkevm_item | awk '{print $1}'`
higher_commit=`echo $higher_zkevm_item | cut -d ' ' -f2 | cut -c-7`
echo "$higher_version $higher_commit"

View File

@@ -1 +0,0 @@
nightly-2023-12-03

View File

@@ -1,9 +0,0 @@
edition = "2021"
comment_width = 100
imports_granularity = "Crate"
max_width = 100
newline_style = "Unix"
# normalize_comments = true
reorder_imports = true
wrap_comments = true

View File

@@ -1,51 +0,0 @@
use anyhow::{bail, Result};
static SCROLL_PROVER_ASSETS_DIR_ENV_NAME: &str = "SCROLL_PROVER_ASSETS_DIR";
static mut SCROLL_PROVER_ASSETS_DIRS: Vec<String> = vec![];
#[derive(Debug)]
pub struct AssetsDirEnvConfig {}
impl AssetsDirEnvConfig {
pub fn init() -> Result<()> {
let value = std::env::var(SCROLL_PROVER_ASSETS_DIR_ENV_NAME)?;
let dirs: Vec<&str> = value.split(',').collect();
if dirs.len() != 2 {
bail!("env variable SCROLL_PROVER_ASSETS_DIR value must be 2 parts seperated by comma.")
}
unsafe {
SCROLL_PROVER_ASSETS_DIRS = dirs.into_iter().map(|s| s.to_string()).collect();
log::info!(
"init SCROLL_PROVER_ASSETS_DIRS: {:?}",
SCROLL_PROVER_ASSETS_DIRS
);
}
Ok(())
}
pub fn enable_first() {
unsafe {
log::info!(
"set env {SCROLL_PROVER_ASSETS_DIR_ENV_NAME} to {}",
&SCROLL_PROVER_ASSETS_DIRS[0]
);
std::env::set_var(
SCROLL_PROVER_ASSETS_DIR_ENV_NAME,
&SCROLL_PROVER_ASSETS_DIRS[0],
);
}
}
pub fn enable_second() {
unsafe {
log::info!(
"set env {SCROLL_PROVER_ASSETS_DIR_ENV_NAME} to {}",
&SCROLL_PROVER_ASSETS_DIRS[1]
);
std::env::set_var(
SCROLL_PROVER_ASSETS_DIR_ENV_NAME,
&SCROLL_PROVER_ASSETS_DIRS[1],
);
}
}
}

View File

@@ -1,76 +0,0 @@
#![feature(lazy_cell)]
#![feature(core_intrinsics)]
mod config;
mod prover;
mod types;
mod utils;
mod zk_circuits_handler;
use clap::{ArgAction, Parser};
use prover::{LocalProver, LocalProverConfig};
use scroll_proving_sdk::{
prover::ProverBuilder,
utils::{get_version, init_tracing},
};
use tokio::runtime;
use utils::get_prover_type;
#[derive(Parser, Debug)]
#[command(disable_version_flag = true)]
struct Args {
/// Path of config file
#[arg(long = "config", default_value = "conf/config.json")]
config_file: String,
/// Version of this prover
#[arg(short, long, action = ArgAction::SetTrue)]
version: bool,
/// Path of log file
#[arg(long = "log.file")]
log_file: Option<String>,
}
fn main() -> anyhow::Result<()> {
let rt = runtime::Builder::new_multi_thread()
.thread_stack_size(16 * 1024 * 1024) // Set stack size to 16MB
.enable_all()
.build()
.expect("Failed to create Tokio runtime");
rt.block_on(async {
init_tracing();
let args = Args::parse();
if args.version {
println!("version is {}", get_version());
std::process::exit(0);
}
let cfg = LocalProverConfig::from_file(args.config_file)?;
let sdk_config = cfg.sdk_config.clone();
let mut prover_types = vec![];
sdk_config
.prover
.circuit_types
.iter()
.for_each(|circuit_type| {
if let Some(pt) = get_prover_type(*circuit_type) {
if !prover_types.contains(&pt) {
prover_types.push(pt);
}
}
});
let local_prover = LocalProver::new(cfg, prover_types);
let prover = ProverBuilder::new(sdk_config)
.with_proving_service(Box::new(local_prover))
.build()
.await?;
prover.run().await;
Ok(())
})
}

View File

@@ -1,192 +0,0 @@
use crate::{
types::ProverType,
utils::get_prover_type,
zk_circuits_handler::{CircuitsHandler, CircuitsHandlerProvider},
};
use anyhow::{anyhow, Result};
use async_trait::async_trait;
use scroll_proving_sdk::{
config::Config as SdkConfig,
prover::{
proving_service::{
GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
QueryTaskResponse, TaskStatus,
},
ProvingService,
},
};
use serde::{Deserialize, Serialize};
use std::{
fs::File,
sync::{Arc, Mutex},
time::{SystemTime, UNIX_EPOCH},
};
use tokio::{runtime::Handle, sync::RwLock, task::JoinHandle};
#[derive(Clone, Serialize, Deserialize)]
pub struct LocalProverConfig {
pub sdk_config: SdkConfig,
pub high_version_circuit: CircuitConfig,
pub low_version_circuit: CircuitConfig,
}
impl LocalProverConfig {
pub fn from_reader<R>(reader: R) -> Result<Self>
where
R: std::io::Read,
{
serde_json::from_reader(reader).map_err(|e| anyhow!(e))
}
pub fn from_file(file_name: String) -> Result<Self> {
let file = File::open(file_name)?;
Self::from_reader(&file)
}
}
#[derive(Clone, Serialize, Deserialize)]
pub struct CircuitConfig {
pub hard_fork_name: String,
pub params_path: String,
pub assets_path: String,
}
pub struct LocalProver {
config: LocalProverConfig,
prover_types: Vec<ProverType>,
circuits_handler_provider: RwLock<CircuitsHandlerProvider>,
next_task_id: Arc<Mutex<u64>>,
current_task: Arc<Mutex<Option<JoinHandle<Result<String>>>>>,
}
#[async_trait]
impl ProvingService for LocalProver {
fn is_local(&self) -> bool {
true
}
async fn get_vks(&self, req: GetVkRequest) -> GetVkResponse {
let mut prover_types = vec![];
req.circuit_types.iter().for_each(|circuit_type| {
if let Some(pt) = get_prover_type(*circuit_type) {
if !prover_types.contains(&pt) {
prover_types.push(pt);
}
}
});
let vks = self
.circuits_handler_provider
.read()
.await
.init_vks(&self.config, prover_types)
.await;
GetVkResponse { vks, error: None }
}
async fn prove(&self, req: ProveRequest) -> ProveResponse {
let handler = self
.circuits_handler_provider
.write()
.await
.get_circuits_handler(&req.hard_fork_name, self.prover_types.clone())
.expect("failed to get circuit handler");
match self.do_prove(req, handler).await {
Ok(resp) => resp,
Err(e) => ProveResponse {
status: TaskStatus::Failed,
error: Some(format!("failed to request proof: {}", e)),
..Default::default()
},
}
}
async fn query_task(&self, req: QueryTaskRequest) -> QueryTaskResponse {
let handle = self.current_task.lock().unwrap().take();
if let Some(handle) = handle {
if handle.is_finished() {
return match handle.await {
Ok(Ok(proof)) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Success,
proof: Some(proof),
..Default::default()
},
Ok(Err(e)) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some(format!("proving task failed: {}", e)),
..Default::default()
},
Err(e) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some(format!("proving task panicked: {}", e)),
..Default::default()
},
};
} else {
*self.current_task.lock().unwrap() = Some(handle);
return QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Proving,
..Default::default()
};
}
}
// If no handle is found
QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some("no proving task is running".to_string()),
..Default::default()
}
}
}
impl LocalProver {
pub fn new(config: LocalProverConfig, prover_types: Vec<ProverType>) -> Self {
let circuits_handler_provider = CircuitsHandlerProvider::new(config.clone())
.expect("failed to create circuits handler provider");
Self {
config,
prover_types,
circuits_handler_provider: RwLock::new(circuits_handler_provider),
next_task_id: Arc::new(Mutex::new(0)),
current_task: Arc::new(Mutex::new(None)),
}
}
async fn do_prove(
&self,
req: ProveRequest,
handler: Arc<Box<dyn CircuitsHandler>>,
) -> Result<ProveResponse> {
let task_id = {
let mut next_task_id = self.next_task_id.lock().unwrap();
*next_task_id += 1;
*next_task_id
};
let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
let created_at = duration.as_secs() as f64 + duration.subsec_nanos() as f64 * 1e-9;
let req_clone = req.clone();
let handle = Handle::current();
let task_handle =
tokio::task::spawn_blocking(move || handle.block_on(handler.get_proof_data(req_clone)));
*self.current_task.lock().unwrap() = Some(task_handle);
Ok(ProveResponse {
task_id: task_id.to_string(),
circuit_type: req.circuit_type,
circuit_version: req.circuit_version,
hard_fork_name: req.hard_fork_name,
status: TaskStatus::Proving,
created_at,
input: Some(req.input),
..Default::default()
})
}
}

View File

@@ -1,153 +0,0 @@
use ethers_core::types::H256;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use scroll_proving_sdk::prover::types::CircuitType;
pub type CommonHash = H256;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ProverType {
Chunk,
Batch,
}
impl ProverType {
fn from_u8(v: u8) -> Self {
match v {
1 => ProverType::Chunk,
2 => ProverType::Batch,
_ => {
panic!("invalid prover_type")
}
}
}
}
impl Serialize for ProverType {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
ProverType::Chunk => serializer.serialize_u8(1),
ProverType::Batch => serializer.serialize_u8(2),
}
}
}
impl<'de> Deserialize<'de> for ProverType {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let v: u8 = u8::deserialize(deserializer)?;
Ok(ProverType::from_u8(v))
}
}
#[derive(Serialize, Deserialize, Default)]
pub struct Task {
#[serde(rename = "type", default)]
pub task_type: CircuitType,
pub task_data: String,
#[serde(default)]
pub hard_fork_name: String,
}
#[derive(Serialize, Deserialize, Default)]
pub struct ProofDetail {
pub id: String,
#[serde(rename = "type", default)]
pub proof_type: CircuitType,
pub proof_data: String,
pub error: String,
}
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofFailureType {
Undefined,
Panic,
NoPanic,
}
impl ProofFailureType {
fn from_u8(v: u8) -> Self {
match v {
1 => ProofFailureType::Panic,
2 => ProofFailureType::NoPanic,
_ => ProofFailureType::Undefined,
}
}
}
impl Serialize for ProofFailureType {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
ProofFailureType::Undefined => serializer.serialize_u8(0),
ProofFailureType::Panic => serializer.serialize_u8(1),
ProofFailureType::NoPanic => serializer.serialize_u8(2),
}
}
}
impl<'de> Deserialize<'de> for ProofFailureType {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let v: u8 = u8::deserialize(deserializer)?;
Ok(ProofFailureType::from_u8(v))
}
}
impl Default for ProofFailureType {
fn default() -> Self {
Self::Undefined
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofStatus {
Ok,
Error,
}
impl ProofStatus {
fn from_u8(v: u8) -> Self {
match v {
0 => ProofStatus::Ok,
_ => ProofStatus::Error,
}
}
}
impl Serialize for ProofStatus {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
ProofStatus::Ok => serializer.serialize_u8(0),
ProofStatus::Error => serializer.serialize_u8(1),
}
}
}
impl<'de> Deserialize<'de> for ProofStatus {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let v: u8 = u8::deserialize(deserializer)?;
Ok(ProofStatus::from_u8(v))
}
}
impl Default for ProofStatus {
fn default() -> Self {
Self::Ok
}
}

View File

@@ -1,18 +0,0 @@
use crate::types::ProverType;
use scroll_proving_sdk::prover::types::CircuitType;
pub fn get_circuit_types(prover_type: ProverType) -> Vec<CircuitType> {
match prover_type {
ProverType::Chunk => vec![CircuitType::Chunk],
ProverType::Batch => vec![CircuitType::Batch, CircuitType::Bundle],
}
}
pub fn get_prover_type(task_type: CircuitType) -> Option<ProverType> {
match task_type {
CircuitType::Undefined => None,
CircuitType::Chunk => Some(ProverType::Chunk),
CircuitType::Batch => Some(ProverType::Batch),
CircuitType::Bundle => Some(ProverType::Batch),
}
}

View File

@@ -1,165 +0,0 @@
mod common;
mod darwin;
mod darwin_v2;
use crate::{
config::AssetsDirEnvConfig, prover::LocalProverConfig, types::ProverType,
utils::get_circuit_types,
};
use anyhow::{bail, Result};
use async_trait::async_trait;
use darwin::DarwinHandler;
use darwin_v2::DarwinV2Handler;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, CircuitType};
use std::{collections::HashMap, sync::Arc};
type HardForkName = String;
pub mod utils {
pub fn encode_vk(vk: Vec<u8>) -> String {
base64::encode(vk)
}
}
#[async_trait]
pub trait CircuitsHandler: Send + Sync {
async fn get_vk(&self, task_type: CircuitType) -> Option<Vec<u8>>;
async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String>;
}
type CircuitsHandlerBuilder = fn(
prover_types: Vec<ProverType>,
config: &LocalProverConfig,
) -> Result<Box<dyn CircuitsHandler>>;
pub struct CircuitsHandlerProvider {
config: LocalProverConfig,
circuits_handler_builder_map: HashMap<HardForkName, CircuitsHandlerBuilder>,
current_fork_name: Option<HardForkName>,
current_circuit: Option<Arc<Box<dyn CircuitsHandler>>>,
}
impl CircuitsHandlerProvider {
pub fn new(config: LocalProverConfig) -> Result<Self> {
let mut m: HashMap<HardForkName, CircuitsHandlerBuilder> = HashMap::new();
if let Err(e) = AssetsDirEnvConfig::init() {
panic!("AssetsDirEnvConfig init failed: {:#}", e);
}
fn handler_builder(
prover_types: Vec<ProverType>,
config: &LocalProverConfig,
) -> Result<Box<dyn CircuitsHandler>> {
log::info!(
"now init zk circuits handler, hard_fork_name: {}",
&config.low_version_circuit.hard_fork_name
);
AssetsDirEnvConfig::enable_first();
DarwinHandler::new(
prover_types,
&config.low_version_circuit.params_path,
&config.low_version_circuit.assets_path,
)
.map(|handler| Box::new(handler) as Box<dyn CircuitsHandler>)
}
m.insert(
config.low_version_circuit.hard_fork_name.clone(),
handler_builder,
);
fn next_handler_builder(
prover_types: Vec<ProverType>,
config: &LocalProverConfig,
) -> Result<Box<dyn CircuitsHandler>> {
log::info!(
"now init zk circuits handler, hard_fork_name: {}",
&config.high_version_circuit.hard_fork_name
);
AssetsDirEnvConfig::enable_second();
DarwinV2Handler::new(
prover_types,
&config.high_version_circuit.params_path,
&config.high_version_circuit.assets_path,
)
.map(|handler| Box::new(handler) as Box<dyn CircuitsHandler>)
}
m.insert(
config.high_version_circuit.hard_fork_name.clone(),
next_handler_builder,
);
let provider = CircuitsHandlerProvider {
config,
circuits_handler_builder_map: m,
current_fork_name: None,
current_circuit: None,
};
Ok(provider)
}
pub fn get_circuits_handler(
&mut self,
hard_fork_name: &String,
prover_types: Vec<ProverType>,
) -> Result<Arc<Box<dyn CircuitsHandler>>> {
match &self.current_fork_name {
Some(fork_name) if fork_name == hard_fork_name => {
log::info!("get circuits handler from cache");
if let Some(handler) = &self.current_circuit {
Ok(handler.clone())
} else {
bail!("missing cached handler, there must be something wrong.")
}
}
_ => {
log::info!(
"failed to get circuits handler from cache, create a new one: {hard_fork_name}"
);
if let Some(builder) = self.circuits_handler_builder_map.get(hard_fork_name) {
log::info!("building circuits handler for {hard_fork_name}");
let handler = builder(prover_types, &self.config)
.expect("failed to build circuits handler");
self.current_fork_name = Some(hard_fork_name.clone());
let arc_handler = Arc::new(handler);
self.current_circuit = Some(arc_handler.clone());
Ok(arc_handler)
} else {
bail!("missing builder, there must be something wrong.")
}
}
}
}
pub async fn init_vks(
&self,
config: &LocalProverConfig,
prover_types: Vec<ProverType>,
) -> Vec<String> {
let mut vks = Vec::new();
for (hard_fork_name, build) in self.circuits_handler_builder_map.iter() {
let handler =
build(prover_types.clone(), config).expect("failed to build circuits handler");
for prover_type in prover_types.iter() {
for task_type in get_circuit_types(*prover_type).into_iter() {
let vk = handler
.get_vk(task_type)
.await
.map_or("".to_string(), utils::encode_vk);
log::info!(
"vk for {hard_fork_name}, is {vk}, task_type: {:?}",
task_type
);
if !vk.is_empty() {
vks.push(vk)
}
}
}
}
vks
}
}

View File

@@ -1,33 +0,0 @@
use std::{collections::BTreeMap, rc::Rc};
use crate::types::ProverType;
use once_cell::sync::OnceCell;
use halo2_proofs::{halo2curves::bn256::Bn256, poly::kzg::commitment::ParamsKZG};
static mut PARAMS_MAP: OnceCell<Rc<BTreeMap<u32, ParamsKZG<Bn256>>>> = OnceCell::new();
pub fn get_params_map_instance<'a, F>(load_params_func: F) -> &'a BTreeMap<u32, ParamsKZG<Bn256>>
where
F: FnOnce() -> BTreeMap<u32, ParamsKZG<Bn256>>,
{
unsafe {
PARAMS_MAP.get_or_init(|| {
let params_map = load_params_func();
Rc::new(params_map)
})
}
}
pub fn get_degrees<F>(prover_types: &std::collections::HashSet<ProverType>, f: F) -> Vec<u32>
where
F: FnMut(&ProverType) -> Vec<u32>,
{
prover_types
.iter()
.flat_map(f)
.collect::<std::collections::HashSet<u32>>()
.into_iter()
.collect()
}

View File

@@ -1,401 +0,0 @@
use super::{common::*, CircuitsHandler};
use crate::types::ProverType;
use anyhow::{bail, Context, Ok, Result};
use async_trait::async_trait;
use once_cell::sync::Lazy;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, CircuitType};
use serde::Deserialize;
use tokio::sync::RwLock;
use crate::types::CommonHash;
use std::env;
use prover_darwin::{
aggregator::Prover as BatchProver,
check_chunk_hashes,
common::Prover as CommonProver,
config::{AGG_DEGREES, ZKEVM_DEGREES},
zkevm::Prover as ChunkProver,
BatchProof, BatchProvingTask, BlockTrace, BundleProof, BundleProvingTask, ChunkInfo,
ChunkProof, ChunkProvingTask,
};
// Only used for debugging.
static OUTPUT_DIR: Lazy<Option<String>> = Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());
#[derive(Debug, Clone, Deserialize)]
pub struct BatchTaskDetail {
pub chunk_infos: Vec<ChunkInfo>,
#[serde(flatten)]
pub batch_proving_task: BatchProvingTask,
}
type BundleTaskDetail = BundleProvingTask;
#[derive(Debug, Clone, Deserialize)]
pub struct ChunkTaskDetail {
pub block_hashes: Vec<CommonHash>,
}
#[derive(Default)]
pub struct DarwinHandler {
chunk_prover: Option<RwLock<ChunkProver<'static>>>,
batch_prover: Option<RwLock<BatchProver<'static>>>,
}
impl DarwinHandler {
pub fn new_multi(
prover_types: Vec<ProverType>,
params_dir: &str,
assets_dir: &str,
) -> Result<Self> {
let class_name = std::intrinsics::type_name::<Self>();
let prover_types_set = prover_types
.into_iter()
.collect::<std::collections::HashSet<ProverType>>();
let mut handler = Self {
batch_prover: None,
chunk_prover: None,
};
let degrees: Vec<u32> = get_degrees(&prover_types_set, |prover_type| match prover_type {
ProverType::Chunk => ZKEVM_DEGREES.clone(),
ProverType::Batch => AGG_DEGREES.clone(),
});
let params_map = get_params_map_instance(|| {
log::info!(
"calling get_params_map from {}, prover_types: {:?}, degrees: {:?}",
class_name,
prover_types_set,
degrees
);
CommonProver::load_params_map(params_dir, &degrees)
});
for prover_type in prover_types_set {
match prover_type {
ProverType::Chunk => {
handler.chunk_prover = Some(RwLock::new(ChunkProver::from_params_and_assets(
params_map, assets_dir,
)));
}
ProverType::Batch => {
handler.batch_prover = Some(RwLock::new(BatchProver::from_params_and_assets(
params_map, assets_dir,
)))
}
}
}
Ok(handler)
}
pub fn new(prover_types: Vec<ProverType>, params_dir: &str, assets_dir: &str) -> Result<Self> {
Self::new_multi(prover_types, params_dir, assets_dir)
}
async fn gen_chunk_proof_raw(&self, chunk_trace: Vec<BlockTrace>) -> Result<ChunkProof> {
if let Some(prover) = self.chunk_prover.as_ref() {
let chunk = ChunkProvingTask::from(chunk_trace);
let chunk_proof =
prover
.write()
.await
.gen_chunk_proof(chunk, None, None, self.get_output_dir())?;
return Ok(chunk_proof);
}
unreachable!("please check errors in proof_type logic")
}
async fn gen_chunk_proof(&self, prove_request: ProveRequest) -> Result<String> {
let chunk_traces: Vec<BlockTrace> = serde_json::from_str(&prove_request.input)?;
let chunk_proof = self.gen_chunk_proof_raw(chunk_traces).await?;
Ok(serde_json::to_string(&chunk_proof)?)
}
async fn gen_batch_proof_raw(&self, batch_task_detail: BatchTaskDetail) -> Result<BatchProof> {
if let Some(prover) = self.batch_prover.as_ref() {
let chunk_hashes_proofs: Vec<(ChunkInfo, ChunkProof)> = batch_task_detail
.chunk_infos
.clone()
.into_iter()
.zip(batch_task_detail.batch_proving_task.chunk_proofs.clone())
.collect();
let chunk_proofs: Vec<ChunkProof> =
chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();
let is_valid = prover.read().await.check_protocol_of_chunks(&chunk_proofs);
if !is_valid {
bail!("non-match chunk protocol")
}
check_chunk_hashes("", &chunk_hashes_proofs).context("failed to check chunk info")?;
let batch_proof = prover.write().await.gen_batch_proof(
batch_task_detail.batch_proving_task,
None,
self.get_output_dir(),
)?;
return Ok(batch_proof);
}
unreachable!("please check errors in proof_type logic")
}
async fn gen_batch_proof(&self, prove_request: ProveRequest) -> Result<String> {
let batch_task_detail: BatchTaskDetail = serde_json::from_str(&prove_request.input)?;
let batch_proof = self.gen_batch_proof_raw(batch_task_detail).await?;
Ok(serde_json::to_string(&batch_proof)?)
}
async fn gen_bundle_proof_raw(
&self,
bundle_task_detail: BundleTaskDetail,
) -> Result<BundleProof> {
if let Some(prover) = self.batch_prover.as_ref() {
let bundle_proof = prover.write().await.gen_bundle_proof(
bundle_task_detail,
None,
self.get_output_dir(),
)?;
return Ok(bundle_proof);
}
unreachable!("please check errors in proof_type logic")
}
async fn gen_bundle_proof(&self, prove_request: ProveRequest) -> Result<String> {
let bundle_task_detail: BundleTaskDetail = serde_json::from_str(&prove_request.input)?;
let bundle_proof = self.gen_bundle_proof_raw(bundle_task_detail).await?;
Ok(serde_json::to_string(&bundle_proof)?)
}
fn get_output_dir(&self) -> Option<&str> {
OUTPUT_DIR.as_deref()
}
}
#[async_trait]
impl CircuitsHandler for DarwinHandler {
async fn get_vk(&self, task_type: CircuitType) -> Option<Vec<u8>> {
match task_type {
CircuitType::Chunk => self.chunk_prover.as_ref().unwrap().read().await.get_vk(),
CircuitType::Batch => self
.batch_prover
.as_ref()
.unwrap()
.read()
.await
.get_batch_vk(),
CircuitType::Bundle => self
.batch_prover
.as_ref()
.unwrap()
.read()
.await
.get_bundle_vk(),
_ => unreachable!(),
}
}
async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
match prove_request.circuit_type {
CircuitType::Chunk => self.gen_chunk_proof(prove_request).await,
CircuitType::Batch => self.gen_batch_proof(prove_request).await,
CircuitType::Bundle => self.gen_bundle_proof(prove_request).await,
_ => unreachable!(),
}
}
}
// =================================== tests module ========================================
#[cfg(test)]
mod tests {
use super::*;
use crate::zk_circuits_handler::utils::encode_vk;
use prover_darwin::utils::chunk_trace_to_witness_block;
use scroll_proving_sdk::utils::init_tracing;
use std::{path::PathBuf, sync::LazyLock};
#[ctor::ctor]
fn init() {
init_tracing();
log::info!("logger initialized");
}
static DEFAULT_WORK_DIR: &str = "/assets";
static WORK_DIR: LazyLock<String> = LazyLock::new(|| {
std::env::var("DARWIN_TEST_DIR")
.unwrap_or(String::from(DEFAULT_WORK_DIR))
.trim_end_matches('/')
.to_string()
});
static PARAMS_PATH: LazyLock<String> = LazyLock::new(|| format!("{}/test_params", *WORK_DIR));
static ASSETS_PATH: LazyLock<String> = LazyLock::new(|| format!("{}/test_assets", *WORK_DIR));
static PROOF_DUMP_PATH: LazyLock<String> =
LazyLock::new(|| format!("{}/proof_data", *WORK_DIR));
static BATCH_DIR_PATH: LazyLock<String> =
LazyLock::new(|| format!("{}/traces/batch_24", *WORK_DIR));
static BATCH_VK_PATH: LazyLock<String> =
LazyLock::new(|| format!("{}/test_assets/vk_batch.vkey", *WORK_DIR));
static CHUNK_VK_PATH: LazyLock<String> =
LazyLock::new(|| format!("{}/test_assets/vk_chunk.vkey", *WORK_DIR));
#[test]
fn it_works() {
let result = true;
assert!(result);
}
#[tokio::test]
async fn test_circuits() -> Result<()> {
let bi_handler = DarwinHandler::new_multi(
vec![ProverType::Chunk, ProverType::Batch],
&PARAMS_PATH,
&ASSETS_PATH,
)?;
let chunk_handler = bi_handler;
let chunk_vk = chunk_handler.get_vk(CircuitType::Chunk).await.unwrap();
check_vk(CircuitType::Chunk, chunk_vk, "chunk vk must be available");
let chunk_dir_paths = get_chunk_dir_paths()?;
log::info!("chunk_dir_paths, {:?}", chunk_dir_paths);
let mut chunk_infos = vec![];
let mut chunk_proofs = vec![];
for (id, chunk_path) in chunk_dir_paths.into_iter().enumerate() {
let chunk_id = format!("chunk_proof{}", id + 1);
log::info!("start to process {chunk_id}");
let chunk_trace = read_chunk_trace(chunk_path)?;
let chunk_info = traces_to_chunk_info(chunk_trace.clone())?;
chunk_infos.push(chunk_info);
log::info!("start to prove {chunk_id}");
let chunk_proof = chunk_handler.gen_chunk_proof_raw(chunk_trace).await?;
let proof_data = serde_json::to_string(&chunk_proof)?;
dump_proof(chunk_id, proof_data)?;
chunk_proofs.push(chunk_proof);
}
let batch_handler = chunk_handler;
let batch_vk = batch_handler.get_vk(CircuitType::Batch).await.unwrap();
check_vk(CircuitType::Batch, batch_vk, "batch vk must be available");
let batch_task_detail = make_batch_task_detail(chunk_infos, chunk_proofs);
log::info!("start to prove batch");
let batch_proof = batch_handler.gen_batch_proof_raw(batch_task_detail).await?;
let proof_data = serde_json::to_string(&batch_proof)?;
dump_proof("batch_proof".to_string(), proof_data)?;
Ok(())
}
fn make_batch_task_detail(_: Vec<ChunkInfo>, _: Vec<ChunkProof>) -> BatchTaskDetail {
todo!();
// BatchTaskDetail {
// chunk_infos,
// batch_proving_task: BatchProvingTask {
// parent_batch_hash: todo!(),
// parent_state_root: todo!(),
// batch_header: todo!(),
// chunk_proofs,
// },
// }
}
fn check_vk(proof_type: CircuitType, vk: Vec<u8>, info: &str) {
log::info!("check_vk, {:?}", proof_type);
let vk_from_file = read_vk(proof_type).unwrap();
assert_eq!(vk_from_file, encode_vk(vk), "{info}")
}
fn read_vk(proof_type: CircuitType) -> Result<String> {
log::info!("read_vk, {:?}", proof_type);
let vk_file = match proof_type {
CircuitType::Chunk => CHUNK_VK_PATH.clone(),
CircuitType::Batch => BATCH_VK_PATH.clone(),
CircuitType::Bundle => todo!(),
CircuitType::Undefined => unreachable!(),
};
let data = std::fs::read(vk_file)?;
Ok(encode_vk(data))
}
fn read_chunk_trace(path: PathBuf) -> Result<Vec<BlockTrace>> {
log::info!("read_chunk_trace, {:?}", path);
let mut chunk_trace: Vec<BlockTrace> = vec![];
fn read_block_trace(file: &PathBuf) -> Result<BlockTrace> {
let f = std::fs::File::open(file)?;
Ok(serde_json::from_reader(&f)?)
}
if path.is_dir() {
let entries = std::fs::read_dir(&path)?;
let mut files: Vec<String> = entries
.into_iter()
.filter_map(|e| {
if e.is_err() {
return None;
}
let entry = e.unwrap();
if entry.path().is_dir() {
return None;
}
if let Result::Ok(file_name) = entry.file_name().into_string() {
Some(file_name)
} else {
None
}
})
.collect();
files.sort();
log::info!("files in chunk {:?} is {:?}", path, files);
for file in files {
let block_trace = read_block_trace(&path.join(file))?;
chunk_trace.push(block_trace);
}
} else {
let block_trace = read_block_trace(&path)?;
chunk_trace.push(block_trace);
}
Ok(chunk_trace)
}
fn get_chunk_dir_paths() -> Result<Vec<PathBuf>> {
let batch_path = PathBuf::from(BATCH_DIR_PATH.clone());
let entries = std::fs::read_dir(&batch_path)?;
let mut files: Vec<String> = entries
.filter_map(|e| {
if e.is_err() {
return None;
}
let entry = e.unwrap();
if entry.path().is_dir() {
if let Result::Ok(file_name) = entry.file_name().into_string() {
Some(file_name)
} else {
None
}
} else {
None
}
})
.collect();
files.sort();
log::info!("files in batch {:?} is {:?}", batch_path, files);
Ok(files.into_iter().map(|f| batch_path.join(f)).collect())
}
fn traces_to_chunk_info(chunk_trace: Vec<BlockTrace>) -> Result<ChunkInfo> {
let witness_block = chunk_trace_to_witness_block(chunk_trace)?;
Ok(ChunkInfo::from_witness_block(&witness_block, false))
}
fn dump_proof(id: String, proof_data: String) -> Result<()> {
let dump_path = PathBuf::from(PROOF_DUMP_PATH.clone());
Ok(std::fs::write(dump_path.join(id), proof_data)?)
}
}

View File

@@ -1,459 +0,0 @@
use super::{common::*, CircuitsHandler};
use crate::types::ProverType;
use anyhow::{bail, Context, Ok, Result};
use async_trait::async_trait;
use once_cell::sync::Lazy;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, CircuitType};
use serde::Deserialize;
use tokio::sync::RwLock;
use crate::types::CommonHash;
use std::env;
use prover_darwin_v2::{
aggregator::Prover as BatchProver,
check_chunk_hashes,
common::Prover as CommonProver,
config::{AGG_DEGREES, ZKEVM_DEGREES},
zkevm::Prover as ChunkProver,
BatchProof, BatchProvingTask, BlockTrace, BundleProof, BundleProvingTask, ChunkInfo,
ChunkProof, ChunkProvingTask,
};
// Only used for debugging.
static OUTPUT_DIR: Lazy<Option<String>> = Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());
#[derive(Debug, Clone, Deserialize)]
pub struct BatchTaskDetail {
pub chunk_infos: Vec<ChunkInfo>,
#[serde(flatten)]
pub batch_proving_task: BatchProvingTask,
}
type BundleTaskDetail = BundleProvingTask;
#[derive(Debug, Clone, Deserialize)]
pub struct ChunkTaskDetail {
pub block_hashes: Vec<CommonHash>,
}
#[derive(Default)]
pub struct DarwinV2Handler {
chunk_prover: Option<RwLock<ChunkProver<'static>>>,
batch_prover: Option<RwLock<BatchProver<'static>>>,
}
impl DarwinV2Handler {
pub fn new_multi(
prover_types: Vec<ProverType>,
params_dir: &str,
assets_dir: &str,
) -> Result<Self> {
let class_name = std::intrinsics::type_name::<Self>();
let prover_types_set = prover_types
.into_iter()
.collect::<std::collections::HashSet<ProverType>>();
let mut handler = Self {
batch_prover: None,
chunk_prover: None,
};
let degrees: Vec<u32> = get_degrees(&prover_types_set, |prover_type| match prover_type {
ProverType::Chunk => ZKEVM_DEGREES.clone(),
ProverType::Batch => AGG_DEGREES.clone(),
});
let params_map = get_params_map_instance(|| {
log::info!(
"calling get_params_map from {}, prover_types: {:?}, degrees: {:?}",
class_name,
prover_types_set,
degrees
);
CommonProver::load_params_map(params_dir, &degrees)
});
for prover_type in prover_types_set {
match prover_type {
ProverType::Chunk => {
handler.chunk_prover = Some(RwLock::new(ChunkProver::from_params_and_assets(
params_map, assets_dir,
)));
}
ProverType::Batch => {
handler.batch_prover = Some(RwLock::new(BatchProver::from_params_and_assets(
params_map, assets_dir,
)))
}
}
}
Ok(handler)
}
pub fn new(prover_types: Vec<ProverType>, params_dir: &str, assets_dir: &str) -> Result<Self> {
Self::new_multi(prover_types, params_dir, assets_dir)
}
async fn gen_chunk_proof_raw(&self, chunk_trace: Vec<BlockTrace>) -> Result<ChunkProof> {
if let Some(prover) = self.chunk_prover.as_ref() {
let chunk = ChunkProvingTask::from(chunk_trace);
let chunk_proof =
prover
.write()
.await
.gen_chunk_proof(chunk, None, None, self.get_output_dir())?;
return Ok(chunk_proof);
}
unreachable!("please check errors in proof_type logic")
}
async fn gen_chunk_proof(&self, prove_request: ProveRequest) -> Result<String> {
let chunk_traces: Vec<BlockTrace> = serde_json::from_str(&prove_request.input)?;
let chunk_proof = self.gen_chunk_proof_raw(chunk_traces).await?;
Ok(serde_json::to_string(&chunk_proof)?)
}
async fn gen_batch_proof_raw(&self, batch_task_detail: BatchTaskDetail) -> Result<BatchProof> {
if let Some(prover) = self.batch_prover.as_ref() {
let chunk_hashes_proofs: Vec<(ChunkInfo, ChunkProof)> = batch_task_detail
.chunk_infos
.clone()
.into_iter()
.zip(batch_task_detail.batch_proving_task.chunk_proofs.clone())
.collect();
let chunk_proofs: Vec<ChunkProof> =
chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();
let is_valid = prover.write().await.check_protocol_of_chunks(&chunk_proofs);
if !is_valid {
bail!("non-match chunk protocol")
}
check_chunk_hashes("", &chunk_hashes_proofs).context("failed to check chunk info")?;
let batch_proof = prover.write().await.gen_batch_proof(
batch_task_detail.batch_proving_task,
None,
self.get_output_dir(),
)?;
return Ok(batch_proof);
}
unreachable!("please check errors in proof_type logic")
}
async fn gen_batch_proof(&self, prove_request: ProveRequest) -> Result<String> {
let batch_task_detail: BatchTaskDetail = serde_json::from_str(&prove_request.input)?;
let batch_proof = self.gen_batch_proof_raw(batch_task_detail).await?;
Ok(serde_json::to_string(&batch_proof)?)
}
async fn gen_bundle_proof_raw(
&self,
bundle_task_detail: BundleTaskDetail,
) -> Result<BundleProof> {
if let Some(prover) = self.batch_prover.as_ref() {
let bundle_proof = prover.write().await.gen_bundle_proof(
bundle_task_detail,
None,
self.get_output_dir(),
)?;
return Ok(bundle_proof);
}
unreachable!("please check errors in proof_type logic")
}
async fn gen_bundle_proof(&self, prove_request: ProveRequest) -> Result<String> {
let bundle_task_detail: BundleTaskDetail = serde_json::from_str(&prove_request.input)?;
let bundle_proof = self.gen_bundle_proof_raw(bundle_task_detail).await?;
Ok(serde_json::to_string(&bundle_proof)?)
}
fn get_output_dir(&self) -> Option<&str> {
OUTPUT_DIR.as_deref()
}
}
#[async_trait]
impl CircuitsHandler for DarwinV2Handler {
async fn get_vk(&self, task_type: CircuitType) -> Option<Vec<u8>> {
match task_type {
CircuitType::Chunk => self.chunk_prover.as_ref().unwrap().read().await.get_vk(),
CircuitType::Batch => self
.batch_prover
.as_ref()
.unwrap()
.read()
.await
.get_batch_vk(),
CircuitType::Bundle => self
.batch_prover
.as_ref()
.unwrap()
.read()
.await
.get_bundle_vk(),
_ => unreachable!(),
}
}
async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
match prove_request.circuit_type {
CircuitType::Chunk => self.gen_chunk_proof(prove_request).await,
CircuitType::Batch => self.gen_batch_proof(prove_request).await,
CircuitType::Bundle => self.gen_bundle_proof(prove_request).await,
_ => unreachable!(),
}
}
}
// =================================== tests module ========================================
#[cfg(test)]
mod tests {
use super::*;
use crate::zk_circuits_handler::utils::encode_vk;
use ethers_core::types::H256;
use prover_darwin_v2::{
aggregator::eip4844, utils::chunk_trace_to_witness_block, BatchData, BatchHeader,
MAX_AGG_SNARKS,
};
use scroll_proving_sdk::utils::init_tracing;
use std::{path::PathBuf, sync::LazyLock};
#[ctor::ctor]
fn init() {
init_tracing();
log::info!("logger initialized");
}
static DEFAULT_WORK_DIR: &str = "/assets";
static WORK_DIR: LazyLock<String> = LazyLock::new(|| {
std::env::var("DARWIN_V2_TEST_DIR")
.unwrap_or(String::from(DEFAULT_WORK_DIR))
.trim_end_matches('/')
.to_string()
});
static PARAMS_PATH: LazyLock<String> = LazyLock::new(|| format!("{}/test_params", *WORK_DIR));
static ASSETS_PATH: LazyLock<String> = LazyLock::new(|| format!("{}/test_assets", *WORK_DIR));
static PROOF_DUMP_PATH: LazyLock<String> =
LazyLock::new(|| format!("{}/proof_data", *WORK_DIR));
static BATCH_DIR_PATH: LazyLock<String> =
LazyLock::new(|| format!("{}/traces/batch_24", *WORK_DIR));
static BATCH_VK_PATH: LazyLock<String> =
LazyLock::new(|| format!("{}/test_assets/vk_batch.vkey", *WORK_DIR));
static CHUNK_VK_PATH: LazyLock<String> =
LazyLock::new(|| format!("{}/test_assets/vk_chunk.vkey", *WORK_DIR));
#[test]
fn it_works() {
let result = true;
assert!(result);
}
#[tokio::test]
async fn test_circuits() -> Result<()> {
let bi_handler = DarwinV2Handler::new_multi(
vec![ProverType::Chunk, ProverType::Batch],
&PARAMS_PATH,
&ASSETS_PATH,
)?;
let chunk_handler = bi_handler;
let chunk_vk = chunk_handler.get_vk(CircuitType::Chunk).await.unwrap();
check_vk(CircuitType::Chunk, chunk_vk, "chunk vk must be available");
let chunk_dir_paths = get_chunk_dir_paths()?;
log::info!("chunk_dir_paths, {:?}", chunk_dir_paths);
let mut chunk_traces = vec![];
let mut chunk_infos = vec![];
let mut chunk_proofs = vec![];
for (id, chunk_path) in chunk_dir_paths.into_iter().enumerate() {
let chunk_id = format!("chunk_proof{}", id + 1);
log::info!("start to process {chunk_id}");
let chunk_trace = read_chunk_trace(chunk_path)?;
chunk_traces.push(chunk_trace.clone());
let chunk_info = traces_to_chunk_info(chunk_trace.clone())?;
chunk_infos.push(chunk_info);
log::info!("start to prove {chunk_id}");
let chunk_proof = chunk_handler.gen_chunk_proof_raw(chunk_trace).await?;
let proof_data = serde_json::to_string(&chunk_proof)?;
dump_proof(chunk_id, proof_data)?;
chunk_proofs.push(chunk_proof);
}
let batch_handler = chunk_handler;
let batch_vk = batch_handler.get_vk(CircuitType::Batch).await.unwrap();
check_vk(CircuitType::Batch, batch_vk, "batch vk must be available");
let batch_task_detail = make_batch_task_detail(chunk_traces, chunk_proofs, None);
log::info!("start to prove batch");
let batch_proof = batch_handler.gen_batch_proof_raw(batch_task_detail).await?;
let proof_data = serde_json::to_string(&batch_proof)?;
dump_proof("batch_proof".to_string(), proof_data)?;
Ok(())
}
// copied from https://github.com/scroll-tech/scroll-prover/blob/main/integration/src/prove.rs
fn get_blob_from_chunks(chunks: &[ChunkInfo]) -> Vec<u8> {
let num_chunks = chunks.len();
let padded_chunk =
ChunkInfo::mock_padded_chunk_info_for_testing(chunks.last().as_ref().unwrap());
let chunks_with_padding = [
chunks.to_vec(),
vec![padded_chunk; MAX_AGG_SNARKS - num_chunks],
]
.concat();
let batch_data = BatchData::<{ MAX_AGG_SNARKS }>::new(chunks.len(), &chunks_with_padding);
let batch_bytes = batch_data.get_batch_data_bytes();
let blob_bytes = eip4844::get_blob_bytes(&batch_bytes);
log::info!("blob_bytes len {}", blob_bytes.len());
blob_bytes
}
// TODO: chunk_infos can be extracted from chunk_proofs.
// Still needed?
fn make_batch_task_detail(
chunk_traces: Vec<Vec<BlockTrace>>,
chunk_proofs: Vec<ChunkProof>,
last_batcher_header: Option<BatchHeader<{ MAX_AGG_SNARKS }>>,
) -> BatchTaskDetail {
// dummy parent batch hash
let dummy_parent_batch_hash = H256([
0xab, 0xac, 0xad, 0xae, 0xaf, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
]);
let chunk_infos: Vec<_> = chunk_proofs.iter().map(|p| p.chunk_info.clone()).collect();
let l1_message_popped = chunk_traces
.iter()
.flatten()
.map(|chunk| chunk.num_l1_txs())
.sum();
let last_block_timestamp = chunk_traces.last().map_or(0, |block_traces| {
block_traces
.last()
.map_or(0, |block_trace| block_trace.header.timestamp.as_u64())
});
let blob_bytes = get_blob_from_chunks(&chunk_infos);
let batch_header = BatchHeader::construct_from_chunks(
last_batcher_header.map_or(4, |header| header.version),
last_batcher_header.map_or(123, |header| header.batch_index + 1),
l1_message_popped,
last_batcher_header.map_or(l1_message_popped, |header| {
header.total_l1_message_popped + l1_message_popped
}),
last_batcher_header.map_or(dummy_parent_batch_hash, |header| header.batch_hash()),
last_block_timestamp,
&chunk_infos,
&blob_bytes,
);
BatchTaskDetail {
chunk_infos,
batch_proving_task: BatchProvingTask {
chunk_proofs,
batch_header,
blob_bytes,
},
}
}
fn check_vk(proof_type: CircuitType, vk: Vec<u8>, info: &str) {
log::info!("check_vk, {:?}", proof_type);
let vk_from_file = read_vk(proof_type).unwrap();
assert_eq!(vk_from_file, encode_vk(vk), "{info}")
}
fn read_vk(proof_type: CircuitType) -> Result<String> {
log::info!("read_vk, {:?}", proof_type);
let vk_file = match proof_type {
CircuitType::Chunk => CHUNK_VK_PATH.clone(),
CircuitType::Batch => BATCH_VK_PATH.clone(),
CircuitType::Bundle => todo!(),
CircuitType::Undefined => unreachable!(),
};
let data = std::fs::read(vk_file)?;
Ok(encode_vk(data))
}
fn read_chunk_trace(path: PathBuf) -> Result<Vec<BlockTrace>> {
log::info!("read_chunk_trace, {:?}", path);
let mut chunk_trace: Vec<BlockTrace> = vec![];
fn read_block_trace(file: &PathBuf) -> Result<BlockTrace> {
let f = std::fs::File::open(file)?;
Ok(serde_json::from_reader(&f)?)
}
if path.is_dir() {
let entries = std::fs::read_dir(&path)?;
let mut files: Vec<String> = entries
.into_iter()
.filter_map(|e| {
if e.is_err() {
return None;
}
let entry = e.unwrap();
if entry.path().is_dir() {
return None;
}
if let Result::Ok(file_name) = entry.file_name().into_string() {
Some(file_name)
} else {
None
}
})
.collect();
files.sort();
log::info!("files in chunk {:?} is {:?}", path, files);
for file in files {
let block_trace = read_block_trace(&path.join(file))?;
chunk_trace.push(block_trace);
}
} else {
let block_trace = read_block_trace(&path)?;
chunk_trace.push(block_trace);
}
Ok(chunk_trace)
}
fn get_chunk_dir_paths() -> Result<Vec<PathBuf>> {
let batch_path = PathBuf::from(BATCH_DIR_PATH.clone());
let entries = std::fs::read_dir(&batch_path)?;
let mut files: Vec<String> = entries
.filter_map(|e| {
if e.is_err() {
return None;
}
let entry = e.unwrap();
if entry.path().is_dir() {
if let Result::Ok(file_name) = entry.file_name().into_string() {
Some(file_name)
} else {
None
}
} else {
None
}
})
.collect();
files.sort();
log::info!("files in batch {:?} is {:?}", batch_path, files);
Ok(files.into_iter().map(|f| batch_path.join(f)).collect())
}
fn traces_to_chunk_info(chunk_trace: Vec<BlockTrace>) -> Result<ChunkInfo> {
let witness_block = chunk_trace_to_witness_block(chunk_trace)?;
Ok(ChunkInfo::from_witness_block(&witness_block, false))
}
fn dump_proof(id: String, proof_data: String) -> Result<()> {
let dump_path = PathBuf::from(PROOF_DUMP_PATH.clone());
Ok(std::fs::write(dump_path.join(id), proof_data)?)
}
}

View File

@@ -10,33 +10,6 @@ import (
"github.com/stretchr/testify/assert"
)
func TestPackCommitBatch(t *testing.T) {
scrollChainABI, err := ScrollChainMetaData.GetAbi()
assert.NoError(t, err)
version := uint8(1)
var parentBatchHeader []byte
var chunks [][]byte
var skippedL1MessageBitmap []byte
_, err = scrollChainABI.Pack("commitBatch", version, parentBatchHeader, chunks, skippedL1MessageBitmap)
assert.NoError(t, err)
}
func TestPackCommitBatchWithBlobProof(t *testing.T) {
scrollChainABI, err := ScrollChainMetaData.GetAbi()
assert.NoError(t, err)
version := uint8(1)
var parentBatchHeader []byte
var chunks [][]byte
var skippedL1MessageBitmap []byte
var blobDataProof []byte
_, err = scrollChainABI.Pack("commitBatchWithBlobProof", version, parentBatchHeader, chunks, skippedL1MessageBitmap, blobDataProof)
assert.NoError(t, err)
}
func TestPackCommitBatches(t *testing.T) {
scrollChainABI, err := ScrollChainMetaData.GetAbi()
assert.NoError(t, err)
@@ -49,58 +22,6 @@ func TestPackCommitBatches(t *testing.T) {
assert.NoError(t, err)
}
func TestPackFinalizeBatchWithProof(t *testing.T) {
l1RollupABI, err := ScrollChainMetaData.GetAbi()
assert.NoError(t, err)
var batchHeader []byte
var prevStateRoot common.Hash
var postStateRoot common.Hash
var withdrawRoot common.Hash
var aggrProof []byte
_, err = l1RollupABI.Pack("finalizeBatchWithProof", batchHeader, prevStateRoot, postStateRoot, withdrawRoot, aggrProof)
assert.NoError(t, err)
}
func TestPackFinalizeBatchWithProof4844(t *testing.T) {
l1RollupABI, err := ScrollChainMetaData.GetAbi()
assert.NoError(t, err)
var batchHeader []byte
var prevStateRoot common.Hash
var postStateRoot common.Hash
var withdrawRoot common.Hash
var blobDataProof []byte
var aggrProof []byte
_, err = l1RollupABI.Pack("finalizeBatchWithProof4844", batchHeader, prevStateRoot, postStateRoot, withdrawRoot, blobDataProof, aggrProof)
assert.NoError(t, err)
}
func TestPackFinalizeBundleWithProof(t *testing.T) {
l1RollupABI, err := ScrollChainMetaData.GetAbi()
assert.NoError(t, err)
var batchHeader []byte
var postStateRoot common.Hash
var withdrawRoot common.Hash
var aggrProof []byte
_, err = l1RollupABI.Pack("finalizeBundleWithProof", batchHeader, postStateRoot, withdrawRoot, aggrProof)
assert.NoError(t, err)
}
func TestPackFinalizeEuclidInitialBatch(t *testing.T) {
l1RollupABI, err := ScrollChainMetaData.GetAbi()
assert.NoError(t, err)
var postStateRoot common.Hash
_, err = l1RollupABI.Pack("finalizeEuclidInitialBatch", postStateRoot)
assert.NoError(t, err)
}
func TestPackFinalizeBundlePostEuclidV2(t *testing.T) {
l1RollupABI, err := ScrollChainMetaData.GetAbi()
assert.NoError(t, err)

View File

@@ -54,6 +54,9 @@ func action(ctx *cli.Context) error {
}
minCodecVersion := encoding.CodecVersion(ctx.Uint(utils.MinCodecVersionFlag.Name))
if minCodecVersion < encoding.CodecV7 {
log.Crit("min codec version must be greater than or equal to CodecV7", "minCodecVersion", minCodecVersion)
}
// sanity check config
if cfg.L2Config.BatchProposerConfig.MaxChunksPerBatch <= 0 {

View File

@@ -102,6 +102,10 @@ func action(ctx *cli.Context) error {
}
minCodecVersion := encoding.CodecVersion(ctx.Uint(utils.MinCodecVersionFlag.Name))
if minCodecVersion < encoding.CodecV7 {
log.Crit("min codec version must be greater than or equal to CodecV7", "minCodecVersion", minCodecVersion)
}
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, genesis.Config, db, registry)
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, registry)
bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, minCodecVersion, genesis.Config, db, registry)

View File

@@ -54,7 +54,8 @@
"batch_submission": {
"min_batches": 1,
"max_batches": 6,
"timeout": 300
"timeout": 7200,
"backlog_max": 75
},
"gas_oracle_config": {
"min_gas_price": 0,
@@ -87,29 +88,18 @@
"private_key_signer_config": {
"private_key": "1515151515151515151515151515151515151515151515151515151515151515"
}
},
"l1_commit_gas_limit_multiplier": 1.2
}
},
"chunk_proposer_config": {
"propose_interval_milliseconds": 100,
"max_block_num_per_chunk": 100,
"max_tx_num_per_chunk": 100,
"max_l2_gas_per_chunk": 20000000,
"max_l1_commit_gas_per_chunk": 11234567,
"max_l1_commit_calldata_size_per_chunk": 112345,
"chunk_timeout_sec": 300,
"max_row_consumption_per_chunk": 1048319,
"gas_cost_increase_multiplier": 1.2,
"max_uncompressed_batch_bytes_size": 634880
"chunk_timeout_sec": 300
},
"batch_proposer_config": {
"propose_interval_milliseconds": 1000,
"max_l1_commit_gas_per_batch": 11234567,
"max_l1_commit_calldata_size_per_batch": 112345,
"batch_timeout_sec": 300,
"gas_cost_increase_multiplier": 1.2,
"max_uncompressed_batch_bytes_size": 634880,
"max_chunks_per_batch": 12
"max_chunks_per_batch": 45
},
"bundle_proposer_config": {
"max_batch_num_per_bundle": 20,

View File

@@ -27,7 +27,7 @@ services:
command: [
"--config", "/app/conf/proposer-tool-config.json",
"--genesis", "/app/conf/proposer-tool-genesis.json",
"--min-codec-version", "4",
"--min-codec-version", "7",
"--start-l2-block", "10000",
"--log.debug", "--verbosity", "3"
]

View File

@@ -11,7 +11,7 @@ require (
github.com/holiman/uint256 v1.3.2
github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493
github.com/scroll-tech/da-codec v0.1.3-0.20250519114140-bfa7133d4ad1
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
github.com/smartystreets/goconvey v1.8.0
github.com/spf13/viper v1.19.0

View File

@@ -249,8 +249,8 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/da-codec v0.1.3-0.20250519114140-bfa7133d4ad1 h1:6aKqJSal+QVdB5HMWMs0JTbAIZ6/iAHJx9qizz0w9dU=
github.com/scroll-tech/da-codec v0.1.3-0.20250519114140-bfa7133d4ad1/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=

View File

@@ -28,27 +28,17 @@ type L2Config struct {
// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
MaxBlockNumPerChunk uint64 `json:"max_block_num_per_chunk"`
MaxTxNumPerChunk uint64 `json:"max_tx_num_per_chunk"`
MaxL2GasPerChunk uint64 `json:"max_l2_gas_per_chunk"`
MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
MaxRowConsumptionPerChunk uint64 `json:"max_row_consumption_per_chunk"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"`
ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
MaxBlockNumPerChunk uint64 `json:"max_block_num_per_chunk"`
MaxL2GasPerChunk uint64 `json:"max_l2_gas_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
}
// BatchProposerConfig loads batch_proposer configuration items.
type BatchProposerConfig struct {
ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"`
MaxL1CommitCalldataSizePerBatch uint64 `json:"max_l1_commit_calldata_size_per_batch"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"`
MaxChunksPerBatch int `json:"max_chunks_per_batch"`
ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
MaxChunksPerBatch int `json:"max_chunks_per_batch"`
}
// BundleProposerConfig loads bundle_proposer configuration items.

View File

@@ -38,6 +38,8 @@ type BatchSubmission struct {
MaxBatches int `json:"max_batches"`
// The time in seconds after which a batch is considered stale and should be submitted ignoring the min batch count.
TimeoutSec int64 `json:"timeout"`
// The maximum number of pending batches to keep in the backlog.
BacklogMax int64 `json:"backlog_max"`
}
// ChainMonitor this config is used to get batch status from chain_monitor API.
@@ -63,8 +65,6 @@ type RelayerConfig struct {
GasOracleConfig *GasOracleConfig `json:"gas_oracle_config"`
// ChainMonitor config of monitoring service
ChainMonitor *ChainMonitor `json:"chain_monitor"`
// L1CommitGasLimitMultiplier multiplier for fallback gas limit in commitBatch txs
L1CommitGasLimitMultiplier float64 `json:"l1_commit_gas_limit_multiplier,omitempty"`
// Configs of transaction signers (GasOracle, Commit, Finalize)
GasOracleSenderSignerConfig *SignerConfig `json:"gas_oracle_sender_signer_config"`
@@ -73,8 +73,6 @@ type RelayerConfig struct {
// Indicates if bypass features specific to testing environments are enabled.
EnableTestEnvBypassFeatures bool `json:"enable_test_env_bypass_features"`
// Sets rollup-relayer to stop fake finalizing at the fork boundary
TestEnvBypassOnlyUntilForkBoundary bool `json:"test_env_bypass_only_until_fork_boundary"`
// The timeout in seconds for finalizing a batch without proof, only used when EnableTestEnvBypassFeatures is true.
FinalizeBatchWithoutProofTimeoutSec uint64 `json:"finalize_batch_without_proof_timeout_sec"`
// The timeout in seconds for finalizing a bundle without proof, only used when EnableTestEnvBypassFeatures is true.

View File

@@ -1,20 +1,11 @@
package relayer
import "errors"
const (
gasPriceDiffPrecision = 1000000
defaultGasPriceDiff = 50000 // 5%
)
var (
// ErrExecutionRevertedMessageExpired error of Message expired
ErrExecutionRevertedMessageExpired = errors.New("execution reverted: Message expired")
// ErrExecutionRevertedAlreadySuccessExecuted error of Message was already successfully executed
ErrExecutionRevertedAlreadySuccessExecuted = errors.New("execution reverted: Message was already successfully executed")
)
// ServiceType defines the various types of services within the relayer.
type ServiceType int

View File

@@ -179,13 +179,13 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
return
}
hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, data, nil, 0)
txHash, _, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, data, nil)
if err != nil {
log.Error("Failed to send gas oracle update tx to layer2", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "err", err)
return
}
err = r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
err = r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, txHash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
return
@@ -195,7 +195,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
r.lastBlobBaseFee = blobBaseFee
r.metrics.rollupL1RelayerLatestBaseFee.Set(float64(r.lastBaseFee))
r.metrics.rollupL1RelayerLatestBlobBaseFee.Set(float64(r.lastBlobBaseFee))
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee, "blobBaseFee", blobBaseFee)
log.Info("Update l1 base fee", "txHash", txHash.String(), "baseFee", baseFee, "blobBaseFee", blobBaseFee)
}
}
}

View File

@@ -146,13 +146,13 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
convey.Convey("send transaction failure", t, func() {
targetErr := errors.New("send transaction failure")
patchGuard.ApplyMethodFunc(l1Relayer.gasOracleSender, "SendTransaction", func(string, *common.Address, []byte, *kzg4844.Blob, uint64) (hash common.Hash, err error) {
patchGuard.ApplyMethodFunc(l1Relayer.gasOracleSender, "SendTransaction", func(string, *common.Address, []byte, *kzg4844.Blob) (hash common.Hash, err error) {
return common.Hash{}, targetErr
})
l1Relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(l1Relayer.gasOracleSender, "SendTransaction", func(string, *common.Address, []byte, *kzg4844.Blob, uint64) (hash common.Hash, err error) {
patchGuard.ApplyMethodFunc(l1Relayer.gasOracleSender, "SendTransaction", func(string, *common.Address, []byte, *kzg4844.Blob) (hash common.Hash, err error) {
return common.Hash{}, nil
})

View File

@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"math"
"math/big"
"sort"
"strings"
@@ -14,7 +15,6 @@ import (
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/ethclient"
@@ -33,6 +33,32 @@ import (
rutils "scroll-tech/rollup/internal/utils"
)
// RelaxType enumerates the relaxation functions we support when
// turning a baseline fee into a “target” fee; a standalone sketch of
// both curves follows these enums.
type RelaxType int
const (
// NoRelaxation means “don't touch the baseline” (i.e. fallback/default).
NoRelaxation RelaxType = iota
Exponential
Sigmoid
)
const secondsPerBlock = 12
// BaselineType enumerates the baseline estimators we support for
// deriving a baseline fee from recent fee history.
type BaselineType int
const (
// PctMin means “take the last N blocks' fees and pick the BaselineParam-th
// percentile of them (a low percentile approximates the minimum)”.
PctMin BaselineType = iota
// EWMA means “take the exponentially-weighted moving average of
// the last N blocks' fees”.
EWMA
)
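
For intuition, here is a minimal standalone sketch (not part of this diff) of the two relaxation curves named above; the formulas match the ones applied in calculateTargetPrice further down, and the helper names and numbers are illustrative only:

package main

import (
	"fmt"
	"math"
)

// exponentialRelax keeps the target near the baseline early in the window
// and lets it grow toward baseline*(1+gamma) as frac (= age/window) nears 1.
func exponentialRelax(baseline, gamma, beta, frac float64) float64 {
	return baseline * (1 + gamma*math.Exp(beta*(frac-1)))
}

// sigmoidRelax ramps up around the middle of the window (frac = 0.5) and
// saturates near baseline*(1+gamma) toward the deadline.
func sigmoidRelax(baseline, gamma, beta, frac float64) float64 {
	return baseline * (1 + gamma/(1+math.Exp(-beta*(frac-0.5))))
}

func main() {
	// gamma=0.6, beta=20, as in the 18000s entry of bestParams below
	fmt.Printf("%.4f\n", sigmoidRelax(1.0, 0.6, 20, 0.25)) // ~1.0040: barely above baseline early
	fmt.Printf("%.4f\n", sigmoidRelax(1.0, 0.6, 20, 0.75)) // ~1.5960: near the full premium late
}
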
// Layer2Relayer is responsible for:
// i. committing and finalizing L2 blocks on L1.
// ii. updating L2 gas price oracle contract on L1.
@@ -46,6 +72,7 @@ type Layer2Relayer struct {
batchOrm *orm.Batch
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
l1BlockOrm *orm.L1Block
cfg *config.RelayerConfig
@@ -61,6 +88,31 @@ type Layer2Relayer struct {
metrics *l2RelayerMetrics
chainCfg *params.ChainConfig
lastFetchedBlock uint64 // highest block number ever pulled
feeHistory []*big.Int // sliding window of blob fees
batchStrategy StrategyParams
}
// StrategyParams holds the per-window fee-submission rules.
type StrategyParams struct {
BaselineType BaselineType // "pct_min" or "ewma"
BaselineParam float64 // percentile (0-1) or α for EWMA
Gamma float64 // relaxation γ
Beta float64 // relaxation β
RelaxType RelaxType // Exponential or Sigmoid
}
// bestParams maps the 2h/5h/12h submission windows to their best rules.
// Timeouts are in seconds: 2, 5 and 12 hours (and the same values +20 min,
// to account for the time it currently takes to create a batch, since age
// is measured from block creation).
var bestParams = map[uint64]StrategyParams{
7200: {BaselineType: PctMin, BaselineParam: 0.10, Gamma: 0.4, Beta: 8, RelaxType: Exponential},
8400: {BaselineType: PctMin, BaselineParam: 0.10, Gamma: 0.4, Beta: 8, RelaxType: Exponential},
18000: {BaselineType: PctMin, BaselineParam: 0.30, Gamma: 0.6, Beta: 20, RelaxType: Sigmoid},
19200: {BaselineType: PctMin, BaselineParam: 0.30, Gamma: 0.6, Beta: 20, RelaxType: Sigmoid},
42800: {BaselineType: PctMin, BaselineParam: 0.50, Gamma: 0.5, Beta: 20, RelaxType: Sigmoid},
44400: {BaselineType: PctMin, BaselineParam: 0.50, Gamma: 0.5, Beta: 20, RelaxType: Sigmoid},
}
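
As a rough worked example (derived from the Exponential formula in calculateTargetPrice below, not from the diff itself): with the 7200s rule (γ=0.4, β=8) and a baseline of 1 Gwei, the target is about 1·(1 + 0.4·e^(8·(0.5−1))) ≈ 1.007 Gwei halfway through the window, rising to 1·(1 + 0.4) = 1.4 Gwei at the deadline: the relayer holds out near the cheap baseline early and only concedes a 40% premium as the timeout approaches.
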
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
@@ -100,12 +152,18 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
return nil, fmt.Errorf("invalid service type for l2_relayer: %v", serviceType)
}
strategy, ok := bestParams[uint64(cfg.BatchSubmission.TimeoutSec)]
if !ok {
return nil, fmt.Errorf("invalid timeout for batch submission: %v", cfg.BatchSubmission.TimeoutSec)
}
layer2Relayer := &Layer2Relayer{
ctx: ctx,
db: db,
bundleOrm: orm.NewBundle(db),
batchOrm: orm.NewBatch(db),
l1BlockOrm: orm.NewL1Block(db),
l2BlockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
@@ -116,9 +174,9 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
l1RollupABI: bridgeAbi.ScrollChainABI,
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
cfg: cfg,
chainCfg: chainCfg,
batchStrategy: strategy,
cfg: cfg,
chainCfg: chainCfg,
}
// chain_monitor client
@@ -159,14 +217,7 @@ func (r *Layer2Relayer) initializeGenesis() error {
log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{{
Header: genesis,
Transactions: nil,
WithdrawRoot: common.Hash{},
RowConsumption: &gethTypes.RowConsumption{},
}},
}
chunk := &encoding.Chunk{Blocks: []*encoding.Block{{Header: genesis}}}
err = r.db.Transaction(func(dbTX *gorm.DB) error {
if err = r.l2BlockOrm.InsertL2Blocks(r.ctx, chunk.Blocks); err != nil {
@@ -230,11 +281,11 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
}
// submit genesis batch to L1 rollup contract
txHash, err := r.commitSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, calldata, nil, 0)
txHash, _, err := r.commitSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, calldata, nil)
if err != nil {
return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
}
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash.String(), "batchHash", batchHash)
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchHash", batchHash)
// wait for confirmation
// we assume that no other transactions are sent before initializeGenesis completes
@@ -266,6 +317,10 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
}
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
// Pending batches are submitted if one of the following conditions is met:
// - the first batch is too old -> forceSubmit
// - backlogCount > r.cfg.BatchSubmission.BacklogMax -> forceSubmit
// - we have at least minBatches AND price hits a desired target price
func (r *Layer2Relayer) ProcessPendingBatches() {
// get pending batches from database in ascending order by their index.
dbBatches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, r.cfg.BatchSubmission.MaxBatches)
@@ -274,15 +329,56 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}
var batchesToSubmit []*dbBatchWithChunksAndParent
// nothing to do if we don't have any pending batches
if len(dbBatches) == 0 {
return
}
// if the backlog outgrows the max size, force-submit the oldest batches
backlogCount, err := r.batchOrm.GetFailedAndPendingBatchesCount(r.ctx)
if err != nil {
log.Error("Failed to fetch pending L2 batches count", "err", err)
return
}
r.metrics.rollupL2RelayerBacklogCounts.Set(float64(backlogCount))
var forceSubmit bool
for i, dbBatch := range dbBatches {
if i == 0 && encoding.CodecVersion(dbBatch.CodecVersion) < encoding.CodecV7 {
// if the first batch is not >= V7 then we need to submit batches one by one
r.processPendingBatchesV4(dbBatches)
startChunk, err := r.chunkOrm.GetChunkByIndex(r.ctx, dbBatches[0].StartChunkIndex)
if err != nil {
log.Error("failed to get first chunk", "err", err, "batch index", dbBatches[0].Index, "chunk index", dbBatches[0].StartChunkIndex)
return
}
oldestBlockTimestamp := time.Unix(int64(startChunk.StartBlockTime), 0)
// if the batch with the oldest index is too old, we force submit all batches that we have so far in the next step
if r.cfg.BatchSubmission.TimeoutSec > 0 && time.Since(oldestBlockTimestamp) > time.Duration(r.cfg.BatchSubmission.TimeoutSec)*time.Second {
forceSubmit = true
}
// force submit if backlog is too big
if backlogCount > r.cfg.BatchSubmission.BacklogMax {
forceSubmit = true
}
if !forceSubmit {
// check if we should skip submitting the batch based on the fee target
skip, err := r.skipSubmitByFee(oldestBlockTimestamp, r.metrics)
// return if not hitting target price
if skip {
log.Debug("Skipping batch submission", "first batch index", dbBatches[0].Index, "backlog count", backlogCount, "reason", err)
log.Debug("first batch index", dbBatches[0].Index)
log.Debug("backlog count", backlogCount)
return
}
if err != nil {
log.Warn("Failed to check if we should skip batch submission, fallback to immediate submission", "err", err)
}
}
var batchesToSubmit []*dbBatchWithChunksAndParent
for i, dbBatch := range dbBatches {
var dbChunks []*orm.Chunk
var dbParentBatch *orm.Batch
@@ -336,11 +432,6 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
break
}
// if one of the batches is too old, we force submit all batches that we have so far in the next step
if r.cfg.BatchSubmission.TimeoutSec > 0 && !forceSubmit && time.Since(dbBatch.CreatedAt) > time.Duration(r.cfg.BatchSubmission.TimeoutSec)*time.Second {
forceSubmit = true
}
if batchesToSubmitLen < r.cfg.BatchSubmission.MaxBatches {
batchesToSubmit = append(batchesToSubmit, &dbBatchWithChunksAndParent{
Batch: dbBatch,
@@ -361,7 +452,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
}
if forceSubmit {
log.Info("Forcing submission of batches due to timeout", "batch index", batchesToSubmit[0].Batch.Index, "created at", batchesToSubmit[0].Batch.CreatedAt)
log.Info("Forcing submission of batches due to timeout", "batch index", batchesToSubmit[0].Batch.Index, "first block created at", oldestBlockTimestamp)
}
// We have at least 1 batch to commit
@@ -386,7 +477,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}
txHash, err := r.commitSender.SendTransaction(r.contextIDFromBatches(batchesToSubmit), &r.cfg.RollupContractAddress, calldata, blobs, 0)
txHash, blobBaseFee, err := r.commitSender.SendTransaction(r.contextIDFromBatches(batchesToSubmit), &r.cfg.RollupContractAddress, calldata, blobs)
if err != nil {
if errors.Is(err, sender.ErrTooManyPendingBlobTxs) {
r.metrics.rollupL2RelayerProcessPendingBatchErrTooManyPendingBlobTxsTotal.Inc()
@@ -426,6 +517,8 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
r.metrics.rollupL2RelayerCommitThroughput.Add(float64(totalGasUsed))
r.metrics.rollupL2RelayerProcessPendingBatchSuccessTotal.Add(float64(len(batchesToSubmit)))
r.metrics.rollupL2RelayerProcessBatchesPerTxCount.Set(float64(len(batchesToSubmit)))
r.metrics.rollupL2RelayerCommitLatency.Set(time.Since(oldestBlockTimestamp).Seconds())
r.metrics.rollupL2RelayerCommitPrice.Set(float64(blobBaseFee))
log.Info("Sent the commitBatches tx to layer1", "batches count", len(batchesToSubmit), "start index", firstBatch.Index, "start hash", firstBatch.Hash, "end index", lastBatch.Index, "end hash", lastBatch.Hash, "tx hash", txHash.String())
}
@@ -454,117 +547,6 @@ type dbBatchWithChunksAndParent struct {
ParentBatch *orm.Batch
}
func (r *Layer2Relayer) processPendingBatchesV4(dbBatches []*orm.Batch) {
for _, dbBatch := range dbBatches {
r.metrics.rollupL2RelayerProcessPendingBatchTotal.Inc()
dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
if err != nil {
log.Error("failed to get chunks in range", "err", err)
return
}
// check codec version
for _, dbChunk := range dbChunks {
if dbBatch.CodecVersion != dbChunk.CodecVersion {
log.Error("batch codec version is different from chunk codec version", "batch index", dbBatch.Index, "chunk index", dbChunk.Index, "batch codec version", dbBatch.CodecVersion, "chunk codec version", dbChunk.CodecVersion)
return
}
}
chunks := make([]*encoding.Chunk, len(dbChunks))
for i, c := range dbChunks {
blocks, getErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
if getErr != nil {
log.Error("failed to get blocks in range", "err", getErr)
return
}
chunks[i] = &encoding.Chunk{Blocks: blocks}
}
if dbBatch.Index == 0 {
log.Error("invalid args: batch index is 0, should only happen in committing genesis batch")
return
}
dbParentBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, dbBatch.Index-1)
if getErr != nil {
log.Error("failed to get parent batch header", "err", getErr)
return
}
if dbParentBatch.CodecVersion > dbBatch.CodecVersion {
log.Error("parent batch codec version is greater than current batch codec version", "index", dbBatch.Index, "hash", dbBatch.Hash, "parent codec version", dbParentBatch.CodecVersion, "current codec version", dbBatch.CodecVersion)
return
}
var calldata []byte
var blob *kzg4844.Blob
codecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
switch codecVersion {
case encoding.CodecV4, encoding.CodecV5, encoding.CodecV6:
calldata, blob, err = r.constructCommitBatchPayloadCodecV4(dbBatch, dbParentBatch, dbChunks, chunks)
if err != nil {
log.Error("failed to construct commitBatchWithBlobProof payload for V4", "codecVersion", codecVersion, "index", dbBatch.Index, "err", err)
return
}
default:
log.Error("unsupported codec version in processPendingBatchesV4", "codecVersion", codecVersion)
return
}
// fallbackGasLimit is non-zero only in sending non-blob transactions.
fallbackGasLimit := uint64(float64(dbBatch.TotalL1CommitGas) * r.cfg.L1CommitGasLimitMultiplier)
if types.RollupStatus(dbBatch.RollupStatus) == types.RollupCommitFailed {
// use eth_estimateGas if this batch has been committed and failed at least once.
fallbackGasLimit = 0
log.Warn("Batch commit previously failed, using eth_estimateGas for the re-submission", "hash", dbBatch.Hash)
}
txHash, err := r.commitSender.SendTransaction(dbBatch.Hash, &r.cfg.RollupContractAddress, calldata, []*kzg4844.Blob{blob}, fallbackGasLimit)
if err != nil {
if errors.Is(err, sender.ErrTooManyPendingBlobTxs) {
r.metrics.rollupL2RelayerProcessPendingBatchErrTooManyPendingBlobTxsTotal.Inc()
log.Debug(
"Skipped sending commitBatch tx to L1: too many pending blob txs",
"maxPending", r.cfg.SenderConfig.MaxPendingBlobTxs,
"err", err,
)
return
}
log.Error(
"Failed to send commitBatch tx to layer1",
"index", dbBatch.Index,
"hash", dbBatch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"err", err,
"calldata", common.Bytes2Hex(calldata),
)
return
}
err = r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, dbBatch.Hash, txHash.String(), types.RollupCommitting)
if err != nil {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", dbBatch.Hash, "index", dbBatch.Index, "err", err)
return
}
var maxBlockHeight uint64
var totalGasUsed uint64
for _, dbChunk := range dbChunks {
if dbChunk.EndBlockNumber > maxBlockHeight {
maxBlockHeight = dbChunk.EndBlockNumber
}
totalGasUsed += dbChunk.TotalL2TxGas
}
r.metrics.rollupL2RelayerCommitBlockHeight.Set(float64(maxBlockHeight))
r.metrics.rollupL2RelayerCommitThroughput.Add(float64(totalGasUsed))
r.metrics.rollupL2RelayerProcessPendingBatchSuccessTotal.Inc()
log.Info("Sent the commitBatch tx to layer1", "batch index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String())
}
}
// ProcessPendingBundles submits proof to layer 1 rollup contract
func (r *Layer2Relayer) ProcessPendingBundles() {
r.metrics.rollupL2RelayerProcessPendingBundlesTotal.Inc()
@@ -599,33 +581,6 @@ func (r *Layer2Relayer) ProcessPendingBundles() {
return
}
lastFinalizedChunk, err := r.chunkOrm.GetChunkByIndex(r.ctx, lastBatch.EndChunkIndex)
if err != nil {
log.Error("failed to get last finalized chunk", "chunk index", lastBatch.EndChunkIndex)
return
}
firstUnfinalizedBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, bundle.StartBatchIndex)
if err != nil {
log.Error("failed to get first unfinalized batch", "batch index", bundle.StartBatchIndex)
return
}
firstUnfinalizedChunk, err := r.chunkOrm.GetChunkByIndex(r.ctx, firstUnfinalizedBatch.StartChunkIndex)
if err != nil {
log.Error("failed to get firsr unfinalized chunk", "chunk index", firstUnfinalizedBatch.StartChunkIndex)
return
}
if r.cfg.TestEnvBypassOnlyUntilForkBoundary {
lastFork := encoding.GetHardforkName(r.chainCfg, lastFinalizedChunk.StartBlockNumber, lastFinalizedChunk.StartBlockTime)
nextFork := encoding.GetHardforkName(r.chainCfg, firstUnfinalizedChunk.StartBlockNumber, firstUnfinalizedChunk.StartBlockTime)
if lastFork != nextFork {
log.Info("not fake finalizing past the fork boundary", "last fork", lastFork, "next fork", nextFork)
return
}
}
if err := r.finalizeBundle(bundle, false); err != nil {
log.Error("failed to finalize timeout bundle without proof", "bundle index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err)
return
@@ -724,11 +679,9 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
return fmt.Errorf("failed to get end chunk of batch: %w", err)
}
hardForkName := encoding.GetHardforkName(r.chainCfg, firstChunk.StartBlockNumber, firstChunk.StartBlockTime)
var aggProof message.BundleProof
var aggProof *message.OpenVMBundleProof
if withProof {
aggProof, err = r.bundleOrm.GetVerifiedProofByHash(r.ctx, bundle.Hash, hardForkName)
aggProof, err = r.bundleOrm.GetVerifiedProofByHash(r.ctx, bundle.Hash)
if err != nil {
return fmt.Errorf("failed to get verified proof by bundle index: %d, err: %w", bundle.Index, err)
}
@@ -740,11 +693,6 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
var calldata []byte
switch encoding.CodecVersion(bundle.CodecVersion) {
case encoding.CodecV4, encoding.CodecV5, encoding.CodecV6:
calldata, err = r.constructFinalizeBundlePayloadCodecV4(dbBatch, aggProof)
if err != nil {
return fmt.Errorf("failed to construct finalizeBundle payload codecv4, bundle index: %v, last batch index: %v, err: %w", bundle.Index, dbBatch.Index, err)
}
case encoding.CodecV7:
calldata, err = r.constructFinalizeBundlePayloadCodecV7(dbBatch, endChunk, aggProof)
if err != nil {
@@ -754,7 +702,7 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
return fmt.Errorf("unsupported codec version in finalizeBundle, bundle index: %v, version: %d", bundle.Index, bundle.CodecVersion)
}
txHash, err := r.finalizeSender.SendTransaction("finalizeBundle-"+bundle.Hash, &r.cfg.RollupContractAddress, calldata, nil, 0)
txHash, _, err := r.finalizeSender.SendTransaction("finalizeBundle-"+bundle.Hash, &r.cfg.RollupContractAddress, calldata, nil)
if err != nil {
log.Error("finalizeBundle in layer1 failed", "with proof", withProof, "index", bundle.Index,
"start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex,
@@ -950,48 +898,6 @@ func (r *Layer2Relayer) handleL2RollupRelayerConfirmLoop(ctx context.Context) {
}
}
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV4(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, *kzg4844.Blob, error) {
batch := &encoding.Batch{
Index: dbBatch.Index,
TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
ParentBatchHash: common.HexToHash(dbParentBatch.Hash),
Chunks: chunks,
}
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
if err != nil {
return nil, nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
}
daBatch, createErr := codec.NewDABatch(batch)
if createErr != nil {
return nil, nil, fmt.Errorf("failed to create DA batch: %w", createErr)
}
encodedChunks := make([][]byte, len(dbChunks))
for i, c := range dbChunks {
daChunk, createErr := codec.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
if createErr != nil {
return nil, nil, fmt.Errorf("failed to create DA chunk: %w", createErr)
}
encodedChunks[i], err = daChunk.Encode()
if err != nil {
return nil, nil, fmt.Errorf("failed to encode DA chunk: %w", err)
}
}
blobDataProof, err := daBatch.BlobDataProofForPointEvaluation()
if err != nil {
return nil, nil, fmt.Errorf("failed to get blob data proof for point evaluation: %w", err)
}
calldata, packErr := r.l1RollupABI.Pack("commitBatchWithBlobProof", daBatch.Version(), dbParentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap(), blobDataProof)
if packErr != nil {
return nil, nil, fmt.Errorf("failed to pack commitBatchWithBlobProof: %w", packErr)
}
return calldata, daBatch.Blob(), nil
}
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*dbBatchWithChunksAndParent, firstBatch, lastBatch *orm.Batch) ([]byte, []*kzg4844.Blob, uint64, uint64, error) {
var maxBlockHeight uint64
var totalGasUsed uint64
@@ -1049,35 +955,7 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*db
return calldata, blobs, maxBlockHeight, totalGasUsed, nil
}
func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV4(dbBatch *orm.Batch, aggProof message.BundleProof) ([]byte, error) {
if aggProof != nil { // finalizeBundle with proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBundleWithProof",
dbBatch.BatchHeader,
common.HexToHash(dbBatch.StateRoot),
common.HexToHash(dbBatch.WithdrawRoot),
aggProof.Proof(),
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack finalizeBundleWithProof: %w", packErr)
}
return calldata, nil
}
// finalizeBundle without proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBundle",
dbBatch.BatchHeader,
common.HexToHash(dbBatch.StateRoot),
common.HexToHash(dbBatch.WithdrawRoot),
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack finalizeBundle: %w", packErr)
}
return calldata, nil
}
func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof message.BundleProof) ([]byte, error) {
func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, error) {
if aggProof != nil { // finalizeBundle with proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBundlePostEuclidV2",
@@ -1120,6 +998,141 @@ func (r *Layer2Relayer) StopSenders() {
}
}
// fetchBlobFeeHistory returns the last windowSec seconds of blob-fee samples
// by reading the L1Block table's BlobBaseFee column.
func (r *Layer2Relayer) fetchBlobFeeHistory(windowSec uint64) ([]*big.Int, error) {
latest, err := r.l1BlockOrm.GetLatestL1BlockHeight(r.ctx)
if err != nil {
return nil, fmt.Errorf("GetLatestL1BlockHeight: %w", err)
}
// bootstrap on first call
if r.lastFetchedBlock == 0 {
// start window
r.lastFetchedBlock = latest - windowSec/secondsPerBlock
}
from := r.lastFetchedBlock + 1
// if there are new blocks
if from <= latest {
raw, err := r.l1BlockOrm.GetBlobFeesInRange(r.ctx, from, latest)
if err != nil {
return nil, fmt.Errorf("GetBlobFeesInRange: %w", err)
}
// append them
for _, v := range raw {
r.feeHistory = append(r.feeHistory, new(big.Int).SetUint64(v))
r.lastFetchedBlock++
}
}
maxLen := int(windowSec / secondsPerBlock)
if len(r.feeHistory) > maxLen {
r.feeHistory = r.feeHistory[len(r.feeHistory)-maxLen:]
}
return r.feeHistory, nil
}
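
A quick sanity check on the window arithmetic above (a self-contained illustration, assuming the 12s secondsPerBlock constant defined earlier; the numbers are hypothetical):

package main

import "fmt"

const secondsPerBlock = 12 // matches the constant above

func main() {
	windowSec := uint64(7200) // the default batch-submission timeout
	maxLen := windowSec / secondsPerBlock
	fmt.Println(maxLen) // 600: blob-fee samples kept in the sliding window; the first call also bootstraps lastFetchedBlock this many blocks behind the latest L1 block
}
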
// calculateTargetPrice applies the pct_min/EWMA baseline plus relaxation to get a big.Int target in wei
func calculateTargetPrice(windowSec uint64, strategy StrategyParams, firstTime time.Time, history []*big.Int) *big.Int {
var baseline float64 // baseline in Gwei (converting to float, small loss of precision)
n := len(history)
if n == 0 {
return big.NewInt(0)
}
switch strategy.BaselineType {
case PctMin:
// make a copy, sort by big.Int.Cmp, then pick the percentile element
sorted := make([]*big.Int, n)
copy(sorted, history)
sort.Slice(sorted, func(i, j int) bool {
return sorted[i].Cmp(sorted[j]) < 0
})
idx := int(strategy.BaselineParam * float64(n-1))
if idx < 0 {
idx = 0
}
baseline, _ = new(big.Float).
Quo(new(big.Float).SetInt(sorted[idx]), big.NewFloat(1e9)).
Float64()
case EWMA:
one := big.NewFloat(1)
alpha := big.NewFloat(strategy.BaselineParam)
oneMinusAlpha := new(big.Float).Sub(one, alpha)
// start from first history point
ewma := new(big.Float).
Quo(new(big.Float).SetInt(history[0]), big.NewFloat(1e9))
for i := 1; i < n; i++ {
curr := new(big.Float).
Quo(new(big.Float).SetInt(history[i]), big.NewFloat(1e9))
term1 := new(big.Float).Mul(alpha, curr)
term2 := new(big.Float).Mul(oneMinusAlpha, ewma)
ewma = new(big.Float).Add(term1, term2)
}
baseline, _ = ewma.Float64()
default:
// fallback to last element
baseline, _ = new(big.Float).
Quo(new(big.Float).SetInt(history[n-1]), big.NewFloat(1e9)).
Float64()
} // now baseline holds our baseline in float64 Gwei
// relaxation
age := time.Since(firstTime).Seconds()
frac := age / float64(windowSec)
var adjusted float64
switch strategy.RelaxType {
case Exponential:
adjusted = baseline * (1 + strategy.Gamma*math.Exp(strategy.Beta*(frac-1)))
case Sigmoid:
adjusted = baseline * (1 + strategy.Gamma/(1+math.Exp(-strategy.Beta*(frac-0.5))))
default:
adjusted = baseline
}
// back to wei
f := new(big.Float).Mul(big.NewFloat(adjusted), big.NewFloat(1e9))
out, _ := f.Int(nil)
return out
}
// skipSubmitByFee returns (true, reason) when submission should be skipped right now
// because the blob fee is above target and the timeout window hasn't yet elapsed.
// Otherwise it returns (false, nil), or (false, err) if the fee history is unavailable.
func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time, metrics *l2RelayerMetrics) (bool, error) {
windowSec := uint64(r.cfg.BatchSubmission.TimeoutSec)
hist, err := r.fetchBlobFeeHistory(windowSec)
if err != nil || len(hist) == 0 {
return false, fmt.Errorf(
"blob-fee history unavailable or empty: %w (history_length=%d)",
err, len(hist),
)
}
// calculate target & get current (in wei)
target := calculateTargetPrice(windowSec, r.batchStrategy, oldest, hist)
current := hist[len(hist)-1]
currentFloat, _ := current.Float64()
targetFloat, _ := target.Float64()
metrics.rollupL2RelayerCurrentBlobPrice.Set(currentFloat)
metrics.rollupL2RelayerTargetBlobPrice.Set(targetFloat)
// if current fee > target and still inside the timeout window, skip
if current.Cmp(target) > 0 && time.Since(oldest) < time.Duration(windowSec)*time.Second {
return true, fmt.Errorf(
"blob-fee above target & window not yet passed; current=%s target=%s age=%s",
current.String(), target.String(), time.Since(oldest),
)
}
// otherwise proceed with submission
return false, nil
}
func addrFromSignerConfig(config *config.SignerConfig) (common.Address, error) {
switch config.SignerType {
case sender.PrivateKeySignerType:

View File

@@ -26,6 +26,12 @@ type l2RelayerMetrics struct {
rollupL2RelayerCommitBlockHeight prometheus.Gauge
rollupL2RelayerCommitThroughput prometheus.Counter
rollupL2RelayerCurrentBlobPrice prometheus.Gauge
rollupL2RelayerTargetBlobPrice prometheus.Gauge
rollupL2RelayerCommitLatency prometheus.Gauge
rollupL2RelayerBacklogCounts prometheus.Gauge
rollupL2RelayerCommitPrice prometheus.Gauge
}
var (
@@ -104,6 +110,26 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
Name: "rollup_l2_relayer_commit_throughput",
Help: "The cumulative gas used in blocks committed by the L2 relayer",
}),
rollupL2RelayerTargetBlobPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_relayer_target_blob_price",
Help: "The target blob price for the L2 relayer's submission strategy",
}),
rollupL2RelayerCurrentBlobPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_relayer_current_blob_price",
Help: "The current blob price",
}),
rollupL2RelayerCommitLatency: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_relayer_commit_latency",
Help: "The latency of the commit measured from oldest blocktime",
}),
rollupL2RelayerBacklogCounts: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_relayer_backlog_counts",
Help: "The number of pending batches in the backlog",
}),
rollupL2RelayerCommitPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_relayer_commit_price",
Help: "The commit price for the L2 relayer's submission strategy",
}),
}
})
return l2RelayerMetric

View File

@@ -48,202 +48,186 @@ func testCreateNewRelayer(t *testing.T) {
}
func testL2RelayerProcessPendingBatches(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
for _, codecVersion := range codecVersions {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
var chainConfig *params.ChainConfig
if codecVersion == encoding.CodecV4 {
chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
} else {
assert.Fail(t, "unsupported codec version, expected CodecV4")
}
l2Cfg := cfg.L2Config
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
patchGuard := gomonkey.ApplyMethodFunc(l2Cli, "SendTransaction", func(_ context.Context, _ *gethTypes.Transaction) error {
return nil
})
patchGuard := gomonkey.ApplyMethodFunc(l2Cli, "SendTransaction", func(_ context.Context, _ *gethTypes.Transaction) error {
return nil
})
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, rutils.ChunkMetrics{})
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, rutils.ChunkMetrics{})
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, encoding.CodecV7, rutils.ChunkMetrics{})
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, encoding.CodecV7, rutils.ChunkMetrics{})
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{})
assert.NoError(t, err)
relayer.ProcessPendingBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupCommitting, statuses[0])
relayer.StopSenders()
patchGuard.Reset()
batch := &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
Blocks: []*encoding.Block{block1, block2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV7, rutils.BatchMetrics{})
assert.NoError(t, err)
relayer.ProcessPendingBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupCommitting, statuses[0])
relayer.StopSenders()
patchGuard.Reset()
}
func testL2RelayerProcessPendingBundles(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
for _, codecVersion := range codecVersions {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
var chainConfig *params.ChainConfig
if codecVersion == encoding.CodecV4 {
chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
}
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
l2Cfg := cfg.L2Config
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, rutils.ChunkMetrics{})
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, rutils.ChunkMetrics{})
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{})
assert.NoError(t, err)
bundleOrm := orm.NewBundle(db)
bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, codecVersion)
assert.NoError(t, err)
err = bundleOrm.UpdateRollupStatus(context.Background(), bundle.Hash, types.RollupPending)
assert.NoError(t, err)
err = bundleOrm.UpdateProvingStatus(context.Background(), dbBatch.Hash, types.ProvingTaskVerified)
assert.NoError(t, err)
relayer.ProcessPendingBundles()
bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
assert.NoError(t, err)
assert.Equal(t, 1, len(bundles))
// no valid proof, rollup status remains the same
assert.Equal(t, types.RollupPending, types.RollupStatus(bundles[0].RollupStatus))
proof := &message.Halo2BundleProof{
RawProof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, proof, types.ProvingTaskVerified, 600)
assert.NoError(t, err)
relayer.ProcessPendingBundles()
bundles, err = bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
assert.NoError(t, err)
assert.Equal(t, 1, len(bundles))
assert.Equal(t, types.RollupFinalizing, types.RollupStatus(bundles[0].RollupStatus))
relayer.StopSenders()
batch := &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
Blocks: []*encoding.Block{block1, block2},
}
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, encoding.CodecV7, rutils.ChunkMetrics{})
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, encoding.CodecV7, rutils.ChunkMetrics{})
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV7, rutils.BatchMetrics{})
assert.NoError(t, err)
bundleOrm := orm.NewBundle(db)
bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, encoding.CodecV7)
assert.NoError(t, err)
err = bundleOrm.UpdateRollupStatus(context.Background(), bundle.Hash, types.RollupPending)
assert.NoError(t, err)
err = bundleOrm.UpdateProvingStatus(context.Background(), bundle.Hash, types.ProvingTaskVerified)
assert.NoError(t, err)
relayer.ProcessPendingBundles()
bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
assert.NoError(t, err)
assert.Equal(t, 1, len(bundles))
// no valid proof, rollup status remains the same
assert.Equal(t, types.RollupPending, types.RollupStatus(bundles[0].RollupStatus))
patchGuard := gomonkey.ApplyMethodFunc((*message.OpenVMBundleProof)(nil), "SanityCheck", func() error {
return nil
})
defer patchGuard.Reset()
proof := &message.OpenVMBundleProof{EvmProof: &message.OpenVMEvmProof{Instances: make([]byte, 384)}}
err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, proof, types.ProvingTaskVerified, 600)
assert.NoError(t, err)
relayer.ProcessPendingBundles()
bundles, err = bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
assert.NoError(t, err)
assert.Equal(t, 1, len(bundles))
assert.Equal(t, types.RollupFinalizing, types.RollupStatus(bundles[0].RollupStatus))
relayer.StopSenders()
}
func testL2RelayerFinalizeTimeoutBundles(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
for _, codecVersion := range codecVersions {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
l2Cfg.RelayerConfig.FinalizeBundleWithoutProofTimeoutSec = 0
var chainConfig *params.ChainConfig
if codecVersion == encoding.CodecV4 {
chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
}
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
l2Cfg := cfg.L2Config
l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
l2Cfg.RelayerConfig.FinalizeBundleWithoutProofTimeoutSec = 0
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, rutils.ChunkMetrics{})
assert.NoError(t, err)
chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, rutils.ChunkMetrics{})
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, encoding.CodecV7, rutils.ChunkMetrics{})
assert.NoError(t, err)
chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, encoding.CodecV7, rutils.ChunkMetrics{})
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{})
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil)
assert.NoError(t, err)
bundleOrm := orm.NewBundle(db)
bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, codecVersion)
assert.NoError(t, err)
err = batchOrm.UpdateBundleHashInRange(context.Background(), dbBatch.Index, dbBatch.Index, bundle.Hash, nil)
assert.NoError(t, err)
assert.Eventually(t, func() bool {
relayer.ProcessPendingBundles()
bundleInDB, bundleErr := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
if bundleErr != nil {
return false
}
bundleStatus := len(bundleInDB) == 1 && types.RollupStatus(bundleInDB[0].RollupStatus) == types.RollupFinalizing &&
types.ProvingStatus(bundleInDB[0].ProvingStatus) == types.ProvingTaskVerified
batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0)
if batchErr != nil {
return false
}
batchStatus := len(batchInDB) == 1 && types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified
chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash)
if chunkErr != nil {
return false
}
chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified &&
types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified
return bundleStatus && batchStatus && chunkStatus
}, 5*time.Second, 100*time.Millisecond, "Bundle or Batch or Chunk status did not update as expected")
relayer.StopSenders()
batch := &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
Blocks: []*encoding.Block{block1, block2},
}
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV7, rutils.BatchMetrics{})
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil)
assert.NoError(t, err)
bundleOrm := orm.NewBundle(db)
bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, encoding.CodecV7)
assert.NoError(t, err)
err = batchOrm.UpdateBundleHashInRange(context.Background(), dbBatch.Index, dbBatch.Index, bundle.Hash, nil)
assert.NoError(t, err)
assert.Eventually(t, func() bool {
relayer.ProcessPendingBundles()
bundleInDB, bundleErr := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
if bundleErr != nil {
return false
}
bundleStatus := len(bundleInDB) == 1 && types.RollupStatus(bundleInDB[0].RollupStatus) == types.RollupFinalizing &&
types.ProvingStatus(bundleInDB[0].ProvingStatus) == types.ProvingTaskVerified
batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0)
if batchErr != nil {
return false
}
batchStatus := len(batchInDB) == 1 && types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified
chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash)
if chunkErr != nil {
return false
}
chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified &&
types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified
return bundleStatus && batchStatus && chunkStatus
}, 5*time.Second, 100*time.Millisecond, "Bundle or Batch or Chunk status did not update as expected")
relayer.StopSenders()
}
func testL2RelayerCommitConfirm(t *testing.T) {
@@ -268,6 +252,7 @@ func testL2RelayerCommitConfirm(t *testing.T) {
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
Blocks: []*encoding.Block{block1, block2},
}
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, rutils.BatchMetrics{})
@@ -326,13 +311,14 @@ func testL2RelayerFinalizeBundleConfirm(t *testing.T) {
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
Blocks: []*encoding.Block{block1, block2},
}
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, rutils.BatchMetrics{})
assert.NoError(t, err)
batchHashes[i] = dbBatch.Hash
bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, encoding.CodecV4)
bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, encoding.CodecV7)
assert.NoError(t, err)
bundleHashes[i] = bundle.Hash
@@ -412,6 +398,7 @@ func testGetBatchStatusByIndex(t *testing.T) {
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
Blocks: []*encoding.Block{block1, block2},
}
batchOrm := orm.NewBatch(db)

View File

@@ -7,11 +7,10 @@ import (
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
)
func (s *Sender) estimateLegacyGas(to *common.Address, data []byte, fallbackGasLimit uint64) (*FeeData, error) {
func (s *Sender) estimateLegacyGas(to *common.Address, data []byte) (*FeeData, error) {
gasPrice, err := s.client.SuggestGasPrice(s.ctx)
if err != nil {
log.Error("estimateLegacyGas SuggestGasPrice failure", "error", err)
@@ -26,21 +25,17 @@ func (s *Sender) estimateLegacyGas(to *common.Address, data []byte, fallbackGasL
gasLimit, _, err := s.estimateGasLimit(to, data, nil, gasPrice, nil, nil, nil)
if err != nil {
log.Error("estimateLegacyGas estimateGasLimit failure", "gas price", gasPrice, "from", s.transactionSigner.GetAddr().String(),
"nonce", s.transactionSigner.GetNonce(), "to address", to.String(), "fallback gas limit", fallbackGasLimit, "error", err)
if fallbackGasLimit == 0 {
return nil, err
}
gasLimit = fallbackGasLimit
} else {
gasLimit = gasLimit * 12 / 10 // 20% extra gas to avoid out of gas error
"nonce", s.transactionSigner.GetNonce(), "to address", to.String(), "error", err)
return nil, err
}
return &FeeData{
gasPrice: gasPrice,
gasLimit: gasLimit,
gasLimit: gasLimit * 12 / 10, // 20% extra gas to avoid out of gas error
}, nil
}
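With the fallback removed, a failed estimate is now fatal and the 20% head-room is applied exactly once, on success. A minimal sketch of that buffering rule (the function name is hypothetical):

```go
// Sketch: the sender's post-estimate head-room, e.g. 100000 -> 120000.
func bufferedGasLimit(estimated uint64) uint64 {
	return estimated * 12 / 10 // 20% extra gas to avoid out-of-gas errors
}
```

Multiplying before dividing avoids truncating the estimate before it is scaled; the overflow risk only appears for estimates above roughly 1.5e18 gas, far beyond anything practical.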
func (s *Sender) estimateDynamicGas(to *common.Address, data []byte, baseFee uint64, fallbackGasLimit uint64) (*FeeData, error) {
func (s *Sender) estimateDynamicGas(to *common.Address, data []byte, baseFee uint64) (*FeeData, error) {
gasTipCap, err := s.client.SuggestGasTipCap(s.ctx)
if err != nil {
log.Error("estimateDynamicGas SuggestGasTipCap failure", "error", err)
@@ -57,16 +52,12 @@ func (s *Sender) estimateDynamicGas(to *common.Address, data []byte, baseFee uin
if err != nil {
log.Error("estimateDynamicGas estimateGasLimit failure",
"from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "to address", to.String(),
"fallback gas limit", fallbackGasLimit, "error", err)
if fallbackGasLimit == 0 {
return nil, err
}
gasLimit = fallbackGasLimit
} else {
gasLimit = gasLimit * 12 / 10 // 20% extra gas to avoid out of gas error
"fallback gas limit", "error", err)
return nil, err
}
feeData := &FeeData{
gasLimit: gasLimit,
gasLimit: gasLimit * 12 / 10, // 20% extra gas to avoid out of gas error
gasTipCap: gasTipCap,
gasFeeCap: gasFeeCap,
}
@@ -76,7 +67,7 @@ func (s *Sender) estimateDynamicGas(to *common.Address, data []byte, baseFee uin
return feeData, nil
}
func (s *Sender) estimateBlobGas(to *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, baseFee, blobBaseFee uint64, fallbackGasLimit uint64) (*FeeData, error) {
func (s *Sender) estimateBlobGas(to *common.Address, data []byte, sidecar *types.BlobTxSidecar, baseFee, blobBaseFee uint64) (*FeeData, error) {
gasTipCap, err := s.client.SuggestGasTipCap(s.ctx)
if err != nil {
log.Error("estimateBlobGas SuggestGasTipCap failure", "error", err)
@@ -93,17 +84,12 @@ func (s *Sender) estimateBlobGas(to *common.Address, data []byte, sidecar *gethT
gasLimit, accessList, err := s.estimateGasLimit(to, data, sidecar, nil, gasTipCap, gasFeeCap, blobGasFeeCap)
if err != nil {
log.Error("estimateBlobGas estimateGasLimit failure",
"from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "to address", to.String(),
"fallback gas limit", fallbackGasLimit, "error", err)
if fallbackGasLimit == 0 {
return nil, err
}
gasLimit = fallbackGasLimit
} else {
gasLimit = gasLimit * 12 / 10 // 20% extra gas to avoid out of gas error
"from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "to address", to.String(), "error", err)
return nil, err
}
feeData := &FeeData{
gasLimit: gasLimit,
gasLimit: gasLimit * 12 / 10, // 20% extra gas to avoid out of gas error
gasTipCap: gasTipCap,
gasFeeCap: gasFeeCap,
blobGasFeeCap: blobGasFeeCap,
@@ -115,7 +101,7 @@ func (s *Sender) estimateBlobGas(to *common.Address, data []byte, sidecar *gethT
return feeData, nil
}
func (s *Sender) estimateGasLimit(to *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, gasPrice, gasTipCap, gasFeeCap, blobGasFeeCap *big.Int) (uint64, *types.AccessList, error) {
func (s *Sender) estimateGasLimit(to *common.Address, data []byte, sidecar *types.BlobTxSidecar, gasPrice, gasTipCap, gasFeeCap, blobGasFeeCap *big.Int) (uint64, *types.AccessList, error) {
msg := ethereum.CallMsg{
From: s.transactionSigner.GetAddr(),
To: to,

View File

@@ -156,22 +156,22 @@ func (s *Sender) SendConfirmation(cfm *Confirmation) {
s.confirmCh <- cfm
}
func (s *Sender) getFeeData(target *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, baseFee, blobBaseFee uint64, fallbackGasLimit uint64) (*FeeData, error) {
func (s *Sender) getFeeData(target *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, baseFee, blobBaseFee uint64) (*FeeData, error) {
switch s.config.TxType {
case LegacyTxType:
return s.estimateLegacyGas(target, data, fallbackGasLimit)
return s.estimateLegacyGas(target, data)
case DynamicFeeTxType:
if sidecar == nil {
return s.estimateDynamicGas(target, data, baseFee, fallbackGasLimit)
return s.estimateDynamicGas(target, data, baseFee)
}
return s.estimateBlobGas(target, data, sidecar, baseFee, blobBaseFee, fallbackGasLimit)
return s.estimateBlobGas(target, data, sidecar, baseFee, blobBaseFee)
default:
return nil, fmt.Errorf("unsupported transaction type: %s", s.config.TxType)
}
}
// SendTransaction sends a signed L2-to-L1 transaction.
func (s *Sender) SendTransaction(contextID string, target *common.Address, data []byte, blobs []*kzg4844.Blob, fallbackGasLimit uint64) (common.Hash, error) {
func (s *Sender) SendTransaction(contextID string, target *common.Address, data []byte, blobs []*kzg4844.Blob) (common.Hash, uint64, error) {
s.metrics.sendTransactionTotal.WithLabelValues(s.service, s.name).Inc()
var (
feeData *FeeData
@@ -190,37 +190,37 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
numPendingTransactions, err = s.pendingTransactionOrm.GetCountPendingTransactionsBySenderType(s.ctx, s.senderType)
if err != nil {
log.Error("failed to count pending transactions", "err: %w", err)
return common.Hash{}, fmt.Errorf("failed to count pending transactions, err: %w", err)
return common.Hash{}, 0, fmt.Errorf("failed to count pending transactions, err: %w", err)
}
if numPendingTransactions >= s.config.MaxPendingBlobTxs {
return common.Hash{}, ErrTooManyPendingBlobTxs
return common.Hash{}, 0, ErrTooManyPendingBlobTxs
}
}
sidecar, err = makeSidecar(blobs)
if err != nil {
log.Error("failed to make sidecar for blob transaction", "error", err)
return common.Hash{}, fmt.Errorf("failed to make sidecar for blob transaction, err: %w", err)
return common.Hash{}, 0, fmt.Errorf("failed to make sidecar for blob transaction, err: %w", err)
}
}
blockNumber, baseFee, blobBaseFee, err := s.getBlockNumberAndBaseFeeAndBlobFee(s.ctx)
if err != nil {
log.Error("failed to get block number and base fee", "error", err)
return common.Hash{}, fmt.Errorf("failed to get block number and base fee, err: %w", err)
return common.Hash{}, 0, fmt.Errorf("failed to get block number and base fee, err: %w", err)
}
if feeData, err = s.getFeeData(target, data, sidecar, baseFee, blobBaseFee, fallbackGasLimit); err != nil {
if feeData, err = s.getFeeData(target, data, sidecar, baseFee, blobBaseFee); err != nil {
s.metrics.sendTransactionFailureGetFee.WithLabelValues(s.service, s.name).Inc()
log.Error("failed to get fee data", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "fallback gas limit", fallbackGasLimit, "err", err)
return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err)
log.Error("failed to get fee data", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "err", err)
return common.Hash{}, 0, fmt.Errorf("failed to get fee data, err: %w", err)
}
signedTx, err := s.createTx(feeData, target, data, sidecar, s.transactionSigner.GetNonce())
if err != nil {
s.metrics.sendTransactionFailureSendTx.WithLabelValues(s.service, s.name).Inc()
log.Error("failed to create signed tx (non-resubmit case)", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "err", err)
return common.Hash{}, fmt.Errorf("failed to create signed transaction, err: %w", err)
return common.Hash{}, 0, fmt.Errorf("failed to create signed transaction, err: %w", err)
}
// Insert the transaction into the pending transaction table.
@@ -228,14 +228,14 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
// This case will be handled by the checkPendingTransaction function.
if err = s.pendingTransactionOrm.InsertPendingTransaction(s.ctx, contextID, s.getSenderMeta(), signedTx, blockNumber); err != nil {
log.Error("failed to insert transaction", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "err", err)
return common.Hash{}, fmt.Errorf("failed to insert transaction, err: %w", err)
return common.Hash{}, 0, fmt.Errorf("failed to insert transaction, err: %w", err)
}
if err := s.client.SendTransaction(s.ctx, signedTx); err != nil {
// Delete the transaction from the pending transaction table if it fails to send.
if updateErr := s.pendingTransactionOrm.DeleteTransactionByTxHash(s.ctx, signedTx.Hash()); updateErr != nil {
log.Error("failed to delete transaction", "tx hash", signedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", signedTx.Nonce(), "err", updateErr)
return common.Hash{}, fmt.Errorf("failed to delete transaction, err: %w", updateErr)
return common.Hash{}, 0, fmt.Errorf("failed to delete transaction, err: %w", updateErr)
}
log.Error("failed to send tx", "tx hash", signedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", signedTx.Nonce(), "err", err)
@@ -244,12 +244,12 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
if strings.Contains(err.Error(), "nonce too low") {
s.resetNonce(context.Background())
}
return common.Hash{}, fmt.Errorf("failed to send transaction, err: %w", err)
return common.Hash{}, 0, fmt.Errorf("failed to send transaction, err: %w", err)
}
s.transactionSigner.SetNonce(signedTx.Nonce() + 1)
return signedTx.Hash(), nil
return signedTx.Hash(), blobBaseFee, nil
}
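SendTransaction now also surfaces the blob base fee observed at send time, presumably so the submission strategy can log commit prices. A hedged caller-side sketch, written as if it lived alongside the Sender type (submitAndRecord and recordCommitPrice are invented for illustration; the three-value return matches the diff above):

```go
// Sketch: adapting a caller to the new (hash, blobBaseFee, error) signature.
package sender

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)

// recordCommitPrice is a stand-in; in the relayer this would feed a metric.
func recordCommitPrice(blobBaseFee uint64) {}

func submitAndRecord(s *Sender, id string, to common.Address, data []byte, blobs []*kzg4844.Blob) (common.Hash, error) {
	txHash, blobBaseFee, err := s.SendTransaction(id, &to, data, blobs)
	if err != nil {
		return common.Hash{}, fmt.Errorf("send transaction: %w", err)
	}
	recordCommitPrice(blobBaseFee) // e.g. update a blob-fee gauge
	return txHash, nil
}
```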
func (s *Sender) createTx(feeData *FeeData, target *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, nonce uint64) (*gethTypes.Transaction, error) {

View File

@@ -21,7 +21,6 @@ import (
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
@@ -34,7 +33,6 @@ import (
bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/mock_bridge"
)
@@ -136,7 +134,6 @@ func TestSender(t *testing.T) {
setupEnv(t)
t.Run("test new sender", testNewSender)
t.Run("test send and retrieve transaction", testSendAndRetrieveTransaction)
t.Run("test fallback gas limit", testFallbackGasLimit)
t.Run("test access list transaction gas limit", testAccessListTransactionGasLimit)
t.Run("test resubmit zero gas price transaction", testResubmitZeroGasPriceTransaction)
t.Run("test resubmit non-zero gas price transaction", testResubmitNonZeroGasPriceTransaction)
@@ -190,7 +187,7 @@ func testSendAndRetrieveTransaction(t *testing.T) {
if txBlob[i] != nil {
blobs = []*kzg4844.Blob{txBlob[i]}
}
hash, err := s.SendTransaction("0", &common.Address{}, nil, blobs, 0)
hash, _, err := s.SendTransaction("0", &common.Address{}, nil, blobs)
assert.NoError(t, err)
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
assert.NoError(t, err)
@@ -214,63 +211,6 @@ func testSendAndRetrieveTransaction(t *testing.T) {
}
}
func testFallbackGasLimit(t *testing.T) {
for i, txType := range txTypes {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.Confirmations = rpc.LatestBlockNumber
s, err := NewSender(context.Background(), &cfgCopy, signerConfig, "test", "test", types.SenderTypeUnknown, db, nil)
assert.NoError(t, err)
client, err := ethclient.Dial(cfgCopy.Endpoint)
assert.NoError(t, err)
var blobs []*kzg4844.Blob
if txBlob[i] != nil {
blobs = []*kzg4844.Blob{txBlob[i]}
}
// FallbackGasLimit = 0
txHash0, err := s.SendTransaction("0", &common.Address{}, nil, blobs, 0)
assert.NoError(t, err)
tx0, _, err := client.TransactionByHash(context.Background(), txHash0)
assert.NoError(t, err)
assert.Greater(t, tx0.Gas(), uint64(0))
assert.Eventually(t, func() bool {
var txs []orm.PendingTransaction
txs, err = s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 100)
assert.NoError(t, err)
return len(txs) == 0
}, 30*time.Second, time.Second)
// FallbackGasLimit = 100000
patchGuard := gomonkey.ApplyPrivateMethod(s, "estimateGasLimit",
func(contract *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, gasPrice, gasTipCap, gasFeeCap, blobGasFeeCap *big.Int) (uint64, *gethTypes.AccessList, error) {
return 0, nil, errors.New("estimateGasLimit error")
},
)
txHash1, err := s.SendTransaction("1", &common.Address{}, nil, blobs, 100000)
assert.NoError(t, err)
tx1, _, err := client.TransactionByHash(context.Background(), txHash1)
assert.NoError(t, err)
assert.Equal(t, uint64(100000), tx1.Gas())
assert.Eventually(t, func() bool {
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 100)
assert.NoError(t, err)
return len(txs) == 0
}, 30*time.Second, time.Second)
s.Stop()
patchGuard.Reset()
}
}
func testResubmitZeroGasPriceTransaction(t *testing.T) {
for i, txType := range txTypes {
if txBlob[i] != nil {
@@ -605,10 +545,10 @@ func testResubmitNonceGappedTransaction(t *testing.T) {
if txBlob[i] != nil {
blobs = []*kzg4844.Blob{txBlob[i]}
}
_, err = s.SendTransaction("test-1", &common.Address{}, nil, blobs, 0)
_, _, err = s.SendTransaction("test-1", &common.Address{}, nil, blobs)
assert.NoError(t, err)
_, err = s.SendTransaction("test-2", &common.Address{}, nil, blobs, 0)
_, _, err = s.SendTransaction("test-2", &common.Address{}, nil, blobs)
assert.NoError(t, err)
s.checkPendingTransaction()
@@ -649,7 +589,7 @@ func testCheckPendingTransactionTxConfirmed(t *testing.T) {
return nil
})
_, err = s.SendTransaction("test", &common.Address{}, nil, randBlobs(1), 0)
_, _, err = s.SendTransaction("test", &common.Address{}, nil, randBlobs(1))
assert.NoError(t, err)
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
@@ -691,7 +631,7 @@ func testCheckPendingTransactionResubmitTxConfirmed(t *testing.T) {
return nil
})
originTxHash, err := s.SendTransaction("test", &common.Address{}, nil, randBlobs(1), 0)
originTxHash, _, err := s.SendTransaction("test", &common.Address{}, nil, randBlobs(1))
assert.NoError(t, err)
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
@@ -751,7 +691,7 @@ func testCheckPendingTransactionReplacedTxConfirmed(t *testing.T) {
return nil
})
txHash, err := s.SendTransaction("test", &common.Address{}, nil, randBlobs(1), 0)
txHash, _, err := s.SendTransaction("test", &common.Address{}, nil, randBlobs(1))
assert.NoError(t, err)
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
@@ -821,7 +761,7 @@ func testCheckPendingTransactionTxMultipleTimesWithOnlyOneTxPending(t *testing.T
return nil
})
_, err = s.SendTransaction("test", &common.Address{}, nil, randBlobs(1), 0)
_, _, err = s.SendTransaction("test", &common.Address{}, nil, randBlobs(1))
assert.NoError(t, err)
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
@@ -895,7 +835,7 @@ func testBlobTransactionWithBlobhashOpContractCall(t *testing.T) {
assert.NoError(t, err)
defer s.Stop()
_, err = s.SendTransaction("0", &testContractsAddress, data, blobs, 0)
_, _, err = s.SendTransaction("0", &testContractsAddress, data, blobs)
assert.NoError(t, err)
var txHash common.Hash
@@ -953,10 +893,10 @@ func testSendBlobCarryingTxOverLimit(t *testing.T) {
assert.NoError(t, err)
for i := 0; i < int(cfgCopy.MaxPendingBlobTxs); i++ {
_, err = s.SendTransaction("0", &common.Address{}, nil, randBlobs(1), 0)
_, _, err = s.SendTransaction("0", &common.Address{}, nil, randBlobs(1))
assert.NoError(t, err)
}
_, err = s.SendTransaction("0", &common.Address{}, nil, randBlobs(1), 0)
_, _, err = s.SendTransaction("0", &common.Address{}, nil, randBlobs(1))
assert.ErrorIs(t, err, ErrTooManyPendingBlobTxs)
s.Stop()
}
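The over-limit test drives a simple admission rule: count the sender's pending blob txs first and reject at the cap, before doing any estimation work. A compact sketch of the guard (the error value is redeclared here only to keep the sketch self-contained):

```go
// Sketch: admission check for blob-carrying transactions.
package example

import "errors"

var ErrTooManyPendingBlobTxs = errors.New("too many pending blob txs")

// admitBlobTx returns ErrTooManyPendingBlobTxs once the pending count
// reaches the configured maximum, mirroring the check in SendTransaction.
func admitBlobTx(pending, maxPending int64) error {
	if pending >= maxPending {
		return ErrTooManyPendingBlobTxs
	}
	return nil
}
```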

View File

@@ -29,12 +29,7 @@ type BatchProposer struct {
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
maxL1CommitGasPerBatch uint64
maxL1CommitCalldataSizePerBatch uint64
batchTimeoutSec uint64
gasCostIncreaseMultiplier float64
maxUncompressedBatchBytesSize uint64
maxChunksPerBatch int
cfg *config.BatchProposerConfig
replayMode bool
minCodecVersion encoding.CodecVersion
@@ -44,14 +39,10 @@ type BatchProposer struct {
proposeBatchFailureTotal prometheus.Counter
proposeBatchUpdateInfoTotal prometheus.Counter
proposeBatchUpdateInfoFailureTotal prometheus.Counter
totalL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
totalL1CommitBlobSize prometheus.Gauge
batchChunksNum prometheus.Gauge
batchFirstBlockTimeoutReached prometheus.Counter
batchChunksProposeNotEnoughTotal prometheus.Counter
batchEstimateGasTime prometheus.Gauge
batchEstimateCalldataSizeTime prometheus.Gauge
batchEstimateBlobSizeTime prometheus.Gauge
// total number of times that batch proposer stops early due to compressed data compatibility breach
@@ -63,29 +54,18 @@ type BatchProposer struct {
// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
log.Info("new batch proposer",
"maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch,
"maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch,
"batchTimeoutSec", cfg.BatchTimeoutSec,
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
"maxBlobSize", maxBlobSize,
"maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize)
log.Info("new batch proposer", "batchTimeoutSec", cfg.BatchTimeoutSec, "maxBlobSize", maxBlobSize)
p := &BatchProposer{
ctx: ctx,
db: db,
batchOrm: orm.NewBatch(db),
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
maxL1CommitGasPerBatch: cfg.MaxL1CommitGasPerBatch,
maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
batchTimeoutSec: cfg.BatchTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
maxChunksPerBatch: cfg.MaxChunksPerBatch,
replayMode: false,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,
ctx: ctx,
db: db,
batchOrm: orm.NewBatch(db),
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
cfg: cfg,
replayMode: false,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,
batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_batch_circle_total",
@@ -107,14 +87,6 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minC
Name: "rollup_propose_batch_due_to_compressed_data_compatibility_breach_total",
Help: "Total number of propose batch due to compressed data compatibility breach.",
}),
totalL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_batch_total_l1_commit_gas",
Help: "The total l1 commit gas",
}),
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_batch_total_l1_call_data_size",
Help: "The total l1 call data size",
}),
totalL1CommitBlobSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_batch_total_l1_commit_blob_size",
Help: "The total l1 commit blob size",
@@ -131,14 +103,6 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minC
Name: "rollup_propose_batch_chunks_propose_not_enough_total",
Help: "Total number of batch chunk propose not enough",
}),
batchEstimateGasTime: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_batch_estimate_gas_time",
Help: "Time taken to estimate gas for the chunk.",
}),
batchEstimateCalldataSizeTime: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_batch_estimate_calldata_size_time",
Help: "Time taken to estimate calldata size for the chunk.",
}),
batchEstimateBlobSizeTime: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_batch_estimate_blob_size_time",
Help: "Time taken to estimate blob size for the chunk.",
@@ -197,6 +161,7 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en
}
batch.Chunks = batch.Chunks[:len(batch.Chunks)-1]
batch.PostL1MessageQueueHash = batch.Chunks[len(batch.Chunks)-1].PostL1MessageQueueHash
log.Info("Batch not compatible with compressed data, removing last chunk", "batch index", batch.Index, "truncated chunk length", len(batch.Chunks))
}
@@ -277,7 +242,7 @@ func (p *BatchProposer) proposeBatch() error {
}
// always take the minimum of the configured max chunks per batch and the codec's max chunks per batch
maxChunksThisBatch := min(codec.MaxNumChunksPerBatch(), p.maxChunksPerBatch)
maxChunksThisBatch := min(codec.MaxNumChunksPerBatch(), p.cfg.MaxChunksPerBatch)
// select at most maxChunksThisBatch chunks
dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, firstUnbatchedChunkIndex, maxChunksThisBatch)
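The effective cap is always the smaller of the operator-configured value and the codec's hard limit (45 chunks per batch for CodecV4, per the tests in this diff), via Go 1.21's builtin min. A one-line illustration with hypothetical names:

```go
// Sketch: per-batch chunk cap; the codec limit wins when config is huge.
func effectiveMaxChunks(codecMax, cfgMax int) int {
	return min(codecMax, cfgMax) // e.g. min(45, math.MaxInt32) == 45
}
```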
@@ -319,9 +284,7 @@ func (p *BatchProposer) proposeBatch() error {
for i, chunk := range daChunks {
batch.Chunks = append(batch.Chunks, chunk)
if codec.Version() >= encoding.CodecV7 {
batch.Blocks = append(batch.Blocks, chunk.Blocks...)
}
batch.Blocks = append(batch.Blocks, chunk.Blocks...)
batch.PostL1MessageQueueHash = common.HexToHash(dbChunks[i].PostL1MessageQueueHash)
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version())
@@ -331,33 +294,21 @@ func (p *BatchProposer) proposeBatch() error {
p.recordTimerBatchMetrics(metrics)
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(metrics.L1CommitGas))
if metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch || totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch ||
metrics.L1CommitBlobSize > maxBlobSize || metrics.L1CommitUncompressedBatchBytesSize > p.maxUncompressedBatchBytesSize {
if metrics.L1CommitBlobSize > maxBlobSize {
if i == 0 {
// The first chunk exceeds hard limits, which indicates a bug in the chunk-proposer, manual fix is needed.
return fmt.Errorf("the first chunk exceeds limits; start block number: %v, end block number: %v, limits: %+v, maxChunkNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxBlobSize: %v, maxUncompressedBatchBytesSize: %v",
dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, maxChunksThisBatch, p.maxL1CommitCalldataSizePerBatch, p.maxL1CommitGasPerBatch, maxBlobSize, p.maxUncompressedBatchBytesSize)
return fmt.Errorf("the first chunk exceeds limits; start block number: %v, end block number: %v, limits: %+v, maxChunkNum: %v, maxBlobSize: %v",
dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, maxChunksThisBatch, maxBlobSize)
}
log.Debug("breaking limit condition in batching",
"l1CommitCalldataSize", metrics.L1CommitCalldataSize,
"maxL1CommitCalldataSize", p.maxL1CommitCalldataSizePerBatch,
"l1CommitGas", metrics.L1CommitGas,
"overEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitGas", p.maxL1CommitGasPerBatch,
"l1CommitBlobSize", metrics.L1CommitBlobSize,
"maxBlobSize", maxBlobSize,
"L1CommitUncompressedBatchBytesSize", metrics.L1CommitUncompressedBatchBytesSize,
"maxUncompressedBatchBytesSize", p.maxUncompressedBatchBytesSize)
"maxBlobSize", maxBlobSize)
lastChunk := batch.Chunks[len(batch.Chunks)-1]
batch.Chunks = batch.Chunks[:len(batch.Chunks)-1]
batch.PostL1MessageQueueHash = common.HexToHash(dbChunks[i-1].PostL1MessageQueueHash)
if codec.Version() >= encoding.CodecV7 {
batch.Blocks = batch.Blocks[:len(batch.Blocks)-len(lastChunk.Blocks)]
}
batch.Blocks = batch.Blocks[:len(batch.Blocks)-len(lastChunk.Blocks)]
metrics, err = utils.CalculateBatchMetrics(&batch, codec.Version())
if err != nil {
@@ -374,7 +325,7 @@ func (p *BatchProposer) proposeBatch() error {
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
}
currentTimeSec := uint64(time.Now().Unix())
if metrics.FirstBlockTimestamp+p.batchTimeoutSec < currentTimeSec || metrics.NumChunks == uint64(maxChunksThisBatch) {
if metrics.FirstBlockTimestamp+p.cfg.BatchTimeoutSec < currentTimeSec || metrics.NumChunks == uint64(maxChunksThisBatch) {
log.Info("reached maximum number of chunks in batch or first block timeout",
"chunk count", metrics.NumChunks,
"start block number", dbChunks[0].StartBlockNumber,
@@ -410,17 +361,11 @@ func (p *BatchProposer) getDAChunks(dbChunks []*orm.Chunk) ([]*encoding.Chunk, e
}
func (p *BatchProposer) recordAllBatchMetrics(metrics *utils.BatchMetrics) {
p.totalL1CommitGas.Set(float64(metrics.L1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(metrics.L1CommitCalldataSize))
p.batchChunksNum.Set(float64(metrics.NumChunks))
p.totalL1CommitBlobSize.Set(float64(metrics.L1CommitBlobSize))
p.batchEstimateGasTime.Set(float64(metrics.EstimateGasTime))
p.batchEstimateCalldataSizeTime.Set(float64(metrics.EstimateCalldataSizeTime))
p.batchEstimateBlobSizeTime.Set(float64(metrics.EstimateBlobSizeTime))
}
func (p *BatchProposer) recordTimerBatchMetrics(metrics *utils.BatchMetrics) {
p.batchEstimateGasTime.Set(float64(metrics.EstimateGasTime))
p.batchEstimateCalldataSizeTime.Set(float64(metrics.EstimateCalldataSizeTime))
p.batchEstimateBlobSizeTime.Set(float64(metrics.EstimateBlobSizeTime))
}
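After this refactor the proposer keeps one cfg pointer instead of copying nine fields; only two knobs remain relevant in the code shown here. A sketch of the slimmed shape, inferred from this diff (treat the exact field set as an assumption, since the full config file is not shown):

```go
// Sketch: the batch-proposer settings still consulted post-Euclid.
type batchProposerSettings struct {
	BatchTimeoutSec   uint64 // force a batch once the first block is this old
	MaxChunksPerBatch int    // further capped by the codec's own limit
}
```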

View File

@@ -20,60 +20,24 @@ import (
"scroll-tech/rollup/internal/utils"
)
func testBatchProposerLimitsCodecV4(t *testing.T) {
func testBatchProposerLimitsCodecV7(t *testing.T) {
tests := []struct {
name string
maxL1CommitGas uint64
maxL1CommitCalldataSize uint64
batchTimeoutSec uint64
expectedBatchesLen int
expectedChunksInFirstBatch uint64 // only checked when expectedBatchesLen > 0
}{
{
name: "NoLimitReached",
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
name: "NoLimitReached",
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "Timeout",
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 0,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 2,
},
{
name: "MaxL1CommitGasPerBatchIs0",
maxL1CommitGas: 0,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "MaxL1CommitCalldataSizePerBatchIs0",
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 0,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "MaxL1CommitGasPerBatchIsFirstChunk",
maxL1CommitGas: 249179,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
{
name: "MaxL1CommitCalldataSizePerBatchIsFirstChunk",
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 60,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
}
for _, tt := range tests {
@@ -86,7 +50,6 @@ func testBatchProposerLimitsCodecV4(t *testing.T) {
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
@@ -109,45 +72,32 @@ func testBatchProposerLimitsCodecV4(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: 10000,
MaxL2GasPerChunk: 20000000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, &params.ChainConfig{
MaxBlockNumPerChunk: 1,
MaxL2GasPerChunk: 20000000,
ChunkTimeoutSec: 300,
}, encoding.CodecV7, &params.ChainConfig{
LondonBlock: big.NewInt(0),
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
DarwinTime: new(uint64),
DarwinV2Time: new(uint64),
EuclidTime: new(uint64),
EuclidV2Time: new(uint64),
}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(51124), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(51124), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: tt.maxL1CommitGas,
MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize,
BatchTimeoutSec: tt.batchTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
MaxChunksPerBatch: math.MaxInt32,
}, encoding.CodecV4, &params.ChainConfig{
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: tt.batchTimeoutSec,
}, encoding.CodecV7, &params.ChainConfig{
LondonBlock: big.NewInt(0),
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
DarwinTime: new(uint64),
DarwinV2Time: new(uint64),
EuclidTime: new(uint64),
EuclidV2Time: new(uint64),
}, db, nil)
bp.TryProposeBatch()
@@ -173,7 +123,7 @@ func testBatchProposerLimitsCodecV4(t *testing.T) {
}
}
func testBatchCommitGasAndCalldataSizeEstimationCodecV4(t *testing.T) {
func testBatchProposerBlobSizeLimitCodecV7(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
@@ -182,7 +132,6 @@ func testBatchCommitGasAndCalldataSizeEstimationCodecV4(t *testing.T) {
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
@@ -200,230 +149,104 @@ func testBatchCommitGasAndCalldataSizeEstimationCodecV4(t *testing.T) {
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: 10000,
MaxL2GasPerChunk: 20_000_000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2
MaxBlockNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
}, encoding.CodecV7, chainConfig, db, nil)
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(51124), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(51124), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)
blockHeight := uint64(0)
block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
for total := int64(0); total < 90; total++ {
for i := int64(0); i < 30; i++ {
blockHeight++
l2BlockOrm := orm.NewL2Block(db)
block.Header.Number = new(big.Int).SetUint64(blockHeight)
block.Header.Time = blockHeight
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
}
cp.TryProposeChunk()
}
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 0,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
MaxChunksPerBatch: math.MaxInt32,
}, encoding.CodecV4, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}, db, nil)
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: math.MaxUint32,
}, encoding.CodecV7, chainConfig, db, nil)
for i := 0; i < 2; i++ {
bp.TryProposeBatch()
}
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
batches = batches[1:]
var expectedNumBatches int = 1
var numChunksMultiplier uint64 = 64
assert.Len(t, batches, expectedNumBatches)
for i, batch := range batches {
assert.Equal(t, numChunksMultiplier*(uint64(i)+1), batch.EndChunkIndex)
}
}
func testBatchProposerMaxChunkNumPerBatchLimitCodecV7(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
assert.NoError(t, err)
var expectedChunkNum uint64 = 45
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
}, encoding.CodecV7, chainConfig, db, nil)
block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
for blockHeight := uint64(1); blockHeight <= 60; blockHeight++ {
block.Header.Number = new(big.Int).SetUint64(blockHeight)
block.Header.Time = blockHeight
err = orm.NewL2Block(db).InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
cp.TryProposeChunk()
}
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunksPerBatch: 45,
BatchTimeoutSec: math.MaxUint32,
}, encoding.CodecV7, chainConfig, db, nil)
bp.TryProposeBatch()
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, 2)
batches = batches[1:]
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
assert.Equal(t, uint64(2), batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
dbBatch := batches[1]
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Len(t, dbChunks, 2)
for _, chunk := range dbChunks {
assert.Equal(t, batches[0].Hash, chunk.BatchHash)
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
}
assert.Equal(t, uint64(209350), batches[0].TotalL1CommitGas)
assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize)
}
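Every CodecV7 test in this diff repeats the same chain config with all forks active from genesis. A hedged helper that builds it once (the helper name is invented; the fields are copied from the tests):

```go
// Sketch: chain config with every fork through EuclidV2 enabled at genesis.
package example

import (
	"math/big"

	"github.com/scroll-tech/go-ethereum/params"
)

func newEuclidV2TestChainConfig() *params.ChainConfig {
	zero := new(uint64) // a fork time of 0 means active from genesis
	return &params.ChainConfig{
		LondonBlock:    big.NewInt(0),
		BernoulliBlock: big.NewInt(0),
		CurieBlock:     big.NewInt(0),
		DarwinTime:     zero,
		DarwinV2Time:   zero,
		EuclidTime:     zero,
		EuclidV2Time:   zero,
	}
}
```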
func testBatchProposerBlobSizeLimitCodecV4(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
for _, codecVersion := range codecVersions {
db := setupDB(t)
// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
assert.NoError(t, err)
var chainConfig *params.ChainConfig
if codecVersion == encoding.CodecV4 {
chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
} else {
assert.Fail(t, "unsupported codec version, expected CodecV4")
}
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, chainConfig, db, nil)
blockHeight := int64(0)
block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
for total := int64(0); total < 90; total++ {
for i := int64(0); i < 30; i++ {
blockHeight++
l2BlockOrm := orm.NewL2Block(db)
block.Header.Number = big.NewInt(blockHeight)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
}
cp.TryProposeChunk()
}
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: math.MaxUint64,
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
BatchTimeoutSec: math.MaxUint32,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
MaxChunksPerBatch: math.MaxInt32,
}, encoding.CodecV4, chainConfig, db, nil)
for i := 0; i < 2; i++ {
bp.TryProposeBatch()
}
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
batches = batches[1:]
assert.NoError(t, err)
var expectedNumBatches int
var numChunksMultiplier uint64
if codecVersion == encoding.CodecV4 {
expectedNumBatches = 2
numChunksMultiplier = 45
} else {
assert.Fail(t, "unsupported codec version, expected CodecV4")
}
assert.Len(t, batches, expectedNumBatches)
for i, batch := range batches {
assert.Equal(t, numChunksMultiplier*(uint64(i)+1), batch.EndChunkIndex)
}
database.CloseDB(db)
}
}
func testBatchProposerMaxChunkNumPerBatchLimitCodecV4(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
for _, codecVersion := range codecVersions {
db := setupDB(t)
// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
assert.NoError(t, err)
var expectedChunkNum uint64
var chainConfig *params.ChainConfig
if codecVersion == encoding.CodecV4 {
chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
expectedChunkNum = 45
} else {
assert.Fail(t, "unsupported codec version, expected CodecV4")
}
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, chainConfig, db, nil)
block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
for blockHeight := int64(1); blockHeight <= 60; blockHeight++ {
block.Header.Number = big.NewInt(blockHeight)
err = orm.NewL2Block(db).InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
cp.TryProposeChunk()
}
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: math.MaxUint64,
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
BatchTimeoutSec: math.MaxUint32,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
MaxChunksPerBatch: math.MaxInt32,
}, encoding.CodecV4, chainConfig, db, nil)
bp.TryProposeBatch()
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, 2)
dbBatch := batches[1]
assert.Equal(t, expectedChunkNum, dbBatch.EndChunkIndex)
database.CloseDB(db)
}
assert.Equal(t, expectedChunkNum, dbBatch.EndChunkIndex)
}

View File

@@ -26,8 +26,7 @@ type BundleProposer struct {
batchOrm *orm.Batch
bundleOrm *orm.Bundle
maxBatchNumPerBundle uint64
bundleTimeoutSec uint64
cfg *config.BundleProposerConfig
minCodecVersion encoding.CodecVersion
chainCfg *params.ChainConfig
@@ -46,15 +45,14 @@ func NewBundleProposer(ctx context.Context, cfg *config.BundleProposerConfig, mi
log.Info("new bundle proposer", "bundleBatchesNum", cfg.MaxBatchNumPerBundle, "bundleTimeoutSec", cfg.BundleTimeoutSec)
p := &BundleProposer{
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
bundleOrm: orm.NewBundle(db),
maxBatchNumPerBundle: cfg.MaxBatchNumPerBundle,
bundleTimeoutSec: cfg.BundleTimeoutSec,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
bundleOrm: orm.NewBundle(db),
cfg: cfg,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,
bundleProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_bundle_circle_total",
@@ -132,7 +130,7 @@ func (p *BundleProposer) proposeBundle() error {
}
// select at most maxBatchesThisBundle batches
maxBatchesThisBundle := p.maxBatchNumPerBundle
maxBatchesThisBundle := p.cfg.MaxBatchNumPerBundle
batches, err := p.batchOrm.GetCommittedBatchesGEIndexGECodecVersion(p.ctx, firstUnbundledBatchIndex, p.minCodecVersion, int(maxBatchesThisBundle))
if err != nil {
return err
@@ -161,11 +159,6 @@ func (p *BundleProposer) proposeBundle() error {
return fmt.Errorf("unsupported codec version: %v, expected at least %v", codecVersion, p.minCodecVersion)
}
if codecVersion == encoding.CodecV5 {
maxBatchesThisBundle = 1
batches = batches[:maxBatchesThisBundle]
}
for i := 1; i < len(batches); i++ {
// Make sure that all batches have been committed.
if len(batches[i].CommitTxHash) == 0 {
@@ -198,8 +191,8 @@ func (p *BundleProposer) proposeBundle() error {
}
currentTimeSec := uint64(time.Now().Unix())
if firstChunk.StartBlockTime+p.bundleTimeoutSec < currentTimeSec {
log.Info("first block timeout", "batch count", len(batches), "start block number", firstChunk.StartBlockNumber, "start block timestamp", firstChunk.StartBlockTime, "bundle timeout", p.bundleTimeoutSec, "current time", currentTimeSec)
if firstChunk.StartBlockTime+p.cfg.BundleTimeoutSec < currentTimeSec {
log.Info("first block timeout", "batch count", len(batches), "start block number", firstChunk.StartBlockNumber, "start block timestamp", firstChunk.StartBlockTime, "bundle timeout", p.cfg.BundleTimeoutSec, "current time", currentTimeSec)
batches, err = p.allBatchesCommittedInSameTXIncluded(batches)
if err != nil {

View File

@@ -23,7 +23,7 @@ import (
"scroll-tech/rollup/internal/utils"
)
func testBundleProposerLimitsCodecV4(t *testing.T) {
func testBundleProposerLimitsCodecV7(t *testing.T) {
tests := []struct {
name string
maxBatchNumPerBundle uint64
@@ -69,7 +69,6 @@ func testBundleProposerLimitsCodecV4(t *testing.T) {
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
@@ -91,27 +90,18 @@ func testBundleProposerLimitsCodecV4(t *testing.T) {
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint32,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, chainConfig, db, nil)
MaxBlockNumPerChunk: 1,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint32,
}, encoding.CodecV7, chainConfig, db, nil)
bap := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: math.MaxUint64,
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
BatchTimeoutSec: 0,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, chainConfig, db, nil)
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: 0,
}, encoding.CodecV7, chainConfig, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
bap.TryProposeBatch() // batch1 contains chunk1
@@ -121,7 +111,7 @@ func testBundleProposerLimitsCodecV4(t *testing.T) {
bup := NewBundleProposer(context.Background(), &config.BundleProposerConfig{
MaxBatchNumPerBundle: tt.maxBatchNumPerBundle,
BundleTimeoutSec: tt.bundleTimeoutSec,
}, encoding.CodecV4, chainConfig, db, nil)
}, encoding.CodecV7, chainConfig, db, nil)
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
require.NoError(t, err)

View File

@@ -26,15 +26,7 @@ type ChunkProposer struct {
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
maxBlockNumPerChunk uint64
maxTxNumPerChunk uint64
maxL2GasPerChunk uint64
maxL1CommitGasPerChunk uint64
maxL1CommitCalldataSizePerChunk uint64
maxRowConsumptionPerChunk uint64
chunkTimeoutSec uint64
gasCostIncreaseMultiplier float64
maxUncompressedBatchBytesSize uint64
cfg *config.ChunkProposerConfig
replayMode bool
minCodecVersion encoding.CodecVersion
@@ -46,15 +38,10 @@ type ChunkProposer struct {
proposeChunkUpdateInfoFailureTotal prometheus.Counter
chunkTxNum prometheus.Gauge
chunkL2Gas prometheus.Gauge
chunkEstimateL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
totalL1CommitBlobSize prometheus.Gauge
maxTxConsumption prometheus.Gauge
chunkBlocksNum prometheus.Gauge
chunkFirstBlockTimeoutReached prometheus.Counter
chunkBlocksProposeNotEnoughTotal prometheus.Counter
chunkEstimateGasTime prometheus.Gauge
chunkEstimateCalldataSizeTime prometheus.Gauge
chunkEstimateBlobSizeTime prometheus.Gauge
// total number of times that chunk proposer stops early due to compressed data compatibility breach
@@ -68,33 +55,19 @@ type ChunkProposer struct {
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
log.Info("new chunk proposer",
"maxBlockNumPerChunk", cfg.MaxBlockNumPerChunk,
"maxTxNumPerChunk", cfg.MaxTxNumPerChunk,
"maxL2GasPerChunk", cfg.MaxL2GasPerChunk,
"maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk,
"maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk,
"maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
"chunkTimeoutSec", cfg.ChunkTimeoutSec,
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
"maxBlobSize", maxBlobSize,
"maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize)
"maxBlobSize", maxBlobSize)
p := &ChunkProposer{
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
maxBlockNumPerChunk: cfg.MaxBlockNumPerChunk,
maxTxNumPerChunk: cfg.MaxTxNumPerChunk,
maxL2GasPerChunk: cfg.MaxL2GasPerChunk,
maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk,
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
chunkTimeoutSec: cfg.ChunkTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
replayMode: false,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
cfg: cfg,
replayMode: false,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,
chunkProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_chunk_circle_total",
@@ -124,22 +97,11 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minC
Name: "rollup_propose_chunk_l2_gas",
Help: "The chunk l2 gas",
}),
chunkEstimateL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_estimate_l1_commit_gas",
Help: "The chunk estimate l1 commit gas",
}),
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_total_l1_commit_call_data_size",
Help: "The total l1 commit call data size",
}),
totalL1CommitBlobSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_total_l1_commit_blob_size",
Help: "The total l1 commit blob size",
}),
maxTxConsumption: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_max_tx_consumption",
Help: "The max tx consumption",
}),
chunkBlocksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_chunk_block_number",
Help: "The number of blocks in the chunk",
@@ -152,14 +114,6 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minC
Name: "rollup_propose_chunk_blocks_propose_not_enough_total",
Help: "Total number of chunk block propose not enough",
}),
chunkEstimateGasTime: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_estimate_gas_time",
Help: "Time taken to estimate gas for the chunk.",
}),
chunkEstimateCalldataSizeTime: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_estimate_calldata_size_time",
Help: "Time taken to estimate calldata size for the chunk.",
}),
chunkEstimateBlobSizeTime: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_estimate_blob_size_time",
Help: "Time taken to estimate blob size for the chunk.",
@@ -196,7 +150,7 @@ func (p *ChunkProposer) TryProposeChunk() {
}
func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion encoding.CodecVersion, metrics *utils.ChunkMetrics) error {
if chunk == nil {
if chunk == nil || len(chunk.Blocks) == 0 {
return nil
}
@@ -221,6 +175,11 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en
}
chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1]
chunk.PostL1MessageQueueHash, err = encoding.MessageQueueV2ApplyL1MessagesFromBlocks(chunk.PrevL1MessageQueueHash, chunk.Blocks)
if err != nil {
log.Error("Failed to calculate last L1 message queue hash for block", "block number", chunk.Blocks[0].Header.Number, "err", err)
return err
}
log.Info("Chunk not compatible with compressed data, removing last block", "start block number", chunk.Blocks[0].Header.Number, "truncated block length", len(chunk.Blocks))
}
@@ -239,9 +198,7 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en
p.recordAllChunkMetrics(metrics)
}
if len(chunk.Blocks) > 0 {
p.chunkProposeBlockHeight.Set(float64(chunk.Blocks[len(chunk.Blocks)-1].Header.Number.Uint64()))
}
p.chunkProposeBlockHeight.Set(float64(chunk.Blocks[len(chunk.Blocks)-1].Header.Number.Uint64()))
p.chunkProposeThroughput.Add(float64(chunk.TotalGasUsed()))
p.proposeChunkUpdateInfoTotal.Inc()
@@ -275,7 +232,7 @@ func (p *ChunkProposer) proposeChunk() error {
return err
}
maxBlocksThisChunk := p.maxBlockNumPerChunk
maxBlocksThisChunk := p.cfg.MaxBlockNumPerChunk
// select at most maxBlocksThisChunk blocks
blocks, err := p.l2BlockOrm.GetL2BlocksGEHeight(p.ctx, unchunkedBlockHeight, int(maxBlocksThisChunk))
@@ -305,53 +262,30 @@ func (p *ChunkProposer) proposeChunk() error {
return fmt.Errorf("unsupported codec version: %v, expected at least %v", codecVersion, p.minCodecVersion)
}
// Include the Curie block in its own chunk.
if p.chainCfg.CurieBlock != nil && blocks[0].Header.Number.Cmp(p.chainCfg.CurieBlock) == 0 {
chunk := encoding.Chunk{Blocks: blocks[:1]}
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
if calcErr != nil {
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
}
p.recordTimerChunkMetrics(metrics)
return p.updateDBChunkInfo(&chunk, codecVersion, metrics)
}
if proposed, err := p.tryProposeEuclidTransitionChunk(blocks); proposed || err != nil {
return err
}
var chunk encoding.Chunk
// From CodecV7 / EuclidV2 onwards we need to provide the PrevL1MessageQueueHash and PostL1MessageQueueHash.
// PrevL1MessageQueueHash of the first chunk in the fork needs to be the empty hash.
if codecVersion >= encoding.CodecV7 {
parentChunk, err := p.chunkOrm.GetLatestChunk(p.ctx)
if err != nil || parentChunk == nil {
return fmt.Errorf("failed to get parent chunk: %w", err)
}
chunk.PrevL1MessageQueueHash = common.HexToHash(parentChunk.PostL1MessageQueueHash)
// previous chunk is not CodecV7, this means this is the first chunk of the fork.
if encoding.CodecVersion(parentChunk.CodecVersion) < codecVersion {
chunk.PrevL1MessageQueueHash = common.Hash{}
}
chunk.PostL1MessageQueueHash = chunk.PrevL1MessageQueueHash
parentChunk, err := p.chunkOrm.GetLatestChunk(p.ctx)
if err != nil || parentChunk == nil {
return fmt.Errorf("failed to get parent chunk: %w", err)
}
chunk.PrevL1MessageQueueHash = common.HexToHash(parentChunk.PostL1MessageQueueHash)
// previous chunk is not CodecV7, this means this is the first chunk of the fork.
if encoding.CodecVersion(parentChunk.CodecVersion) < codecVersion {
chunk.PrevL1MessageQueueHash = common.Hash{}
}
chunk.PostL1MessageQueueHash = chunk.PrevL1MessageQueueHash
var previousPostL1MessageQueueHash common.Hash
chunk.Blocks = make([]*encoding.Block, 0, len(blocks))
for i, block := range blocks {
chunk.Blocks = append(chunk.Blocks, block)
// Compute rolling PostL1MessageQueueHash for the chunk. Each block's L1 messages are applied to the previous
// hash starting from the PrevL1MessageQueueHash for the chunk.
if codecVersion >= encoding.CodecV7 {
previousPostL1MessageQueueHash = chunk.PostL1MessageQueueHash
chunk.PostL1MessageQueueHash, err = encoding.MessageQueueV2ApplyL1MessagesFromBlocks(previousPostL1MessageQueueHash, []*encoding.Block{block})
if err != nil {
return fmt.Errorf("failed to calculate last L1 message queue hash for block %d: %w", block.Header.Number.Uint64(), err)
}
previousPostL1MessageQueueHash = chunk.PostL1MessageQueueHash
chunk.PostL1MessageQueueHash, err = encoding.MessageQueueV2ApplyL1MessagesFromBlocks(previousPostL1MessageQueueHash, []*encoding.Block{block})
if err != nil {
return fmt.Errorf("failed to calculate last L1 message queue hash for block %d: %w", block.Header.Number.Uint64(), err)
}
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
@@ -361,36 +295,17 @@ func (p *ChunkProposer) proposeChunk() error {
p.recordTimerChunkMetrics(metrics)
overEstimatedL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(metrics.L1CommitGas))
if metrics.TxNum > p.maxTxNumPerChunk ||
metrics.L2Gas > p.maxL2GasPerChunk ||
metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
overEstimatedL1CommitGas > p.maxL1CommitGasPerChunk ||
metrics.CrcMax > p.maxRowConsumptionPerChunk ||
metrics.L1CommitBlobSize > maxBlobSize ||
metrics.L1CommitUncompressedBatchBytesSize > p.maxUncompressedBatchBytesSize {
if metrics.L2Gas > p.cfg.MaxL2GasPerChunk || metrics.L1CommitBlobSize > maxBlobSize {
if i == 0 {
// The first block exceeds hard limits, which indicates a bug in the sequencer, manual fix is needed.
return fmt.Errorf("the first block exceeds limits; block number: %v, limits: %+v, maxTxNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxRowConsumption: %v, maxBlobSize: %v, maxUncompressedBatchBytesSize: %v",
block.Header.Number, metrics, p.maxTxNumPerChunk, p.maxL1CommitCalldataSizePerChunk, p.maxL1CommitGasPerChunk, p.maxRowConsumptionPerChunk, maxBlobSize, p.maxUncompressedBatchBytesSize)
return fmt.Errorf("the first block exceeds limits; block number: %v, limits: %+v, maxBlobSize: %v", block.Header.Number, metrics, maxBlobSize)
}
log.Debug("breaking limit condition in chunking",
"txNum", metrics.TxNum,
"maxTxNum", p.maxTxNumPerChunk,
"l2Gas", metrics.L2Gas,
"maxL2Gas", p.maxL2GasPerChunk,
"l1CommitCalldataSize", metrics.L1CommitCalldataSize,
"maxL1CommitCalldataSize", p.maxL1CommitCalldataSizePerChunk,
"l1CommitGas", metrics.L1CommitGas,
"overEstimatedL1CommitGas", overEstimatedL1CommitGas,
"maxL1CommitGas", p.maxL1CommitGasPerChunk,
"rowConsumption", metrics.CrcMax,
"maxRowConsumption", p.maxRowConsumptionPerChunk,
"maxL2Gas", p.cfg.MaxL2GasPerChunk,
"l1CommitBlobSize", metrics.L1CommitBlobSize,
"maxBlobSize", maxBlobSize,
"L1CommitUncompressedBatchBytesSize", metrics.L1CommitUncompressedBatchBytesSize,
"maxUncompressedBatchBytesSize", p.maxUncompressedBatchBytesSize)
"maxBlobSize", maxBlobSize)
chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1]
chunk.PostL1MessageQueueHash = previousPostL1MessageQueueHash
@@ -411,7 +326,7 @@ func (p *ChunkProposer) proposeChunk() error {
}
currentTimeSec := uint64(time.Now().Unix())
if metrics.FirstBlockTimestamp+p.chunkTimeoutSec < currentTimeSec || metrics.NumBlocks == maxBlocksThisChunk {
if metrics.FirstBlockTimestamp+p.cfg.ChunkTimeoutSec < currentTimeSec || metrics.NumBlocks == maxBlocksThisChunk {
log.Info("reached maximum number of blocks in chunk or first block timeout",
"block count", len(chunk.Blocks),
"start block number", chunk.Blocks[0].Header.Number,
@@ -431,54 +346,12 @@ func (p *ChunkProposer) proposeChunk() error {
func (p *ChunkProposer) recordAllChunkMetrics(metrics *utils.ChunkMetrics) {
p.chunkTxNum.Set(float64(metrics.TxNum))
p.maxTxConsumption.Set(float64(metrics.CrcMax))
p.chunkBlocksNum.Set(float64(metrics.NumBlocks))
p.chunkL2Gas.Set(float64(metrics.L2Gas))
p.totalL1CommitCalldataSize.Set(float64(metrics.L1CommitCalldataSize))
p.chunkEstimateL1CommitGas.Set(float64(metrics.L1CommitGas))
p.totalL1CommitBlobSize.Set(float64(metrics.L1CommitBlobSize))
p.chunkEstimateGasTime.Set(float64(metrics.EstimateGasTime))
p.chunkEstimateCalldataSizeTime.Set(float64(metrics.EstimateCalldataSizeTime))
p.chunkEstimateBlobSizeTime.Set(float64(metrics.EstimateBlobSizeTime))
}
func (p *ChunkProposer) recordTimerChunkMetrics(metrics *utils.ChunkMetrics) {
p.chunkEstimateGasTime.Set(float64(metrics.EstimateGasTime))
p.chunkEstimateCalldataSizeTime.Set(float64(metrics.EstimateCalldataSizeTime))
p.chunkEstimateBlobSizeTime.Set(float64(metrics.EstimateBlobSizeTime))
}
func (p *ChunkProposer) tryProposeEuclidTransitionChunk(blocks []*encoding.Block) (bool, error) {
// If we are in replay mode, there is a corner case: when StartL2Block is set to 0, this check
// needs to fetch the genesis block, but the mainnet DB has no genesis block, so we bypass the check.
if p.replayMode {
return false, nil
}
if !p.chainCfg.IsEuclid(blocks[0].Header.Time) {
return false, nil
}
prevBlocks, err := p.l2BlockOrm.GetL2BlocksGEHeight(p.ctx, blocks[0].Header.Number.Uint64()-1, 1)
if err != nil || len(prevBlocks) == 0 || prevBlocks[0].Header.Hash() != blocks[0].Header.ParentHash {
return false, fmt.Errorf("failed to get parent block: %w", err)
}
if p.chainCfg.IsEuclid(prevBlocks[0].Header.Time) {
// Parent is already Euclid, so the transition happened earlier
return false, nil
}
// blocks[0] is Euclid, but parent is not, propose a chunk with only blocks[0]
chunk := encoding.Chunk{Blocks: blocks[:1]}
codecVersion := encoding.CodecV5
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
if calcErr != nil {
return false, fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
}
p.recordTimerChunkMetrics(metrics)
if err := p.updateDBChunkInfo(&chunk, codecVersion, metrics); err != nil {
return false, err
}
return true, nil
}
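For reference, a minimal standalone sketch (not part of the diff) of the rolling message-queue-hash computation used in the proposer loop above: the chunk's PrevL1MessageQueueHash (the empty hash at the start of the fork) is threaded through each block via encoding.MessageQueueV2ApplyL1MessagesFromBlocks. The helper name rollQueueHash and the abbreviated error handling are illustrative assumptions.

// Sketch: roll the L1 message queue hash block by block, mirroring the
// proposer loop above. Assumes the scroll-tech/da-codec encoding package
// and the go-ethereum common package as imported in the surrounding code.
func rollQueueHash(prev common.Hash, blocks []*encoding.Block) (common.Hash, error) {
	post := prev
	for _, block := range blocks {
		var err error
		// Apply this block's L1 messages on top of the running hash.
		post, err = encoding.MessageQueueV2ApplyL1MessagesFromBlocks(post, []*encoding.Block{block})
		if err != nil {
			return common.Hash{}, fmt.Errorf("block %d: %w", block.Header.Number.Uint64(), err)
		}
	}
	return post, nil
}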

View File

@@ -7,6 +7,7 @@ import (
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common/math"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/params"
"github.com/stretchr/testify/assert"
@@ -14,119 +15,44 @@ import (
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
)
func testChunkProposerLimitsCodecV4(t *testing.T) {
func testChunkProposerLimitsCodecV7(t *testing.T) {
tests := []struct {
name string
maxBlockNum uint64
maxTxNum uint64
maxL2Gas uint64
maxL1CommitGas uint64
maxL1CommitCalldataSize uint64
maxRowConsumption uint64
chunkTimeoutSec uint64
expectedChunksLen int
expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
}{
{
name: "NoLimitReached",
maxBlockNum: 100,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
name: "NoLimitReached",
maxBlockNum: 100,
maxL2Gas: 20_000_000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "Timeout",
maxBlockNum: 100,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 0,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 2,
},
{
name: "MaxTxNumPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 0,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxL2GasPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 10,
maxL2Gas: 0,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxL1CommitGasPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 0,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxL1CommitCalldataSizePerChunkIs0",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 0,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxRowConsumptionPerChunkIs0",
maxBlockNum: 100,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 0,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
name: "MaxL2GasPerChunkIs0",
maxBlockNum: 10,
maxL2Gas: 0,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxBlockNumPerChunkIs1",
maxBlockNum: 1,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxTxNumPerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 2,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
@@ -136,47 +62,7 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
// with the first block it exceeds the maxL2GasPerChunk limit.
name: "MaxL2GasPerChunkIsSecondBlock",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 1_153_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxL1CommitGasPerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 62500,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxL1CommitCalldataSizePerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 60,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxRowConsumptionPerChunkIs1",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
@@ -192,21 +78,19 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
// Add genesis chunk.
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), &encoding.Chunk{Blocks: []*encoding.Block{{Header: &gethTypes.Header{Number: big.NewInt(0)}}}}, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: tt.maxBlockNum,
MaxTxNumPerChunk: tt.maxTxNum,
MaxL2GasPerChunk: tt.maxL2Gas,
MaxL1CommitGasPerChunk: tt.maxL1CommitGas,
MaxL1CommitCalldataSizePerChunk: tt.maxL1CommitCalldataSize,
MaxRowConsumptionPerChunk: tt.maxRowConsumption,
ChunkTimeoutSec: tt.chunkTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}, db, nil)
MaxBlockNumPerChunk: tt.maxBlockNum,
MaxL2GasPerChunk: tt.maxL2Gas,
ChunkTimeoutSec: tt.chunkTimeoutSec,
}, encoding.CodecV7, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}, db, nil)
cp.TryProposeChunk()
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 1, 0)
assert.NoError(t, err)
assert.Len(t, chunks, tt.expectedChunksLen)
@@ -224,61 +108,48 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
}
}
func testChunkProposerBlobSizeLimitCodecV4(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
for _, codecVersion := range codecVersions {
db := setupDB(t)
block := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
for i := int64(0); i < 510; i++ {
l2BlockOrm := orm.NewL2Block(db)
block.Header.Number = big.NewInt(i + 1)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
}
var chainConfig *params.ChainConfig
if codecVersion == encoding.CodecV4 {
chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
} else {
assert.Fail(t, "unsupported codec version, expected CodecV4")
}
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 255,
MaxTxNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint32,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, chainConfig, db, nil)
for i := 0; i < 2; i++ {
cp.TryProposeChunk()
}
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
func testChunkProposerBlobSizeLimitCodecV7(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
block := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
for i := uint64(0); i < 510; i++ {
l2BlockOrm := orm.NewL2Block(db)
block.Header.Number = new(big.Int).SetUint64(i + 1)
block.Header.Time = i + 1
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
}
var expectedNumChunks int = 2
var numBlocksMultiplier uint64
if codecVersion == encoding.CodecV4 {
numBlocksMultiplier = 255
} else {
assert.Fail(t, "unsupported codec version, expected CodecV4")
}
assert.Len(t, chunks, expectedNumChunks)
// Add genesis chunk.
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), &encoding.Chunk{Blocks: []*encoding.Block{{Header: &gethTypes.Header{Number: big.NewInt(0)}}}}, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
for i, chunk := range chunks {
expected := numBlocksMultiplier * (uint64(i) + 1)
if expected > 2000 {
expected = 2000
}
assert.Equal(t, expected, chunk.EndBlockNumber)
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 255,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint32,
}, encoding.CodecV7, chainConfig, db, nil)
for i := 0; i < 2; i++ {
cp.TryProposeChunk()
}
chunkOrm = orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 1, 0)
assert.NoError(t, err)
var expectedNumChunks int = 2
var numBlocksMultiplier uint64 = 255
assert.Len(t, chunks, expectedNumChunks)
for i, chunk := range chunks {
expected := numBlocksMultiplier * (uint64(i) + 1)
if expected > 2000 {
expected = 2000
}
database.CloseDB(db)
assert.Equal(t, expected, chunk.EndBlockNumber)
}
}

View File

@@ -92,10 +92,6 @@ func (w *L2WatcherClient) getAndStoreBlocks(ctx context.Context, from, to uint64
if err != nil {
return fmt.Errorf("failed to GetBlockByNumberOrHash: %v. number: %v", err, number)
}
if block.RowConsumption == nil && !w.chainCfg.IsEuclid(block.Time()) {
w.metrics.fetchNilRowConsumptionBlockTotal.Inc()
return fmt.Errorf("fetched block does not contain RowConsumption. number: %v", number)
}
var count int
for _, tx := range block.Transactions() {
@@ -110,10 +106,9 @@ func (w *L2WatcherClient) getAndStoreBlocks(ctx context.Context, from, to uint64
return fmt.Errorf("failed to get withdrawRoot: %v. number: %v", err3, number)
}
blocks = append(blocks, &encoding.Block{
Header: block.Header(),
Transactions: encoding.TxsToTxsData(block.Transactions()),
WithdrawRoot: common.BytesToHash(withdrawRoot),
RowConsumption: block.RowConsumption,
Header: block.Header(),
Transactions: encoding.TxsToTxsData(block.Transactions()),
WithdrawRoot: common.BytesToHash(withdrawRoot),
})
}
@@ -123,11 +118,6 @@ func (w *L2WatcherClient) getAndStoreBlocks(ctx context.Context, from, to uint64
if codec == nil {
return fmt.Errorf("failed to retrieve codec for block number %v and time %v", block.Header.Number, block.Header.Time)
}
blockL1CommitCalldataSize, err := codec.EstimateBlockL1CommitCalldataSize(block)
if err != nil {
return fmt.Errorf("failed to estimate block L1 commit calldata size: %v", err)
}
w.metrics.rollupL2BlockL1CommitCalldataSize.Set(float64(blockL1CommitCalldataSize))
w.metrics.rollupL2WatcherSyncThroughput.Add(float64(block.Header.GasUsed))
}
if err := w.l2BlockOrm.InsertL2Blocks(w.ctx, blocks); err != nil {

View File

@@ -8,11 +8,9 @@ import (
)
type l2WatcherMetrics struct {
fetchRunningMissingBlocksTotal prometheus.Counter
fetchRunningMissingBlocksHeight prometheus.Gauge
rollupL2BlocksFetchedGap prometheus.Gauge
rollupL2BlockL1CommitCalldataSize prometheus.Gauge
fetchNilRowConsumptionBlockTotal prometheus.Counter
fetchRunningMissingBlocksTotal prometheus.Counter
fetchRunningMissingBlocksHeight prometheus.Gauge
rollupL2BlocksFetchedGap prometheus.Gauge
rollupL2WatcherSyncThroughput prometheus.Counter
}
@@ -37,14 +35,6 @@ func initL2WatcherMetrics(reg prometheus.Registerer) *l2WatcherMetrics {
Name: "rollup_l2_watcher_blocks_fetched_gap",
Help: "The gap of l2 fetch",
}),
rollupL2BlockL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_block_l1_commit_calldata_size",
Help: "The l1 commitBatch calldata size of the l2 block",
}),
fetchNilRowConsumptionBlockTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l2_watcher_fetch_nil_row_consumption_block_total",
Help: "The total number of occurrences where a fetched block has nil RowConsumption",
}),
rollupL2WatcherSyncThroughput: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l2_watcher_sync_throughput",
Help: "The cumulative gas used in blocks that L2 watcher sync",

View File

@@ -101,17 +101,16 @@ func TestFunction(t *testing.T) {
t.Run("TestFetchRunningMissingBlocks", testFetchRunningMissingBlocks)
// Run chunk proposer test cases.
t.Run("TestChunkProposerLimitsCodecV4", testChunkProposerLimitsCodecV4)
t.Run("TestChunkProposerBlobSizeLimitCodecV4", testChunkProposerBlobSizeLimitCodecV4)
t.Run("TestChunkProposerLimitsCodecV7", testChunkProposerLimitsCodecV7)
t.Run("TestChunkProposerBlobSizeLimitCodecV7", testChunkProposerBlobSizeLimitCodecV7)
// Run batch proposer test cases.
t.Run("TestBatchProposerLimitsCodecV4", testBatchProposerLimitsCodecV4)
t.Run("TestBatchCommitGasAndCalldataSizeEstimationCodecV4", testBatchCommitGasAndCalldataSizeEstimationCodecV4)
t.Run("TestBatchProposerBlobSizeLimitCodecV4", testBatchProposerBlobSizeLimitCodecV4)
t.Run("TestBatchProposerMaxChunkNumPerBatchLimitCodecV4", testBatchProposerMaxChunkNumPerBatchLimitCodecV4)
t.Run("TestBatchProposerLimitsCodecV7", testBatchProposerLimitsCodecV7)
t.Run("TestBatchProposerBlobSizeLimitCodecV7", testBatchProposerBlobSizeLimitCodecV7)
t.Run("TestBatchProposerMaxChunkNumPerBatchLimitCodecV7", testBatchProposerMaxChunkNumPerBatchLimitCodecV7)
// Run bundle proposer test cases.
t.Run("TestBundleProposerLimitsCodecV4", testBundleProposerLimitsCodecV4)
t.Run("TestBundleProposerLimitsCodecV7", testBundleProposerLimitsCodecV7)
}
func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {

View File

@@ -121,7 +121,7 @@ func (o *Batch) GetBatchCount(ctx context.Context) (uint64, error) {
}
// GetVerifiedProofByHash retrieves the verified aggregate proof for a batch with the given hash.
func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash, hardForkName string) (message.BatchProof, error) {
func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.OpenVMBatchProof, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Select("proof")
@@ -132,11 +132,11 @@ func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash, hardForkName s
return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
}
proof := message.NewBatchProof(hardForkName)
var proof message.OpenVMBatchProof
if err := json.Unmarshal(batch.Proof, &proof); err != nil {
return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
}
return proof, nil
return &proof, nil
}
// GetLatestBatch retrieves the latest batch from the database.
@@ -218,6 +218,18 @@ func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string)
return statuses, nil
}
func (o *Batch) GetFailedAndPendingBatchesCount(ctx context.Context) (int64, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("rollup_status = ? OR rollup_status = ?", types.RollupCommitFailed, types.RollupPending)
var count int64
if err := db.Count(&count).Error; err != nil {
return 0, fmt.Errorf("Batch.GetFailedAndPendingBatchesCount error: %w", err)
}
return count, nil
}
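A hedged usage sketch for the new counter, e.g. to back-pressure batch submission in the relayer; maxPendingBatches and the surrounding wiring are illustrative assumptions, not part of this diff.

// Sketch: skip committing new batches while too many are still failed
// or pending. batchOrm is the *Batch ORM defined above.
count, err := batchOrm.GetFailedAndPendingBatchesCount(ctx)
if err != nil {
	return fmt.Errorf("failed to count failed/pending batches: %w", err)
}
if count >= maxPendingBatches { // hypothetical threshold from config
	log.Info("too many uncommitted batches, backing off", "count", count)
	return nil
}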
// GetFailedAndPendingBatches retrieves batches with failed or pending status up to the specified limit.
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetFailedAndPendingBatches(ctx context.Context, limit int) ([]*Batch, error) {
@@ -288,30 +300,28 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
}
newBatch := Batch{
Index: batch.Index,
Hash: batchMeta.BatchHash.Hex(),
DataHash: batchMeta.BatchDataHash.Hex(),
StartChunkHash: batchMeta.StartChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: batchMeta.EndChunkHash.Hex(),
EndChunkIndex: startChunkIndex + numChunks - 1,
StateRoot: batch.StateRoot().Hex(),
WithdrawRoot: batch.WithdrawRoot().Hex(),
ParentBatchHash: batch.ParentBatchHash.Hex(),
BatchHeader: batchMeta.BatchBytes,
CodecVersion: int16(codecVersion),
PrevL1MessageQueueHash: batch.PrevL1MessageQueueHash.Hex(),
PostL1MessageQueueHash: batch.PostL1MessageQueueHash.Hex(),
EnableCompress: enableCompress,
BlobBytes: batchMeta.BlobBytes,
ChallengeDigest: batchMeta.ChallengeDigest.Hex(),
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
TotalL1CommitGas: metrics.L1CommitGas,
TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
BlobDataProof: batchMeta.BatchBlobDataProof,
BlobSize: metrics.L1CommitBlobSize,
Index: batch.Index,
Hash: batchMeta.BatchHash.Hex(),
DataHash: batchMeta.BatchDataHash.Hex(),
StartChunkHash: batchMeta.StartChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: batchMeta.EndChunkHash.Hex(),
EndChunkIndex: startChunkIndex + numChunks - 1,
StateRoot: batch.StateRoot().Hex(),
WithdrawRoot: batch.WithdrawRoot().Hex(),
ParentBatchHash: batch.ParentBatchHash.Hex(),
BatchHeader: batchMeta.BatchBytes,
CodecVersion: int16(codecVersion),
PrevL1MessageQueueHash: batch.PrevL1MessageQueueHash.Hex(),
PostL1MessageQueueHash: batch.PostL1MessageQueueHash.Hex(),
EnableCompress: enableCompress,
BlobBytes: batchMeta.BlobBytes,
ChallengeDigest: batchMeta.ChallengeDigest.Hex(),
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
BlobDataProof: batchMeta.BatchBlobDataProof,
BlobSize: metrics.L1CommitBlobSize,
}
db := o.db
@@ -432,7 +442,7 @@ func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash st
// UpdateProofByHash updates the batch proof by hash.
// for unit test.
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof message.BatchProof, proofTimeSec uint64) error {
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.OpenVMBatchProof, proofTimeSec uint64) error {
proofBytes, err := json.Marshal(proof)
if err != nil {
return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)

View File

@@ -134,7 +134,7 @@ func (o *Bundle) GetFirstPendingBundle(ctx context.Context) (*Bundle, error) {
}
// GetVerifiedProofByHash retrieves the verified aggregate proof for a bundle with the given hash.
func (o *Bundle) GetVerifiedProofByHash(ctx context.Context, hash, hardForkName string) (message.BundleProof, error) {
func (o *Bundle) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.OpenVMBundleProof, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Select("proof")
@@ -145,11 +145,11 @@ func (o *Bundle) GetVerifiedProofByHash(ctx context.Context, hash, hardForkName
return nil, fmt.Errorf("Bundle.GetVerifiedProofByHash error: %w, bundle hash: %v", err, hash)
}
proof := message.NewBundleProof(hardForkName)
var proof message.OpenVMBundleProof
if err := json.Unmarshal(bundle.Proof, &proof); err != nil {
return nil, fmt.Errorf("Bundle.GetVerifiedProofByHash error: %w, bundle hash: %v", err, hash)
}
return proof, nil
return &proof, nil
}
// InsertBundle inserts a new bundle into the database.
@@ -256,7 +256,7 @@ func (o *Bundle) UpdateRollupStatus(ctx context.Context, hash string, status typ
// UpdateProofAndProvingStatusByHash updates the bundle proof and proving status by hash.
// only used in unit tests.
func (o *Bundle) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof message.BundleProof, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
func (o *Bundle) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof *message.OpenVMBundleProof, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]

View File

@@ -50,14 +50,14 @@ type Chunk struct {
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
// blob
CrcMax uint64 `json:"crc_max" gorm:"column:crc_max"`
CrcMax uint64 `json:"crc_max" gorm:"column:crc_max"` // deprecated
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"`
TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas"`
TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"` // deprecated
TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas"` // deprecated
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
@@ -246,8 +246,6 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
TotalL2TxGas: chunk.TotalGasUsed(),
TotalL2TxNum: chunk.NumL2Transactions(),
TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
TotalL1CommitGas: metrics.L1CommitGas,
StartBlockTime: chunk.Blocks[0].Header.Time,
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
TotalL1MessagesPoppedInChunk: chunk.NumL1Messages(totalL1MessagePoppedBefore),
@@ -260,7 +258,6 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
CodecVersion: int16(codecVersion),
EnableCompress: enableCompress,
ProvingStatus: int16(types.ProvingTaskUnassigned),
CrcMax: metrics.CrcMax,
BlobSize: metrics.L1CommitBlobSize,
}

View File

@@ -71,6 +71,20 @@ func (o *L1Block) GetL1Blocks(ctx context.Context, fields map[string]interface{}
return l1Blocks, nil
}
// GetBlobFeesInRange returns all blob_base_fee values for blocks
// with number ∈ [startBlock..endBlock], ordered by block number ascending.
func (o *L1Block) GetBlobFeesInRange(ctx context.Context, startBlock, endBlock uint64) ([]uint64, error) {
var fees []uint64
db := o.db.WithContext(ctx).
Model(&L1Block{}).
Where("number >= ? AND number <= ?", startBlock, endBlock).
Order("number ASC")
if err := db.Pluck("blob_base_fee", &fees).Error; err != nil {
return nil, fmt.Errorf("L1Block.GetBlobFeesInRange error: %w", err)
}
return fees, nil
}
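This fee history backs the sequencer submission strategy mentioned in the commit log; below is a minimal sketch of one plausible consumer that averages the window into a target price. The windowing and target math are illustrative assumptions.

// Sketch: derive a target blob price from recent history. l1BlockOrm is
// the *L1Block ORM above; startBlock/endBlock bound the sampling window.
fees, err := l1BlockOrm.GetBlobFeesInRange(ctx, startBlock, endBlock)
if err != nil || len(fees) == 0 {
	return 0, fmt.Errorf("failed to load blob fee history: %w", err)
}
var sum uint64
for _, fee := range fees {
	sum += fee
}
target := sum / uint64(len(fees)) // e.g. submit once the current blob fee <= target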
// InsertL1Blocks batch inserts l1 blocks.
// If there's a block number conflict (e.g., due to a reorg), it soft-deletes the existing block and inserts the new one.
func (o *L1Block) InsertL1Blocks(ctx context.Context, blocks []L1Block) error {

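The body of InsertL1Blocks is not shown in this hunk; below is a hedged sketch of the conflict handling its comment describes, assuming GORM soft deletes (the model embedding gorm.DeletedAt, as Chunk does above) and a Number field on L1Block; all wiring here is an assumption, not the actual implementation.

// Sketch: replace a reorged block by soft-deleting the old row and
// inserting the replacement within a single transaction.
func insertWithReorgHandling(ctx context.Context, db *gorm.DB, block L1Block) error {
	return db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
		// Soft delete any existing row with the same block number.
		if err := tx.Where("number = ?", block.Number).Delete(&L1Block{}).Error; err != nil {
			return err
		}
		return tx.Create(&block).Error
	})
}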
View File

@@ -96,11 +96,6 @@ func (o *L2Block) GetL2BlocksGEHeight(ctx context.Context, height uint64, limit
}
block.WithdrawRoot = common.HexToHash(v.WithdrawRoot)
if err := json.Unmarshal([]byte(v.RowConsumption), &block.RowConsumption); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksGEHeight error: %w", err)
}
blocks = append(blocks, &block)
}
@@ -171,11 +166,6 @@ func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint6
}
block.WithdrawRoot = common.HexToHash(v.WithdrawRoot)
if err := json.Unmarshal([]byte(v.RowConsumption), &block.RowConsumption); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}
blocks = append(blocks, &block)
}
@@ -198,12 +188,6 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*encoding.Block)
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
}
rc, err := json.Marshal(block.RowConsumption)
if err != nil {
log.Error("failed to marshal RowConsumption", "hash", block.Header.Hash().String(), "err", err)
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
}
l2Block := L2Block{
Number: block.Header.Number.Uint64(),
Hash: block.Header.Hash().String(),
@@ -214,7 +198,6 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*encoding.Block)
TxNum: uint32(len(block.Transactions)),
GasUsed: block.Header.GasUsed,
BlockTimestamp: block.Header.Time,
RowConsumption: string(rc),
Header: string(header),
}
l2Blocks = append(l2Blocks, l2Block)

View File

@@ -71,12 +71,14 @@ func setupEnv(t *testing.T) {
block1 = &encoding.Block{}
err = json.Unmarshal(templateBlockTrace, block1)
assert.NoError(t, err)
block1.RowConsumption = nil
templateBlockTrace, err = os.ReadFile("../../../common/testdata/blockTrace_03.json")
assert.NoError(t, err)
block2 = &encoding.Block{}
err = json.Unmarshal(templateBlockTrace, block2)
assert.NoError(t, err)
block2.RowConsumption = nil
}
func tearDownEnv(t *testing.T) {
@@ -292,7 +294,7 @@ func TestBatchOrm(t *testing.T) {
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
assert.NoError(t, err)
dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1, "darwinV2")
dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1)
assert.Error(t, err)
assert.Nil(t, dbProof)
@@ -451,18 +453,16 @@ func TestBundleOrm(t *testing.T) {
})
t.Run("GetVerifiedProofByHash", func(t *testing.T) {
proof := &message.Halo2BundleProof{
RawProof: []byte("test proof"),
}
proof := &message.OpenVMBundleProof{EvmProof: &message.OpenVMEvmProof{Instances: make([]byte, 384)}}
proofBytes, err := json.Marshal(proof)
assert.NoError(t, err)
err = db.Model(&Bundle{}).Where("hash = ?", bundle1.Hash).Update("proof", proofBytes).Error
assert.NoError(t, err)
retrievedProof, err := bundleOrm.GetVerifiedProofByHash(context.Background(), bundle1.Hash, "darwinV2")
retrievedProof, err := bundleOrm.GetVerifiedProofByHash(context.Background(), bundle1.Hash)
assert.NoError(t, err)
assert.Equal(t, proof.RawProof, retrievedProof.Proof())
assert.Equal(t, proof.Proof(), retrievedProof.Proof())
})
t.Run("GetBundles", func(t *testing.T) {
@@ -474,9 +474,7 @@ func TestBundleOrm(t *testing.T) {
})
t.Run("UpdateProofAndProvingStatusByHash", func(t *testing.T) {
proof := &message.Halo2BundleProof{
RawProof: []byte("new test proof"),
}
proof := &message.OpenVMBundleProof{EvmProof: &message.OpenVMEvmProof{Instances: make([]byte, 384)}}
err := bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle2.Hash, proof, types.ProvingTaskVerified, 600)
assert.NoError(t, err)
@@ -487,10 +485,10 @@ func TestBundleOrm(t *testing.T) {
assert.Equal(t, int32(600), bundle.ProofTimeSec)
assert.NotNil(t, bundle.ProvedAt)
retrievedProof := message.Halo2BundleProof{}
retrievedProof := &message.OpenVMBundleProof{}
err = json.Unmarshal(bundle.Proof, &retrievedProof)
assert.NoError(t, err)
assert.Equal(t, proof.RawProof, retrievedProof.Proof())
assert.Equal(t, proof.Proof(), retrievedProof.Proof())
})
t.Run("UpdateRollupStatus", func(t *testing.T) {

View File

@@ -13,19 +13,12 @@ type ChunkMetrics struct {
NumBlocks uint64
TxNum uint64
L2Gas uint64
CrcMax uint64
FirstBlockTimestamp uint64
L1CommitCalldataSize uint64
L1CommitGas uint64
L1CommitBlobSize uint64
L1CommitUncompressedBatchBytesSize uint64
L1CommitBlobSize uint64
// timing metrics
EstimateGasTime time.Duration
EstimateCalldataSizeTime time.Duration
EstimateBlobSizeTime time.Duration
EstimateBlobSizeTime time.Duration
}
// CalculateChunkMetrics calculates chunk metrics.
@@ -42,34 +35,13 @@ func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVer
}
var err error
metrics.CrcMax, err = chunk.CrcMax()
if err != nil {
return nil, fmt.Errorf("failed to get crc max, version: %v, err: %w", codecVersion, err)
}
codec, err := encoding.CodecFromVersion(codecVersion)
if err != nil {
return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err)
}
metrics.EstimateGasTime, err = measureTime(func() error {
metrics.L1CommitGas, err = codec.EstimateChunkL1CommitGas(chunk)
return err
})
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit gas, version: %v, err: %w", codecVersion, err)
}
metrics.EstimateCalldataSizeTime, err = measureTime(func() error {
metrics.L1CommitCalldataSize, err = codec.EstimateChunkL1CommitCalldataSize(chunk)
return err
})
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit calldata size, version: %v, err: %w", codecVersion, err)
}
metrics.EstimateBlobSizeTime, err = measureTime(func() error {
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codec.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)
_, metrics.L1CommitBlobSize, err = codec.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)
return err
})
if err != nil {
@@ -84,16 +56,10 @@ type BatchMetrics struct {
NumChunks uint64
FirstBlockTimestamp uint64
L1CommitCalldataSize uint64
L1CommitGas uint64
L1CommitBlobSize uint64
L1CommitUncompressedBatchBytesSize uint64
L1CommitBlobSize uint64
// timing metrics
EstimateGasTime time.Duration
EstimateCalldataSizeTime time.Duration
EstimateBlobSizeTime time.Duration
EstimateBlobSizeTime time.Duration
}
// CalculateBatchMetrics calculates batch metrics.
@@ -108,24 +74,8 @@ func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVer
return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err)
}
metrics.EstimateGasTime, err = measureTime(func() error {
metrics.L1CommitGas, err = codec.EstimateBatchL1CommitGas(batch)
return err
})
if err != nil {
return nil, fmt.Errorf("failed to estimate batch L1 commit gas, version: %v, err: %w", codecVersion, err)
}
metrics.EstimateCalldataSizeTime, err = measureTime(func() error {
metrics.L1CommitCalldataSize, err = codec.EstimateBatchL1CommitCalldataSize(batch)
return err
})
if err != nil {
return nil, fmt.Errorf("failed to estimate batch L1 commit calldata size, version: %v, err: %w", codecVersion, err)
}
metrics.EstimateBlobSizeTime, err = measureTime(func() error {
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codec.EstimateBatchL1CommitBatchSizeAndBlobSize(batch)
_, metrics.L1CommitBlobSize, err = codec.EstimateBatchL1CommitBatchSizeAndBlobSize(batch)
return err
})
if err != nil {

View File

@@ -3,21 +3,11 @@
"endpoint": "https://rpc.scroll.io",
"chunk_proposer_config": {
"max_block_num_per_chunk": 100,
"max_tx_num_per_chunk": 100,
"max_l2_gas_per_chunk": 20000000,
"max_l1_commit_gas_per_chunk": 5000000,
"max_l1_commit_calldata_size_per_chunk": 123740,
"chunk_timeout_sec": 72000000000,
"max_row_consumption_per_chunk": 10000000000,
"gas_cost_increase_multiplier": 1.2,
"max_uncompressed_batch_bytes_size": 634693
"chunk_timeout_sec": 72000000000
},
"batch_proposer_config": {
"max_l1_commit_gas_per_batch": 5000000,
"max_l1_commit_calldata_size_per_batch": 123740,
"batch_timeout_sec": 72000000000,
"gas_cost_increase_multiplier": 1.2,
"max_uncompressed_batch_bytes_size": 634693,
"max_chunks_per_batch": 45
},
"bundle_proposer_config": {

View File

@@ -208,7 +208,6 @@ func TestFunction(t *testing.T) {
// l1 rollup and watch rollup events
t.Run("TestCommitAndFinalizeGenesisBatch", testCommitAndFinalizeGenesisBatch)
t.Run("testCommitBatchAndFinalizeBundleCodecV4V5V6", testCommitBatchAndFinalizeBundleCodecV4V5V6)
t.Run("TestCommitBatchAndFinalizeBundleCodecV7", testCommitBatchAndFinalizeBundleCodecV7)
// l1 gas oracle

View File

@@ -66,9 +66,8 @@ func testImportL1GasPrice(t *testing.T) {
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
Transactions: nil,
WithdrawRoot: common.Hash{},
RowConsumption: &gethTypes.RowConsumption{},
Transactions: nil,
WithdrawRoot: common.Hash{},
},
},
}
@@ -141,9 +140,8 @@ func testImportDefaultL1GasPriceDueToL1GasPriceSpike(t *testing.T) {
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
Transactions: nil,
WithdrawRoot: common.Hash{},
RowConsumption: &gethTypes.RowConsumption{},
Transactions: nil,
WithdrawRoot: common.Hash{},
},
},
}

View File

@@ -19,7 +19,7 @@ func testProcessStart(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
rollupApp.RunApp(t, cutils.GasOracleApp, "--genesis", "../conf/genesis.json")
rollupApp.RunApp(t, cutils.RollupRelayerApp, "--genesis", "../conf/genesis.json", "--min-codec-version", "4")
rollupApp.RunApp(t, cutils.RollupRelayerApp, "--genesis", "../conf/genesis.json", "--min-codec-version", "7")
rollupApp.WaitExit()
}
@@ -36,7 +36,7 @@ func testProcessStartEnableMetrics(t *testing.T) {
port, err = rand.Int(rand.Reader, big.NewInt(10000))
assert.NoError(t, err)
svrPort = strconv.FormatInt(port.Int64()+30000, 10)
rollupApp.RunApp(t, cutils.RollupRelayerApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort, "--genesis", "../conf/genesis.json", "--min-codec-version", "4")
rollupApp.RunApp(t, cutils.RollupRelayerApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort, "--genesis", "../conf/genesis.json", "--min-codec-version", "7")
rollupApp.WaitExit()
}

View File

@@ -18,7 +18,6 @@ import (
"scroll-tech/common/database"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/relayer"
@@ -55,170 +54,6 @@ func testCommitAndFinalizeGenesisBatch(t *testing.T) {
assert.Equal(t, types.RollupFinalized, types.RollupStatus(batch.RollupStatus))
}
func testCommitBatchAndFinalizeBundleCodecV4V5V6(t *testing.T) {
db := setupDB(t)
prepareContracts(t)
euclidTime := uint64(3)
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: &euclidTime}
// Create L2Relayer
l2Cfg := rollupApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, relayer.ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
// add some blocks to db
var blocks []*encoding.Block
for i := int64(0); i < 10; i++ {
header := gethTypes.Header{
Number: big.NewInt(i + 1),
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
Root: common.HexToHash("0x1"),
Time: uint64(i),
}
blocks = append(blocks, &encoding.Block{
Header: &header,
Transactions: nil,
WithdrawRoot: common.HexToHash("0x2"),
RowConsumption: &gethTypes.RowConsumption{},
})
}
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 100,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, chainConfig, db, nil)
bap := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 300,
MaxUncompressedBatchBytesSize: math.MaxUint64,
MaxChunksPerBatch: math.MaxInt32,
}, encoding.CodecV4, chainConfig, db, nil)
bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{
MaxBatchNumPerBundle: 1000000,
BundleTimeoutSec: 300,
}, encoding.CodecV4, chainConfig, db, nil)
l2BlockOrm := orm.NewL2Block(db)
batchOrm := orm.NewBatch(db)
bundleOrm := orm.NewBundle(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks[:5])
assert.NoError(t, err)
cp.TryProposeChunk()
bap.TryProposeBatch()
err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks[5:])
assert.NoError(t, err)
cp.TryProposeChunk()
bap.TryProposeBatch()
l2Relayer.ProcessPendingBatches()
// make sure that batches are committed before proposing bundles (as bundle proposing depends on batches being committed).
require.Eventually(t, func() bool {
batches, getErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
assert.NoError(t, getErr)
assert.Len(t, batches, 3)
batches = batches[1:]
for _, batch := range batches {
if types.RollupCommitted != types.RollupStatus(batch.RollupStatus) {
return false
}
}
// make sure that batches 1 and 2 have been committed in separate transactions
return batches[0].CommitTxHash != batches[1].CommitTxHash
}, 30*time.Second, time.Second)
bup.TryProposeBundle() // The proposed bundle contains two batches when codec version is codecv3.
batchProof := &message.Halo2BatchProof{
RawProof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
assert.NoError(t, err)
batches = batches[1:]
for _, batch := range batches {
err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, batchProof, 100)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
assert.NoError(t, err)
}
bundleProof := &message.Halo2BundleProof{
RawProof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0)
assert.NoError(t, err)
for _, bundle := range bundles {
err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, bundleProof, types.ProvingTaskVerified, 100)
assert.NoError(t, err)
}
assert.Eventually(t, func() bool {
l2Relayer.ProcessPendingBundles()
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
assert.NoError(t, err)
assert.Len(t, batches, 3)
batches = batches[1:]
for _, batch := range batches {
if types.RollupStatus(batch.RollupStatus) != types.RollupFinalized {
return false
}
assert.NotEmpty(t, batch.FinalizeTxHash)
receipt, getErr := l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
assert.NoError(t, getErr)
assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
}
bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0)
assert.NoError(t, err)
assert.Len(t, bundles, 1)
bundle := bundles[0]
if types.RollupStatus(bundle.RollupStatus) != types.RollupFinalized {
return false
}
assert.NotEmpty(t, bundle.FinalizeTxHash)
receipt, err := l1Client.TransactionReceipt(context.Background(), common.HexToHash(bundle.FinalizeTxHash))
assert.NoError(t, err)
assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
batches, err = batchOrm.GetBatches(context.Background(), map[string]interface{}{"bundle_hash": bundle.Hash}, nil, 0)
assert.NoError(t, err)
assert.Len(t, batches, 2)
for _, batch := range batches {
assert.Equal(t, batch.RollupStatus, bundle.RollupStatus)
assert.Equal(t, bundle.FinalizeTxHash, batch.FinalizeTxHash)
}
return true
}, 30*time.Second, time.Second)
l2Relayer.StopSenders()
database.CloseDB(db)
}
func testCommitBatchAndFinalizeBundleCodecV7(t *testing.T) {
db := setupDB(t)
@@ -275,29 +110,22 @@ func testCommitBatchAndFinalizeBundleCodecV7(t *testing.T) {
}
blocks = append(blocks, &encoding.Block{
Header: &header,
Transactions: transactions,
WithdrawRoot: common.HexToHash("0x2"),
RowConsumption: &gethTypes.RowConsumption{},
Header: &header,
Transactions: transactions,
WithdrawRoot: common.HexToHash("0x2"),
})
parentHash = header.Hash()
}
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 100,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
MaxUncompressedBatchBytesSize: math.MaxUint64,
MaxBlockNumPerChunk: 100,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: 300,
}, encoding.CodecV7, chainConfig, db, nil)
bap := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 300,
MaxUncompressedBatchBytesSize: math.MaxUint64,
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: 300,
}, encoding.CodecV7, chainConfig, db, nil)
bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{

zkvm-prover/Cargo.lock (generated, 510 lines changed)

File diff suppressed because it is too large

View File

@@ -18,7 +18,7 @@ serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116"
futures = "0.3.30"
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.3.0", package = "scroll-zkvm-prover" }
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.4.2", package = "scroll-zkvm-prover" }
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", branch = "main", features = [
@@ -51,28 +51,29 @@ openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gp
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
"nightly-features",
], tag = "v0.1.1" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
], tag = "v0.2.0" }
p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }