Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-12 15:38:18 -05:00

Compare commits: libzkp/deb ... fix/handle (21 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 0e6f9d14f5 |  |
|  | 96d1a247a1 |  |
|  | 1985e54ab3 |  |
|  | bfc0fdd7ce |  |
|  | 426c57a5fa |  |
|  | b7fdf48c30 |  |
|  | ad0c918944 |  |
|  | 1098876183 |  |
|  | 9e520e7769 |  |
|  | de7f6e56a9 |  |
|  | 3b323198dc |  |
|  | c11e0283e8 |  |
|  | a5a7844646 |  |
|  | 7ff5b190ec |  |
|  | b297edd28d |  |
|  | 47c85d4983 |  |
|  | 1552e98b79 |  |
|  | a65b3066a3 |  |
|  | 1f2b397bbd |  |
|  | ae791a0714 |  |
|  | c012f7132d |  |
.github/workflows/common.yml (vendored): 2 changed lines
@@ -29,7 +29,7 @@ jobs:
     steps:
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2025-02-14
+          toolchain: nightly-2025-08-18
           override: true
           components: rustfmt, clippy
       - name: Install Go
.github/workflows/coordinator.yml (vendored): 2 changed lines
@@ -33,7 +33,7 @@ jobs:
     steps:
      - uses: actions-rs/toolchain@v1
        with:
-         toolchain: nightly-2025-02-14
+         toolchain: nightly-2025-08-18
          override: true
          components: rustfmt, clippy
      - name: Install Go
.github/workflows/docker.yml (vendored): 18 changed lines
@@ -51,9 +51,7 @@ jobs:
          push: true
          tags: |
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

  rollup_relayer:
    runs-on:
@@ -97,9 +95,7 @@ jobs:
          push: true
          tags: |
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

  blob_uploader:
    runs-on:
@@ -143,9 +139,7 @@ jobs:
          push: true
          tags: |
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

  rollup-db-cli:
    runs-on:
@@ -189,9 +183,7 @@ jobs:
          push: true
          tags: |
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

  bridgehistoryapi-fetcher:
    runs-on:
@@ -235,9 +227,7 @@ jobs:
          push: true
          tags: |
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

  bridgehistoryapi-api:
    runs-on:
@@ -281,9 +271,7 @@ jobs:
          push: true
          tags: |
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

  bridgehistoryapi-db-cli:
    runs-on:
@@ -327,9 +315,7 @@ jobs:
          push: true
          tags: |
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

  coordinator-api:
    runs-on:
@@ -372,9 +358,7 @@ jobs:
          push: true
          tags: |
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

  coordinator-cron:
    runs-on:
@@ -418,6 +402,4 @@ jobs:
          push: true
          tags: |
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
.github/workflows/intermediate-docker.yml (vendored): 9 changed lines
@@ -22,11 +22,9 @@ on:
        required: true
        type: choice
        options:
-         - nightly-2023-12-03
-         - nightly-2022-12-10
          - 1.86.0
-         - nightly-2025-02-14
-       default: "nightly-2023-12-03"
+         - nightly-2025-08-18
+       default: "nightly-2025-08-18"
      PYTHON_VERSION:
        description: "Python version"
        required: false
@@ -41,7 +39,8 @@ on:
        options:
          - "11.7.1"
          - "12.2.2"
-       default: "11.7.1"
+         - "12.9.1"
+       default: "12.9.1"
      CARGO_CHEF_TAG:
        description: "Cargo chef version"
        required: true
Cargo.lock (generated): 2428 changed lines (file diff suppressed because it is too large)
Cargo.toml: 43 changed lines
@@ -14,15 +14,17 @@ edition = "2021"
 homepage = "https://scroll.io"
 readme = "README.md"
 repository = "https://github.com/scroll-tech/scroll"
-version = "4.5.8"
+version = "4.5.47"

 [workspace.dependencies]
-scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1", package = "scroll-zkvm-prover" }
-scroll-zkvm-verifier-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1", package = "scroll-zkvm-verifier" }
-scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1" }
+# include compatiblity fixing from "fix/coordinator"
+scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "a71dd2b" }
+scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "a71dd2b" }
+scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "a71dd2b" }

-sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/openvm-1.3", features = ["scroll"] }
-sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/openvm-1.3" }
+sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "master", features = ["scroll", "rkyv"] }
+sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "master" }
+sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "master", features = ["scroll"] }

 metrics = "0.23.0"
 metrics-util = "0.17"
@@ -30,7 +32,7 @@ metrics-tracing-context = "0.16.0"

 anyhow = "1.0"
 alloy = { version = "1", default-features = false }
-alloy-primitives = { version = "1.2", default-features = false, features = ["tiny-keccak"] }
+alloy-primitives = { version = "1.3", default-features = false, features = ["tiny-keccak"] }
 # also use this to trigger "serde" feature for primitives
 alloy-serde = { version = "1", default-features = false }

@@ -46,21 +48,20 @@ once_cell = "1.20"
 base64 = "0.22"

 [patch.crates-io]
-revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
-revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
-revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
-revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
-revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
-revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
-revm-handler = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
-revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
-revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
-revm-precompile = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
-revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
-revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
+revm = { git = "https://github.com/scroll-tech/revm" }
+revm-bytecode = { git = "https://github.com/scroll-tech/revm" }
+revm-context = { git = "https://github.com/scroll-tech/revm" }
+revm-context-interface = { git = "https://github.com/scroll-tech/revm" }
+revm-database = { git = "https://github.com/scroll-tech/revm" }
+revm-database-interface = { git = "https://github.com/scroll-tech/revm" }
+revm-handler = { git = "https://github.com/scroll-tech/revm" }
+revm-inspector = { git = "https://github.com/scroll-tech/revm" }
+revm-interpreter = { git = "https://github.com/scroll-tech/revm" }
+revm-precompile = { git = "https://github.com/scroll-tech/revm" }
+revm-primitives = { git = "https://github.com/scroll-tech/revm" }
+revm-state = { git = "https://github.com/scroll-tech/revm" }

 ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.15.0" }
-alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v1.2.0" }
+alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "feat/rkyv" }

 [profile.maxperf]
 inherits = "release"
Makefile: 2 changed lines
@@ -1,6 +1,6 @@
 .PHONY: fmt dev_docker build_test_docker run_test_docker clean update

-L2GETH_TAG=scroll-v5.8.23
+L2GETH_TAG=scroll-v5.9.7

 help: ## Display this help message
 	@grep -h \
@@ -10,15 +10,15 @@ require (
 	github.com/go-redis/redis/v8 v8.11.5
 	github.com/pressly/goose/v3 v3.16.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9
-	github.com/stretchr/testify v1.9.0
+	github.com/scroll-tech/da-codec v0.1.3-0.20250826112206-b4cce5c5d178
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20251017054300-9aa8b3f38f63
+	github.com/stretchr/testify v1.10.0
 	github.com/urfave/cli/v2 v2.25.7
 	golang.org/x/sync v0.11.0
 	gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
 )

-replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9 // It's a hotfix for the header hash incompatibility issue, pls change this with caution
+replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20251017081611-2bc7a5482dcc // It's a hotfix for the header hash incompatibility issue, pls change this with caution

 require (
 	dario.cat/mergo v1.0.0 // indirect
@@ -30,10 +30,10 @@ require (
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
 	github.com/chenzhuoyu/iasm v0.9.0 // indirect
-	github.com/consensys/bavard v0.1.13 // indirect
-	github.com/consensys/gnark-crypto v0.13.0 // indirect
+	github.com/consensys/bavard v0.1.27 // indirect
+	github.com/consensys/gnark-crypto v0.16.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
-	github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
+	github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
 	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
@@ -41,7 +41,7 @@ require (
 	github.com/docker/docker v26.1.0+incompatible // indirect
 	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/edsrzf/mmap-go v1.0.0 // indirect
-	github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
+	github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
 	github.com/fjl/memsize v0.0.2 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/gabriel-vasile/mimetype v1.4.2 // indirect
@@ -98,7 +98,7 @@ require (
 	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
 	github.com/sourcegraph/conc v0.3.0 // indirect
 	github.com/status-im/keycard-go v0.2.0 // indirect
-	github.com/supranational/blst v0.3.13 // indirect
+	github.com/supranational/blst v0.3.15 // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
 	github.com/tklauser/go-sysconf v0.3.14 // indirect
 	github.com/tklauser/numcpus v0.9.0 // indirect
@@ -110,7 +110,7 @@ require (
 	go.opentelemetry.io/otel/trace v1.24.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/arch v0.5.0 // indirect
-	golang.org/x/crypto v0.24.0 // indirect
+	golang.org/x/crypto v0.32.0 // indirect
 	golang.org/x/net v0.25.0 // indirect
 	golang.org/x/sys v0.30.0 // indirect
 	golang.org/x/text v0.21.0 // indirect
@@ -53,16 +53,16 @@ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ
 github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
 github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
 github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
-github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
-github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
-github.com/consensys/gnark-crypto v0.13.0 h1:VPULb/v6bbYELAPTDFINEVaMTTybV5GLxDdcjnS+4oc=
-github.com/consensys/gnark-crypto v0.13.0/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o=
+github.com/consensys/bavard v0.1.27 h1:j6hKUrGAy/H+gpNrpLU3I26n1yc+VMGmd6ID5+gAhOs=
+github.com/consensys/bavard v0.1.27/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
+github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo=
+github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
 github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
 github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
 github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
-github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
+github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
+github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
 github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -88,8 +88,8 @@ github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T
 github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E=
 github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
 github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
-github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
-github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
+github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
+github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
 github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
 github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -214,8 +214,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
-github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
+github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
+github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
 github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
 github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
@@ -309,10 +309,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6 h1:vb2XLvQwCf+F/ifP6P/lfeiQrHY6+Yb/E3R4KHXLqSE=
-github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9 h1:u371VK8eOU2Z/0SVf5KDI3eJc8msHSpJbav4do/8n38=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
+github.com/scroll-tech/da-codec v0.1.3-0.20250826112206-b4cce5c5d178 h1:4utngmJHXSOS5FoSdZhEV1xMRirpArbXvyoCZY9nYj0=
+github.com/scroll-tech/da-codec v0.1.3-0.20250826112206-b4cce5c5d178/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251017081611-2bc7a5482dcc h1:zSO+VMyzmEVezVuMC7jZ9PcvihwmrlKt+7cyv9rpq2s=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251017081611-2bc7a5482dcc/go.mod h1:zRa7CnS75mFdgp8IeMtZV/wCAlxPRT33Ek3+fFbBJVQ=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
@@ -341,10 +341,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
-github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
+github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
 github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
@@ -387,8 +387,8 @@ golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
 golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
-golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
+golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
+golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
 golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
 golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -38,6 +38,7 @@ type FetcherConfig struct {
 	BeaconNodeAPIEndpoint  string `json:"BeaconNodeAPIEndpoint"`
 	BlobScanAPIEndpoint    string `json:"BlobScanAPIEndpoint"`
 	BlockNativeAPIEndpoint string `json:"BlockNativeAPIEndpoint"`
+	AwsS3Endpoint          string `json:"AwsS3Endpoint"`
 }

 // RedisConfig redis config
@@ -39,6 +39,9 @@ type L1MessageFetcher struct {
 // NewL1MessageFetcher creates a new L1MessageFetcher instance.
 func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) (*L1MessageFetcher, error) {
 	blobClient := blob_client.NewBlobClients()
+	if cfg.AwsS3Endpoint != "" {
+		blobClient.AddBlobClient(blob_client.NewAwsS3Client(cfg.AwsS3Endpoint))
+	}
 	if cfg.BeaconNodeAPIEndpoint != "" {
 		beaconNodeClient, err := blob_client.NewBeaconNodeClient(cfg.BeaconNodeAPIEndpoint)
 		if err != nil {
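The hunk above registers the new S3 client before the beacon-node client, so registration order looks significant. A minimal sketch of an ordered-fallback aggregator of this shape follows; the blobSource interface and its GetBlob method are illustrative assumptions, since the real blob_client API is not shown in this diff:

```go
package main

import (
	"context"
	"fmt"
)

// blobSource is a hypothetical stand-in for the real blob client interface.
type blobSource interface {
	GetBlob(ctx context.Context, versionedHash string) ([]byte, error)
}

// blobClients tries each registered source in the order it was added,
// mirroring how NewBlobClients/AddBlobClient appear to be used above.
type blobClients struct{ sources []blobSource }

func (c *blobClients) AddBlobClient(s blobSource) { c.sources = append(c.sources, s) }

func (c *blobClients) GetBlob(ctx context.Context, versionedHash string) ([]byte, error) {
	for _, s := range c.sources {
		blob, err := s.GetBlob(ctx, versionedHash)
		if err == nil {
			return blob, nil // first source that answers wins
		}
	}
	return nil, fmt.Errorf("no blob source could serve %s", versionedHash)
}
```

Under that reading, adding AwsS3Endpoint first makes S3 the preferred blob source, with the beacon node and the other endpoints as fallbacks.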
@@ -15,7 +15,7 @@ require (
 	github.com/modern-go/reflect2 v1.0.2
 	github.com/orcaman/concurrent-map v1.0.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20251017054300-9aa8b3f38f63
 	github.com/stretchr/testify v1.10.0
 	github.com/testcontainers/testcontainers-go v0.30.0
 	github.com/testcontainers/testcontainers-go/modules/compose v0.30.0
@@ -64,7 +64,7 @@ require (
 	github.com/containerd/typeurl/v2 v2.1.1 // indirect
 	github.com/cpuguy83/dockercfg v0.3.1 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
-	github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
+	github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
 	github.com/distribution/reference v0.5.0 // indirect
@@ -79,7 +79,7 @@ require (
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/edsrzf/mmap-go v1.0.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.10.1 // indirect
-	github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
+	github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fjl/memsize v0.0.2 // indirect
 	github.com/fsnotify/fsevents v0.1.1 // indirect
@@ -184,7 +184,7 @@ require (
 	github.com/rjeczalik/notify v0.9.1 // indirect
 	github.com/rs/cors v1.7.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 // indirect
+	github.com/scroll-tech/da-codec v0.1.3-0.20250826112206-b4cce5c5d178 // indirect
 	github.com/scroll-tech/zktrie v0.8.4 // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
 	github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
@@ -198,7 +198,7 @@ require (
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/spf13/viper v1.4.0 // indirect
 	github.com/status-im/keycard-go v0.2.0 // indirect
-	github.com/supranational/blst v0.3.13 // indirect
+	github.com/supranational/blst v0.3.15 // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
 	github.com/theupdateframework/notary v0.7.0 // indirect
 	github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 // indirect
@@ -155,8 +155,8 @@ github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoY
 github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
 github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
-github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
+github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
+github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
@@ -214,8 +214,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
 github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
 github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
 github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
-github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
-github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
+github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
+github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
@@ -636,10 +636,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
-github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
+github.com/scroll-tech/da-codec v0.1.3-0.20250826112206-b4cce5c5d178 h1:4utngmJHXSOS5FoSdZhEV1xMRirpArbXvyoCZY9nYj0=
+github.com/scroll-tech/da-codec v0.1.3-0.20250826112206-b4cce5c5d178/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251017054300-9aa8b3f38f63 h1:xuqdhD4w/zcI5T8Ty1wHvqB75P2HNg3jTH/kUEHGt9Y=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251017054300-9aa8b3f38f63/go.mod h1:zRa7CnS75mFdgp8IeMtZV/wCAlxPRT33Ek3+fFbBJVQ=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
@@ -707,8 +707,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
-github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
+github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
+github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
 github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E=
@@ -135,10 +135,18 @@ type BlockContextV2 struct {
 	NumL1Msgs uint16 `json:"num_l1_msgs"`
 }

+// Metric data carried with OpenVMProof
+type OpenVMProofStat struct {
+	TotalCycle         uint64 `json:"total_cycles"`
+	ExecutionTimeMills uint64 `json:"execution_time_mills"`
+	ProvingTimeMills   uint64 `json:"proving_time_mills"`
+}
+
 // Proof for flatten VM proof
 type OpenVMProof struct {
-	Proof        []byte `json:"proofs"`
-	PublicValues []byte `json:"public_values"`
+	Proof        []byte           `json:"proofs"`
+	PublicValues []byte           `json:"public_values"`
+	Stat         *OpenVMProofStat `json:"stat,omitempty"`
 }

 // Proof for flatten EVM proof
@@ -150,7 +158,8 @@ type OpenVMEvmProof struct {
 // OpenVMChunkProof includes the proof info that are required for chunk verification and rollup.
 type OpenVMChunkProof struct {
 	MetaData struct {
-		ChunkInfo *ChunkInfo `json:"chunk_info"`
+		ChunkInfo    *ChunkInfo `json:"chunk_info"`
+		TotalGasUsed uint64     `json:"chunk_total_gas"`
 	} `json:"metadata"`

 	VmProof *OpenVMProof `json:"proof"`
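Because the new Stat field is a pointer tagged `omitempty`, proofs produced by older provers (which carry no "stat" key) still decode cleanly, and re-encoding them does not introduce the key. A minimal round-trip sketch using the struct shapes from the hunk above:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the OpenVMProofStat / OpenVMProof shapes introduced in the diff.
type OpenVMProofStat struct {
	TotalCycle         uint64 `json:"total_cycles"`
	ExecutionTimeMills uint64 `json:"execution_time_mills"`
	ProvingTimeMills   uint64 `json:"proving_time_mills"`
}

type OpenVMProof struct {
	Proof        []byte           `json:"proofs"`
	PublicValues []byte           `json:"public_values"`
	Stat         *OpenVMProofStat `json:"stat,omitempty"`
}

func main() {
	// A payload from an older prover: no "stat" key at all.
	legacy := []byte(`{"proofs":"AQI=","public_values":"AwQ="}`)
	var p OpenVMProof
	if err := json.Unmarshal(legacy, &p); err != nil {
		panic(err)
	}
	// Stat stays nil, and omitempty keeps it out of the re-encoded JSON.
	out, _ := json.Marshal(p)
	fmt.Println(p.Stat == nil, string(out)) // true {"proofs":"AQI=","public_values":"AwQ="}
}
```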
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )

-var tag = "v4.5.38"
+var tag = "v4.6.1"

 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
@@ -34,6 +34,14 @@ coordinator_cron:
 coordinator_tool:
 	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool

+localsetup: coordinator_api ## Local setup: build coordinator_api, copy config, and setup releases
+	mkdir -p build/bin/conf
+	@echo "Copying configuration files..."
+	cp -r $(PWD)/conf/config.json $(PWD)/build/bin/conf/config.template.json
+	@echo "Setting up releases..."
+	cd $(PWD)/build && bash setup_releases.sh
+
 #coordinator_api_skip_libzkp:
 #	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
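The new localsetup target ties this compare's pieces together: it builds coordinator_api, copies conf/config.json to build/bin/conf/config.template.json, and then runs the setup_releases.sh script added below, which reads the verifier entries back out of that copied template.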
coordinator/build/setup_releases.sh (new file): 63 lines
@@ -0,0 +1,63 @@
#!/bin/bash

# release version
if [ -z "${SCROLL_ZKVM_VERSION}" ]; then
    echo "SCROLL_ZKVM_VERSION not set"
    exit 1
fi

# set ASSET_DIR by reading from config.json
CONFIG_FILE="bin/conf/config.template.json"
if [ ! -f "$CONFIG_FILE" ]; then
    echo "Config file $CONFIG_FILE not found"
    exit 1
fi

# get the number of verifiers in the array
VERIFIER_COUNT=$(jq -r '.prover_manager.verifier.verifiers | length' "$CONFIG_FILE")

if [ "$VERIFIER_COUNT" = "null" ] || [ "$VERIFIER_COUNT" -eq 0 ]; then
    echo "No verifiers found in config file"
    exit 1
fi

echo "Found $VERIFIER_COUNT verifier(s) in config"

# iterate through each verifier entry
for ((i=0; i<$VERIFIER_COUNT; i++)); do
    # extract assets_path for current verifier
    ASSETS_PATH=$(jq -r ".prover_manager.verifier.verifiers[$i].assets_path" "$CONFIG_FILE")
    FORK_NAME=$(jq -r ".prover_manager.verifier.verifiers[$i].fork_name" "$CONFIG_FILE")

    if [ "$ASSETS_PATH" = "null" ]; then
        echo "Warning: Could not find assets_path for verifier $i, skipping..."
        continue
    fi

    echo "Processing verifier $i ($FORK_NAME): assets_path=$ASSETS_PATH"

    # check if it's an absolute path (starts with /)
    if [[ "$ASSETS_PATH" = /* ]]; then
        # absolute path, use as is
        ASSET_DIR="$ASSETS_PATH"
    else
        # relative path, prefix with "bin/"
        ASSET_DIR="bin/$ASSETS_PATH"
    fi

    echo "Using ASSET_DIR: $ASSET_DIR"

    # create directory if it doesn't exist
    mkdir -p "$ASSET_DIR"

    # assets for verifier-only mode
    echo "Downloading assets for $FORK_NAME to $ASSET_DIR..."
    wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/verifier.bin -O ${ASSET_DIR}/verifier.bin
    wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/root_verifier_vk -O ${ASSET_DIR}/root_verifier_vk
    wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/openVmVk.json -O ${ASSET_DIR}/openVmVk.json

    echo "Completed downloading assets for $FORK_NAME"
    echo "---"
done

echo "All verifier assets downloaded successfully"
@@ -12,7 +12,7 @@
     {
       "assets_path": "assets",
      "fork_name": "euclidV2"
     },
     {
       "assets_path": "assets",
       "fork_name": "feynman"
@@ -9,8 +9,8 @@ require (
 	github.com/google/uuid v1.6.0
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7
+	github.com/scroll-tech/da-codec v0.1.3-0.20250826112206-b4cce5c5d178
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20251017054300-9aa8b3f38f63
 	github.com/shopspring/decimal v1.3.1
 	github.com/stretchr/testify v1.10.0
 	github.com/urfave/cli/v2 v2.25.7
@@ -54,11 +54,11 @@ require (
 	github.com/consensys/bavard v0.1.29 // indirect
 	github.com/consensys/gnark-crypto v0.16.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
-	github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
+	github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
 	github.com/edsrzf/mmap-go v1.0.0 // indirect
-	github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
+	github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
 	github.com/fjl/memsize v0.0.2 // indirect
 	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
 	github.com/go-ole/go-ole v1.3.0 // indirect
@@ -92,7 +92,7 @@ require (
 	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
 	github.com/sourcegraph/conc v0.3.0 // indirect
 	github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
-	github.com/supranational/blst v0.3.13 // indirect
+	github.com/supranational/blst v0.3.15 // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
 	github.com/tklauser/go-sysconf v0.3.14 // indirect
 	github.com/tklauser/numcpus v0.9.0 // indirect
@@ -47,8 +47,8 @@ github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw
 github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
 github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
-github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
+github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
+github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -59,8 +59,8 @@ github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vs
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
 github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
-github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
-github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
+github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
+github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
 github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
 github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -253,10 +253,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6 h1:vb2XLvQwCf+F/ifP6P/lfeiQrHY6+Yb/E3R4KHXLqSE=
-github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7 h1:1rN1qocsQlOyk1VCpIEF1J5pfQbLAi1pnMZSLQS37jQ=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
+github.com/scroll-tech/da-codec v0.1.3-0.20250826112206-b4cce5c5d178 h1:4utngmJHXSOS5FoSdZhEV1xMRirpArbXvyoCZY9nYj0=
+github.com/scroll-tech/da-codec v0.1.3-0.20250826112206-b4cce5c5d178/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251017054300-9aa8b3f38f63 h1:xuqdhD4w/zcI5T8Ty1wHvqB75P2HNg3jTH/kUEHGt9Y=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251017054300-9aa8b3f38f63/go.mod h1:zRa7CnS75mFdgp8IeMtZV/wCAlxPRT33Ek3+fFbBJVQ=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
@@ -282,8 +282,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
-github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
+github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
+github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
 github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
@@ -57,14 +57,16 @@ type Config struct {

 // AssetConfig contain assets configurated for each fork, the defaul vkfile name is "OpenVmVk.json".
 type AssetConfig struct {
-	AssetsPath string `json:"assets_path"`
-	ForkName   string `json:"fork_name"`
-	Vkfile     string `json:"vk_file,omitempty"`
+	AssetsPath       string `json:"assets_path"`
+	ForkName         string `json:"fork_name"`
+	Vkfile           string `json:"vk_file,omitempty"`
+	MinProverVersion string `json:"min_prover_version,omitempty"`
 }

 // VerifierConfig load zk verifier config.
 type VerifierConfig struct {
 	MinProverVersion string        `json:"min_prover_version"`
+	Features         string        `json:"features,omitempty"`
 	Verifiers        []AssetConfig `json:"verifiers"`
 }
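With this change each verifier entry can carry its own min_prover_version floor, while the top-level one remains for existing configs. A minimal decoding sketch using the struct shapes above; the JSON literal and its version strings are illustrative, not taken from the repo:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type AssetConfig struct {
	AssetsPath       string `json:"assets_path"`
	ForkName         string `json:"fork_name"`
	Vkfile           string `json:"vk_file,omitempty"`
	MinProverVersion string `json:"min_prover_version,omitempty"`
}

type VerifierConfig struct {
	MinProverVersion string        `json:"min_prover_version"`
	Features         string        `json:"features,omitempty"`
	Verifiers        []AssetConfig `json:"verifiers"`
}

func main() {
	raw := `{
		"min_prover_version": "v4.4.45",
		"verifiers": [
			{"assets_path": "assets", "fork_name": "euclidV2"},
			{"assets_path": "assets", "fork_name": "feynman", "min_prover_version": "v4.5.0"}
		]
	}`
	var cfg VerifierConfig
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	for _, v := range cfg.Verifiers {
		// Entries without their own floor decode to "", which the login
		// logic below treats as "no per-fork restriction".
		fmt.Printf("%s -> min prover %q\n", v.ForkName, v.MinProverVersion)
	}
}
```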
@@ -23,7 +23,8 @@ import (

 // GetTaskController the get prover task api controller
 type GetTaskController struct {
-	proverTasks map[message.ProofType]provertask.ProverTask
+	proverTasks       map[message.ProofType]provertask.ProverTask
+	proverTaskManager *provertask.ProverTaskManager

 	getTaskAccessCounter *prometheus.CounterVec

@@ -32,12 +33,15 @@ type GetTaskController struct {

 // NewGetTaskController create a get prover task controller
 func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, verifier *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
+	proverTaskManager := provertask.NewProverTaskManager(db)
+
 	chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, verifier.ChunkVk, reg)
 	batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, verifier.BatchVk, reg)
 	bundleProverTask := provertask.NewBundleProverTask(cfg, chainCfg, db, verifier.BundleVk, reg)

 	ptc := &GetTaskController{
-		proverTasks: make(map[message.ProofType]provertask.ProverTask),
+		proverTasks:       make(map[message.ProofType]provertask.ProverTask),
+		proverTaskManager: proverTaskManager,
 		getTaskAccessCounter: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Name: "coordinator_get_task_access_count",
 			Help: "Multi dimensions get task counter.",
@@ -99,7 +103,19 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
 		}
 	}

-	proofType := ptc.proofType(&getTaskParameter)
+	assigned, err := ptc.proverTaskManager.CheckParameter(ctx)
+	if err != nil {
+		nerr := fmt.Errorf("check prover task parameter failed, error:%w", err)
+		types.RenderFailure(ctx, types.ErrCoordinatorGetTaskFailure, nerr)
+		return
+	}
+
+	var proofType message.ProofType
+	if assigned != nil {
+		proofType = message.ProofType(assigned.TaskType)
+	} else {
+		proofType = ptc.proofType(&getTaskParameter)
+	}
 	proverTask, isExist := ptc.proverTasks[proofType]
 	if !isExist {
 		nerr := fmt.Errorf("parameter wrong proof type:%v", proofType)
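The net effect of this hunk: if a prover still has an outstanding assignment, the coordinator re-issues a task of that same type instead of honoring the requested type, steering a reconnecting prover back to its unfinished work. A pure restatement of the selection rule, assuming message.ProofType is an integer enum (the diff converts assigned.TaskType with a plain conversion):

```go
package main

import "fmt"

// ProofType mirrors message.ProofType (assumed to be an integer enum).
type ProofType int16

// ProverTask is a trimmed stand-in for orm.ProverTask; only TaskType matters here.
type ProverTask struct{ TaskType int16 }

// selectProofType restates the rule from GetTasks above: an outstanding
// assignment, if any, overrides whatever task type the prover requested.
func selectProofType(assigned *ProverTask, requested ProofType) ProofType {
	if assigned != nil {
		return ProofType(assigned.TaskType)
	}
	return requested
}

func main() {
	const chunk, batch = ProofType(1), ProofType(2)
	fmt.Println(selectProofType(nil, chunk))                      // 1: no outstanding work, honor the request
	fmt.Println(selectProofType(&ProverTask{TaskType: 2}, chunk)) // 2: steered back to the assigned task
}
```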
@@ -24,18 +24,16 @@ type LoginLogic struct {

 	openVmVks map[string]struct{}

-	proverVersionHardForkMap map[string][]string
+	proverVersionHardForkMap map[string]string
 }

 // NewLoginLogic new a LoginLogic
 func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
-	proverVersionHardForkMap := make(map[string][]string)
+	proverVersionHardForkMap := make(map[string]string)

-	var hardForks []string
 	for _, cfg := range cfg.ProverManager.Verifier.Verifiers {
-		hardForks = append(hardForks, cfg.ForkName)
+		proverVersionHardForkMap[cfg.ForkName] = cfg.MinProverVersion
 	}
-	proverVersionHardForkMap[cfg.ProverManager.Verifier.MinProverVersion] = hardForks

 	return &LoginLogic{
 		cfg: cfg,
@@ -101,9 +99,15 @@ func (l *LoginLogic) ProverHardForkName(login *types.LoginParameter) (string, er
 	}

 	proverVersion := proverVersionSplits[0]
-	if hardForkNames, ok := l.proverVersionHardForkMap[proverVersion]; ok {
-		return strings.Join(hardForkNames, ","), nil
+	var hardForkNames []string
+	for n, minVersion := range l.proverVersionHardForkMap {
+		if minVersion == "" || version.CheckScrollRepoVersion(proverVersion, minVersion) {
+			hardForkNames = append(hardForkNames, n)
+		}
+	}
+	if len(hardForkNames) == 0 {
+		return "", fmt.Errorf("invalid prover prover_version:%s", login.Message.ProverVersion)
 	}

-	return "", fmt.Errorf("invalid prover prover_version:%s", login.Message.ProverVersion)
+	return strings.Join(hardForkNames, ","), nil
 }
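The map's meaning is inverted here: instead of keying hard forks by an exact prover version, the login flow now grants a prover every fork whose per-fork minimum it satisfies, with an empty minimum meaning no floor. A minimal sketch of that rule; versionAtLeast is a hypothetical stand-in for version.CheckScrollRepoVersion, whose actual comparison lives in scroll-tech/common/version and is not shown in this diff:

```go
package main

import (
	"fmt"
	"sort"

	"golang.org/x/mod/semver"
)

// versionAtLeast is a hypothetical stand-in for version.CheckScrollRepoVersion:
// true when proverVersion >= minVersion. This sketch leans on golang.org/x/mod/semver.
func versionAtLeast(proverVersion, minVersion string) bool {
	return semver.Compare(proverVersion, minVersion) >= 0
}

// grantedForks mirrors the loop in ProverHardForkName above.
func grantedForks(forkMinVersions map[string]string, proverVersion string) []string {
	var names []string
	for fork, min := range forkMinVersions {
		if min == "" || versionAtLeast(proverVersion, min) {
			names = append(names, fork)
		}
	}
	sort.Strings(names) // map iteration order is random; sort for stable output
	return names
}

func main() {
	forks := map[string]string{"euclidV2": "", "feynman": "v4.5.0"}
	fmt.Println(grantedForks(forks, "v4.4.9")) // [euclidV2]
	fmt.Println(grantedForks(forks, "v4.6.1")) // [euclidV2 feynman]
}
```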
@@ -140,3 +140,27 @@ func DumpVk(forkName, filePath string) error {

 	return nil
 }
+
+// SetDynamicFeature sets dynamic feature flags that control libzkp runtime behavior
+func SetDynamicFeature(feats string) {
+	cFeats := goToCString(feats)
+	defer freeCString(cFeats)
+	C.set_dynamic_feature(cFeats)
+}
+
+// UniversalTaskCompatibilityFix calls the universal task compatibility fix function
+func UniversalTaskCompatibilityFix(taskJSON string) (string, error) {
+	cTaskJSON := goToCString(taskJSON)
+	defer freeCString(cTaskJSON)
+
+	resultPtr := C.univ_task_compatibility_fix(cTaskJSON)
+	if resultPtr == nil {
+		return "", fmt.Errorf("univ_task_compatibility_fix failed")
+	}
+
+	// Convert result to Go string and free C memory
+	result := C.GoString(resultPtr)
+	C.release_string(resultPtr)
+
+	return result, nil
+}
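Taken together, the two new exports let the coordinator toggle libzkp behavior at runtime and rewrite task payloads for provers that need the compatibility shim (the fixCompatibility path added to the batch, bundle, and chunk tasks below presumably routes through the second one). A minimal calling sketch; the flag string and task JSON are placeholders, since the diff does not show which feature flags libzkp recognizes:

```go
package main

import (
	"log"

	"scroll-tech/coordinator/internal/logic/libzkp"
)

func main() {
	// Push dynamic feature flags into libzkp once, at startup. The value
	// here is a made-up example; real values presumably come from the
	// verifier config's new "features" field.
	libzkp.SetDynamicFeature("example-flag")

	// Rewrite a universal task payload for an older prover. The input is
	// a placeholder; in the coordinator this is the serialized task message.
	fixed, err := libzkp.UniversalTaskCompatibilityFix(`{"task":"..."}`)
	if err != nil {
		log.Fatalf("compatibility fix failed: %v", err)
	}
	log.Printf("rewritten task: %s", fixed)
}
```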
@@ -54,4 +54,9 @@ char* gen_wrapped_proof(char* proof_json, char* metadata, char* vk, size_t vk_le
 // Release memory allocated for a string returned by gen_wrapped_proof
 void release_string(char* string_ptr);

+void set_dynamic_feature(const char* feats);
+
+// Universal task compatibility fix function
+char* univ_task_compatibility_fix(char* task_json);
+
 #endif /* LIBZKP_H */
@@ -39,15 +39,14 @@ type BatchProverTask struct {
 func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, expectedVk map[string][]byte, reg prometheus.Registerer) *BatchProverTask {
 	bp := &BatchProverTask{
 		BaseProverTask: BaseProverTask{
-			db:                 db,
-			cfg:                cfg,
-			chainCfg:           chainCfg,
-			expectedVk:         expectedVk,
-			blockOrm:           orm.NewL2Block(db),
-			chunkOrm:           orm.NewChunk(db),
-			batchOrm:           orm.NewBatch(db),
-			proverTaskOrm:      orm.NewProverTask(db),
-			proverBlockListOrm: orm.NewProverBlockList(db),
+			db:            db,
+			cfg:           cfg,
+			chainCfg:      chainCfg,
+			expectedVk:    expectedVk,
+			blockOrm:      orm.NewL2Block(db),
+			chunkOrm:      orm.NewChunk(db),
+			batchOrm:      orm.NewBatch(db),
+			proverTaskOrm: orm.NewProverTask(db),
 		},
 		batchTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Name: "coordinator_batch_get_task_total",
@@ -60,9 +59,9 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go

 // Assign load and assign batch tasks
 func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
-	taskCtx, err := bp.checkParameter(ctx)
-	if err != nil || taskCtx == nil {
-		return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
+	taskCtx := bp.checkParameter(ctx)
+	if taskCtx == nil {
+		return nil, fmt.Errorf("check prover task parameter missed")
 	}

 	maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
@@ -213,6 +212,14 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 		return nil, ErrCoordinatorInternalFailure
 	}
 	proverTask.Metadata = metadata
+
+	if isCompatibilityFixingVersion(taskCtx.ProverVersion) {
+		log.Info("Apply compatibility fixing for prover", "version", taskCtx.ProverVersion)
+		if err := fixCompatibility(taskMsg); err != nil {
+			log.Error("apply compatibility failure", "err", err)
+			return nil, ErrCoordinatorInternalFailure
+		}
+	}
 	}

 	// Store session info.
@@ -36,16 +36,15 @@ type BundleProverTask struct {
 func NewBundleProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, expectedVk map[string][]byte, reg prometheus.Registerer) *BundleProverTask {
 	bp := &BundleProverTask{
 		BaseProverTask: BaseProverTask{
-			db:                 db,
-			chainCfg:           chainCfg,
-			cfg:                cfg,
-			expectedVk:         expectedVk,
-			blockOrm:           orm.NewL2Block(db),
-			chunkOrm:           orm.NewChunk(db),
-			batchOrm:           orm.NewBatch(db),
-			bundleOrm:          orm.NewBundle(db),
-			proverTaskOrm:      orm.NewProverTask(db),
-			proverBlockListOrm: orm.NewProverBlockList(db),
+			db:            db,
+			chainCfg:      chainCfg,
+			cfg:           cfg,
+			expectedVk:    expectedVk,
+			blockOrm:      orm.NewL2Block(db),
+			chunkOrm:      orm.NewChunk(db),
+			batchOrm:      orm.NewBatch(db),
+			bundleOrm:     orm.NewBundle(db),
+			proverTaskOrm: orm.NewProverTask(db),
 		},
 		bundleTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Name: "coordinator_bundle_get_task_total",
@@ -58,9 +57,9 @@ func NewBundleProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *g

 // Assign load and assign batch tasks
 func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
-	taskCtx, err := bp.checkParameter(ctx)
-	if err != nil || taskCtx == nil {
-		return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
+	taskCtx := bp.checkParameter(ctx)
+	if taskCtx == nil {
+		return nil, fmt.Errorf("check prover task parameter missed")
 	}

 	maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
@@ -211,6 +210,14 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
 	// bundle proof require snark
 	taskMsg.UseSnark = true
 	proverTask.Metadata = metadata
+
+	if isCompatibilityFixingVersion(taskCtx.ProverVersion) {
+		log.Info("Apply compatibility fixing for prover", "version", taskCtx.ProverVersion)
+		if err := fixCompatibility(taskMsg); err != nil {
+			log.Error("apply compatibility failure", "err", err)
+			return nil, ErrCoordinatorInternalFailure
+		}
+	}
 	}

 	// Store session info.
@@ -36,14 +36,13 @@ type ChunkProverTask struct {
 func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, expectedVk map[string][]byte, reg prometheus.Registerer) *ChunkProverTask {
 	cp := &ChunkProverTask{
 		BaseProverTask: BaseProverTask{
-			db:                 db,
-			cfg:                cfg,
-			chainCfg:           chainCfg,
-			expectedVk:         expectedVk,
-			chunkOrm:           orm.NewChunk(db),
-			blockOrm:           orm.NewL2Block(db),
-			proverTaskOrm:      orm.NewProverTask(db),
-			proverBlockListOrm: orm.NewProverBlockList(db),
+			db:            db,
+			cfg:           cfg,
+			chainCfg:      chainCfg,
+			expectedVk:    expectedVk,
+			chunkOrm:      orm.NewChunk(db),
+			blockOrm:      orm.NewL2Block(db),
+			proverTaskOrm: orm.NewProverTask(db),
 		},
 		chunkTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Name: "coordinator_chunk_get_task_total",
@@ -56,9 +55,9 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go

 // Assign the chunk proof which need to prove
 func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
-	taskCtx, err := cp.checkParameter(ctx)
-	if err != nil || taskCtx == nil {
-		return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
+	taskCtx := cp.checkParameter(ctx)
+	if taskCtx == nil {
+		return nil, fmt.Errorf("check prover task parameter missed")
 	}

 	maxActiveAttempts := cp.cfg.ProverManager.ProversPerSession
@@ -14,6 +14,7 @@ import (
 	"gorm.io/gorm"

 	"scroll-tech/common/types/message"
+	"scroll-tech/common/version"

 	"scroll-tech/coordinator/internal/config"
 	"scroll-tech/coordinator/internal/logic/libzkp"
@@ -36,6 +37,81 @@ type ProverTask interface {
 	Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error)
 }

+// ProverTaskManager manage task which has been assigned
+type ProverTaskManager struct {
+	proverTaskOrm      *orm.ProverTask
+	proverBlockListOrm *orm.ProverBlockList
+}
+
+const proverTaskCtxKey = "prover_task_context_key"
+
+// NewProverTaskManager new a prover task manager
+func NewProverTaskManager(db *gorm.DB) *ProverTaskManager {
+	return &ProverTaskManager{
+		proverTaskOrm:      orm.NewProverTask(db),
+		proverBlockListOrm: orm.NewProverBlockList(db),
+	}
+}
+
+// CheckParameter checks whether the prover task parameters are legal
+func (b *ProverTaskManager) CheckParameter(ctx *gin.Context) (*orm.ProverTask, error) {
+	var ptc proverTaskContext
+	ptc.HardForkNames = make(map[string]struct{})
+
+	publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
+	if !publicKeyExist {
+		return nil, errors.New("get public key from context failed")
+	}
+	ptc.PublicKey = publicKey.(string)
+
+	proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
+	if !proverNameExist {
+		return nil, errors.New("get prover name from context failed")
+	}
+	ptc.ProverName = proverName.(string)
+
+	proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
+	if !proverVersionExist {
+		return nil, errors.New("get prover version from context failed")
+	}
+	ptc.ProverVersion = proverVersion.(string)
+
+	ProverProviderType, ProverProviderTypeExist := ctx.Get(coordinatorType.ProverProviderTypeKey)
+	if !ProverProviderTypeExist {
+		// for backward compatibility, set ProverProviderType as internal
+		ProverProviderType = float64(coordinatorType.ProverProviderTypeInternal)
+	}
+	ptc.ProverProviderType = uint8(ProverProviderType.(float64))
+
+	hardForkNamesStr, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
+	if !hardForkNameExist {
+		return nil, errors.New("get hard fork name from context failed")
+	}
+	hardForkNames := strings.Split(hardForkNamesStr.(string), ",")
+	for _, hardForkName := range hardForkNames {
+		ptc.HardForkNames[hardForkName] = struct{}{}
+	}
+
+	isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string))
+	if err != nil {
+		return nil, fmt.Errorf("failed to check whether the public key %s is blocked before assigning a chunk task, err: %w, proverName: %s, proverVersion: %s", publicKey, err, proverName, proverVersion)
+	}
+	if isBlocked {
+		return nil, fmt.Errorf("public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", publicKey, proverName, proverVersion)
+	}
+
+	assigned, err := b.proverTaskOrm.IsProverAssigned(ctx.Copy(), publicKey.(string))
+	if err != nil {
+		return nil, fmt.Errorf("failed to check if prover %s is assigned a task, err: %w", publicKey.(string), err)
+	}
+
+	ptc.hasAssignedTask = assigned
+
+	ctx.Set(proverTaskCtxKey, &ptc)
+
+	return assigned, nil
+}
+
 // BaseProverTask a base prover task which contain series functions
 type BaseProverTask struct {
 	cfg *config.Config
@@ -43,12 +119,12 @@ type BaseProverTask struct {
|
||||
db *gorm.DB
|
||||
expectedVk map[string][]byte
|
||||
|
||||
batchOrm *orm.Batch
|
||||
chunkOrm *orm.Chunk
|
||||
bundleOrm *orm.Bundle
|
||||
blockOrm *orm.L2Block
|
||||
proverTaskOrm *orm.ProverTask
|
||||
proverBlockListOrm *orm.ProverBlockList
|
||||
batchOrm *orm.Batch
|
||||
chunkOrm *orm.Chunk
|
||||
bundleOrm *orm.Bundle
|
||||
blockOrm *orm.L2Block
|
||||
|
||||
proverTaskOrm *orm.ProverTask
|
||||
}
|
||||
|
||||
type proverTaskContext struct {
|
||||
@@ -131,59 +207,13 @@ func (b *BaseProverTask) hardForkSanityCheck(ctx *gin.Context, taskCtx *proverTa
|
||||
}
|
||||
|
||||
// checkParameter check the prover task parameter illegal
|
||||
func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, error) {
|
||||
var ptc proverTaskContext
|
||||
ptc.HardForkNames = make(map[string]struct{})
|
||||
|
||||
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
|
||||
if !publicKeyExist {
|
||||
return nil, errors.New("get public key from context failed")
|
||||
}
|
||||
ptc.PublicKey = publicKey.(string)
|
||||
|
||||
proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
|
||||
if !proverNameExist {
|
||||
return nil, errors.New("get prover name from context failed")
|
||||
}
|
||||
ptc.ProverName = proverName.(string)
|
||||
|
||||
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
|
||||
if !proverVersionExist {
|
||||
return nil, errors.New("get prover version from context failed")
|
||||
}
|
||||
ptc.ProverVersion = proverVersion.(string)
|
||||
|
||||
ProverProviderType, ProverProviderTypeExist := ctx.Get(coordinatorType.ProverProviderTypeKey)
|
||||
if !ProverProviderTypeExist {
|
||||
// for backward compatibility, set ProverProviderType as internal
|
||||
ProverProviderType = float64(coordinatorType.ProverProviderTypeInternal)
|
||||
}
|
||||
ptc.ProverProviderType = uint8(ProverProviderType.(float64))
|
||||
|
||||
hardForkNamesStr, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
|
||||
if !hardForkNameExist {
|
||||
return nil, errors.New("get hard fork name from context failed")
|
||||
}
|
||||
hardForkNames := strings.Split(hardForkNamesStr.(string), ",")
|
||||
for _, hardForkName := range hardForkNames {
|
||||
ptc.HardForkNames[hardForkName] = struct{}{}
|
||||
func (b *BaseProverTask) checkParameter(ctx *gin.Context) *proverTaskContext {
|
||||
pctx, exist := ctx.Get(proverTaskCtxKey)
|
||||
if !exist {
|
||||
return nil
|
||||
}
|
||||
|
||||
isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check whether the public key %s is blocked before assigning a chunk task, err: %w, proverName: %s, proverVersion: %s", publicKey, err, proverName, proverVersion)
|
||||
}
|
||||
if isBlocked {
|
||||
return nil, fmt.Errorf("public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", publicKey, proverName, proverVersion)
|
||||
}
|
||||
|
||||
assigned, err := b.proverTaskOrm.IsProverAssigned(ctx.Copy(), publicKey.(string))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check if prover %s is assigned a task, err: %w", publicKey.(string), err)
|
||||
}
|
||||
|
||||
ptc.hasAssignedTask = assigned
|
||||
return &ptc, nil
|
||||
return pctx.(*proverTaskContext)
|
||||
}
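
Editor's note: the net effect of this refactor is that per-request validation moves out of BaseProverTask.checkParameter and into ProverTaskManager.CheckParameter, which caches the computed proverTaskContext on the gin context under proverTaskCtxKey; checkParameter then shrinks to a plain lookup. A minimal sketch of the intended wiring in Go, assuming CheckParameter is registered ahead of each Assign handler (the route path, registration site, and handler names below are illustrative, not part of this diff):

// Sketch only: how the new pieces are meant to compose.
func registerGetTask(router *gin.Engine, db *gorm.DB, getTaskHandler gin.HandlerFunc) {
	manager := provertask.NewProverTaskManager(db)
	checkMiddleware := func(c *gin.Context) {
		// Validate once; CheckParameter stores a *proverTaskContext on the gin
		// context for the Assign implementations to read via checkParameter.
		if _, err := manager.CheckParameter(c); err != nil {
			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		c.Next()
	}
	router.POST("/coordinator/v1/get_task", checkMiddleware, getTaskHandler)
}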

func (b *BaseProverTask) applyUniversal(schema *coordinatorType.GetTaskSchema) (*coordinatorType.GetTaskSchema, []byte, error) {
@@ -201,6 +231,23 @@ func (b *BaseProverTask) applyUniversal(schema *coordinatorType.GetTaskSchema) (
return schema, []byte(metadata), nil
}

const CompatibilityVersion = "4.5.43"

func isCompatibilityFixingVersion(ver string) bool {
return !version.CheckScrollRepoVersion(ver, CompatibilityVersion)
}

func fixCompatibility(schema *coordinatorType.GetTaskSchema) error {

fixedTask, err := libzkp.UniversalTaskCompatibilityFix(schema.TaskData)
if err != nil {
return err
}
schema.TaskData = fixedTask

return nil
}
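
Editor's note: this gate keys off a single cutover version. Provers at or above CompatibilityVersion understand the new universal task schema; older ones get their TaskData rewritten into the legacy form by fixCompatibility before dispatch. A sketch of the expected behaviour, assuming version.CheckScrollRepoVersion(ver, min) reports ver >= min (its exact semantics live in common/version and are not shown in this diff):

func gatingExamples() {
	cases := []struct {
		ver     string
		rewrite bool // true means fixCompatibility rewrites the payload
	}{
		{"v4.5.42", true},  // predates the cutover: legacy schema expected
		{"v4.5.43", false}, // first version speaking the universal schema natively
		{"v4.5.45", false}, // the version pinned by the integration tests below
	}
	for _, tc := range cases {
		if isCompatibilityFixingVersion(tc.ver) != tc.rewrite {
			panic("unexpected gating for " + tc.ver)
		}
	}
}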

func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
getTaskCounterInitOnce.Do(func() {
getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{

@@ -71,6 +71,9 @@ type ProofReceiverLogic struct {
validateFailureProverTaskStatusNotOk prometheus.Counter
validateFailureProverTaskTimeout prometheus.Counter
validateFailureProverTaskHaveVerifier prometheus.Counter
proverSpeed *prometheus.GaugeVec
provingTime prometheus.Gauge
evmCyclePerGas prometheus.Gauge

ChunkTask provertask.ProverTask
BundleTask provertask.ProverTask
@@ -79,6 +82,7 @@ type ProofReceiverLogic struct {

// NewSubmitProofReceiverLogic create a proof receiver logic
func NewSubmitProofReceiverLogic(cfg *config.ProverManager, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *ProofReceiverLogic {

return &ProofReceiverLogic{
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
@@ -133,6 +137,18 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, chainCfg *params.Cha
Name: "coordinator_validate_failure_submit_have_been_verifier",
Help: "Total number of submit proof validate failure proof have been verifier.",
}),
evmCyclePerGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "evm_circuit_cycle_per_gas",
Help: "VM cycles cost for a gas unit cost in evm execution",
}),
provingTime: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "chunk_proving_time",
Help: "Wall clock time for chunk proving in second",
}),
proverSpeed: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "prover_speed",
Help: "Cycle against running time of prover (in mhz)",
}, []string{"type", "phase"}),
}
}

@@ -204,12 +220,34 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
return unmarshalErr
}
success, verifyErr = m.verifier.VerifyChunkProof(chunkProof, hardForkName)
if stat := chunkProof.VmProof.Stat; stat != nil {
if g, _ := m.proverSpeed.GetMetricWithLabelValues("chunk", "exec"); g != nil && stat.ExecutionTimeMills > 0 {
g.Set(float64(stat.TotalCycle) / float64(stat.ExecutionTimeMills*1000))
}
if g, _ := m.proverSpeed.GetMetricWithLabelValues("chunk", "proving"); g != nil && stat.ProvingTimeMills > 0 {
g.Set(float64(stat.TotalCycle) / float64(stat.ProvingTimeMills*1000))
}
if chunkProof.MetaData.TotalGasUsed > 0 {
cycle_per_gas := float64(stat.TotalCycle) / float64(chunkProof.MetaData.TotalGasUsed)
m.evmCyclePerGas.Set(cycle_per_gas)
}
m.provingTime.Set(float64(stat.ProvingTimeMills) / 1000)
}

case message.ProofTypeBatch:
batchProof := &message.OpenVMBatchProof{}
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &batchProof); unmarshalErr != nil {
return unmarshalErr
}
success, verifyErr = m.verifier.VerifyBatchProof(batchProof, hardForkName)
if stat := batchProof.VmProof.Stat; stat != nil {
if g, _ := m.proverSpeed.GetMetricWithLabelValues("batch", "exec"); g != nil && stat.ExecutionTimeMills > 0 {
g.Set(float64(stat.TotalCycle) / float64(stat.ExecutionTimeMills*1000))
}
if g, _ := m.proverSpeed.GetMetricWithLabelValues("batch", "proving"); g != nil && stat.ProvingTimeMills > 0 {
g.Set(float64(stat.TotalCycle) / float64(stat.ProvingTimeMills*1000))
}
}
case message.ProofTypeBundle:
bundleProof := &message.OpenVMBundleProof{}
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &bundleProof); unmarshalErr != nil {
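
Editor's note on the units in the stat-reporting code added above: TotalCycle divided by (milliseconds * 1000) is cycles per microsecond, which is numerically the prover speed in MHz, matching the prover_speed gauge's help text; the proving-time gauge divides milliseconds by 1000 to report seconds. A worked example with made-up numbers:

func speedExample() {
	totalCycle := uint64(1_200_000_000) // cycles spent by the VM
	provingTimeMills := uint64(4_000)   // 4 s of wall-clock proving
	// cycles / (ms * 1000) == cycles per microsecond == MHz
	mhz := float64(totalCycle) / float64(provingTimeMills*1000) // 300.0
	seconds := float64(provingTimeMills) / 1000                 // 4.0
	fmt.Println(mhz, seconds)
}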

@@ -4,6 +4,7 @@ package verifier

import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io"
@@ -66,6 +67,9 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
return nil, err
}

if cfg.Features != "" {
libzkp.SetDynamicFeature(cfg.Features)
}
libzkp.InitVerifier(string(configBytes))

v := &Verifier{
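
Editor's note: the hunk above also threads a new features knob from the verifier config into libzkp before InitVerifier runs. Judging from set_dynamic_feature on the Rust side (later in this diff), the string is a colon-separated, case-insensitive flag list, and the only flag recognized in this change set is legacy_witness; unknown flags merely log a warning. A hypothetical config fragment, for illustration only (the JSON key for cfg.Features is assumed, not confirmed by this diff):

// verifier config (sketch): { "assets_path": "...", "features": "legacy_witness" }
if cfg.Features != "" {
	libzkp.SetDynamicFeature(cfg.Features) // e.g. switch witness encoding back to the legacy rkyv form
}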
@@ -129,6 +133,23 @@ const blocked_vks = `
D6YFHwTLZF/U2zpYJPQ3LwJZRm85yA5Vq2iFBqd3Mk4iwOUpS8sbOp3vg2+NDxhhKphgYpuUlykpdsoRhEt+cw==,
`

// decodeVkString tries to decode s as hex, and if that fails, as base64.
func decodeVkString(s string) ([]byte, error) {
// Try hex decoding first
if b, err := hex.DecodeString(s); err == nil {
return b, nil
}
// Fallback to base64 decoding
b, err := base64.StdEncoding.DecodeString(s)
if err != nil {
return nil, err
}
if len(b) == 0 {
return nil, fmt.Errorf("decode vk string %s fail (empty bytes)", s)
}
return b, nil
}
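
Editor's note: one subtlety of decodeVkString is that hex is tried first, so a base64 string that also happens to be valid hex (only [0-9a-fA-F], even length, no padding) would be decoded as hex; typical base64 vk dumps contain '+', '/', or '=' and so fall through to the base64 branch. An illustrative round trip (values made up):

func decodeExamples() {
	vk := []byte{0xde, 0xad, 0xbe, 0xef}
	fromHex, _ := decodeVkString(hex.EncodeToString(vk))                // "deadbeef": hex branch
	fromB64, _ := decodeVkString(base64.StdEncoding.EncodeToString(vk)) // "3q2+7w==": '+' forces the base64 branch
	fmt.Println(bytes.Equal(fromHex, vk), bytes.Equal(fromB64, vk))     // true true
}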

func (v *Verifier) loadOpenVMVks(cfg config.AssetConfig) error {

vkFileName := cfg.Vkfile
@@ -165,17 +186,17 @@ func (v *Verifier) loadOpenVMVks(cfg config.AssetConfig) error {
v.OpenVMVkMap[dump.Bundle] = struct{}{}
log.Info("Load vks", "from", cfg.AssetsPath, "chunk", dump.Chunk, "batch", dump.Batch, "bundle", dump.Bundle)

decodedBytes, err := base64.StdEncoding.DecodeString(dump.Chunk)
decodedBytes, err := decodeVkString(dump.Chunk)
if err != nil {
return err
}
v.ChunkVk[cfg.ForkName] = decodedBytes
decodedBytes, err = base64.StdEncoding.DecodeString(dump.Batch)
decodedBytes, err = decodeVkString(dump.Batch)
if err != nil {
return err
}
v.BatchVk[cfg.ForkName] = decodedBytes
decodedBytes, err = base64.StdEncoding.DecodeString(dump.Bundle)
decodedBytes, err = decodeVkString(dump.Bundle)
if err != nil {
return err
}

@@ -132,7 +132,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
func setEnv(t *testing.T) {
var err error

version.Version = "v4.4.89"
version.Version = "v4.5.45"

glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
glogger.Verbosity(log.LvlInfo)
@@ -234,7 +234,7 @@ func testGetTaskBlocked(t *testing.T) {
err := proverBlockListOrm.InsertProverPublicKey(context.Background(), chunkProver.proverName, chunkProver.publicKey())
assert.NoError(t, err)

expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", chunkProver.publicKey(), chunkProver.proverName, chunkProver.proverVersion)
expectedErr := fmt.Errorf("check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", chunkProver.publicKey(), chunkProver.proverName, chunkProver.proverVersion)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, errors.New(errMsg))
@@ -255,7 +255,7 @@ func testGetTaskBlocked(t *testing.T) {
assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
assert.Equal(t, expectedErr, errors.New(errMsg))

expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", batchProver.publicKey(), batchProver.proverName, batchProver.proverVersion)
expectedErr = fmt.Errorf("check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", batchProver.publicKey(), batchProver.proverName, batchProver.proverVersion)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, errors.New(errMsg))
@@ -584,7 +584,8 @@ func testTimeoutProof(t *testing.T) {
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 100, batch.Hash)
assert.NoError(t, err)
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{}, MetaData: struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
TotalGasUsed uint64 `json:"chunk_total_gas"`
}{ChunkInfo: &message.ChunkInfo{}}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)

@@ -1,45 +0,0 @@

[patch."https://github.com/openvm-org/openvm.git"]
openvm-build = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-circuit = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-continuations = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-instructions ={ git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-circuit = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-compiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-recursion = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-rv32im-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
openvm-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }

[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }

[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
"nightly-features",
], tag = "v0.2.1" }
p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }

11002 crates/gpu_override/Cargo.lock generated
File diff suppressed because it is too large
@@ -1,21 +0,0 @@

.PHONY: build update clean

ZKVM_COMMIT ?= freebuild
PLONKY3_GPU_VERSION=$(shell ./print_plonky3gpu_version.sh | sed -n '2p')
$(info PLONKY3_GPU_VERSION is ${PLONKY3_GPU_VERSION})

GIT_REV ?= $(shell git rev-parse --short HEAD)
GO_TAG ?= $(shell grep "var tag = " ../../common/version/version.go | cut -d "\"" -f2)
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION}
$(info ZK_GPU_VERSION is ${ZK_VERSION})

clean:
cargo clean -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock

# build gpu prover, never touch lock file
build:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock

# update Cargo.lock while override config has been updated
#update:
# GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock
@@ -1,15 +0,0 @@

#!/bin/bash

config_file=.cargo/config.toml
plonky3_gpu_path=$(grep 'path.*plonky3-gpu' "$config_file" | cut -d'"' -f2 | head -n 1)
plonky3_gpu_path=$(dirname "$plonky3_gpu_path")

if [ -z $plonky3_gpu_path ]; then
exit 0
else
pushd $plonky3_gpu_path
commit_hash=$(git log --pretty=format:%h -n 1)
echo "${commit_hash:0:7}"

popd
fi
@@ -13,6 +13,7 @@ libzkp = { path = "../libzkp" }
alloy = { workspace = true, features = ["provider-http", "transport-http", "reqwest", "reqwest-rustls-tls", "json-rpc"] }
sbv-primitives = { workspace = true, features = ["scroll"] }
sbv-utils = { workspace = true, features = ["scroll"] }
sbv-core = { workspace = true, features = ["scroll"] }

eyre.workspace = true

@@ -11,7 +11,7 @@ pub fn init(config: &str) -> eyre::Result<()> {
Ok(())
}

pub fn get_client() -> rpc_client::RpcClient<'static> {
pub fn get_client() -> impl libzkp::tasks::ChunkInterpreter {
GLOBAL_L2GETH_CLI
.get()
.expect("must has been inited")

@@ -1,5 +1,5 @@
use alloy::{
providers::{Provider, ProviderBuilder, RootProvider},
providers::{Provider, ProviderBuilder},
rpc::client::ClientBuilder,
transports::layers::RetryBackoffLayer,
};
@@ -49,13 +49,13 @@ pub struct RpcConfig {
/// so it can be run in block mode (i.e. inside dynamic library without a global entry)
pub struct RpcClientCore {
/// rpc prover
provider: RootProvider<Network>,
client: alloy::rpc::client::RpcClient,
rt: tokio::runtime::Runtime,
}

#[derive(Clone, Copy)]
pub struct RpcClient<'a> {
provider: &'a RootProvider<Network>,
pub struct RpcClient<'a, T: Provider<Network>> {
provider: T,
handle: &'a tokio::runtime::Handle,
}

@@ -75,80 +75,78 @@ impl RpcClientCore {
let retry_layer = RetryBackoffLayer::new(config.max_retry, config.backoff, config.cups);
let client = ClientBuilder::default().layer(retry_layer).http(rpc);

Ok(Self {
provider: ProviderBuilder::<_, _, Network>::default().connect_client(client),
rt,
})
Ok(Self { client, rt })
}

pub fn get_client(&self) -> RpcClient {
pub fn get_client(&self) -> RpcClient<'_, impl Provider<Network>> {
RpcClient {
provider: &self.provider,
provider: ProviderBuilder::<_, _, Network>::default()
.connect_client(self.client.clone()),
handle: self.rt.handle(),
}
}
}

impl ChunkInterpreter for RpcClient<'_> {
impl<T: Provider<Network>> ChunkInterpreter for RpcClient<'_, T> {
fn try_fetch_block_witness(
&self,
block_hash: sbv_primitives::B256,
prev_witness: Option<&sbv_primitives::types::BlockWitness>,
) -> Result<sbv_primitives::types::BlockWitness> {
prev_witness: Option<&sbv_core::BlockWitness>,
) -> Result<sbv_core::BlockWitness> {
async fn fetch_witness_async(
provider: &RootProvider<Network>,
provider: impl Provider<Network>,
block_hash: sbv_primitives::B256,
prev_witness: Option<&sbv_primitives::types::BlockWitness>,
) -> Result<sbv_primitives::types::BlockWitness> {
use sbv_utils::{rpc::ProviderExt, witness::WitnessBuilder};
prev_witness: Option<&sbv_core::BlockWitness>,
) -> Result<sbv_core::BlockWitness> {
use sbv_utils::rpc::ProviderExt;

let chain_id = provider.get_chain_id().await?;
let (chain_id, block_num, prev_state_root) = if let Some(w) = prev_witness {
(w.chain_id, w.header.number + 1, w.header.state_root)
} else {
let chain_id = provider.get_chain_id().await?;
let block = provider
.get_block_by_hash(block_hash)
.full()
.await?
.ok_or_else(|| eyre::eyre!("Block {block_hash} not found"))?;

let block = provider
.get_block_by_hash(block_hash)
.full()
.await?
.ok_or_else(|| eyre::eyre!("Block {block_hash} not found"))?;
let parent_block = provider
.get_block_by_hash(block.header.parent_hash)
.await?
.ok_or_else(|| {
eyre::eyre!(
"parent block for block {} should exist",
block.header.number
)
})?;

let number = block.header.number;
let parent_hash = block.header.parent_hash;
if number == 0 {
eyre::bail!("no number in header or use block 0");
}

let mut witness_builder = WitnessBuilder::new()
.block(block)
.chain_id(chain_id)
.execution_witness(provider.debug_execution_witness(number.into()).await?);

let prev_state_root = match prev_witness {
Some(witness) => {
if witness.header.number != number - 1 {
eyre::bail!(
"the ref witness is not the previous block, expected {} get {}",
number - 1,
witness.header.number,
);
}
witness.header.state_root
}
None => {
let parent_block = provider
.get_block_by_hash(parent_hash)
.await?
.expect("parent block should exist");

parent_block.header.state_root
}
(
chain_id,
block.header.number,
parent_block.header.state_root,
)
};
witness_builder = witness_builder.prev_state_root(prev_state_root);

Ok(witness_builder.build()?)
let req = provider
.dump_block_witness(block_num)
.with_chain_id(chain_id)
.with_prev_state_root(prev_state_root);

let witness = req
.send()
.await
.transpose()
.ok_or_else(|| eyre::eyre!("Block witness {block_num} not available"))??;

Ok(witness)
}

tracing::debug!("fetch witness for {block_hash}");
self.handle
.block_on(fetch_witness_async(self.provider, block_hash, prev_witness))
self.handle.block_on(fetch_witness_async(
&self.provider,
block_hash,
prev_witness,
))
}

fn try_fetch_storage_node(
@@ -156,7 +154,7 @@ impl ChunkInterpreter for RpcClient<'_> {
node_hash: sbv_primitives::B256,
) -> Result<sbv_primitives::Bytes> {
async fn fetch_storage_node_async(
provider: &RootProvider<Network>,
provider: impl Provider<Network>,
node_hash: sbv_primitives::B256,
) -> Result<sbv_primitives::Bytes> {
let ret = provider
@@ -168,7 +166,7 @@ impl ChunkInterpreter for RpcClient<'_> {

tracing::debug!("fetch storage node for {node_hash}");
self.handle
.block_on(fetch_storage_node_async(self.provider, node_hash))
.block_on(fetch_storage_node_async(&self.provider, node_hash))
}
}

@@ -194,10 +192,10 @@ mod tests {
let client_core = RpcClientCore::create(&config).expect("Failed to create RPC client");
let client = client_core.get_client();

// latest - 1 block in 2025.6.15
// latest - 1 block in 2025.9.11
let block_hash = B256::from(
hex::const_decode_to_array(
b"0x9535a6970bc4db9031749331a214e35ed8c8a3f585f6f456d590a0bc780a1368",
b"0x093fb6bf2e556a659b35428ac447cd9f0635382fc40ffad417b5910824f9e932",
)
.unwrap(),
);
@@ -207,10 +205,10 @@ mod tests {
.try_fetch_block_witness(block_hash, None)
.expect("should success");

// latest block in 2025.6.15
// block selected in 2025.9.11
let block_hash = B256::from(
hex::const_decode_to_array(
b"0xd47088cdb6afc68aa082e633bb7da9340d29c73841668afacfb9c1e66e557af0",
b"0x77cc84dd7a4dedf6fe5fb9b443aeb5a4fb0623ad088a365d3232b7b23fc848e5",
)
.unwrap(),
);
@@ -220,26 +218,4 @@ mod tests {

println!("{}", serde_json::to_string_pretty(&wit2).unwrap());
}

#[test]
#[ignore = "Requires L2GETH_ENDPOINT environment variable"]
fn test_try_fetch_storage_node() {
let config = create_config_from_env();
let client_core = RpcClientCore::create(&config).expect("Failed to create RPC client");
let client = client_core.get_client();

// the root node (state root) of the block in unittest above
let node_hash = B256::from(
hex::const_decode_to_array(
b"0xb9e67403a2eb35afbb0475fe942918cf9a330a1d7532704c24554506be62b27c",
)
.unwrap(),
);

// This is expected to fail since we're using a dummy hash, but it tests the code path
let node = client
.try_fetch_storage_node(node_hash)
.expect("should success");
println!("{}", serde_json::to_string_pretty(&node).unwrap());
}
}
1 crates/libzkp/.gitignore vendored
@@ -1 +0,0 @@
testdata/*.json
@@ -6,10 +6,11 @@ edition.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
scroll-zkvm-types.workspace = true
scroll-zkvm-verifier-euclid.workspace = true
scroll-zkvm-verifier.workspace = true

alloy-primitives.workspace = true #suppress the effect of "native-keccak"
sbv-primitives = {workspace = true, features = ["scroll-compress-ratio", "scroll"]}
sbv-core = { workspace = true, features = ["scroll"] }
base64.workspace = true
serde.workspace = true
serde_derive.workspace = true
@@ -18,6 +19,7 @@ tracing.workspace = true
eyre.workspace = true

git-version = "0.3.5"
bincode = { version = "2", features = ["serde"] }
serde_stacker = "0.1"
regex = "1.11"
c-kzg = { version = "2.0", features = ["serde"] }

@@ -5,12 +5,33 @@ pub use verifier::{TaskType, VerifierConfig};
mod utils;

use sbv_primitives::B256;
use scroll_zkvm_types::util::vec_as_base64;
use scroll_zkvm_types::utils::vec_as_base64;
use serde::{Deserialize, Serialize};
use serde_json::value::RawValue;
use std::path::Path;
use tasks::chunk_interpreter::{ChunkInterpreter, TryFromWithInterpreter};

/// global features: use legacy encoding for witness
static mut LEGACY_WITNESS_ENCODING: bool = false;
pub(crate) fn witness_use_legacy_mode() -> bool {
unsafe { LEGACY_WITNESS_ENCODING }
}

pub fn set_dynamic_feature(feats: &str) {
for feat_s in feats.split(':') {
match feat_s.trim().to_lowercase().as_str() {
"legacy_witness" => {
tracing::info!("set witness encoding for legacy mode");
unsafe {
// this function is only called during the initialization step
LEGACY_WITNESS_ENCODING = true;
}
}
s => tracing::warn!("unrecognized dynamic feature: {s}"),
}
}
}

/// Turn the coordinator's chunk task into a json string for formal chunk proving
/// task (with full witnesses)
pub fn checkout_chunk_task(
@@ -25,6 +46,47 @@ pub fn checkout_chunk_task(
Ok(ret)
}

/// Convert the universal task json into a compatible form for old provers
pub fn univ_task_compatibility_fix(task_json: &str) -> eyre::Result<String> {
use scroll_zkvm_types::proof::VmInternalStarkProof;

let task: tasks::ProvingTask = serde_json::from_str(task_json)?;
let aggregated_proofs: Vec<VmInternalStarkProof> = task
.aggregated_proofs
.into_iter()
.map(|proof| VmInternalStarkProof {
proofs: proof.proofs,
public_values: proof.public_values,
})
.collect();

#[derive(Serialize)]
struct CompatibleProvingTask {
/// serialized witness which should be written into stdin first
pub serialized_witness: Vec<Vec<u8>>,
/// aggregated proof carried by babybear fields, should be written into stdin
/// following `serialized_witness`
pub aggregated_proofs: Vec<VmInternalStarkProof>,
/// Fork name specifier
pub fork_name: String,
/// The vk of the app which is expected to prove this task
pub vk: Vec<u8>,
/// An identifier assigned by the coordinator; it should stay identical for the
/// same task (for example, using chunk, batch and bundle hashes)
pub identifier: String,
}

let compatible_u_task = CompatibleProvingTask {
serialized_witness: task.serialized_witness,
aggregated_proofs,
fork_name: task.fork_name,
vk: task.vk,
identifier: task.identifier,
};

Ok(serde_json::to_string(&compatible_u_task)?)
}

/// Generate required stuff for proving tasks
/// return (pi_hash, metadata, task)
pub fn gen_universal_task(
@@ -32,7 +94,6 @@ pub fn gen_universal_task(
task_json: &str,
fork_name_str: &str,
expected_vk: &[u8],
interpreter: Option<impl ChunkInterpreter>,
) -> eyre::Result<(B256, String, String)> {
use proofs::*;
use tasks::*;
@@ -56,10 +117,9 @@ pub fn gen_universal_task(
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in chunk task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
let (pi_hash, metadata, u_task) = utils::panic_catch(move || {
gen_universal_chunk_task(task, fork_name_str.into(), interpreter)
})
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
let (pi_hash, metadata, u_task) =
utils::panic_catch(move || gen_universal_chunk_task(task, fork_name_str.into()))
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
(pi_hash, AnyMetaData::Chunk(metadata), u_task)
}
x if x == TaskType::Batch as i32 => {

@@ -7,10 +7,10 @@ use scroll_zkvm_types::{
batch::BatchInfo,
bundle::BundleInfo,
chunk::ChunkInfo,
proof::{EvmProof, OpenVmEvmProof, ProofEnum, RootProof},
proof::{EvmProof, OpenVmEvmProof, ProofEnum, StarkProof},
public_inputs::{ForkName, MultiVersionPublicInputs},
types_agg::{AggregationInput, ProgramCommitment},
util::vec_as_base64,
types_agg::AggregationInput,
utils::{serialize_vk, vec_as_base64},
};
use serde::{de::DeserializeOwned, Deserialize, Serialize};

@@ -40,7 +40,7 @@ pub struct WrappedProof<Metadata> {
}

pub trait AsRootProof {
fn as_root_proof(&self) -> &RootProof;
fn as_root_proof(&self) -> &StarkProof;
}

pub trait AsEvmProof {
@@ -61,17 +61,17 @@ pub type BatchProof = WrappedProof<BatchProofMetadata>;
pub type BundleProof = WrappedProof<BundleProofMetadata>;

impl AsRootProof for ChunkProof {
fn as_root_proof(&self) -> &RootProof {
fn as_root_proof(&self) -> &StarkProof {
self.proof
.as_root_proof()
.as_stark_proof()
.expect("batch proof use root proof")
}
}

impl AsRootProof for BatchProof {
fn as_root_proof(&self) -> &RootProof {
fn as_root_proof(&self) -> &StarkProof {
self.proof
.as_root_proof()
.as_stark_proof()
.expect("batch proof use root proof")
}
}
@@ -122,6 +122,8 @@ pub trait PersistableProof: Sized {
pub struct ChunkProofMetadata {
/// The chunk information describing the list of blocks contained within the chunk.
pub chunk_info: ChunkInfo,
/// Additional data for stat
pub chunk_total_gas: u64,
}

impl ProofMetadata for ChunkProofMetadata {
@@ -170,7 +172,7 @@ impl<Metadata> From<&WrappedProof<Metadata>> for AggregationInput {
fn from(value: &WrappedProof<Metadata>) -> Self {
Self {
public_values: value.proof.public_values(),
commitment: ProgramCommitment::deserialize(&value.vk),
commitment: serialize_vk::deserialize(&value.vk),
}
}
}

@@ -16,6 +16,11 @@ use crate::{
use sbv_primitives::B256;
use scroll_zkvm_types::public_inputs::{ForkName, MultiVersionPublicInputs};

fn encode_task_to_witness<T: serde::Serialize>(task: &T) -> eyre::Result<Vec<u8>> {
let config = bincode::config::standard();
Ok(bincode::serde::encode_to_vec(task, config)?)
}

fn check_aggregation_proofs<Metadata>(
proofs: &[proofs::WrappedProof<Metadata>],
fork_name: ForkName,
@@ -37,19 +42,19 @@

/// Generate required stuff for chunk proving
pub fn gen_universal_chunk_task(
mut task: ChunkProvingTask,
task: ChunkProvingTask,
fork_name: ForkName,
interpreter: Option<impl ChunkInterpreter>,
) -> eyre::Result<(B256, ChunkProofMetadata, ProvingTask)> {
if let Some(interpreter) = interpreter {
task.prepare_task_via_interpret(interpreter)?;
}
let chunk_total_gas = task.stats().total_gas_used;
let chunk_info = task.precheck_and_build_metadata()?;
let proving_task = task.try_into()?;
let expected_pi_hash = chunk_info.pi_hash_by_fork(fork_name);
Ok((
expected_pi_hash,
ChunkProofMetadata { chunk_info },
ChunkProofMetadata {
chunk_info,
chunk_total_gas,
},
proving_task,
))
}

@@ -4,9 +4,9 @@ use eyre::Result;
use sbv_primitives::{B256, U256};
use scroll_zkvm_types::{
batch::{
BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderV8, BatchInfo, BatchWitness,
Envelope, EnvelopeV6, EnvelopeV7, EnvelopeV8, PointEvalWitness, ReferenceHeader,
ToArchievedWitness, N_BLOB_BYTES,
build_point_eval_witness, BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderV8,
BatchInfo, BatchWitness, Envelope, EnvelopeV6, EnvelopeV7, EnvelopeV8, LegacyBatchWitness,
ReferenceHeader, N_BLOB_BYTES,
},
public_inputs::ForkName,
task::ProvingTask,
@@ -84,6 +84,12 @@ impl TryFrom<BatchProvingTask> for ProvingTask {

fn try_from(value: BatchProvingTask) -> Result<Self> {
let witness = value.build_guest_input();
let serialized_witness = if crate::witness_use_legacy_mode() {
let legacy_witness = LegacyBatchWitness::from(witness);
to_rkyv_bytes::<RancorError>(&legacy_witness)?.into_vec()
} else {
super::encode_task_to_witness(&witness)?
};

Ok(ProvingTask {
identifier: value.batch_header.batch_hash().to_string(),
@@ -91,9 +97,9 @@ impl TryFrom<BatchProvingTask> for ProvingTask {
aggregated_proofs: value
.chunk_proofs
.into_iter()
.map(|w_proof| w_proof.proof.into_root_proof().expect("expect root proof"))
.map(|w_proof| w_proof.proof.into_stark_proof().expect("expect root proof"))
.collect(),
serialized_witness: vec![to_rkyv_bytes::<RancorError>(&witness)?.into_vec()],
serialized_witness: vec![serialized_witness],
vk: Vec::new(),
})
}
@@ -161,10 +167,10 @@ impl BatchProvingTask {
assert_eq!(p, kzg_proof);
}

let point_eval_witness = PointEvalWitness {
kzg_commitment: kzg_commitment.into_inner(),
kzg_proof: kzg_proof.into_inner(),
};
let point_eval_witness = Some(build_point_eval_witness(
kzg_commitment.into_inner(),
kzg_proof.into_inner(),
));

let reference_header = match fork_name {
ForkName::EuclidV1 => ReferenceHeader::V6(*self.batch_header.must_v6_header()),
@@ -192,12 +198,7 @@ impl BatchProvingTask {
// 1. generate data for metadata from the witness
// 2. validate every adjacent proof pair
let witness = self.build_guest_input();
let archieved = ToArchievedWitness::create(&witness)
.map_err(|e| eyre::eyre!("archieve batch witness fail: {e}"))?;
let archieved_witness = archieved
.access()
.map_err(|e| eyre::eyre!("access archieved batch witness fail: {e}"))?;
let metadata: BatchInfo = archieved_witness.into();
let metadata = BatchInfo::from(&witness);

super::check_aggregation_proofs(self.chunk_proofs.as_slice(), fork_name)?;

@@ -18,7 +18,7 @@ pub mod base64 {
pub mod point_eval {
use c_kzg;
use sbv_primitives::{types::eips::eip4844::BLS_MODULUS, B256 as H256, U256};
use scroll_zkvm_types::util::sha256_rv32;
use scroll_zkvm_types::utils::sha256_rv32;

/// Given the blob-envelope, translate it to a fixed size EIP-4844 blob.
///

@@ -1,9 +1,10 @@
use crate::proofs::BatchProof;
use eyre::Result;
use scroll_zkvm_types::{
bundle::{BundleInfo, BundleWitness, ToArchievedWitness},
bundle::{BundleInfo, BundleWitness},
public_inputs::ForkName,
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};

/// Message indicating a sanity check failure.
@@ -56,12 +57,7 @@ impl BundleProvingTask {
// 1. generate data for metadata from the witness
// 2. validate every adjacent proof pair
let witness = self.build_guest_input();
let archieved = ToArchievedWitness::create(&witness)
.map_err(|e| eyre::eyre!("archieve bundle witness fail: {e}"))?;
let archieved_witness = archieved
.access()
.map_err(|e| eyre::eyre!("access archieved bundle witness fail: {e}"))?;
let metadata: BundleInfo = archieved_witness.into();
let metadata = BundleInfo::from(&witness);

super::check_aggregation_proofs(self.batch_proofs.as_slice(), fork_name)?;

@@ -74,6 +70,11 @@ impl TryFrom<BundleProvingTask> for ProvingTask {

fn try_from(value: BundleProvingTask) -> Result<Self> {
let witness = value.build_guest_input();
let serialized_witness = if crate::witness_use_legacy_mode() {
to_rkyv_bytes::<RancorError>(&witness)?.into_vec()
} else {
super::encode_task_to_witness(&witness)?
};

Ok(ProvingTask {
identifier: value.identifier(),
@@ -81,9 +82,9 @@ impl TryFrom<BundleProvingTask> for ProvingTask {
aggregated_proofs: value
.batch_proofs
.into_iter()
.map(|w_proof| w_proof.proof.into_root_proof().expect("expect root proof"))
.map(|w_proof| w_proof.proof.into_stark_proof().expect("expect root proof"))
.collect(),
serialized_witness: vec![witness.rkyv_serialize(None)?.to_vec()],
serialized_witness: vec![serialized_witness],
vk: Vec::new(),
})
}

@@ -1,9 +1,11 @@
use super::chunk_interpreter::*;
use eyre::Result;
use sbv_primitives::{types::BlockWitness, B256};
use sbv_core::BlockWitness;
use sbv_primitives::B256;
use scroll_zkvm_types::{
chunk::{execute, ChunkInfo, ChunkWitness, ToArchievedWitness},
chunk::{execute, ChunkInfo, ChunkWitness, LegacyChunkWitness},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};

/// The type aligned with the coordinator's definition
@@ -66,12 +68,18 @@ impl TryFrom<ChunkProvingTask> for ProvingTask {

fn try_from(value: ChunkProvingTask) -> Result<Self> {
let witness = value.build_guest_input();
let serialized_witness = if crate::witness_use_legacy_mode() {
let legacy_witness = LegacyChunkWitness::from(witness);
to_rkyv_bytes::<RancorError>(&legacy_witness)?.into_vec()
} else {
super::encode_task_to_witness(&witness)?
};

Ok(ProvingTask {
identifier: value.identifier(),
fork_name: value.fork_name,
aggregated_proofs: Vec::new(),
serialized_witness: vec![witness.rkyv_serialize(None)?.to_vec()],
serialized_witness: vec![serialized_witness],
vk: Vec::new(),
})
}
@@ -83,7 +91,7 @@ impl ChunkProvingTask {
let num_txs = self
.block_witnesses
.iter()
.map(|b| b.transaction.len())
.map(|b| b.transactions.len())
.sum::<usize>();
let total_gas_used = self
.block_witnesses
@@ -131,18 +139,14 @@ impl ChunkProvingTask {

pub fn precheck_and_build_metadata(&self) -> Result<ChunkInfo> {
let witness = self.build_guest_input();
let archieved = ToArchievedWitness::create(&witness)
.map_err(|e| eyre::eyre!("archieve chunk witness fail: {e}"))?;
let archieved_witness = archieved
.access()
.map_err(|e| eyre::eyre!("access archieved chunk witness fail: {e}"))?;

let ret = ChunkInfo::try_from(archieved_witness).map_err(|e| eyre::eyre!("{e}"))?;
let ret = ChunkInfo::try_from(witness).map_err(|e| eyre::eyre!("{e}"))?;
Ok(ret)
}

/// this method checks the validity of the current task (there may be missing storage nodes)
/// and tries fixing it until everything is ok
#[deprecated]
pub fn prepare_task_via_interpret(
&mut self,
interpreter: impl ChunkInterpreter,
@@ -166,13 +170,8 @@ impl ChunkProvingTask {
let mut attempts = 0;
loop {
let witness = self.build_guest_input();
let archieved = ToArchievedWitness::create(&witness)
.map_err(|e| eyre::eyre!("archieve chunk witness fail: {e}"))?;
let archieved_witness = archieved
.access()
.map_err(|e| eyre::eyre!("access archieved chunk witness fail: {e}"))?;

match execute(archieved_witness) {
match execute(witness) {
Ok(_) => return Ok(()),
Err(e) => {
if let Some(caps) = err_parse_re.captures(&e) {

@@ -1,5 +1,6 @@
use eyre::Result;
use sbv_primitives::{types::BlockWitness, Bytes, B256};
use sbv_core::BlockWitness;
use sbv_primitives::{Bytes, B256};

/// An interpreter which is critical in translating chunk data
/// since we need to grep block witness and storage node data

@@ -1,7 +1,6 @@
#![allow(static_mut_refs)]

mod euclidv2;
use euclidv2::EuclidV2Verifier;
mod universal;
use eyre::Result;
use serde::{Deserialize, Serialize};
use std::{
@@ -9,6 +8,7 @@ use std::{
path::Path,
sync::{Arc, Mutex, OnceLock},
};
use universal::Verifier;

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum TaskType {
@@ -61,7 +61,7 @@ pub fn init(config: VerifierConfig) {
for cfg in &config.circuits {
let canonical_fork_name = cfg.fork_name.to_lowercase();

let verifier = EuclidV2Verifier::new(&cfg.assets_path, canonical_fork_name.as_str().into());
let verifier = Verifier::new(&cfg.assets_path, canonical_fork_name.as_str().into());
let ret = verifiers.insert(canonical_fork_name, Arc::new(Mutex::new(verifier)));
assert!(
ret.is_none(),

@@ -7,59 +7,51 @@ use crate::{
utils::panic_catch,
};
use scroll_zkvm_types::public_inputs::ForkName;
use scroll_zkvm_verifier_euclid::verifier::UniversalVerifier;
use scroll_zkvm_verifier::verifier::UniversalVerifier;
use std::path::Path;

pub struct EuclidV2Verifier {
pub struct Verifier {
verifier: UniversalVerifier,
fork: ForkName,
}

impl EuclidV2Verifier {
impl Verifier {
pub fn new(assets_dir: &str, fork: ForkName) -> Self {
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
let config = Path::new(assets_dir).join("root-verifier-vm-config");
let exe = Path::new(assets_dir).join("root-verifier-committed-exe");
let verifier_bin = Path::new(assets_dir);

Self {
verifier: UniversalVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up chunk verifier"),
verifier: UniversalVerifier::setup(verifier_bin).expect("Setting up chunk verifier"),
fork,
}
}
}

impl ProofVerifier for EuclidV2Verifier {
impl ProofVerifier for Verifier {
fn verify(&self, task_type: super::TaskType, proof: &[u8]) -> Result<bool> {
panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof).unwrap();
if !proof.pi_hash_check(self.fork) {
return false;
}
assert!(proof.pi_hash_check(self.fork));
self.verifier
.verify_proof(proof.as_root_proof(), &proof.vk)
.verify_stark_proof(proof.as_root_proof(), &proof.vk)
.unwrap()
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof).unwrap();
if !proof.pi_hash_check(self.fork) {
return false;
}
assert!(proof.pi_hash_check(self.fork));
self.verifier
.verify_proof(proof.as_root_proof(), &proof.vk)
.verify_stark_proof(proof.as_root_proof(), &proof.vk)
.unwrap()
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof).unwrap();
if !proof.pi_hash_check(self.fork) {
return false;
}
assert!(proof.pi_hash_check(self.fork));
let vk = proof.vk.clone();
let evm_proof = proof.into_evm_proof();
self.verifier.verify_proof_evm(&evm_proof, &vk).unwrap()
self.verifier.verify_evm_proof(&evm_proof, &vk).unwrap()
}
})
.map(|_| true)
.map_err(|err_str: String| eyre::eyre!("{err_str}"))
}

@@ -1,69 +0,0 @@
use libzkp::{gen_universal_task, tasks::chunk_interpreter::ChunkInterpreter, TaskType};
use scroll_zkvm_types::ProvingTask;
use std::{fs, path::Path};

// Global constant for testdata directory
const TESTDATA_DIR: &str = "./testdata";

// Mock interpreter for testing chunk tasks
#[derive(Clone)]
struct MockChunkInterpreter;

impl ChunkInterpreter for MockChunkInterpreter {}

#[cfg(test)]
mod tests {
use super::*;

fn test_gen_universal_task(task_file: &str, task_type: TaskType) -> eyre::Result<ProvingTask> {
// Load chunk task JSON from testdata file
let testdata_path = Path::new(TESTDATA_DIR).join(task_file);
let task_json = fs::read_to_string(&testdata_path)?;

// Parse task_json as raw JSON object and extract fork_name field
let raw_json: serde_json::Value = serde_json::from_str(&task_json)?;
let fork_name = raw_json["fork_name"]
.as_str()
.ok_or_else(|| eyre::eyre!("fork_name field not found or not a string"))?
.to_lowercase();

let mocked_vk = vec![0u8; 32]; // Mock verification key
let interpreter = Some(MockChunkInterpreter);

let (pi_hash, metadata, task) = gen_universal_task(
task_type as i32,
&task_json,
&fork_name,
&mocked_vk,
interpreter,
)?;

assert!(!pi_hash.is_zero(), "PI hash should not be zero");
assert!(!metadata.is_empty(), "Metadata should not be empty");
assert!(!task.is_empty(), "Task should not be empty");

// Dump the task content to testdata directory
let dump_path = Path::new(TESTDATA_DIR).join("dump_univ_task.json");
std::fs::write(&dump_path, &task).ok(); // Dump task content

Ok(serde_json::from_str(&task)?)
}

#[ignore = "need testing stuff"]
#[test]
fn test_gen_universal_task_chunk() {
let _ = test_gen_universal_task("chunk_proving_task.json", TaskType::Chunk).unwrap();
}

#[ignore = "need testing stuff"]
#[test]
fn test_gen_universal_task_batch() {
let _ = test_gen_universal_task("batch_proving_task.json", TaskType::Batch).unwrap();
}

#[ignore = "need testing stuff"]
#[test]
fn test_gen_universal_task_bundle() {
let _ = test_gen_universal_task("bundle_proving_task.json", TaskType::Bundle).unwrap();
}
}
@@ -153,17 +153,12 @@ pub unsafe extern "C" fn gen_universal_task(
expected_vk: *const u8,
expected_vk_len: usize,
) -> HandlingResult {
let mut interpreter = None;
let task_json = if task_type == TaskType::Chunk as i32 {
let pre_task_str = c_char_to_str(task);
let cli = l2geth::get_client();
match libzkp::checkout_chunk_task(pre_task_str, cli) {
Ok(str) => {
interpreter.replace(cli);
str
}
Ok(str) => str,
Err(e) => {
println!("gen_universal_task failed at pre interpret step, error: {e}");
tracing::error!("gen_universal_task failed at pre interpret step, error: {e}");
return failed_handling_result();
}
@@ -178,13 +173,8 @@ pub unsafe extern "C" fn gen_universal_task(
&[]
};

let ret = libzkp::gen_universal_task(
task_type,
&task_json,
c_char_to_str(fork_name),
expected_vk,
interpreter,
);
let ret =
libzkp::gen_universal_task(task_type, &task_json, c_char_to_str(fork_name), expected_vk);

if let Ok((pi_hash, meta_json, task_json)) = ret {
let expected_pi_hash = pi_hash.0.map(|byte| byte as c_char);
@@ -248,6 +238,19 @@ pub unsafe extern "C" fn gen_wrapped_proof(
}
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn univ_task_compatibility_fix(task_json: *const c_char) -> *mut c_char {
let task_json_str = c_char_to_str(task_json);
match libzkp::univ_task_compatibility_fix(task_json_str) {
Ok(result) => CString::new(result).unwrap().into_raw(),
Err(e) => {
tracing::error!("univ_task_compability_fix failed, error: {:#}", e);
std::ptr::null_mut()
}
}
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn release_string(ptr: *mut c_char) {
@@ -255,3 +258,10 @@ pub unsafe extern "C" fn release_string(ptr: *mut c_char) {
let _ = CString::from_raw(ptr);
}
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn set_dynamic_feature(feats: *const c_char) {
let feats_str = c_char_to_str(feats);
libzkp::set_dynamic_feature(feats_str);
}

@@ -7,8 +7,8 @@ edition.workspace = true

[dependencies]
scroll-zkvm-types.workspace = true
scroll-zkvm-prover-euclid.workspace = true
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", branch = "refactor/scroll" }
scroll-zkvm-prover.workspace = true
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "05648db" }
serde.workspace = true
serde_json.workspace = true
once_cell.workspace =true
@@ -17,8 +17,9 @@ tiny-keccak = { workspace = true, features = ["sha3", "keccak"] }
eyre.workspace = true

futures = "0.3.30"
futures-util = "0.3"

reqwest = { version = "0.12.4", features = ["gzip"] }
reqwest = { version = "0.12.4", features = ["gzip", "stream"] }
reqwest-middleware = "0.3"
reqwest-retry = "0.5"
hex = "0.4.3"
@@ -30,5 +31,9 @@ sled = "0.34.7"
http = "1.1.0"
clap = { version = "4.5", features = ["derive"] }
ctor = "0.2.8"
url = "2.5.4"
url = { version = "2.5.4", features = ["serde",] }
serde_bytes = "0.11.15"

[features]
default = []
cuda = ["scroll-zkvm-prover/cuda"]
7
crates/prover-bin/assets_url_preset.json
Normal file
@@ -0,0 +1,7 @@
{
    "feynman": {
        "b68fdc3f28a5ce006280980df70cd3447e56913e5bca6054603ba85f0794c23a6618ea25a7991845bbc5fd571670ee47379ba31ace92d345bca59702a0d4112d": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.5.2/chunk/",
        "9a3f66370f11e3303f1a1248921025104e83253efea43a70d221cf4e15fc145bf2be2f4468d1ac4a70e7682babb1c60417e21c7633d4b55b58f44703ec82b05a": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.5.2/batch/",
        "1f8627277e1c1f6e1cc70c03e6fde06929e5ea27ca5b1d56e23b235dfeda282e22c0e5294bcb1b3a9def836f8d0f18612a9860629b9497292976ca11844b7e73": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.5.2/bundle/"
    }
}
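This preset maps a fork name ("feynman") to a table of hex-encoded verifying keys, each pointing at the base URL its circuit assets are served from; it is the data embedded into GLOBAL_ASSET_URLS further below. A minimal sketch of the deserialization target (types taken from this diff; the main function is illustrative only):

use std::collections::HashMap;

fn main() -> eyre::Result<()> {
    // fork name -> (vk hex -> asset base URL), mirroring GLOBAL_ASSET_URLS
    let presets: HashMap<String, HashMap<String, url::Url>> =
        serde_json::from_str(include_str!("../assets_url_preset.json"))?;
    for (fork, vks) in &presets {
        for (vk, base) in vks {
            println!("{fork}: vk {}.. -> {base}", &vk[..8]);
        }
    }
    Ok(())
}

Note that deserializing `url::Url` directly requires the url crate's "serde" feature, which this diff enables in Cargo.toml above.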
@@ -34,11 +34,6 @@ struct Args {

#[derive(Subcommand, Debug)]
enum Commands {
    /// Dump vk of this prover
    Dump {
        /// path to save the verifier's asset
        asset_path: String,
    },
    Handle {
        /// path of the task file to handle
        task_path: String,
@@ -64,16 +59,10 @@ async fn main() -> eyre::Result<()> {
    }

    let cfg = LocalProverConfig::from_file(args.config_file)?;
    let default_fork_name = cfg.circuits.keys().next().unwrap().clone();
    let sdk_config = cfg.sdk_config.clone();
    let local_prover = LocalProver::new(cfg.clone());

    match args.command {
        Some(Commands::Dump { asset_path }) => {
            let fork_name = args.fork_name.unwrap_or(default_fork_name);
            println!("dump assets for {fork_name} into {asset_path}");
            local_prover.dump_verifier_assets(&fork_name, asset_path.as_ref())?;
        }
        Some(Commands::Handle { task_path }) => {
            let file = File::open(Path::new(&task_path))?;
            let reader = BufReader::new(file);

@@ -1,4 +1,4 @@
use crate::zk_circuits_handler::{euclidV2::EuclidV2Handler, CircuitsHandler};
use crate::zk_circuits_handler::{universal::UniversalHandler, CircuitsHandler};
use async_trait::async_trait;
use eyre::Result;
use scroll_proving_sdk::{
@@ -16,12 +16,111 @@ use serde::{Deserialize, Serialize};
use std::{
    collections::HashMap,
    fs::File,
    path::Path,
    sync::{Arc, OnceLock},
    path::{Path, PathBuf},
    sync::{Arc, LazyLock},
    time::{SystemTime, UNIX_EPOCH},
};
use tokio::{runtime::Handle, sync::Mutex, task::JoinHandle};

#[derive(Clone, Serialize, Deserialize)]
pub struct AssetsLocationData {
    /// the base url used to form a general downloading url for an asset, MUST HAVE A TRAILING SLASH
    pub base_url: url::Url,
    #[serde(default)]
    /// an alternate url for a specified vk
    pub asset_detours: HashMap<String, url::Url>,
}

impl AssetsLocationData {
    pub fn gen_asset_url(&self, vk_as_path: &str, proof_type: ProofType) -> Result<url::Url> {
        Ok(self.base_url.join(
            match proof_type {
                ProofType::Chunk => format!("chunk/{vk_as_path}/"),
                ProofType::Batch => format!("batch/{vk_as_path}/"),
                ProofType::Bundle => format!("bundle/{vk_as_path}/"),
                t => eyre::bail!("unrecognized proof type: {}", t as u8),
            }
            .as_str(),
        )?)
    }

    pub fn validate(&self) -> Result<()> {
        if !self.base_url.path().ends_with('/') {
            eyre::bail!(
                "base_url must have a trailing slash, got: {}",
                self.base_url
            );
        }
        Ok(())
    }

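The trailing-slash requirement exists because `url::Url::join` resolves its argument against the base the way a browser resolves a relative link: without a trailing slash, the last path segment of the base is replaced rather than extended. A minimal sketch (hypothetical URLs, not part of this diff):

use url::Url;

fn main() -> Result<(), url::ParseError> {
    let good = Url::parse("https://example.com/releases/0.5.2/")?;
    assert_eq!(
        good.join("chunk/abc/")?.as_str(),
        "https://example.com/releases/0.5.2/chunk/abc/"
    );

    // Without the trailing slash, "0.5.2" is treated as a file name and dropped.
    let bad = Url::parse("https://example.com/releases/0.5.2")?;
    assert_eq!(
        bad.join("chunk/abc/")?.as_str(),
        "https://example.com/releases/chunk/abc/"
    );
    Ok(())
}
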
    pub async fn get_asset(
        &self,
        vk: &str,
        url_base: &url::Url,
        base_path: impl AsRef<Path>,
    ) -> Result<PathBuf> {
        let download_files = ["app.vmexe", "openvm.toml"];

        // Step 1: Create a local path for storage
        let storage_path = base_path.as_ref().join(vk);
        std::fs::create_dir_all(&storage_path)?;

        // Step 2 & 3: Download each file if needed
        let client = reqwest::Client::new();

        for filename in download_files.iter() {
            let local_file_path = storage_path.join(filename);
            let download_url = url_base.join(filename)?;

            // Check if file already exists
            if local_file_path.exists() {
                // Get file metadata to check size
                if let Ok(metadata) = std::fs::metadata(&local_file_path) {
                    // Make a HEAD request to get remote file size
                    if let Ok(head_resp) = client.head(download_url.clone()).send().await {
                        if let Some(content_length) = head_resp.headers().get("content-length") {
                            if let Ok(remote_size) =
                                content_length.to_str().unwrap_or("0").parse::<u64>()
                            {
                                // If sizes match, skip download
                                if metadata.len() == remote_size {
                                    println!("File {} already exists with matching size, skipping download", filename);
                                    continue;
                                }
                            }
                        }
                    }
                }
            }

            println!("Downloading {} from {}", filename, download_url);

            let response = client.get(download_url).send().await?;
            if !response.status().is_success() {
                eyre::bail!(
                    "Failed to download {}: HTTP status {}",
                    filename,
                    response.status()
                );
            }

            // Stream the content directly to file instead of loading into memory
            let mut file = std::fs::File::create(&local_file_path)?;
            let mut stream = response.bytes_stream();

            use futures_util::StreamExt;
            while let Some(chunk) = stream.next().await {
                std::io::Write::write_all(&mut file, &chunk?)?;
            }
        }

        // Step 4: Return the storage path
        Ok(storage_path)
    }
}

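The size comparison above is only a freshness heuristic: two files of equal length can still differ, and a missing or non-numeric Content-Length forces a re-download. A minimal sketch of that decision in isolation (hypothetical helper name, not part of this diff):

/// Returns true when the cached copy can be reused: the remote size is known
/// and matches the local file length exactly.
fn can_skip_download(local_len: u64, content_length: Option<&str>) -> bool {
    content_length
        .and_then(|v| v.parse::<u64>().ok())
        .map_or(false, |remote_len| remote_len == local_len)
}

fn main() {
    assert!(can_skip_download(1024, Some("1024")));
    assert!(!can_skip_download(1024, Some("2048")));
    assert!(!can_skip_download(1024, None)); // no header: re-download
}
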
#[derive(Clone, Serialize, Deserialize)]
pub struct LocalProverConfig {
    pub sdk_config: SdkConfig,
@@ -45,7 +144,11 @@ impl LocalProverConfig {
#[derive(Clone, Serialize, Deserialize)]
pub struct CircuitConfig {
    pub hard_fork_name: String,
    /// The path to save assets for a specified hard fork phase
    pub workspace_path: String,
    #[serde(flatten)]
    /// The location data for dynamic loading
    pub location_data: AssetsLocationData,
    /// cached vk value to save some initial cost, for debugging only
    #[serde(default)]
    pub vks: HashMap<ProofType, String>,
@@ -56,7 +159,7 @@ pub struct LocalProver {
    next_task_id: u64,
    current_task: Option<JoinHandle<Result<String>>>,

    handlers: HashMap<String, OnceLock<Arc<dyn CircuitsHandler>>>,
    handlers: HashMap<String, Arc<dyn CircuitsHandler>>,
}

#[async_trait]
@@ -64,24 +167,15 @@ impl ProvingService for LocalProver {
    fn is_local(&self) -> bool {
        true
    }
    async fn get_vks(&self, req: GetVkRequest) -> GetVkResponse {
        let mut vks = vec![];
        for (hard_fork_name, cfg) in self.config.circuits.iter() {
            for proof_type in &req.proof_types {
                if let Some(vk) = cfg.vks.get(proof_type) {
                    vks.push(vk.clone())
                } else {
                    let handler = self.get_or_init_handler(hard_fork_name);
                    vks.push(handler.get_vk(*proof_type).await);
                }
            }
    async fn get_vks(&self, _: GetVkRequest) -> GetVkResponse {
        // get_vk has been deprecated in the new prover with the dynamic asset loading scheme
        GetVkResponse {
            vks: vec![],
            error: None,
        }

        GetVkResponse { vks, error: None }
    }
    async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
        let handler = self.get_or_init_handler(&req.hard_fork_name);
        match self.do_prove(req, handler).await {
        match self.do_prove(req).await {
            Ok(resp) => resp,
            Err(e) => ProveResponse {
                status: TaskStatus::Failed,
@@ -132,34 +226,91 @@ impl ProvingService for LocalProver {
        }
    }

static GLOBAL_ASSET_URLS: LazyLock<HashMap<String, HashMap<String, url::Url>>> =
    LazyLock::new(|| {
        const ASSETS_JSON: &str = include_str!("../assets_url_preset.json");
        serde_json::from_str(ASSETS_JSON).expect("Failed to parse assets_url_preset.json")
    });

impl LocalProver {
    pub fn new(config: LocalProverConfig) -> Self {
        let handlers = config
            .circuits
            .keys()
            .map(|k| (k.clone(), OnceLock::new()))
            .collect();
    pub fn new(mut config: LocalProverConfig) -> Self {
        for (fork_name, circuit_config) in config.circuits.iter_mut() {
            // validate each base url
            circuit_config.location_data.validate().unwrap();
            let mut template_url_mapping = GLOBAL_ASSET_URLS
                .get(&fork_name.to_lowercase())
                .cloned()
                .unwrap_or_default();

            // apply default settings from the template
            for (key, url) in circuit_config.location_data.asset_detours.drain() {
                template_url_mapping.insert(key, url);
            }

            circuit_config.location_data.asset_detours = template_url_mapping;

            // validate each detour url
            for url in circuit_config.location_data.asset_detours.values() {
                assert!(
                    url.path().ends_with('/'),
                    "url {} must end with /",
                    url.as_str()
                );
            }
        }

        Self {
            config,
            next_task_id: 0,
            current_task: None,
            handlers,
            handlers: HashMap::new(),
        }
    }

    async fn do_prove(
        &mut self,
        req: ProveRequest,
        handler: Arc<dyn CircuitsHandler>,
    ) -> Result<ProveResponse> {
    async fn do_prove(&mut self, req: ProveRequest) -> Result<ProveResponse> {
        self.next_task_id += 1;
        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let created_at = duration.as_secs() as f64 + duration.subsec_nanos() as f64 * 1e-9;

        let req_clone = req.clone();
        let prover_task = UniversalHandler::get_task_from_input(&req.input)?;
        let vk = hex::encode(&prover_task.vk);
        let handler = if let Some(handler) = self.handlers.get(&vk) {
            handler.clone()
        } else {
            let base_config = self
                .config
                .circuits
                .get(&req.hard_fork_name)
                .ok_or_else(|| {
                    eyre::eyre!(
                        "coordinator sent unexpected forkname {}",
                        req.hard_fork_name
                    )
                })?;
            let url_base = if let Some(url) = base_config.location_data.asset_detours.get(&vk) {
                url.clone()
            } else {
                base_config
                    .location_data
                    .gen_asset_url(&vk, req.proof_type)?
            };
            let asset_path = base_config
                .location_data
                .get_asset(&vk, &url_base, &base_config.workspace_path)
                .await?;
            let circuits_handler = Arc::new(Mutex::new(UniversalHandler::new(
                &asset_path,
                req.proof_type,
            )?));
            self.handlers.insert(vk, circuits_handler.clone());
            circuits_handler
        };

        let handle = Handle::current();
        let task_handle =
            tokio::task::spawn_blocking(move || handle.block_on(handler.get_proof_data(req_clone)));
        let is_evm = req.proof_type == ProofType::Bundle;
        let task_handle = tokio::task::spawn_blocking(move || {
            handle.block_on(handler.get_proof_data(&prover_task, is_evm))
        });
        self.current_task = Some(task_handle);

        Ok(ProveResponse {
@@ -173,77 +324,4 @@ impl LocalProver {
            ..Default::default()
        })
    }

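do_prove hands the proving work to Tokio's blocking pool and re-enters the runtime from that thread via Handle::block_on, so the async control plane stays responsive while a proof grinds away. A minimal standalone sketch of the pattern (hypothetical example, not part of this diff):

use tokio::runtime::Handle;

async fn expensive_async_work() -> u64 {
    42 // stand-in for handler.get_proof_data(..)
}

#[tokio::main(flavor = "multi_thread")]
async fn main() {
    let handle = Handle::current();
    // Runs on the blocking pool; block_on is fine here because this thread
    // is not one of the async worker threads.
    let join = tokio::task::spawn_blocking(move || handle.block_on(expensive_async_work()));
    assert_eq!(join.await.unwrap(), 42);
}
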
    fn get_or_init_handler(&self, hard_fork_name: &str) -> Arc<dyn CircuitsHandler> {
        let lk = self
            .handlers
            .get(hard_fork_name)
            .expect("coordinator should never send an unexpected forkname");
        lk.get_or_init(|| self.new_handler(hard_fork_name)).clone()
    }

    pub fn new_handler(&self, hard_fork_name: &str) -> Arc<dyn CircuitsHandler> {
        // if we got assigned a task for an unknown hard fork, there is something wrong in the
        // coordinator
        let config = self.config.circuits.get(hard_fork_name).unwrap();

        match hard_fork_name {
            // The new EuclidV2Handler is a universal handler
            // We can add other handler implementations if needed
            "some future forkname" => unreachable!(),
            _ => Arc::new(Arc::new(Mutex::new(EuclidV2Handler::new(config))))
                as Arc<dyn CircuitsHandler>,
        }
    }

    pub fn dump_verifier_assets(&self, hard_fork_name: &str, out_path: &Path) -> Result<()> {
        let config = self
            .config
            .circuits
            .get(hard_fork_name)
            .ok_or_else(|| eyre::eyre!("no corresponding config for fork {hard_fork_name}"))?;

        if !config.vks.is_empty() {
            eyre::bail!("clean the vks cache first or the dumped vk will be wrong");
        }

        let workspace_path = &config.workspace_path;
        let universal_prover = EuclidV2Handler::new(config);
        let _ = universal_prover
            .get_prover()
            .dump_universal_verifier(Some(out_path))?;

        #[derive(Debug, serde::Serialize)]
        struct VKDump {
            pub chunk_vk: String,
            pub batch_vk: String,
            pub bundle_vk: String,
        }

        let dump = VKDump {
            chunk_vk: universal_prover.get_vk_and_cache(ProofType::Chunk),
            batch_vk: universal_prover.get_vk_and_cache(ProofType::Batch),
            bundle_vk: universal_prover.get_vk_and_cache(ProofType::Bundle),
        };

        let f = File::create(out_path.join("openVmVk.json"))?;
        serde_json::to_writer(f, &dump)?;

        // Copy verifier.bin from the workspace bundle directory to the output path
        let bundle_verifier_path = Path::new(workspace_path)
            .join("bundle")
            .join("verifier.bin");
        if bundle_verifier_path.exists() {
            let dest_path = out_path.join("verifier.bin");
            std::fs::copy(&bundle_verifier_path, &dest_path)
                .map_err(|e| eyre::eyre!("Failed to copy verifier.bin: {}", e))?;
        } else {
            eprintln!(
                "Warning: verifier.bin not found at {:?}",
                bundle_verifier_path
            );
        }

        Ok(())
    }
}

@@ -1,3 +1,5 @@
#![allow(dead_code)]

use serde::{Deserialize, Deserializer, Serialize, Serializer};

#[derive(Serialize, Deserialize, Default)]

@@ -1,65 +1,13 @@
|
||||
//pub mod euclid;
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
pub mod euclidV2;
|
||||
pub mod universal;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use eyre::Result;
|
||||
use scroll_proving_sdk::prover::{proving_service::ProveRequest, ProofType};
|
||||
use scroll_zkvm_prover_euclid::ProverConfig;
|
||||
use std::path::Path;
|
||||
use scroll_zkvm_types::ProvingTask;
|
||||
|
||||
#[async_trait]
|
||||
pub trait CircuitsHandler: Sync + Send {
|
||||
async fn get_vk(&self, task_type: ProofType) -> String;
|
||||
|
||||
async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String>;
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
pub(crate) enum Phase {
|
||||
EuclidV2,
|
||||
}
|
||||
|
||||
impl Phase {
|
||||
pub fn phase_spec_chunk(&self, workspace_path: &Path) -> ProverConfig {
|
||||
let dir_cache = Some(workspace_path.join("cache"));
|
||||
let path_app_exe = workspace_path.join("chunk/app.vmexe");
|
||||
let path_app_config = workspace_path.join("chunk/openvm.toml");
|
||||
let segment_len = Some((1 << 22) - 100);
|
||||
ProverConfig {
|
||||
dir_cache,
|
||||
path_app_config,
|
||||
path_app_exe,
|
||||
segment_len,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn phase_spec_batch(&self, workspace_path: &Path) -> ProverConfig {
|
||||
let dir_cache = Some(workspace_path.join("cache"));
|
||||
let path_app_exe = workspace_path.join("batch/app.vmexe");
|
||||
let path_app_config = workspace_path.join("batch/openvm.toml");
|
||||
let segment_len = Some((1 << 22) - 100);
|
||||
ProverConfig {
|
||||
dir_cache,
|
||||
path_app_config,
|
||||
path_app_exe,
|
||||
segment_len,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn phase_spec_bundle(&self, workspace_path: &Path) -> ProverConfig {
|
||||
let dir_cache = Some(workspace_path.join("cache"));
|
||||
let path_app_config = workspace_path.join("bundle/openvm.toml");
|
||||
let segment_len = Some((1 << 22) - 100);
|
||||
ProverConfig {
|
||||
dir_cache,
|
||||
path_app_config,
|
||||
segment_len,
|
||||
path_app_exe: workspace_path.join("bundle/app.vmexe"),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
async fn get_proof_data(&self, u_task: &ProvingTask, need_snark: bool) -> Result<String>;
|
||||
}
|
||||
|
||||
@@ -1,144 +0,0 @@
use std::{path::Path, sync::Arc};

use super::CircuitsHandler;
use anyhow::{anyhow, Result};
use async_trait::async_trait;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, ProofType};
use scroll_zkvm_prover_euclid::{
    task::{batch::BatchProvingTask, bundle::BundleProvingTask, chunk::ChunkProvingTask},
    BatchProver, BundleProverEuclidV1, ChunkProver, ProverConfig,
};
use tokio::sync::Mutex;
pub struct EuclidHandler {
    chunk_prover: ChunkProver,
    batch_prover: BatchProver,
    bundle_prover: BundleProverEuclidV1,
}

#[derive(Clone, Copy)]
pub(crate) enum Phase {
    EuclidV1,
    EuclidV2,
}

impl Phase {
    pub fn as_str(&self) -> &str {
        match self {
            Phase::EuclidV1 => "euclidv1",
            Phase::EuclidV2 => "euclidv2",
        }
    }

    pub fn phase_spec_chunk(&self, workspace_path: &Path) -> ProverConfig {
        let dir_cache = Some(workspace_path.join("cache"));
        let path_app_exe = workspace_path.join("chunk/app.vmexe");
        let path_app_config = workspace_path.join("chunk/openvm.toml");
        let segment_len = Some((1 << 22) - 100);
        ProverConfig {
            dir_cache,
            path_app_config,
            path_app_exe,
            segment_len,
            ..Default::default()
        }
    }

    pub fn phase_spec_batch(&self, workspace_path: &Path) -> ProverConfig {
        let dir_cache = Some(workspace_path.join("cache"));
        let path_app_exe = workspace_path.join("batch/app.vmexe");
        let path_app_config = workspace_path.join("batch/openvm.toml");
        let segment_len = Some((1 << 22) - 100);
        ProverConfig {
            dir_cache,
            path_app_config,
            path_app_exe,
            segment_len,
            ..Default::default()
        }
    }

    pub fn phase_spec_bundle(&self, workspace_path: &Path) -> ProverConfig {
        let dir_cache = Some(workspace_path.join("cache"));
        let path_app_config = workspace_path.join("bundle/openvm.toml");
        let segment_len = Some((1 << 22) - 100);
        match self {
            Phase::EuclidV1 => ProverConfig {
                dir_cache,
                path_app_config,
                segment_len,
                path_app_exe: workspace_path.join("bundle/app_euclidv1.vmexe"),
                ..Default::default()
            },
            Phase::EuclidV2 => ProverConfig {
                dir_cache,
                path_app_config,
                segment_len,
                path_app_exe: workspace_path.join("bundle/app.vmexe"),
                ..Default::default()
            },
        }
    }
}

unsafe impl Send for EuclidHandler {}

impl EuclidHandler {
    pub fn new(workspace_path: &str) -> Self {
        let p = Phase::EuclidV1;
        let workspace_path = Path::new(workspace_path);
        let chunk_prover = ChunkProver::setup(p.phase_spec_chunk(workspace_path))
            .expect("Failed to setup chunk prover");

        let batch_prover = BatchProver::setup(p.phase_spec_batch(workspace_path))
            .expect("Failed to setup batch prover");

        let bundle_prover = BundleProverEuclidV1::setup(p.phase_spec_bundle(workspace_path))
            .expect("Failed to setup bundle prover");

        Self {
            chunk_prover,
            batch_prover,
            bundle_prover,
        }
    }
}

#[async_trait]
impl CircuitsHandler for Arc<Mutex<EuclidHandler>> {
    async fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>> {
        Some(match task_type {
            ProofType::Chunk => self.try_lock().unwrap().chunk_prover.get_app_vk(),
            ProofType::Batch => self.try_lock().unwrap().batch_prover.get_app_vk(),
            ProofType::Bundle => self.try_lock().unwrap().bundle_prover.get_app_vk(),
            _ => unreachable!("Unsupported proof type"),
        })
    }

    async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
        match prove_request.proof_type {
            ProofType::Chunk => {
                let task: ChunkProvingTask = serde_json::from_str(&prove_request.input)?;
                let proof = self.try_lock().unwrap().chunk_prover.gen_proof(&task)?;

                Ok(serde_json::to_string(&proof)?)
            }
            ProofType::Batch => {
                let task: BatchProvingTask = serde_json::from_str(&prove_request.input)?;
                let proof = self.try_lock().unwrap().batch_prover.gen_proof(&task)?;

                Ok(serde_json::to_string(&proof)?)
            }
            ProofType::Bundle => {
                let batch_proofs: BundleProvingTask = serde_json::from_str(&prove_request.input)?;
                let proof = self
                    .try_lock()
                    .unwrap()
                    .bundle_prover
                    .gen_proof_evm(&batch_proofs)?;

                Ok(serde_json::to_string(&proof)?)
            }
            _ => Err(anyhow!("Unsupported proof type")),
        }
    }
}
@@ -1,119 +0,0 @@
use std::{
    collections::HashMap,
    path::Path,
    sync::{Arc, OnceLock},
};

use super::{CircuitsHandler, Phase};
use crate::prover::CircuitConfig;
use async_trait::async_trait;
use base64::{prelude::BASE64_STANDARD, Engine};
use eyre::Result;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, ProofType};
use scroll_zkvm_prover_euclid::{BatchProver, BundleProverEuclidV2, ChunkProver};
use scroll_zkvm_types::ProvingTask;
use tokio::sync::Mutex;
pub struct EuclidV2Handler {
    chunk_prover: ChunkProver,
    batch_prover: BatchProver,
    bundle_prover: BundleProverEuclidV2,
    cached_vks: HashMap<ProofType, OnceLock<String>>,
}

unsafe impl Send for EuclidV2Handler {}

impl EuclidV2Handler {
    pub fn new(cfg: &CircuitConfig) -> Self {
        let workspace_path = &cfg.workspace_path;
        let p = Phase::EuclidV2;
        let workspace_path = Path::new(workspace_path);
        let chunk_prover = ChunkProver::setup(p.phase_spec_chunk(workspace_path))
            .expect("Failed to setup chunk prover");

        let batch_prover = BatchProver::setup(p.phase_spec_batch(workspace_path))
            .expect("Failed to setup batch prover");

        let bundle_prover = BundleProverEuclidV2::setup(p.phase_spec_bundle(workspace_path))
            .expect("Failed to setup bundle prover");

        let build_vk_cache = |proof_type: ProofType| {
            let vk = if let Some(vk) = cfg.vks.get(&proof_type) {
                OnceLock::from(vk.clone())
            } else {
                OnceLock::new()
            };
            (proof_type, vk)
        };

        Self {
            chunk_prover,
            batch_prover,
            bundle_prover,
            cached_vks: HashMap::from([
                build_vk_cache(ProofType::Chunk),
                build_vk_cache(ProofType::Batch),
                build_vk_cache(ProofType::Bundle),
            ]),
        }
    }

    /// get_prover returns the inner prover; later we will replace chunk/batch/bundle_prover with
    /// a universal prover, but until then bundle_prover serves as the representative one
    pub fn get_prover(&self) -> &BundleProverEuclidV2 {
        &self.bundle_prover
    }

    pub fn get_vk_and_cache(&self, task_type: ProofType) -> String {
        match task_type {
            ProofType::Chunk => self.cached_vks[&ProofType::Chunk]
                .get_or_init(|| BASE64_STANDARD.encode(self.chunk_prover.get_app_vk())),
            ProofType::Batch => self.cached_vks[&ProofType::Batch]
                .get_or_init(|| BASE64_STANDARD.encode(self.batch_prover.get_app_vk())),
            ProofType::Bundle => self.cached_vks[&ProofType::Bundle]
                .get_or_init(|| BASE64_STANDARD.encode(self.bundle_prover.get_app_vk())),
            _ => unreachable!("Unsupported proof type {:?}", task_type),
        }
        .clone()
    }
}

#[async_trait]
impl CircuitsHandler for Arc<Mutex<EuclidV2Handler>> {
    async fn get_vk(&self, task_type: ProofType) -> String {
        self.lock().await.get_vk_and_cache(task_type)
    }

    async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
        let handler_self = self.lock().await;
        let u_task: ProvingTask = serde_json::from_str(&prove_request.input)?;
        let expected_vk = handler_self.get_vk_and_cache(prove_request.proof_type);
        if BASE64_STANDARD.encode(&u_task.vk) != expected_vk {
            eyre::bail!(
                "vk does not match! proof type {:?}, expected {}, got {}",
                prove_request.proof_type,
                expected_vk,
                BASE64_STANDARD.encode(&u_task.vk),
            );
        }

        let proof = match prove_request.proof_type {
            ProofType::Chunk => handler_self
                .chunk_prover
                .gen_proof_universal(&u_task, false)?,
            ProofType::Batch => handler_self
                .batch_prover
                .gen_proof_universal(&u_task, false)?,
            ProofType::Bundle => handler_self
                .bundle_prover
                .gen_proof_universal(&u_task, true)?,
            _ => {
                return Err(eyre::eyre!(
                    "Unsupported proof type {:?}",
                    prove_request.proof_type
                ))
            }
        };
        //TODO: check the expected PI
        Ok(serde_json::to_string(&proof)?)
    }
}
55
crates/prover-bin/src/zk_circuits_handler/universal.rs
Normal file
@@ -0,0 +1,55 @@
use std::path::Path;

use super::CircuitsHandler;
use async_trait::async_trait;
use eyre::Result;
use scroll_proving_sdk::prover::ProofType;
use scroll_zkvm_prover::{Prover, ProverConfig};
use scroll_zkvm_types::ProvingTask;
use tokio::sync::Mutex;
pub struct UniversalHandler {
    prover: Prover,
}

/// Safe for the current usage as a `CircuitsHandler` trait object (the instance is protected
/// inside a Mutex and is NEVER extracted via `into_inner`)
unsafe impl Send for UniversalHandler {}

impl UniversalHandler {
    pub fn new(workspace_path: impl AsRef<Path>, _proof_type: ProofType) -> Result<Self> {
        let path_app_exe = workspace_path.as_ref().join("app.vmexe");
        let path_app_config = workspace_path.as_ref().join("openvm.toml");
        let segment_len = Some((1 << 22) - 100);
        let config = ProverConfig {
            path_app_config,
            path_app_exe,
            segment_len,
        };

        let prover = Prover::setup(config, None)?;
        Ok(Self { prover })
    }

    /// get_prover returns the inner prover; later we will replace chunk/batch/bundle_prover with
    /// a universal prover, but until then bundle_prover serves as the representative one
    pub fn get_prover(&mut self) -> &mut Prover {
        &mut self.prover
    }

    pub fn get_task_from_input(input: &str) -> Result<ProvingTask> {
        Ok(serde_json::from_str(input)?)
    }
}

#[async_trait]
impl CircuitsHandler for Mutex<UniversalHandler> {
    async fn get_proof_data(&self, u_task: &ProvingTask, need_snark: bool) -> Result<String> {
        let mut handler_self = self.lock().await;

        let proof = handler_self
            .get_prover()
            .gen_proof_universal(u_task, need_snark)?;

        Ok(serde_json::to_string(&proof)?)
    }
}
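Putting the pieces together, a caller holds the handler behind a Mutex and drives it through the trait; need_snark is true only for bundle proofs, which must be verifiable on the EVM. A minimal usage sketch (paths and task JSON are assumed placeholders, not part of this diff):

// async fn prove_one(task_json: &str, workspace: &std::path::Path) -> eyre::Result<String> {
//     let handler = Mutex::new(UniversalHandler::new(workspace, ProofType::Chunk)?);
//     let task = UniversalHandler::get_task_from_input(task_json)?;
//     let need_snark = false; // true for ProofType::Bundle
//     handler.get_proof_data(&task, need_snark).await
// }
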
@@ -8,7 +8,7 @@ require (
    github.com/jmoiron/sqlx v1.3.5
    github.com/lib/pq v1.10.9
    github.com/pressly/goose/v3 v3.16.0
    github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
    github.com/scroll-tech/go-ethereum v1.10.14-0.20251017054300-9aa8b3f38f63
    github.com/stretchr/testify v1.10.0
    github.com/urfave/cli/v2 v2.25.7
)
@@ -34,11 +34,9 @@ require (
    github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
    go.opentelemetry.io/otel/trace v1.24.0 // indirect
    go.uber.org/multierr v1.11.0 // indirect
    golang.org/x/crypto v0.32.0 // indirect
    golang.org/x/net v0.25.0 // indirect
    golang.org/x/sync v0.11.0 // indirect
    golang.org/x/sys v0.30.0 // indirect
    golang.org/x/text v0.21.0 // indirect
    golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
    google.golang.org/protobuf v1.33.0 // indirect
    gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect

@@ -121,8 +121,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251017054300-9aa8b3f38f63 h1:xuqdhD4w/zcI5T8Ty1wHvqB75P2HNg3jTH/kUEHGt9Y=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251017054300-9aa8b3f38f63/go.mod h1:zRa7CnS75mFdgp8IeMtZV/wCAlxPRT33Ek3+fFbBJVQ=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=

10
go.work.sum
@@ -731,8 +731,9 @@ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/compose-spec/compose-go v1.20.0 h1:h4ZKOst1EF/DwZp7dWkb+wbTVE4nEyT9Lc89to84Ol4=
github.com/compose-spec/compose-go v1.20.0/go.mod h1:+MdqXV4RA7wdFsahh/Kb8U0pAJqkg7mr4PM9tFKU8RM=
github.com/consensys/bavard v0.1.27/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/consensys/gnark-crypto v0.13.0/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o=
github.com/container-orchestrated-devices/container-device-interface v0.6.1 h1:mz77uJoP8im/4Zins+mPqt677ZMaflhoGaYrRAl5jvA=
github.com/container-orchestrated-devices/container-device-interface v0.6.1/go.mod h1:40T6oW59rFrL/ksiSs7q45GzjGlbvxnA4xaK6cyq+kA=
github.com/containerd/aufs v1.0.0 h1:2oeJiwX5HstO7shSrPZjrohJZLzK36wvpdmzDRkL/LY=
@@ -1199,6 +1200,7 @@ github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/lestrrat-go/backoff/v2 v2.0.8 h1:oNb5E5isby2kiro9AgdHLv5N5tint1AnDVVf2E2un5A=
github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
github.com/lestrrat-go/blackmagic v1.0.0 h1:XzdxDbuQTz0RZZEmdU7cnQxUtFUzgCSPq8RCz4BxIi4=
@@ -1408,13 +1410,15 @@ github.com/scroll-tech/da-codec v0.1.3-0.20250609113414-f33adf0904bd h1:NUol+dPt
github.com/scroll-tech/da-codec v0.1.3-0.20250609113414-f33adf0904bd/go.mod h1:gz5x3CsLy5htNTbv4PWRPBU9nSAujfx1U2XtFcXoFuk=
github.com/scroll-tech/da-codec v0.1.3-0.20250609154559-8935de62c148 h1:cyK1ifU2fRoMl8YWR9LOsZK4RvJnlG3RODgakj5I8VY=
github.com/scroll-tech/da-codec v0.1.3-0.20250609154559-8935de62c148/go.mod h1:gz5x3CsLy5htNTbv4PWRPBU9nSAujfx1U2XtFcXoFuk=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/da-codec v0.1.3-0.20250825071838-cddc263e5ef6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod h1:swB5NSp8pKNDuYsTxfR08bHS6L56i119PBx8fxvV8Cs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250206083728-ea43834c198f/go.mod h1:Ik3OBLl7cJxPC+CFyCBYNXBPek4wpdzkWehn/y5qLM8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250225152658-bcfdb48dd939/go.mod h1:AgU8JJxC7+nfs7R7ma35AU7dMAGW7wCw3dRZRefIKyQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9 h1:u371VK8eOU2Z/0SVf5KDI3eJc8msHSpJbav4do/8n38=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251017081611-2bc7a5482dcc h1:zSO+VMyzmEVezVuMC7jZ9PcvihwmrlKt+7cyv9rpq2s=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251017081611-2bc7a5482dcc/go.mod h1:zRa7CnS75mFdgp8IeMtZV/wCAlxPRT33Ek3+fFbBJVQ=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=

@@ -15,7 +15,6 @@
    },
    "chunk_proposer_config": {
        "propose_interval_milliseconds": 100,
        "max_block_num_per_chunk": 100,
        "max_l2_gas_per_chunk": 20000000,
        "chunk_timeout_sec": 300,
        "max_uncompressed_batch_bytes_size": 4194304

@@ -96,6 +96,9 @@ func action(ctx *cli.Context) error {
    if cfg.L2Config.ChunkProposerConfig.MaxL2GasPerChunk <= 0 {
        log.Crit("cfg.L2Config.ChunkProposerConfig.MaxL2GasPerChunk must be greater than 0")
    }
    if cfg.L2Config.RelayerConfig.SenderConfig.FusakaTimestamp == 0 {
        log.Crit("cfg.L2Config.RelayerConfig.SenderConfig.FusakaTimestamp must be set")
    }

    l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, genesis.Config, relayer.ServiceTypeL2RollupRelayer, registry)
    if err != nil {

@@ -49,7 +49,8 @@
    "tx_type": "DynamicFeeTx",
    "check_pending_time": 1,
    "min_gas_tip": 100000000,
    "max_pending_blob_txs": 3
    "max_pending_blob_txs": 3,
    "fusaka_timestamp": 9999999999999
},
"batch_submission": {
    "min_batches": 1,
@@ -92,7 +93,6 @@
    },
    "chunk_proposer_config": {
        "propose_interval_milliseconds": 100,
        "max_block_num_per_chunk": 100,
        "max_l2_gas_per_chunk": 20000000,
        "chunk_timeout_sec": 300,
        "max_uncompressed_batch_bytes_size": 4194304

@@ -15,8 +15,8 @@ require (
    github.com/holiman/uint256 v1.3.2
    github.com/mitchellh/mapstructure v1.5.0
    github.com/prometheus/client_golang v1.16.0
    github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6
    github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7
    github.com/scroll-tech/da-codec v0.1.3-0.20250826112206-b4cce5c5d178
    github.com/scroll-tech/go-ethereum v1.10.14-0.20251017054300-9aa8b3f38f63
    github.com/smartystreets/goconvey v1.8.0
    github.com/spf13/viper v1.19.0
    github.com/stretchr/testify v1.10.0
@@ -49,10 +49,11 @@ require (
    github.com/chenzhuoyu/iasm v0.9.0 // indirect
    github.com/consensys/bavard v0.1.29 // indirect
    github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
    github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
    github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
    github.com/edsrzf/mmap-go v1.0.0 // indirect
    github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
    github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
    github.com/fjl/memsize v0.0.2 // indirect
    github.com/fsnotify/fsnotify v1.7.0 // indirect
    github.com/gabriel-vasile/mimetype v1.4.2 // indirect
@@ -118,7 +119,7 @@ require (
    github.com/spf13/pflag v1.0.5 // indirect
    github.com/status-im/keycard-go v0.2.0 // indirect
    github.com/subosito/gotenv v1.6.0 // indirect
    github.com/supranational/blst v0.3.13 // indirect
    github.com/supranational/blst v0.3.15 // indirect
    github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
    github.com/tklauser/go-sysconf v0.3.14 // indirect
    github.com/tklauser/numcpus v0.9.0 // indirect

@@ -79,6 +79,8 @@ github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw
github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -91,8 +93,8 @@ github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vs
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
@@ -285,10 +287,10 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6 h1:vb2XLvQwCf+F/ifP6P/lfeiQrHY6+Yb/E3R4KHXLqSE=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7 h1:1rN1qocsQlOyk1VCpIEF1J5pfQbLAi1pnMZSLQS37jQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/scroll-tech/da-codec v0.1.3-0.20250826112206-b4cce5c5d178 h1:4utngmJHXSOS5FoSdZhEV1xMRirpArbXvyoCZY9nYj0=
github.com/scroll-tech/da-codec v0.1.3-0.20250826112206-b4cce5c5d178/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251017054300-9aa8b3f38f63 h1:xuqdhD4w/zcI5T8Ty1wHvqB75P2HNg3jTH/kUEHGt9Y=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251017054300-9aa8b3f38f63/go.mod h1:zRa7CnS75mFdgp8IeMtZV/wCAlxPRT33Ek3+fFbBJVQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
@@ -330,8 +332,8 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=

@@ -31,7 +31,6 @@ type L2Config struct {
// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
    ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
    MaxBlockNumPerChunk uint64 `json:"max_block_num_per_chunk"`
    MaxL2GasPerChunk uint64 `json:"max_l2_gas_per_chunk"`
    ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
    MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"`

@@ -7,8 +7,14 @@ import (

// SenderConfig The config for transaction sender
type SenderConfig struct {
    // The RPC endpoint of the ethereum or scroll public node.
    // The RPC endpoint of the ethereum or scroll public node (for backward compatibility).
    // If WriteEndpoints is specified, this endpoint will be used only for reading.
    // If WriteEndpoints is empty, this endpoint will be used for both reading and writing.
    Endpoint string `json:"endpoint"`
    // The RPC endpoints to send transactions to (optional).
    // If specified, transactions will be sent to all these endpoints in parallel.
    // If empty, transactions will be sent to the Endpoint.
    WriteEndpoints []string `json:"write_endpoints,omitempty"`
    // The time to trigger checking pending txs in the sender.
    CheckPendingTime uint64 `json:"check_pending_time"`
    // The number of blocks to wait before escalating the gas price of the transaction.
@@ -29,6 +35,8 @@ type SenderConfig struct {
    TxType string `json:"tx_type"`
    // The maximum number of pending blob-carrying transactions
    MaxPendingBlobTxs int64 `json:"max_pending_blob_txs"`
    // The timestamp of the Ethereum Fusaka upgrade in seconds since epoch.
    FusakaTimestamp uint64 `json:"fusaka_timestamp"`
}

type BatchSubmission struct {

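For illustration, a config fragment exercising the read/write split (URLs and the enclosing key are hypothetical; the field names are those defined in SenderConfig above). Reads go through endpoint, while each transaction is broadcast to every entry of write_endpoints in parallel:

"sender_config": {
    "endpoint": "https://l1-archive.example.com",
    "write_endpoints": [
        "https://l1-broadcast-a.example.com",
        "https://l1-broadcast-b.example.com"
    ],
    "tx_type": "DynamicFeeTx",
    "check_pending_time": 1,
    "max_pending_blob_txs": 3,
    "fusaka_timestamp": 9999999999999
}
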
@@ -290,6 +290,12 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
        log.Info("Validium importGenesis", "calldata", common.Bytes2Hex(calldata))
    } else {
        // rollup mode: pass batchHeader and stateRoot

        // Check state root is not zero
        if stateRoot == (common.Hash{}) {
            return fmt.Errorf("state root is zero")
        }

        calldata, packErr = r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
        if packErr != nil {
            return fmt.Errorf("failed to pack rollup importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, packErr)
@@ -339,8 +345,16 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
// - backlogCount > r.cfg.BatchSubmission.BacklogMax -> forceSubmit
// - we have at least minBatches AND the price hits a desired target price
func (r *Layer2Relayer) ProcessPendingBatches() {
    // Get effective batch limits based on whether validium mode is enabled.
    minBatches, maxBatches := r.getEffectiveBatchLimits()
    // First, get the backlog count to determine the batch submission strategy
    backlogCount, err := r.batchOrm.GetFailedAndPendingBatchesCount(r.ctx)
    if err != nil {
        log.Error("Failed to fetch pending L2 batches count", "err", err)
        return
    }
    r.metrics.rollupL2RelayerBacklogCounts.Set(float64(backlogCount))

    // Get effective batch limits based on validium mode and backlog size.
    minBatches, maxBatches := r.getEffectiveBatchLimits(backlogCount)

    // get pending batches from the database in ascending order by their index.
    dbBatches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, maxBatches)
@@ -354,15 +368,6 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
        return
    }

    // if the backlog outgrows the max size, force-submit enough of the oldest batches
    backlogCount, err := r.batchOrm.GetFailedAndPendingBatchesCount(r.ctx)
    r.metrics.rollupL2RelayerBacklogCounts.Set(float64(backlogCount))

    if err != nil {
        log.Error("Failed to fetch pending L2 batches", "err", err)
        return
    }

    var forceSubmit bool

    startChunk, err := r.chunkOrm.GetChunkByIndex(r.ctx, dbBatches[0].StartChunkIndex)
@@ -388,8 +393,6 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
    // return if not hitting the target price
    if skip {
        log.Debug("Skipping batch submission", "first batch index", dbBatches[0].Index, "backlog count", backlogCount, "reason", err)
        log.Debug("first batch index", dbBatches[0].Index)
        log.Debug("backlog count", backlogCount)
        return
    }
    if err != nil {
@@ -502,6 +505,11 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
            log.Error("failed to construct normal payload", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "err", err)
            return
        }

        if err = r.sanityChecksCommitBatchCodecV7CalldataAndBlobs(calldata, blobs); err != nil {
            log.Error("Sanity check failed for calldata and blobs", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "err", err)
            return
        }
    }
    default:
        log.Error("unsupported codec version in ProcessPendingBatches", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index)
@@ -554,12 +562,22 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
    log.Info("Sent the commitBatches tx to layer1", "batches count", len(batchesToSubmit), "start index", firstBatch.Index, "start hash", firstBatch.Hash, "end index", lastBatch.Index, "end hash", lastBatch.Hash, "tx hash", txHash.String())
}

// getEffectiveBatchLimits returns the effective min and max batch limits based on whether validium mode is enabled.
func (r *Layer2Relayer) getEffectiveBatchLimits() (int, int) {
// getEffectiveBatchLimits returns the effective min and max batch limits based on whether validium mode is enabled
// and the current backlog size.
// When backlogCount >= backlog_max: submit min_batches for fast inclusion at slightly higher price.
// When backlogCount < backlog_max: submit max_batches for better cost amortization.
func (r *Layer2Relayer) getEffectiveBatchLimits(backlogCount int64) (int, int) {
    if r.cfg.ValidiumMode {
        return 1, 1 // minBatches=1, maxBatches=1
    }
    return r.cfg.BatchSubmission.MinBatches, r.cfg.BatchSubmission.MaxBatches

    // If backlog is at or above max, prioritize fast inclusion by submitting min_batches
    if backlogCount >= r.cfg.BatchSubmission.BacklogMax {
        return r.cfg.BatchSubmission.MinBatches, r.cfg.BatchSubmission.MinBatches
    }

    // Otherwise, prioritize cost efficiency by trying to submit max_batches
    return r.cfg.BatchSubmission.MaxBatches, r.cfg.BatchSubmission.MaxBatches
}

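A worked example of the limits above, using hypothetical config values min_batches=1, max_batches=5, backlog_max=10: a backlog of 12 returns (1, 1), trading amortization for fast inclusion, while a backlog of 4 returns (5, 5) so the fixed commit cost is amortized over more batches; validium mode always returns (1, 1) regardless of the backlog.
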
func (r *Layer2Relayer) contextIDFromBatches(codecVersion encoding.CodecVersion, batches []*dbBatchWithChunks) string {
@@ -999,6 +1017,18 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*db
}

func (r *Layer2Relayer) constructCommitBatchPayloadValidium(batch *dbBatchWithChunks) ([]byte, uint64, uint64, error) {
    // Check state root is not zero
    stateRoot := common.HexToHash(batch.Batch.StateRoot)
    if stateRoot == (common.Hash{}) {
        return nil, 0, 0, fmt.Errorf("batch %d state root is zero", batch.Batch.Index)
    }

    // Check parent batch hash is not zero
    parentBatchHash := common.HexToHash(batch.Batch.ParentBatchHash)
    if parentBatchHash == (common.Hash{}) {
        return nil, 0, 0, fmt.Errorf("batch %d parent batch hash is zero", batch.Batch.Index)
    }

    // Calculate metrics
    var maxBlockHeight uint64
    var totalGasUsed uint64
@@ -1018,6 +1048,7 @@ func (r *Layer2Relayer) constructCommitBatchPayloadValidium(batch *dbBatchWithCh

    lastChunk := batch.Chunks[len(batch.Chunks)-1]
    commitment := common.HexToHash(lastChunk.EndBlockHash)

    version := encoding.CodecVersion(batch.Batch.CodecVersion)
    calldata, err := r.validiumABI.Pack("commitBatch", version, common.HexToHash(batch.Batch.ParentBatchHash), common.HexToHash(batch.Batch.StateRoot), common.HexToHash(batch.Batch.WithdrawRoot), commitment[:])
    if err != nil {
@@ -1028,6 +1059,12 @@ func (r *Layer2Relayer) constructCommitBatchPayloadValidium(batch *dbBatchWithCh
}

func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, error) {
    // Check state root is not zero
    stateRoot := common.HexToHash(dbBatch.StateRoot)
    if stateRoot == (common.Hash{}) {
        return nil, fmt.Errorf("batch %d state root is zero", dbBatch.Index)
    }

    if aggProof != nil { // finalizeBundle with proof.
        calldata, packErr := r.l1RollupABI.Pack(
            "finalizeBundlePostEuclidV2",

487
rollup/internal/controller/relayer/l2_relayer_sanity.go
Normal file
@@ -0,0 +1,487 @@
|
||||
package relayer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
|
||||
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
)
|
||||
|
||||
// sanityChecksCommitBatchCodecV7CalldataAndBlobs performs comprehensive validation of the constructed
|
||||
// transaction data (calldata and blobs) by parsing them and comparing against database records.
|
||||
// This ensures the constructed transaction data is correct and consistent with the database state.
|
||||
func (r *Layer2Relayer) sanityChecksCommitBatchCodecV7CalldataAndBlobs(calldata []byte, blobs []*kzg4844.Blob) error {
|
||||
if r.l1RollupABI == nil {
|
||||
return fmt.Errorf("l1RollupABI is nil: cannot parse commitBatches calldata")
|
||||
}
|
||||
calldataInfo, err := parseCommitBatchesCalldata(r.l1RollupABI, calldata)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse calldata: %w", err)
|
||||
}
|
||||
|
||||
batchesToValidate, l1MessagesWithBlockNumbers, err := r.getBatchesFromCalldata(calldataInfo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get batches from database: %w", err)
|
||||
}
|
||||
|
||||
if err := r.validateCalldataAndBlobsAgainstDatabase(calldataInfo, blobs, batchesToValidate, l1MessagesWithBlockNumbers); err != nil {
|
||||
return fmt.Errorf("calldata and blobs validation failed: %w", err)
|
||||
}
|
||||
|
||||
if err := r.validateDatabaseConsistency(batchesToValidate); err != nil {
|
||||
return fmt.Errorf("database consistency validation failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||

// CalldataInfo holds parsed information from commitBatches calldata
type CalldataInfo struct {
	Version         uint8
	ParentBatchHash common.Hash
	LastBatchHash   common.Hash
}

// parseCommitBatchesCalldata parses the commitBatches calldata and extracts key information
func parseCommitBatchesCalldata(abi *abi.ABI, calldata []byte) (*CalldataInfo, error) {
	method := abi.Methods["commitBatches"]
	decoded, err := method.Inputs.Unpack(calldata[4:])
	if err != nil {
		return nil, fmt.Errorf("failed to unpack commitBatches calldata: %w", err)
	}

	if len(decoded) != 3 {
		return nil, fmt.Errorf("unexpected number of decoded parameters: got %d, want 3", len(decoded))
	}

	version, ok := decoded[0].(uint8)
	if !ok {
		return nil, fmt.Errorf("failed to type assert version to uint8")
	}

	parentBatchHashB, ok := decoded[1].([32]uint8)
	if !ok {
		return nil, fmt.Errorf("failed to type assert parentBatchHash to [32]uint8")
	}
	parentBatchHash := common.BytesToHash(parentBatchHashB[:])

	lastBatchHashB, ok := decoded[2].([32]uint8)
	if !ok {
		return nil, fmt.Errorf("failed to type assert lastBatchHash to [32]uint8")
	}
	lastBatchHash := common.BytesToHash(lastBatchHashB[:])

	return &CalldataInfo{
		Version:         version,
		ParentBatchHash: parentBatchHash,
		LastBatchHash:   lastBatchHash,
	}, nil
}
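
// Hardening sketch (an assumption, not part of the function above): the parser
// trusts that the calldata begins with the commitBatches selector before slicing
// calldata[4:]. A guard against mismatched or truncated input could compare the
// 4-byte method ID first (this would also need the bytes import):
//
//	if len(calldata) < 4 || !bytes.Equal(calldata[:4], method.ID) {
//		return nil, fmt.Errorf("calldata does not start with the commitBatches selector")
//	}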

// getBatchesFromCalldata retrieves the relevant batches from the database based on calldata information
func (r *Layer2Relayer) getBatchesFromCalldata(info *CalldataInfo) ([]*dbBatchWithChunks, map[uint64][]*types.TransactionData, error) {
	// Get the parent batch to determine the starting point
	parentBatch, err := r.batchOrm.GetBatchByHash(r.ctx, info.ParentBatchHash.Hex())
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get parent batch by hash %s: %w", info.ParentBatchHash.Hex(), err)
	}

	// Get the last batch to determine the ending point
	lastBatch, err := r.batchOrm.GetBatchByHash(r.ctx, info.LastBatchHash.Hex())
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get last batch by hash %s: %w", info.LastBatchHash.Hex(), err)
	}

	// Get all batches in the range (parent+1 to last)
	firstBatchIndex := parentBatch.Index + 1
	lastBatchIndex := lastBatch.Index

	// Check if the range is valid
	if firstBatchIndex > lastBatchIndex {
		return nil, nil, fmt.Errorf("no batches found in range: first index %d, last index %d", firstBatchIndex, lastBatchIndex)
	}

	var batchesToValidate []*dbBatchWithChunks
	l1MessagesWithBlockNumbers := make(map[uint64][]*types.TransactionData)
	for batchIndex := firstBatchIndex; batchIndex <= lastBatchIndex; batchIndex++ {
		dbBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, batchIndex)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to get batch by index %d: %w", batchIndex, err)
		}

		// Get chunks for this batch
		dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to get chunks for batch %d: %w", batchIndex, err)
		}

		batchesToValidate = append(batchesToValidate, &dbBatchWithChunks{
			Batch:  dbBatch,
			Chunks: dbChunks,
		})

		// If this batch contains L1 messages, collect them keyed by block number
		for _, chunk := range dbChunks {
			if chunk.TotalL1MessagesPoppedInChunk > 0 {
				blockWithL1Messages, err := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, chunk.StartBlockNumber, chunk.EndBlockNumber)
				if err != nil {
					return nil, nil, fmt.Errorf("failed to get L2 blocks for chunk %d: %w", chunk.Index, err)
				}
				var l1MessagesCount uint64
				for _, block := range blockWithL1Messages {
					bn := block.Header.Number.Uint64()
					seenL2 := false
					for _, tx := range block.Transactions {
						if tx.Type == types.L1MessageTxType {
							if seenL2 {
								// Invariant violated: found an L1 message after an L2 transaction in the same block.
								return nil, nil, fmt.Errorf("L1 message after L2 transaction in block %d", bn)
							}
							l1MessagesWithBlockNumbers[bn] = append(l1MessagesWithBlockNumbers[bn], tx)
							l1MessagesCount++
						} else {
							seenL2 = true
						}
					}
				}
				if chunk.TotalL1MessagesPoppedInChunk != l1MessagesCount {
					return nil, nil, fmt.Errorf("chunk %d has inconsistent L1 messages count: expected %d, got %d", chunk.Index, chunk.TotalL1MessagesPoppedInChunk, l1MessagesCount)
				}
			}
		}
	}
	return batchesToValidate, l1MessagesWithBlockNumbers, nil
}
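
// The per-block ordering invariant enforced above, on hypothetical transaction
// type sequences:
//
//	[L1, L1, L2, L2] -> accepted: all L1 messages precede the L2 transactions
//	[L1, L2, L1]     -> rejected: an L1 message follows an L2 transaction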

// validateDatabaseConsistency performs comprehensive validation of database records
func (r *Layer2Relayer) validateDatabaseConsistency(batchesToValidate []*dbBatchWithChunks) error {
	if len(batchesToValidate) == 0 {
		return fmt.Errorf("no batches to validate")
	}

	// Guard against an empty chunk list before indexing into it below
	if len(batchesToValidate[0].Chunks) == 0 {
		return fmt.Errorf("batch %d has no chunks", batchesToValidate[0].Batch.Index)
	}

	// Get previous chunk for continuity check
	firstChunk := batchesToValidate[0].Chunks[0]
	if firstChunk.Index == 0 {
		return fmt.Errorf("genesis chunk should not be in normal batch submission flow, chunk index: %d", firstChunk.Index)
	}

	prevChunk, err := r.chunkOrm.GetChunkByIndex(r.ctx, firstChunk.Index-1)
	if err != nil {
		return fmt.Errorf("failed to get previous chunk %d for continuity check: %w", firstChunk.Index-1, err)
	}

	firstBatchCodecVersion := batchesToValidate[0].Batch.CodecVersion
	for i, batch := range batchesToValidate {
		// Validate codec version consistency
		if batch.Batch.CodecVersion != firstBatchCodecVersion {
			return fmt.Errorf("batch %d has different codec version %d, expected %d", batch.Batch.Index, batch.Batch.CodecVersion, firstBatchCodecVersion)
		}

		// Validate individual batch
		if err := r.validateSingleBatchConsistency(batch, i, batchesToValidate); err != nil {
			return err
		}

		// Validate chunks in this batch
		if err := r.validateBatchChunksConsistency(batch, prevChunk); err != nil {
			return err
		}

		// Update prevChunk to the last chunk of this batch for the next iteration
		if len(batch.Chunks) == 0 {
			return fmt.Errorf("batch %d has no chunks", batch.Batch.Index)
		}
		prevChunk = batch.Chunks[len(batch.Chunks)-1]
	}

	return nil
}

// validateSingleBatchConsistency validates a single batch's consistency
func (r *Layer2Relayer) validateSingleBatchConsistency(batch *dbBatchWithChunks, i int, allBatches []*dbBatchWithChunks) error {
	if batch == nil || batch.Batch == nil {
		return fmt.Errorf("batch %d is nil", i)
	}

	if len(batch.Chunks) == 0 {
		return fmt.Errorf("batch %d has no chunks", batch.Batch.Index)
	}

	// Validate essential batch fields
	batchHash := common.HexToHash(batch.Batch.Hash)
	if batchHash == (common.Hash{}) {
		return fmt.Errorf("batch %d hash is zero", batch.Batch.Index)
	}

	if batch.Batch.Index == 0 {
		return fmt.Errorf("batch %d has zero index (only genesis batch should have index 0)", i)
	}

	parentBatchHash := common.HexToHash(batch.Batch.ParentBatchHash)
	if parentBatchHash == (common.Hash{}) {
		return fmt.Errorf("batch %d parent batch hash is zero", batch.Batch.Index)
	}

	stateRoot := common.HexToHash(batch.Batch.StateRoot)
	if stateRoot == (common.Hash{}) {
		return fmt.Errorf("batch %d state root is zero", batch.Batch.Index)
	}

	// Check batch index continuity
	if i > 0 {
		prevBatch := allBatches[i-1]
		if batch.Batch.Index != prevBatch.Batch.Index+1 {
			return fmt.Errorf("batch index is not sequential: prev batch index %d, current batch index %d", prevBatch.Batch.Index, batch.Batch.Index)
		}
		if parentBatchHash != common.HexToHash(prevBatch.Batch.Hash) {
			return fmt.Errorf("parent batch hash does not match previous batch hash: expected %s, got %s", prevBatch.Batch.Hash, batch.Batch.ParentBatchHash)
		}
	} else {
		// For the first batch, verify continuity with parent batch from database
		parentBatch, err := r.batchOrm.GetBatchByHash(r.ctx, batch.Batch.ParentBatchHash)
		if err != nil {
			return fmt.Errorf("failed to get parent batch %s for batch %d: %w", batch.Batch.ParentBatchHash, batch.Batch.Index, err)
		}
		if batch.Batch.Index != parentBatch.Index+1 {
			return fmt.Errorf("first batch index is not sequential with parent: parent batch index %d, current batch index %d", parentBatch.Index, batch.Batch.Index)
		}
	}

	// Validate L1 message queue consistency
	if err := r.validateMessageQueueConsistency(batch.Batch.Index, batch.Chunks, common.HexToHash(batch.Batch.PrevL1MessageQueueHash), common.HexToHash(batch.Batch.PostL1MessageQueueHash)); err != nil {
		return err
	}

	return nil
}

// validateBatchChunksConsistency validates chunks within a batch
func (r *Layer2Relayer) validateBatchChunksConsistency(batch *dbBatchWithChunks, prevChunk *orm.Chunk) error {
	// Check codec version consistency between chunks and batch
	for _, chunk := range batch.Chunks {
		if chunk.CodecVersion != batch.Batch.CodecVersion {
			return fmt.Errorf("batch %d chunk %d has different codec version %d, expected %d", batch.Batch.Index, chunk.Index, chunk.CodecVersion, batch.Batch.CodecVersion)
		}
	}

	// Validate each chunk individually
	currentPrevChunk := prevChunk
	for j, chunk := range batch.Chunks {
		if err := r.validateSingleChunkConsistency(chunk, currentPrevChunk); err != nil {
			return fmt.Errorf("batch %d chunk %d: %w", batch.Batch.Index, j, err)
		}
		currentPrevChunk = chunk
	}

	return nil
}

// validateSingleChunkConsistency validates a single chunk
func (r *Layer2Relayer) validateSingleChunkConsistency(chunk *orm.Chunk, prevChunk *orm.Chunk) error {
	if chunk == nil {
		return fmt.Errorf("chunk is nil")
	}

	chunkHash := common.HexToHash(chunk.Hash)
	if chunkHash == (common.Hash{}) {
		return fmt.Errorf("chunk %d hash is zero", chunk.Index)
	}

	// Check chunk index continuity
	if chunk.Index != prevChunk.Index+1 {
		return fmt.Errorf("chunk index is not sequential: prev chunk index %d, current chunk index %d", prevChunk.Index, chunk.Index)
	}

	// Validate block range
	if chunk.StartBlockNumber == 0 && chunk.EndBlockNumber == 0 {
		return fmt.Errorf("chunk %d has zero block range", chunk.Index)
	}

	if chunk.StartBlockNumber > chunk.EndBlockNumber {
		return fmt.Errorf("chunk %d has invalid block range: start %d > end %d", chunk.Index, chunk.StartBlockNumber, chunk.EndBlockNumber)
	}

	// Check hash fields
	startBlockHash := common.HexToHash(chunk.StartBlockHash)
	if startBlockHash == (common.Hash{}) {
		return fmt.Errorf("chunk %d start block hash is zero", chunk.Index)
	}

	endBlockHash := common.HexToHash(chunk.EndBlockHash)
	if endBlockHash == (common.Hash{}) {
		return fmt.Errorf("chunk %d end block hash is zero", chunk.Index)
	}

	// Check block continuity with previous chunk
	if prevChunk.EndBlockNumber+1 != chunk.StartBlockNumber {
		return fmt.Errorf("chunk is not continuous with previous chunk %d: prev end block %d, current start block %d", prevChunk.Index, prevChunk.EndBlockNumber, chunk.StartBlockNumber)
	}

	// Check L1 messages continuity
	expectedPoppedBefore := prevChunk.TotalL1MessagesPoppedBefore + prevChunk.TotalL1MessagesPoppedInChunk
	if chunk.TotalL1MessagesPoppedBefore != expectedPoppedBefore {
		return fmt.Errorf("L1 messages popped before is incorrect: expected %d, got %d", expectedPoppedBefore, chunk.TotalL1MessagesPoppedBefore)
	}

	return nil
}
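
// The continuity arithmetic enforced above, on hypothetical neighbouring chunks:
//
//	prev: Index=41, EndBlockNumber=1099, TotalL1MessagesPoppedBefore=7, TotalL1MessagesPoppedInChunk=2
//	curr: Index=42, StartBlockNumber=1100, TotalL1MessagesPoppedBefore=9
//
// Any other Index, StartBlockNumber, or popped-before value for curr is rejected.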

// validateCalldataAndBlobsAgainstDatabase validates calldata and blobs against database records
func (r *Layer2Relayer) validateCalldataAndBlobsAgainstDatabase(calldataInfo *CalldataInfo, blobs []*kzg4844.Blob, batchesToValidate []*dbBatchWithChunks, l1MessagesWithBlockNumbers map[uint64][]*types.TransactionData) error {
	// Validate blobs
	if len(blobs) == 0 {
		return fmt.Errorf("no blobs provided")
	}

	// Validate blob count
	if len(blobs) != len(batchesToValidate) {
		return fmt.Errorf("blob count mismatch: got %d blobs, expected %d batches", len(blobs), len(batchesToValidate))
	}

	// Get first and last batches for validation, length check is already done above
	firstBatch := batchesToValidate[0].Batch
	lastBatch := batchesToValidate[len(batchesToValidate)-1].Batch

	// Validate codec version
	if calldataInfo.Version != uint8(firstBatch.CodecVersion) {
		return fmt.Errorf("version mismatch: calldata=%d, db=%d", calldataInfo.Version, firstBatch.CodecVersion)
	}

	// Validate parent batch hash
	if calldataInfo.ParentBatchHash != common.HexToHash(firstBatch.ParentBatchHash) {
		return fmt.Errorf("parentBatchHash mismatch: calldata=%s, db=%s", calldataInfo.ParentBatchHash.Hex(), firstBatch.ParentBatchHash)
	}

	// Validate last batch hash
	if calldataInfo.LastBatchHash != common.HexToHash(lastBatch.Hash) {
		return fmt.Errorf("lastBatchHash mismatch: calldata=%s, db=%s", calldataInfo.LastBatchHash.Hex(), lastBatch.Hash)
	}

	// Get codec for blob decoding
	codec, err := encoding.CodecFromVersion(encoding.CodecVersion(firstBatch.CodecVersion))
	if err != nil {
		return fmt.Errorf("failed to get codec: %w", err)
	}

	// Validate each blob against its corresponding batch
	for i, blob := range blobs {
		dbBatch := batchesToValidate[i].Batch
		if err := r.validateSingleBlobAgainstBatch(blob, dbBatch, codec, l1MessagesWithBlockNumbers); err != nil {
			return fmt.Errorf("blob validation failed for batch %d: %w", dbBatch.Index, err)
		}
	}

	return nil
}
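
// Note the one-blob-per-batch layout implied by the count check above: blobs[i]
// is paired with batchesToValidate[i], so a commitBatches transaction carrying n
// blobs must cover exactly the n consecutive batches between parentBatchHash and
// lastBatchHash.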

// validateSingleBlobAgainstBatch validates a single blob against its batch data
func (r *Layer2Relayer) validateSingleBlobAgainstBatch(blob *kzg4844.Blob, dbBatch *orm.Batch, codec encoding.Codec, l1MessagesWithBlockNumbers map[uint64][]*types.TransactionData) error {
	// Decode blob payload
	payload, err := codec.DecodeBlob(blob)
	if err != nil {
		return fmt.Errorf("failed to decode blob: %w", err)
	}

	// Validate batch hash
	daBatch, err := assembleDABatchFromPayload(payload, dbBatch, codec, l1MessagesWithBlockNumbers)
	if err != nil {
		return fmt.Errorf("failed to assemble batch from payload: %w", err)
	}

	if daBatch.Hash() != common.HexToHash(dbBatch.Hash) {
		return fmt.Errorf("batch hash mismatch: decoded from blob=%s, db=%s", daBatch.Hash().Hex(), dbBatch.Hash)
	}

	return nil
}

// validateMessageQueueConsistency validates L1 message queue hash consistency
func (r *Layer2Relayer) validateMessageQueueConsistency(batchIndex uint64, chunks []*orm.Chunk, prevL1MsgQueueHash common.Hash, postL1MsgQueueHash common.Hash) error {
	if len(chunks) == 0 {
		return fmt.Errorf("batch %d has no chunks for message queue validation", batchIndex)
	}

	firstChunk := chunks[0]
	lastChunk := chunks[len(chunks)-1]

	// Calculate total L1 messages in this batch
	var totalL1MessagesInBatch uint64
	for _, chunk := range chunks {
		totalL1MessagesInBatch += chunk.TotalL1MessagesPoppedInChunk
	}

	// If there were L1 messages processed before this batch, prev hash should not be zero
	if firstChunk.TotalL1MessagesPoppedBefore > 0 && prevL1MsgQueueHash == (common.Hash{}) {
		return fmt.Errorf("batch %d prev L1 message queue hash is zero but %d L1 messages were processed before", batchIndex, firstChunk.TotalL1MessagesPoppedBefore)
	}

	// If there are any L1 messages processed up to this batch, post hash should not be zero
	totalL1MessagesProcessed := lastChunk.TotalL1MessagesPoppedBefore + lastChunk.TotalL1MessagesPoppedInChunk
	if totalL1MessagesProcessed > 0 && postL1MsgQueueHash == (common.Hash{}) {
		return fmt.Errorf("batch %d post L1 message queue hash is zero but %d L1 messages were processed in total", batchIndex, totalL1MessagesProcessed)
	}

	// Prev and post queue hashes should be different if L1 messages were processed in this batch
	if totalL1MessagesInBatch > 0 && prevL1MsgQueueHash == postL1MsgQueueHash {
		return fmt.Errorf("batch %d has same prev and post L1 message queue hashes but processed %d L1 messages in this batch", batchIndex, totalL1MessagesInBatch)
	}

	return nil
}
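
// Accounting example for the checks above (hypothetical values): a batch whose
// two chunks pop 2 and 3 L1 messages, with 5 messages popped before the batch:
//
//	firstChunk.TotalL1MessagesPoppedBefore == 5 -> prev queue hash must be non-zero
//	totalL1MessagesProcessed == 7 + 3 == 10     -> post queue hash must be non-zero
//	totalL1MessagesInBatch == 2 + 3 == 5        -> prev and post hashes must differ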

func assembleDABatchFromPayload(payload encoding.DABlobPayload, dbBatch *orm.Batch, codec encoding.Codec, l1MessagesWithBlockNumbers map[uint64][]*types.TransactionData) (encoding.DABatch, error) {
	blocks, err := assembleBlocksFromPayload(payload, l1MessagesWithBlockNumbers)
	if err != nil {
		return nil, fmt.Errorf("failed to assemble blocks from payload batch_index=%d codec_version=%d parent_batch_hash=%s: %w", dbBatch.Index, dbBatch.CodecVersion, dbBatch.ParentBatchHash, err)
	}
	batch := &encoding.Batch{
		Index:                  dbBatch.Index,                              // The database provides only batch index, other fields are derived from blob payload
		ParentBatchHash:        common.HexToHash(dbBatch.ParentBatchHash), // The first batch's parent hash is verified with calldata, subsequent batches are linked via dbBatch.ParentBatchHash and verified in database consistency checks
		PrevL1MessageQueueHash: payload.PrevL1MessageQueueHash(),
		PostL1MessageQueueHash: payload.PostL1MessageQueueHash(),
		Blocks:                 blocks,
		Chunks: []*encoding.Chunk{ // One chunk for this batch to pass sanity checks when building DABatch
			{
				Blocks:                 blocks,
				PrevL1MessageQueueHash: payload.PrevL1MessageQueueHash(),
				PostL1MessageQueueHash: payload.PostL1MessageQueueHash(),
			},
		},
	}
	daBatch, err := codec.NewDABatch(batch)
	if err != nil {
		return nil, fmt.Errorf("failed to build DABatch batch_index=%d codec_version=%d parent_batch_hash=%s: %w", dbBatch.Index, dbBatch.CodecVersion, dbBatch.ParentBatchHash, err)
	}
	return daBatch, nil
}
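
// Design note on the synthetic single chunk above: codec.NewDABatch runs sanity
// checks that expect chunk data, and (per the inline comment) the chunk exists
// only to satisfy them. The reconstructed DABatch hash is still compared against
// dbBatch.Hash in validateSingleBlobAgainstBatch, so any divergence in the
// payload-derived fields surfaces there.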

func assembleBlocksFromPayload(payload encoding.DABlobPayload, l1MessagesWithBlockNumbers map[uint64][]*types.TransactionData) ([]*encoding.Block, error) {
	daBlocks := payload.Blocks()
	txns := payload.Transactions()
	if len(daBlocks) != len(txns) {
		return nil, fmt.Errorf("mismatched number of blocks and transactions: %d blocks, %d transactions", len(daBlocks), len(txns))
	}
	blocks := make([]*encoding.Block, len(daBlocks))
	for i := range daBlocks {
		blocks[i] = &encoding.Block{
			Header: &types.Header{
				Number:   new(big.Int).SetUint64(daBlocks[i].Number()),
				Time:     daBlocks[i].Timestamp(),
				BaseFee:  daBlocks[i].BaseFee(),
				GasLimit: daBlocks[i].GasLimit(),
			},
		}
		// Ensure per-block ordering: [L1 messages][L2 transactions]. Prepend L1 messages (if any), then append L2 transactions.
		if l1Messages, ok := l1MessagesWithBlockNumbers[daBlocks[i].Number()]; ok {
			blocks[i].Transactions = l1Messages
		}
		blocks[i].Transactions = append(blocks[i].Transactions, encoding.TxsToTxsData(txns[i])...)
	}
	return blocks, nil
}

131
rollup/internal/controller/relayer/l2_relayer_sanity_test.go
Normal file
@@ -0,0 +1,131 @@
package relayer

import (
	"encoding/json"
	"fmt"
	"math/big"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"

	bridgeabi "scroll-tech/rollup/abi"
	"scroll-tech/rollup/internal/orm"
)

func TestAssembleDABatch(t *testing.T) {
	calldataHex := "0x9bbaa2ba0000000000000000000000000000000000000000000000000000000000000008146793a7d71663cd87ec9713f72242a3798d5e801050130a3e16efaa09fb803e58af2593dadc8b9fff75a2d27199cb97ec115bade109b8d691a512608ef180eb"
	blobsPath := filepath.Join("../../../testdata", "commit_batches_blobs.json")

	calldata, err := hexutil.Decode(strings.TrimSpace(calldataHex))
	assert.NoErrorf(t, err, "failed to decode calldata: %s", calldataHex)

	blobs, err := loadBlobsFromJSON(blobsPath)
	assert.NoErrorf(t, err, "failed to read blobs: %s", blobsPath)
	assert.NotEmpty(t, blobs, "no blobs provided")

	info, err := parseCommitBatchesCalldata(bridgeabi.ScrollChainABI, calldata)
	assert.NoError(t, err)

	codec, err := encoding.CodecFromVersion(encoding.CodecVersion(info.Version))
	assert.NoErrorf(t, err, "failed to get codec from version %d", info.Version)

	parentBatchHash := info.ParentBatchHash
	index := uint64(113571)

	t.Logf("calldata parsed: version=%d parentBatchHash=%s lastBatchHash=%s blobs=%d", info.Version, info.ParentBatchHash.Hex(), info.LastBatchHash.Hex(), len(blobs))

	fromAddr := common.HexToAddress("0x61d8d3e7f7c656493d1d76aaa1a836cedfcbc27b")
	toAddr := common.HexToAddress("0xba50f5340fb9f3bd074bd638c9be13ecb36e603d")
	l1MessagesWithBlockNumbers := map[uint64][]*types.TransactionData{
		11488527: {
			&types.TransactionData{
				Type:  types.L1MessageTxType,
				Nonce: 1072515,
				Gas:   340000,
				To:    &toAddr,
				Value: (*hexutil.Big)(big.NewInt(0)),
				Data:  "0x8ef1332e00000000000000000000000081f3843af1fbab046b771f0d440c04ebb2b7513f000000000000000000000000cec03800074d0ac0854bf1f34153cc4c8baeeb1e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000105d8300000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000084f03efa3700000000000000000000000000000000000000000000000000000000000024730000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000171bdb6e3062daaee1845ba4cb1902169feb5a9b9555a882d45637d3bd29eb83500000000000000000000000000000000000000000000000000000000",
				From:  fromAddr,
			},
		},
		11488622: {
			&types.TransactionData{
				Type:  types.L1MessageTxType,
				Nonce: 1072516,
				Gas:   340000,
				To:    &toAddr,
				Value: (*hexutil.Big)(big.NewInt(0)),
				Data:  "0x8ef1332e00000000000000000000000081f3843af1fbab046b771f0d440c04ebb2b7513f000000000000000000000000cec03800074d0ac0854bf1f34153cc4c8baeeb1e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000105d8400000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000084f03efa370000000000000000000000000000000000000000000000000000000000002474000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000012aeb01535c1845b689bfce22e53029ec59ec75ea20f660d7c5fcd99f55b75b6900000000000000000000000000000000000000000000000000000000",
				From:  fromAddr,
			},
		},
		11489190: {
			&types.TransactionData{
				Type:  types.L1MessageTxType,
				Nonce: 1072517,
				Gas:   168000,
				To:    &toAddr,
				Value: (*hexutil.Big)(big.NewInt(0)),
				Data:  "0x8ef1332e0000000000000000000000003b1399523f819ea4c4d3e76dddefaf4226c6ba570000000000000000000000003b1399523f819ea4c4d3e76dddefaf4226c6ba5700000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000105d8500000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000",
				From:  fromAddr,
			},
		},
	}

	for i, blob := range blobs {
		payload, decErr := codec.DecodeBlob(blob)
		assert.NoErrorf(t, decErr, "blob[%d] decode failed", i)
		if decErr != nil {
			continue
		}

		dbBatch := &orm.Batch{
			Index:           index,
			ParentBatchHash: parentBatchHash.Hex(),
		}

		daBatch, asmErr := assembleDABatchFromPayload(payload, dbBatch, codec, l1MessagesWithBlockNumbers)
		assert.NoErrorf(t, asmErr, "blob[%d] assemble failed", i)
		if asmErr != nil {
			// daBatch is nil when assembly fails; skip the hash chaining below.
			continue
		}
		t.Logf("blob[%d] DABatch hash=%s", i, daBatch.Hash().Hex())

		index++
		parentBatchHash = daBatch.Hash()
	}
}

func loadBlobsFromJSON(path string) ([]*kzg4844.Blob, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}

	var arr []hexutil.Bytes
	if err := json.Unmarshal(raw, &arr); err != nil {
		return nil, fmt.Errorf("invalid JSON; expect [\"0x...\"] array: %w", err)
	}

	out := make([]*kzg4844.Blob, 0, len(arr))
	var empty kzg4844.Blob
	want := len(empty)

	for i, b := range arr {
		if len(b) != want {
			return nil, fmt.Errorf("blob[%d] length mismatch: got %d, want %d", i, len(b), want)
		}
		blob := new(kzg4844.Blob)
		copy(blob[:], b)
		out = append(out, blob)
	}
	return out, nil
}
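
// Shape of the testdata file read above, schematically (hypothetical, truncated
// values): a JSON array of 0x-prefixed hex strings, each decoding to exactly
// len(kzg4844.Blob) bytes.
//
//	["0x0001...0000", "0x0001...0000"]
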
@@ -70,15 +70,18 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
	_, err = chunkOrm.InsertChunk(context.Background(), chunk2, encoding.CodecV7, rutils.ChunkMetrics{})
	assert.NoError(t, err)

	batchOrm := orm.NewBatch(db)
	genesisBatch, err := batchOrm.GetBatchByIndex(context.Background(), 0)
	assert.NoError(t, err)

	batch := &encoding.Batch{
		Index:                      1,
		TotalL1MessagePoppedBefore: 0,
		ParentBatchHash:            common.Hash{},
		ParentBatchHash:            common.HexToHash(genesisBatch.Hash),
		Chunks:                     []*encoding.Chunk{chunk1, chunk2},
		Blocks:                     []*encoding.Block{block1, block2},
	}

	batchOrm := orm.NewBatch(db)
	dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV7, rutils.BatchMetrics{})
	assert.NoError(t, err)

@@ -56,6 +56,7 @@ func setupEnv(t *testing.T) {

	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetPoSL1EndPoint()
	assert.NoError(t, err)
	cfg.L2Config.RelayerConfig.SenderConfig.WriteEndpoints = []string{cfg.L2Config.RelayerConfig.SenderConfig.Endpoint, cfg.L2Config.RelayerConfig.SenderConfig.Endpoint}
	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL2GethEndPoint()
	assert.NoError(t, err)

@@ -81,6 +82,7 @@ func setupEnv(t *testing.T) {
	block1 = &encoding.Block{}
	err = json.Unmarshal(templateBlockTrace1, block1)
	assert.NoError(t, err)
	block1.Header.Number = big.NewInt(1)
	chunk1 = &encoding.Chunk{Blocks: []*encoding.Block{block1}}
	codec, err := encoding.CodecFromVersion(encoding.CodecV0)
	assert.NoError(t, err)

@@ -94,6 +96,7 @@ func setupEnv(t *testing.T) {
	block2 = &encoding.Block{}
	err = json.Unmarshal(templateBlockTrace2, block2)
	assert.NoError(t, err)
	block2.Header.Number = big.NewInt(2)
	chunk2 = &encoding.Chunk{Blocks: []*encoding.Block{block2}}
	daChunk2, err := codec.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
	assert.NoError(t, err)

@@ -7,6 +7,7 @@ import (
	"fmt"
	"math/big"
	"strings"
	"sync"
	"time"

	"github.com/holiman/uint256"

@@ -67,7 +68,8 @@ type FeeData struct {
type Sender struct {
	config     *config.SenderConfig
	gethClient *gethclient.Client
	client *ethclient.Client // The client to retrieve on chain data or send transaction.
	client       *ethclient.Client   // The client to retrieve on chain data (read-only)
	writeClients []*ethclient.Client // The clients to send transactions to (write operations)
	transactionSigner *TransactionSigner
	chainID *big.Int // The chain id of the endpoint
	ctx context.Context

@@ -90,9 +92,10 @@ func NewSender(ctx context.Context, config *config.SenderConfig, signerConfig *c
		return nil, fmt.Errorf("invalid params, EscalateMultipleNum; %v, EscalateMultipleDen: %v", config.EscalateMultipleNum, config.EscalateMultipleDen)
	}

	// Initialize read client
	rpcClient, err := rpc.Dial(config.Endpoint)
	if err != nil {
		return nil, fmt.Errorf("failed to dial eth client, err: %w", err)
		return nil, fmt.Errorf("failed to dial read client, err: %w", err)
	}

	client := ethclient.NewClient(rpcClient)

@@ -105,12 +108,42 @@ func NewSender(ctx context.Context, config *config.SenderConfig, signerConfig *c
		return nil, fmt.Errorf("failed to create transaction signer, err: %w", err)
	}

	// Initialize write clients
	var writeClients []*ethclient.Client
	if len(config.WriteEndpoints) > 0 {
		// Use specified write endpoints
		for i, endpoint := range config.WriteEndpoints {
			writeRpcClient, err := rpc.Dial(endpoint)
			if err != nil {
				return nil, fmt.Errorf("failed to dial write client %d (endpoint: %s), err: %w", i, endpoint, err)
			}
			writeClient := ethclient.NewClient(writeRpcClient)

			// Verify the write client is connected to the same chain
			writeChainID, err := writeClient.ChainID(ctx)
			if err != nil {
				return nil, fmt.Errorf("failed to get chain ID from write client %d (endpoint: %s), err: %w", i, endpoint, err)
			}
			if writeChainID.Cmp(chainID) != 0 {
				return nil, fmt.Errorf("write client %d (endpoint: %s) has different chain ID %s, expected %s", i, endpoint, writeChainID.String(), chainID.String())
			}

			writeClients = append(writeClients, writeClient)
		}
		log.Info("initialized sender with multiple write clients", "service", service, "name", name, "readEndpoint", config.Endpoint, "writeEndpoints", config.WriteEndpoints)
	} else {
		// Use read client for writing (backward compatibility)
		writeClients = append(writeClients, client)
		log.Info("initialized sender with single client", "service", service, "name", name, "endpoint", config.Endpoint)
	}

	// Create sender instance first and then initialize nonce
	sender := &Sender{
		ctx:               ctx,
		config:            config,
		gethClient:        gethclient.New(rpcClient),
		client:            client,
		writeClients:      writeClients,
		chainID:           chainID,
		transactionSigner: transactionSigner,
		db:                db,

@@ -169,6 +202,82 @@ func (s *Sender) getFeeData(target *common.Address, data []byte, sidecar *gethTy
	}
}

// sendTransactionToMultipleClients sends a transaction to all write clients in parallel
// and returns success if at least one client succeeds
func (s *Sender) sendTransactionToMultipleClients(signedTx *gethTypes.Transaction) error {
	ctx, cancel := context.WithTimeout(s.ctx, 15*time.Second)
	defer cancel()

	if len(s.writeClients) == 1 {
		// Single client - use direct approach
		return s.writeClients[0].SendTransaction(ctx, signedTx)
	}

	// Multiple clients - send in parallel
	type result struct {
		endpoint string
		err      error
	}

	resultChan := make(chan result, len(s.writeClients))
	var wg sync.WaitGroup

	// Send transaction to all write clients in parallel
	for i, client := range s.writeClients {
		wg.Add(1)
		// Determine endpoint URL for this client
		endpoint := s.config.WriteEndpoints[i]

		go func(ep string, writeClient *ethclient.Client) {
			defer wg.Done()
			err := writeClient.SendTransaction(ctx, signedTx)
			resultChan <- result{endpoint: ep, err: err}
		}(endpoint, client)
	}

	// Wait for all goroutines to finish
	go func() {
		wg.Wait()
		close(resultChan)
	}()

	// Collect results
	var errs []error
	for res := range resultChan {
		if res.err != nil {
			errs = append(errs, fmt.Errorf("%s: %w", res.endpoint, res.err))
			log.Warn("failed to send transaction to write client",
				"endpoint", res.endpoint,
				"txHash", signedTx.Hash().Hex(),
				"nonce", signedTx.Nonce(),
				"from", s.transactionSigner.GetAddr().String(),
				"error", res.err)
		} else {
			log.Info("successfully sent transaction to write client",
				"endpoint", res.endpoint,
				"txHash", signedTx.Hash().Hex(),
				"nonce", signedTx.Nonce(),
				"from", s.transactionSigner.GetAddr().String())
		}
	}

	// Check if at least one client succeeded
	if len(errs) < len(s.writeClients) {
		successCount := len(s.writeClients) - len(errs)
		if len(errs) > 0 {
			log.Info("transaction partially succeeded",
				"txHash", signedTx.Hash().Hex(),
				"successCount", successCount,
				"totalClients", len(s.writeClients),
				"failures", errors.Join(errs...))
		}
		return nil
	}

	// All clients failed
	return fmt.Errorf("failed to send transaction to all %d write clients: %w", len(s.writeClients), errors.Join(errs...))
}
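
// Semantics note: the fan-out above treats a single successful broadcast as
// overall success, on the assumption that a transaction accepted by any endpoint
// propagates through the mempool. Per-endpoint failures are only logged, and are
// joined into the returned error when every endpoint rejects the transaction.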

// SendTransaction sends a signed L2-to-L1 transaction.
func (s *Sender) SendTransaction(contextID string, target *common.Address, data []byte, blobs []*kzg4844.Blob) (common.Hash, uint64, error) {
	s.metrics.sendTransactionTotal.WithLabelValues(s.service, s.name).Inc()

@@ -178,6 +287,12 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
		err error
	)

	blockNumber, blockTimestamp, baseFee, blobBaseFee, err := s.getBlockNumberAndTimestampAndBaseFeeAndBlobFee(s.ctx)
	if err != nil {
		log.Error("failed to get block number and base fee", "error", err)
		return common.Hash{}, 0, fmt.Errorf("failed to get block number and base fee, err: %w", err)
	}

	if blobs != nil {
		// check that the number of pending blob-carrying txs is not too big
		if s.senderType == types.SenderTypeCommitBatch {

@@ -194,21 +309,24 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
			if numPendingTransactions >= s.config.MaxPendingBlobTxs {
				return common.Hash{}, 0, ErrTooManyPendingBlobTxs
			}

		}
		sidecar, err = makeSidecar(blobs)

		if blockTimestamp < s.config.FusakaTimestamp && (s.config.FusakaTimestamp-blockTimestamp) < 180 {
			return common.Hash{}, 0, fmt.Errorf("pausing blob txs before Fusaka upgrade, eta %d seconds", s.config.FusakaTimestamp-blockTimestamp)
		}

		version := gethTypes.BlobSidecarVersion0
		if blockTimestamp >= s.config.FusakaTimestamp {
			version = gethTypes.BlobSidecarVersion1
		}

		sidecar, err = makeSidecar(version, blobs)
		if err != nil {
			log.Error("failed to make sidecar for blob transaction", "error", err)
			return common.Hash{}, 0, fmt.Errorf("failed to make sidecar for blob transaction, err: %w", err)
		}
	}

	blockNumber, baseFee, blobBaseFee, err := s.getBlockNumberAndBaseFeeAndBlobFee(s.ctx)
	if err != nil {
		log.Error("failed to get block number and base fee", "error", err)
		return common.Hash{}, 0, fmt.Errorf("failed to get block number and base fee, err: %w", err)
	}

	if feeData, err = s.getFeeData(target, data, sidecar, baseFee, blobBaseFee); err != nil {
		s.metrics.sendTransactionFailureGetFee.WithLabelValues(s.service, s.name).Inc()
		log.Error("failed to get fee data", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "err", err)

@@ -230,7 +348,7 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
		return common.Hash{}, 0, fmt.Errorf("failed to insert transaction, err: %w", err)
	}

	if err := s.client.SendTransaction(s.ctx, signedTx); err != nil {
	if err := s.sendTransactionToMultipleClients(signedTx); err != nil {
		// Delete the transaction from the pending transaction table if it fails to send.
		if updateErr := s.pendingTransactionOrm.DeleteTransactionByTxHash(s.ctx, signedTx.Hash()); updateErr != nil {
			log.Error("failed to delete transaction", "tx hash", signedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", signedTx.Nonce(), "err", updateErr)

@@ -515,6 +633,10 @@ func (s *Sender) createReplacingTransaction(tx *gethTypes.Transaction, baseFee,

	nonce := tx.Nonce()
	s.metrics.resubmitTransactionTotal.WithLabelValues(s.service, s.name).Inc()

	// Note: This might fail during the Fusaka upgrade, if we originally sent a V0 blob tx.
	// Normally we would need to convert it to V1 before resubmitting. However, this case is
	// unlikely and geth would still accept the V0 version, so we omit the conversion.
	signedTx, err := s.createTx(&feeData, tx.To(), tx.Data(), tx.BlobTxSidecar(), nonce)
	if err != nil {
		log.Error("failed to create signed tx (resubmit case)", "from", s.transactionSigner.GetAddr().String(), "nonce", nonce, "err", err)

@@ -528,7 +650,7 @@
func (s *Sender) checkPendingTransaction() {
	s.metrics.senderCheckPendingTransactionTotal.WithLabelValues(s.service, s.name).Inc()

	blockNumber, baseFee, blobBaseFee, err := s.getBlockNumberAndBaseFeeAndBlobFee(s.ctx)
	blockNumber, _, baseFee, blobBaseFee, err := s.getBlockNumberAndTimestampAndBaseFeeAndBlobFee(s.ctx)
	if err != nil {
		log.Error("failed to get block number and base fee", "error", err)
		return

@@ -645,7 +767,7 @@ func (s *Sender) checkPendingTransaction() {
			return
		}

		if err := s.client.SendTransaction(s.ctx, newSignedTx); err != nil {
		if err := s.sendTransactionToMultipleClients(newSignedTx); err != nil {
			if strings.Contains(err.Error(), "nonce too low") {
				// When we receive a 'nonce too low' error but cannot find the transaction receipt, it indicates another transaction with this nonce has already been processed, so this transaction will never be mined and should be marked as failed.
				log.Warn("nonce too low detected, marking all non-confirmed transactions with same nonce as failed", "nonce", originalTx.Nonce(), "address", s.transactionSigner.GetAddr().Hex(), "txHash", originalTx.Hash().Hex(), "newTxHash", newSignedTx.Hash().Hex(), "err", err)

@@ -706,10 +828,10 @@ func (s *Sender) getSenderMeta() *orm.SenderMeta {
	}
}

func (s *Sender) getBlockNumberAndBaseFeeAndBlobFee(ctx context.Context) (uint64, uint64, uint64, error) {
func (s *Sender) getBlockNumberAndTimestampAndBaseFeeAndBlobFee(ctx context.Context) (uint64, uint64, uint64, uint64, error) {
	header, err := s.client.HeaderByNumber(ctx, big.NewInt(rpc.PendingBlockNumber.Int64()))
	if err != nil {
		return 0, 0, 0, fmt.Errorf("failed to get header by number, err: %w", err)
		return 0, 0, 0, 0, fmt.Errorf("failed to get header by number, err: %w", err)
	}

	var baseFee uint64

@@ -722,10 +844,10 @@ func (s *Sender) getBlockNumberAndBaseFeeAndBlobFee(ctx context.Context) (uint64
		blobBaseFee = misc.CalcBlobFee(*excess).Uint64()
	}
	// header.Number.Uint64() returns the pendingBlockNumber, so we subtract 1 to get the latestBlockNumber.
	return header.Number.Uint64() - 1, baseFee, blobBaseFee, nil
	return header.Number.Uint64() - 1, header.Time, baseFee, blobBaseFee, nil
}

func makeSidecar(blobsInput []*kzg4844.Blob) (*gethTypes.BlobTxSidecar, error) {
func makeSidecar(version byte, blobsInput []*kzg4844.Blob) (*gethTypes.BlobTxSidecar, error) {
	if len(blobsInput) == 0 {
		return nil, errors.New("blobsInput is empty")
	}

@@ -742,23 +864,33 @@ func makeSidecar(blobsInput []*kzg4844.Blob) (*gethTypes.BlobTxSidecar, error) {
	var proofs []kzg4844.Proof

	for i := range blobs {
		// Calculate commitment
		c, err := kzg4844.BlobToCommitment(&blobs[i])
		if err != nil {
			return nil, fmt.Errorf("failed to get blob commitment, err: %w", err)
		}

		p, err := kzg4844.ComputeBlobProof(&blobs[i], c)
		if err != nil {
			return nil, fmt.Errorf("failed to compute blob proof, err: %w", err)
		}

		commitments = append(commitments, c)
		proofs = append(proofs, p)

		// Calculate proof
		switch version {
		case gethTypes.BlobSidecarVersion0:
			p, err := kzg4844.ComputeBlobProof(&blobs[i], c)
			if err != nil {
				return nil, fmt.Errorf("failed to compute v0 blob proof, err: %w", err)
			}
			proofs = append(proofs, p)

		case gethTypes.BlobSidecarVersion1:
			ps, err := kzg4844.ComputeCellProofs(&blobs[i])
			if err != nil {
				return nil, fmt.Errorf("failed to compute v1 blob cell proofs, err: %w", err)
			}
			proofs = append(proofs, ps...)

		default:
			return nil, fmt.Errorf("unsupported blob sidecar version: %d", version)
		}
	}

	return &gethTypes.BlobTxSidecar{
		Blobs:       blobs,
		Commitments: commitments,
		Proofs:      proofs,
	}, nil
	return gethTypes.NewBlobTxSidecar(version, blobs, commitments, proofs), nil
}
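
// Proof-count arithmetic implied by the switch above (a worked example, assuming
// the 128-cell extended blobs of EIP-7594): a v0 sidecar carries one proof per
// blob, while a v1 sidecar carries one proof per cell, so for 3 blobs:
//
//	v0: len(proofs) == 3
//	v1: len(proofs) == 3 * 128 == 384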

@@ -291,7 +291,7 @@ func testAccessListTransactionGasLimit(t *testing.T) {

	var sidecar *gethTypes.BlobTxSidecar
	if txBlob[i] != nil {
		sidecar, err = makeSidecar([]*kzg4844.Blob{txBlob[i]})
		sidecar, err = makeSidecar(gethTypes.BlobSidecarVersion0, []*kzg4844.Blob{txBlob[i]})
		assert.NoError(t, err)
	}

@@ -332,7 +332,7 @@ func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
	}
	var sidecar *gethTypes.BlobTxSidecar
	if txBlob[i] != nil {
		sidecar, err = makeSidecar([]*kzg4844.Blob{txBlob[i]})
		sidecar, err = makeSidecar(gethTypes.BlobSidecarVersion0, []*kzg4844.Blob{txBlob[i]})
		assert.NoError(t, err)
	}
	tx, err := s.createTx(feeData, &common.Address{}, nil, sidecar, s.transactionSigner.GetNonce())

@@ -467,7 +467,7 @@ func testResubmitBlobTransactionWithRisingBaseFeeAndBlobBaseFee(t *testing.T) {
	})
	defer patchGuard.Reset()

	sidecar, err := makeSidecar(randBlobs(1))
	sidecar, err := makeSidecar(gethTypes.BlobSidecarVersion0, randBlobs(1))
	assert.NoError(t, err)
	tx := gethTypes.NewTx(&gethTypes.BlobTx{
		ChainID: uint256.MustFromBig(s.chainID),

@@ -799,7 +799,7 @@ func testBlobTransactionWithBlobhashOpContractCall(t *testing.T) {
	assert.NoError(t, migrate.ResetDB(sqlDB))

	blobs := randBlobs(1)
	sideCar, err := makeSidecar(blobs)
	sideCar, err := makeSidecar(gethTypes.BlobSidecarVersion0, blobs)
	assert.NoError(t, err)
	versionedHash := sideCar.BlobHashes()[0]
	blsModulo, ok := new(big.Int).SetString("52435875175126190479447740508185965837690552500527637822603658699938581184513", 10)

@@ -36,7 +36,7 @@ func testBatchProposerLimitsCodecV7(t *testing.T) {
			name:                       "Timeout",
			batchTimeoutSec:            0,
			expectedBatchesLen:         1,
			expectedChunksInFirstBatch: 2,
			expectedChunksInFirstBatch: 1,
		},
	}

@@ -72,8 +72,7 @@ func testBatchProposerLimitsCodecV7(t *testing.T) {
	assert.NoError(t, err)

	cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
		MaxBlockNumPerChunk:           1,
		MaxL2GasPerChunk:              20000000,
		MaxL2GasPerChunk:              math.MaxUint64,
		ChunkTimeoutSec:               300,
		MaxUncompressedBatchBytesSize: math.MaxUint64,
	}, encoding.CodecV7, &params.ChainConfig{

@@ -154,7 +153,6 @@ func testBatchProposerBlobSizeLimitCodecV7(t *testing.T) {
	chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}

	cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
		MaxBlockNumPerChunk:           math.MaxUint64,
		MaxL2GasPerChunk:              math.MaxUint64,
		ChunkTimeoutSec:               0,
		MaxUncompressedBatchBytesSize: math.MaxUint64,

@@ -227,7 +225,6 @@ func testBatchProposerMaxChunkNumPerBatchLimitCodecV7(t *testing.T) {
	chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}

	cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
		MaxBlockNumPerChunk:           math.MaxUint64,
		MaxL2GasPerChunk:              math.MaxUint64,
		ChunkTimeoutSec:               0,
		MaxUncompressedBatchBytesSize: math.MaxUint64,

@@ -309,15 +306,14 @@ func testBatchProposerUncompressedBatchBytesLimitCodecV8(t *testing.T) {

	// Create chunk proposer with no uncompressed batch bytes limit for chunks
	cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
		MaxBlockNumPerChunk:           1, // One block per chunk
		MaxL2GasPerChunk:              math.MaxUint64,
		MaxL2GasPerChunk:              1200000, // One block per chunk via gas limit
		ChunkTimeoutSec:               math.MaxUint32,
		MaxUncompressedBatchBytesSize: math.MaxUint64,
	}, encoding.CodecV8, chainConfig, db, nil)

	// Insert 2 blocks with large calldata and create 2 chunks
	l2BlockOrm := orm.NewL2Block(db)
	for i := uint64(1); i <= 2; i++ {
	for i := uint64(1); i <= 3; i++ {
		blockCopy := *block
		blockCopy.Header = &gethTypes.Header{}
		*blockCopy.Header = *block.Header
@@ -326,7 +322,9 @@ func testBatchProposerUncompressedBatchBytesLimitCodecV8(t *testing.T) {
		err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{&blockCopy})
		assert.NoError(t, err)

		cp.TryProposeChunk() // Each call creates one chunk with one block
		cp.TryProposeChunk() // Each chunk will contain 1 block (~3KiB)
		// We create 2 chunks here, as we have 3 blocks and reach the gas limit for the 1st chunk with the 2nd block
		// and the 2nd chunk with the 3rd block.
	}

	// Create batch proposer with 4KiB uncompressed batch bytes limit

@@ -86,15 +86,19 @@ func testBundleProposerLimitsCodecV7(t *testing.T) {
	_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
	assert.NoError(t, err)

	block3 := *block1
	block3.Header = &gethTypes.Header{}
	*block3.Header = *block1.Header
	block3.Header.Number = new(big.Int).SetUint64(block2.Header.Number.Uint64() + 1)

	l2BlockOrm := orm.NewL2Block(db)
	err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
	err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2, &block3})
	assert.NoError(t, err)

	chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}

	cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
		MaxBlockNumPerChunk:           1,
		MaxL2GasPerChunk:              math.MaxUint64,
		MaxL2GasPerChunk:              1152994, // One block per chunk via gas limit
		ChunkTimeoutSec:               math.MaxUint32,
		MaxUncompressedBatchBytesSize: math.MaxUint64,
	}, encoding.CodecV7, chainConfig, db, nil)

@@ -54,7 +54,6 @@ type ChunkProposer struct {
// NewChunkProposer creates a new ChunkProposer instance.
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
	log.Info("new chunk proposer",
		"maxBlockNumPerChunk", cfg.MaxBlockNumPerChunk,
		"maxL2GasPerChunk", cfg.MaxL2GasPerChunk,
		"chunkTimeoutSec", cfg.ChunkTimeoutSec,
		"maxBlobSize", maxBlobSize)

@@ -232,10 +231,9 @@ func (p *ChunkProposer) ProposeChunk() error {
		return err
	}

	maxBlocksThisChunk := p.cfg.MaxBlockNumPerChunk

	// select at most maxBlocksThisChunk blocks
	blocks, err := p.l2BlockOrm.GetL2BlocksGEHeight(p.ctx, unchunkedBlockHeight, int(maxBlocksThisChunk))
	// Select blocks without a hard limit on the count (a large fixed value);
	// the actual limits are enforced by the gas, timeout, and blob size constraints.
	blocks, err := p.l2BlockOrm.GetL2BlocksGEHeight(p.ctx, unchunkedBlockHeight, 1000)
	if err != nil {
		return err
	}

@@ -251,7 +249,7 @@ func (p *ChunkProposer) ProposeChunk() error {
		currentHardfork := encoding.GetHardforkName(p.chainCfg, blocks[i].Header.Number.Uint64(), blocks[i].Header.Time)
		if currentHardfork != hardforkName {
			blocks = blocks[:i]
			maxBlocksThisChunk = uint64(i) // update maxBlocksThisChunk to trigger chunking, because these blocks are the last blocks before the hardfork
			// Truncate blocks at the hardfork boundary
			break
		}
	}

@@ -324,8 +322,8 @@ func (p *ChunkProposer) ProposeChunk() error {
	}

	currentTimeSec := uint64(time.Now().Unix())
	if metrics.FirstBlockTimestamp+p.cfg.ChunkTimeoutSec < currentTimeSec || metrics.NumBlocks == maxBlocksThisChunk {
		log.Info("reached maximum number of blocks in chunk or first block timeout",
	if metrics.FirstBlockTimestamp+p.cfg.ChunkTimeoutSec < currentTimeSec {
		log.Info("first block timeout reached",
			"block count", len(chunk.Blocks),
			"start block number", chunk.Blocks[0].Header.Number,
			"start block timestamp", metrics.FirstBlockTimestamp,

@@ -22,7 +22,6 @@ import (
func testChunkProposerLimitsCodecV7(t *testing.T) {
	tests := []struct {
		name              string
		maxBlockNum       uint64
		maxL2Gas          uint64
		chunkTimeoutSec   uint64
		expectedChunksLen int

@@ -30,14 +29,12 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
	}{
		{
			name:              "NoLimitReached",
			maxBlockNum:       100,
			maxL2Gas:          20_000_000,
			chunkTimeoutSec:   1000000000000,
			expectedChunksLen: 0,
		},
		{
			name:              "Timeout",
			maxBlockNum:       100,
			maxL2Gas:          20_000_000,
			chunkTimeoutSec:   0,
			expectedChunksLen: 1,

@@ -45,15 +42,13 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
		},
		{
			name:              "MaxL2GasPerChunkIs0",
			maxBlockNum:       10,
			maxL2Gas:          0,
			chunkTimeoutSec:   1000000000000,
			expectedChunksLen: 0,
		},
		{
			name:              "MaxBlockNumPerChunkIs1",
			maxBlockNum:       1,
			maxL2Gas:          20_000_000,
			name:                       "SingleBlockByGasLimit",
			maxL2Gas:                   1_100_000,
			chunkTimeoutSec:            1000000000000,
			expectedChunksLen:          1,
			expectedBlocksInFirstChunk: 1,

@@ -62,7 +57,6 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
			// In this test the second block is not included in the chunk because together
			// with the first block it exceeds the maxL2GasPerChunk limit.
			name:              "MaxL2GasPerChunkIsSecondBlock",
			maxBlockNum:       10,
			maxL2Gas:          1_153_000,
			chunkTimeoutSec:   1000000000000,
			expectedChunksLen: 1,

@@ -85,7 +79,6 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
	assert.NoError(t, err)

	cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
		MaxBlockNumPerChunk:           tt.maxBlockNum,
		MaxL2GasPerChunk:              tt.maxL2Gas,
		ChunkTimeoutSec:               tt.chunkTimeoutSec,
		MaxUncompressedBatchBytesSize: math.MaxUint64,

@@ -110,53 +103,6 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
	}
}

func testChunkProposerBlobSizeLimitCodecV7(t *testing.T) {
	db := setupDB(t)
	defer database.CloseDB(db)
	block := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
	for i := uint64(0); i < 510; i++ {
		l2BlockOrm := orm.NewL2Block(db)
		block.Header.Number = new(big.Int).SetUint64(i + 1)
		block.Header.Time = i + 1
		err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
		assert.NoError(t, err)
	}

	// Add genesis chunk.
	chunkOrm := orm.NewChunk(db)
	_, err := chunkOrm.InsertChunk(context.Background(), &encoding.Chunk{Blocks: []*encoding.Block{{Header: &gethTypes.Header{Number: big.NewInt(0)}}}}, encoding.CodecV0, utils.ChunkMetrics{})
	assert.NoError(t, err)

	chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}

	cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
		MaxBlockNumPerChunk:           255,
		MaxL2GasPerChunk:              math.MaxUint64,
		ChunkTimeoutSec:               math.MaxUint32,
		MaxUncompressedBatchBytesSize: math.MaxUint64,
	}, encoding.CodecV7, chainConfig, db, nil)

	for i := 0; i < 2; i++ {
		cp.TryProposeChunk()
	}

	chunkOrm = orm.NewChunk(db)
	chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 1, 0)
	assert.NoError(t, err)

	var expectedNumChunks int = 2
	var numBlocksMultiplier uint64 = 255
	assert.Len(t, chunks, expectedNumChunks)

	for i, chunk := range chunks {
		expected := numBlocksMultiplier * (uint64(i) + 1)
		if expected > 2000 {
			expected = 2000
		}
		assert.Equal(t, expected, chunk.EndBlockNumber)
	}
}

func testChunkProposerUncompressedBatchBytesLimitCodecV8(t *testing.T) {
	db := setupDB(t)
	defer database.CloseDB(db)

@@ -204,7 +150,6 @@ func testChunkProposerUncompressedBatchBytesLimitCodecV8(t *testing.T) {
	// Set max_uncompressed_batch_bytes_size to 4KiB (4 * 1024)
	// One block (~3KiB) should fit, but two blocks (~6KiB) should exceed the limit
	cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
		MaxBlockNumPerChunk:           math.MaxUint64, // No block number limit
		MaxL2GasPerChunk:              math.MaxUint64, // No gas limit
		ChunkTimeoutSec:               math.MaxUint32, // No timeout limit
		MaxUncompressedBatchBytesSize: 4 * 1024,       // 4KiB limit

@@ -102,7 +102,6 @@ func TestFunction(t *testing.T) {

	// Run chunk proposer test cases.
	t.Run("TestChunkProposerLimitsCodecV7", testChunkProposerLimitsCodecV7)
	t.Run("TestChunkProposerBlobSizeLimitCodecV7", testChunkProposerBlobSizeLimitCodecV7)
	t.Run("TestChunkProposerUncompressedBatchBytesLimitCodecV8", testChunkProposerUncompressedBatchBytesLimitCodecV8)

	// Run batch proposer test cases.

@@ -266,6 +266,19 @@ func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, erro
	return &batch, nil
}

// GetBatchByHash retrieves the batch by the given hash.
func (o *Batch) GetBatchByHash(ctx context.Context, hash string) (*Batch, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Batch{})
	db = db.Where("hash = ?", hash)

	var batch Batch
	if err := db.First(&batch).Error; err != nil {
		return nil, fmt.Errorf("Batch.GetBatchByHash error: %w, batch hash: %v", err, hash)
	}
	return &batch, nil
}

// InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVersion encoding.CodecVersion, metrics rutils.BatchMetrics, dbTX ...*gorm.DB) (*Batch, error) {
	if batch == nil {

@@ -2,7 +2,6 @@
"l2_config": {
    "endpoint": "https://rpc.scroll.io",
    "chunk_proposer_config": {
        "max_block_num_per_chunk": 100,
        "max_l2_gas_per_chunk": 20000000,
        "chunk_timeout_sec": 72000000000,
        "max_uncompressed_batch_bytes_size": 4194304

7
rollup/testdata/commit_batches_blobs.json
vendored
Normal file
File diff suppressed because one or more lines are too long
215
rollup/tests/integration_tool/imports.go
Normal file
@@ -0,0 +1,215 @@

package main

import (
    "context"
    "errors"
    "math/rand"
    "sort"

    "gorm.io/gorm"

    "github.com/scroll-tech/da-codec/encoding"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/log"

    "scroll-tech/common/database"

    "scroll-tech/rollup/internal/orm"
    "scroll-tech/rollup/internal/utils"
)

type importRecord struct {
    Chunk  []string `json:"chunks"`
    Batch  []string `json:"batches"`
    Bundle []string `json:"bundles"`
}

// randomPickKfromN returns k-1 sorted split points drawn uniformly from [1, n),
// which partition a sequence of n items into k contiguous, non-empty ranges.
func randomPickKfromN(n, k int, rng *rand.Rand) []int {
    ret := make([]int, n-1)
    for i := 1; i < n; i++ {
        ret[i-1] = i
    }

    rng.Shuffle(len(ret), func(i, j int) {
        ret[i], ret[j] = ret[j], ret[i]
    })

    ret = ret[:k-1]
    sort.Ints(ret)

    return ret
}
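For intuition, a sketch (hypothetical helper) of how the split points drive partitioning; importData below closes the final range the same way, via chkSep = append(chkSep, endBlk+1):

// Sketch: expand split points into [start, end) ranges. With n = 10 and
// k = 3 the separators might be [3 7], giving [0,3) [3,7) [7,10).
func ranges(n, k int, rng *rand.Rand) [][2]int {
    seps := append(randomPickKfromN(n, k, rng), n)
    out := make([][2]int, 0, k)
    start := 0
    for _, end := range seps {
        out = append(out, [2]int{start, end})
        start = end
    }
    return out
}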

func importData(ctx context.Context, beginBlk, endBlk uint64, chkNum, batchNum, bundleNum int, seed int64) (*importRecord, error) {

    db, err := database.InitDB(cfg.DBConfig)
    if err != nil {
        return nil, err
    }
    ret := &importRecord{}
    // Create a new random source with the provided seed
    source := rand.NewSource(seed)
    //nolint:all
    rng := rand.New(source)

    chkSepIdx := randomPickKfromN(int(endBlk-beginBlk)+1, chkNum, rng)
    chkSep := make([]uint64, len(chkSepIdx))
    for i, ind := range chkSepIdx {
        chkSep[i] = beginBlk + uint64(ind)
    }
    chkSep = append(chkSep, endBlk+1)

    log.Info("separated chunk", "border", chkSep)
    head := beginBlk
    lastMsgHash := common.Hash{}

    ormChks := make([]*orm.Chunk, 0, chkNum)
    encChks := make([]*encoding.Chunk, 0, chkNum)
    for _, edBlk := range chkSep {
        ormChk, chk, err := importChunk(ctx, db, head, edBlk-1, lastMsgHash)
        if err != nil {
            return nil, err
        }
        lastMsgHash = chk.PostL1MessageQueueHash
        ormChks = append(ormChks, ormChk)
        encChks = append(encChks, chk)
        head = edBlk
    }

    for _, chk := range ormChks {
        ret.Chunk = append(ret.Chunk, chk.Hash)
    }

    batchSep := randomPickKfromN(chkNum, batchNum, rng)
    batchSep = append(batchSep, chkNum)
    log.Info("separated batch", "border", batchSep)

    headChk := int(0)
    batches := make([]*orm.Batch, 0, batchNum)
    var lastBatch *orm.Batch
    for _, endChk := range batchSep {
        batch, err := importBatch(ctx, db, ormChks[headChk:endChk], encChks[headChk:endChk], lastBatch)
        if err != nil {
            return nil, err
        }
        lastBatch = batch
        batches = append(batches, batch)
        headChk = endChk
    }

    for _, batch := range batches {
        ret.Batch = append(ret.Batch, batch.Hash)
    }

    bundleSep := randomPickKfromN(batchNum, bundleNum, rng)
    bundleSep = append(bundleSep, batchNum)
    log.Info("separated bundle", "border", bundleSep)

    headBatch := int(0)
    for _, endBatch := range bundleSep {
        hash, err := importBundle(ctx, db, batches[headBatch:endBatch])
        if err != nil {
            return nil, err
        }
        ret.Bundle = append(ret.Bundle, hash)
        headBatch = endBatch
    }

    return ret, nil
}
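The counts are constrained so that chunks >= batches >= bundles (enforced by parseThreeIntegers in main.go), and the record built here is exactly what main.go serializes. A sketch of the resulting shape with the default counts 4,2,1 (hash values hypothetical):

// Sketch: the json tags on importRecord render this as
// {"chunks":[...], "batches":[...], "bundles":[...]}.
var exampleRecord = importRecord{
    Chunk:  []string{"0xchunk0...", "0xchunk1...", "0xchunk2...", "0xchunk3..."},
    Batch:  []string{"0xbatch0...", "0xbatch1..."},
    Bundle: []string{"0xbundle0..."},
}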

func importChunk(ctx context.Context, db *gorm.DB, beginBlk, endBlk uint64, prevMsgQueueHash common.Hash) (*orm.Chunk, *encoding.Chunk, error) {
    nblk := int(endBlk-beginBlk) + 1
    blockOrm := orm.NewL2Block(db)

    blks, err := blockOrm.GetL2BlocksGEHeight(ctx, beginBlk, nblk)

    if err != nil {
        return nil, nil, err
    }

    postHash, err := encoding.MessageQueueV2ApplyL1MessagesFromBlocks(prevMsgQueueHash, blks)
    if err != nil {
        return nil, nil, err
    }

    theChunk := &encoding.Chunk{
        Blocks:                 blks,
        PrevL1MessageQueueHash: prevMsgQueueHash,
        PostL1MessageQueueHash: postHash,
    }
    chunkOrm := orm.NewChunk(db)

    dbChk, err := chunkOrm.InsertChunk(ctx, theChunk, codecCfg, utils.ChunkMetrics{})
    if err != nil {
        return nil, nil, err
    }
    err = blockOrm.UpdateChunkHashInRange(ctx, beginBlk, endBlk, dbChk.Hash)
    if err != nil {
        return nil, nil, err
    }
    log.Info("insert chunk", "From", beginBlk, "To", endBlk, "hash", dbChk.Hash)
    return dbChk, theChunk, nil
}

func importBatch(ctx context.Context, db *gorm.DB, chks []*orm.Chunk, encChks []*encoding.Chunk, last *orm.Batch) (*orm.Batch, error) {

    batchOrm := orm.NewBatch(db)
    if last == nil {
        var err error
        last, err = batchOrm.GetLatestBatch(ctx)
        if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
            return nil, err
        } else if last != nil {
            log.Info("start from last batch", "index", last.Index)
        }
    }

    index := uint64(0)
    var parentHash common.Hash
    if last != nil {
        index = last.Index + 1
        parentHash = common.HexToHash(last.Hash)
    }

    var blks []*encoding.Block
    for _, chk := range encChks {
        blks = append(blks, chk.Blocks...)
    }

    batch := &encoding.Batch{
        Index:                      index,
        TotalL1MessagePoppedBefore: chks[0].TotalL1MessagesPoppedBefore,
        ParentBatchHash:            parentHash,
        Chunks:                     encChks,
        Blocks:                     blks,
    }

    dbBatch, err := batchOrm.InsertBatch(ctx, batch, codecCfg, utils.BatchMetrics{})
    if err != nil {
        return nil, err
    }
    err = orm.NewChunk(db).UpdateBatchHashInRange(ctx, chks[0].Index, chks[len(chks)-1].Index, dbBatch.Hash)
    if err != nil {
        return nil, err
    }

    log.Info("insert batch", "index", index)
    return dbBatch, nil
}

func importBundle(ctx context.Context, db *gorm.DB, batches []*orm.Batch) (string, error) {

    bundleOrm := orm.NewBundle(db)
    bundle, err := bundleOrm.InsertBundle(ctx, batches, codecCfg)
    if err != nil {
        return "", err
    }
    err = orm.NewBatch(db).UpdateBundleHashInRange(ctx, batches[0].Index, batches[len(batches)-1].Index, bundle.Hash)
    if err != nil {
        return "", err
    }

    log.Info("insert bundle", "hash", bundle.Hash)
    return bundle.Hash, nil
}
198
rollup/tests/integration_tool/main.go
Normal file
@@ -0,0 +1,198 @@

package main

import (
    "encoding/json"
    "fmt"
    "math/rand"
    "os"
    "path/filepath"
    "strconv"
    "strings"

    "github.com/scroll-tech/da-codec/encoding"
    "github.com/scroll-tech/go-ethereum/log"
    "github.com/urfave/cli/v2"

    "scroll-tech/common/database"
    "scroll-tech/common/utils"
    "scroll-tech/common/version"
)

var app *cli.App
var cfg *config
var codecCfg encoding.CodecVersion = encoding.CodecV8

var outputNumFlag = cli.StringFlag{
    Name:  "counts",
    Usage: "Counts for output (chunks,batches,bundles)",
    Value: "4,2,1",
}

var outputPathFlag = cli.StringFlag{
    Name:  "output",
    Usage: "output file path",
    Value: "testset.json",
}

var seedFlag = cli.Int64Flag{
    Name:  "seed",
    Usage: "random seed; 0 selects a random seed",
    Value: 0,
}

var codecFlag = cli.IntFlag{
    Name:  "codec",
    Usage: "codec version (6, 7, or 8); 0 keeps the built-in default",
    Value: 0,
}

func parseThreeIntegers(value string) (int, int, int, error) {
    // Split the input string by comma
    parts := strings.Split(value, ",")

    // Check that we have exactly 3 parts
    if len(parts) != 3 {
        return 0, 0, 0, fmt.Errorf("input must contain exactly 3 comma-separated integers, got %s", value)
    }

    // Parse the three integers
    values := make([]int, 3)
    for i, part := range parts {
        // Trim any whitespace
        part = strings.TrimSpace(part)

        // Parse the integer
        val, err := strconv.Atoi(part)
        if err != nil {
            return 0, 0, 0, fmt.Errorf("failed to parse '%s' as integer: %w", part, err)
        }

        // Check that it's positive
        if val <= 0 {
            return 0, 0, 0, fmt.Errorf("all integers must be greater than 0, got %d", val)
        }

        values[i] = val
    }

    // Check that first >= second >= third
    if values[0] < values[1] || values[1] < values[2] {
        return 0, 0, 0, fmt.Errorf("integers must be in descending order: %d >= %d >= %d",
            values[0], values[1], values[2])
    }

    return values[0], values[1], values[2], nil
}
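A quick sketch of the helper's contract, using the flag's default and one rejected input (the wrapper function is hypothetical):

func exampleParse() {
    chk, batch, bundle, err := parseThreeIntegers("4,2,1") // the counts default
    fmt.Println(chk, batch, bundle, err)                   // 4 2 1 <nil>

    _, _, _, err = parseThreeIntegers("1,2,3")
    fmt.Println(err) // non-nil: values must be descending (chunks >= batches >= bundles)
}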

// config is a compatible subset of the rollup config; only the DB section is needed here.
type config struct {
    DBConfig *database.Config `json:"db_config"`
}

func init() {
    // Set up the integration test tool's app info.
    app = cli.NewApp()
    app.Action = action
    app.Name = "integration-test-tool"
    app.Usage = "The Scroll L2 Integration Test Tool"
    app.Version = version.Version
    app.Flags = append(app.Flags, &codecFlag, &seedFlag, &outputNumFlag, &outputPathFlag)
    app.Flags = append(app.Flags, utils.CommonFlags...)
    app.Before = func(ctx *cli.Context) error {
        if err := utils.LogSetup(ctx); err != nil {
            return err
        }

        cfgFile := ctx.String(utils.ConfigFileFlag.Name)
        var err error
        cfg, err = newConfig(cfgFile)
        if err != nil {
            log.Crit("failed to load config file", "config file", cfgFile, "error", err)
        }
        return nil
    }
}

func newConfig(file string) (*config, error) {
    buf, err := os.ReadFile(filepath.Clean(file))
    if err != nil {
        return nil, err
    }

    cfg := &config{}
    err = json.Unmarshal(buf, cfg)
    if err != nil {
        return nil, err
    }

    return cfg, nil
}

func action(ctx *cli.Context) error {

    if ctx.Args().Len() < 2 {
        return fmt.Errorf("specify begin and end block number")
    }

    codecFl := ctx.Int(codecFlag.Name)
    if codecFl != 0 {
        switch codecFl {
        case 6:
            codecCfg = encoding.CodecV6
        case 7:
            codecCfg = encoding.CodecV7
        case 8:
            codecCfg = encoding.CodecV8
        default:
            return fmt.Errorf("invalid codec version %d", codecFl)
        }
        log.Info("set codec", "version", codecCfg)
    }

    beginBlk, err := strconv.ParseUint(ctx.Args().First(), 10, 64)
    if err != nil {
        return fmt.Errorf("invalid begin block number: %w", err)
    }
    endBlk, err := strconv.ParseUint(ctx.Args().Get(1), 10, 64)
    if err != nil {
        return fmt.Errorf("invalid end block number: %w", err)
    }

    chkNum, batchNum, bundleNum, err := parseThreeIntegers(ctx.String(outputNumFlag.Name))
    if err != nil {
        return err
    }

    seed := ctx.Int64(seedFlag.Name)
    //nolint:all
    if seed == 0 {
        seed = rand.Int63()
    }

    outputPath := ctx.String(outputPathFlag.Name)
    log.Info("output", "Seed", seed, "file", outputPath)
    ret, err := importData(ctx.Context, beginBlk, endBlk, chkNum, batchNum, bundleNum, seed)
    if err != nil {
        return err
    }
    // Marshal the ret variable to JSON with indentation for readability
    jsonData, err := json.MarshalIndent(ret, "", " ")
    if err != nil {
        return fmt.Errorf("failed to marshal result data to JSON: %w", err)
    }

    // Write the JSON data to the specified file
    err = os.WriteFile(outputPath, jsonData, 0600)
    if err != nil {
        return fmt.Errorf("failed to write result to file %s: %w", outputPath, err)
    }

    return nil
}

func main() {
    if err := app.Run(os.Args); err != nil {
        _, _ = fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}
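A hypothetical invocation, assuming the config-file flag contributed by utils.CommonFlags is named --config (that package is not part of this diff); the two positional arguments are the begin and end block numbers:

integration-test-tool --config ./config.json --counts 4,2,1 --seed 42 10973700 10973718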
@@ -118,7 +118,6 @@ func testCommitBatchAndFinalizeBundleCodecV7(t *testing.T) {
    }

    cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
        MaxBlockNumPerChunk:           100,
        MaxL2GasPerChunk:              math.MaxUint64,
        ChunkTimeoutSec:               300,
        MaxUncompressedBatchBytesSize: math.MaxUint64,

@@ -1,3 +1,4 @@
[toolchain]
channel = "nightly-2025-02-14"
targets = ["riscv32im-unknown-none-elf", "x86_64-unknown-linux-gnu"]
channel = "nightly-2025-08-18"
targets = ["riscv32im-unknown-none-elf", "x86_64-unknown-linux-gnu"]
components = ["llvm-tools", "rustc-dev"]
@@ -5,8 +5,8 @@ go 1.22
toolchain go1.22.2

require (
    github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435
    github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
    github.com/scroll-tech/da-codec v0.1.3-0.20250826112206-b4cce5c5d178
    github.com/scroll-tech/go-ethereum v1.10.14-0.20251017054300-9aa8b3f38f63
    github.com/stretchr/testify v1.10.0
    gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)
@@ -16,10 +16,10 @@ require (
    github.com/btcsuite/btcd v0.20.1-beta // indirect
    github.com/consensys/bavard v0.1.29 // indirect
    github.com/consensys/gnark-crypto v0.16.0 // indirect
    github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
    github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
    github.com/davecgh/go-spew v1.1.1 // indirect
    github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
    github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
    github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
    github.com/go-ole/go-ole v1.3.0 // indirect
    github.com/go-stack/stack v1.8.1 // indirect
    github.com/google/uuid v1.6.0 // indirect
@@ -34,7 +34,7 @@ require (
    github.com/rjeczalik/notify v0.9.1 // indirect
    github.com/scroll-tech/zktrie v0.8.4 // indirect
    github.com/shirou/gopsutil v3.21.11+incompatible // indirect
    github.com/supranational/blst v0.3.13 // indirect
    github.com/supranational/blst v0.3.15 // indirect
    github.com/tklauser/go-sysconf v0.3.14 // indirect
    github.com/tklauser/numcpus v0.9.0 // indirect
    github.com/yusufpapurcu/wmi v1.2.4 // indirect

@@ -22,8 +22,8 @@ github.com/consensys/bavard v0.1.29 h1:fobxIYksIQ+ZSrTJUuQgu+HIJwclrAPcdXqd7H2hh
github.com/consensys/bavard v0.1.29/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo=
github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -31,8 +31,8 @@ github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
@@ -93,18 +93,18 @@ github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeC
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/da-codec v0.1.3-0.20250826112206-b4cce5c5d178 h1:4utngmJHXSOS5FoSdZhEV1xMRirpArbXvyoCZY9nYj0=
github.com/scroll-tech/da-codec v0.1.3-0.20250826112206-b4cce5c5d178/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251017054300-9aa8b3f38f63 h1:xuqdhD4w/zcI5T8Ty1wHvqB75P2HNg3jTH/kUEHGt9Y=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251017054300-9aa8b3f38f63/go.mod h1:zRa7CnS75mFdgp8IeMtZV/wCAlxPRT33Ek3+fFbBJVQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=

3
tests/prover-e2e/.env
Normal file
@@ -0,0 +1,3 @@
GOOSE_DRIVER=postgres
GOOSE_DBSTRING=postgresql://dev:dev@localhost:5432/scroll?sslmode=disable
GOOSE_MIGRATION_DIR=../../database/migrate/migrations
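With these variables exported, the pressly/goose CLI can apply the repository's migrations before the SQL fixtures below are loaded; a minimal sketch, assuming goose is installed and run from tests/prover-e2e:

goose up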
2
tests/prover-e2e/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
build/*
testset.json
132
tests/prover-e2e/00100_import_blocks.sql
Normal file
@@ -0,0 +1,132 @@
-- +goose Up
-- +goose StatementBegin
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973700', '0x29c84f0df09fda2c6c63d314bb6714dbbdeca3b85c91743f9e25d3f81c28b986', '0x01aabb5d1d7edadd10011b4099de7ed703b9ce495717cd48a304ff4db3710d8a', '{"parentHash":"0x01aabb5d1d7edadd10011b4099de7ed703b9ce495717cd48a304ff4db3710d8a","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x347733a157bc7045f6a1d5bfd37d51763f3503b63290576a65b3b83265add2cf","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77204","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36e5","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4209","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x29c84f0df09fda2c6c63d314bb6714dbbdeca3b85c91743f9e25d3f81c28b986"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0x347733a157bc7045f6a1d5bfd37d51763f3503b63290576a65b3b83265add2cf', '0', '0', '1753167589', '', '0x206c062cf0991353ba5ebc9888ca224f470ad3edf8e8e01125726a3858ebdd73', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973701', '0x21ad5215b5b51cb5eb5ea6a19444804ca1628cbf8ef8cf1977660d8c468c0151', '0x29c84f0df09fda2c6c63d314bb6714dbbdeca3b85c91743f9e25d3f81c28b986', '{"parentHash":"0x29c84f0df09fda2c6c63d314bb6714dbbdeca3b85c91743f9e25d3f81c28b986","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x347733a157bc7045f6a1d5bfd37d51763f3503b63290576a65b3b83265add2cf","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77205","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36e6","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4209","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x21ad5215b5b51cb5eb5ea6a19444804ca1628cbf8ef8cf1977660d8c468c0151"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0x347733a157bc7045f6a1d5bfd37d51763f3503b63290576a65b3b83265add2cf', '0', '0', '1753167590', '', '0x206c062cf0991353ba5ebc9888ca224f470ad3edf8e8e01125726a3858ebdd73', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973702', '0x8c9ce33a9b62060b193c01f31518f3bfc5d8132c11569a5b4db03a5b0611f30e', '0x21ad5215b5b51cb5eb5ea6a19444804ca1628cbf8ef8cf1977660d8c468c0151', '{"parentHash":"0x21ad5215b5b51cb5eb5ea6a19444804ca1628cbf8ef8cf1977660d8c468c0151","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x347733a157bc7045f6a1d5bfd37d51763f3503b63290576a65b3b83265add2cf","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77206","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36e7","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4209","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x8c9ce33a9b62060b193c01f31518f3bfc5d8132c11569a5b4db03a5b0611f30e"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0x347733a157bc7045f6a1d5bfd37d51763f3503b63290576a65b3b83265add2cf', '0', '0', '1753167591', '', '0x206c062cf0991353ba5ebc9888ca224f470ad3edf8e8e01125726a3858ebdd73', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973703', '0x73eed8060ca9a36fe8bf8c981f0e44425cd69ade00ff986452f1c02d462194fe', '0x8c9ce33a9b62060b193c01f31518f3bfc5d8132c11569a5b4db03a5b0611f30e', '{"parentHash":"0x8c9ce33a9b62060b193c01f31518f3bfc5d8132c11569a5b4db03a5b0611f30e","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x347733a157bc7045f6a1d5bfd37d51763f3503b63290576a65b3b83265add2cf","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77207","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36e8","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4209","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x73eed8060ca9a36fe8bf8c981f0e44425cd69ade00ff986452f1c02d462194fe"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0x347733a157bc7045f6a1d5bfd37d51763f3503b63290576a65b3b83265add2cf', '0', '0', '1753167592', '', '0x206c062cf0991353ba5ebc9888ca224f470ad3edf8e8e01125726a3858ebdd73', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973704', '0x7a6a1bede8936cfbd677cf38c43e399c8a2d0b62700caf05513bad540541b1b5', '0x73eed8060ca9a36fe8bf8c981f0e44425cd69ade00ff986452f1c02d462194fe', '{"parentHash":"0x73eed8060ca9a36fe8bf8c981f0e44425cd69ade00ff986452f1c02d462194fe","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x347733a157bc7045f6a1d5bfd37d51763f3503b63290576a65b3b83265add2cf","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77208","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36e9","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4209","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x7a6a1bede8936cfbd677cf38c43e399c8a2d0b62700caf05513bad540541b1b5"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0x347733a157bc7045f6a1d5bfd37d51763f3503b63290576a65b3b83265add2cf', '0', '0', '1753167593', '', '0x206c062cf0991353ba5ebc9888ca224f470ad3edf8e8e01125726a3858ebdd73', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973705', '0x1a5306293b7801a42c4402f9d32e7a45d640c49506ee61452da0120f3e242424', '0x7a6a1bede8936cfbd677cf38c43e399c8a2d0b62700caf05513bad540541b1b5', '{"parentHash":"0x7a6a1bede8936cfbd677cf38c43e399c8a2d0b62700caf05513bad540541b1b5","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x347733a157bc7045f6a1d5bfd37d51763f3503b63290576a65b3b83265add2cf","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77209","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36ea","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4209","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x1a5306293b7801a42c4402f9d32e7a45d640c49506ee61452da0120f3e242424"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0x347733a157bc7045f6a1d5bfd37d51763f3503b63290576a65b3b83265add2cf', '0', '0', '1753167594', '', '0x206c062cf0991353ba5ebc9888ca224f470ad3edf8e8e01125726a3858ebdd73', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973706', '0xc89f391a03c7138f676bd8babed6589035b2b7d8c8b99071cb90661f7f996386', '0x1a5306293b7801a42c4402f9d32e7a45d640c49506ee61452da0120f3e242424', '{"parentHash":"0x1a5306293b7801a42c4402f9d32e7a45d640c49506ee61452da0120f3e242424","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x347733a157bc7045f6a1d5bfd37d51763f3503b63290576a65b3b83265add2cf","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa7720a","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36eb","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4209","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0xc89f391a03c7138f676bd8babed6589035b2b7d8c8b99071cb90661f7f996386"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0x347733a157bc7045f6a1d5bfd37d51763f3503b63290576a65b3b83265add2cf', '0', '0', '1753167595', '', '0x206c062cf0991353ba5ebc9888ca224f470ad3edf8e8e01125726a3858ebdd73', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973707', '0x38d72b14ef43e548ab0ffd84892e7c3accdd11e1121c2c7a94b953b4e896eb41', '0xc89f391a03c7138f676bd8babed6589035b2b7d8c8b99071cb90661f7f996386', '{"parentHash":"0xc89f391a03c7138f676bd8babed6589035b2b7d8c8b99071cb90661f7f996386","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0xb920df561f210a617a0c1567cb2f65350818b96b159f5aa4b9ac7915b7af4946","transactionsRoot":"0xf14cf5134833ddb4f42a017e92af371f0a71eaf5d84cb6e681c81fa023662c5d","receiptsRoot":"0x4008fb883088f1ba377310e15221fffc8e5446faf420d6a28e061e9341beb056","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000900000000000000000000000000000000000000000000000000000000000000000000000001000000008000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000020000000000000000000","difficulty":"0x1","number":"0xa7720b","gasLimit":"0x1312d00","gasUsed":"0x9642","timestamp":"0x687f36ec","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4209","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x38d72b14ef43e548ab0ffd84892e7c3accdd11e1121c2c7a94b953b4e896eb41"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0xb920df561f210a617a0c1567cb2f65350818b96b159f5aa4b9ac7915b7af4946', '1', '38466', '1753167596', '', '0x206c062cf0991353ba5ebc9888ca224f470ad3edf8e8e01125726a3858ebdd73', '[{"type":2,"nonce":3392500,"txHash":"0xc15b615906602154131a6c42d7603def4bd2a769881292d831140b0b9b8f8850","gas":45919,"gasPrice":"0x1de8476","gasTipCap":"0x64","gasFeeCap":"0x1de8476","from":"0x0000000000000000000000000000000000000000","to":"0x5300000000000000000000000000000000000002","chainId":"0x8274f","value":"0x0","data":"0x39455d3a0000000000000000000000000000000000000000000000000000000000045b840000000000000000000000000000000000000000000000000000000000000001","isCreate":false,"accessList":[{"address":"0x5300000000000000000000000000000000000003","storageKeys":["0x297c59f20c6b2556a4ed35dccabbdeb8b1cf950f62aefb86b98d19b5a4aff2a2"]}],"authorizationList":null,"v":"0x1","r":"0xa1b888cc9be7990c4f6bd8a9d0d5fa743ea8173196c7ca871464becd133ba0de","s":"0x6bacc3e1a244c62eff3008795e010598d07b95a8bad7a5592ec941e121294885"}]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973708', '0x230863f98595ba0c83785caf618072ce2a876307102adbeba11b9de9c4af8a08', '0x38d72b14ef43e548ab0ffd84892e7c3accdd11e1121c2c7a94b953b4e896eb41', '{"parentHash":"0x38d72b14ef43e548ab0ffd84892e7c3accdd11e1121c2c7a94b953b4e896eb41","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0xb920df561f210a617a0c1567cb2f65350818b96b159f5aa4b9ac7915b7af4946","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa7720c","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36ed","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4209","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x230863f98595ba0c83785caf618072ce2a876307102adbeba11b9de9c4af8a08"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0xb920df561f210a617a0c1567cb2f65350818b96b159f5aa4b9ac7915b7af4946', '0', '0', '1753167597', '', '0x206c062cf0991353ba5ebc9888ca224f470ad3edf8e8e01125726a3858ebdd73', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973709', '0xae4af19a7697c2bb10a641f07269ed7df66775d56414567245adc98befdae557', '0x230863f98595ba0c83785caf618072ce2a876307102adbeba11b9de9c4af8a08', '{"parentHash":"0x230863f98595ba0c83785caf618072ce2a876307102adbeba11b9de9c4af8a08","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0xb920df561f210a617a0c1567cb2f65350818b96b159f5aa4b9ac7915b7af4946","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa7720d","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36ee","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4209","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0xae4af19a7697c2bb10a641f07269ed7df66775d56414567245adc98befdae557"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0xb920df561f210a617a0c1567cb2f65350818b96b159f5aa4b9ac7915b7af4946', '0', '0', '1753167598', '', '0x206c062cf0991353ba5ebc9888ca224f470ad3edf8e8e01125726a3858ebdd73', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973710', '0xd2bc7e24b66940a767abaee485870e9ebd24ff28e72a3096cdccc55b85f84182', '0xae4af19a7697c2bb10a641f07269ed7df66775d56414567245adc98befdae557', '{"parentHash":"0xae4af19a7697c2bb10a641f07269ed7df66775d56414567245adc98befdae557","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0xb920df561f210a617a0c1567cb2f65350818b96b159f5aa4b9ac7915b7af4946","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa7720e","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36ef","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4209","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0xd2bc7e24b66940a767abaee485870e9ebd24ff28e72a3096cdccc55b85f84182"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0xb920df561f210a617a0c1567cb2f65350818b96b159f5aa4b9ac7915b7af4946', '0', '0', '1753167599', '', '0x206c062cf0991353ba5ebc9888ca224f470ad3edf8e8e01125726a3858ebdd73', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973711', '0x1870b94320db154de4702fe6bfb69ebc98fb531b78bf81a69b8ab658ba9d9af5', '0xd2bc7e24b66940a767abaee485870e9ebd24ff28e72a3096cdccc55b85f84182', '{"parentHash":"0xd2bc7e24b66940a767abaee485870e9ebd24ff28e72a3096cdccc55b85f84182","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x2bdf2906a7bbb398419246c3c77804a204641259b2aeb4f4a806eb772d31c480","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa7720f","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36f0","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4209","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x1870b94320db154de4702fe6bfb69ebc98fb531b78bf81a69b8ab658ba9d9af5"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0x2bdf2906a7bbb398419246c3c77804a204641259b2aeb4f4a806eb772d31c480', '0', '0', '1753167600', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973712', '0x271745e26e3222352fce7052edef9adecba921f4315e48a9d55e46640ac324ce', '0x1870b94320db154de4702fe6bfb69ebc98fb531b78bf81a69b8ab658ba9d9af5', '{"parentHash":"0x1870b94320db154de4702fe6bfb69ebc98fb531b78bf81a69b8ab658ba9d9af5","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x1ec0026bd12fe29d710e5f04e605cdb715d68a2e5bac57416066a7bc6b298762","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77210","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36f1","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4208","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x271745e26e3222352fce7052edef9adecba921f4315e48a9d55e46640ac324ce"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0x1ec0026bd12fe29d710e5f04e605cdb715d68a2e5bac57416066a7bc6b298762', '0', '0', '1753167601', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973713', '0xe193fc4298a6beebbc59b83cc7e2bbdace76c24fe9b7bd76aa415159ecb60914', '0x271745e26e3222352fce7052edef9adecba921f4315e48a9d55e46640ac324ce', '{"parentHash":"0x271745e26e3222352fce7052edef9adecba921f4315e48a9d55e46640ac324ce","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x7e29363a63f54a0e03e08cb515a98f3c416a5ade3ec15d29eddd262baf67a2a1","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77211","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36f2","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0xe193fc4298a6beebbc59b83cc7e2bbdace76c24fe9b7bd76aa415159ecb60914"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0x7e29363a63f54a0e03e08cb515a98f3c416a5ade3ec15d29eddd262baf67a2a1', '0', '0', '1753167602', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973714', '0x8e99d9242315a107492520e9255dd77798adbff81393d3de83960dac361bd838', '0xe193fc4298a6beebbc59b83cc7e2bbdace76c24fe9b7bd76aa415159ecb60914', '{"parentHash":"0xe193fc4298a6beebbc59b83cc7e2bbdace76c24fe9b7bd76aa415159ecb60914","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0xd818ac5028fe5aa1abd2d3ffe4693b4e96eabad35e49011e2ce920bcd76d061a","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77212","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36f3","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x8e99d9242315a107492520e9255dd77798adbff81393d3de83960dac361bd838"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0xd818ac5028fe5aa1abd2d3ffe4693b4e96eabad35e49011e2ce920bcd76d061a', '0', '0', '1753167603', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973715', '0x9ee9d75a25a912e7d3907679a1e588021f4d2040c71d2e1c958538466a4fbbd6', '0x8e99d9242315a107492520e9255dd77798adbff81393d3de83960dac361bd838', '{"parentHash":"0x8e99d9242315a107492520e9255dd77798adbff81393d3de83960dac361bd838","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x233fd85bd753e126e4df23a05c56ccde3eb6ec06ce2565a990af3347dc95b0c5","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77213","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36f4","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x9ee9d75a25a912e7d3907679a1e588021f4d2040c71d2e1c958538466a4fbbd6"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0x233fd85bd753e126e4df23a05c56ccde3eb6ec06ce2565a990af3347dc95b0c5', '0', '0', '1753167604', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973716', '0x9b25094db21166930008728c487ca9dbbc1e842701c8573eaa6bea4d41c10a7e', '0x9ee9d75a25a912e7d3907679a1e588021f4d2040c71d2e1c958538466a4fbbd6', '{"parentHash":"0x9ee9d75a25a912e7d3907679a1e588021f4d2040c71d2e1c958538466a4fbbd6","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x12be357fcc1fc28e574a7f95a5f9b3aae7e18d8ab8829c676478b4e8953a8502","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77214","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36f5","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x9b25094db21166930008728c487ca9dbbc1e842701c8573eaa6bea4d41c10a7e"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0x12be357fcc1fc28e574a7f95a5f9b3aae7e18d8ab8829c676478b4e8953a8502', '0', '0', '1753167605', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
    state_root, tx_num, gas_used, block_timestamp, row_consumption,
    chunk_hash, transactions
) VALUES ('10973717', '0xeaab0e07b40720f8e961f28ef665f08e67797428dc1eaccba88d5d4f60341284', '0x9b25094db21166930008728c487ca9dbbc1e842701c8573eaa6bea4d41c10a7e', '{"parentHash":"0x9b25094db21166930008728c487ca9dbbc1e842701c8573eaa6bea4d41c10a7e","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x7e49dc33343a54e9afc285155b8a35575e6924d465fe2dc543b5ea8915eb828a","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77215","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36f6","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0xeaab0e07b40720f8e961f28ef665f08e67797428dc1eaccba88d5d4f60341284"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0x7e49dc33343a54e9afc285155b8a35575e6924d465fe2dc543b5ea8915eb828a', '0', '0', '1753167606', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root, state_root, tx_num, gas_used, block_timestamp, row_consumption, chunk_hash, transactions
) VALUES ('10973718', '0xd6af3c7bf29f3689b516ed5c8fcf885a4e7eb2df751c35d4ebbb19fcc13628d4', '0xeaab0e07b40720f8e961f28ef665f08e67797428dc1eaccba88d5d4f60341284', '{"parentHash":"0xeaab0e07b40720f8e961f28ef665f08e67797428dc1eaccba88d5d4f60341284","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0xf1a7db2e4f463fa87e3e65b73d2abc5374302855f6af9735d5a11c94c2d93975","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77216","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36f7","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0xd6af3c7bf29f3689b516ed5c8fcf885a4e7eb2df751c35d4ebbb19fcc13628d4"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0xf1a7db2e4f463fa87e3e65b73d2abc5374302855f6af9735d5a11c94c2d93975', '0', '0', '1753167607', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root, state_root, tx_num, gas_used, block_timestamp, row_consumption, chunk_hash, transactions
) VALUES ('10973719', '0x5815f5b91d53d0b5c7d423f06da7cad3d45edeab1c2b590af02ceebfd33b2ce1', '0xd6af3c7bf29f3689b516ed5c8fcf885a4e7eb2df751c35d4ebbb19fcc13628d4', '{"parentHash":"0xd6af3c7bf29f3689b516ed5c8fcf885a4e7eb2df751c35d4ebbb19fcc13628d4","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0xb5d1f420ddc1edb60c7fc3a06929a2014c548d1ddd52a78ab6984faed53a09d1","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77217","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36f8","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x5815f5b91d53d0b5c7d423f06da7cad3d45edeab1c2b590af02ceebfd33b2ce1"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0xb5d1f420ddc1edb60c7fc3a06929a2014c548d1ddd52a78ab6984faed53a09d1', '0', '0', '1753167608', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root, state_root, tx_num, gas_used, block_timestamp, row_consumption, chunk_hash, transactions
) VALUES ('10973720', '0x4d8bbd6a15515cacf18cf9810ca3867442cefc733e9cdeaa1527a008fbca3bd1', '0x5815f5b91d53d0b5c7d423f06da7cad3d45edeab1c2b590af02ceebfd33b2ce1', '{"parentHash":"0x5815f5b91d53d0b5c7d423f06da7cad3d45edeab1c2b590af02ceebfd33b2ce1","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0xf4ca7c941e6ad6a780ad8422a817c6a7916f3f80b5f0d0f95cabcb17b0531299","transactionsRoot":"0x79c3ba4e0fe89ddea0ed8becdbfff86f18dab3ffd21eaf13744b86cb104d664e","receiptsRoot":"0xc8f88931c3c4ca18cb582e490d7acabfbe04fd6fa971549af6bf927aec7bfa1f","logsBloom":"0x00000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000200000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77218","gasLimit":"0x1312d00","gasUsed":"0x7623","timestamp":"0x687f36f9","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x4d8bbd6a15515cacf18cf9810ca3867442cefc733e9cdeaa1527a008fbca3bd1"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0xf4ca7c941e6ad6a780ad8422a817c6a7916f3f80b5f0d0f95cabcb17b0531299', '1', '30243', '1753167609', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[{"type":0,"nonce":13627,"txHash":"0x539962b9584723f919b9f3a0b454622f5f51c195300564116d0cedfec17a1381","gas":30243,"gasPrice":"0xef426b","gasTipCap":"0xef426b","gasFeeCap":"0xef426b","from":"0x0000000000000000000000000000000000000000","to":"0xf07cc6482a24843efe7b42259acbaf8d0a2a6952","chainId":"0x8274f","value":"0x0","data":"0x91b7f5ed0000000000000000000000000000000000000000000018f4c5be1c1407000000","isCreate":false,"accessList":null,"authorizationList":null,"v":"0x104ec2","r":"0xaa309d7e218825160be9a87c9e50d3cbfead9c87e90e984ad0ea2441633092a2","s":"0x438f39c0af058794f320e5578720557af07c5397e363f9628a6c4ffee5bd2487"}]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root, state_root, tx_num, gas_used, block_timestamp, row_consumption, chunk_hash, transactions
) VALUES ('10973721', '0x84c53d922cfe0558c4be02865af5ebd49efe44a458dabc16aea5584e5e06f346', '0x4d8bbd6a15515cacf18cf9810ca3867442cefc733e9cdeaa1527a008fbca3bd1', '{"parentHash":"0x4d8bbd6a15515cacf18cf9810ca3867442cefc733e9cdeaa1527a008fbca3bd1","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0xd4838ba86f5a8e865a41ef7547148b6074235a658dd57ff2296c0badda4760d1","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77219","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36fa","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x84c53d922cfe0558c4be02865af5ebd49efe44a458dabc16aea5584e5e06f346"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0xd4838ba86f5a8e865a41ef7547148b6074235a658dd57ff2296c0badda4760d1', '0', '0', '1753167610', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root, state_root, tx_num, gas_used, block_timestamp, row_consumption, chunk_hash, transactions
) VALUES ('10973722', '0xe1d601522b08d98852b4c7dc3584f292ac246a3dac3c600ba58bd6c20c97be5b', '0x84c53d922cfe0558c4be02865af5ebd49efe44a458dabc16aea5584e5e06f346', '{"parentHash":"0x84c53d922cfe0558c4be02865af5ebd49efe44a458dabc16aea5584e5e06f346","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x8deede75e20423d0495cbdb493d320dddde6df0459df998608a16f658eb7bec3","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa7721a","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36fb","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0xe1d601522b08d98852b4c7dc3584f292ac246a3dac3c600ba58bd6c20c97be5b"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0x8deede75e20423d0495cbdb493d320dddde6df0459df998608a16f658eb7bec3', '0', '0', '1753167611', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root, state_root, tx_num, gas_used, block_timestamp, row_consumption, chunk_hash, transactions
) VALUES ('10973723', '0x8579712fc434b401f1ecfcf3ae22611be054480fa882e90f8eecb6c5e97534bd', '0xe1d601522b08d98852b4c7dc3584f292ac246a3dac3c600ba58bd6c20c97be5b', '{"parentHash":"0xe1d601522b08d98852b4c7dc3584f292ac246a3dac3c600ba58bd6c20c97be5b","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0xb4fe51cda0401bb19e8448a2697a49e1fbc25398c2b18a9955d0a8e6f4b153a7","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa7721b","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36fc","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x8579712fc434b401f1ecfcf3ae22611be054480fa882e90f8eecb6c5e97534bd"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0xb4fe51cda0401bb19e8448a2697a49e1fbc25398c2b18a9955d0a8e6f4b153a7', '0', '0', '1753167612', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root, state_root, tx_num, gas_used, block_timestamp, row_consumption, chunk_hash, transactions
) VALUES ('10973724', '0xe13a0b907e044a9df1952acc31dc08a578fb910a0cc224e11692cb84c9c9a9f7', '0x8579712fc434b401f1ecfcf3ae22611be054480fa882e90f8eecb6c5e97534bd', '{"parentHash":"0x8579712fc434b401f1ecfcf3ae22611be054480fa882e90f8eecb6c5e97534bd","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0xcd17c85290d8ec7473357ebe1605f766af6c1356732cc7ad11de0453baca05c6","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa7721c","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36fd","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0xe13a0b907e044a9df1952acc31dc08a578fb910a0cc224e11692cb84c9c9a9f7"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0xcd17c85290d8ec7473357ebe1605f766af6c1356732cc7ad11de0453baca05c6', '0', '0', '1753167613', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root, state_root, tx_num, gas_used, block_timestamp, row_consumption, chunk_hash, transactions
) VALUES ('10973725', '0x2e26fb489f8644b3b5c44cd493ebc140ba3bc716588f37a71b8ba6dc504ccb5f', '0xe13a0b907e044a9df1952acc31dc08a578fb910a0cc224e11692cb84c9c9a9f7', '{"parentHash":"0xe13a0b907e044a9df1952acc31dc08a578fb910a0cc224e11692cb84c9c9a9f7","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0xfd321f4a3e2bc757df89162f730a2e37519dcb29cdb63019665c1fe4dbceeb00","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa7721d","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36fe","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x2e26fb489f8644b3b5c44cd493ebc140ba3bc716588f37a71b8ba6dc504ccb5f"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0xfd321f4a3e2bc757df89162f730a2e37519dcb29cdb63019665c1fe4dbceeb00', '0', '0', '1753167614', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root, state_root, tx_num, gas_used, block_timestamp, row_consumption, chunk_hash, transactions
) VALUES ('10973726', '0x313b0fbb7cbb8bc1ba4fbc50684b516d31f9f7ee6f66d919da01328537a4b0a1', '0x2e26fb489f8644b3b5c44cd493ebc140ba3bc716588f37a71b8ba6dc504ccb5f', '{"parentHash":"0x2e26fb489f8644b3b5c44cd493ebc140ba3bc716588f37a71b8ba6dc504ccb5f","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x1a24ed5ee5e8ca354f583b28bd7f2c4c6fe4dca59fef476578eddab17b857471","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa7721e","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f36ff","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x313b0fbb7cbb8bc1ba4fbc50684b516d31f9f7ee6f66d919da01328537a4b0a1"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0x1a24ed5ee5e8ca354f583b28bd7f2c4c6fe4dca59fef476578eddab17b857471', '0', '0', '1753167615', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root, state_root, tx_num, gas_used, block_timestamp, row_consumption, chunk_hash, transactions
) VALUES ('10973727', '0xf9039c9c24ab919066f2eb6f97360cfb727ed032c9e6142ea45e784b19894560', '0x313b0fbb7cbb8bc1ba4fbc50684b516d31f9f7ee6f66d919da01328537a4b0a1', '{"parentHash":"0x313b0fbb7cbb8bc1ba4fbc50684b516d31f9f7ee6f66d919da01328537a4b0a1","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x2927f53f1eaaeaa17a80f048f10474a7cc3b2c96547cc47caad33ff9e5b38da6","transactionsRoot":"0x80fd441b38b6ffb8f9369d8a5179356f9bf5ad332db0da99f7c6efdb90939cd2","receiptsRoot":"0xa262cee7ba62c004c6554e9cf378512a868346c24f8cafc1ac1954250339149e","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000900000000000000000000000000000000000000000000000000000000000000000000000001000000008000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000020000000000000000000","difficulty":"0x1","number":"0xa7721f","gasLimit":"0x1312d00","gasUsed":"0x9642","timestamp":"0x687f3700","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0xf9039c9c24ab919066f2eb6f97360cfb727ed032c9e6142ea45e784b19894560"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0x2927f53f1eaaeaa17a80f048f10474a7cc3b2c96547cc47caad33ff9e5b38da6', '1', '38466', '1753167616', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[{"type":2,"nonce":3392501,"txHash":"0xa5231ea1b94eb516575807531763b312d250ee5ad4dfbeea66beab5f448c32b6","gas":45919,"gasPrice":"0x1de8472","gasTipCap":"0x64","gasFeeCap":"0x1de8472","from":"0x0000000000000000000000000000000000000000","to":"0x5300000000000000000000000000000000000002","chainId":"0x8274f","value":"0x0","data":"0x39455d3a000000000000000000000000000000000000000000000000000000000004580f0000000000000000000000000000000000000000000000000000000000000001","isCreate":false,"accessList":[{"address":"0x5300000000000000000000000000000000000003","storageKeys":["0x297c59f20c6b2556a4ed35dccabbdeb8b1cf950f62aefb86b98d19b5a4aff2a2"]}],"authorizationList":null,"v":"0x1","r":"0xa09a97c38c7a58f40ff39ca74f938c63f1ef822cf91926d4fff96b7dc818d3f3","s":"0x77ee7453096794d9cbb206f26077f23b4cc88fe51893cb5eab46714e379ac833"}]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root, state_root, tx_num, gas_used, block_timestamp, row_consumption, chunk_hash, transactions
) VALUES ('10973728', '0xf27ff9223f6bf9a964737d50cb7c005f049cf0f4edfd16d24178a798c21716d6', '0xf9039c9c24ab919066f2eb6f97360cfb727ed032c9e6142ea45e784b19894560', '{"parentHash":"0xf9039c9c24ab919066f2eb6f97360cfb727ed032c9e6142ea45e784b19894560","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0x0dbe54818526afaabbce83765eabcd4ec4d437a3497e5d046d599af862ea9850","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77220","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f3701","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0xf27ff9223f6bf9a964737d50cb7c005f049cf0f4edfd16d24178a798c21716d6"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0x0dbe54818526afaabbce83765eabcd4ec4d437a3497e5d046d599af862ea9850', '0', '0', '1753167617', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root, state_root, tx_num, gas_used, block_timestamp, row_consumption, chunk_hash, transactions
) VALUES ('10973729', '0x2b7777eb3ffe5939d6b70883cef69250ef5a2ed62a8b378973e0c3fe84707137', '0xf27ff9223f6bf9a964737d50cb7c005f049cf0f4edfd16d24178a798c21716d6', '{"parentHash":"0xf27ff9223f6bf9a964737d50cb7c005f049cf0f4edfd16d24178a798c21716d6","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0xb89ed319fb9dcaed2df7e72223683cf255f6c1e45742e6caa810938871ce53bf","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77221","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f3702","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x2b7777eb3ffe5939d6b70883cef69250ef5a2ed62a8b378973e0c3fe84707137"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0xb89ed319fb9dcaed2df7e72223683cf255f6c1e45742e6caa810938871ce53bf', '0', '0', '1753167618', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root, state_root, tx_num, gas_used, block_timestamp, row_consumption, chunk_hash, transactions
) VALUES ('10973730', '0x56318f0a941611fc22640ea7f7d0308ab88a9e23059b5c6983bafc2402003d13', '0x2b7777eb3ffe5939d6b70883cef69250ef5a2ed62a8b378973e0c3fe84707137', '{"parentHash":"0x2b7777eb3ffe5939d6b70883cef69250ef5a2ed62a8b378973e0c3fe84707137","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","miner":"0x0000000000000000000000000000000000000000","stateRoot":"0xe603d341e958521d3f5df8f37b5144b3c003214c481716cffa4e8d6303d9734f","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","difficulty":"0x1","number":"0xa77222","gasLimit":"0x1312d00","gasUsed":"0x0","timestamp":"0x687f3703","extraData":"0x","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","baseFeePerGas":"0xef4207","withdrawalsRoot":null,"blobGasUsed":null,"excessBlobGas":null,"parentBeaconBlockRoot":null,"requestsHash":null,"hash":"0x56318f0a941611fc22640ea7f7d0308ab88a9e23059b5c6983bafc2402003d13"}', '0x5a9bd7f5f6723ce51c03beffa310a5bf79c2cf261ddb8622cf407b41d968ef91', '0xe603d341e958521d3f5df8f37b5144b3c003214c481716cffa4e8d6303d9734f', '0', '0', '1753167619', '', '0x2f73e96335a43b678e107b2ef57c7ec0297d88d4a9986c1d6f4e31f1d11fb4f4', '[]');
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
DELETE FROM l2_block;
-- +goose StatementEnd
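With the migration applied, the seeded fixture data can be sanity-checked before wiring up the coordinator. The following is a minimal Go sketch, not part of this change, assuming the github.com/lib/pq driver and the DSN from tests/prover-e2e/config.json below; it counts the seeded l2_block rows and reports the block range.

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // assumed Postgres driver; any database/sql driver would do
)

func main() {
	// DSN taken from tests/prover-e2e/config.json and docker-compose.yml below.
	db, err := sql.Open("postgres", "postgres://dev:dev@localhost:5432/scroll?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var count, lo, hi int64
	// l2_block and its columns come from the migration above; the ::bigint
	// cast assumes `number` is stored as (or castable to) an integer.
	row := db.QueryRow(`SELECT COUNT(*), MIN(number::bigint), MAX(number::bigint) FROM l2_block`)
	if err := row.Scan(&count, &lo, &hi); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("seeded %d blocks covering range %d-%d\n", count, lo, hi)
}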
47
tests/prover-e2e/Makefile
Normal file
@@ -0,0 +1,47 @@
.PHONY: clean setup_db test_tool all check_vars

GOOSE_CMD?=goose
BEGIN_BLOCK?=10973711
END_BLOCK?=10973721

all: setup_db test_tool import_data

clean:
	docker compose down

check_vars:
	@if [ -z "$(BEGIN_BLOCK)" ] || [ -z "$(END_BLOCK)" ]; then \
		echo "Error: BEGIN_BLOCK and END_BLOCK must be defined"; \
		echo "Usage: make import_data BEGIN_BLOCK=<start_block> END_BLOCK=<end_block>"; \
		exit 1; \
	fi

setup_db: clean
	docker compose up --detach
	@echo "Waiting for PostgreSQL to be ready..."
	@for i in $$(seq 1 30); do \
		if nc -z localhost 5432 >/dev/null 2>&1; then \
			echo "PostgreSQL port is open!"; \
			sleep 2; \
			break; \
		fi; \
		echo "Waiting for PostgreSQL to start... ($$i/30)"; \
		sleep 2; \
		if [ $$i -eq 30 ]; then \
			echo "Timed out waiting for PostgreSQL to start"; \
			exit 1; \
		fi; \
	done
	${GOOSE_CMD} up
	GOOSE_MIGRATION_DIR=./ ${GOOSE_CMD} up-to 100

test_tool:
	go build -o $(PWD)/build/bin/e2e_tool ../../rollup/tests/integration_tool

build/bin/e2e_tool: test_tool

import_data_euclid: build/bin/e2e_tool check_vars
	build/bin/e2e_tool --config ./config.json --codec 7 ${BEGIN_BLOCK} ${END_BLOCK}

import_data: build/bin/e2e_tool check_vars
	build/bin/e2e_tool --config ./config.json --codec 8 ${BEGIN_BLOCK} ${END_BLOCK}
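The setup_db target above polls TCP port 5432 with nc before running the goose migrations. For environments without nc, an equivalent readiness probe needs only the Go standard library; this is a minimal sketch of the same two-second, thirty-attempt loop, not code from the repository.

package main

import (
	"fmt"
	"log"
	"net"
	"time"
)

// waitForPort mirrors the Makefile's nc-based readiness loop: try the TCP
// port up to `attempts` times, two seconds apart.
func waitForPort(addr string, attempts int) error {
	for i := 1; i <= attempts; i++ {
		conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
		if err == nil {
			conn.Close()
			return nil
		}
		fmt.Printf("Waiting for PostgreSQL to start... (%d/%d)\n", i, attempts)
		time.Sleep(2 * time.Second)
	}
	return fmt.Errorf("timed out waiting for %s", addr)
}

func main() {
	if err := waitForPort("localhost:5432", 30); err != nil {
		log.Fatal(err)
	}
	fmt.Println("PostgreSQL port is open!")
}

An open port only shows the server is accepting connections, not that initialization has finished, which is presumably why the Makefile sleeps an extra two seconds after the port opens before proceeding.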
12
tests/prover-e2e/README.md
Normal file
@@ -0,0 +1,12 @@
## A new e2e test tool to set up a local environment for testing the coordinator and prover.

It contains data from a range of Scroll Sepolia blocks and generates a series of chunks/batches/bundles from those blocks, filling the coordinator's DB so that an e2e test (from chunk to bundle) can run entirely locally.

Steps:
1. Run `make all` under `tests/prover-e2e`. This launches a PostgreSQL DB in a local Docker container, ready for the coordinator to use (including some chunks/batches/bundles waiting to be proven).
2. Download the circuit assets with the `download-release.sh` script in `zkvm-prover`.
3. Generate the verifier artifacts corresponding to the downloaded assets via `make gen_verifier_stuff` in `zkvm-prover`.
4. Set up `config.json` and `genesis.json` for the coordinator, and copy the verifier artifacts generated in step 3 to the directory the coordinator loads them from.
5. Build and launch the `coordinator_api` service locally.
6. Set up the `config.json` for the zkvm prover so it connects to the locally launched coordinator API.
7. In `zkvm-prover`, run `make test_e2e_run`. This starts a prover locally, connects it to the local coordinator API service according to `config.json`, and proves all tasks injected into the DB in step 1.
8
tests/prover-e2e/config.json
Normal file
@@ -0,0 +1,8 @@
{
  "db_config": {
    "driver_name": "postgres",
    "dsn": "postgres://dev:dev@localhost:5432/scroll?sslmode=disable",
    "maxOpenNum": 5,
    "maxIdleNum": 1
  }
}
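For reference, the db_config keys above map naturally onto Go's database/sql pool settings. The sketch below shows how a consumer might load this file; the struct names are illustrative rather than the coordinator's actual config types, though the JSON tags match the keys shown.

package main

import (
	"database/sql"
	"encoding/json"
	"log"
	"os"

	_ "github.com/lib/pq" // assumed Postgres driver
)

// DBConfig mirrors the "db_config" object above. The JSON tags match the
// keys shown; the Go field names themselves are illustrative.
type DBConfig struct {
	DriverName string `json:"driver_name"`
	DSN        string `json:"dsn"`
	MaxOpenNum int    `json:"maxOpenNum"`
	MaxIdleNum int    `json:"maxIdleNum"`
}

type Config struct {
	DBConfig DBConfig `json:"db_config"`
}

func openFromConfig(path string) (*sql.DB, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var cfg Config
	if err := json.Unmarshal(raw, &cfg); err != nil {
		return nil, err
	}
	db, err := sql.Open(cfg.DBConfig.DriverName, cfg.DBConfig.DSN)
	if err != nil {
		return nil, err
	}
	// Plausibly what maxOpenNum/maxIdleNum control: database/sql pool limits.
	db.SetMaxOpenConns(cfg.DBConfig.MaxOpenNum)
	db.SetMaxIdleConns(cfg.DBConfig.MaxIdleNum)
	return db, db.Ping()
}

func main() {
	db, err := openFromConfig("config.json")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	log.Println("database reachable")
}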
22
tests/prover-e2e/docker-compose.yml
Normal file
@@ -0,0 +1,22 @@
# docker-compose.yml
# This configuration is for local debugging only.
# - PostgreSQL is bound to localhost (127.0.0.1) and not exposed externally.
# - Data can be persisted to ./db relative to the current directory (volume mount commented out below).
# - No production security settings are applied.
# The access URL is postgresql://dev:dev@localhost:5432/scroll

version: '3.8'

services:
  postgres:
    image: postgres
    container_name: local_postgres
    environment:
      POSTGRES_USER: dev
      POSTGRES_PASSWORD: dev
      POSTGRES_DB: scroll
    ports:
      - "127.0.0.1:5432:5432" # Listen only on localhost
    # volumes:
    #   - ./db:/var/lib/postgresql/data # Persist data to local ./db
    restart: unless-stopped