Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 23:18:07 -05:00)

Compare commits: test/code...refactor/z (83 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 1464d1091d | |
| | 14f355b528 | |
| | 4b91057a05 | |
| | 904b97b70d | |
| | a8a0a816d1 | |
| | fedfa04c2b | |
| | 9c5c29e465 | |
| | 1135291329 | |
| | 9b10ad8f14 | |
| | d0869d44d6 | |
| | bcd48b846f | |
| | 591a833146 | |
| | d4f1e7d981 | |
| | 9801992724 | |
| | bff0e6f398 | |
| | 85f3cc35e0 | |
| | 0d8b00c3de | |
| | 15c872d5a5 | |
| | c0f773c014 | |
| | 826357ab5d | |
| | a300fa284b | |
| | d26381cba3 | |
| | 0e65686ce4 | |
| | 6dd878eaca | |
| | 932be72b88 | |
| | 0c4c410d2c | |
| | 0360cd2c6f | |
| | a6d49b24d8 | |
| | 8ed77bd780 | |
| | 66b1095e25 | |
| | 69ff09a7e6 | |
| | 318c46ebc9 | |
| | a18fe06440 | |
| | 3ac69bec51 | |
| | e80f030246 | |
| | a77f1413ee | |
| | 5b62692098 | |
| | 4f34e90f00 | |
| | 6b1b822c81 | |
| | a34c01d90b | |
| | 0578aab3ae | |
| | 55dfbf6735 | |
| | f3ddf43439 | |
| | a2582dcc3f | |
| | 0a1868cec1 | |
| | fcfbc53252 | |
| | 13c8605211 | |
| | 228cba48b7 | |
| | b7c5ba9046 | |
| | 5f7973528e | |
| | 69a7339bab | |
| | 056ebdca1c | |
| | 7a386ad807 | |
| | 5fb93c4dc3 | |
| | a5e2d71ebd | |
| | 6b837c01f3 | |
| | 5b6b145753 | |
| | c07975acdf | |
| | dfdb2ecf07 | |
| | a6f2457040 | |
| | fa0927c5dc | |
| | f92029aaeb | |
| | 45b23edde9 | |
| | 33b1b3cb51 | |
| | 51c930d7da | |
| | 4cfc5511fb | |
| | 06beb5dca3 | |
| | 968a396b5e | |
| | fa2401c081 | |
| | 438a9fb1d6 | |
| | 1c22307f08 | |
| | 22dd3901f0 | |
| | 54d823677f | |
| | e3cf2cb82b | |
| | b6025425ac | |
| | 3ab5752276 | |
| | c4ba0f9178 | |
| | f0e8fbe738 | |
| | 2059b49624 | |
| | bc8f9dbc83 | |
| | cc2441d42d | |
| | 5d965d49db | |
| | 233fff0333 | |
.dockerignore (new file) | 16
@@ -0,0 +1,16 @@
+.github
+.gitignore
+.dockerignore
+Dockerfile
+Dockerfile.backup
+.output
+docs
+openvm-clippy
+target
.github/workflows/common.yml (vendored) | 6
@@ -29,7 +29,7 @@ jobs:
     steps:
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-12-03
+          toolchain: nightly-2024-12-06
          override: true
           components: rustfmt, clippy
       - name: Install Go
@@ -42,6 +42,10 @@ jobs:
         uses: Swatinem/rust-cache@v2
         with:
           workspaces: "common/libzkp/impl -> target"
+      - name: Setup SSH for private repos
+        uses: webfactory/ssh-agent@v0.9.0
+        with:
+          ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
       - name: Lint
         working-directory: 'common'
         run: |
.github/workflows/docker.yml (vendored) | 7
@@ -307,6 +307,13 @@ jobs:
          REPOSITORY: coordinator-api
        run: |
          aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
+      - name: Setup SSH for private repos
+        uses: webfactory/ssh-agent@v0.9.0
+        with:
+          ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
+      - name: Run custom script
+        run: |
+          ./build/dockerfiles/coordinator-api/init-openvm.sh
      - name: Build and push
        uses: docker/build-push-action@v3
        env:
.github/workflows/intermediate-docker.yml (vendored) | 6
@@ -9,9 +9,13 @@ on:
        type: choice
        options:
          - "1.20"
+          - "1.20.14"
          - "1.21"
+          - "1.21.13"
          - "1.22"
+          - "1.22.12"
          - "1.23"
+          - "1.23.7"
        default: "1.21"
      RUST_VERSION:
        description: "Rust toolchain version"
@@ -20,6 +24,7 @@ on:
        options:
          - nightly-2023-12-03
          - nightly-2022-12-10
+          - 1.86.0
        default: "nightly-2023-12-03"
      PYTHON_VERSION:
        description: "Python version"
@@ -43,6 +48,7 @@ on:
        type: choice
        options:
          - 0.1.41
+          - 0.1.71
      BASE_IMAGE:
        description: "which intermediate image you want to update"
        required: true
.github/workflows/prover.yml (vendored, deleted) | 99
@@ -1,99 +0,0 @@
-name: Prover
-
-on:
-  push:
-    branches:
-      - main
-      - staging
-      - develop
-      - alpha
-    paths:
-      - 'prover/**'
-      - '.github/workflows/prover.yml'
-  pull_request:
-    types:
-      - opened
-      - reopened
-      - synchronize
-      - ready_for_review
-    paths:
-      - 'prover/**'
-      - '.github/workflows/prover.yml'
-
-defaults:
-  run:
-    working-directory: 'prover'
-
-jobs:
-  skip_check:
-    runs-on: ubuntu-latest
-    outputs:
-      should_skip: ${{ steps.skip_check.outputs.should_skip }}
-    steps:
-      - id: skip_check
-        uses: fkirc/skip-duplicate-actions@v5
-        with:
-          cancel_others: 'true'
-          concurrent_skipping: 'same_content_newer'
-          paths_ignore: '["**/README.md"]'
-
-  fmt:
-    needs: [skip_check]
-    if: |
-      github.event.pull_request.draft == false &&
-      (github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
-    runs-on: ubuntu-latest
-    timeout-minutes: 5
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@master
-        with:
-          toolchain: nightly-2023-12-03
-          components: rustfmt
-      - name: Cargo cache
-        uses: Swatinem/rust-cache@v2
-        with:
-          workspaces: "prover -> target"
-      - name: Cargo check
-        run: cargo check --all-features
-      - name: Cargo fmt
-        run: cargo fmt --all -- --check
-
-  clippy:
-    needs: [skip_check, fmt]
-    if: |
-      github.event.pull_request.draft == false &&
-      (github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
-    runs-on: ubuntu-latest
-    timeout-minutes: 30
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@master
-        with:
-          toolchain: nightly-2023-12-03
-          components: clippy
-      - name: Cargo cache
-        uses: Swatinem/rust-cache@v2
-        with:
-          workspaces: "prover -> target"
-      - name: Run clippy
-        run: cargo clippy --all-features --all-targets -- -D warnings
-
-  compile:
-    needs: [skip_check, clippy]
-    if: |
-      github.event.pull_request.draft == false &&
-      (github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@master
-        with:
-          toolchain: nightly-2023-12-03
-      - name: Cache cargo
-        uses: Swatinem/rust-cache@v2
-        with:
-          workspaces: "prover -> target"
-      - name: Test
-        run: |
-          make prover
@@ -11,7 +11,7 @@ Please note that this project is released with a [Contributor Code of Conduct][c
 
 ## Contribute to Scroll
 
-Did you know there are many ways of contributing to Scroll? If you are looking to contribute to by adding Scroll to existing Dev Tools or by doing integrations please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. If you are looking to contribute to Scroll's Halo2 zkEVM circuits please refer to the [zkEVM circuits](https://github.com/scroll-tech/zkevm-circuits) repo. This repository covers the Scroll infrastructure and smart contracts, if you want to contribute to these areas continue reading this document.
+Did you know there are many ways of contributing to Scroll? If you are looking to contribute to by adding Scroll to existing Dev Tools or by doing integrations please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. This repository covers the Scroll infrastructure and smart contracts, if you want to contribute to these areas continue reading this document.
 
 ## Issues and PRs
Cargo.lock (generated, new file) | 9675
File diff suppressed because it is too large
Cargo.toml (new file) | 93
@@ -0,0 +1,93 @@
+[workspace]
+members = [
+    "common/types-rs",
+    "common/types-rs/base",
+    "common/types-rs/aggregation",
+    "common/types-rs/chunk",
+    "common/types-rs/batch",
+    "common/types-rs/bundle",
+    "common/libzkp/impl",
+    "zkvm-prover/prover",
+    "zkvm-prover/verifier",
+    "zkvm-prover/integration",
+    "zkvm-prover/bin",
+]
+exclude = [
+    "prover"
+]
+
+resolver = "2"
+
+[workspace.package]
+authors = ["Scroll developers"]
+edition = "2021"
+homepage = "https://scroll.io"
+readme = "README.md"
+repository = "https://github.com/scroll-tech/scroll"
+version = "4.5.8"
+
+[workspace.dependencies]
+scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", package = "scroll-zkvm-prover"}
+
+openvm = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
+openvm-build = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
+openvm-transpiler = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
+openvm-custom-insn = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
+openvm-rv32im-guest = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
+openvm-circuit = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
+openvm-native-circuit = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
+openvm-native-compiler = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
+openvm-native-recursion = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
+openvm-native-transpiler = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
+openvm-continuations = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
+openvm-sdk = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
+openvm-stark-sdk = { git = "https://github.com/openvm-org/stark-backend.git", tag = "v1.0.1" }
+
+sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade", features = ["scroll"] }
+sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade", features = ["scroll"] }
+sbv-kv = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade" }
+sbv-trie = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade" }
+sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade" }
+
+metrics = "0.23.0"
+metrics-util = "0.17"
+metrics-tracing-context = "0.16.0"
+
+alloy = { version = "0.11", default-features = false }
+alloy-primitives = { version = "0.8", default-features = false }
+# also use this to trigger "serde" feature for primitives
+alloy-serde = { version = "0.8", default-features = false }
+
+rkyv = "0.8"
+serde = { version = "1", default-features = false, features = ["derive"] }
+serde_json = { version = "1.0" }
+serde_with = "3.11.0"
+itertools = "0.14"
+tiny-keccak = "2.0"
+tracing = "0.1"
+eyre = "0.6"
+bincode_v1 = { version = "1.3", package = "bincode"}
+snark-verifier-sdk = { version = "0.2.0", default-features = false, features = [
+    "loader_halo2",
+    "halo2-axiom",
+    "display",
+] }
+once_cell = "1.20"
+base64 = "0.22"
+
+#TODO: upgrade
+vm-zstd = { git = "https://github.com/scroll-tech/rust-zstd-decompressor.git", tag = "v0.1.1" }
+
+scroll-zkvm-circuit-input-types = { path = "common/types-rs"}
+scroll-zkvm-verifier = { path = "zkvm-prover/verifier"}
+scroll-zkvm-prover = { path = "zkvm-prover/prover"}
+
+[patch.crates-io]
+alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v0.8.18-euclid-upgrade" }
+ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
+tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-euclid-upgrade" }
+
+[profile.maxperf]
+inherits = "release"
+lto = "fat"
+codegen-units = 1
Makefile | 26
@@ -1,6 +1,6 @@
 .PHONY: fmt dev_docker build_test_docker run_test_docker clean update
 
-L2GETH_TAG=scroll-v5.6.3
+L2GETH_TAG=scroll-v5.8.23
 
 help: ## Display this help message
 	@grep -h \
@@ -8,12 +8,12 @@ help: ## Display this help message
 	awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
 
 update: ## Update dependencies
 	go work sync
-	cd $(PWD)/bridge-history-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
-	cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
-	cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
-	cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
-	cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
-	cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/bridge-history-api/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/common/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
+	cd $(PWD)/coordinator/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/database/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/rollup/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/tests/integration-test/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
 
 lint: ## The code's format and security checks
 	make -C rollup lint
@@ -31,12 +31,12 @@ fmt: ## Format the code
 	cd $(PWD)/rollup/ && go mod tidy
 	cd $(PWD)/tests/integration-test/ && go mod tidy
 
-	goimports -local $(PWD)/bridge-history-api/ -w .
-	goimports -local $(PWD)/common/ -w .
-	goimports -local $(PWD)/coordinator/ -w .
-	goimports -local $(PWD)/database/ -w .
-	goimports -local $(PWD)/rollup/ -w .
-	goimports -local $(PWD)/tests/integration-test/ -w .
+	goimports -local scroll-tech/bridge-history-api/ -w .
+	goimports -local scroll-tech/common/ -w .
+	goimports -local scroll-tech/coordinator/ -w .
+	goimports -local scroll-tech/database/ -w .
+	goimports -local scroll-tech/rollup/ -w .
+	goimports -local scroll-tech/tests/integration-test/ -w .
 
 dev_docker: ## Build docker images for development/testing usages
 	docker pull postgres
@@ -37,6 +37,6 @@ reset-env:
 	go build -o $(PWD)/build/bin/bridgehistoryapi-db-cli ./cmd/db_cli && $(PWD)/build/bin/bridgehistoryapi-db-cli reset
 
 bridgehistoryapi-docker:
-	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
-	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-api:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-api.Dockerfile
-	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile
+	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile --platform=linux/amd64
+	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-api:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-api.Dockerfile --platform=linux/amd64
+	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile --platform=linux/amd64
File diff suppressed because one or more lines are too long
bridge-history-api/abi/backend_abi_test.go (new file) | 13
@@ -0,0 +1,13 @@
+package backendabi
+
+import (
+	"testing"
+
+	"github.com/scroll-tech/go-ethereum/crypto"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestEventSignatures(t *testing.T) {
+	assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)")), L1RevertBatchV0EventSig)
+	assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,uint256)")), L1RevertBatchV7EventSig)
+}
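A note on the test above: the two RevertBatch variants share an event name but differ in argument types, so their topic-0 hashes differ, and that is what lets the fetcher distinguish them later in the event parser. A minimal, self-contained sketch of the same derivation and the topic matching it enables (illustrative only; the variable names are hypothetical, and the upstream ethereum/go-ethereum import path is used here in place of the scroll-tech fork):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Topic 0 of an event log is the Keccak-256 hash of the canonical
	// signature, so RevertBatch(uint256,bytes32) and
	// RevertBatch(uint256,uint256) hash to distinct values.
	v0Sig := crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)"))
	v7Sig := crypto.Keccak256Hash([]byte("RevertBatch(uint256,uint256)"))

	// Stand-in for log.Topics[0] of a received event.
	var topic0 common.Hash = v7Sig

	switch topic0 {
	case v0Sig:
		fmt.Println("pre-V7 revert: single batch, identified by (index, hash)")
	case v7Sig:
		fmt.Println("V7 revert: batch range, identified by (startIndex, finishIndex)")
	}
}
```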
@@ -68,7 +68,10 @@ func action(ctx *cli.Context) error {
 
 	observability.Server(ctx, db)
 
-	l1MessageFetcher := fetcher.NewL1MessageFetcher(subCtx, cfg.L1, db, l1Client)
+	l1MessageFetcher, err := fetcher.NewL1MessageFetcher(subCtx, cfg.L1, db, l1Client)
+	if err != nil {
+		log.Crit("failed to create L1MessageFetcher", "err", err)
+	}
 	go l1MessageFetcher.Start()
 
 	l2MessageFetcher := fetcher.NewL2MessageFetcher(subCtx, cfg.L2, db, l2Client)
@@ -19,9 +19,11 @@
     "ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
     "GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
     "MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B",
+    "MessageQueueV2Addr": "0x0000000000000000000000000000000000000000",
     "BatchBridgeGatewayAddr": "0x5Bcfd99c34cf7E06fc756f6f5aE7400504852bc4",
     "GasTokenGatewayAddr": "0x0000000000000000000000000000000000000000",
-    "WrappedTokenGatewayAddr": "0x0000000000000000000000000000000000000000"
+    "WrappedTokenGatewayAddr": "0x0000000000000000000000000000000000000000",
+    "BlobScanAPIEndpoint": "https://api.blobscan.com/blobs/"
   },
   "L2": {
     "confirmation": 0,
@@ -1,6 +1,8 @@
 module scroll-tech/bridge-history-api
 
-go 1.21
+go 1.22
+
+toolchain go1.22.2
 
 require (
 	github.com/gin-contrib/cors v1.5.0
@@ -8,41 +10,43 @@ require (
 	github.com/go-redis/redis/v8 v8.11.5
 	github.com/pressly/goose/v3 v3.16.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
+	github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950
 	github.com/stretchr/testify v1.9.0
 	github.com/urfave/cli/v2 v2.25.7
-	golang.org/x/sync v0.7.0
+	golang.org/x/sync v0.11.0
 	gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
 )
 
+replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 // It's a hotfix for the header hash incompatibility issue, pls change this with caution
+
 require (
 	dario.cat/mergo v1.0.0 // indirect
-	github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
+	github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/bits-and-blooms/bitset v1.13.0 // indirect
+	github.com/bits-and-blooms/bitset v1.20.0 // indirect
 	github.com/btcsuite/btcd v0.20.1-beta // indirect
 	github.com/bytedance/sonic v1.10.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
 	github.com/chenzhuoyu/iasm v0.9.0 // indirect
 	github.com/consensys/bavard v0.1.13 // indirect
-	github.com/consensys/gnark-crypto v0.12.1 // indirect
+	github.com/consensys/gnark-crypto v0.13.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
-	github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
+	github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/deckarep/golang-set v1.8.0 // indirect
+	github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
 	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
 	github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible // indirect
 	github.com/docker/docker v26.1.0+incompatible // indirect
 	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/edsrzf/mmap-go v1.0.0 // indirect
-	github.com/ethereum/c-kzg-4844 v1.0.2 // indirect
+	github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
 	github.com/fjl/memsize v0.0.2 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/gabriel-vasile/mimetype v1.4.2 // indirect
 	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
 	github.com/gin-contrib/sse v0.1.0 // indirect
 	github.com/go-kit/kit v0.9.0 // indirect
 	github.com/go-ole/go-ole v1.3.0 // indirect
 	github.com/go-playground/locales v0.14.1 // indirect
 	github.com/go-playground/universal-translator v0.18.1 // indirect
@@ -55,15 +59,15 @@ require (
 	github.com/hashicorp/go-bexpr v0.1.10 // indirect
 	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
 	github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
-	github.com/holiman/uint256 v1.2.4 // indirect
+	github.com/holiman/uint256 v1.3.2 // indirect
 	github.com/huin/goupnp v1.3.0 // indirect
-	github.com/iden3/go-iden3-crypto v0.0.16 // indirect
+	github.com/iden3/go-iden3-crypto v0.0.17 // indirect
 	github.com/jackc/pgx/v5 v5.5.4 // indirect
 	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/jinzhu/now v1.1.5 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.4 // indirect
+	github.com/klauspost/compress v1.17.9 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
 	github.com/leodido/go-urn v1.2.4 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
@@ -89,15 +93,15 @@ require (
 	github.com/rjeczalik/notify v0.9.1 // indirect
 	github.com/rs/cors v1.7.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb // indirect
 	github.com/scroll-tech/zktrie v0.8.4 // indirect
 	github.com/sethvargo/go-retry v0.2.4 // indirect
 	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
+	github.com/sourcegraph/conc v0.3.0 // indirect
 	github.com/status-im/keycard-go v0.2.0 // indirect
-	github.com/supranational/blst v0.3.12 // indirect
+	github.com/supranational/blst v0.3.13 // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
 	github.com/tklauser/go-sysconf v0.3.14 // indirect
-	github.com/tklauser/numcpus v0.8.0 // indirect
+	github.com/tklauser/numcpus v0.9.0 // indirect
 	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
 	github.com/tyler-smith/go-bip39 v1.1.0 // indirect
 	github.com/ugorji/go/codec v1.2.11 // indirect
@@ -108,8 +112,8 @@ require (
 	golang.org/x/arch v0.5.0 // indirect
 	golang.org/x/crypto v0.24.0 // indirect
 	golang.org/x/net v0.25.0 // indirect
-	golang.org/x/sys v0.21.0 // indirect
-	golang.org/x/text v0.16.0 // indirect
+	golang.org/x/sys v0.30.0 // indirect
+	golang.org/x/text v0.21.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
 	google.golang.org/protobuf v1.33.0 // indirect
@@ -11,9 +11,11 @@ github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5
 github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
 github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
-github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
+github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
+github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
 github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
+github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
+github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
@@ -23,8 +25,8 @@ github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHG
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
-github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
+github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
+github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
 github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
 github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
 github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@@ -53,20 +55,20 @@ github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo
 github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
 github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
 github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
-github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
-github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
+github.com/consensys/gnark-crypto v0.13.0 h1:VPULb/v6bbYELAPTDFINEVaMTTybV5GLxDdcjnS+4oc=
+github.com/consensys/gnark-crypto v0.13.0/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o=
 github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
 github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
 github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
-github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
+github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
+github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
 github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
-github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
+github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
+github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
@@ -86,8 +88,8 @@ github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T
 github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E=
 github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
 github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
-github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s=
-github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
+github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
+github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
 github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
 github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -108,9 +110,8 @@ github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw=
 github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw=
 github.com/go-faster/errors v0.6.1 h1:nNIPOBkprlKzkThvS/0YaX8Zs9KewLCOSFQS5BU06FI=
 github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+Jpi2LAyY=
-github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
@@ -170,13 +171,13 @@ github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuW
 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
 github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
-github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
-github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
+github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
+github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
 github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
-github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk=
-github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
+github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
+github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
 github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
 github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
 github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
@@ -202,8 +203,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
 github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
-github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
-github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
 github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@@ -302,16 +303,16 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
 github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
 github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
 github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb h1:uOKdmDT0LsuS3gfynEjR4zA3Ooh6p2Z3O+IMRj2r8LA=
-github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
+github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54 h1:qVpsVu1J91opTn6HYeuzWcBRVhQmPR8g05i+PlOjlI4=
+github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 h1:qfOaRflvH1vtnFWloB7BveKlP/VqYgMqLJ6e9TlBJ/8=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
@@ -324,6 +325,8 @@ github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5g
 github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
 github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
@@ -340,14 +343,14 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8=
-github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
+github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
+github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
 github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
 github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
-github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
-github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
+github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo=
+github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
 github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
 github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
 github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
@@ -397,8 +400,8 @@ golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
+golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -415,13 +418,14 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
-golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
 golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -30,9 +30,14 @@ type FetcherConfig struct {
 	ScrollChainAddr         string `json:"ScrollChainAddr"`
 	GatewayRouterAddr       string `json:"GatewayRouterAddr"`
 	MessageQueueAddr        string `json:"MessageQueueAddr"`
+	MessageQueueV2Addr      string `json:"MessageQueueV2Addr"`
 	BatchBridgeGatewayAddr  string `json:"BatchBridgeGatewayAddr"`
 	GasTokenGatewayAddr     string `json:"GasTokenGatewayAddr"`
 	WrappedTokenGatewayAddr string `json:"WrappedTokenGatewayAddr"`
+
+	BeaconNodeAPIEndpoint  string `json:"BeaconNodeAPIEndpoint"`
+	BlobScanAPIEndpoint    string `json:"BlobScanAPIEndpoint"`
+	BlockNativeAPIEndpoint string `json:"BlockNativeAPIEndpoint"`
 }
 
 // RedisConfig redis config
@@ -2,6 +2,7 @@ package fetcher
 
 import (
 	"context"
+	"fmt"
 	"math/big"
 	"time"
 
@@ -10,6 +11,7 @@ import (
 	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
 	"gorm.io/gorm"
 
 	"scroll-tech/bridge-history-api/internal/config"
@@ -35,13 +37,32 @@ type L1MessageFetcher struct {
 }
 
 // NewL1MessageFetcher creates a new L1MessageFetcher instance.
-func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1MessageFetcher {
+func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) (*L1MessageFetcher, error) {
+	blobClient := blob_client.NewBlobClients()
+	if cfg.BeaconNodeAPIEndpoint != "" {
+		beaconNodeClient, err := blob_client.NewBeaconNodeClient(cfg.BeaconNodeAPIEndpoint)
+		if err != nil {
+			log.Warn("failed to create BeaconNodeClient", "err", err)
+		} else {
+			blobClient.AddBlobClient(beaconNodeClient)
+		}
+	}
+	if cfg.BlobScanAPIEndpoint != "" {
+		blobClient.AddBlobClient(blob_client.NewBlobScanClient(cfg.BlobScanAPIEndpoint))
+	}
+	if cfg.BlockNativeAPIEndpoint != "" {
+		blobClient.AddBlobClient(blob_client.NewBlockNativeClient(cfg.BlockNativeAPIEndpoint))
+	}
+	if blobClient.Size() == 0 {
+		return nil, fmt.Errorf("no blob client is configured")
+	}
+
 	c := &L1MessageFetcher{
 		ctx:              ctx,
 		cfg:              cfg,
 		client:           client,
 		eventUpdateLogic: logic.NewEventUpdateLogic(db, true),
-		l1FetcherLogic:   logic.NewL1FetcherLogic(cfg, db, client),
+		l1FetcherLogic:   logic.NewL1FetcherLogic(cfg, db, client, blobClient),
 	}
 
 	reg := prometheus.DefaultRegisterer
@@ -58,7 +79,7 @@ func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gor
 		Help: "Latest blockchain height the L1 message fetcher has synced with.",
 	})
 
-	return c
+	return c, nil
 }
 
 // Start starts the L1 message fetching process.
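The constructor above composes whichever blob data sources are configured (beacon node, then BlobScan, then BlockNative) into one aggregate client and fails fast when none is configured. The internals of blob_client.BlobClients are not part of this diff, so the sketch below is an assumed reconstruction of the fallback pattern it appears to rely on, not the actual implementation; all type and method names here are hypothetical:

```go
package blobfallback

import (
	"context"
	"errors"
	"fmt"
)

// Blob and BlobSource are simplified stand-ins for the real blob_client
// types, which this diff only consumes.
type Blob struct{ Data []byte }

type BlobSource interface {
	GetBlob(ctx context.Context, versionedHash string) (*Blob, error)
}

// MultiSource tries each configured source in order and returns the first
// success, mirroring how the aggregate client is assembled above.
type MultiSource struct {
	sources []BlobSource
}

// Add registers another source; analogous to AddBlobClient in the diff.
func (m *MultiSource) Add(s BlobSource) {
	m.sources = append(m.sources, s)
}

// Size reports how many sources are configured; analogous to Size in the diff.
func (m *MultiSource) Size() int {
	return len(m.sources)
}

// GetBlob queries sources in priority order, collecting errors so the
// caller can see why every source failed.
func (m *MultiSource) GetBlob(ctx context.Context, versionedHash string) (*Blob, error) {
	var errs []error
	for _, s := range m.sources {
		blob, err := s.GetBlob(ctx, versionedHash)
		if err == nil && blob != nil {
			return blob, nil
		}
		errs = append(errs, err)
	}
	return nil, fmt.Errorf("all blob sources failed for %s: %w", versionedHash, errors.Join(errs...))
}
```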
@@ -2,13 +2,16 @@ package logic
 
 import (
 	"context"
+	"fmt"
 	"math/big"
 
+	"github.com/scroll-tech/da-codec/encoding"
 	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/core/types"
 	"github.com/scroll-tech/go-ethereum/crypto"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
 
 	backendabi "scroll-tech/bridge-history-api/abi"
 	"scroll-tech/bridge-history-api/internal/config"
@@ -19,15 +22,17 @@ import (
 
 // L1EventParser the l1 event parser
 type L1EventParser struct {
-	cfg    *config.FetcherConfig
-	client *ethclient.Client
+	cfg        *config.FetcherConfig
+	client     *ethclient.Client
+	blobClient blob_client.BlobClient
 }
 
 // NewL1EventParser creates l1 event parser
-func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L1EventParser {
+func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client, blobClient blob_client.BlobClient) *L1EventParser {
 	return &L1EventParser{
-		cfg:    cfg,
-		client: client,
+		cfg:        cfg,
+		client:     client,
+		blobClient: blobClient,
 	}
 }
@@ -232,7 +237,21 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
 }
 
 // ParseL1BatchEventLogs parses L1 watched batch events.
-func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.Log, client *ethclient.Client) ([]*orm.BatchEvent, error) {
+func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.Log, client *ethclient.Client, blockTimestampsMap map[uint64]uint64) ([]*orm.BatchEvent, error) {
+	// Since multiple CommitBatch events per transaction is introduced >= CodecV7,
+	// with one transaction carrying multiple blobs,
+	// each CommitBatch event corresponds to a blob containing block range data.
+	// To correctly process these events, we need to:
+	// 1. Parsing the associated blob data to extract the block range for each event
+	// 2. Tracking the parent batch hash for each processed CommitBatch event, to:
+	//    - Validate the batch hash, since parent batch hash is needed to calculate the batch hash
+	//    - Derive the index of the current batch by the number of parent batch hashes tracked
+	// In commitBatches and commitAndFinalizeBatch, the parent batch hash is passed in calldata,
+	// so that we can use it to get the first batch's parent batch hash, and derive the rest.
+	// The index map serves this purpose with:
+	// Key: commit transaction hash
+	// Value: parent batch hashes (in order) for each processed CommitBatch event in the transaction
+	txBlobIndexMap := make(map[common.Hash][]common.Hash)
 	var l1BatchEvents []*orm.BatchEvent
 	for _, vlog := range logs {
 		switch vlog.Topics[0] {
@@ -247,11 +266,59 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
 				log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
 				return nil, err
 			}
-			startBlock, endBlock, err := utils.GetBatchRangeFromCalldata(commitTx.Data())
+			version, startBlock, endBlock, err := utils.GetBatchVersionAndBlockRangeFromCalldata(commitTx.Data())
 			if err != nil {
 				log.Error("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
 				return nil, err
 			}
+			if version >= 7 { // It's a batch with version >= 7.
+				codec, err := encoding.CodecFromVersion(encoding.CodecVersion(version))
+				if err != nil {
+					return nil, fmt.Errorf("unsupported codec version: %v, err: %w", version, err)
+				}
+
+				// we append the batch hash to the slice for the current commit transaction after processing the batch.
+				// that means the current index of the batch within the transaction is len(txBlobIndexMap[vlog.TxHash]).
+				currentIndex := len(txBlobIndexMap[vlog.TxHash])
+				if currentIndex >= len(commitTx.BlobHashes()) {
+					return nil, fmt.Errorf("commit transaction %s has %d blobs, but trying to access index %d (batch index %d)",
+						vlog.TxHash.String(), len(commitTx.BlobHashes()), currentIndex, event.BatchIndex.Uint64())
+				}
+				blobVersionedHash := commitTx.BlobHashes()[currentIndex]
+
+				// validate the batch hash
+				var parentBatchHash common.Hash
+				if currentIndex == 0 {
+					parentBatchHash, err = utils.GetParentBatchHashFromCalldata(commitTx.Data())
+					if err != nil {
+						return nil, fmt.Errorf("failed to get parent batch header from calldata, tx hash: %s, err: %w", vlog.TxHash.String(), err)
+					}
+				} else {
+					// here we need to subtract 1 from the current index to get the parent batch hash.
+					parentBatchHash = txBlobIndexMap[vlog.TxHash][currentIndex-1]
+				}
+				calculatedBatch, err := codec.NewDABatchFromParams(event.BatchIndex.Uint64(), blobVersionedHash, parentBatchHash)
+				if err != nil {
+					return nil, fmt.Errorf("failed to create new DA batch from params, batch index: %d, err: %w", event.BatchIndex.Uint64(), err)
+				}
+				if calculatedBatch.Hash() != event.BatchHash {
+					return nil, fmt.Errorf("batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex, event.BatchHash.String(), calculatedBatch.Hash().String())
+				}
+
+				blocks, err := e.getBatchBlockRangeFromBlob(ctx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
+				if err != nil {
+					return nil, fmt.Errorf("failed to process versioned blob, blobVersionedHash: %s, block number: %d, blob index: %d, err: %w",
+						blobVersionedHash.String(), vlog.BlockNumber, currentIndex, err)
+				}
+				if len(blocks) == 0 {
+					return nil, fmt.Errorf("no blocks found in the blob, blobVersionedHash: %s, block number: %d, blob index: %d",
+						blobVersionedHash.String(), vlog.BlockNumber, currentIndex)
+				}
+				startBlock = blocks[0].Number()
+				endBlock = blocks[len(blocks)-1].Number()
+
+				txBlobIndexMap[vlog.TxHash] = append(txBlobIndexMap[vlog.TxHash], event.BatchHash)
+			}
 			l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
 				BatchStatus:      int(btypes.BatchStatusTypeCommitted),
 				BatchIndex:       event.BatchIndex.Uint64(),
@@ -260,8 +327,8 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
 				EndBlockNumber:   endBlock,
 				L1BlockNumber:    vlog.BlockNumber,
 			})
-		case backendabi.L1RevertBatchEventSig:
-			event := backendabi.L1RevertBatchEvent{}
+		case backendabi.L1RevertBatchV0EventSig:
+			event := backendabi.L1RevertBatchV0Event{}
 			if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch", vlog); err != nil {
 				log.Error("Failed to unpack RevertBatch event", "err", err)
 				return nil, err
@@ -272,6 +339,19 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
 				BatchHash:     event.BatchHash.String(),
 				L1BlockNumber: vlog.BlockNumber,
 			})
+		case backendabi.L1RevertBatchV7EventSig:
+			event := backendabi.L1RevertBatchV7Event{}
+			if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch0", vlog); err != nil {
+				log.Error("Failed to unpack RevertBatch event", "err", err)
+				return nil, err
+			}
+			for i := event.StartBatchIndex.Uint64(); i <= event.FinishBatchIndex.Uint64(); i++ {
+				l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
+					BatchStatus:   int(btypes.BatchStatusTypeReverted),
+					BatchIndex:    i,
+					L1BlockNumber: vlog.BlockNumber,
+				})
+			}
 		case backendabi.L1FinalizeBatchEventSig:
 			event := backendabi.L1FinalizeBatchEvent{}
 			if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "FinalizeBatch", vlog); err != nil {
@@ -389,3 +469,27 @@ func getRealFromAddress(ctx context.Context, eventSender common.Address, eventMe
 	}
 	return sender.String(), nil
 }
+
+func (e *L1EventParser) getBatchBlockRangeFromBlob(ctx context.Context, codec encoding.Codec, blobVersionedHash common.Hash, l1BlockTime uint64) ([]encoding.DABlock, error) {
+	blob, err := e.blobClient.GetBlobByVersionedHashAndBlockTime(ctx, blobVersionedHash, l1BlockTime)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get blob %s: %w", blobVersionedHash.Hex(), err)
+	}
+	if blob == nil {
+		return nil, fmt.Errorf("blob %s not found", blobVersionedHash.Hex())
+	}
+
+	blobPayload, err := codec.DecodeBlob(blob)
+	if err != nil {
+		return nil, fmt.Errorf("blob %s decode error: %w", blobVersionedHash.Hex(), err)
+	}
+
+	blocks := blobPayload.Blocks()
+	if len(blocks) == 0 {
+		return nil, fmt.Errorf("empty blocks in blob %s", blobVersionedHash.Hex())
+	}
+
+	log.Debug("Successfully processed blob", "blobVersionedHash", blobVersionedHash.Hex(), "blocksCount", len(blocks))
+
+	return blocks, nil
+}
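The parent-hash bookkeeping described in the long comment inside ParseL1BatchEventLogs is easier to see in isolation. Below is a self-contained, illustrative reduction of just that logic (the types and values are hypothetical; the real code additionally recomputes each batch hash via codec.NewDABatchFromParams and compares it to the event's hash): for each CommitBatch event in a transaction, the parent is either decoded from calldata (first event) or is the previous event's batch hash.

```go
package main

import "fmt"

// commitEvent is a hypothetical stand-in for one CommitBatch log entry.
type commitEvent struct {
	txHash    string
	batchHash string
}

func main() {
	// Two CommitBatch events emitted by the same commit transaction,
	// one per blob (possible since CodecV7).
	events := []commitEvent{
		{txHash: "0xtx1", batchHash: "0xbatch101"},
		{txHash: "0xtx1", batchHash: "0xbatch102"},
	}
	// Parent hash of the first batch in a transaction comes from calldata.
	parentFromCalldata := "0xbatch100"

	// Key: commit tx hash; value: batch hashes processed so far, in order.
	txBlobIndexMap := make(map[string][]string)

	for _, ev := range events {
		seen := txBlobIndexMap[ev.txHash]
		currentIndex := len(seen) // index of this batch within its tx

		var parentBatchHash string
		if currentIndex == 0 {
			parentBatchHash = parentFromCalldata
		} else {
			parentBatchHash = seen[currentIndex-1]
		}
		// The real code validates the chain here by recomputing the batch
		// hash from (batchIndex, blobVersionedHash, parentBatchHash).
		fmt.Printf("batch %s at blob index %d, parent %s\n",
			ev.batchHash, currentIndex, parentBatchHash)

		txBlobIndexMap[ev.txHash] = append(txBlobIndexMap[ev.txHash], ev.batchHash)
	}
}
```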
@@ -11,6 +11,7 @@ import (
 	"github.com/scroll-tech/go-ethereum/core/types"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
 	"gorm.io/gorm"

 	backendabi "scroll-tech/bridge-history-api/abi"
@@ -49,7 +50,7 @@ type L1FetcherLogic struct {
 }

 // NewL1FetcherLogic creates L1 fetcher logic
-func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic {
+func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client, blobClient blob_client.BlobClient) *L1FetcherLogic {
 	addressList := []common.Address{
 		common.HexToAddress(cfg.StandardERC20GatewayAddr),
 		common.HexToAddress(cfg.CustomERC20GatewayAddr),
@@ -119,6 +120,10 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
 		gatewayList = append(gatewayList, common.HexToAddress(cfg.WrappedTokenGatewayAddr))
 	}

+	if common.HexToAddress(cfg.MessageQueueV2Addr) != (common.Address{}) {
+		addressList = append(addressList, common.HexToAddress(cfg.MessageQueueV2Addr))
+	}
+
 	log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)

 	f := &L1FetcherLogic{
@@ -129,7 +134,7 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
 		client:      client,
 		addressList: addressList,
 		gatewayList: gatewayList,
-		parser:      NewL1EventParser(cfg, client),
+		parser:      NewL1EventParser(cfg, client, blobClient),
 	}

 	reg := prometheus.DefaultRegisterer
@@ -168,14 +173,10 @@ func (f *L1FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to u
 	return false, 0, lastBlockHash, blocks, nil
 }

-func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) (map[uint64]uint64, []*orm.CrossMessage, error) {
+func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) ([]*orm.CrossMessage, error) {
 	var l1RevertedTxs []*orm.CrossMessage
-	blockTimestampsMap := make(map[uint64]uint64)

 	for i := from; i <= to; i++ {
 		block := blocks[i-from]
-		blockTimestampsMap[block.NumberU64()] = block.Time()
-
 		for _, tx := range block.Transactions() {
 			// Gateways: L1 deposit.
 			// Messenger: L1 deposit retry (replayMessage), L1 deposit refund (dropMessage), L2 withdrawal's claim (relayMessageWithProof).
@@ -187,7 +188,7 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
 			receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
 			if receiptErr != nil {
 				log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
-				return nil, nil, receiptErr
+				return nil, receiptErr
 			}

 			// Check if the transaction is failed
@@ -199,7 +200,7 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
 			sender, senderErr := signer.Sender(tx)
 			if senderErr != nil {
 				log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", senderErr)
-				return nil, nil, senderErr
+				return nil, senderErr
 			}

 			l1RevertedTxs = append(l1RevertedTxs, &orm.CrossMessage{
@@ -213,7 +214,7 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
 			})
 		}
 	}
-	return blockTimestampsMap, l1RevertedTxs, nil
+	return l1RevertedTxs, nil
 }

 func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]types.Log, error) {
@@ -224,7 +225,7 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
 		Topics: make([][]common.Hash, 1),
 	}

-	query.Topics[0] = make([]common.Hash, 16)
+	query.Topics[0] = make([]common.Hash, 17)
 	query.Topics[0][0] = backendabi.L1DepositETHSig
 	query.Topics[0][1] = backendabi.L1DepositERC20Sig
 	query.Topics[0][2] = backendabi.L1DepositERC721Sig
@@ -233,14 +234,15 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
 	query.Topics[0][5] = backendabi.L1RelayedMessageEventSig
 	query.Topics[0][6] = backendabi.L1FailedRelayedMessageEventSig
 	query.Topics[0][7] = backendabi.L1CommitBatchEventSig
-	query.Topics[0][8] = backendabi.L1RevertBatchEventSig
-	query.Topics[0][9] = backendabi.L1FinalizeBatchEventSig
-	query.Topics[0][10] = backendabi.L1QueueTransactionEventSig
-	query.Topics[0][11] = backendabi.L1DequeueTransactionEventSig
-	query.Topics[0][12] = backendabi.L1DropTransactionEventSig
-	query.Topics[0][13] = backendabi.L1ResetDequeuedTransactionEventSig
-	query.Topics[0][14] = backendabi.L1BridgeBatchDepositSig
-	query.Topics[0][15] = backendabi.L1DepositWrappedTokenSig
+	query.Topics[0][8] = backendabi.L1RevertBatchV0EventSig
+	query.Topics[0][9] = backendabi.L1RevertBatchV7EventSig
+	query.Topics[0][10] = backendabi.L1FinalizeBatchEventSig
+	query.Topics[0][11] = backendabi.L1QueueTransactionEventSig
+	query.Topics[0][12] = backendabi.L1DequeueTransactionEventSig
+	query.Topics[0][13] = backendabi.L1DropTransactionEventSig
+	query.Topics[0][14] = backendabi.L1ResetDequeuedTransactionEventSig
+	query.Topics[0][15] = backendabi.L1BridgeBatchDepositSig
+	query.Topics[0][16] = backendabi.L1DepositWrappedTokenSig

 	eventLogs, err := f.client.FilterLogs(ctx, query)
 	if err != nil {
@@ -264,12 +266,18 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
 		return isReorg, reorgHeight, blockHash, nil, nil
 	}

-	blockTimestampsMap, l1RevertedTxs, err := f.getRevertedTxs(ctx, from, to, blocks)
+	l1RevertedTxs, err := f.getRevertedTxs(ctx, from, to, blocks)
 	if err != nil {
 		log.Error("L1Fetcher getRevertedTxs failed", "from", from, "to", to, "error", err)
 		return false, 0, common.Hash{}, nil, err
 	}

+	// Map block number to block timestamp to avoid fetching block header multiple times to get block timestamp.
+	blockTimestampsMap := make(map[uint64]uint64)
+	for _, block := range blocks {
+		blockTimestampsMap[block.NumberU64()] = block.Time()
+	}
+
 	eventLogs, err := f.l1FetcherLogs(ctx, from, to)
 	if err != nil {
 		log.Error("L1Fetcher l1FetcherLogs failed", "from", from, "to", to, "error", err)
@@ -282,7 +290,7 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
 		return false, 0, common.Hash{}, nil, err
 	}

-	l1BatchEvents, err := f.parser.ParseL1BatchEventLogs(ctx, eventLogs, f.client)
+	l1BatchEvents, err := f.parser.ParseL1BatchEventLogs(ctx, eventLogs, f.client, blockTimestampsMap)
 	if err != nil {
 		log.Error("failed to parse L1 batch event logs", "from", from, "to", to, "err", err)
 		return false, 0, common.Hash{}, nil, err
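A note on why growing query.Topics[0] from 16 to 17 slots is the whole filter change: in go-ethereum's FilterQuery, the inner slice at topic position 0 is an OR-list of event signatures, so adding the second revert signature only widens that list. A hedged sketch under that assumption; buildBatchEventQuery and its sigs parameter are hypothetical names:

package sketch

import (
	"math/big"

	ethereum "github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
)

// buildBatchEventQuery returns a log filter matching any of the given event
// signatures (OR semantics within topic position 0) emitted by the given
// contracts over the inclusive block range [from, to].
func buildBatchEventQuery(from, to uint64, addresses []common.Address, sigs []common.Hash) ethereum.FilterQuery {
	return ethereum.FilterQuery{
		FromBlock: new(big.Int).SetUint64(from),
		ToBlock:   new(big.Int).SetUint64(to),
		Addresses: addresses,
		Topics:    [][]common.Hash{sigs}, // one OR-list, like query.Topics[0] above
	}
}

The node applies AND across topic positions and OR within each position, which is why a single widened slice suffices.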
@@ -2,6 +2,7 @@ package orm

 import (
 	"context"
+	"errors"
 	"fmt"
 	"time"

@@ -45,7 +46,7 @@ func (c *BatchEvent) GetBatchEventSyncedHeightInDB(ctx context.Context) (uint64,
 	db = db.Model(&BatchEvent{})
 	db = db.Order("l1_block_number desc")
 	if err := db.First(&batch).Error; err != nil {
-		if err == gorm.ErrRecordNotFound {
+		if errors.Is(err, gorm.ErrRecordNotFound) {
 			return 0, nil
 		}
 		return 0, fmt.Errorf("failed to get batch synced height in db, error: %w", err)
@@ -62,7 +63,7 @@ func (c *BatchEvent) GetLastUpdatedFinalizedBlockHeight(ctx context.Context) (ui
 	db = db.Where("update_status = ?", btypes.UpdateStatusTypeUpdated)
 	db = db.Order("batch_index desc")
 	if err := db.First(&batch).Error; err != nil {
-		if err == gorm.ErrRecordNotFound {
+		if errors.Is(err, gorm.ErrRecordNotFound) {
 			// No finalized batch found, return genesis batch's end block number.
 			return 0, nil
 		}
@@ -81,7 +82,7 @@ func (c *BatchEvent) GetUnupdatedFinalizedBatchesLEBlockHeight(ctx context.Conte
 	db = db.Where("update_status = ?", btypes.UpdateStatusTypeUnupdated)
 	db = db.Order("batch_index asc")
 	if err := db.Find(&batches).Error; err != nil {
-		if err == gorm.ErrRecordNotFound {
+		if errors.Is(err, gorm.ErrRecordNotFound) {
 			return nil, nil
 		}
 		return nil, fmt.Errorf("failed to get unupdated finalized batches >= block height, error: %w", err)
@@ -116,7 +117,7 @@ func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvent
 		}
 	case btypes.BatchStatusTypeReverted:
 		db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
-		db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
+		db = db.Where("batch_status != ?", btypes.BatchStatusTypeFinalized)
 		updateFields["batch_status"] = btypes.BatchStatusTypeReverted
 		if err := db.Updates(updateFields).Error; err != nil {
 			return fmt.Errorf("failed to update batch event, error: %w", err)
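All of the == to errors.Is rewrites above (and the matching ones in the next file) guard against wrapped errors. A self-contained illustration; only gorm.ErrRecordNotFound is taken from the diff:

package main

import (
	"errors"
	"fmt"

	"gorm.io/gorm"
)

func main() {
	// A caller may hand back the sentinel wrapped in extra context.
	wrapped := fmt.Errorf("get batch event: %w", gorm.ErrRecordNotFound)

	fmt.Println(wrapped == gorm.ErrRecordNotFound)          // false: identity comparison misses the wrapped sentinel
	fmt.Println(errors.Is(wrapped, gorm.ErrRecordNotFound)) // true: errors.Is unwraps the chain
}

gorm currently returns the bare sentinel from First/Find, so errors.Is here is behavior-preserving hardening rather than a bug fix.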
@@ -2,6 +2,7 @@ package orm

 import (
 	"context"
+	"errors"
 	"fmt"
 	"time"

@@ -84,7 +85,7 @@ func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageTy
 		db = db.Order("l2_block_number desc")
 	}
 	if err := db.First(&message).Error; err != nil {
-		if err == gorm.ErrRecordNotFound {
+		if errors.Is(err, gorm.ErrRecordNotFound) {
 			return 0, nil
 		}
 		return 0, fmt.Errorf("failed to get latest processed height, type: %v, error: %w", messageType, err)
@@ -108,7 +109,7 @@ func (c *CrossMessage) GetL2LatestFinalizedWithdrawal(ctx context.Context) (*Cro
 	db = db.Where("rollup_status = ?", btypes.RollupStatusTypeFinalized)
 	db = db.Order("message_nonce desc")
 	if err := db.First(&message).Error; err != nil {
-		if err == gorm.ErrRecordNotFound {
+		if errors.Is(err, gorm.ErrRecordNotFound) {
 			return nil, nil
 		}
 		return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
@@ -127,10 +128,10 @@ func (c *CrossMessage) GetL2WithdrawalsByBlockRange(ctx context.Context, startBl
 	db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
 	db = db.Order("message_nonce asc")
 	if err := db.Find(&messages).Error; err != nil {
-		if err == gorm.ErrRecordNotFound {
+		if errors.Is(err, gorm.ErrRecordNotFound) {
 			return nil, nil
 		}
-		return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
+		return nil, fmt.Errorf("failed to get L2 withdrawals by block range, error: %v", err)
 	}
 	return messages, nil
 }
@@ -66,25 +66,26 @@ func ComputeMessageHash(
 	return common.BytesToHash(crypto.Keccak256(data))
 }

-// GetBatchRangeFromCalldata find the block range from calldata, both inclusive.
-func GetBatchRangeFromCalldata(txData []byte) (uint64, uint64, error) {
+// GetBatchVersionAndBlockRangeFromCalldata find the block range from calldata, both inclusive.
+func GetBatchVersionAndBlockRangeFromCalldata(txData []byte) (uint8, uint64, uint64, error) {
 	const methodIDLength = 4
 	if len(txData) < methodIDLength {
-		return 0, 0, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
+		return 0, 0, 0, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
 	}
 	method, err := backendabi.IScrollChainABI.MethodById(txData[:methodIDLength])
 	if err != nil {
-		return 0, 0, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err)
+		return 0, 0, 0, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err)
 	}
 	values, err := method.Inputs.Unpack(txData[methodIDLength:])
 	if err != nil {
-		return 0, 0, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
+		return 0, 0, 0, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
 	}

 	var chunks [][]byte
+	var version uint8

 	if method.Name == "importGenesisBatch" {
-		return 0, 0, nil
+		return 0, 0, 0, nil
 	} else if method.Name == "commitBatch" {
 		type commitBatchArgs struct {
 			Version uint8
@@ -95,11 +96,11 @@ func GetBatchVersionAndBlockRangeFromCalldata(txData []byte) (uint8, uint64, uin

 		var args commitBatchArgs
 		if err = method.Inputs.Copy(&args, values); err != nil {
-			return 0, 0, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err)
+			return 0, 0, 0, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err)
 		}
 		chunks = args.Chunks
-
+		version = args.Version
 	} else if method.Name == "commitBatchWithBlobProof" {
 		type commitBatchWithBlobProofArgs struct {
 			Version uint8
@@ -111,10 +112,22 @@ func GetBatchVersionAndBlockRangeFromCalldata(txData []byte) (uint8, uint64, uin

 		var args commitBatchWithBlobProofArgs
 		if err = method.Inputs.Copy(&args, values); err != nil {
-			return 0, 0, fmt.Errorf("failed to decode calldata into commitBatchWithBlobProofArgs args, values: %+v, err: %w", values, err)
+			return 0, 0, 0, fmt.Errorf("failed to decode calldata into commitBatchWithBlobProofArgs args, values: %+v, err: %w", values, err)
 		}

 		chunks = args.Chunks
+		version = args.Version
+	} else if method.Name == "commitBatches" || method.Name == "commitAndFinalizeBatch" {
+		if len(values) < 3 {
+			return 0, 0, 0, fmt.Errorf("insufficient arguments for %s, expected 3, got %d", method.Name, len(values))
+		}
+
+		var ok bool
+		version, ok = values[0].(uint8)
+		if !ok {
+			return 0, 0, 0, fmt.Errorf("invalid version type: %T", values[0])
+		}
+		return version, 0, 0, nil
 	}

 	var startBlock uint64
@@ -124,7 +137,7 @@ func GetBatchVersionAndBlockRangeFromCalldata(txData []byte) (uint8, uint64, uin
 	// | 1 byte     | 60 bytes | ... | 60 bytes |
 	// | num blocks | block 1  | ... | block n  |
 	if len(chunks) == 0 {
-		return 0, 0, errors.New("invalid chunks")
+		return 0, 0, 0, errors.New("invalid chunks")
 	}
 	chunk := chunks[0]
 	block := chunk[1:61] // first block in chunk
@@ -135,7 +148,36 @@ func GetBatchVersionAndBlockRangeFromCalldata(txData []byte) (uint8, uint64, uin
 	block = chunk[1+lastBlockIndex*60 : 1+lastBlockIndex*60+60] // last block in chunk
 	finishBlock = binary.BigEndian.Uint64(block[0:8])

-	return startBlock, finishBlock, err
+	return version, startBlock, finishBlock, err
 }

+// GetParentBatchHashFromCalldata gets the parent batch hash from calldata.
+// It only supports commitBatches and commitAndFinalizeBatch, which only accept batches >= v7.
+func GetParentBatchHashFromCalldata(txData []byte) (common.Hash, error) {
+	const methodIDLength = 4
+	if len(txData) < methodIDLength {
+		return common.Hash{}, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
+	}
+	method, err := backendabi.IScrollChainABI.MethodById(txData[:methodIDLength])
+	if err != nil {
+		return common.Hash{}, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err)
+	}
+	values, err := method.Inputs.Unpack(txData[methodIDLength:])
+	if err != nil {
+		return common.Hash{}, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
+	}
+
+	if method.Name == "commitBatches" || method.Name == "commitAndFinalizeBatch" {
+		if len(values) < 3 {
+			return common.Hash{}, fmt.Errorf("insufficient arguments for %s, expected 3, got %d", method.Name, len(values))
+		}
+		parentBatchHash, ok := values[1].([32]byte)
+		if !ok {
+			return common.Hash{}, fmt.Errorf("invalid parentBatchHash type: %T", values[1])
+		}
+		return common.BytesToHash(parentBatchHash[:]), nil
+	}
+	return common.Hash{}, fmt.Errorf("method %s does not support parent batch header", method.Name)
+}

 // GetBlocksInRange gets a batch of blocks for a block range [start, end] inclusive.
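A self-contained sketch of the pre-v7 chunk layout the comment above describes (one leading count byte, then 60-byte block contexts whose first 8 bytes hold the big-endian block number). parseChunkBlockRange is a hypothetical standalone version of the inner logic of GetBatchVersionAndBlockRangeFromCalldata, with bounds checks added for illustration:

package sketch

import (
	"encoding/binary"
	"errors"
)

const blockContextSize = 60 // fixed-size block context in pre-v7 chunks

// parseChunkBlockRange returns the first and last block numbers encoded in a
// chunk: byte 0 is the block count, followed by one 60-byte context per
// block, whose leading 8 bytes are the big-endian block number.
func parseChunkBlockRange(chunk []byte) (startBlock, finishBlock uint64, err error) {
	if len(chunk) < 1 {
		return 0, 0, errors.New("empty chunk")
	}
	numBlocks := int(chunk[0])
	if numBlocks == 0 || len(chunk) < 1+numBlocks*blockContextSize {
		return 0, 0, errors.New("truncated chunk")
	}
	first := chunk[1 : 1+blockContextSize]
	last := chunk[1+(numBlocks-1)*blockContextSize:]
	return binary.BigEndian.Uint64(first[:8]), binary.BigEndian.Uint64(last[:8]), nil
}

For v7+ batches this layout no longer appears in calldata at all, which is why the commitBatches branch above returns (version, 0, 0) and the block range is recovered from the blob instead.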
File diff suppressed because one or more lines are too long
build/.cargo/config.toml (new file, 6 lines)
@@ -0,0 +1,6 @@
+[patch."https://github.com/scroll-tech/scroll.git"]
+scroll-zkvm-circuit-input-types-base = { path = "../common/types-rs/base"}
+scroll-zkvm-circuit-input-types-aggregation = { path = "../common/types-rs/aggregation"}
+scroll-zkvm-circuit-input-types-chunk = { path = "../common/types-rs/chunk"}
+scroll-zkvm-circuit-input-types-batch = { path = "../common/types-rs/batch"}
+scroll-zkvm-circuit-input-types-bundle = { path = "../common/types-rs/bundle"}
@@ -91,7 +91,7 @@ linters-settings:
     #local-prefixes: github.com/org/project
   gocyclo:
     # minimal code complexity to report, 30 by default (but we recommend 10-20)
-    min-complexity: 30
+    min-complexity: 40
   maligned:
     # print struct with more effective memory layout or not, false by default
     suggest-new: true
@@ -254,6 +254,9 @@ issues:
     - linters:
         - wsl
      text: "expressions should not be cuddled with declarations or returns"
+    - linters:
+        - govet
+      text: 'shadow: declaration of "(err|ctx)" shadows declaration at'

  # Independently from option `exclude` we use default exclude patterns,
  # it can be disabled by this option. To list all
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
+FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base

 WORKDIR /src
 COPY go.mod* ./
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM golang:1.21-alpine3.19 as base
+FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base

 WORKDIR /src
 COPY ./bridge-history-api/go.* ./
@@ -10,10 +10,11 @@ FROM base as builder

 RUN --mount=target=. \
     --mount=type=cache,target=/root/.cache/go-build \
-    cd /src/bridge-history-api/cmd/db_cli && go build -v -p 4 -o /bin/db_cli
+    cd /src/bridge-history-api/cmd/db_cli && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/db_cli

-# Pull db_cli into a second stage deploy alpine container
-FROM alpine:latest
+# Pull db_cli into a second stage deploy ubuntu container
+FROM ubuntu:20.04
+ENV CGO_LDFLAGS="-ldl"
 COPY --from=builder /bin/db_cli /bin/
 WORKDIR /app
 ENTRYPOINT ["db_cli"]
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
+FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base

 WORKDIR /src
 COPY go.mod* ./
@@ -1,5 +1,5 @@
 # Build libzkp dependency
-FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as chef
+FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as chef
 WORKDIR app

 FROM chef as planner
@@ -9,6 +9,10 @@ RUN cargo chef prepare --recipe-path recipe.json
 FROM chef as zkp-builder
 COPY ./common/libzkp/impl/rust-toolchain ./
 COPY --from=planner /app/recipe.json recipe.json
+# run ./build/dockerfiles/coordinator-api/init-openvm.sh to get openvm-gpu
+COPY ./build/dockerfiles/coordinator-api/openvm-gpu /openvm-gpu
+COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
+COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
 RUN cargo chef cook --release --recipe-path recipe.json

 COPY ./common/libzkp/impl .
build/dockerfiles/coordinator-api/config.toml (new file, 24 lines)
@@ -0,0 +1,24 @@
+# openvm
+# same order and features as zkvm-prover/Cargo.toml.gpu
+[patch."ssh://git@github.com/scroll-tech/openvm-gpu.git"]
+openvm = { path = "/openvm-gpu/crates/toolchain/openvm", default-features = false }
+openvm-algebra-complex-macros = { path = "/openvm-gpu/extensions/algebra/complex-macros", default-features = false }
+openvm-algebra-guest = { path = "/openvm-gpu/extensions/algebra/guest", default-features = false }
+openvm-bigint-guest = { path = "/openvm-gpu/extensions/bigint/guest", default-features = false }
+openvm-build = { path = "/openvm-gpu/crates/toolchain/build", default-features = false }
+openvm-circuit = { path = "/openvm-gpu/crates/vm", default-features = false }
+openvm-custom-insn = { path = "/openvm-gpu/crates/toolchain/custom_insn", default-features = false }
+openvm-continuations = { path = "/openvm-gpu/crates/continuations", default-features = false }
+openvm-ecc-guest = { path = "/openvm-gpu/extensions/ecc/guest", default-features = false }
+openvm-instructions ={ path = "/openvm-gpu/crates/toolchain/instructions", default-features = false }
+openvm-keccak256-guest = { path = "/openvm-gpu/extensions/keccak256/guest", default-features = false }
+openvm-native-circuit = { path = "/openvm-gpu/extensions/native/circuit", default-features = false }
+openvm-native-compiler = { path = "/openvm-gpu/extensions/native/compiler", default-features = false }
+openvm-native-recursion = { path = "/openvm-gpu/extensions/native/recursion", default-features = false }
+openvm-native-transpiler = { path = "/openvm-gpu/extensions/native/transpiler", default-features = false }
+openvm-pairing-guest = { path = "/openvm-gpu/extensions/pairing/guest", default-features = false }
+openvm-rv32im-guest = { path = "/openvm-gpu/extensions/rv32im/guest", default-features = false }
+openvm-rv32im-transpiler = { path = "/openvm-gpu/extensions/rv32im/transpiler", default-features = false }
+openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics"] }
+openvm-sha256-guest = { path = "/openvm-gpu/extensions/sha256/guest", default-features = false }
+openvm-transpiler = { path = "/openvm-gpu/crates/toolchain/transpiler", default-features = false }
build/dockerfiles/coordinator-api/gitconfig (new file, 2 lines)
@@ -0,0 +1,2 @@
+[url "https://github.com/"]
+	insteadOf = ssh://git@github.com/
build/dockerfiles/coordinator-api/init-openvm.sh (new executable file, 12 lines)
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -uex
+
+OPENVM_GPU_COMMIT=dfa10b4
+
+DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
+
+# checkout openvm-gpu
+if [ ! -d $DIR/openvm-gpu ]; then
+    git clone git@github.com:scroll-tech/openvm-gpu.git $DIR/openvm-gpu
+fi
+cd $DIR/openvm-gpu && git fetch && git checkout ${OPENVM_GPU_COMMIT}
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
+FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base

 WORKDIR /src
 COPY go.work* ./
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM scrolltech/go-alpine-builder:1.21 as base
+FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base

 WORKDIR /src
 COPY go.work* ./
@@ -16,10 +16,11 @@ FROM base as builder

 RUN --mount=target=. \
     --mount=type=cache,target=/root/.cache/go-build \
-    cd /src/database/cmd && go build -v -p 4 -o /bin/db_cli
+    cd /src/database/cmd && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/db_cli

-# Pull db_cli into a second stage deploy alpine container
-FROM alpine:latest
+# Pull db_cli into a second stage deploy ubuntu container
+FROM ubuntu:20.04
+ENV CGO_LDFLAGS="-ldl"
 COPY --from=builder /bin/db_cli /bin/
 WORKDIR /app
 ENTRYPOINT ["db_cli"]
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
+FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base

 WORKDIR /src
 COPY go.work* ./
@@ -1,5 +1,5 @@
 ARG CUDA_VERSION=11.7.1
-ARG GO_VERSION=1.21
+ARG GO_VERSION=1.22.12
 ARG RUST_VERSION=nightly-2023-12-03
 ARG CARGO_CHEF_TAG=0.1.41

@@ -36,7 +36,7 @@ RUN if [ "$(uname -m)" = "x86_64" ]; then \
     else \
         echo "Unsupported architecture"; exit 1; \
     fi
-RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
-RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
-RUN rm go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
+RUN wget https://go.dev/dl/go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
+RUN tar -C /usr/local -xzf go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
+RUN rm go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
 ENV PATH="/usr/local/go/bin:${PATH}"
@@ -1,4 +1,4 @@
-ARG GO_VERSION=1.21
+ARG GO_VERSION=1.22.12
 ARG RUST_VERSION=nightly-2023-12-03
 ARG CARGO_CHEF_TAG=0.1.41

@@ -32,7 +32,7 @@ RUN if [ "$(uname -m)" = "x86_64" ]; then \
     else \
         echo "Unsupported architecture"; exit 1; \
     fi
-RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
-RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
-RUN rm go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
+RUN wget https://go.dev/dl/go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
+RUN tar -C /usr/local -xzf go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
+RUN rm go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
 ENV PATH="/usr/local/go/bin:${PATH}"
build/dockerfiles/prover.Dockerfile (new file, 23 lines)
@@ -0,0 +1,23 @@
+FROM ubuntu:24.04 AS builder
+
+RUN apt-get update -y && apt-get upgrade -y
+
+# Install basic packages
+RUN apt-get install build-essential curl wget git pkg-config -y
+# Install dev-packages
+RUN apt-get install libclang-dev libssl-dev llvm -y
+
+# Install Rust
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+ENV PATH="/root/.cargo/bin:${PATH}"
+ENV CARGO_HOME=/root/.cargo
+
+COPY . /src
+
+RUN cd /src/zkvm-prover && make prover
+
+FROM ubuntu:24.04 AS runtime
+
+COPY --from=builder /src/zkvm-prover/target/release/prover /usr/local/bin/
+
+ENTRYPOINT ["prover"]
build/dockerfiles/prover.dockerignore (new file, 5 lines)
@@ -0,0 +1,5 @@
+assets/
+docs/
+l2geth/
+rpc-gateway/
+*target/*
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
+FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base

 WORKDIR /src
 COPY go.work* ./
@@ -1,56 +0,0 @@
-package forks
-
-import (
-	"math/big"
-
-	"github.com/scroll-tech/da-codec/encoding"
-	"github.com/scroll-tech/go-ethereum/params"
-)
-
-// GetHardforkName returns the name of the hardfork active at the given block height and timestamp.
-// It checks the chain configuration to determine which hardfork is active.
-func GetHardforkName(config *params.ChainConfig, blockHeight, blockTimestamp uint64) string {
-	if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) {
-		return "homestead"
-	} else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) {
-		return "bernoulli"
-	} else if !config.IsDarwin(blockTimestamp) {
-		return "curie"
-	} else if !config.IsDarwinV2(blockTimestamp) {
-		return "darwin"
-	} else {
-		return "darwinV2"
-	}
-}
-
-// GetCodecVersion returns the encoding codec version for the given block height and timestamp.
-// It determines the appropriate codec version based on the active hardfork.
-func GetCodecVersion(config *params.ChainConfig, blockHeight, blockTimestamp uint64) encoding.CodecVersion {
-	if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) {
-		return encoding.CodecV0
-	} else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) {
-		return encoding.CodecV1
-	} else if !config.IsDarwin(blockTimestamp) {
-		return encoding.CodecV2
-	} else if !config.IsDarwinV2(blockTimestamp) {
-		return encoding.CodecV3
-	} else {
-		return encoding.CodecV4
-	}
-}
-
-// GetMaxChunksPerBatch returns the maximum number of chunks allowed per batch for the given block height and timestamp.
-// This value may change depending on the active hardfork.
-func GetMaxChunksPerBatch(config *params.ChainConfig, blockHeight, blockTimestamp uint64) uint64 {
-	if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) {
-		return 15
-	} else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) {
-		return 15
-	} else if !config.IsDarwin(blockTimestamp) {
-		return 45
-	} else if !config.IsDarwinV2(blockTimestamp) {
-		return 45
-	} else {
-		return 45
-	}
-}
@@ -1,10 +1,12 @@
 module scroll-tech/common

-go 1.21
+go 1.22
+
+toolchain go1.22.2

 require (
 	github.com/Masterminds/semver/v3 v3.2.1
-	github.com/bits-and-blooms/bitset v1.13.0
+	github.com/bits-and-blooms/bitset v1.20.0
 	github.com/docker/docker v26.1.0+incompatible
 	github.com/gin-contrib/pprof v1.4.0
 	github.com/gin-gonic/gin v1.9.1
@@ -13,9 +15,8 @@ require (
 	github.com/modern-go/reflect2 v1.0.2
 	github.com/orcaman/concurrent-map v1.0.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
-	github.com/stretchr/testify v1.9.0
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
+	github.com/stretchr/testify v1.10.0
 	github.com/testcontainers/testcontainers-go v0.30.0
 	github.com/testcontainers/testcontainers-go/modules/compose v0.30.0
 	github.com/testcontainers/testcontainers-go/modules/postgres v0.30.0
@@ -31,7 +32,7 @@ require (
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/Microsoft/hcsshim v0.11.4 // indirect
-	github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
+	github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
 	github.com/aws/aws-sdk-go-v2 v1.21.2 // indirect
 	github.com/aws/aws-sdk-go-v2/config v1.18.45 // indirect
 	github.com/aws/aws-sdk-go-v2/credentials v1.13.43 // indirect
@@ -54,18 +55,18 @@ require (
 	github.com/chenzhuoyu/iasm v0.9.0 // indirect
 	github.com/cloudflare/cfssl v1.6.5 // indirect
 	github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60 // indirect
-	github.com/consensys/bavard v0.1.13 // indirect
-	github.com/consensys/gnark-crypto v0.12.1 // indirect
+	github.com/consensys/bavard v0.1.29 // indirect
+	github.com/consensys/gnark-crypto v0.16.0 // indirect
 	github.com/containerd/console v1.0.3 // indirect
 	github.com/containerd/containerd v1.7.12 // indirect
 	github.com/containerd/continuity v0.4.2 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/containerd/typeurl/v2 v2.1.1 // indirect
 	github.com/cpuguy83/dockercfg v0.3.1 // indirect
-	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
-	github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
+	github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
+	github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/deckarep/golang-set v1.8.0 // indirect
+	github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
 	github.com/distribution/reference v0.5.0 // indirect
 	github.com/docker/buildx v0.12.0-rc2.0.20231219140829-617f538cb315 // indirect
 	github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible // indirect
@@ -78,7 +79,7 @@ require (
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/edsrzf/mmap-go v1.0.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.10.1 // indirect
-	github.com/ethereum/c-kzg-4844 v1.0.2 // indirect
+	github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fjl/memsize v0.0.2 // indirect
 	github.com/fsnotify/fsevents v0.1.1 // indirect
@@ -118,9 +119,9 @@ require (
 	github.com/hashicorp/go-version v1.6.0 // indirect
 	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
 	github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
-	github.com/holiman/uint256 v1.2.4 // indirect
+	github.com/holiman/uint256 v1.3.2 // indirect
 	github.com/huin/goupnp v1.3.0 // indirect
-	github.com/iden3/go-iden3-crypto v0.0.16 // indirect
+	github.com/iden3/go-iden3-crypto v0.0.17 // indirect
 	github.com/imdario/mergo v0.3.16 // indirect
 	github.com/in-toto/in-toto-golang v0.5.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -135,12 +136,12 @@ require (
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
-	github.com/klauspost/compress v1.17.4 // indirect
+	github.com/klauspost/compress v1.17.9 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
 	github.com/leodido/go-urn v1.2.4 // indirect
 	github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
 	github.com/magiconair/properties v1.8.7 // indirect
-	github.com/mailru/easyjson v0.7.6 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-runewidth v0.0.15 // indirect
 	github.com/mattn/go-shellwords v1.0.12 // indirect
 	github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
@@ -183,6 +184,7 @@ require (
 	github.com/rjeczalik/notify v0.9.1 // indirect
 	github.com/rs/cors v1.7.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 // indirect
 	github.com/scroll-tech/zktrie v0.8.4 // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
 	github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
@@ -191,16 +193,17 @@ require (
 	github.com/shirou/gopsutil/v3 v3.23.12 // indirect
 	github.com/shoenig/go-m1cpu v0.1.6 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
-	github.com/spf13/cobra v1.8.0 // indirect
+	github.com/sourcegraph/conc v0.3.0 // indirect
+	github.com/spf13/cobra v1.8.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/spf13/viper v1.4.0 // indirect
 	github.com/status-im/keycard-go v0.2.0 // indirect
-	github.com/supranational/blst v0.3.12 // indirect
+	github.com/supranational/blst v0.3.13 // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
 	github.com/theupdateframework/notary v0.7.0 // indirect
 	github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 // indirect
 	github.com/tklauser/go-sysconf v0.3.14 // indirect
-	github.com/tklauser/numcpus v0.8.0 // indirect
+	github.com/tklauser/numcpus v0.9.0 // indirect
 	github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 // indirect
 	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
 	github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 // indirect
@@ -228,17 +231,19 @@ require (
 	go.opentelemetry.io/otel/sdk/metric v1.19.0 // indirect
 	go.opentelemetry.io/otel/trace v1.24.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.0.0 // indirect
 	go.uber.org/atomic v1.7.0 // indirect
+	go.uber.org/mock v0.4.0 // indirect
 	go.uber.org/multierr v1.9.0 // indirect
 	golang.org/x/arch v0.5.0 // indirect
-	golang.org/x/crypto v0.24.0 // indirect
+	golang.org/x/crypto v0.32.0 // indirect
 	golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
 	golang.org/x/mod v0.17.0 // indirect
 	golang.org/x/net v0.25.0 // indirect
 	golang.org/x/oauth2 v0.16.0 // indirect
-	golang.org/x/sync v0.7.0 // indirect
-	golang.org/x/sys v0.21.0 // indirect
-	golang.org/x/term v0.21.0 // indirect
-	golang.org/x/text v0.16.0 // indirect
+	golang.org/x/sync v0.11.0 // indirect
+	golang.org/x/sys v0.30.0 // indirect
+	golang.org/x/term v0.28.0 // indirect
+	golang.org/x/text v0.21.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
 	golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
 	google.golang.org/appengine v1.6.7 // indirect
common/go.sum (108 lines changed)
@@ -27,9 +27,11 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
 github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
-github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
-github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
+github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
+github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
 github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
+github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
+github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
@@ -70,8 +72,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
 github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
-github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
-github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
+github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
+github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
 github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
 github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
@@ -119,10 +121,10 @@ github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUo
 github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
 github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60 h1:NlkpaLBPFr05mNJWVMH7PP4L30gFG6k4z1QpypLUSh8=
 github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60/go.mod h1:bEPizBkIojlQ20pi2vNluBa58tevvj0Y18oUSHPyfdc=
-github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
-github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
-github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
-github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
+github.com/consensys/bavard v0.1.29 h1:fobxIYksIQ+ZSrTJUuQgu+HIJwclrAPcdXqd7H2hh1k=
+github.com/consensys/bavard v0.1.29/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
+github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo=
+github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
 github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
 github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
 github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
@@ -151,10 +153,10 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E=
 github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
-github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
-github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
-github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
+github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
+github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
@@ -163,8 +165,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
-github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
+github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
+github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
 github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
@@ -212,8 +214,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
 github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
 github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
 github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
-github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s=
-github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
+github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
+github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
@@ -379,13 +381,13 @@ github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u
 github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
 github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
 github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
-github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
-github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
+github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
+github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
 github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
-github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk=
-github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
+github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
+github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
 github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
 github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
 github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY=
@@ -434,8 +436,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
-github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
-github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
 github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@@ -453,8 +455,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
-github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
+github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
+github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
 github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
 github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
 github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
@@ -469,8 +471,9 @@ github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0V
 github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
-github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
@@ -627,16 +630,16 @@ github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRr
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
 github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb h1:uOKdmDT0LsuS3gfynEjR4zA3Ooh6p2Z3O+IMRj2r8LA=
-github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
+github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
+github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
@@ -660,6 +663,8 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spdx/tools-golang v0.5.1 h1:fJg3SVOGG+eIva9ZUBm/hvyA7PIPVFjRxUKe6fdAgwE=
 github.com/spdx/tools-golang v0.5.1/go.mod h1:/DRDQuBfB37HctM29YtrX1v+bXiVmT2OpQDalRmX9aU=
@@ -669,8 +674,8 @@ github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IEx
 github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
-github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
 github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
@@ -700,10 +705,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8=
-github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
+github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
 github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E=
@@ -720,8 +725,8 @@ github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0h
 github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
 github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
 github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
-github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
-github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
+github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo=
+github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 h1:ZT8ibgassurSISJ1Pj26NsM3vY2jxFZn63Nd/TpHmRw=
 github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302/go.mod h1:9kMVqMyQ/Sx2df5LtnGG+nbrmiZzCS7V6gjW3oGHsvI=
@@ -801,11 +806,15 @@ go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw
 go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
 go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
+go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
 go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y=
|
||||
@@ -820,8 +829,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
|
||||
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
|
||||
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
|
||||
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
|
||||
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
|
||||
@@ -864,8 +873,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
|
||||
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -905,22 +914,23 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
|
||||
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
|
||||
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
|
||||
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
|
||||
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
||||
common/libzkp/impl/Cargo.lock (generated, 7094 lines changed): file diff suppressed because it is too large
@@ -7,37 +7,19 @@ edition = "2021"
[lib]
crate-type = ["cdylib"]

[patch.crates-io]
gobuild = { git = "https://github.com/scroll-tech/gobuild.git" }
halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }
[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }

[dependencies]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
scroll-zkvm-prover.workspace = true
scroll-zkvm-verifier.workspace = true

# darwin
prover_v4 = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.12.2", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
# darwin_v2
prover_v5 = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.13.1", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }

base64 = "0.13.0"
env_logger = "0.9.0"
env_logger = "0.11.0"
libc = "0.2"
log = "0.4"
once_cell = "1.19"
serde = "1.0"
base64.workspace = true
once_cell.workspace = true
serde.workspace = true
serde_derive = "1.0"
serde_json = "1.0.66"
anyhow = "1.0.86"
serde_json.workspace = true
anyhow = "1"

[profile.test]
opt-level = 3

@@ -1 +1 @@
nightly-2023-12-03
nightly-2024-12-06

@@ -1,16 +1,15 @@
mod utils;
mod verifier;

use std::path::Path;

use crate::utils::{c_char_to_str, c_char_to_vec};
use libc::c_char;
use prover_v5::utils::init_env_and_log;
use verifier::{TaskType, VerifierConfig};

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init(config: *const c_char) {
    init_env_and_log("ffi_init");

    let config_str = c_char_to_str(config);
    let verifier_config = serde_json::from_str::<VerifierConfig>(config_str).unwrap();
    verifier::init(verifier_config);
@@ -26,9 +25,8 @@ pub unsafe extern "C" fn verify_chunk_proof(
}

fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
    let proof = c_char_to_vec(proof);

    let fork_name_str = c_char_to_str(fork_name);
    let proof = c_char_to_vec(proof);
    let verifier = verifier::get_verifier(fork_name_str);

    if let Err(e) = verifier {
@@ -61,3 +59,18 @@ pub unsafe extern "C" fn verify_bundle_proof(
) -> c_char {
    verify_proof(proof, fork_name, TaskType::Bundle)
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn dump_vk(fork_name: *const c_char, file: *const c_char) {
    _dump_vk(fork_name, file);
}

fn _dump_vk(fork_name: *const c_char, file: *const c_char) {
    let fork_name_str = c_char_to_str(fork_name);
    let verifier = verifier::get_verifier(fork_name_str);

    if let Ok(verifier) = verifier {
        verifier.as_ref().dump_vk(Path::new(c_char_to_str(file)));
    }
}

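// Illustrative sketch (not part of the committed diff): how a host could
// drive the exported C ABI above from Rust. The signatures mirror the ones
// declared in the C header hunk further below; the assumption that a
// non-zero return means "proof accepted" is mine, since the encoding is not
// shown in these hunks.
//
// use std::ffi::CString;
// use std::os::raw::c_char;
//
// extern "C" {
//     fn init(config: *const c_char);
//     fn verify_bundle_proof(proof: *const c_char, fork_name: *const c_char) -> c_char;
// }
//
// fn host_verify(config_json: &str, proof_json: &str, fork: &str) -> bool {
//     // CString supplies the NUL terminator the C side expects.
//     let config = CString::new(config_json).expect("no interior NUL");
//     let proof = CString::new(proof_json).expect("no interior NUL");
//     let fork = CString::new(fork).expect("no interior NUL");
//     unsafe {
//         init(config.as_ptr());
//         verify_bundle_proof(proof.as_ptr(), fork.as_ptr()) != 0
//     }
// }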
@@ -1,13 +1,13 @@
mod darwin;
mod darwin_v2;
#![allow(static_mut_refs)]

mod euclid;
mod euclidv2;

use anyhow::{bail, Result};
use darwin::DarwinVerifier;
use darwin_v2::DarwinV2Verifier;
use halo2_proofs::{halo2curves::bn256::Bn256, poly::kzg::commitment::ParamsKZG};
use prover_v4::utils::load_params;
use euclid::EuclidVerifier;
use euclidv2::EuclidV2Verifier;
use serde::{Deserialize, Serialize};
use std::{cell::OnceCell, collections::BTreeMap, rc::Rc};
use std::{cell::OnceCell, path::Path, rc::Rc};

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum TaskType {
@@ -16,8 +16,16 @@ pub enum TaskType {
    Bundle,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct VKDump {
    pub chunk_vk: String,
    pub batch_vk: String,
    pub bundle_vk: String,
}

pub trait ProofVerifier {
    fn verify(&self, task_type: TaskType, proof: Vec<u8>) -> Result<bool>;
    fn dump_vk(&self, file: &Path);
}

#[derive(Debug, Serialize, Deserialize)]
@@ -37,55 +45,25 @@ type HardForkName = String;

struct VerifierPair(HardForkName, Rc<Box<dyn ProofVerifier>>);

static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();
static mut VERIFIER_LOW: OnceCell<VerifierPair> = OnceCell::new();
static mut PARAMS_MAP: OnceCell<BTreeMap<u32, ParamsKZG<Bn256>>> = OnceCell::new();
static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();

pub fn init(config: VerifierConfig) {
    let low_conf = config.low_version_circuit;

    std::env::set_var("SCROLL_PROVER_ASSETS_DIR", &low_conf.assets_path);
    let params_degrees = [
        *prover_v4::config::LAYER2_DEGREE,
        *prover_v4::config::LAYER4_DEGREE,
    ];

    // params should be shared between low and high
    let mut params_map = BTreeMap::new();
    for degree in params_degrees {
        if let std::collections::btree_map::Entry::Vacant(e) = params_map.entry(degree) {
            match load_params(&low_conf.params_path, degree, None) {
                Ok(params) => {
                    e.insert(params);
                }
                Err(e) => panic!(
                    "failed to load params, degree {}, dir {}, err {}",
                    degree, low_conf.params_path, e
                ),
            }
        }
    }
    unsafe {
        PARAMS_MAP.set(params_map).unwrap_unchecked();
    }

    let verifier = DarwinVerifier::new(unsafe { PARAMS_MAP.get().unwrap() }, &low_conf.assets_path);

    let verifier = EuclidVerifier::new(&config.high_version_circuit.assets_path);
    unsafe {
        VERIFIER_LOW
            .set(VerifierPair(
                low_conf.fork_name,
                "euclid".to_string(),
                Rc::new(Box::new(verifier)),
            ))
            .unwrap_unchecked();
    }
    let high_conf = config.high_version_circuit;
    let verifier =
        DarwinV2Verifier::new(unsafe { PARAMS_MAP.get().unwrap() }, &high_conf.assets_path);

    let verifier = EuclidV2Verifier::new(&config.high_version_circuit.assets_path);
    unsafe {
        VERIFIER_HIGH
            .set(VerifierPair(
                high_conf.fork_name,
                "euclidV2".to_string(),
                Rc::new(Box::new(verifier)),
            ))
            .unwrap_unchecked();

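// Illustrative sketch (not part of the committed diff): the FFI layer above
// calls verifier::get_verifier(fork_name), whose body falls outside the
// hunks shown here. Given the VerifierPair registrations above, a plausible
// (hypothetical) shape is the following; the real implementation may differ.
//
// pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
//     unsafe {
//         for pair in [VERIFIER_LOW.get(), VERIFIER_HIGH.get()].into_iter().flatten() {
//             if pair.0 == fork_name {
//                 return Ok(pair.1.clone());
//             }
//         }
//     }
//     bail!("unknown hard fork name: {fork_name}")
// }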
@@ -1,48 +0,0 @@
use super::{ProofVerifier, TaskType};

use anyhow::Result;
use halo2_proofs::{halo2curves::bn256::Bn256, poly::kzg::commitment::ParamsKZG};

use crate::utils::panic_catch;
use prover_v4::{
    aggregator::Verifier as AggVerifier, zkevm::Verifier, BatchProof, BundleProof, ChunkProof,
};
use std::{collections::BTreeMap, env};

pub struct DarwinVerifier<'params> {
    verifier: Verifier<'params>,
    agg_verifier: AggVerifier<'params>,
}

impl<'params> DarwinVerifier<'params> {
    pub fn new(params_map: &'params BTreeMap<u32, ParamsKZG<Bn256>>, assets_dir: &str) -> Self {
        env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
        let verifier = Verifier::from_params_and_assets(params_map, assets_dir);
        let agg_verifier = AggVerifier::from_params_and_assets(params_map, assets_dir);

        Self {
            verifier,
            agg_verifier,
        }
    }
}

impl<'params> ProofVerifier for DarwinVerifier<'params> {
    fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
        let result = panic_catch(|| match task_type {
            TaskType::Chunk => {
                let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
                self.verifier.verify_chunk_proof(proof)
            }
            TaskType::Batch => {
                let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
                self.agg_verifier.verify_batch_proof(&proof)
            }
            TaskType::Bundle => {
                let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
                self.agg_verifier.verify_bundle_proof(proof)
            }
        });
        result.map_err(|e| anyhow::anyhow!(e))
    }
}
@@ -1,48 +0,0 @@
use super::{ProofVerifier, TaskType};

use anyhow::Result;
use halo2_proofs::{halo2curves::bn256::Bn256, poly::kzg::commitment::ParamsKZG};

use crate::utils::panic_catch;
use prover_v5::{
    aggregator::Verifier as AggVerifier, zkevm::Verifier, BatchProof, BundleProof, ChunkProof,
};
use std::{collections::BTreeMap, env};

pub struct DarwinV2Verifier<'params> {
    verifier: Verifier<'params>,
    agg_verifier: AggVerifier<'params>,
}

impl<'params> DarwinV2Verifier<'params> {
    pub fn new(params_map: &'params BTreeMap<u32, ParamsKZG<Bn256>>, assets_dir: &str) -> Self {
        env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
        let verifier = Verifier::from_params_and_assets(params_map, assets_dir);
        let agg_verifier = AggVerifier::from_params_and_assets(params_map, assets_dir);

        Self {
            verifier,
            agg_verifier,
        }
    }
}

impl<'params> ProofVerifier for DarwinV2Verifier<'params> {
    fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
        let result = panic_catch(|| match task_type {
            TaskType::Chunk => {
                let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
                self.verifier.verify_chunk_proof(proof)
            }
            TaskType::Batch => {
                let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
                self.agg_verifier.verify_batch_proof(&proof)
            }
            TaskType::Bundle => {
                let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
                self.agg_verifier.verify_bundle_proof(proof)
            }
        });
        result.map_err(|e| anyhow::anyhow!(e))
    }
}
common/libzkp/impl/src/verifier/euclid.rs (new file, 66 lines)
@@ -0,0 +1,66 @@
use super::{ProofVerifier, TaskType, VKDump};

use anyhow::Result;

use crate::utils::panic_catch;
use scroll_zkvm_prover::{BatchProof, BundleProof, ChunkProof};
use scroll_zkvm_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV1, ChunkVerifier};
use std::{fs::File, path::Path};

pub struct EuclidVerifier {
    chunk_verifier: ChunkVerifier,
    batch_verifier: BatchVerifier,
    bundle_verifier: BundleVerifierEuclidV1,
}

impl EuclidVerifier {
    pub fn new(assets_dir: &str) -> Self {
        let verifier_bin = Path::new(assets_dir).join("verifier.bin");
        let config = Path::new(assets_dir).join("root-verifier-vm-config");
        let exe = Path::new(assets_dir).join("root-verifier-committed-exe");

        Self {
            chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
                .expect("Setting up chunk verifier"),
            batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
                .expect("Setting up batch verifier"),
            bundle_verifier: BundleVerifierEuclidV1::setup(&config, &exe, &verifier_bin)
                .expect("Setting up bundle verifier"),
        }
    }
}

impl ProofVerifier for EuclidVerifier {
    fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
        panic_catch(|| match task_type {
            TaskType::Chunk => {
                let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
                self.chunk_verifier
                    .verify_proof(proof.proof.as_root_proof().unwrap())
            }
            TaskType::Batch => {
                let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
                self.batch_verifier
                    .verify_proof(proof.proof.as_root_proof().unwrap())
            }
            TaskType::Bundle => {
                let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
                self.bundle_verifier
                    .verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
            }
        })
        .map_err(|err_str: String| anyhow::anyhow!(err_str))
    }

    fn dump_vk(&self, file: &Path) {
        use base64::{prelude::BASE64_STANDARD, Engine};
        let f = File::create(file).expect("Failed to open file to dump VK");

        let dump = VKDump {
            chunk_vk: BASE64_STANDARD.encode(self.chunk_verifier.get_app_vk()),
            batch_vk: BASE64_STANDARD.encode(self.batch_verifier.get_app_vk()),
            bundle_vk: BASE64_STANDARD.encode(self.bundle_verifier.get_app_vk()),
        };
        serde_json::to_writer(f, &dump).expect("Failed to dump VK");
    }
}
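// Illustrative sketch (not part of the committed diff): reading back the
// JSON written by `dump_vk` above from within this crate. The file is a
// `VKDump` object whose fields are base64-encoded app verifying keys; the
// helper name `read_vk_dump` is hypothetical.
//
// fn read_vk_dump(path: &std::path::Path) -> anyhow::Result<(Vec<u8>, Vec<u8>, Vec<u8>)> {
//     use base64::{prelude::BASE64_STANDARD, Engine};
//     let dump: super::VKDump = serde_json::from_reader(std::fs::File::open(path)?)?;
//     Ok((
//         BASE64_STANDARD.decode(dump.chunk_vk)?,
//         BASE64_STANDARD.decode(dump.batch_vk)?,
//         BASE64_STANDARD.decode(dump.bundle_vk)?,
//     ))
// }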
common/libzkp/impl/src/verifier/euclidv2.rs (new file, 66 lines)
@@ -0,0 +1,66 @@
use super::{ProofVerifier, TaskType, VKDump};

use anyhow::Result;

use crate::utils::panic_catch;
use scroll_zkvm_prover::{BatchProof, BundleProof, ChunkProof};
use scroll_zkvm_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
use std::{fs::File, path::Path};

pub struct EuclidV2Verifier {
    chunk_verifier: ChunkVerifier,
    batch_verifier: BatchVerifier,
    bundle_verifier: BundleVerifierEuclidV2,
}

impl EuclidV2Verifier {
    pub fn new(assets_dir: &str) -> Self {
        let verifier_bin = Path::new(assets_dir).join("verifier.bin");
        let config = Path::new(assets_dir).join("root-verifier-vm-config");
        let exe = Path::new(assets_dir).join("root-verifier-committed-exe");

        Self {
            chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
                .expect("Setting up chunk verifier"),
            batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
                .expect("Setting up batch verifier"),
            bundle_verifier: BundleVerifierEuclidV2::setup(&config, &exe, &verifier_bin)
                .expect("Setting up bundle verifier"),
        }
    }
}

impl ProofVerifier for EuclidV2Verifier {
    fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
        panic_catch(|| match task_type {
            TaskType::Chunk => {
                let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
                self.chunk_verifier
                    .verify_proof(proof.proof.as_root_proof().unwrap())
            }
            TaskType::Batch => {
                let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
                self.batch_verifier
                    .verify_proof(proof.proof.as_root_proof().unwrap())
            }
            TaskType::Bundle => {
                let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
                self.bundle_verifier
                    .verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
            }
        })
        .map_err(|err_str: String| anyhow::anyhow!(err_str))
    }

    fn dump_vk(&self, file: &Path) {
        use base64::{prelude::BASE64_STANDARD, Engine};
        let f = File::create(file).expect("Failed to open file to dump VK");

        let dump = VKDump {
            chunk_vk: BASE64_STANDARD.encode(self.chunk_verifier.get_app_vk()),
            batch_vk: BASE64_STANDARD.encode(self.batch_verifier.get_app_vk()),
            bundle_vk: BASE64_STANDARD.encode(self.bundle_verifier.get_app_vk()),
        };
        serde_json::to_writer(f, &dump).expect("Failed to dump VK");
    }
}
@@ -8,3 +8,5 @@ char verify_batch_proof(char* proof, char* fork_name);
char verify_bundle_proof(char* proof, char* fork_name);

char verify_chunk_proof(char* proof, char* fork_name);

void dump_vk(char* fork_name, char* file);

@@ -195,7 +195,7 @@ func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
	return endpoint, nil
}

// GetL2GethEndPoint returns the endpoint of the running L2Geth container
// GetWeb3SignerEndpoint returns the endpoint of the running Web3Signer container
func (t *TestcontainerApps) GetWeb3SignerEndpoint() (string, error) {
	if t.web3SignerContainer == nil || !t.web3SignerContainer.IsRunning() {
		return "", errors.New("web3signer is not running")

common/types-rs/Cargo.toml (new file, 17 lines)
@@ -0,0 +1,17 @@
[package]
name = "scroll-zkvm-circuit-input-types"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"

[dependencies]
types-base = { path = "base", package = "scroll-zkvm-circuit-input-types-base"}
types-agg = { path = "aggregation", package = "scroll-zkvm-circuit-input-types-aggregation"}
types-chunk = { path = "chunk", package = "scroll-zkvm-circuit-input-types-chunk"}
types-batch = { path = "batch", package = "scroll-zkvm-circuit-input-types-batch"}
types-bundle = { path = "bundle", package = "scroll-zkvm-circuit-input-types-bundle"}

common/types-rs/README.md (new file, 24 lines)
@@ -0,0 +1,24 @@
# Input Types for circuits

A set of separate crates for the input types accepted by circuits.

These crates decouple the circuits from other crates and keep their dependencies neat and controllable, avoiding indirect dependencies on crates that are not compatible with the openvm toolchain.

### Code structure
```
types-rs
│
├── base
│
├── circuit
│
├── aggregation
│
<following are layer-oriented crates>
│
├── chunk
│
├── batch
│
└── bundle
```
common/types-rs/aggregation/Cargo.toml (new file, 14 lines)
@@ -0,0 +1,14 @@
[package]
name = "scroll-zkvm-circuit-input-types-aggregation"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"

[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
rkyv.workspace = true
serde.workspace = true

common/types-rs/aggregation/src/lib.rs (new file, 81 lines)
@@ -0,0 +1,81 @@
/// Represents an openvm program's commitments and public values.
#[derive(
    Clone,
    Debug,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct AggregationInput {
    /// Public values.
    pub public_values: Vec<u32>,
    /// Represents the commitment needed to verify a root proof.
    pub commitment: ProgramCommitment,
}

/// Represents the commitment needed to verify a [`RootProof`].
#[derive(
    Clone,
    Debug,
    Default,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct ProgramCommitment {
    /// The commitment to the child program exe.
    pub exe: [u32; 8],
    /// The commitment to the child program leaf.
    pub leaf: [u32; 8],
}

impl ProgramCommitment {
    pub fn deserialize(commitment_bytes: &[u8]) -> Self {
        // TODO: temporarily skip deserialization if no vk is provided
        if commitment_bytes.is_empty() {
            return Default::default();
        }

        let archived_data =
            rkyv::access::<ArchivedProgramCommitment, rkyv::rancor::BoxedError>(commitment_bytes)
                .unwrap();

        Self {
            exe: archived_data.exe.map(|u32_le| u32_le.to_native()),
            leaf: archived_data.leaf.map(|u32_le| u32_le.to_native()),
        }
    }

    pub fn serialize(&self) -> Vec<u8> {
        rkyv::to_bytes::<rkyv::rancor::BoxedError>(self)
            .map(|v| v.to_vec())
            .unwrap()
    }
}

impl From<&ArchivedProgramCommitment> for ProgramCommitment {
    fn from(archived: &ArchivedProgramCommitment) -> Self {
        Self {
            exe: archived.exe.map(|u32_le| u32_le.to_native()),
            leaf: archived.leaf.map(|u32_le| u32_le.to_native()),
        }
    }
}

/// Number of public-input values, i.e. [u32; N].
///
/// Note that the actual value for each u32 is a byte.
pub const NUM_PUBLIC_VALUES: usize = 32;

/// Witness for an [`AggregationCircuit`][AggCircuit] that also carries proofs that are being
/// aggregated.
pub trait ProofCarryingWitness {
    /// Get the root proofs from the witness.
    fn get_proofs(&self) -> Vec<AggregationInput>;
}

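// Illustrative round-trip (not part of the committed diff), using only the
// two methods defined above: `serialize` emits the rkyv bytes and
// `deserialize` reads them back; an empty input maps to the all-zero default
// commitment, per the TODO above.
//
// #[cfg(test)]
// mod commitment_round_trip {
//     use super::*;
//
//     #[test]
//     fn round_trips_through_rkyv_bytes() {
//         let original = ProgramCommitment { exe: [1u32; 8], leaf: [2u32; 8] };
//         let bytes = original.serialize();
//         let recovered = ProgramCommitment::deserialize(&bytes);
//         assert_eq!(recovered.exe, original.exe);
//         assert_eq!(recovered.leaf, original.leaf);
//
//         // Empty bytes mean "no vk provided" and yield the default.
//         assert_eq!(ProgramCommitment::deserialize(&[]).exe, [0u32; 8]);
//     }
// }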
common/types-rs/base/Cargo.toml (new file, 21 lines)
@@ -0,0 +1,21 @@
[package]
name = "scroll-zkvm-circuit-input-types-base"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"

[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
alloy-serde.workspace = true
rkyv.workspace = true
serde.workspace = true
itertools.workspace = true
tiny-keccak = { workspace = true }
sha3 = "0.10.8"
sha2 = "0.10.8"

[features]
default = []
common/types-rs/base/src/lib.rs (new file, 2 lines)
@@ -0,0 +1,2 @@
pub mod public_inputs;
pub mod utils;
common/types-rs/base/src/public_inputs.rs (new file, 81 lines)
@@ -0,0 +1,81 @@
use alloy_primitives::B256;
pub mod batch;
pub mod bundle;
pub mod chunk;

/// Defines behaviour to be implemented by types representing the public-input values of a circuit.
pub trait PublicInputs {
    /// Keccak-256 digest of the public inputs. The public-input hash is revealed as public values
    /// via [`openvm::io::reveal`].
    fn pi_hash(&self) -> B256;

    /// Validation logic between public inputs of two contiguous instances.
    fn validate(&self, prev_pi: &Self);
}

#[derive(
    Default,
    Debug,
    Copy,
    Clone,
    PartialEq,
    Eq,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub enum ForkName {
    #[default]
    EuclidV1,
    EuclidV2,
}

impl From<&ArchivedForkName> for ForkName {
    fn from(archived: &ArchivedForkName) -> Self {
        match archived {
            ArchivedForkName::EuclidV1 => ForkName::EuclidV1,
            ArchivedForkName::EuclidV2 => ForkName::EuclidV2,
        }
    }
}

impl From<Option<&str>> for ForkName {
    fn from(value: Option<&str>) -> Self {
        match value {
            None => Default::default(),
            Some("euclidv1") => ForkName::EuclidV1,
            Some("euclidv2") => ForkName::EuclidV2,
            Some(s) => unreachable!("hardfork not accepted: {s}"),
        }
    }
}

impl From<&str> for ForkName {
    fn from(value: &str) -> Self {
        match value {
            "euclidv1" => ForkName::EuclidV1,
            "euclidv2" => ForkName::EuclidV2,
            s => unreachable!("hardfork not accepted: {s}"),
        }
    }
}

/// Helper trait that extends [`PublicInputs`] with fork-aware behaviour.
pub trait MultiVersionPublicInputs {
    fn pi_hash_by_fork(&self, fork_name: ForkName) -> B256;
    fn validate(&self, prev_pi: &Self, fork_name: ForkName);
}

impl<T: MultiVersionPublicInputs> PublicInputs for (T, ForkName) {
    fn pi_hash(&self) -> B256 {
        self.0.pi_hash_by_fork(self.1)
    }

    fn validate(&self, prev_pi: &Self) {
        assert_eq!(self.1, prev_pi.1);
        self.0.validate(&prev_pi.0, self.1)
    }
}
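// Illustrative sketch (not part of the committed diff): the blanket impl
// above means pairing any MultiVersionPublicInputs value with a ForkName
// yields a PublicInputs whose digest and validation are fork-aware.
// `digest_versioned` is a hypothetical helper demonstrating that.
//
// fn digest_versioned<T: MultiVersionPublicInputs>(pi: T, fork: ForkName) -> B256 {
//     let versioned: (T, ForkName) = (pi, fork);
//     versioned.pi_hash()
// }
//
// // e.g. digest_versioned(batch_info, ForkName::from("euclidv2"))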
common/types-rs/base/src/public_inputs/batch.rs (new file, 144 lines)
@@ -0,0 +1,144 @@
use alloy_primitives::B256;

use crate::{
    public_inputs::{ForkName, MultiVersionPublicInputs},
    utils::keccak256,
};

/// Represents public-input values for a batch.
#[derive(
    Clone,
    Debug,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct BatchInfo {
    /// The state root before applying the batch.
    #[rkyv()]
    pub parent_state_root: B256,
    /// The batch hash of the parent batch.
    #[rkyv()]
    pub parent_batch_hash: B256,
    /// The state root after applying txs in the batch.
    #[rkyv()]
    pub state_root: B256,
    /// The batch header hash of the batch.
    #[rkyv()]
    pub batch_hash: B256,
    /// The EIP-155 chain ID of all txs in the batch.
    #[rkyv()]
    pub chain_id: u64,
    /// The withdraw root of the last block in the last chunk in the batch.
    #[rkyv()]
    pub withdraw_root: B256,
    /// The L1 msg queue hash at the end of the previous batch.
    #[rkyv()]
    pub prev_msg_queue_hash: B256,
    /// The L1 msg queue hash at the end of the current batch.
    #[rkyv()]
    pub post_msg_queue_hash: B256,
}

impl From<&ArchivedBatchInfo> for BatchInfo {
    fn from(archived: &ArchivedBatchInfo) -> Self {
        Self {
            parent_state_root: archived.parent_state_root.into(),
            parent_batch_hash: archived.parent_batch_hash.into(),
            state_root: archived.state_root.into(),
            batch_hash: archived.batch_hash.into(),
            chain_id: archived.chain_id.into(),
            withdraw_root: archived.withdraw_root.into(),
            prev_msg_queue_hash: archived.prev_msg_queue_hash.into(),
            post_msg_queue_hash: archived.post_msg_queue_hash.into(),
        }
    }
}

impl BatchInfo {
    /// Public input hash for a batch (euclidv1 or da-codec@v6) is defined as
    ///
    /// keccak(
    ///     parent state root ||
    ///     parent batch hash ||
    ///     state root ||
    ///     batch hash ||
    ///     chain id ||
    ///     withdraw root
    /// )
    fn pi_hash_euclidv1(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(self.parent_state_root.as_slice())
                .chain(self.parent_batch_hash.as_slice())
                .chain(self.state_root.as_slice())
                .chain(self.batch_hash.as_slice())
                .chain(self.chain_id.to_be_bytes().as_slice())
                .chain(self.withdraw_root.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }

    /// Public input hash for a batch (euclidv2 or da-codec@v7) is defined as
    ///
    /// keccak(
    ///     parent state root ||
    ///     parent batch hash ||
    ///     state root ||
    ///     batch hash ||
    ///     chain id ||
    ///     withdraw root ||
    ///     prev msg queue hash ||
    ///     post msg queue hash
    /// )
    fn pi_hash_euclidv2(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(self.parent_state_root.as_slice())
                .chain(self.parent_batch_hash.as_slice())
                .chain(self.state_root.as_slice())
                .chain(self.batch_hash.as_slice())
                .chain(self.chain_id.to_be_bytes().as_slice())
                .chain(self.withdraw_root.as_slice())
                .chain(self.prev_msg_queue_hash.as_slice())
                .chain(self.post_msg_queue_hash.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }
}

pub type VersionedBatchInfo = (BatchInfo, ForkName);

impl MultiVersionPublicInputs for BatchInfo {
    fn pi_hash_by_fork(&self, fork_name: ForkName) -> B256 {
        match fork_name {
            ForkName::EuclidV1 => self.pi_hash_euclidv1(),
            ForkName::EuclidV2 => self.pi_hash_euclidv2(),
        }
    }

    /// Validate public inputs between 2 contiguous batches.
    ///
    /// - chain id MUST match
    /// - state roots MUST be chained
    /// - batch hashes MUST be chained
    /// - L1 msg queue hashes MUST be chained
    fn validate(&self, prev_pi: &Self, fork_name: ForkName) {
        assert_eq!(self.chain_id, prev_pi.chain_id);
        assert_eq!(self.parent_state_root, prev_pi.state_root);
        assert_eq!(self.parent_batch_hash, prev_pi.batch_hash);
        assert_eq!(self.prev_msg_queue_hash, prev_pi.post_msg_queue_hash);

        if fork_name == ForkName::EuclidV1 {
            assert_eq!(self.prev_msg_queue_hash, B256::ZERO);
            assert_eq!(prev_pi.prev_msg_queue_hash, B256::ZERO);
            assert_eq!(self.post_msg_queue_hash, B256::ZERO);
            assert_eq!(prev_pi.post_msg_queue_hash, B256::ZERO);
        }
    }
}
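// Editor's worked arithmetic (not part of the committed diff): fixed
// preimage sizes implied by the two layouts documented above. euclidv1
// hashes five 32-byte words plus an 8-byte chain id; euclidv2 appends the
// two 32-byte msg queue hashes.
//
// const BATCH_PI_PREIMAGE_LEN_EUCLIDV1: usize = 5 * 32 + 8; // = 168
// const BATCH_PI_PREIMAGE_LEN_EUCLIDV2: usize = BATCH_PI_PREIMAGE_LEN_EUCLIDV1 + 2 * 32; // = 232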
common/types-rs/base/src/public_inputs/bundle.rs (new file, 149 lines)
@@ -0,0 +1,149 @@
use alloy_primitives::B256;

use crate::{
    public_inputs::{ForkName, MultiVersionPublicInputs, PublicInputs},
    utils::keccak256,
};

/// Represents fields required to compute the public-inputs digest of a bundle.
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct BundleInfo {
    /// The EIP-155 chain ID of all txs in the bundle.
    pub chain_id: u64,
    /// The L1 msg queue hash at the end of the last batch in the bundle.
    /// This is not a phase-1 field, so it may be omitted.
    #[serde(default)]
    pub msg_queue_hash: B256,
    /// The number of batches bundled together in the bundle.
    pub num_batches: u32,
    /// The last finalized on-chain state root.
    pub prev_state_root: B256,
    /// The last finalized on-chain batch hash.
    pub prev_batch_hash: B256,
    /// The state root after applying every batch in the bundle.
    ///
    /// Upon verification of the EVM-verifiable bundle proof, this state root will be finalized
    /// on-chain.
    pub post_state_root: B256,
    /// The batch hash of the last batch in the bundle.
    ///
    /// Upon verification of the EVM-verifiable bundle proof, this batch hash will be finalized
    /// on-chain.
    pub batch_hash: B256,
    /// The withdrawals root at the last block in the last chunk in the last batch in the bundle.
    pub withdraw_root: B256,
}

impl BundleInfo {
    /// Public input hash for a bundle (euclidv1 or da-codec@v6) is defined as
    ///
    /// keccak(
    ///     chain id ||
    ///     num batches ||
    ///     prev state root ||
    ///     prev batch hash ||
    ///     post state root ||
    ///     batch hash ||
    ///     withdraw root
    /// )
    pub fn pi_hash_euclidv1(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(self.chain_id.to_be_bytes().as_slice())
                .chain(self.num_batches.to_be_bytes().as_slice())
                .chain(self.prev_state_root.as_slice())
                .chain(self.prev_batch_hash.as_slice())
                .chain(self.post_state_root.as_slice())
                .chain(self.batch_hash.as_slice())
                .chain(self.withdraw_root.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }

    /// Public input hash for a bundle (euclidv2 or da-codec@v7) is defined as
    ///
    /// keccak(
    ///     chain id ||
    ///     msg_queue_hash ||
    ///     num batches ||
    ///     prev state root ||
    ///     prev batch hash ||
    ///     post state root ||
    ///     batch hash ||
    ///     withdraw root
    /// )
    pub fn pi_hash_euclidv2(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(self.chain_id.to_be_bytes().as_slice())
                .chain(self.msg_queue_hash.as_slice())
                .chain(self.num_batches.to_be_bytes().as_slice())
                .chain(self.prev_state_root.as_slice())
                .chain(self.prev_batch_hash.as_slice())
                .chain(self.post_state_root.as_slice())
                .chain(self.batch_hash.as_slice())
                .chain(self.withdraw_root.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }

    pub fn pi_hash(&self, fork_name: ForkName) -> B256 {
        match fork_name {
            ForkName::EuclidV1 => self.pi_hash_euclidv1(),
            ForkName::EuclidV2 => self.pi_hash_euclidv2(),
        }
    }
}

impl MultiVersionPublicInputs for BundleInfo {
    fn pi_hash_by_fork(&self, fork_name: ForkName) -> B256 {
        match fork_name {
            ForkName::EuclidV1 => self.pi_hash_euclidv1(),
            ForkName::EuclidV2 => self.pi_hash_euclidv2(),
        }
    }

    fn validate(&self, _prev_pi: &Self, _fork_name: ForkName) {
        unreachable!("bundle is the last layer and is not aggregated by any other circuit");
    }
}

#[derive(Clone, Debug)]
pub struct BundleInfoV1(pub BundleInfo);

#[derive(Clone, Debug)]
pub struct BundleInfoV2(pub BundleInfo);

impl From<BundleInfo> for BundleInfoV1 {
    fn from(value: BundleInfo) -> Self {
        Self(value)
    }
}

impl From<BundleInfo> for BundleInfoV2 {
    fn from(value: BundleInfo) -> Self {
        Self(value)
    }
}

impl PublicInputs for BundleInfoV1 {
    fn pi_hash(&self) -> B256 {
        self.0.pi_hash_euclidv1()
    }

    fn validate(&self, _prev_pi: &Self) {
        unreachable!("bundle is the last layer and is not aggregated by any other circuit");
    }
}

impl PublicInputs for BundleInfoV2 {
    fn pi_hash(&self) -> B256 {
        self.0.pi_hash_euclidv2()
    }

    fn validate(&self, _prev_pi: &Self) {
        unreachable!("bundle is the last layer and is not aggregated by any other circuit");
    }
}
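// Illustrative sketch (not part of the committed diff): the newtype wrappers
// above pin a BundleInfo to one fork so it can be used where a plain
// PublicInputs is expected, without threading a ForkName alongside it.
// `bundle_digest_v2` is a hypothetical helper showing the intended use.
//
// fn bundle_digest_v2(info: BundleInfo) -> B256 {
//     BundleInfoV2::from(info).pi_hash()
// }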
common/types-rs/base/src/public_inputs/chunk.rs (new file, 248 lines)
@@ -0,0 +1,248 @@
use alloy_primitives::{B256, U256};

use crate::{
    public_inputs::{ForkName, MultiVersionPublicInputs},
    utils::keccak256,
};

/// Number of bytes used to serialise [`BlockContextV2`].
pub const SIZE_BLOCK_CTX: usize = 52;

/// Represents version 2 of the block context.
///
/// The difference between v2 and v1 is that the block number field has been removed since v2.
#[derive(
    Debug,
    Clone,
    PartialEq,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct BlockContextV2 {
    /// The timestamp of the block.
    pub timestamp: u64,
    /// The base fee of the block.
    pub base_fee: U256,
    /// The gas limit of the block.
    pub gas_limit: u64,
    /// The number of transactions in the block, including both L1 msg txs as well as L2 txs.
    pub num_txs: u16,
    /// The number of L1 msg txs in the block.
    pub num_l1_msgs: u16,
}

impl From<&ArchivedBlockContextV2> for BlockContextV2 {
    fn from(archived: &ArchivedBlockContextV2) -> Self {
        Self {
            timestamp: archived.timestamp.into(),
            base_fee: archived.base_fee.into(),
            gas_limit: archived.gas_limit.into(),
            num_txs: archived.num_txs.into(),
            num_l1_msgs: archived.num_l1_msgs.into(),
        }
    }
}

impl From<&[u8]> for BlockContextV2 {
    fn from(bytes: &[u8]) -> Self {
        assert_eq!(bytes.len(), SIZE_BLOCK_CTX);

        let timestamp = u64::from_be_bytes(bytes[0..8].try_into().expect("should not fail"));
        let base_fee = U256::from_be_slice(&bytes[8..40]);
        let gas_limit = u64::from_be_bytes(bytes[40..48].try_into().expect("should not fail"));
        let num_txs = u16::from_be_bytes(bytes[48..50].try_into().expect("should not fail"));
        let num_l1_msgs = u16::from_be_bytes(bytes[50..52].try_into().expect("should not fail"));

        Self {
            timestamp,
            base_fee,
            gas_limit,
            num_txs,
            num_l1_msgs,
        }
    }
}

impl BlockContextV2 {
    /// Serialize the block context in packed form.
    pub fn to_bytes(&self) -> Vec<u8> {
        std::iter::empty()
            .chain(self.timestamp.to_be_bytes())
            .chain(self.base_fee.to_be_bytes::<32>())
            .chain(self.gas_limit.to_be_bytes())
            .chain(self.num_txs.to_be_bytes())
            .chain(self.num_l1_msgs.to_be_bytes())
            .collect()
    }
}

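// Illustrative test (not part of the committed diff): checks the packed
// layout documented above, 8 (timestamp) + 32 (base fee) + 8 (gas limit) +
// 2 (num txs) + 2 (num L1 msgs) = SIZE_BLOCK_CTX = 52 bytes, and that the
// From<&[u8]> impl inverts to_bytes.
//
// #[cfg(test)]
// mod block_ctx_round_trip {
//     use super::*;
//
//     #[test]
//     fn packed_layout_round_trips() {
//         let ctx = BlockContextV2 {
//             timestamp: 1_700_000_000,
//             base_fee: U256::from(1_000_000_000u64),
//             gas_limit: 30_000_000,
//             num_txs: 12,
//             num_l1_msgs: 3,
//         };
//         let bytes = ctx.to_bytes();
//         assert_eq!(bytes.len(), SIZE_BLOCK_CTX);
//         assert_eq!(BlockContextV2::from(bytes.as_slice()), ctx);
//     }
// }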
/// Represents header-like information for the chunk.
#[derive(
    Debug,
    Clone,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct ChunkInfo {
    /// The EIP-155 chain ID for all txs in the chunk.
    #[rkyv()]
    pub chain_id: u64,
    /// The state root before applying the chunk.
    #[rkyv()]
    pub prev_state_root: B256,
    /// The state root after applying the chunk.
    #[rkyv()]
    pub post_state_root: B256,
    /// The withdrawals root after applying the chunk.
    #[rkyv()]
    pub withdraw_root: B256,
    /// Digest of L1 message txs force included in the chunk.
    /// It is a legacy field and can be omitted in the new definition.
    #[rkyv()]
    #[serde(default)]
    pub data_hash: B256,
    /// Digest of L2 tx data flattened over all L2 txs in the chunk.
    #[rkyv()]
    pub tx_data_digest: B256,
    /// The L1 msg queue hash at the end of the previous chunk.
    #[rkyv()]
    pub prev_msg_queue_hash: B256,
    /// The L1 msg queue hash at the end of the current chunk.
    #[rkyv()]
    pub post_msg_queue_hash: B256,
    /// The length of rlp encoded L2 tx bytes flattened over all L2 txs in the chunk.
    #[rkyv()]
    pub tx_data_length: u64,
    /// The block number of the first block in the chunk.
    #[rkyv()]
    pub initial_block_number: u64,
    /// The block contexts of the blocks in the chunk.
    #[rkyv()]
    pub block_ctxs: Vec<BlockContextV2>,
}

impl ChunkInfo {
    /// Public input hash for a given chunk (euclidv1 or da-codec@v6) is defined as
    ///
    /// keccak(
    ///     chain id ||
    ///     prev state root ||
    ///     post state root ||
    ///     withdraw root ||
    ///     chunk data hash ||
    ///     tx data hash
    /// )
    pub fn pi_hash_euclidv1(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(&self.chain_id.to_be_bytes())
                .chain(self.prev_state_root.as_slice())
                .chain(self.post_state_root.as_slice())
                .chain(self.withdraw_root.as_slice())
                .chain(self.data_hash.as_slice())
                .chain(self.tx_data_digest.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }

    /// Public input hash for a given chunk (euclidv2 or da-codec@v7) is defined as
    ///
    /// keccak(
    ///     chain id ||
    ///     prev state root ||
    ///     post state root ||
    ///     withdraw root ||
    ///     tx data digest ||
    ///     prev msg queue hash ||
    ///     post msg queue hash ||
    ///     initial block number ||
    ///     block_ctx for block_ctx in block_ctxs
    /// )
    pub fn pi_hash_euclidv2(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(&self.chain_id.to_be_bytes())
                .chain(self.prev_state_root.as_slice())
                .chain(self.post_state_root.as_slice())
                .chain(self.withdraw_root.as_slice())
                .chain(self.tx_data_digest.as_slice())
                .chain(self.prev_msg_queue_hash.as_slice())
                .chain(self.post_msg_queue_hash.as_slice())
                .chain(&self.initial_block_number.to_be_bytes())
                .chain(
                    self.block_ctxs
                        .iter()
                        .flat_map(|block_ctx| block_ctx.to_bytes())
                        .collect::<Vec<u8>>()
                        .as_slice(),
                )
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }
}
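// Editor's worked arithmetic (not part of the committed diff): preimage size
// implied by the euclidv2 layout above, an 8-byte chain id, six 32-byte
// roots/digests/queue hashes, an 8-byte initial block number, and 52 bytes
// per block context, i.e. 208 + 52 * n.
//
// const fn chunk_pi_preimage_len_euclidv2(num_blocks: usize) -> usize {
//     8 + 6 * 32 + 8 + num_blocks * SIZE_BLOCK_CTX
// }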

impl From<&ArchivedChunkInfo> for ChunkInfo {
    fn from(archived: &ArchivedChunkInfo) -> Self {
        Self {
            chain_id: archived.chain_id.into(),
            prev_state_root: archived.prev_state_root.into(),
            post_state_root: archived.post_state_root.into(),
            withdraw_root: archived.withdraw_root.into(),
            data_hash: archived.data_hash.into(),
            tx_data_digest: archived.tx_data_digest.into(),
            prev_msg_queue_hash: archived.prev_msg_queue_hash.into(),
            post_msg_queue_hash: archived.post_msg_queue_hash.into(),
            tx_data_length: archived.tx_data_length.into(),
            initial_block_number: archived.initial_block_number.into(),
            block_ctxs: archived
                .block_ctxs
                .iter()
                .map(BlockContextV2::from)
                .collect(),
        }
    }
}

pub type VersionedChunkInfo = (ChunkInfo, ForkName);

impl MultiVersionPublicInputs for ChunkInfo {
    /// Compute the public input hash for the chunk.
    fn pi_hash_by_fork(&self, fork_name: ForkName) -> B256 {
        match fork_name {
            ForkName::EuclidV1 => {
                assert_ne!(self.data_hash, B256::ZERO, "v6 must have a valid data hash");
                self.pi_hash_euclidv1()
            }
            ForkName::EuclidV2 => self.pi_hash_euclidv2(),
        }
    }

    /// Validate public inputs between 2 contiguous chunks.
    ///
    /// - chain id MUST match
    /// - state roots MUST be chained
    /// - L1 msg queue hash MUST be chained
    fn validate(&self, prev_pi: &Self, fork_name: ForkName) {
        assert_eq!(self.chain_id, prev_pi.chain_id);
        assert_eq!(self.prev_state_root, prev_pi.post_state_root);
        assert_eq!(self.prev_msg_queue_hash, prev_pi.post_msg_queue_hash);

        // message queue hash is used only after euclidv2 (da-codec@v7)
        if fork_name == ForkName::EuclidV1 {
            assert_eq!(self.prev_msg_queue_hash, B256::ZERO);
            assert_eq!(prev_pi.prev_msg_queue_hash, B256::ZERO);
            assert_eq!(self.post_msg_queue_hash, B256::ZERO);
            assert_eq!(prev_pi.post_msg_queue_hash, B256::ZERO);
        }
    }
}
common/types-rs/base/src/utils/hash.rs (new file, 35 lines)
@@ -0,0 +1,35 @@
use alloy_primitives::B256;
use tiny_keccak::{Hasher, Keccak};

/// Adapted from the ethers-rs utility.
///
/// Computes the Keccak-256 hash of input bytes.
///
/// Note that strings are interpreted as UTF-8 bytes.
pub fn keccak256<T: AsRef<[u8]>>(bytes: T) -> B256 {
    let mut output = [0u8; 32];

    let mut hasher = Keccak::v256();
    hasher.update(bytes.as_ref());
    hasher.finalize(&mut output);

    B256::from(output)
}

pub fn keccak256_rv32<T: AsRef<[u8]>>(bytes: T) -> B256 {
    use sha3::{Digest, Keccak256};
    let mut output = [0u8; 32];
    let mut hasher = Keccak256::new();
    hasher.update(bytes.as_ref());
    output.copy_from_slice(hasher.finalize().as_ref());
    B256::from(output)
}

pub fn sha256_rv32<T: AsRef<[u8]>>(bytes: T) -> B256 {
    use sha2::{Digest, Sha256};
    let mut output = [0u8; 32];
    let mut hasher = Sha256::new();
    hasher.update(bytes.as_ref());
    output.copy_from_slice(hasher.finalize().as_ref());
    B256::from(output)
}
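// Illustrative test (not part of the committed diff): the two keccak helpers
// above differ only in backend, tiny-keccak on the host versus sha3 (which,
// judging by the _rv32 suffix, is presumably the variant accelerated inside
// the rv32 zkVM guest), so they must agree on every input.
//
// #[cfg(test)]
// mod hash_equivalence {
//     use super::*;
//
//     #[test]
//     fn keccak_backends_agree() {
//         let input = b"scroll-zkvm-circuit-input-types";
//         assert_eq!(keccak256(input), keccak256_rv32(input));
//     }
// }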
common/types-rs/base/src/utils/mod.rs (new file, 2 lines)
@@ -0,0 +1,2 @@
mod hash;
pub use hash::{keccak256, keccak256_rv32, sha256_rv32};
common/types-rs/batch/Cargo.toml (new file, 21 lines)
@@ -0,0 +1,21 @@
[package]
name = "scroll-zkvm-circuit-input-types-batch"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"

[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
rkyv.workspace = true
serde.workspace = true
itertools.workspace = true
vm-zstd = { workspace = true }

types-base = { path = "../base", package = "scroll-zkvm-circuit-input-types-base"}
types-agg = { path = "../aggregation", package = "scroll-zkvm-circuit-input-types-aggregation"}

[features]
default = []
common/types-rs/batch/src/header/mod.rs (new file, 30 lines)
@@ -0,0 +1,30 @@
use alloy_primitives::B256;

pub mod v6;

pub mod v7;

pub trait BatchHeader {
    /// The DA-codec version for the batch header.
    fn version(&self) -> u8;

    /// The incremental index of the batch.
    fn index(&self) -> u64;

    /// The batch header digest of the parent batch.
    fn parent_batch_hash(&self) -> B256;

    /// The batch header digest.
    fn batch_hash(&self) -> B256;
}

/// Reference header indicating the version of the batch header on which the
/// batch hash should be calculated.
#[derive(Clone, Debug, rkyv::Archive, rkyv::Deserialize, rkyv::Serialize)]
#[rkyv(derive(Debug))]
pub enum ReferenceHeader {
    /// Represents DA-codec v6.
    V6(v6::BatchHeaderV6),
    /// Represents DA-codec v7.
    V7(v7::BatchHeaderV7),
}
151
common/types-rs/batch/src/header/v6.rs
Normal file
151
common/types-rs/batch/src/header/v6.rs
Normal file
@@ -0,0 +1,151 @@
|
||||
use super::BatchHeader;
|
||||
use alloy_primitives::B256;
|
||||
use types_base::utils::keccak256;
|
||||
|
||||
/// Represents the header summarising the batch of chunks as per DA-codec v6.
|
||||
#[derive(
|
||||
Clone,
|
||||
Copy,
|
||||
Debug,
|
||||
Default,
|
||||
rkyv::Archive,
|
||||
rkyv::Deserialize,
|
||||
rkyv::Serialize,
|
||||
serde::Deserialize,
|
||||
serde::Serialize,
|
||||
)]
|
||||
#[rkyv(derive(Debug))]
|
||||
pub struct BatchHeaderV6 {
|
||||
/// The DA-codec version for the batch.
|
||||
#[rkyv()]
|
||||
pub version: u8,
|
||||
/// The index of the batch
|
||||
#[rkyv()]
|
||||
pub batch_index: u64,
|
||||
/// Number of L1 messages popped in the batch
|
||||
#[rkyv()]
|
||||
pub l1_message_popped: u64,
|
||||
/// Number of total L1 messages popped after the batch
|
||||
#[rkyv()]
|
||||
pub total_l1_message_popped: u64,
|
||||
/// The parent batch hash
|
||||
#[rkyv()]
|
||||
pub parent_batch_hash: B256,
|
||||
/// The timestamp of the last block in this batch
|
||||
#[rkyv()]
|
||||
pub last_block_timestamp: u64,
|
||||
/// The data hash of the batch
|
||||
#[rkyv()]
|
||||
pub data_hash: B256,
|
||||
/// The versioned hash of the blob with this batch's data
|
||||
#[rkyv()]
|
||||
pub blob_versioned_hash: B256,
|
||||
/// The blob data proof: z (32), y (32)
|
||||
#[rkyv()]
|
||||
pub blob_data_proof: [B256; 2],
|
||||
}
|
||||
|
||||
impl BatchHeader for BatchHeaderV6 {
|
||||
fn version(&self) -> u8 {
|
||||
self.version
|
||||
}
|
||||
|
||||
fn index(&self) -> u64 {
|
||||
self.batch_index
|
||||
}
|
||||
|
||||
fn parent_batch_hash(&self) -> B256 {
|
||||
self.parent_batch_hash
|
||||
}
|
||||
|
||||
/// Batch hash as per DA-codec v6:
|
||||
///
|
||||
/// keccak(
|
||||
/// version ||
|
||||
/// batch index ||
|
||||
/// l1 message popped ||
|
||||
/// total l1 message popped ||
|
||||
/// batch data hash ||
|
||||
/// versioned hash ||
|
||||
/// parent batch hash ||
|
||||
/// last block timestamp ||
|
||||
/// z ||
|
||||
/// y
|
||||
/// )
|
||||
fn batch_hash(&self) -> B256 {
|
||||
keccak256(
|
||||
std::iter::empty()
|
||||
.chain(vec![self.version].as_slice())
|
||||
.chain(self.batch_index.to_be_bytes().as_slice())
|
||||
.chain(self.l1_message_popped.to_be_bytes().as_slice())
|
||||
.chain(self.total_l1_message_popped.to_be_bytes().as_slice())
|
||||
.chain(self.data_hash.as_slice())
|
||||
.chain(self.blob_versioned_hash.as_slice())
|
||||
.chain(self.parent_batch_hash.as_slice())
|
||||
.chain(self.last_block_timestamp.to_be_bytes().as_slice())
|
||||
.chain(self.blob_data_proof[0].as_slice())
|
||||
.chain(self.blob_data_proof[1].as_slice())
|
||||
.cloned()
|
||||
.collect::<Vec<u8>>(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl BatchHeader for ArchivedBatchHeaderV6 {
|
||||
fn version(&self) -> u8 {
|
||||
self.version
|
||||
}
|
||||
|
||||
fn index(&self) -> u64 {
|
||||
self.batch_index.into()
|
||||
}
|
||||
|
||||
fn parent_batch_hash(&self) -> B256 {
|
||||
self.parent_batch_hash.into()
|
||||
}
|
||||
|
||||
fn batch_hash(&self) -> B256 {
|
||||
let batch_index: u64 = self.batch_index.into();
|
||||
let l1_message_popped: u64 = self.l1_message_popped.into();
|
||||
let total_l1_message_popped: u64 = self.total_l1_message_popped.into();
|
||||
let data_hash: B256 = self.data_hash.into();
|
||||
let blob_versioned_hash: B256 = self.blob_versioned_hash.into();
|
||||
let parent_batch_hash: B256 = self.parent_batch_hash.into();
|
||||
let last_block_timestamp: u64 = self.last_block_timestamp.into();
|
||||
let blob_data_proof: [B256; 2] = self.blob_data_proof.map(|h| h.into());
|
||||
keccak256(
|
||||
std::iter::empty()
|
||||
.chain(vec![self.version].as_slice())
|
||||
.chain(batch_index.to_be_bytes().as_slice())
|
||||
.chain(l1_message_popped.to_be_bytes().as_slice())
|
||||
.chain(total_l1_message_popped.to_be_bytes().as_slice())
|
||||
.chain(data_hash.as_slice())
|
||||
.chain(blob_versioned_hash.as_slice())
|
||||
.chain(parent_batch_hash.as_slice())
|
||||
.chain(last_block_timestamp.to_be_bytes().as_slice())
|
||||
.chain(blob_data_proof[0].as_slice())
|
||||
.chain(blob_data_proof[1].as_slice())
|
||||
.cloned()
|
||||
.collect::<Vec<u8>>(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&ArchivedBatchHeaderV6> for BatchHeaderV6 {
|
||||
fn from(archived: &ArchivedBatchHeaderV6) -> Self {
|
||||
Self {
|
||||
version: archived.version,
|
||||
batch_index: archived.batch_index.into(),
|
||||
l1_message_popped: archived.l1_message_popped.into(),
|
||||
total_l1_message_popped: archived.total_l1_message_popped.into(),
|
||||
parent_batch_hash: archived.parent_batch_hash.into(),
|
||||
last_block_timestamp: archived.last_block_timestamp.into(),
|
||||
data_hash: archived.data_hash.into(),
|
||||
blob_versioned_hash: archived.blob_versioned_hash.into(),
|
||||
blob_data_proof: [
|
||||
archived.blob_data_proof[0].into(),
|
||||
archived.blob_data_proof[1].into(),
|
||||
],
|
||||
}
|
||||
}
|
||||
}
|
||||
106
common/types-rs/batch/src/header/v7.rs
Normal file
106
common/types-rs/batch/src/header/v7.rs
Normal file
@@ -0,0 +1,106 @@
|
||||
use alloy_primitives::B256;
|
||||
|
||||
use super::BatchHeader;
|
||||
use types_base::utils::keccak256;
|
||||
|
||||
/// Represents the header summarising the batch of chunks as per DA-codec v7.
|
||||
#[derive(
|
||||
Clone,
|
||||
Copy,
|
||||
Debug,
|
||||
Default,
|
||||
rkyv::Archive,
|
||||
rkyv::Deserialize,
|
||||
rkyv::Serialize,
|
||||
serde::Deserialize,
|
||||
serde::Serialize,
|
||||
)]
|
||||
#[rkyv(derive(Debug))]
|
||||
pub struct BatchHeaderV7 {
|
||||
/// The DA-codec version for the batch.
|
||||
#[rkyv()]
|
||||
pub version: u8,
|
||||
/// The index of the batch
|
||||
#[rkyv()]
|
||||
pub batch_index: u64,
|
||||
/// The parent batch hash
|
||||
#[rkyv()]
|
||||
pub parent_batch_hash: B256,
|
||||
/// The versioned hash of the blob with this batch's data
|
||||
#[rkyv()]
|
||||
pub blob_versioned_hash: B256,
|
||||
}
|
||||
|
||||
impl BatchHeader for BatchHeaderV7 {
|
||||
fn version(&self) -> u8 {
|
||||
self.version
|
||||
}
|
||||
|
||||
fn index(&self) -> u64 {
|
||||
self.batch_index
|
||||
}
|
||||
|
||||
fn parent_batch_hash(&self) -> B256 {
|
||||
self.parent_batch_hash
|
||||
}
|
||||
|
||||
/// Batch hash as per DA-codec v7:
|
||||
///
|
||||
/// keccak(
|
||||
/// version ||
|
||||
/// batch index ||
|
||||
/// versioned hash ||
|
||||
/// parent batch hash
|
||||
/// )
|
||||
fn batch_hash(&self) -> B256 {
|
||||
keccak256(
|
||||
std::iter::empty()
|
||||
.chain(vec![self.version].as_slice())
|
||||
.chain(self.batch_index.to_be_bytes().as_slice())
|
||||
.chain(self.blob_versioned_hash.as_slice())
|
||||
.chain(self.parent_batch_hash.as_slice())
|
||||
.cloned()
|
||||
.collect::<Vec<u8>>(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl BatchHeader for ArchivedBatchHeaderV7 {
|
||||
fn version(&self) -> u8 {
|
||||
self.version
|
||||
}
|
||||
|
||||
fn index(&self) -> u64 {
|
||||
self.batch_index.into()
|
||||
}
|
||||
|
||||
fn parent_batch_hash(&self) -> B256 {
|
||||
self.parent_batch_hash.into()
|
||||
}
|
||||
|
||||
fn batch_hash(&self) -> B256 {
|
||||
let batch_index: u64 = self.batch_index.into();
|
||||
let blob_versioned_hash: B256 = self.blob_versioned_hash.into();
|
||||
let parent_batch_hash: B256 = self.parent_batch_hash.into();
|
||||
keccak256(
|
||||
std::iter::empty()
|
||||
.chain(vec![self.version].as_slice())
|
||||
.chain(batch_index.to_be_bytes().as_slice())
|
||||
.chain(blob_versioned_hash.as_slice())
|
||||
.chain(parent_batch_hash.as_slice())
|
||||
.cloned()
|
||||
.collect::<Vec<u8>>(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&ArchivedBatchHeaderV7> for BatchHeaderV7 {
|
||||
fn from(archived: &ArchivedBatchHeaderV7) -> Self {
|
||||
Self {
|
||||
version: archived.version,
|
||||
batch_index: archived.batch_index.into(),
|
||||
parent_batch_hash: archived.parent_batch_hash.into(),
|
||||
blob_versioned_hash: archived.blob_versioned_hash.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
17
common/types-rs/batch/src/lib.rs
Normal file
17
common/types-rs/batch/src/lib.rs
Normal file
@@ -0,0 +1,17 @@
|
||||
mod header;
|
||||
pub use header::{
|
||||
ArchivedReferenceHeader, BatchHeader, ReferenceHeader,
|
||||
v6::{ArchivedBatchHeaderV6, BatchHeaderV6},
|
||||
v7::{ArchivedBatchHeaderV7, BatchHeaderV7},
|
||||
};
|
||||
|
||||
mod payload;
|
||||
pub use payload::{
|
||||
v6::{EnvelopeV6, PayloadV6},
|
||||
v7::{EnvelopeV7, PayloadV7},
|
||||
};
|
||||
|
||||
pub use payload::{BLOB_WIDTH, N_BLOB_BYTES, N_DATA_BYTES_PER_COEFFICIENT};
|
||||
|
||||
mod witness;
|
||||
pub use witness::{ArchivedBatchWitness, BatchWitness, Bytes48, PointEvalWitness};
|
||||
15
common/types-rs/batch/src/payload/mod.rs
Normal file
15
common/types-rs/batch/src/payload/mod.rs
Normal file
@@ -0,0 +1,15 @@
|
||||
pub mod v6;
|
||||
pub mod v7;
|
||||
|
||||
/// The number data bytes we pack each BLS12-381 scalar into. The most-significant byte is 0.
|
||||
pub const N_DATA_BYTES_PER_COEFFICIENT: usize = 31;
|
||||
|
||||
/// The number of BLS12-381 scalar fields that effectively represent an EIP-4844 blob.
|
||||
pub const BLOB_WIDTH: usize = 4096;
|
||||
|
||||
/// The effective (reduced) number of bytes we can use within a blob.
|
||||
///
|
||||
/// EIP-4844 requires that each 32-bytes chunk of bytes represent a BLS12-381 scalar field element
|
||||
/// in its canonical form. As a result, we set the most-significant byte in each such chunk to 0.
|
||||
/// This allows us to use only up to 31 bytes in each such chunk, hence the reduced capacity.
|
||||
pub const N_BLOB_BYTES: usize = BLOB_WIDTH * N_DATA_BYTES_PER_COEFFICIENT;
|
||||
212
common/types-rs/batch/src/payload/v6.rs
Normal file
212
common/types-rs/batch/src/payload/v6.rs
Normal file
@@ -0,0 +1,212 @@
|
||||
use alloy_primitives::B256;
|
||||
use itertools::Itertools;
|
||||
|
||||
use crate::BatchHeaderV6;
|
||||
use types_base::{public_inputs::chunk::ChunkInfo, utils::keccak256};
|
||||
|
||||
/// The default max chunks for v6 payload
|
||||
pub const N_MAX_CHUNKS: usize = 45;
|
||||
|
||||
/// The number of bytes to encode number of chunks in a batch.
|
||||
const N_BYTES_NUM_CHUNKS: usize = 2;
|
||||
|
||||
/// The number of rows to encode chunk size (u32).
|
||||
const N_BYTES_CHUNK_SIZE: usize = 4;
|
||||
|
||||
impl From<&[u8]> for EnvelopeV6 {
|
||||
fn from(blob_bytes: &[u8]) -> Self {
|
||||
let is_encoded = blob_bytes[0] & 1 == 1;
|
||||
Self {
|
||||
is_encoded,
|
||||
envelope_bytes: if blob_bytes[0] & 1 == 1 {
|
||||
vm_zstd::process(&blob_bytes[1..]).unwrap().decoded_data
|
||||
} else {
|
||||
Vec::from(&blob_bytes[1..])
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct EnvelopeV6 {
|
||||
/// The original envelope bytes supplied.
|
||||
///
|
||||
/// Caching just for re-use later in challenge digest computation.
|
||||
pub envelope_bytes: Vec<u8>,
|
||||
/// If the enveloped bytes is encoded (compressed) in envelop
|
||||
pub is_encoded: bool,
|
||||
}
|
||||
|
||||
impl EnvelopeV6 {
|
||||
/// Parse payload bytes and obtain challenge digest
|
||||
pub fn challenge_digest(&self, versioned_hash: B256) -> B256 {
|
||||
let payload = Payload::from(self);
|
||||
payload.get_challenge_digest(versioned_hash)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&EnvelopeV6> for Payload {
|
||||
fn from(envelope: &EnvelopeV6) -> Self {
|
||||
Self::from_payload(&envelope.envelope_bytes)
|
||||
}
|
||||
}
|
||||
|
||||
/// Payload that describes a batch.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct Payload {
|
||||
/// Metadata that encodes the sizes of every chunk in the batch.
|
||||
pub metadata_digest: B256,
|
||||
/// The Keccak digests of transaction bytes for every chunk in the batch.
|
||||
///
|
||||
/// The `chunk_data_digest` is a part of the chunk-circuit's public input and hence used to
|
||||
/// verify that the transaction bytes included in the chunk-circuit indeed match the
|
||||
/// transaction bytes made available in the batch.
|
||||
pub chunk_data_digests: Vec<B256>,
|
||||
}
|
||||
|
||||
pub type PayloadV6 = Payload;
|
||||
|
||||
impl Payload {
|
||||
/// For raw payload data (read from decompressed enveloped data), which is raw batch bytes
|
||||
/// with metadata, this function segments the byte stream into chunk segments.
|
||||
///
|
||||
/// This method is used INSIDE OF zkvm since we can not generate (compress) batch data within
|
||||
/// the vm program
|
||||
///
|
||||
/// The structure of batch bytes is as follows:
|
||||
///
|
||||
/// | Byte Index | Size | Hint |
|
||||
/// |--------------------------------------------------------------|-------------------------------|-------------------------------------|
|
||||
/// | 0 | N_BYTES_NUM_CHUNKS | Number of chunks |
|
||||
/// | N_BYTES_NUM_CHUNKS | N_BYTES_CHUNK_SIZE | Size of chunks[0] |
|
||||
/// | N_BYTES_NUM_CHUNKS + N_BYTES_CHUNK_SIZE | N_BYTES_CHUNK_SIZE | Size of chunks[1] |
|
||||
/// | N_BYTES_NUM_CHUNKS + (i * N_BYTES_CHUNK_SIZE) | N_BYTES_CHUNK_SIZE | Size of chunks[i] |
|
||||
/// | N_BYTES_NUM_CHUNKS + ((N_MAX_CHUNKS-1) * N_BYTES_CHUNK_SIZE) | N_BYTES_CHUNK_SIZE | Size of chunks[N_MAX_CHUNKS-1] |
|
||||
/// | N_BYTES_NUM_CHUNKS + (N_MAX_CHUNKS * N_BYTES_CHUNK_SIZE) | Size of chunks[0] | L2 tx bytes of chunks[0] |
|
||||
/// | "" + Size_of_chunks[0] | Size of chunks[1] | L2 tx bytes of chunks[1] |
|
||||
/// | "" + Size_of_chunks[i-1] | Size of chunks[i] | L2 tx bytes of chunks[i] |
|
||||
/// | "" + Size_of_chunks[Num_chunks-1] | Size of chunks[Num_chunks-1] | L2 tx bytes of chunks[Num_chunks-1] |
|
||||
pub fn from_payload(batch_bytes_with_metadata: &[u8]) -> Self {
|
||||
// Get the metadata bytes and metadata digest.
|
||||
let n_bytes_metadata = Self::n_bytes_metadata();
|
||||
let metadata_bytes = &batch_bytes_with_metadata[..n_bytes_metadata];
|
||||
let metadata_digest = keccak256(metadata_bytes);
|
||||
|
||||
// The remaining bytes represent the chunk data (L2 tx bytes) segmented as chunks.
|
||||
let batch_bytes = &batch_bytes_with_metadata[n_bytes_metadata..];
|
||||
|
||||
// The number of chunks in the batch.
|
||||
let valid_chunks = metadata_bytes[..N_BYTES_NUM_CHUNKS]
|
||||
.iter()
|
||||
.fold(0usize, |acc, &d| acc * 256usize + d as usize);
|
||||
|
||||
// The size of each chunk in the batch.
|
||||
let chunk_sizes = metadata_bytes[N_BYTES_NUM_CHUNKS..]
|
||||
.iter()
|
||||
.chunks(N_BYTES_CHUNK_SIZE)
|
||||
.into_iter()
|
||||
.map(|bytes| bytes.fold(0usize, |acc, &d| acc * 256usize + d as usize))
|
||||
.collect::<Vec<usize>>();
|
||||
|
||||
// For every unused chunk, the chunk size should be set to 0.
|
||||
for &unused_chunk_size in chunk_sizes.iter().skip(valid_chunks) {
|
||||
assert_eq!(unused_chunk_size, 0, "unused chunk has size 0");
|
||||
}
|
||||
|
||||
// Segment the batch bytes based on the chunk sizes.
|
||||
let (segmented_batch_data, remaining_bytes) =
|
||||
chunk_sizes.into_iter().take(valid_chunks).fold(
|
||||
(Vec::new(), batch_bytes),
|
||||
|(mut datas, rest_bytes), size| {
|
||||
datas.push(Vec::from(&rest_bytes[..size]));
|
||||
(datas, &rest_bytes[size..])
|
||||
},
|
||||
);
|
||||
|
||||
// After segmenting the batch data into chunks, no bytes should be left.
|
||||
assert!(
|
||||
remaining_bytes.is_empty(),
|
||||
"chunk segmentation len must add up to the correct value"
|
||||
);
|
||||
|
||||
// Compute the chunk data digests based on the segmented data.
|
||||
let chunk_data_digests = segmented_batch_data
|
||||
.iter()
|
||||
.map(|bytes| B256::from(keccak256(bytes)))
|
||||
.collect();
|
||||
|
||||
Self {
|
||||
metadata_digest,
|
||||
chunk_data_digests,
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the challenge digest from blob bytes. which is the combination of
|
||||
/// digest for bytes in each chunk
|
||||
pub fn get_challenge_digest(&self, versioned_hash: B256) -> B256 {
|
||||
keccak256(self.get_challenge_digest_preimage(versioned_hash))
|
||||
}
|
||||
|
||||
/// The number of bytes in payload Data to represent the "payload metadata" section: a u16 to
|
||||
/// represent the size of chunks and max_chunks * u32 to represent chunk sizes
|
||||
const fn n_bytes_metadata() -> usize {
|
||||
N_BYTES_NUM_CHUNKS + (N_MAX_CHUNKS * N_BYTES_CHUNK_SIZE)
|
||||
}
|
||||
|
||||
/// Validate the payload contents.
|
||||
pub fn validate<'a>(
|
||||
&self,
|
||||
header: &BatchHeaderV6,
|
||||
chunk_infos: &'a [ChunkInfo],
|
||||
) -> (&'a ChunkInfo, &'a ChunkInfo) {
|
||||
// There should be at least 1 chunk info.
|
||||
assert!(!chunk_infos.is_empty(), "at least 1 chunk info");
|
||||
|
||||
// Get the first and last chunks' info, to construct the batch info.
|
||||
let (first_chunk, last_chunk) = (
|
||||
chunk_infos.first().expect("at least one chunk in batch"),
|
||||
chunk_infos.last().expect("at least one chunk in batch"),
|
||||
);
|
||||
|
||||
for (&chunk_data_digest, chunk_info) in self.chunk_data_digests.iter().zip_eq(chunk_infos) {
|
||||
assert_eq!(chunk_data_digest, chunk_info.tx_data_digest)
|
||||
}
|
||||
|
||||
// Validate the l1-msg identifier data_hash for the batch.
|
||||
let batch_data_hash_preimage = chunk_infos
|
||||
.iter()
|
||||
.flat_map(|chunk_info| chunk_info.data_hash.0)
|
||||
.collect::<Vec<_>>();
|
||||
let batch_data_hash = keccak256(batch_data_hash_preimage);
|
||||
assert_eq!(batch_data_hash, header.data_hash);
|
||||
|
||||
(first_chunk, last_chunk)
|
||||
}
|
||||
|
||||
/// Get the preimage for the challenge digest.
|
||||
pub(crate) fn get_challenge_digest_preimage(&self, versioned_hash: B256) -> Vec<u8> {
|
||||
// preimage =
|
||||
// metadata_digest ||
|
||||
// chunk[0].chunk_data_digest || ...
|
||||
// chunk[N_SNARKS-1].chunk_data_digest ||
|
||||
// blob_versioned_hash
|
||||
//
|
||||
// where chunk_data_digest for a padded chunk is set equal to the "last valid chunk"'s
|
||||
// chunk_data_digest.
|
||||
let mut preimage = self.metadata_digest.to_vec();
|
||||
let last_digest = self
|
||||
.chunk_data_digests
|
||||
.last()
|
||||
.expect("at least we have one");
|
||||
for chunk_digest in self
|
||||
.chunk_data_digests
|
||||
.iter()
|
||||
.chain(std::iter::repeat(last_digest))
|
||||
.take(N_MAX_CHUNKS)
|
||||
{
|
||||
preimage.extend_from_slice(chunk_digest.as_slice());
|
||||
}
|
||||
preimage.extend_from_slice(versioned_hash.as_slice());
|
||||
preimage
|
||||
}
|
||||
}
|
||||
256
common/types-rs/batch/src/payload/v7.rs
Normal file
256
common/types-rs/batch/src/payload/v7.rs
Normal file
@@ -0,0 +1,256 @@
|
||||
use alloy_primitives::B256;
|
||||
|
||||
use crate::BatchHeaderV7;
|
||||
use types_base::{
|
||||
public_inputs::chunk::{BlockContextV2, ChunkInfo, SIZE_BLOCK_CTX},
|
||||
utils::keccak256,
|
||||
};
|
||||
|
||||
use super::N_BLOB_BYTES;
|
||||
|
||||
/// da-codec@v7
|
||||
const DA_CODEC_VERSION: u8 = 7;
|
||||
|
||||
/// Represents the data contained within an EIP-4844 blob that is published on-chain.
|
||||
///
|
||||
/// The bytes following some metadata represent zstd-encoded [`PayloadV7`] if the envelope is
|
||||
/// indicated as `is_encoded == true`.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct EnvelopeV7 {
|
||||
/// The original envelope bytes supplied.
|
||||
///
|
||||
/// Caching just for re-use later in challenge digest computation.
|
||||
pub envelope_bytes: Vec<u8>,
|
||||
/// The version from da-codec, i.e. v7 in this case.
|
||||
pub version: u8,
|
||||
/// A single byte boolean flag (value is 0 or 1) to denote whether or not the following blob
|
||||
/// bytes represent a batch in its zstd-encoded or raw form.
|
||||
pub is_encoded: u8,
|
||||
/// The unpadded bytes that possibly encode the [`PayloadV7`].
|
||||
pub unpadded_bytes: Vec<u8>,
|
||||
}
|
||||
|
||||
impl From<&[u8]> for EnvelopeV7 {
|
||||
fn from(blob_bytes: &[u8]) -> Self {
|
||||
// The number of bytes is as expected.
|
||||
assert_eq!(blob_bytes.len(), N_BLOB_BYTES);
|
||||
|
||||
// The version of the blob encoding was as expected, i.e. da-codec@v7.
|
||||
let version = blob_bytes[0];
|
||||
assert_eq!(version, DA_CODEC_VERSION);
|
||||
|
||||
// Calculate the unpadded size of the encoded payload.
|
||||
//
|
||||
// It should be at most the maximum number of bytes allowed.
|
||||
let unpadded_size = (blob_bytes[1] as usize) * 256 * 256
|
||||
+ (blob_bytes[2] as usize) * 256
|
||||
+ blob_bytes[3] as usize;
|
||||
assert!(unpadded_size <= N_BLOB_BYTES - 5);
|
||||
|
||||
// Whether the envelope represents encoded payload or raw payload.
|
||||
//
|
||||
// Is a boolean.
|
||||
let is_encoded = blob_bytes[4];
|
||||
assert!(is_encoded <= 1);
|
||||
|
||||
// The padded bytes are all 0s.
|
||||
for &padded_byte in blob_bytes.iter().skip(5 + unpadded_size) {
|
||||
assert_eq!(padded_byte, 0);
|
||||
}
|
||||
|
||||
Self {
|
||||
version,
|
||||
is_encoded,
|
||||
unpadded_bytes: blob_bytes[5..(5 + unpadded_size)].to_vec(),
|
||||
envelope_bytes: blob_bytes.to_vec(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl EnvelopeV7 {
|
||||
/// The verification of the EIP-4844 blob is done via point-evaluation precompile
|
||||
/// implemented in-circuit.
|
||||
///
|
||||
/// We require a random challenge point for this, and using Fiat-Shamir we compute it with
|
||||
/// every byte in the blob along with the blob's versioned hash, i.e. an identifier for its KZG
|
||||
/// commitment.
|
||||
///
|
||||
/// keccak256(
|
||||
/// keccak256(envelope) ||
|
||||
/// versioned hash
|
||||
/// )
|
||||
pub fn challenge_digest(&self, versioned_hash: B256) -> B256 {
|
||||
keccak256(
|
||||
std::iter::empty()
|
||||
.chain(keccak256(&self.envelope_bytes))
|
||||
.chain(versioned_hash.0)
|
||||
.collect::<Vec<u8>>(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents the batch data, eventually encoded into an [`EnvelopeV7`].
|
||||
///
|
||||
/// | Field | # Bytes | Type | Index |
|
||||
/// |------------------------|---------|----------------|---------------|
|
||||
/// | prevL1MessageQueueHash | 32 | bytes32 | 0 |
|
||||
/// | postL1MessageQueueHash | 32 | bytes32 | 32 |
|
||||
/// | initialL2BlockNumber | 8 | u64 | 64 |
|
||||
/// | numBlocks | 2 | u16 | 72 |
|
||||
/// | blockCtxs[0] | 52 | BlockContextV2 | 74 |
|
||||
/// | ... blockCtxs[i] ... | 52 | BlockContextV2 | 74 + 52*i |
|
||||
/// | blockCtxs[n-1] | 52 | BlockContextV2 | 74 + 52*(n-1) |
|
||||
/// | l2TxsData | dynamic | bytes | 74 + 52*n |
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct PayloadV7 {
|
||||
/// The version from da-codec, i.e. v7 in this case.
|
||||
///
|
||||
/// Note: This is not really a part of payload, simply coopied from the envelope for
|
||||
/// convenience.
|
||||
pub version: u8,
|
||||
/// Message queue hash at the end of the previous batch.
|
||||
pub prev_msg_queue_hash: B256,
|
||||
/// Message queue hash at the end of the current batch.
|
||||
pub post_msg_queue_hash: B256,
|
||||
/// The block number of the first block in the batch.
|
||||
pub initial_block_number: u64,
|
||||
/// The number of blocks in the batch.
|
||||
pub num_blocks: u16,
|
||||
/// The block contexts of each block in the batch.
|
||||
pub block_contexts: Vec<BlockContextV2>,
|
||||
/// The L2 tx data flattened over every tx in every block in the batch.
|
||||
pub tx_data: Vec<u8>,
|
||||
}
|
||||
|
||||
const INDEX_PREV_MSG_QUEUE_HASH: usize = 0;
|
||||
const INDEX_POST_MSG_QUEUE_HASH: usize = INDEX_PREV_MSG_QUEUE_HASH + 32;
|
||||
const INDEX_L2_BLOCK_NUM: usize = INDEX_POST_MSG_QUEUE_HASH + 32;
|
||||
const INDEX_NUM_BLOCKS: usize = INDEX_L2_BLOCK_NUM + 8;
|
||||
const INDEX_BLOCK_CTX: usize = INDEX_NUM_BLOCKS + 2;
|
||||
|
||||
impl From<&EnvelopeV7> for PayloadV7 {
|
||||
fn from(envelope: &EnvelopeV7) -> Self {
|
||||
// Conditionally decode depending on the flag set in the envelope.
|
||||
let payload_bytes = if envelope.is_encoded & 1 == 1 {
|
||||
vm_zstd::process(&envelope.unpadded_bytes)
|
||||
.expect("zstd decode should succeed")
|
||||
.decoded_data
|
||||
} else {
|
||||
envelope.unpadded_bytes.to_vec()
|
||||
};
|
||||
|
||||
// Sanity check on the payload size.
|
||||
assert!(payload_bytes.len() >= INDEX_BLOCK_CTX);
|
||||
let num_blocks = u16::from_be_bytes(
|
||||
payload_bytes[INDEX_NUM_BLOCKS..INDEX_BLOCK_CTX]
|
||||
.try_into()
|
||||
.expect("should not fail"),
|
||||
);
|
||||
assert!(payload_bytes.len() >= INDEX_BLOCK_CTX + ((num_blocks as usize) * SIZE_BLOCK_CTX));
|
||||
|
||||
// Deserialize the other fields.
|
||||
let prev_msg_queue_hash =
|
||||
B256::from_slice(&payload_bytes[INDEX_PREV_MSG_QUEUE_HASH..INDEX_POST_MSG_QUEUE_HASH]);
|
||||
let post_msg_queue_hash =
|
||||
B256::from_slice(&payload_bytes[INDEX_POST_MSG_QUEUE_HASH..INDEX_L2_BLOCK_NUM]);
|
||||
let initial_block_number = u64::from_be_bytes(
|
||||
payload_bytes[INDEX_L2_BLOCK_NUM..INDEX_NUM_BLOCKS]
|
||||
.try_into()
|
||||
.expect("should not fail"),
|
||||
);
|
||||
|
||||
// Deserialize block contexts depending on the number of blocks in the batch.
|
||||
let mut block_contexts = Vec::with_capacity(num_blocks as usize);
|
||||
for i in 0..num_blocks {
|
||||
let start = (i as usize) * SIZE_BLOCK_CTX + INDEX_BLOCK_CTX;
|
||||
block_contexts.push(BlockContextV2::from(
|
||||
&payload_bytes[start..(start + SIZE_BLOCK_CTX)],
|
||||
));
|
||||
}
|
||||
|
||||
// All remaining bytes are flattened L2 txs.
|
||||
let tx_data =
|
||||
payload_bytes[INDEX_BLOCK_CTX + ((num_blocks as usize) * SIZE_BLOCK_CTX)..].to_vec();
|
||||
|
||||
Self {
|
||||
version: envelope.version,
|
||||
prev_msg_queue_hash,
|
||||
post_msg_queue_hash,
|
||||
initial_block_number,
|
||||
num_blocks,
|
||||
block_contexts,
|
||||
tx_data,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PayloadV7 {
|
||||
/// Validate the payload contents.
|
||||
pub fn validate<'a>(
|
||||
&self,
|
||||
header: &BatchHeaderV7,
|
||||
chunk_infos: &'a [ChunkInfo],
|
||||
) -> (&'a ChunkInfo, &'a ChunkInfo) {
|
||||
// Get the first and last chunks' info, to construct the batch info.
|
||||
let (first_chunk, last_chunk) = (
|
||||
chunk_infos.first().expect("at least one chunk in batch"),
|
||||
chunk_infos.last().expect("at least one chunk in batch"),
|
||||
);
|
||||
|
||||
// version from payload is what's present in the on-chain batch header
|
||||
assert_eq!(self.version, header.version);
|
||||
|
||||
// number of blocks in the batch
|
||||
assert_eq!(
|
||||
usize::from(self.num_blocks),
|
||||
chunk_infos
|
||||
.iter()
|
||||
.flat_map(|chunk_info| &chunk_info.block_ctxs)
|
||||
.count()
|
||||
);
|
||||
assert_eq!(usize::from(self.num_blocks), self.block_contexts.len());
|
||||
|
||||
// the block number of the first block in the batch
|
||||
assert_eq!(self.initial_block_number, first_chunk.initial_block_number);
|
||||
|
||||
// prev message queue hash
|
||||
assert_eq!(self.prev_msg_queue_hash, first_chunk.prev_msg_queue_hash);
|
||||
|
||||
// post message queue hash
|
||||
assert_eq!(self.post_msg_queue_hash, last_chunk.post_msg_queue_hash);
|
||||
|
||||
// for each chunk, the tx_data_digest, i.e. keccak digest of the rlp-encoded L2 tx bytes
|
||||
// flattened over every tx in the chunk, should be re-computed and matched against the
|
||||
// public input of the chunk-circuit.
|
||||
//
|
||||
// first check that the total size of rlp-encoded tx data flattened over all txs in the
|
||||
// chunk is in fact the size available from the payload.
|
||||
assert_eq!(
|
||||
u64::try_from(self.tx_data.len()).expect("len(tx-data) is u64"),
|
||||
chunk_infos
|
||||
.iter()
|
||||
.map(|chunk_info| chunk_info.tx_data_length)
|
||||
.sum::<u64>(),
|
||||
);
|
||||
let mut index: usize = 0;
|
||||
for chunk_info in chunk_infos.iter() {
|
||||
let chunk_size = chunk_info.tx_data_length as usize;
|
||||
let chunk_tx_data_digest =
|
||||
keccak256(&self.tx_data.as_slice()[index..(index + chunk_size)]);
|
||||
assert_eq!(chunk_tx_data_digest, chunk_info.tx_data_digest);
|
||||
index += chunk_size;
|
||||
}
|
||||
|
||||
// for each block in the batch, check that the block context matches what's provided as
|
||||
// witness.
|
||||
for (block_ctx, witness_block_ctx) in self.block_contexts.iter().zip(
|
||||
chunk_infos
|
||||
.iter()
|
||||
.flat_map(|chunk_info| &chunk_info.block_ctxs),
|
||||
) {
|
||||
assert_eq!(block_ctx, witness_block_ctx);
|
||||
}
|
||||
|
||||
(first_chunk, last_chunk)
|
||||
}
|
||||
}
|
||||
57
common/types-rs/batch/src/witness.rs
Normal file
57
common/types-rs/batch/src/witness.rs
Normal file
@@ -0,0 +1,57 @@
|
||||
use crate::header::ReferenceHeader;
|
||||
use types_agg::{AggregationInput, ProgramCommitment, ProofCarryingWitness};
|
||||
use types_base::public_inputs::{ForkName, chunk::ChunkInfo};
|
||||
|
||||
/// Simply rewrap byte48 to avoid unnecessary dep
|
||||
pub type Bytes48 = [u8; 48];
|
||||
|
||||
/// Witness required by applying point evaluation
|
||||
#[derive(Clone, Debug, rkyv::Archive, rkyv::Deserialize, rkyv::Serialize)]
|
||||
#[rkyv(derive(Debug))]
|
||||
pub struct PointEvalWitness {
|
||||
/// kzg commitment
|
||||
#[rkyv()]
|
||||
pub kzg_commitment: Bytes48,
|
||||
/// kzg proof
|
||||
#[rkyv()]
|
||||
pub kzg_proof: Bytes48,
|
||||
}
|
||||
|
||||
/// Witness to the batch circuit.
|
||||
#[derive(Clone, Debug, rkyv::Archive, rkyv::Deserialize, rkyv::Serialize)]
|
||||
#[rkyv(derive(Debug))]
|
||||
pub struct BatchWitness {
|
||||
/// Flattened root proofs from all chunks in the batch.
|
||||
#[rkyv()]
|
||||
pub chunk_proofs: Vec<AggregationInput>,
|
||||
/// Chunk infos.
|
||||
#[rkyv()]
|
||||
pub chunk_infos: Vec<ChunkInfo>,
|
||||
/// Blob bytes.
|
||||
#[rkyv()]
|
||||
pub blob_bytes: Vec<u8>,
|
||||
/// Witness for point evaluation
|
||||
pub point_eval_witness: PointEvalWitness,
|
||||
/// Header for reference.
|
||||
#[rkyv()]
|
||||
pub reference_header: ReferenceHeader,
|
||||
/// The code version specify the chain spec
|
||||
#[rkyv()]
|
||||
pub fork_name: ForkName,
|
||||
}
|
||||
|
||||
impl ProofCarryingWitness for ArchivedBatchWitness {
|
||||
fn get_proofs(&self) -> Vec<AggregationInput> {
|
||||
self.chunk_proofs
|
||||
.iter()
|
||||
.map(|archived| AggregationInput {
|
||||
public_values: archived
|
||||
.public_values
|
||||
.iter()
|
||||
.map(|u32_le| u32_le.to_native())
|
||||
.collect(),
|
||||
commitment: ProgramCommitment::from(&archived.commitment),
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
21
common/types-rs/bundle/Cargo.toml
Normal file
21
common/types-rs/bundle/Cargo.toml
Normal file
@@ -0,0 +1,21 @@
|
||||
[package]
|
||||
name = "scroll-zkvm-circuit-input-types-bundle"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
homepage.workspace = true
|
||||
readme.workspace = true
|
||||
repository.workspace = true
|
||||
version = "0.2.0"
|
||||
|
||||
[dependencies]
|
||||
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
|
||||
rkyv.workspace = true
|
||||
serde.workspace = true
|
||||
itertools.workspace = true
|
||||
vm-zstd = { workspace = true }
|
||||
|
||||
types-base = { path = "../base", package = "scroll-zkvm-circuit-input-types-base"}
|
||||
types-agg = { path = "../aggregation", package = "scroll-zkvm-circuit-input-types-aggregation"}
|
||||
|
||||
[features]
|
||||
default = []
|
||||
2
common/types-rs/bundle/src/lib.rs
Normal file
2
common/types-rs/bundle/src/lib.rs
Normal file
@@ -0,0 +1,2 @@
|
||||
mod witness;
|
||||
pub use witness::{ArchivedBundleWitness, BundleWitness};
|
||||
30
common/types-rs/bundle/src/witness.rs
Normal file
30
common/types-rs/bundle/src/witness.rs
Normal file
@@ -0,0 +1,30 @@
|
||||
use types_agg::{AggregationInput, ProgramCommitment, ProofCarryingWitness};
|
||||
use types_base::public_inputs::batch::BatchInfo;
|
||||
|
||||
/// The witness for the bundle circuit.
|
||||
#[derive(Clone, Debug, rkyv::Archive, rkyv::Deserialize, rkyv::Serialize)]
|
||||
#[rkyv(derive(Debug))]
|
||||
pub struct BundleWitness {
|
||||
/// Batch proofs being aggregated in the bundle.
|
||||
#[rkyv()]
|
||||
pub batch_proofs: Vec<AggregationInput>,
|
||||
/// Public-input values for the corresponding batch proofs.
|
||||
#[rkyv()]
|
||||
pub batch_infos: Vec<BatchInfo>,
|
||||
}
|
||||
|
||||
impl ProofCarryingWitness for ArchivedBundleWitness {
|
||||
fn get_proofs(&self) -> Vec<AggregationInput> {
|
||||
self.batch_proofs
|
||||
.iter()
|
||||
.map(|archived| AggregationInput {
|
||||
public_values: archived
|
||||
.public_values
|
||||
.iter()
|
||||
.map(|u32_le| u32_le.to_native())
|
||||
.collect(),
|
||||
commitment: ProgramCommitment::from(&archived.commitment),
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
28
common/types-rs/chunk/Cargo.toml
Normal file
28
common/types-rs/chunk/Cargo.toml
Normal file
@@ -0,0 +1,28 @@
|
||||
[package]
|
||||
name = "scroll-zkvm-circuit-input-types-chunk"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
homepage.workspace = true
|
||||
readme.workspace = true
|
||||
repository.workspace = true
|
||||
version = "0.2.0"
|
||||
|
||||
[dependencies]
|
||||
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
|
||||
rkyv.workspace = true
|
||||
sbv-trie = { workspace = true }
|
||||
sbv-core = { workspace = true }
|
||||
sbv-primitives = { workspace = true }
|
||||
sbv-kv = { workspace = true }
|
||||
serde.workspace = true
|
||||
itertools.workspace = true
|
||||
|
||||
openvm = { workspace = true, features = ["std"] }
|
||||
openvm-rv32im-guest = { workspace = true }
|
||||
openvm-custom-insn = { workspace = true }
|
||||
|
||||
types-base = { path = "../base", package = "scroll-zkvm-circuit-input-types-base"}
|
||||
|
||||
[features]
|
||||
default = []
|
||||
openvm = ["sbv-trie/openvm", "sbv-core/openvm", "sbv-primitives/openvm"]
|
||||
167
common/types-rs/chunk/src/execute.rs
Normal file
167
common/types-rs/chunk/src/execute.rs
Normal file
@@ -0,0 +1,167 @@
|
||||
use sbv_core::{EvmDatabase, EvmExecutor};
|
||||
use sbv_primitives::{
|
||||
BlockWitness,
|
||||
chainspec::{
|
||||
BaseFeeParams, BaseFeeParamsKind, Chain, MAINNET,
|
||||
reth_chainspec::ChainSpec,
|
||||
scroll::{ScrollChainConfig, ScrollChainSpec},
|
||||
},
|
||||
ext::{BlockWitnessChunkExt, TxBytesHashExt},
|
||||
hardforks::SCROLL_DEV_HARDFORKS,
|
||||
types::{
|
||||
consensus::BlockHeader,
|
||||
reth::{Block, BlockWitnessRethExt, RecoveredBlock},
|
||||
scroll::ChunkInfoBuilder,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{ArchivedChunkWitness, make_providers, manually_drop_on_zkvm};
|
||||
use types_base::public_inputs::{
|
||||
ForkName,
|
||||
chunk::{BlockContextV2, ChunkInfo},
|
||||
};
|
||||
|
||||
fn block_ctxv2_from_block(value: &RecoveredBlock<Block>) -> BlockContextV2 {
|
||||
use alloy_primitives::U256;
|
||||
BlockContextV2 {
|
||||
timestamp: value.timestamp,
|
||||
gas_limit: value.gas_limit,
|
||||
base_fee: U256::from(value.base_fee_per_gas().expect("base_fee_expected")),
|
||||
num_txs: u16::try_from(value.body().transactions.len()).expect("num txs u16"),
|
||||
num_l1_msgs: u16::try_from(
|
||||
value
|
||||
.body()
|
||||
.transactions
|
||||
.iter()
|
||||
.filter(|tx| tx.is_l1_message())
|
||||
.count(),
|
||||
)
|
||||
.expect("num l1 msgs u16"),
|
||||
}
|
||||
}
|
||||
|
||||
type Witness = ArchivedChunkWitness;
|
||||
|
||||
pub fn execute(witness: &Witness) -> Result<ChunkInfo, String> {
|
||||
if witness.blocks.is_empty() {
|
||||
return Err("At least one witness must be provided in chunk mode".into());
|
||||
}
|
||||
if !witness.blocks.has_same_chain_id() {
|
||||
return Err("All witnesses must have the same chain id in chunk mode".into());
|
||||
}
|
||||
if !witness.blocks.has_seq_block_number() {
|
||||
return Err("All witnesses must have sequential block numbers in chunk mode".into());
|
||||
}
|
||||
// Get the blocks to build the basic chunk-info.
|
||||
let blocks = manually_drop_on_zkvm!(
|
||||
witness
|
||||
.blocks
|
||||
.iter()
|
||||
.map(|w| w.build_reth_block())
|
||||
.collect::<Result<Vec<RecoveredBlock<Block>>, _>>()
|
||||
.map_err(|e| e.to_string())?
|
||||
);
|
||||
let pre_state_root = witness.blocks[0].pre_state_root;
|
||||
|
||||
let fork_name = ForkName::from(&witness.fork_name);
|
||||
let chain = Chain::from_id(witness.blocks[0].chain_id());
|
||||
|
||||
// SCROLL_DEV_HARDFORKS will enable all forks
|
||||
let mut hardforks = (*SCROLL_DEV_HARDFORKS).clone();
|
||||
if fork_name == ForkName::EuclidV1 {
|
||||
// disable EuclidV2 fork for legacy chunk
|
||||
use sbv_primitives::{chainspec::ForkCondition, hardforks::ScrollHardfork};
|
||||
hardforks.insert(ScrollHardfork::EuclidV2, ForkCondition::Never);
|
||||
}
|
||||
|
||||
let inner = ChainSpec {
|
||||
chain,
|
||||
genesis_hash: Default::default(),
|
||||
genesis: Default::default(),
|
||||
genesis_header: Default::default(),
|
||||
paris_block_and_final_difficulty: Default::default(),
|
||||
hardforks,
|
||||
deposit_contract: Default::default(),
|
||||
base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()),
|
||||
prune_delete_limit: 20000,
|
||||
blob_params: Default::default(),
|
||||
};
|
||||
let config = ScrollChainConfig::mainnet();
|
||||
let chain_spec: ScrollChainSpec = ScrollChainSpec { inner, config };
|
||||
|
||||
let (code_db, nodes_provider, block_hashes) = make_providers(&witness.blocks);
|
||||
let nodes_provider = manually_drop_on_zkvm!(nodes_provider);
|
||||
|
||||
let prev_state_root = witness.blocks[0].pre_state_root();
|
||||
let mut db = manually_drop_on_zkvm!(
|
||||
EvmDatabase::new_from_root(code_db, prev_state_root, &nodes_provider, block_hashes)
|
||||
.map_err(|e| format!("failed to create EvmDatabase: {}", e))?
|
||||
);
|
||||
for block in blocks.iter() {
|
||||
let output = manually_drop_on_zkvm!(
|
||||
EvmExecutor::new(std::sync::Arc::new(chain_spec.clone()), &db, block)
|
||||
.execute()
|
||||
.map_err(|e| format!("failed to execute block: {}", e))?
|
||||
);
|
||||
db.update(&nodes_provider, output.state.state.iter())
|
||||
.map_err(|e| format!("failed to update db: {}", e))?;
|
||||
}
|
||||
|
||||
let post_state_root = db.commit_changes();
|
||||
|
||||
let withdraw_root = db
|
||||
.withdraw_root()
|
||||
.map_err(|e| format!("failed to get withdraw root: {}", e))?;
|
||||
|
||||
let mut rlp_buffer = manually_drop_on_zkvm!(Vec::with_capacity(2048));
|
||||
let (tx_data_length, tx_data_digest) = blocks
|
||||
.iter()
|
||||
.flat_map(|b| b.body().transactions.iter())
|
||||
.tx_bytes_hash_in(rlp_buffer.as_mut());
|
||||
let _ = tx_data_length;
|
||||
|
||||
let sbv_chunk_info = {
|
||||
#[allow(unused_mut)]
|
||||
let mut builder = ChunkInfoBuilder::new(&chain_spec, pre_state_root.into(), &blocks);
|
||||
if fork_name == ForkName::EuclidV2 {
|
||||
builder.set_prev_msg_queue_hash(witness.prev_msg_queue_hash.into());
|
||||
}
|
||||
builder.build(withdraw_root)
|
||||
};
|
||||
if post_state_root != sbv_chunk_info.post_state_root() {
|
||||
return Err(format!(
|
||||
"state root mismatch: expected={}, found={}",
|
||||
sbv_chunk_info.post_state_root(),
|
||||
post_state_root
|
||||
));
|
||||
}
|
||||
|
||||
let chunk_info = ChunkInfo {
|
||||
chain_id: sbv_chunk_info.chain_id(),
|
||||
prev_state_root: sbv_chunk_info.prev_state_root(),
|
||||
post_state_root: sbv_chunk_info.post_state_root(),
|
||||
data_hash: sbv_chunk_info
|
||||
.clone()
|
||||
.into_legacy()
|
||||
.map(|x| x.data_hash)
|
||||
.unwrap_or_default(),
|
||||
withdraw_root,
|
||||
tx_data_digest,
|
||||
tx_data_length: u64::try_from(tx_data_length).expect("tx_data_length: u64"),
|
||||
initial_block_number: blocks[0].header().number,
|
||||
prev_msg_queue_hash: witness.prev_msg_queue_hash.into(),
|
||||
post_msg_queue_hash: sbv_chunk_info
|
||||
.into_euclid_v2()
|
||||
.map(|x| x.post_msg_queue_hash)
|
||||
.unwrap_or_default(),
|
||||
block_ctxs: blocks.iter().map(block_ctxv2_from_block).collect(),
|
||||
};
|
||||
|
||||
openvm::io::println(format!("withdraw_root = {:?}", withdraw_root));
|
||||
openvm::io::println(format!("tx_bytes_hash = {:?}", tx_data_digest));
|
||||
|
||||
// We should never touch that lazy lock... Or else we introduce 40M useless cycles.
|
||||
assert!(std::sync::LazyLock::get(&MAINNET).is_none());
|
||||
|
||||
Ok(chunk_info)
|
||||
}
|
||||
11
common/types-rs/chunk/src/lib.rs
Normal file
11
common/types-rs/chunk/src/lib.rs
Normal file
@@ -0,0 +1,11 @@
|
||||
#![feature(lazy_get)]
|
||||
|
||||
mod utils;
|
||||
|
||||
mod witness;
|
||||
|
||||
pub use utils::make_providers;
|
||||
pub use witness::{ArchivedChunkWitness, ChunkWitness};
|
||||
|
||||
mod execute;
|
||||
pub use execute::execute;
|
||||
27
common/types-rs/chunk/src/public_inputs.rs
Normal file
27
common/types-rs/chunk/src/public_inputs.rs
Normal file
@@ -0,0 +1,27 @@
|
||||
use alloy_primitives::{B256, U256};
|
||||
use sbv_primitives::types::{
|
||||
consensus::BlockHeader,
|
||||
reth::{Block, RecoveredBlock},
|
||||
};
|
||||
|
||||
use types_base::public_inputs::chunk::BlockContextV2;
|
||||
|
||||
impl From<&RecoveredBlock<Block>> for BlockContextV2 {
|
||||
fn from(value: &RecoveredBlock<Block>) -> Self {
|
||||
Self {
|
||||
timestamp: value.timestamp,
|
||||
gas_limit: value.gas_limit,
|
||||
base_fee: U256::from(value.base_fee_per_gas().expect("base_fee_expected")),
|
||||
num_txs: u16::try_from(value.body().transactions.len()).expect("num txs u16"),
|
||||
num_l1_msgs: u16::try_from(
|
||||
value
|
||||
.body()
|
||||
.transactions
|
||||
.iter()
|
||||
.filter(|tx| tx.is_l1_message())
|
||||
.count(),
|
||||
)
|
||||
.expect("num l1 msgs u16"),
|
||||
}
|
||||
}
|
||||
}
|
||||
48
common/types-rs/chunk/src/utils.rs
Normal file
48
common/types-rs/chunk/src/utils.rs
Normal file
@@ -0,0 +1,48 @@
|
||||
use sbv_kv::nohash::NoHashMap;
|
||||
use sbv_primitives::{B256, BlockWitness, Bytes, ext::BlockWitnessExt};
|
||||
use sbv_trie::{BlockWitnessTrieExt, TrieNode};
|
||||
|
||||
type CodeDb = NoHashMap<B256, Bytes>;
|
||||
|
||||
type NodesProvider = NoHashMap<B256, TrieNode>;
|
||||
|
||||
type BlockHashProvider = sbv_kv::null::NullProvider;
|
||||
|
||||
pub fn make_providers<W: BlockWitness>(
|
||||
witnesses: &[W],
|
||||
) -> (CodeDb, NodesProvider, BlockHashProvider) {
|
||||
let code_db = {
|
||||
// build code db
|
||||
let num_codes = witnesses.iter().map(|w| w.codes_iter().len()).sum();
|
||||
let mut code_db =
|
||||
NoHashMap::<B256, Bytes>::with_capacity_and_hasher(num_codes, Default::default());
|
||||
witnesses.import_codes(&mut code_db);
|
||||
code_db
|
||||
};
|
||||
let nodes_provider = {
|
||||
let num_states = witnesses.iter().map(|w| w.states_iter().len()).sum();
|
||||
let mut nodes_provider =
|
||||
NoHashMap::<B256, TrieNode>::with_capacity_and_hasher(num_states, Default::default());
|
||||
witnesses.import_nodes(&mut nodes_provider).unwrap();
|
||||
nodes_provider
|
||||
};
|
||||
let block_hashes = sbv_kv::null::NullProvider;
|
||||
|
||||
(code_db, nodes_provider, block_hashes)
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64")))]
|
||||
macro_rules! manually_drop_on_zkvm {
|
||||
($e:expr) => {
|
||||
std::mem::ManuallyDrop::new($e)
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
#[cfg(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64"))]
|
||||
macro_rules! manually_drop_on_zkvm {
|
||||
($e:expr) => {
|
||||
$e
|
||||
};
|
||||
}
|
||||
71
common/types-rs/chunk/src/witness.rs
Normal file
71
common/types-rs/chunk/src/witness.rs
Normal file
@@ -0,0 +1,71 @@
|
||||
use alloy_primitives::B256;
|
||||
use sbv_primitives::types::BlockWitness;
|
||||
use std::collections::HashSet;
|
||||
|
||||
use types_base::public_inputs::ForkName;
|
||||
|
||||
/// The witness type accepted by the chunk-circuit.
|
||||
#[derive(
|
||||
Clone,
|
||||
Debug,
|
||||
serde::Deserialize,
|
||||
serde::Serialize,
|
||||
rkyv::Archive,
|
||||
rkyv::Deserialize,
|
||||
rkyv::Serialize,
|
||||
)]
|
||||
#[rkyv(derive(Debug))]
|
||||
pub struct ChunkWitness {
|
||||
/// The block witness for each block in the chunk.
|
||||
pub blocks: Vec<BlockWitness>,
|
||||
/// The on-chain rolling L1 message queue hash before enqueueing any L1 msg tx from the chunk.
|
||||
pub prev_msg_queue_hash: B256,
|
||||
/// The code version specify the chain spec
|
||||
pub fork_name: ForkName,
|
||||
}
|
||||
|
||||
impl ChunkWitness {
|
||||
pub fn new(blocks: &[BlockWitness], prev_msg_queue_hash: B256, fork_name: ForkName) -> Self {
|
||||
let num_codes = blocks.iter().map(|w| w.codes.len()).sum();
|
||||
let num_states = blocks.iter().map(|w| w.states.len()).sum();
|
||||
let mut codes = HashSet::with_capacity(num_codes);
|
||||
let mut states = HashSet::with_capacity(num_states);
|
||||
|
||||
let blocks = blocks
|
||||
.iter()
|
||||
.map(|block| BlockWitness {
|
||||
chain_id: block.chain_id,
|
||||
header: block.header.clone(),
|
||||
pre_state_root: block.pre_state_root,
|
||||
transaction: block.transaction.clone(),
|
||||
withdrawals: block.withdrawals.clone(),
|
||||
states: block
|
||||
.states
|
||||
.iter()
|
||||
.filter(|s| states.insert(*s))
|
||||
.cloned()
|
||||
.collect(),
|
||||
codes: block
|
||||
.codes
|
||||
.iter()
|
||||
.filter(|c| codes.insert(*c))
|
||||
.cloned()
|
||||
.collect(),
|
||||
})
|
||||
.collect();
|
||||
|
||||
Self {
|
||||
blocks,
|
||||
prev_msg_queue_hash,
|
||||
fork_name,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_v1(blocks: &[BlockWitness]) -> Self {
|
||||
Self::new(blocks, Default::default(), ForkName::EuclidV1)
|
||||
}
|
||||
|
||||
pub fn new_v2(blocks: &[BlockWitness], prev_msg_queue_hash: B256) -> Self {
|
||||
Self::new(blocks, prev_msg_queue_hash, ForkName::EuclidV2)
|
||||
}
|
||||
}
|
||||
21
common/types-rs/src/lib.rs
Normal file
21
common/types-rs/src/lib.rs
Normal file
@@ -0,0 +1,21 @@
|
||||
// re-export for a compatible interface with old circuit/types for prover
|
||||
|
||||
pub mod bundle {
|
||||
pub use types_base::public_inputs::bundle::{BundleInfo, BundleInfoV1, BundleInfoV2};
|
||||
pub use types_bundle::*;
|
||||
}
|
||||
|
||||
pub mod batch {
|
||||
pub use types_base::public_inputs::batch::{ArchivedBatchInfo, BatchInfo, VersionedBatchInfo};
|
||||
pub use types_batch::*;
|
||||
}
|
||||
|
||||
pub mod chunk {
|
||||
pub use types_base::public_inputs::chunk::{
|
||||
ArchivedChunkInfo, BlockContextV2, ChunkInfo, SIZE_BLOCK_CTX, VersionedChunkInfo,
|
||||
};
|
||||
pub use types_chunk::*;
|
||||
}
|
||||
|
||||
pub use types_agg;
|
||||
pub use types_base::{public_inputs, utils};
|
||||
@@ -276,8 +276,8 @@ const (
|
||||
SenderTypeFinalizeBatch
|
||||
// SenderTypeL1GasOracle indicates a sender from L2 responsible for updating L1 gas prices.
|
||||
SenderTypeL1GasOracle
|
||||
// SenderTypeL2GasOracle indicates a sender from L1 responsible for updating L2 gas prices.
|
||||
SenderTypeL2GasOracle
|
||||
// SenderTypeL2GasOracleDeprecated indicates a sender from L1 responsible for updating L2 gas prices, which is deprecated.
|
||||
SenderTypeL2GasOracleDeprecated
|
||||
)
|
||||
|
||||
// String returns a string representation of the SenderType.
|
||||
@@ -289,8 +289,8 @@ func (t SenderType) String() string {
|
||||
return "SenderTypeFinalizeBatch"
|
||||
case SenderTypeL1GasOracle:
|
||||
return "SenderTypeL1GasOracle"
|
||||
case SenderTypeL2GasOracle:
|
||||
return "SenderTypeL2GasOracle"
|
||||
case SenderTypeL2GasOracleDeprecated:
|
||||
return "SenderTypeL2GasOracleDeprecated"
|
||||
default:
|
||||
return fmt.Sprintf("Unknown SenderType (%d)", int32(t))
|
||||
}
|
||||
|
||||
@@ -173,9 +173,9 @@ func TestSenderType(t *testing.T) {
|
||||
"SenderTypeL1GasOracle",
|
||||
},
|
||||
{
|
||||
"SenderTypeL2GasOracle",
|
||||
SenderTypeL2GasOracle,
|
||||
"SenderTypeL2GasOracle",
|
||||
"SenderTypeL2GasOracleDeprecated",
|
||||
SenderTypeL2GasOracleDeprecated,
|
||||
"SenderTypeL2GasOracleDeprecated",
|
||||
},
|
||||
{
|
||||
"Invalid Value",
|
||||
|
||||
1
common/types/message/batch-proof-sample.json
Normal file
1
common/types/message/batch-proof-sample.json
Normal file
File diff suppressed because one or more lines are too long
1
common/types/message/batch-task-sample.json
Normal file
1
common/types/message/batch-task-sample.json
Normal file
File diff suppressed because one or more lines are too long
1
common/types/message/batch-task-test-out.json
Normal file
1
common/types/message/batch-task-test-out.json
Normal file
File diff suppressed because one or more lines are too long
1
common/types/message/bundle-proof-sample.json
Normal file
1
common/types/message/bundle-proof-sample.json
Normal file
@@ -0,0 +1 @@
|
||||
{"metadata":{"bundle_info":{"chain_id":333333,"msg_queue_hash":"0x0101010101010101010101010101010101010101010101010101010101010101","num_batches":2,"prev_state_root":"0x5302a56cbbec7d14d48d592b805d4ec3c7011439dfaa90d44deee02a9326d203","prev_batch_hash":"0xabacadaeaf000000000000000000000000000000000000000000000000000000","post_state_root":"0xaf6696afb2e11052490051f0f9f6444be6e9f5bb82beb3c3dae846cfa59ed6e0","batch_hash":"0xf0ee5d6b9cd739eb1ff816a58486af8b08d42a8c50d6e5998e7a3947c7aae2a9","withdraw_root":"0x0000000000000000000000000000000000000000000000000000000000000000"},"bundle_pi_hash":"0x2028510c403837c6ed77660fd92814ba61d7b746e7268cc8dfc14d163d45e6bd"},"proof":{"proof":"CfpNiL6UpegsK3VcoAj9ey5daMbZDFiF1XpCKvrOeN0MPPLNCDrllJL/gN0E3qmq20kGLYpBQ8aZ3sgUrxpSyA+9GKK8NhZoIM75adOnV8AYCLXpmxfS81MxIai/+ghxDIUvJQJVgWKJPsMQp4lO/Qltc4eCNWeoR2jHua/VzSASQXDDQ5ozD6i448TVkjKiyEcHwFFMMuOebFUzDc85hA4AJGM1T9bPl5VVQkEpijdNF+1lzUfi27U0XRQbYz8aE8hiCLxR8Z2bHg65dvfa+TsaDI8oAlz33Q1yIadZBtceKsH53P5u6vwWp0dQvw8DGNv8G5zvsayHPNCvy4xz8hRT3E4G0Ome8voqqOxrc/A8u2fE6LoXKswvU6Uquv+LHwGMbTugRvQ0BBXlLQ29Hvj18rDzS6ll0OnEcRiaaEkGOZy7Kq1PGiF7ZxMZsJYCbhyPgg4TKpesYDUJygEN0iGNX90dmyzGLTTgJATMYBGD2U+XP/T+UOMbxFTl3TFNHWlCPhEAu5LBwZ0pD3XV1xNW1iUqwTSfg7Qz1SOUYkot10Q8EAKeXk3hluHK+nSQhOMfWC4tnvfQdMqepfymwwArzA/9GMA/Two7yuzgCz7vHb+56YKPZiDrh4cqSvpVI92hCF8GWHaTqWDR0fikx2Y7GLX8YBM3Rx8reQE+LYYGEJHJzD4cIc0MKiuet605ZPSAaKpb8JM2EgrCAfw+QAhBiwXQ3HOQkrt17tzqNJH7IeHF761v43D9w+IeqvetKEgYXEH3fHmN00dLV2Uws8C4956qze+SG81ScnZzbrIeiO9lnmUXSFzrL40K+3NqCZcFnfLhVidyEJepzJi50yOK5BUJdMFdNtvHtprICqLKyb7aRg39qoZ7RqyJTg5nAjQQBGelvRu/AN6zdyxja73Jo5gEovdIiMybi/IhfMwKGWgiRaOGxyHx9KZ/ZA/w7r3rce6vuDsUhk5hsgVj4wUW3BqoZ8iRIH8X6AjK1xli+S/HfgAkfmUVwNNBOcgYEcrqEbswsfYKOcoFn71DISLK0jmB44LTNyGxoWBMpIAOf/gGhQSNk0ojd4n4UXxShsqmJ57Kudw/mGimMm+Crhr5asxeiFH0eJNBgUEXDuveqE1d20UTRJ1UJ/hZGomsDLebTojSTtsMLWTtx/4Mqg+g3Odte1WKN6CgxF4kGRcW2tE3D1jiBys5FTHMAhmka3mUBwlciT7syDWBDlYVuSmwppCghdBMQfQL4s3Uh0vRG28LkU+UXcwYXwh3UK6cA1bBnKfAa9k7P5BuMxVh8p6he6EZr0kGNjKGPSxuVxgczO/C32GP+HVVsWlIMNmgB4GeMHIN3yJampOrLZIMlQuP9d9kOicvRia1ge5sFtT+Vmthnp1F7sR3P+ADB/WxKSxVbiLaVBo+zm/rZbyM9vU0CVLD69lzPC6xKcFkxewlWJU6o7rOz1qzh47fT+8qUcVYfpCSEtT/U8eX2JFnXCb0PPXWivofI28tnsuS8GjwUiOyzCoxxuIEOyz1HNRXBcO2dSKR2qM41zUs0btA2JkA3hTVW8YWn8czHxrZyooooaumzbUPQBOqO3fewnLLyQ9etBcjZJ8Xm/B1EBk9cRPWDjgx5Hq8C0soA+EsoNoaSQJu67HuFTRd/OWvKSliCoj1XVcqBobnJWmTU7kAgi73pMaq/G4ot2rRFSL9MbkJgHCyxBkrl9nkCVUJC5GphsrDS5P5/bmRS3iTNdxiXAzdwOIQqJpEO54oN+3CHZuZuUOgCcWTI3uxWq/gBDJrBTsv8EUqtNQJve0qwIh2PUuJl5DIqF0CvswN649gywc=","instances":"AAAAAAAAAAAAAAAAAAAAAAAAAAAApvhdIlw19IwSvukAAAAAAAAAAAAAAAAAAAAAAAAAAAAl72fyrHk3TaguHQAAAAAAAAAAAAAAAAAAAAAAAAAAAAALh9HvEG69AvDlAAAAAAAAAAAAAAAAAAAAAAAAAAAAkGY9R6S+t36FIrAAAAAAAAAAAAAAAAAAAAAAAAAAAACoNqt7QwZoXUpj/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAdaREhypq22OmnAAAAAAAAAAAAAAAAAAAAAAAAAAAAOXf2Vj0jGD1q4xQAAAAAAAAAAAAAAAAAAAAAAAAAAADZYAdKTg7m4hBHGgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAll4nXKE0us1IMAAAAAAAAAAAAAAAAAAAAAAAAAAAAFfnJ8YXlwczTsyEAAAAAAAAAAAAAAAAAAAAAAAAAAAArXqULkWYvNST9PQAAAAAAAAAAAAAAAAAAAAAAAAAAAAArqteSdJMySnbMAC5TUWus+SXtvRWUNmCSMiMb4aZvb4hpJ5yXqjtih6gAIn9WQUOx/Z/rbbdComU0hCSwKwrewQgB3KolXKensAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA7QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
B3AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC6AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC3AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA5wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADfAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAWAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAL0="},"vk":"AhYAAAAABAAAAD2PumVP6pqldS0PKWW8Q4IvnE/rvtm5/2fXvG196sYhKtVFtg+WFGYJrU+eMUKZVjPurMpM8kbYiXvE18bnsU4Nu8s47Xabxy0EViND1dzsu5HicdAWl0xG5C+VpO2faJdK4nGwtD4WHtbdqWY72nSY5aKSDxAYO85vLy+9cJZlQsMNQlhTi/2q9PYQpC4D3Uf8E+yZ7gvLhd6cFdErlg4Oq/nthQkfxPAarVYLUFNGW80SgIloMDhutrky34D+Csw8T9j5UXpHz3K/2yuVSXK6OvMG4/058TXG09qKgXYP","git_version":"9f48bc4"}
|
||||
1
common/types/message/chunk-proof-sample.json
Normal file
1
common/types/message/chunk-proof-sample.json
Normal file
File diff suppressed because one or more lines are too long
@@ -1,20 +1,20 @@
|
||||
package message
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
)
|
||||
|
||||
// RespStatus represents status code from prover to scroll
|
||||
type RespStatus uint32
|
||||
|
||||
const (
|
||||
// StatusOk means generate proof success
|
||||
StatusOk RespStatus = iota
|
||||
// StatusProofError means generate proof failed
|
||||
StatusProofError
|
||||
EuclidFork = "euclid"
|
||||
EuclidV2Fork = "euclidV2"
|
||||
|
||||
EuclidV2ForkNameForProver = "euclidv2"
|
||||
)
|
||||
|
||||
// ProofType represents the type of task.
|
||||
@@ -44,119 +44,263 @@ const (
|
||||
ProofTypeBundle
|
||||
)
|
||||
|
||||
// ChunkTaskDetail is a type containing ChunkTask detail.
|
||||
// ChunkTaskDetail is a type containing ChunkTask detail for chunk task.
|
||||
type ChunkTaskDetail struct {
|
||||
BlockHashes []common.Hash `json:"block_hashes"`
|
||||
// use one of the string of EuclidFork / EuclidV2Fork
|
||||
ForkName string `json:"fork_name"`
|
||||
BlockHashes []common.Hash `json:"block_hashes"`
|
||||
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
|
||||
}
|
||||
|
||||
// it is a hex encoded big with fixed length on 48 bytes
|
||||
type Byte48 struct {
|
||||
hexutil.Big
|
||||
}
|
||||
|
||||
func (e Byte48) MarshalText() ([]byte, error) {
|
||||
i := e.ToInt()
|
||||
// overrite encode big
|
||||
if sign := i.Sign(); sign < 0 {
|
||||
// sanity check
|
||||
return nil, errors.New("Byte48 must be positive integer")
|
||||
} else {
|
||||
s := i.Text(16)
|
||||
if len(s) > 96 {
|
||||
return nil, errors.New("integer Exceed 384bit")
|
||||
}
|
||||
return []byte(fmt.Sprintf("0x%0*s", 96, s)), nil
|
||||
}
|
||||
}
|
||||
|
||||
func isString(input []byte) bool {
|
||||
return len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"'
|
||||
}
|
||||
|
||||
// hexutil.Big has limition of 256bit so we have to override it ...
|
||||
func (e *Byte48) UnmarshalJSON(input []byte) error {
|
||||
if !isString(input) {
|
||||
return errors.New("not hex string")
|
||||
}
|
||||
|
||||
b, err := hexutil.Decode(string(input[1 : len(input)-1]))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(b) != 48 {
|
||||
return fmt.Errorf("not a 48 bytes hex string: %d", len(b))
|
||||
}
|
||||
var dec big.Int
|
||||
dec.SetBytes(b)
|
||||
*e = Byte48{(hexutil.Big)(dec)}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BatchTaskDetail is a type containing BatchTask detail.
type BatchTaskDetail struct {
	ChunkInfos  []*ChunkInfo  `json:"chunk_infos"`
	ChunkProofs []*ChunkProof `json:"chunk_proofs"`
	BatchHeader interface{}   `json:"batch_header"`
	BlobBytes   []byte        `json:"blob_bytes"`
	// use one of the strings EuclidFork / EuclidV2Fork
	ForkName        string              `json:"fork_name"`
	ChunkInfos      []*ChunkInfo        `json:"chunk_infos"`
	ChunkProofs     []*OpenVMChunkProof `json:"chunk_proofs"`
	BatchHeader     interface{}         `json:"batch_header"`
	BlobBytes       []byte              `json:"blob_bytes"`
	KzgProof        Byte48              `json:"kzg_proof,omitempty"`
	KzgCommitment   Byte48              `json:"kzg_commitment,omitempty"`
	ChallengeDigest common.Hash         `json:"challenge_digest,omitempty"`
}
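To see what actually goes over the wire, a sketch of marshaling a batch task. The field values are illustrative only, the real BatchHeader is a codec-versioned structure elided here as a plain map, and the import path is an assumption as before.

package main

import (
	"encoding/json"
	"fmt"

	"scroll-tech/common/types/message"
)

func main() {
	task := message.BatchTaskDetail{
		ForkName:    message.EuclidV2Fork,
		ChunkInfos:  []*message.ChunkInfo{{ChainID: 534352}}, // illustrative chain id
		BatchHeader: map[string]any{"version": 7},            // placeholder header
	}

	payload, err := json.Marshal(&task)
	if err != nil {
		panic(err)
	}
	// Note that kzg_proof / kzg_commitment serialize as fixed-width
	// 48-byte hex strings even when zero, via Byte48.MarshalText.
	fmt.Printf("%s\n", payload)
}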
// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
type BundleTaskDetail struct {
	BatchProofs []*BatchProof `json:"batch_proofs"`
	// use one of the strings EuclidFork / EuclidV2Fork
	ForkName    string              `json:"fork_name"`
	BatchProofs []*OpenVMBatchProof `json:"batch_proofs"`
	BundleInfo  *OpenVMBundleInfo   `json:"bundle_info,omitempty"`
}

// ChunkInfo is for calculating pi_hash for a chunk
type ChunkInfo struct {
	ChainID            uint64           `json:"chain_id"`
	PrevStateRoot      common.Hash      `json:"prev_state_root"`
	PostStateRoot      common.Hash      `json:"post_state_root"`
	WithdrawRoot       common.Hash      `json:"withdraw_root"`
	DataHash           common.Hash      `json:"data_hash"`
	IsPadding          bool             `json:"is_padding"`
	TxBytes            []byte           `json:"tx_bytes"`
	TxBytesHash        common.Hash      `json:"tx_data_digest"`
	PrevMsgQueueHash   common.Hash      `json:"prev_msg_queue_hash"`
	PostMsgQueueHash   common.Hash      `json:"post_msg_queue_hash"`
	TxDataLength       uint64           `json:"tx_data_length"`
	InitialBlockNumber uint64           `json:"initial_block_number"`
	BlockCtxs          []BlockContextV2 `json:"block_ctxs"`
}

// BlockContextV2 is the block context for euclid v2
type BlockContextV2 struct {
	Timestamp uint64      `json:"timestamp"`
	BaseFee   hexutil.Big `json:"base_fee"`
	GasLimit  uint64      `json:"gas_limit"`
	NumTxs    uint16      `json:"num_txs"`
	NumL1Msgs uint16      `json:"num_l1_msgs"`
}
// OpenVMProof is a flattened VM proof
type OpenVMProof struct {
	Proof        []byte `json:"proofs"`
	PublicValues []byte `json:"public_values"`
}

// OpenVMEvmProof is a flattened EVM proof
type OpenVMEvmProof struct {
	Proof     []byte `json:"proof"`
	Instances []byte `json:"instances"`
}

// OpenVMChunkProof includes the proof info that is required for chunk verification and rollup.
type OpenVMChunkProof struct {
	MetaData struct {
		ChunkInfo *ChunkInfo `json:"chunk_info"`
	} `json:"metadata"`

	VmProof    *OpenVMProof `json:"proof"`
	Vk         []byte       `json:"vk,omitempty"`
	GitVersion string       `json:"git_version,omitempty"`
}

// Proof returns the re-marshaled inner VM proof.
func (p *OpenVMChunkProof) Proof() []byte {
	proofJson, err := json.Marshal(p.VmProof)
	if err != nil {
		panic(fmt.Sprint("marshaling error", err))
	}

	return proofJson
}
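The chunk-proof-sample.json file added earlier in this diff can be decoded straight into OpenVMChunkProof. A sketch, with the module layout assumed as before:

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"scroll-tech/common/types/message"
)

func main() {
	data, err := os.ReadFile("common/types/message/chunk-proof-sample.json")
	if err != nil {
		panic(err)
	}
	var proof message.OpenVMChunkProof
	if err := json.Unmarshal(data, &proof); err != nil {
		panic(err)
	}
	// The git_version at the tail of the sample ("9f48bc4") ends up here.
	fmt.Println(proof.GitVersion)
}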
// OpenVMBatchInfo is for calculating pi_hash for batch header
type OpenVMBatchInfo struct {
	ParentBatchHash  common.Hash `json:"parent_batch_hash"`
	ParentStateRoot  common.Hash `json:"parent_state_root"`
	StateRoot        common.Hash `json:"state_root"`
	WithdrawRoot     common.Hash `json:"withdraw_root"`
	BatchHash        common.Hash `json:"batch_hash"`
	ChainID          uint64      `json:"chain_id"`
	PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
	PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
}

// OpenVMBatchProof includes the proof info that is required for batch verification and rollup.
type OpenVMBatchProof struct {
	MetaData struct {
		BatchInfo *OpenVMBatchInfo `json:"batch_info"`
		BatchHash common.Hash      `json:"batch_hash"`
	} `json:"metadata"`

	VmProof    *OpenVMProof `json:"proof"`
	Vk         []byte       `json:"vk,omitempty"`
	GitVersion string       `json:"git_version,omitempty"`
}

// Proof returns the re-marshaled inner VM proof.
func (p *OpenVMBatchProof) Proof() []byte {
	proofJson, err := json.Marshal(p.VmProof)
	if err != nil {
		panic(fmt.Sprint("marshaling error", err))
	}

	return proofJson
}

// SanityCheck checks whether an OpenVMBatchProof is in a legal format
func (ap *OpenVMBatchProof) SanityCheck() error {
	if ap == nil {
		return errors.New("agg_proof is nil")
	}
	if ap.MetaData.BatchInfo == nil {
		return errors.New("batch info not ready")
	}

	if ap.VmProof == nil {
		return errors.New("proof not ready")
	} else {
		if len(ap.Vk) == 0 {
			return errors.New("vk not ready")
		}
		pf := ap.VmProof
		if pf.Proof == nil {
			return errors.New("proof data not ready")
		}
		if len(pf.PublicValues) == 0 {
			return errors.New("proof public value not ready")
		}
	}

	return nil
}
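A likely call pattern on the coordinator side, sketched under the same import assumptions; this wrapper is hypothetical and not part of the diff itself:

package coordinator

import (
	"encoding/json"
	"fmt"

	"scroll-tech/common/types/message"
)

// validateBatchProof is a hypothetical helper showing the intended
// order: unmarshal first, then SanityCheck before the proof is trusted.
func validateBatchProof(raw []byte) (*message.OpenVMBatchProof, error) {
	var proof message.OpenVMBatchProof
	if err := json.Unmarshal(raw, &proof); err != nil {
		return nil, fmt.Errorf("malformed proof payload: %w", err)
	}
	if err := proof.SanityCheck(); err != nil {
		return nil, fmt.Errorf("proof failed sanity check: %w", err)
	}
	return &proof, nil
}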
// OpenVMBundleInfo is for calculating pi_hash for bundle header
type OpenVMBundleInfo struct {
	ChainID       uint64      `json:"chain_id"`
	PrevStateRoot common.Hash `json:"prev_state_root"`
	PostStateRoot common.Hash `json:"post_state_root"`
	WithdrawRoot  common.Hash `json:"withdraw_root"`
	DataHash      common.Hash `json:"data_hash"`
	IsPadding     bool        `json:"is_padding"`
	TxBytes       []byte      `json:"tx_bytes"`
	NumBatches    uint32      `json:"num_batches"`
	PrevBatchHash common.Hash `json:"prev_batch_hash"`
	BatchHash     common.Hash `json:"batch_hash"`
	MsgQueueHash  common.Hash `json:"msg_queue_hash"`
}
// SubCircuitRowUsage tracing info added in v0.11.0rc8
type SubCircuitRowUsage struct {
	Name      string `json:"name"`
	RowNumber uint64 `json:"row_number"`
// OpenVMBundleProof includes the proof info that is required for verification of a bundle of batch proofs.
type OpenVMBundleProof struct {
	MetaData struct {
		BundleInfo    *OpenVMBundleInfo `json:"bundle_info"`
		BunndlePIHash common.Hash       `json:"bundle_pi_hash"`
	} `json:"metadata"`

	EvmProof   *OpenVMEvmProof `json:"proof"`
	Vk         []byte          `json:"vk,omitempty"`
	GitVersion string          `json:"git_version,omitempty"`
}
// ChunkProof includes the proof info that is required for chunk verification and rollup.
type ChunkProof struct {
	StorageTrace []byte `json:"storage_trace,omitempty"`
	Protocol     []byte `json:"protocol"`
	Proof        []byte `json:"proof"`
	Instances    []byte `json:"instances"`
	Vk           []byte `json:"vk"`
	// cross-reference between coordinator computation and prover computation
	ChunkInfo  *ChunkInfo           `json:"chunk_info,omitempty"`
	GitVersion string               `json:"git_version,omitempty"`
	RowUsages  []SubCircuitRowUsage `json:"row_usages,omitempty"`
}

// BatchProof includes the proof info that is required for batch verification and rollup.
type BatchProof struct {
	Protocol  []byte `json:"protocol"`
	Proof     []byte `json:"proof"`
	Instances []byte `json:"instances"`
	Vk        []byte `json:"vk"`
	// cross-reference between coordinator computation and prover computation
	BatchHash  common.Hash `json:"batch_hash"`
	GitVersion string      `json:"git_version,omitempty"`
}

// SanityCheck checks whether a BatchProof is in a legal format
func (ap *BatchProof) SanityCheck() error {
	if ap == nil {
		return errors.New("agg_proof is nil")
	}

	if len(ap.Proof) == 0 {
		return errors.New("proof not ready")
	}

	if len(ap.Proof)%32 != 0 {
		return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.Proof))
	}

	if len(ap.Instances) == 0 {
		return errors.New("instance not ready")
	}

	if len(ap.Vk) == 0 {
		return errors.New("vk not ready")
	}

	return nil
}
// BundleProof includes the proof info that is required for verification of a bundle of batch proofs.
type BundleProof struct {
	Proof     []byte `json:"proof"`
	Instances []byte `json:"instances"`
	Vk        []byte `json:"vk"`
	// cross-reference between coordinator computation and prover computation
	GitVersion string `json:"git_version,omitempty"`
// Proof returns the proof bytes that are eventually passed as calldata for on-chain bundle proof verification.
//
// There are 12 accumulators for a SNARK proof. The accumulators are the first 12 elements of the EvmProof's
// Instances field. The remaining items in Instances are supplied on-chain by the ScrollChain contract.
//
// The structure of these bytes is:
// | byte index start | byte length | value    | description       |
// |------------------|-------------|----------|-------------------|
// | 0                | 32          | accs[0]  | accumulator 1     |
// | 32               | 32          | accs[1]  | accumulator 2     |
// | 32*i ...         | 32          | accs[i]  | accumulator i ... |
// | 352              | 32          | accs[11] | accumulator 12    |
// | 384              | dynamic     | proof    | proof bytes       |
func (p *OpenVMBundleProof) Proof() []byte {
	proofBytes := make([]byte, 0, 384+len(p.EvmProof.Proof))
	proofBytes = append(proofBytes, p.EvmProof.Instances[:384]...)
	return append(proofBytes, p.EvmProof.Proof...)
}
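The table above fixes the calldata layout: 12 accumulators of 32 bytes each (384 bytes) followed by the proof. As a cross-check, here is a hypothetical inverse that splits such calldata back apart; it is not part of this diff:

package main

import "fmt"

// splitBundleCalldata undoes the concatenation in OpenVMBundleProof.Proof:
// bytes [0, 384) hold the 12 accumulators, the remainder is the proof.
func splitBundleCalldata(calldata []byte) (accs [12][32]byte, proof []byte, err error) {
	if len(calldata) < 384 {
		return accs, nil, fmt.Errorf("calldata too short: %d bytes", len(calldata))
	}
	for i := 0; i < 12; i++ {
		copy(accs[i][:], calldata[32*i:32*(i+1)]) // accumulator i+1
	}
	return accs, calldata[384:], nil
}

func main() {
	data := make([]byte, 384+64) // 12 accumulators + a 64-byte dummy proof
	accs, proof, _ := splitBundleCalldata(data)
	fmt.Println(len(accs), len(proof)) // 12 64
}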
// SanityCheck checks whether a BundleProof is in a legal format
func (ap *BundleProof) SanityCheck() error {
func (ap *OpenVMBundleProof) SanityCheck() error {
	if ap == nil {
		return errors.New("agg_proof is nil")
	}

	if len(ap.Proof) == 0 {
	if ap.MetaData.BundleInfo == nil {
		return errors.New("bundle info not ready")
	}

	if ap.EvmProof == nil {
		return errors.New("proof not ready")
	}
	} else {
		if len(ap.Vk) == 0 {
			return errors.New("vk not ready")
		}
		pf := ap.EvmProof
		if len(pf.Proof)%32 != 0 {
			return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(pf.Proof))
		}

	if len(ap.Proof)%32 != 0 {
		return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.Proof))
	}

	if len(ap.Instances) == 0 {
		return errors.New("instance not ready")
	}

	if len(ap.Vk) == 0 {
		return errors.New("vk not ready")
		if len(pf.Instances) == 0 {
			return errors.New("instance not ready")
		}
	}

	return nil
22 common/types/message/message_test.go Normal file
@@ -0,0 +1,22 @@
package message

import (
	"fmt"
	"testing"
)

func TestBytes48(t *testing.T) {
	ti := &Byte48{}
	ti.UnmarshalText([]byte("0x1"))
	if s, err := ti.MarshalText(); err == nil {
		if len(s) != 98 {
			panic(fmt.Sprintf("wrong str: %s", s))
		}
	}
	ti.UnmarshalText([]byte("0x0"))
	if s, err := ti.MarshalText(); err == nil {
		if len(s) != 98 {
			panic(fmt.Sprintf("wrong str: %s", s))
		}
	}
}
@@ -20,7 +20,11 @@ var (
	}
	// RollupRelayerFlags contains flags only used in rollup-relayer
	RollupRelayerFlags = []cli.Flag{
		&ImportGenesisFlag,
		&MinCodecVersionFlag,
	}
	// ProposerToolFlags contains flags only used in the proposer tool
	ProposerToolFlags = []cli.Flag{
		&StartL2BlockFlag,
	}
	// ConfigFileFlag loads a JSON config file.
	ConfigFileFlag = cli.StringFlag{
@@ -72,12 +76,6 @@ var (
		Category: "METRICS",
		Value:    6060,
	}
	// ImportGenesisFlag imports the genesis batch during startup
	ImportGenesisFlag = cli.BoolFlag{
		Name:  "import-genesis",
		Usage: "Import genesis batch into L1 contract during startup",
		Value: false,
	}
	// ServicePortFlag is the port the service will listen on
	ServicePortFlag = cli.IntFlag{
		Name: "service.port",
@@ -90,4 +88,16 @@ var (
		Usage: "Genesis file of the network",
		Value: "./conf/genesis.json",
	}
	// MinCodecVersionFlag defines the minimum codec version required for the chunk/batch/bundle proposers
	MinCodecVersionFlag = cli.UintFlag{
		Name:     "min-codec-version",
		Usage:    "Minimum required codec version for the chunk/batch/bundle proposers",
		Required: true,
	}
	// StartL2BlockFlag indicates the start L2 block number for the proposer tool
	StartL2BlockFlag = cli.Uint64Flag{
		Name:  "start-l2-block",
		Usage: "Start L2 block number for proposer tool",
		Value: 0,
	}
)
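For context on how the new required flag is consumed, a sketch using urfave/cli v2 (which the flag definitions above appear to be written against); the app wiring here is illustrative, not from this diff:

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			// mirrors MinCodecVersionFlag above: required, no default
			&cli.UintFlag{Name: "min-codec-version", Required: true},
		},
		Action: func(ctx *cli.Context) error {
			// the proposers would read this before constructing chunks/batches/bundles
			fmt.Println("min codec version:", ctx.Uint("min-codec-version"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}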
@@ -9,6 +9,10 @@ import (

// CheckScrollProverVersion checks the "scroll-prover" version; if it differs from the local one, it returns false
func CheckScrollProverVersion(proverVersion string) bool {
	if strings.HasPrefix(proverVersion, "sdk") {
		return CheckProverSDKVersion(proverVersion)
	}

	// note the version is in fact in the format of "tag-commit-scroll_prover-halo2",
	// so split-by-'-' length should be 4
	remote := strings.Split(proverVersion, "-")
@@ -23,8 +27,18 @@ func CheckScrollProverVersion(proverVersion string) bool {
	return remote[2] == local[2]
}

// CheckProverSDKVersion checks the prover SDK version; it simply returns true for now,
// and more checks will be added as we evolve.
func CheckProverSDKVersion(proverVersion string) bool {
	return true
}

// CheckScrollRepoVersion checks if the proverVersion is at least the minimum required version.
func CheckScrollRepoVersion(proverVersion, minVersion string) bool {
	if strings.HasPrefix(proverVersion, "sdk") {
		return CheckProverSDKWithMinVersion(proverVersion, minVersion)
	}

	c, err := semver.NewConstraint(">= " + minVersion + "-0")
	if err != nil {
		log.Error("failed to initialize constraint", "minVersion", minVersion, "error", err)
@@ -39,3 +53,9 @@ func CheckScrollRepoVersion(proverVersion, minVersion string) bool {

	return c.Check(v)
}

// CheckProverSDKWithMinVersion checks that the prover SDK version is at least the minimum required version; it simply returns true for now,
// and more checks will be added as we evolve.
func CheckProverSDKWithMinVersion(proverVersion string, minVersion string) bool {
	return true
}
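To make the two version formats concrete, an illustrative sketch; the version strings are made up, and the package is assumed importable as "scroll-tech/common/version":

package main

import (
	"fmt"

	"scroll-tech/common/version"
)

func main() {
	// "tag-commit-scroll_prover-halo2": the scroll_prover component must match the local build.
	fmt.Println(version.CheckScrollProverVersion("v4.5.10-9f48bc4-scroll_prover-halo2"))

	// "sdk"-prefixed versions short-circuit to the SDK check (always true for now).
	fmt.Println(version.CheckScrollProverVersion("sdk-0.1.0"))

	// Semver comparison against a minimum required version.
	fmt.Println(version.CheckScrollRepoVersion("v4.5.10", "v4.4.89")) // true
}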
@@ -5,7 +5,7 @@ import (
	"runtime/debug"
)

var tag = "v4.4.66"
var tag = "v4.5.10"

var commit = func() string {
	if info, ok := debug.ReadBuildInfo(); ok {
@@ -90,18 +90,10 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
	cfg.ProverManager = &coordinatorConfig.ProverManager{
		ProversPerSession: 1,
		Verifier: &coordinatorConfig.VerifierConfig{
			MockMode: true,
			LowVersionCircuit: &coordinatorConfig.CircuitConfig{
				ParamsPath:       "",
				AssetsPath:       "",
				ForkName:         "darwin",
				MinProverVersion: "v4.2.0",
			},
			HighVersionCircuit: &coordinatorConfig.CircuitConfig{
				ParamsPath:       "",
				AssetsPath:       "",
				ForkName:         "darwinV2",
				MinProverVersion: "v4.3.0",
				ForkName:         "euclidV2",
				MinProverVersion: "v4.4.89",
			},
		},
		BatchCollectionTimeSec: 60,
@@ -62,9 +62,9 @@ func action(ctx *cli.Context) error {
		return fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", taskID)
	}

	var batchProofs []*message.BatchProof
	var batchProofs []*message.OpenVMBatchProof
	for _, batch := range batches {
		var proof message.BatchProof
		var proof message.OpenVMBatchProof
		if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
			log.Error("failed to unmarshal batch proof")
			return fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, taskID, batch.Hash)
Some files were not shown because too many files have changed in this diff.