Compare commits

..

5 Commits

Author  SHA1        Message                                                      Date
zimpha  af3913d607  Merge remote-tracking branch 'origin/develop' into sepolia   2024-06-14 00:31:41 +08:00
zimpha  c586d2f223  Merge branch 'develop' into sepolia                          2024-05-29 11:39:09 +08:00
zimpha  0039ef7d5a  Merge branch 'develop' into sepolia                          2024-05-29 11:18:26 +08:00
zimpha  14730d46a3  merge with develop branch                                    2024-05-17 11:48:15 +08:00
zimpha  1cfecad426  add finalizeBatch and finalizeBatch4844                      2024-04-12 13:04:11 +08:00
662 changed files with 75312 additions and 36993 deletions

View File

@@ -1,16 +0,0 @@
.github
.gitignore
.dockerignore
Dockerfile
Dockerfile.backup
.output
docs
openvm-clippy
target

View File

@@ -29,7 +29,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-12-06
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go
@@ -42,10 +42,6 @@ jobs:
uses: Swatinem/rust-cache@v2
with:
workspaces: "common/libzkp/impl -> target"
- name: Setup SSH for private repos
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
- name: Lint
working-directory: 'common'
run: |

.github/workflows/contracts.yml (new file, 138 additions, vendored)

@@ -0,0 +1,138 @@
name: Contracts
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'contracts/**'
- '.github/workflows/contracts.yaml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'contracts/**'
- '.github/workflows/contracts.yaml'
defaults:
run:
working-directory: 'contracts'
jobs:
foundry:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v4
with:
submodules: recursive
- name: Install Foundry
uses: foundry-rs/foundry-toolchain@v1
with:
version: nightly
- name: Setup LCOV
uses: hrishikesh-kadam/setup-lcov@v1
- name: Install Node.js 18
uses: actions/setup-node@v2
with:
node-version: '18'
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "::set-output name=dir::$(yarn cache dir)"
- name: Cache yarn dependencies
uses: actions/cache@v2
id: yarn-cache # use this to check for `cache-hit` (`steps.yarn-cache.outputs.cache-hit != 'true'`)
with:
path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
key: ${{ runner.os }}-yarn-${{ hashFiles('contracts/yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-
- name: Cache node_modules
id: npm_cache
uses: actions/cache@v2
with:
path: node_modules
key: node_modules-${{ hashFiles('contracts/yarn.lock') }}
- name: yarn install
# if: steps.npm_cache.outputs.cache-hit != 'true'
run: yarn install
- name: Compile with foundry
run: forge build --evm-version cancun
- name: Run foundry tests
run: forge test --evm-version cancun -vvv
- name: Run foundry coverage
run : forge coverage --evm-version cancun --report lcov
- name : Prune coverage
run : lcov --rc branch_coverage=1 --remove ./lcov.info -o ./lcov.info.pruned 'src/mocks/*' 'src/test/*' 'scripts/*' 'node_modules/*' 'lib/*'
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
files: contracts/lcov.info.pruned
flags: contracts
hardhat:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v4
with:
submodules: recursive
- name: Install Node.js 18
uses: actions/setup-node@v2
with:
node-version: '18'
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "::set-output name=dir::$(yarn cache dir)"
- name: Cache yarn dependencies
uses: actions/cache@v2
id: yarn-cache # use this to check for `cache-hit` (`steps.yarn-cache.outputs.cache-hit != 'true'`)
with:
path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
key: ${{ runner.os }}-yarn-${{ hashFiles('contracts/yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-
- name: Cache node_modules
id: npm_cache
uses: actions/cache@v2
with:
path: node_modules
key: node_modules-${{ hashFiles('contracts/yarn.lock') }}
- name: yarn install
# if: steps.npm_cache.outputs.cache-hit != 'true'
run: yarn install
- name: Compile with hardhat
run: npx hardhat compile
- name: Run hardhat tests
run: npx hardhat test

View File

@@ -1,41 +0,0 @@
name: Docker-coordinator-api-arm64
on:
workflow_dispatch:
inputs:
tag:
description: "tag of this image (suffix -arm64 is added automatically)"
required: true
type: string
jobs:
build-and-push-arm64-image:
runs-on: ubuntu-latest
strategy:
matrix:
arch:
- aarch64
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
run: |
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
docker buildx create --name multiarch --driver docker-container --use
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build docker image
uses: docker/build-push-action@v2
with:
platforms: linux/arm64
context: .
file: ./build/dockerfiles/coordinator-api.Dockerfile
push: true
tags: scrolltech/coordinator-api:${{inputs.tag}}-arm64

View File

@@ -9,6 +9,51 @@ env:
AWS_REGION: us-west-2
jobs:
event_watcher:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: event-watcher
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: event-watcher
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/event_watcher.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
gas_oracle:
runs-on: ubuntu-latest
steps:
@@ -49,8 +94,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -94,8 +139,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -139,8 +184,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -184,8 +229,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -229,8 +274,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -274,8 +319,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -307,13 +352,6 @@ jobs:
REPOSITORY: coordinator-api
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Setup SSH for private repos
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
- name: Run custom script
run: |
./build/dockerfiles/coordinator-api/init-openvm.sh
- name: Build and push
uses: docker/build-push-action@v3
env:
@@ -323,10 +361,11 @@ jobs:
with:
context: .
file: ./build/dockerfiles/coordinator-api.Dockerfile
platforms: linux/amd64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -370,7 +409,7 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

View File

@@ -9,13 +9,9 @@ on:
type: choice
options:
- "1.20"
- "1.20.14"
- "1.21"
- "1.21.13"
- "1.22"
- "1.22.12"
- "1.23"
- "1.23.7"
default: "1.21"
RUST_VERSION:
description: "Rust toolchain version"
@@ -24,7 +20,6 @@ on:
options:
- nightly-2023-12-03
- nightly-2022-12-10
- 1.86.0
default: "nightly-2023-12-03"
PYTHON_VERSION:
description: "Python version"
@@ -48,7 +43,6 @@ on:
type: choice
options:
- 0.1.41
- 0.1.71
BASE_IMAGE:
description: "which intermediate image you want to update"
required: true

.github/workflows/prover.yml (new file, 99 additions, vendored)

@@ -0,0 +1,99 @@
name: Prover
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'prover/**'
- '.github/workflows/prover.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'prover/**'
- '.github/workflows/prover.yml'
defaults:
run:
working-directory: 'prover'
jobs:
skip_check:
runs-on: ubuntu-latest
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- id: skip_check
uses: fkirc/skip-duplicate-actions@v5
with:
cancel_others: 'true'
concurrent_skipping: 'same_content_newer'
paths_ignore: '["**/README.md"]'
fmt:
needs: [skip_check]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
components: rustfmt
- name: Cargo cache
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Cargo check
run: cargo check --all-features
- name: Cargo fmt
run: cargo fmt --all -- --check
clippy:
needs: [skip_check, fmt]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
components: clippy
- name: Cargo cache
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Run clippy
run: cargo clippy --all-features --all-targets -- -D warnings
compile:
needs: [skip_check, clippy]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Test
run: |
make prover

.gitmodules (15 lines changed, vendored)

@@ -1,3 +1,12 @@
[submodule "scroll-contracts"]
path = scroll-contracts
url = https://github.com/scroll-tech/scroll-contracts.git
[submodule "l2geth"]
path = l2geth
url = git@github.com:scroll-tech/go-ethereum.git
[submodule "contracts/lib/ds-test"]
path = contracts/lib/ds-test
url = https://github.com/dapphub/ds-test
[submodule "contracts/lib/forge-std"]
path = contracts/lib/forge-std
url = https://github.com/foundry-rs/forge-std
[submodule "contracts/lib/solmate"]
path = contracts/lib/solmate
url = https://github.com/rari-capital/solmate

View File

@@ -11,7 +11,7 @@ Please note that this project is released with a [Contributor Code of Conduct][c
## Contribute to Scroll
Did you know there are many ways of contributing to Scroll? If you are looking to contribute to by adding Scroll to existing Dev Tools or by doing integrations please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. This repository covers the Scroll infrastructure and smart contracts, if you want to contribute to these areas continue reading this document.
Did you know there are many ways of contributing to Scroll? If you are looking to contribute to by adding Scroll to existing Dev Tools or by doing integrations please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. If you are looking to contribute to Scroll's Halo2 zkEVM circuits please refer to the [zkEVM circuits](https://github.com/scroll-tech/zkevm-circuits) repo. This repository covers the Scroll infrastructure and smart contracts, if you want to contribute to these areas continue reading this document.
## Issues and PRs

Cargo.lock (generated, 9675 lines changed): file diff suppressed because it is too large

View File

@@ -1,93 +0,0 @@
[workspace]
members = [
"common/types-rs",
"common/types-rs/base",
"common/types-rs/aggregation",
"common/types-rs/chunk",
"common/types-rs/batch",
"common/types-rs/bundle",
"common/libzkp/impl",
"zkvm-prover/prover",
"zkvm-prover/verifier",
"zkvm-prover/integration",
"zkvm-prover/bin",
]
exclude = [
"prover"
]
resolver = "2"
[workspace.package]
authors = ["Scroll developers"]
edition = "2021"
homepage = "https://scroll.io"
readme = "README.md"
repository = "https://github.com/scroll-tech/scroll"
version = "4.5.8"
[workspace.dependencies]
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", package = "scroll-zkvm-prover"}
openvm = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
openvm-build = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
openvm-transpiler = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
openvm-custom-insn = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
openvm-rv32im-guest = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
openvm-circuit = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
openvm-native-circuit = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
openvm-native-compiler = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
openvm-native-recursion = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
openvm-native-transpiler = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
openvm-continuations = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
openvm-sdk = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
openvm-stark-sdk = { git = "https://github.com/openvm-org/stark-backend.git", tag = "v1.0.1" }
sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade", features = ["scroll"] }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade", features = ["scroll"] }
sbv-kv = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade" }
sbv-trie = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade" }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade" }
metrics = "0.23.0"
metrics-util = "0.17"
metrics-tracing-context = "0.16.0"
alloy = { version = "0.11", default-features = false }
alloy-primitives = { version = "0.8", default-features = false }
# also use this to trigger "serde" feature for primitives
alloy-serde = { version = "0.8", default-features = false }
rkyv = "0.8"
serde = { version = "1", default-features = false, features = ["derive"] }
serde_json = { version = "1.0" }
serde_with = "3.11.0"
itertools = "0.14"
tiny-keccak = "2.0"
tracing = "0.1"
eyre = "0.6"
bincode_v1 = { version = "1.3", package = "bincode"}
snark-verifier-sdk = { version = "0.2.0", default-features = false, features = [
"loader_halo2",
"halo2-axiom",
"display",
] }
once_cell = "1.20"
base64 = "0.22"
#TODO: upgrade
vm-zstd = { git = "https://github.com/scroll-tech/rust-zstd-decompressor.git", tag = "v0.1.1" }
scroll-zkvm-circuit-input-types = { path = "common/types-rs"}
scroll-zkvm-verifier = { path = "zkvm-prover/verifier"}
scroll-zkvm-prover = { path = "zkvm-prover/prover"}
[patch.crates-io]
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v0.8.18-euclid-upgrade" }
ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-euclid-upgrade" }
[profile.maxperf]
inherits = "release"
lto = "fat"
codegen-units = 1

View File

@@ -1,47 +1,51 @@
.PHONY: fmt dev_docker build_test_docker run_test_docker clean update
L2GETH_TAG=scroll-v5.8.23
L2GETH_TAG=scroll-v5.3.0
help: ## Display this help message
@grep -h \
-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
update: ## Update dependencies
update:
go work sync
cd $(PWD)/bridge-history-api/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/common/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
cd $(PWD)/coordinator/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/database/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/rollup/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/tests/integration-test/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/bridge-history-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
lint: ## The code's format and security checks
lint: ## The code's format and security checks.
make -C rollup lint
make -C common lint
make -C coordinator lint
make -C database lint
make -C prover lint
make -C bridge-history-api lint
fmt: ## Format the code
fmt: ## format the code
go work sync
cd $(PWD)/bridge-history-api/ && go mod tidy
cd $(PWD)/common/ && go mod tidy
cd $(PWD)/coordinator/ && go mod tidy
cd $(PWD)/database/ && go mod tidy
cd $(PWD)/prover/ && go mod tidy
cd $(PWD)/rollup/ && go mod tidy
cd $(PWD)/tests/integration-test/ && go mod tidy
goimports -local scroll-tech/bridge-history-api/ -w .
goimports -local scroll-tech/common/ -w .
goimports -local scroll-tech/coordinator/ -w .
goimports -local scroll-tech/database/ -w .
goimports -local scroll-tech/rollup/ -w .
goimports -local scroll-tech/tests/integration-test/ -w .
goimports -local $(PWD)/bridge-history-api/ -w .
goimports -local $(PWD)/common/ -w .
goimports -local $(PWD)/coordinator/ -w .
goimports -local $(PWD)/database/ -w .
goimports -local $(PWD)/prover/ -w .
goimports -local $(PWD)/rollup/ -w .
goimports -local $(PWD)/tests/integration-test/ -w .
dev_docker: ## Build docker images for development/testing usages
dev_docker: ## build docker images for development/testing usages
docker pull postgres
docker build -t scroll_l1geth --platform linux/amd64 ./common/testcontainers/docker/l1geth/
docker build -t scroll_l2geth --platform linux/amd64 ./common/testcontainers/docker/l2geth/
docker build -t scroll_l1geth ./common/testcontainers/docker/l1geth/
docker build -t scroll_l2geth ./common/testcontainers/docker/l2geth/
clean: ## Empty out the bin folder
@rm -rf build/bin

View File

@@ -1,6 +1,7 @@
# Scroll Monorepo
[![rollup](https://github.com/scroll-tech/scroll/actions/workflows/rollup.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/rollup.yml)
[![contracts](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yml)
[![bridge-history](https://github.com/scroll-tech/scroll/actions/workflows/bridge_history_api.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/bridge_history_api.yml)
[![coordinator](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml)
[![prover](https://github.com/scroll-tech/scroll/actions/workflows/prover.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/prover.yml)
@@ -16,9 +17,10 @@
├── <a href="./common/">common</a>: Common libraries and types
├── <a href="./coordinator/">coordinator</a>: Prover coordinator service that dispatches proving tasks to provers
├── <a href="./database">database</a>: Database client and schema definition
├── <a href="./src">l2geth</a>: Scroll execution node
├── <a href="./prover">prover</a>: Prover client that runs proof generation for zkEVM circuit and aggregation circuit
├── <a href="./rollup">rollup</a>: Rollup-related services
├── <a href="https://github.com/scroll-tech/scroll-contracts.git">scroll-contracts</a>: solidity code for Scroll L1 bridge and rollup contracts and L2 bridge and pre-deployed contracts.
├── <a href="./rpc-gateway">rpc-gateway</a>: RPC gateway external repo
└── <a href="./tests">tests</a>: Integration tests
</pre>
@@ -50,6 +52,12 @@ go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```
## Testing Contracts
You can find the unit tests in [`contracts/src/test/`](/contracts/src/test/), and integration tests in [`contracts/integration-test/`](/contracts/integration-test/).
See [`contracts`](/contracts) for more details on the contracts.
## License
Scroll Monorepo is licensed under the [MIT](./LICENSE) license.

View File

@@ -37,6 +37,6 @@ reset-env:
go build -o $(PWD)/build/bin/bridgehistoryapi-db-cli ./cmd/db_cli && $(PWD)/build/bin/bridgehistoryapi-db-cli reset
bridgehistoryapi-docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile --platform=linux/amd64
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-api:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-api.Dockerfile --platform=linux/amd64
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile --platform=linux/amd64
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-api:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-api.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile

File diff suppressed because one or more lines are too long

View File

@@ -1,13 +0,0 @@
package backendabi
import (
"testing"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
)
func TestEventSignatures(t *testing.T) {
assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)")), L1RevertBatchV0EventSig)
assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,uint256)")), L1RevertBatchV7EventSig)
}
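The removed test above pins the topic hashes of the two `RevertBatch` events (`L1RevertBatchV0EventSig` and `L1RevertBatchV7EventSig`). As a standalone sketch, assuming the upstream go-ethereum `crypto` package rather than the scroll-tech fork used in the test, the same hashes can be reproduced like this:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// An event's topic0 is the Keccak-256 hash of its canonical signature string.
	v0 := crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)")) // V0 event
	v7 := crypto.Keccak256Hash([]byte("RevertBatch(uint256,uint256)")) // V7 event
	fmt.Println("V0:", v0.Hex())
	fmt.Println("V7:", v7.Hex())
}
```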

View File

@@ -96,7 +96,7 @@ func action(ctx *cli.Context) error {
return nil
}
// Run bridge-history-backend api cmd instance.
// Run event watcher cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)

View File

@@ -68,10 +68,7 @@ func action(ctx *cli.Context) error {
observability.Server(ctx, db)
l1MessageFetcher, err := fetcher.NewL1MessageFetcher(subCtx, cfg.L1, db, l1Client)
if err != nil {
log.Crit("failed to create L1MessageFetcher", "err", err)
}
l1MessageFetcher := fetcher.NewL1MessageFetcher(subCtx, cfg.L1, db, l1Client)
go l1MessageFetcher.Start()
l2MessageFetcher := fetcher.NewL2MessageFetcher(subCtx, cfg.L2, db, l2Client)
@@ -87,7 +84,7 @@ func action(ctx *cli.Context) error {
return nil
}
// Run bridge-history-backend fetcher cmd instance.
// Run event watcher cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)

View File

@@ -19,11 +19,7 @@
"ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
"GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B",
"MessageQueueV2Addr": "0x0000000000000000000000000000000000000000",
"BatchBridgeGatewayAddr": "0x5Bcfd99c34cf7E06fc756f6f5aE7400504852bc4",
"GasTokenGatewayAddr": "0x0000000000000000000000000000000000000000",
"WrappedTokenGatewayAddr": "0x0000000000000000000000000000000000000000",
"BlobScanAPIEndpoint": "https://api.blobscan.com/blobs/"
"BatchBridgeGatewayAddr": "0x0000000000000000000000000000000000000000"
},
"L2": {
"confirmation": 0,
@@ -43,7 +39,7 @@
"PufferGatewayAddr": "0x9eBf2f33526CD571f8b2ad312492cb650870CFd6",
"GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79",
"MessageQueueAddr": "0x5300000000000000000000000000000000000000",
"BatchBridgeGatewayAddr": "0xa1a12158bE6269D7580C63eC5E609Cdc0ddD82bC"
"BatchBridgeGatewayAddr": "0x0000000000000000000000000000000000000000"
},
"db": {
"dsn": "postgres://postgres:123456@localhost:5444/test?sslmode=disable",

View File

@@ -1,8 +1,6 @@
module scroll-tech/bridge-history-api
go 1.22
toolchain go1.22.2
go 1.21
require (
github.com/gin-contrib/cors v1.5.0
@@ -10,43 +8,41 @@ require (
github.com/go-redis/redis/v8 v8.11.5
github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.11.0
golang.org/x/sync v0.6.0
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 // It's a hotfix for the header hash incompatibility issue, pls change this with caution
require (
dario.cat/mergo v1.0.0 // indirect
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.20.0 // indirect
github.com/bits-and-blooms/bitset v1.12.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/bytedance/sonic v1.10.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
github.com/chenzhuoyu/iasm v0.9.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.13.0 // indirect
github.com/consensys/gnark-crypto v0.12.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible // indirect
github.com/docker/docker v26.1.0+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
github.com/fjl/memsize v0.0.2 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-kit/kit v0.9.0 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
@@ -59,16 +55,17 @@ require (
github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.3.2 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.17 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/jackc/pgx/v5 v5.5.4 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
@@ -93,28 +90,29 @@ require (
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/scroll-tech/zktrie v0.8.2 // indirect
github.com/sethvargo/go-retry v0.2.4 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.13 // indirect
github.com/supranational/blst v0.3.11 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.5.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/crypto v0.19.0 // indirect
golang.org/x/mod v0.16.0 // indirect
golang.org/x/net v0.20.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.17.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect

View File

@@ -11,11 +11,9 @@ github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
@@ -25,8 +23,8 @@ github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHG
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA=
github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@@ -55,20 +53,21 @@ github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo
github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.13.0 h1:VPULb/v6bbYELAPTDFINEVaMTTybV5GLxDdcjnS+4oc=
github.com/consensys/gnark-crypto v0.13.0/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o=
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
@@ -88,8 +87,8 @@ github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T
github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E=
github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA=
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -110,8 +109,9 @@ github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw=
github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw=
github.com/go-faster/errors v0.6.1 h1:nNIPOBkprlKzkThvS/0YaX8Zs9KewLCOSFQS5BU06FI=
github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+Jpi2LAyY=
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
@@ -171,13 +171,13 @@ github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuW
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
@@ -203,8 +203,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@@ -303,18 +303,16 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54 h1:qVpsVu1J91opTn6HYeuzWcBRVhQmPR8g05i+PlOjlI4=
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 h1:qfOaRflvH1vtnFWloB7BveKlP/VqYgMqLJ6e9TlBJ/8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
@@ -325,8 +323,6 @@ github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5g
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
@@ -343,14 +339,14 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo=
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
@@ -373,8 +369,8 @@ github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd h1:dz
github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I=
github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2 h1:E0yUuuX7UmPxXm92+yQCjMveLFO3zfvYFIJVuAqsVRA=
github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2/go.mod h1:fjBLQ2TdQNl4bMjuWl9adoTGBypwUTPoGC+EqYqiIcU=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
@@ -387,21 +383,21 @@ golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -418,19 +414,20 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=

View File

@@ -6,7 +6,6 @@ import (
"path/filepath"
"scroll-tech/common/database"
"scroll-tech/common/utils"
)
// FetcherConfig is the configuration of Layer1 or Layer2 fetcher.
@@ -30,14 +29,7 @@ type FetcherConfig struct {
ScrollChainAddr string `json:"ScrollChainAddr"`
GatewayRouterAddr string `json:"GatewayRouterAddr"`
MessageQueueAddr string `json:"MessageQueueAddr"`
MessageQueueV2Addr string `json:"MessageQueueV2Addr"`
BatchBridgeGatewayAddr string `json:"BatchBridgeGatewayAddr"`
GasTokenGatewayAddr string `json:"GasTokenGatewayAddr"`
WrappedTokenGatewayAddr string `json:"WrappedTokenGatewayAddr"`
BeaconNodeAPIEndpoint string `json:"BeaconNodeAPIEndpoint"`
BlobScanAPIEndpoint string `json:"BlobScanAPIEndpoint"`
BlockNativeAPIEndpoint string `json:"BlockNativeAPIEndpoint"`
}
// RedisConfig redis config
@@ -72,11 +64,5 @@ func NewConfig(file string) (*Config, error) {
return nil, err
}
// Override config with environment variables
err = utils.OverrideConfigWithEnv(cfg, "SCROLL_BRIDGE_HISTORY")
if err != nil {
return nil, err
}
return cfg, nil
}

View File

@@ -2,7 +2,6 @@ package fetcher
import (
"context"
"fmt"
"math/big"
"time"
@@ -11,7 +10,6 @@ import (
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/config"
@@ -37,32 +35,13 @@ type L1MessageFetcher struct {
}
// NewL1MessageFetcher creates a new L1MessageFetcher instance.
func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) (*L1MessageFetcher, error) {
blobClient := blob_client.NewBlobClients()
if cfg.BeaconNodeAPIEndpoint != "" {
beaconNodeClient, err := blob_client.NewBeaconNodeClient(cfg.BeaconNodeAPIEndpoint)
if err != nil {
log.Warn("failed to create BeaconNodeClient", "err", err)
} else {
blobClient.AddBlobClient(beaconNodeClient)
}
}
if cfg.BlobScanAPIEndpoint != "" {
blobClient.AddBlobClient(blob_client.NewBlobScanClient(cfg.BlobScanAPIEndpoint))
}
if cfg.BlockNativeAPIEndpoint != "" {
blobClient.AddBlobClient(blob_client.NewBlockNativeClient(cfg.BlockNativeAPIEndpoint))
}
if blobClient.Size() == 0 {
return nil, fmt.Errorf("no blob client is configured")
}
func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1MessageFetcher {
c := &L1MessageFetcher{
ctx: ctx,
cfg: cfg,
client: client,
eventUpdateLogic: logic.NewEventUpdateLogic(db, true),
l1FetcherLogic: logic.NewL1FetcherLogic(cfg, db, client, blobClient),
l1FetcherLogic: logic.NewL1FetcherLogic(cfg, db, client),
}
reg := prometheus.DefaultRegisterer
@@ -79,7 +58,7 @@ func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gor
Help: "Latest blockchain height the L1 message fetcher has synced with.",
})
return c, nil
return c
}
// Start starts the L1 message fetching process.

View File

@@ -141,7 +141,7 @@ func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
return
}
if updateErr := c.eventUpdateLogic.UpdateL2WithdrawMessageProofs(c.ctx, c.l2SyncHeight); updateErr != nil {
if updateErr := c.eventUpdateLogic.UpdateL1BatchIndexAndStatus(c.ctx, c.l2SyncHeight); updateErr != nil {
log.Error("failed to update L1 batch index and status", "from", from, "to", to, "err", updateErr)
return
}

View File

@@ -2,7 +2,7 @@ package logic
import (
"context"
"errors"
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -125,11 +125,6 @@ func (b *EventUpdateLogic) L1InsertOrUpdate(ctx context.Context, l1FetcherResult
}
func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, batchIndex, startBlock, endBlock uint64) error {
if startBlock > endBlock {
log.Warn("start block is greater than end block", "start", startBlock, "end", endBlock)
return nil
}
l2WithdrawMessages, err := b.crossMessageOrm.GetL2WithdrawalsByBlockRange(ctx, startBlock, endBlock)
if err != nil {
log.Error("failed to get L2 withdrawals by batch index", "batch index", batchIndex, "err", err)
@@ -152,8 +147,8 @@ func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, bat
}
if withdrawTrie.NextMessageNonce != l2WithdrawMessages[0].MessageNonce {
log.Error("nonce mismatch", "expected next message nonce", withdrawTrie.NextMessageNonce, "actual next message nonce", l2WithdrawMessages[0].MessageNonce)
return errors.New("nonce mismatch")
log.Error("nonce mismatch", "expected next message nonce", withdrawTrie.NextMessageNonce, "actuall next message nonce", l2WithdrawMessages[0].MessageNonce)
return fmt.Errorf("nonce mismatch")
}
messageHashes := make([]common.Hash, len(l2WithdrawMessages))
@@ -178,42 +173,24 @@ func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, bat
return nil
}
// UpdateL2WithdrawMessageProofs updates L2 withdrawal message proofs.
func (b *EventUpdateLogic) UpdateL2WithdrawMessageProofs(ctx context.Context, height uint64) error {
lastUpdatedFinalizedBlockHeight, err := b.batchEventOrm.GetLastUpdatedFinalizedBlockHeight(ctx)
// UpdateL1BatchIndexAndStatus updates L1 finalized batch index and status
func (b *EventUpdateLogic) UpdateL1BatchIndexAndStatus(ctx context.Context, height uint64) error {
finalizedBatches, err := b.batchEventOrm.GetFinalizedBatchesLEBlockHeight(ctx, height)
if err != nil {
log.Error("failed to get last updated finalized block height", "error", err)
return err
}
finalizedBatches, err := b.batchEventOrm.GetUnupdatedFinalizedBatchesLEBlockHeight(ctx, height)
if err != nil {
log.Error("failed to get unupdated finalized batches >= block height", "error", err)
log.Error("failed to get batches >= block height", "error", err)
return err
}
for _, finalizedBatch := range finalizedBatches {
log.Info("update finalized batch or bundle info of L2 withdrawals", "index", finalizedBatch.BatchIndex, "lastUpdatedFinalizedBlockHeight", lastUpdatedFinalizedBlockHeight, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber)
// This method is compatible with both "finalize by batch" and "finalize by bundle" modes:
// - In "finalize by batch" mode, each batch emits a FinalizedBatch event.
// - In "finalize by bundle" mode, all batches in the bundle emit only one FinalizedBatch event, using the last batch's index and hash.
//
// The method updates two types of information in L2 withdrawal messages:
// 1. Withdraw proof generation:
// - finalize by batch: Generates proofs for each batch.
// - finalize by bundle: Generates proofs for the entire bundle at once.
// 2. Batch index updating:
// - finalize by batch: Updates the batch index for withdrawal messages in each processed batch.
// - finalize by bundle: Updates the batch index for all withdrawal messages in the bundle, using the index of the last batch in the bundle.
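// Illustrative example (hypothetical numbers, not from this diff): a bundle covering batches 10-12 emits a single
// FinalizedBatch event carrying index 12, so withdrawals in blocks
// [lastUpdatedFinalizedBlockHeight+1, batch 12's EndBlockNumber] get their proofs generated and their batch index set to 12.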
if updateErr := b.updateL2WithdrawMessageInfos(ctx, finalizedBatch.BatchIndex, lastUpdatedFinalizedBlockHeight+1, finalizedBatch.EndBlockNumber); updateErr != nil {
log.Error("failed to update L2 withdraw message infos", "index", finalizedBatch.BatchIndex, "lastUpdatedFinalizedBlockHeight", lastUpdatedFinalizedBlockHeight, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", updateErr)
log.Info("update finalized batch info of L2 withdrawals", "index", finalizedBatch.BatchIndex, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber)
if updateErr := b.updateL2WithdrawMessageInfos(ctx, finalizedBatch.BatchIndex, finalizedBatch.StartBlockNumber, finalizedBatch.EndBlockNumber); updateErr != nil {
log.Error("failed to update L2 withdraw message infos", "index", finalizedBatch.BatchIndex, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", updateErr)
return updateErr
}
if dbErr := b.batchEventOrm.UpdateBatchEventStatus(ctx, finalizedBatch.BatchIndex); dbErr != nil {
log.Error("failed to update batch event status as updated", "index", finalizedBatch.BatchIndex, "lastUpdatedFinalizedBlockHeight", lastUpdatedFinalizedBlockHeight, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", dbErr)
log.Error("failed to update batch event status as updated", "index", finalizedBatch.BatchIndex, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", dbErr)
return dbErr
}
lastUpdatedFinalizedBlockHeight = finalizedBatch.EndBlockNumber
b.eventUpdateLogicL1FinalizeBatchEventL2BlockUpdateHeight.Set(float64(finalizedBatch.EndBlockNumber))
}
return nil

View File

@@ -407,7 +407,7 @@ func (h *HistoryLogic) cacheTxsInfo(ctx context.Context, cacheKey string, txs []
return err
}
} else {
// The transactions are sorted, thus we set the score as their index.
// The transactions are sorted, thus we set the score as their indices.
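// For illustration (assuming the cache is a Redis sorted set): the tx at slice index i is stored
// with score float64(i), e.g. ZADD cacheKey 0 <tx0 bytes> 1 <tx1 bytes> ..., so a later
// ZRANGE cacheKey 0 -1 returns the transactions in their original order.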
for _, tx := range txs {
txBytes, err := json.Marshal(tx)
if err != nil {

View File

@@ -2,16 +2,13 @@ package logic
import (
"context"
"fmt"
"math/big"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
@@ -22,17 +19,15 @@ import (
// L1EventParser the l1 event parser
type L1EventParser struct {
cfg *config.FetcherConfig
client *ethclient.Client
blobClient blob_client.BlobClient
cfg *config.FetcherConfig
client *ethclient.Client
}
// NewL1EventParser creates l1 event parser
func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client, blobClient blob_client.BlobClient) *L1EventParser {
func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L1EventParser {
return &L1EventParser{
cfg: cfg,
client: client,
blobClient: blobClient,
cfg: cfg,
client: client,
}
}
@@ -173,14 +168,6 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
lastMessage.TokenAmounts = utils.ConvertBigIntArrayToString(event.TokenAmounts)
case backendabi.L1DepositWrappedTokenSig:
event := backendabi.WrappedTokenMessageEvent{}
if err := utils.UnpackLog(backendabi.L1WrappedTokenGatewayABI, &event, "DepositWrappedToken", vlog); err != nil {
log.Error("Failed to unpack DepositWrappedToken event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
case backendabi.L1SentMessageEventSig:
event := backendabi.L1SentMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "SentMessage", vlog); err != nil {
@@ -237,21 +224,7 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
}
// ParseL1BatchEventLogs parses L1 watched batch events.
func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.Log, client *ethclient.Client, blockTimestampsMap map[uint64]uint64) ([]*orm.BatchEvent, error) {
// Since CodecV7, multiple CommitBatch events per transaction are possible,
// with one transaction carrying multiple blobs,
// and each CommitBatch event corresponds to a blob containing block range data.
// To correctly process these events, we need to:
// 1. Parse the associated blob data to extract the block range for each event
// 2. Track the parent batch hash for each processed CommitBatch event, to:
// - Validate the batch hash, since the parent batch hash is needed to calculate the batch hash
// - Derive the index of the current batch from the number of parent batch hashes tracked
// In commitBatches and commitAndFinalizeBatch, the parent batch hash is passed in calldata,
// so that we can use it to get the first batch's parent batch hash, and derive the rest.
// The index map serves this purpose with:
// Key: commit transaction hash
// Value: parent batch hashes (in order) for each processed CommitBatch event in the transaction
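// Illustrative walk-through (hypothetical hashes): a commit tx carrying 3 blobs emits 3 CommitBatch events.
//   event 0: currentIndex = 0 -> parent batch hash read from calldata; append h0 -> txBlobIndexMap[tx] = [h0]
//   event 1: currentIndex = 1 -> parent batch hash = h0;               append h1 -> txBlobIndexMap[tx] = [h0, h1]
//   event 2: currentIndex = 2 -> parent batch hash = h1;               append h2 -> txBlobIndexMap[tx] = [h0, h1, h2]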
txBlobIndexMap := make(map[common.Hash][]common.Hash)
func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.Log, client *ethclient.Client) ([]*orm.BatchEvent, error) {
var l1BatchEvents []*orm.BatchEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
@@ -266,59 +239,11 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
return nil, err
}
version, startBlock, endBlock, err := utils.GetBatchVersionAndBlockRangeFromCalldata(commitTx.Data())
startBlock, endBlock, err := utils.GetBatchRangeFromCalldata(commitTx.Data())
if err != nil {
log.Error("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
return nil, err
}
if version >= 7 { // It's a batch with version >= 7.
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(version))
if err != nil {
return nil, fmt.Errorf("unsupported codec version: %v, err: %w", version, err)
}
// we append the batch hash to the slice for the current commit transaction after processing the batch.
// that means the current index of the batch within the transaction is len(txBlobIndexMap[vlog.TxHash]).
currentIndex := len(txBlobIndexMap[vlog.TxHash])
if currentIndex >= len(commitTx.BlobHashes()) {
return nil, fmt.Errorf("commit transaction %s has %d blobs, but trying to access index %d (batch index %d)",
vlog.TxHash.String(), len(commitTx.BlobHashes()), currentIndex, event.BatchIndex.Uint64())
}
blobVersionedHash := commitTx.BlobHashes()[currentIndex]
// validate the batch hash
var parentBatchHash common.Hash
if currentIndex == 0 {
parentBatchHash, err = utils.GetParentBatchHashFromCalldata(commitTx.Data())
if err != nil {
return nil, fmt.Errorf("failed to get parent batch header from calldata, tx hash: %s, err: %w", vlog.TxHash.String(), err)
}
} else {
// here we need to subtract 1 from the current index to get the parent batch hash.
parentBatchHash = txBlobIndexMap[vlog.TxHash][currentIndex-1]
}
calculatedBatch, err := codec.NewDABatchFromParams(event.BatchIndex.Uint64(), blobVersionedHash, parentBatchHash)
if err != nil {
return nil, fmt.Errorf("failed to create new DA batch from params, batch index: %d, err: %w", event.BatchIndex.Uint64(), err)
}
if calculatedBatch.Hash() != event.BatchHash {
return nil, fmt.Errorf("batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex, event.BatchHash.String(), calculatedBatch.Hash().String())
}
blocks, err := e.getBatchBlockRangeFromBlob(ctx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
if err != nil {
return nil, fmt.Errorf("failed to process versioned blob, blobVersionedHash: %s, block number: %d, blob index: %d, err: %w",
blobVersionedHash.String(), vlog.BlockNumber, currentIndex, err)
}
if len(blocks) == 0 {
return nil, fmt.Errorf("no blocks found in the blob, blobVersionedHash: %s, block number: %d, blob index: %d",
blobVersionedHash.String(), vlog.BlockNumber, currentIndex)
}
startBlock = blocks[0].Number()
endBlock = blocks[len(blocks)-1].Number()
txBlobIndexMap[vlog.TxHash] = append(txBlobIndexMap[vlog.TxHash], event.BatchHash)
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(btypes.BatchStatusTypeCommitted),
BatchIndex: event.BatchIndex.Uint64(),
@@ -327,8 +252,8 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
EndBlockNumber: endBlock,
L1BlockNumber: vlog.BlockNumber,
})
case backendabi.L1RevertBatchV0EventSig:
event := backendabi.L1RevertBatchV0Event{}
case backendabi.L1RevertBatchEventSig:
event := backendabi.L1RevertBatchEvent{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch", vlog); err != nil {
log.Error("Failed to unpack RevertBatch event", "err", err)
return nil, err
@@ -339,19 +264,6 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
BatchHash: event.BatchHash.String(),
L1BlockNumber: vlog.BlockNumber,
})
case backendabi.L1RevertBatchV7EventSig:
event := backendabi.L1RevertBatchV7Event{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch0", vlog); err != nil {
log.Error("Failed to unpack RevertBatch event", "err", err)
return nil, err
}
for i := event.StartBatchIndex.Uint64(); i <= event.FinishBatchIndex.Uint64(); i++ {
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(btypes.BatchStatusTypeReverted),
BatchIndex: i,
L1BlockNumber: vlog.BlockNumber,
})
}
case backendabi.L1FinalizeBatchEventSig:
event := backendabi.L1FinalizeBatchEvent{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "FinalizeBatch", vlog); err != nil {
@@ -408,16 +320,6 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
QueueIndex: index,
})
}
case backendabi.L1ResetDequeuedTransactionEventSig:
event := backendabi.L1ResetDequeuedTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "ResetDequeuedTransaction", vlog); err != nil {
log.Error("Failed to unpack ResetDequeuedTransaction event", "err", err)
return nil, err
}
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: btypes.MessageQueueEventTypeResetDequeuedTransaction,
QueueIndex: event.StartIndex.Uint64(),
})
case backendabi.L1DropTransactionEventSig:
event := backendabi.L1DropTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DropTransaction", vlog); err != nil {
@@ -469,27 +371,3 @@ func getRealFromAddress(ctx context.Context, eventSender common.Address, eventMe
}
return sender.String(), nil
}
func (e *L1EventParser) getBatchBlockRangeFromBlob(ctx context.Context, codec encoding.Codec, blobVersionedHash common.Hash, l1BlockTime uint64) ([]encoding.DABlock, error) {
blob, err := e.blobClient.GetBlobByVersionedHashAndBlockTime(ctx, blobVersionedHash, l1BlockTime)
if err != nil {
return nil, fmt.Errorf("failed to get blob %s: %w", blobVersionedHash.Hex(), err)
}
if blob == nil {
return nil, fmt.Errorf("blob %s not found", blobVersionedHash.Hex())
}
blobPayload, err := codec.DecodeBlob(blob)
if err != nil {
return nil, fmt.Errorf("blob %s decode error: %w", blobVersionedHash.Hex(), err)
}
blocks := blobPayload.Blocks()
if len(blocks) == 0 {
return nil, fmt.Errorf("empty blocks in blob %s", blobVersionedHash.Hex())
}
log.Debug("Successfully processed blob", "blobVersionedHash", blobVersionedHash.Hex(), "blocksCount", len(blocks))
return blocks, nil
}

View File

@@ -11,7 +11,6 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
"gorm.io/gorm"
backendabi "scroll-tech/bridge-history-api/abi"
@@ -50,10 +49,13 @@ type L1FetcherLogic struct {
}
// NewL1FetcherLogic creates L1 fetcher logic
func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client, blobClient blob_client.BlobClient) *L1FetcherLogic {
func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic {
addressList := []common.Address{
common.HexToAddress(cfg.ETHGatewayAddr),
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
@@ -67,8 +69,11 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
}
gatewayList := []common.Address{
common.HexToAddress(cfg.ETHGatewayAddr),
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
@@ -100,30 +105,6 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
}
if common.HexToAddress(cfg.ETHGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.ETHGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.ETHGatewayAddr))
}
if common.HexToAddress(cfg.WETHGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.WETHGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.WETHGatewayAddr))
}
if common.HexToAddress(cfg.GasTokenGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.GasTokenGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.GasTokenGatewayAddr))
}
if common.HexToAddress(cfg.WrappedTokenGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.WrappedTokenGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.WrappedTokenGatewayAddr))
}
if common.HexToAddress(cfg.MessageQueueV2Addr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.MessageQueueV2Addr))
}
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
f := &L1FetcherLogic{
@@ -134,7 +115,7 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
client: client,
addressList: addressList,
gatewayList: gatewayList,
parser: NewL1EventParser(cfg, client, blobClient),
parser: NewL1EventParser(cfg, client),
}
reg := prometheus.DefaultRegisterer
@@ -173,10 +154,14 @@ func (f *L1FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to u
return false, 0, lastBlockHash, blocks, nil
}
func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) ([]*orm.CrossMessage, error) {
func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) (map[uint64]uint64, []*orm.CrossMessage, error) {
var l1RevertedTxs []*orm.CrossMessage
blockTimestampsMap := make(map[uint64]uint64)
for i := from; i <= to; i++ {
block := blocks[i-from]
blockTimestampsMap[block.NumberU64()] = block.Time()
for _, tx := range block.Transactions() {
// Gateways: L1 deposit.
// Messenger: L1 deposit retry (replayMessage), L1 deposit refund (dropMessage), L2 withdrawal's claim (relayMessageWithProof).
@@ -188,7 +173,7 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
if receiptErr != nil {
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
return nil, receiptErr
return nil, nil, receiptErr
}
// Check if the transaction has failed
@@ -200,7 +185,7 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
sender, senderErr := signer.Sender(tx)
if senderErr != nil {
log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", senderErr)
return nil, senderErr
return nil, nil, senderErr
}
l1RevertedTxs = append(l1RevertedTxs, &orm.CrossMessage{
@@ -214,7 +199,7 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
})
}
}
return l1RevertedTxs, nil
return blockTimestampsMap, l1RevertedTxs, nil
}
func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]types.Log, error) {
@@ -225,7 +210,7 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 17)
query.Topics[0] = make([]common.Hash, 14)
query.Topics[0][0] = backendabi.L1DepositETHSig
query.Topics[0][1] = backendabi.L1DepositERC20Sig
query.Topics[0][2] = backendabi.L1DepositERC721Sig
@@ -234,15 +219,12 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
query.Topics[0][5] = backendabi.L1RelayedMessageEventSig
query.Topics[0][6] = backendabi.L1FailedRelayedMessageEventSig
query.Topics[0][7] = backendabi.L1CommitBatchEventSig
query.Topics[0][8] = backendabi.L1RevertBatchV0EventSig
query.Topics[0][9] = backendabi.L1RevertBatchV7EventSig
query.Topics[0][10] = backendabi.L1FinalizeBatchEventSig
query.Topics[0][11] = backendabi.L1QueueTransactionEventSig
query.Topics[0][12] = backendabi.L1DequeueTransactionEventSig
query.Topics[0][13] = backendabi.L1DropTransactionEventSig
query.Topics[0][14] = backendabi.L1ResetDequeuedTransactionEventSig
query.Topics[0][15] = backendabi.L1BridgeBatchDepositSig
query.Topics[0][16] = backendabi.L1DepositWrappedTokenSig
query.Topics[0][8] = backendabi.L1RevertBatchEventSig
query.Topics[0][9] = backendabi.L1FinalizeBatchEventSig
query.Topics[0][10] = backendabi.L1QueueTransactionEventSig
query.Topics[0][11] = backendabi.L1DequeueTransactionEventSig
query.Topics[0][12] = backendabi.L1DropTransactionEventSig
query.Topics[0][13] = backendabi.L1BridgeBatchDepositSig
eventLogs, err := f.client.FilterLogs(ctx, query)
if err != nil {
@@ -266,18 +248,12 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
return isReorg, reorgHeight, blockHash, nil, nil
}
l1RevertedTxs, err := f.getRevertedTxs(ctx, from, to, blocks)
blockTimestampsMap, l1RevertedTxs, err := f.getRevertedTxs(ctx, from, to, blocks)
if err != nil {
log.Error("L1Fetcher getRevertedTxs failed", "from", from, "to", to, "error", err)
return false, 0, common.Hash{}, nil, err
}
// Map block number to block timestamp to avoid fetching block header multiple times to get block timestamp.
blockTimestampsMap := make(map[uint64]uint64)
for _, block := range blocks {
blockTimestampsMap[block.NumberU64()] = block.Time()
}
eventLogs, err := f.l1FetcherLogs(ctx, from, to)
if err != nil {
log.Error("L1Fetcher l1FetcherLogs failed", "from", from, "to", to, "error", err)
@@ -290,7 +266,7 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
return false, 0, common.Hash{}, nil, err
}
l1BatchEvents, err := f.parser.ParseL1BatchEventLogs(ctx, eventLogs, f.client, blockTimestampsMap)
l1BatchEvents, err := f.parser.ParseL1BatchEventLogs(ctx, eventLogs, f.client)
if err != nil {
log.Error("failed to parse L1 batch event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
@@ -363,10 +339,6 @@ func (f *L1FetcherLogic) updateMetrics(res L1FilterResult) {
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_skip_message").Add(1)
case btypes.MessageQueueEventTypeDropTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_drop_message").Add(1)
// one ResetDequeuedTransaction event could reset multiple skipped messages;
// this metric only counts the number of events, not the number of skipped messages.
case btypes.MessageQueueEventTypeResetDequeuedTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_reset_skipped_messages").Add(1)
}
}

View File

@@ -54,6 +54,7 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
@@ -67,6 +68,7 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
@@ -98,11 +100,6 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
}
if common.HexToAddress(cfg.WETHGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.WETHGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.WETHGatewayAddr))
}
log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
f := &L2FetcherLogic{

View File

@@ -2,7 +2,6 @@ package orm
import (
"context"
"errors"
"fmt"
"time"
@@ -46,7 +45,7 @@ func (c *BatchEvent) GetBatchEventSyncedHeightInDB(ctx context.Context) (uint64,
db = db.Model(&BatchEvent{})
db = db.Order("l1_block_number desc")
if err := db.First(&batch).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("failed to get batch synced height in db, error: %w", err)
@@ -54,26 +53,8 @@ func (c *BatchEvent) GetBatchEventSyncedHeightInDB(ctx context.Context) (uint64,
return batch.L1BlockNumber, nil
}
// GetLastUpdatedFinalizedBlockHeight returns the last updated finalized block height in db.
func (c *BatchEvent) GetLastUpdatedFinalizedBlockHeight(ctx context.Context) (uint64, error) {
var batch BatchEvent
db := c.db.WithContext(ctx)
db = db.Model(&BatchEvent{})
db = db.Where("batch_status = ?", btypes.BatchStatusTypeFinalized)
db = db.Where("update_status = ?", btypes.UpdateStatusTypeUpdated)
db = db.Order("batch_index desc")
if err := db.First(&batch).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
// No finalized batch found, return genesis batch's end block number.
return 0, nil
}
return 0, fmt.Errorf("failed to get last updated finalized block height, error: %w", err)
}
return batch.EndBlockNumber, nil
}
// GetUnupdatedFinalizedBatchesLEBlockHeight returns the finalized batches with end block <= given block height in db.
func (c *BatchEvent) GetUnupdatedFinalizedBatchesLEBlockHeight(ctx context.Context, blockHeight uint64) ([]*BatchEvent, error) {
// GetFinalizedBatchesLEBlockHeight returns the finalized batches with end block <= given block height in db.
func (c *BatchEvent) GetFinalizedBatchesLEBlockHeight(ctx context.Context, blockHeight uint64) ([]*BatchEvent, error) {
var batches []*BatchEvent
db := c.db.WithContext(ctx)
db = db.Model(&BatchEvent{})
@@ -82,10 +63,10 @@ func (c *BatchEvent) GetUnupdatedFinalizedBatchesLEBlockHeight(ctx context.Conte
db = db.Where("update_status = ?", btypes.UpdateStatusTypeUnupdated)
db = db.Order("batch_index asc")
if err := db.Find(&batches).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("failed to get unupdated finalized batches >= block height, error: %w", err)
return nil, fmt.Errorf("failed to get batches >= block height, error: %w", err)
}
return batches, nil
}
@@ -111,13 +92,12 @@ func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvent
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
updateFields["batch_status"] = btypes.BatchStatusTypeFinalized
updateFields["l1_block_number"] = l1BatchEvent.L1BlockNumber
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event, error: %w", err)
}
case btypes.BatchStatusTypeReverted:
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
db = db.Where("batch_status != ?", btypes.BatchStatusTypeFinalized)
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
updateFields["batch_status"] = btypes.BatchStatusTypeReverted
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event, error: %w", err)

View File

@@ -2,7 +2,6 @@ package orm
import (
"context"
"errors"
"fmt"
"time"
@@ -85,7 +84,7 @@ func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageTy
db = db.Order("l2_block_number desc")
}
if err := db.First(&message).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("failed to get latest processed height, type: %v, error: %w", messageType, err)
@@ -109,7 +108,7 @@ func (c *CrossMessage) GetL2LatestFinalizedWithdrawal(ctx context.Context) (*Cro
db = db.Where("rollup_status = ?", btypes.RollupStatusTypeFinalized)
db = db.Order("message_nonce desc")
if err := db.First(&message).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
@@ -128,10 +127,10 @@ func (c *CrossMessage) GetL2WithdrawalsByBlockRange(ctx context.Context, startBl
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Order("message_nonce asc")
if err := db.Find(&messages).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("failed to get L2 withdrawals by block range, error: %v", err)
return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
}
return messages, nil
}
@@ -218,12 +217,6 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = types.TxStatusTypeDropped
case btypes.MessageQueueEventTypeResetDequeuedTransaction:
db = db.Where("tx_status = ?", types.TxStatusTypeSkipped)
// reset skipped messages whose nonce is greater than or equal to the queue index.
db = db.Where("message_nonce >= ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = types.TxStatusTypeSent
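// Illustrative example (hypothetical index): a ResetDequeuedTransaction event with QueueIndex = 10
// flips every L1 sent message whose nonce >= 10 and whose tx_status is Skipped back to Sent.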
}
if err := db.Updates(txStatusUpdateFields).Error; err != nil {
return fmt.Errorf("failed to update tx statuses of L1 message queue events, update fields: %v, error: %w", txStatusUpdateFields, err)
@@ -237,7 +230,7 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
db = db.Model(&CrossMessage{})
txHashUpdateFields := make(map[string]interface{})
switch l1MessageQueueEvent.EventType {
case btypes.MessageQueueEventTypeDequeueTransaction, btypes.MessageQueueEventTypeResetDequeuedTransaction:
case btypes.MessageQueueEventTypeDequeueTransaction:
continue
case btypes.MessageQueueEventTypeQueueTransaction:
// only replayMessages or enforced txs (whose message hashes would not be found), sendMessages have been filtered out.

View File

@@ -70,7 +70,6 @@ const (
MessageQueueEventTypeQueueTransaction
MessageQueueEventTypeDequeueTransaction
MessageQueueEventTypeDropTransaction
MessageQueueEventTypeResetDequeuedTransaction
)
// BatchStatusType represents the type of batch status.

View File

@@ -38,7 +38,7 @@ func GetBlockNumber(ctx context.Context, client *ethclient.Client, confirmations
// @todo: add unit test.
func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
if log.Topics[0] != c.Events[event].ID {
return errors.New("event signature mismatch")
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.UnpackIntoInterface(out, event, log.Data); err != nil {
@@ -66,68 +66,32 @@ func ComputeMessageHash(
return common.BytesToHash(crypto.Keccak256(data))
}
// GetBatchVersionAndBlockRangeFromCalldata finds the batch version and the block range from calldata; the block range is inclusive on both ends.
func GetBatchVersionAndBlockRangeFromCalldata(txData []byte) (uint8, uint64, uint64, error) {
const methodIDLength = 4
if len(txData) < methodIDLength {
return 0, 0, 0, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
}
method, err := backendabi.IScrollChainABI.MethodById(txData[:methodIDLength])
type commitBatchArgs struct {
Version uint8
ParentBatchHeader []byte
Chunks [][]byte
SkippedL1MessageBitmap []byte
}
// GetBatchRangeFromCalldata finds the block range from calldata, both inclusive.
func GetBatchRangeFromCalldata(calldata []byte) (uint64, uint64, error) {
method := backendabi.IScrollChainABI.Methods["commitBatch"]
values, err := method.Inputs.Unpack(calldata[4:])
if err != nil {
return 0, 0, 0, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err)
// special case: import genesis batch
method = backendabi.IScrollChainABI.Methods["importGenesisBatch"]
_, err2 := method.Inputs.Unpack(calldata[4:])
if err2 == nil {
// genesis batch
return 0, 0, nil
}
// neither "commitBatch" nor "importGenesisBatch" matches, give up
return 0, 0, err
}
values, err := method.Inputs.Unpack(txData[methodIDLength:])
args := commitBatchArgs{}
err = method.Inputs.Copy(&args, values)
if err != nil {
return 0, 0, 0, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
}
var chunks [][]byte
var version uint8
if method.Name == "importGenesisBatch" {
return 0, 0, 0, nil
} else if method.Name == "commitBatch" {
type commitBatchArgs struct {
Version uint8
ParentBatchHeader []byte
Chunks [][]byte
SkippedL1MessageBitmap []byte
}
var args commitBatchArgs
if err = method.Inputs.Copy(&args, values); err != nil {
return 0, 0, 0, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err)
}
chunks = args.Chunks
version = args.Version
} else if method.Name == "commitBatchWithBlobProof" {
type commitBatchWithBlobProofArgs struct {
Version uint8
ParentBatchHeader []byte
Chunks [][]byte
SkippedL1MessageBitmap []byte
BlobDataProof []byte
}
var args commitBatchWithBlobProofArgs
if err = method.Inputs.Copy(&args, values); err != nil {
return 0, 0, 0, fmt.Errorf("failed to decode calldata into commitBatchWithBlobProofArgs args, values: %+v, err: %w", values, err)
}
chunks = args.Chunks
version = args.Version
} else if method.Name == "commitBatches" || method.Name == "commitAndFinalizeBatch" {
if len(values) < 3 {
return 0, 0, 0, fmt.Errorf("insufficient arguments for %s, expected 3, got %d", method.Name, len(values))
}
var ok bool
version, ok = values[0].(uint8)
if !ok {
return 0, 0, 0, fmt.Errorf("invalid version type: %T", values[0])
}
return version, 0, 0, nil
return 0, 0, err
}
var startBlock uint64
@@ -136,48 +100,19 @@ func GetBatchVersionAndBlockRangeFromCalldata(txData []byte) (uint8, uint64, uin
// decode blocks from chunk and assume that there's no empty chunk
// | 1 byte | 60 bytes | ... | 60 bytes |
// | num blocks | block 1 | ... | block n |
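// Illustrative example (hypothetical values): a chunk starting with byte 0x02 carries two blocks;
// bytes [1:9] are the first block's number (big-endian uint64), and the last block starts at
// offset 1 + (2-1)*60, so bytes [61:69] are the last block's number.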
if len(chunks) == 0 {
return 0, 0, 0, errors.New("invalid chunks")
if len(args.Chunks) == 0 {
return 0, 0, errors.New("invalid chunks")
}
chunk := chunks[0]
chunk := args.Chunks[0]
block := chunk[1:61] // first block in chunk
startBlock = binary.BigEndian.Uint64(block[0:8])
chunk = chunks[len(chunks)-1]
chunk = args.Chunks[len(args.Chunks)-1]
lastBlockIndex := int(chunk[0]) - 1
block = chunk[1+lastBlockIndex*60 : 1+lastBlockIndex*60+60] // last block in chunk
finishBlock = binary.BigEndian.Uint64(block[0:8])
return version, startBlock, finishBlock, err
}
// GetParentBatchHashFromCalldata gets the parent batch hash from calldata.
// It only supports commitBatches and commitAndFinalizeBatch, which only accept batches >= v7.
func GetParentBatchHashFromCalldata(txData []byte) (common.Hash, error) {
const methodIDLength = 4
if len(txData) < methodIDLength {
return common.Hash{}, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
}
method, err := backendabi.IScrollChainABI.MethodById(txData[:methodIDLength])
if err != nil {
return common.Hash{}, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err)
}
values, err := method.Inputs.Unpack(txData[methodIDLength:])
if err != nil {
return common.Hash{}, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
}
if method.Name == "commitBatches" || method.Name == "commitAndFinalizeBatch" {
if len(values) < 3 {
return common.Hash{}, fmt.Errorf("insufficient arguments for %s, expected 3, got %d", method.Name, len(values))
}
parentBatchHash, ok := values[1].([32]byte)
if !ok {
return common.Hash{}, fmt.Errorf("invalid parentBatchHash type: %T", values[1])
}
return common.BytesToHash(parentBatchHash[:]), nil
}
return common.Hash{}, fmt.Errorf("method %s does not support parent batch header", method.Name)
return startBlock, finishBlock, err
}
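// Illustrative usage (commitTx is a hypothetical commit transaction fetched earlier):
//   version, start, end, err := GetBatchVersionAndBlockRangeFromCalldata(commitTx.Data())
// For commitBatches/commitAndFinalizeBatch (batches >= v7) the block range is not in calldata,
// so start and end come back as 0 and must be recovered from the blob data instead.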
// GetBlocksInRange gets a batch of blocks for a block range [start, end] inclusive.

File diff suppressed because one or more lines are too long

View File

@@ -1,6 +0,0 @@
[patch."https://github.com/scroll-tech/scroll.git"]
scroll-zkvm-circuit-input-types-base = { path = "../common/types-rs/base"}
scroll-zkvm-circuit-input-types-aggregation = { path = "../common/types-rs/aggregation"}
scroll-zkvm-circuit-input-types-chunk = { path = "../common/types-rs/chunk"}
scroll-zkvm-circuit-input-types-batch = { path = "../common/types-rs/batch"}
scroll-zkvm-circuit-input-types-bundle = { path = "../common/types-rs/bundle"}

View File

@@ -91,7 +91,7 @@ linters-settings:
#local-prefixes: github.com/org/project
gocyclo:
# minimal code complexity to report, 30 by default (but we recommend 10-20)
min-complexity: 40
min-complexity: 30
maligned:
# print struct with more effective memory layout or not, false by default
suggest-new: true
@@ -254,9 +254,6 @@ issues:
- linters:
- wsl
text: "expressions should not be cuddled with declarations or returns"
- linters:
- govet
text: 'shadow: declaration of "(err|ctx)" shadows declaration at'
# Independently from option `exclude` we use default exclude patterns,
# it can be disabled by this option. To list all

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY go.mod* ./
@@ -11,13 +11,11 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge-history-api/cmd/api && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/bridgehistoryapi-api
cd /src/bridge-history-api/cmd/api && go build -v -p 4 -o /bin/bridgehistoryapi-api
# Pull bridgehistoryapi-api into a second stage deploy ubuntu container
FROM ubuntu:20.04
# Pull bridgehistoryapi-api into a second stage deploy alpine container
FROM alpine:latest
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
RUN apt update && apt install vim netcat-openbsd net-tools curl -y
COPY --from=builder /bin/bridgehistoryapi-api /bin/
WORKDIR /app
ENTRYPOINT ["bridgehistoryapi-api"]
ENTRYPOINT ["bridgehistoryapi-api"]

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY ./bridge-history-api/go.* ./
@@ -10,11 +10,10 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge-history-api/cmd/db_cli && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/db_cli
cd /src/bridge-history-api/cmd/db_cli && go build -v -p 4 -o /bin/db_cli
# Pull db_cli into a second stage deploy ubuntu container
FROM ubuntu:20.04
ENV CGO_LDFLAGS="-ldl"
# Pull db_cli into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/db_cli /bin/
WORKDIR /app
ENTRYPOINT ["db_cli"]

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY go.mod* ./
@@ -11,14 +11,11 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge-history-api/cmd/fetcher && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/bridgehistoryapi-fetcher
cd /src/bridge-history-api/cmd/fetcher && go build -v -p 4 -o /bin/bridgehistoryapi-fetcher
# Pull bridgehistoryapi-fetcher into a second stage deploy ubuntu container
FROM ubuntu:20.04
# Pull bridgehistoryapi-fetcher into a second stage deploy alpine container
FROM alpine:latest
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
RUN apt update && apt install ca-certificates vim netcat-openbsd net-tools curl -y
RUN update-ca-certificates
COPY --from=builder /bin/bridgehistoryapi-fetcher /bin/
WORKDIR /app
ENTRYPOINT ["bridgehistoryapi-fetcher"]
ENTRYPOINT ["bridgehistoryapi-fetcher"]

View File

@@ -1,5 +1,5 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as chef
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as chef
WORKDIR app
FROM chef as planner
@@ -9,10 +9,6 @@ RUN cargo chef prepare --recipe-path recipe.json
FROM chef as zkp-builder
COPY ./common/libzkp/impl/rust-toolchain ./
COPY --from=planner /app/recipe.json recipe.json
# run ./build/dockerfiles/coordinator-api/init-openvm.sh to get openvm-gpu
COPY ./build/dockerfiles/coordinator-api/openvm-gpu /openvm-gpu
COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
RUN cargo chef cook --release --recipe-path recipe.json
COPY ./common/libzkp/impl .
@@ -27,6 +23,7 @@ COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
@@ -37,14 +34,12 @@ FROM base as builder
COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/
RUN cd ./coordinator && make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/
# Pull coordinator into a second stage deploy ubuntu container
# Pull coordinator into a second stage deploy alpine container
FROM ubuntu:20.04
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/lib
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
# ENV CHAIN_ID=534353
RUN apt update && apt install vim netcat-openbsd net-tools curl jq -y
RUN mkdir -p /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/lib /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/coordinator_api /bin/

View File

@@ -1,24 +0,0 @@
# openvm
# same order and features as zkvm-prover/Cargo.toml.gpu
[patch."ssh://git@github.com/scroll-tech/openvm-gpu.git"]
openvm = { path = "/openvm-gpu/crates/toolchain/openvm", default-features = false }
openvm-algebra-complex-macros = { path = "/openvm-gpu/extensions/algebra/complex-macros", default-features = false }
openvm-algebra-guest = { path = "/openvm-gpu/extensions/algebra/guest", default-features = false }
openvm-bigint-guest = { path = "/openvm-gpu/extensions/bigint/guest", default-features = false }
openvm-build = { path = "/openvm-gpu/crates/toolchain/build", default-features = false }
openvm-circuit = { path = "/openvm-gpu/crates/vm", default-features = false }
openvm-custom-insn = { path = "/openvm-gpu/crates/toolchain/custom_insn", default-features = false }
openvm-continuations = { path = "/openvm-gpu/crates/continuations", default-features = false }
openvm-ecc-guest = { path = "/openvm-gpu/extensions/ecc/guest", default-features = false }
openvm-instructions ={ path = "/openvm-gpu/crates/toolchain/instructions", default-features = false }
openvm-keccak256-guest = { path = "/openvm-gpu/extensions/keccak256/guest", default-features = false }
openvm-native-circuit = { path = "/openvm-gpu/extensions/native/circuit", default-features = false }
openvm-native-compiler = { path = "/openvm-gpu/extensions/native/compiler", default-features = false }
openvm-native-recursion = { path = "/openvm-gpu/extensions/native/recursion", default-features = false }
openvm-native-transpiler = { path = "/openvm-gpu/extensions/native/transpiler", default-features = false }
openvm-pairing-guest = { path = "/openvm-gpu/extensions/pairing/guest", default-features = false }
openvm-rv32im-guest = { path = "/openvm-gpu/extensions/rv32im/guest", default-features = false }
openvm-rv32im-transpiler = { path = "/openvm-gpu/extensions/rv32im/transpiler", default-features = false }
openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics"] }
openvm-sha256-guest = { path = "/openvm-gpu/extensions/sha256/guest", default-features = false }
openvm-transpiler = { path = "/openvm-gpu/crates/toolchain/transpiler", default-features = false }

View File

@@ -1,2 +0,0 @@
[url "https://github.com/"]
insteadOf = ssh://git@github.com/

View File

@@ -1,12 +0,0 @@
#!/bin/bash
set -uex
OPENVM_GPU_COMMIT=dfa10b4
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# checkout openvm-gpu
if [ ! -d $DIR/openvm-gpu ]; then
git clone git@github.com:scroll-tech/openvm-gpu.git $DIR/openvm-gpu
fi
cd $DIR/openvm-gpu && git fetch && git checkout ${OPENVM_GPU_COMMIT}

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./
@@ -7,6 +7,7 @@ COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
@@ -15,12 +16,10 @@ RUN go mod download -x
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/coordinator/cmd/cron/ && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/coordinator_cron
cd /src/coordinator/cmd/cron/ && go build -v -p 4 -o /bin/coordinator_cron
# Pull coordinator into a second stage deploy ubuntu container
FROM ubuntu:20.04
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
RUN apt update && apt install vim netcat-openbsd net-tools curl -y
# Pull coordinator into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/coordinator_cron /bin/
WORKDIR /app
ENTRYPOINT ["coordinator_cron"]
ENTRYPOINT ["coordinator_cron"]

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./
@@ -7,6 +7,7 @@ COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
@@ -16,11 +17,10 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/database/cmd && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/db_cli
cd /src/database/cmd && go build -v -p 4 -o /bin/db_cli
# Pull db_cli into a second stage deploy ubuntu container
FROM ubuntu:20.04
ENV CGO_LDFLAGS="-ldl"
# Pull db_cli into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/db_cli /bin/
WORKDIR /app
ENTRYPOINT ["db_cli"]

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
@@ -7,24 +7,23 @@ COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
# Build proposer_tool
# Build event_watcher
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/proposer_tool/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/proposer_tool
cd /src/rollup/cmd/event_watcher/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/event_watcher
# Pull proposer_tool into a second stage deploy ubuntu container
# Pull event_watcher into a second stage deploy alpine container
FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/proposer_tool /bin/
COPY --from=builder /bin/event_watcher /bin/
WORKDIR /app
ENTRYPOINT ["proposer_tool"]
ENTRYPOINT ["event_watcher"]

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
@@ -7,6 +7,7 @@ COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
@@ -18,13 +19,11 @@ RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/gas_oracle/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/gas_oracle
# Pull gas_oracle into a second stage deploy ubuntu container
# Pull gas_oracle into a second stage deploy alpine container
FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/gas_oracle /bin/
WORKDIR /app
ENTRYPOINT ["gas_oracle"]
ENTRYPOINT ["gas_oracle"]

View File

@@ -1,5 +1,5 @@
ARG CUDA_VERSION=11.7.1
ARG GO_VERSION=1.22.12
ARG GO_VERSION=1.21
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
@@ -36,7 +36,7 @@ RUN if [ "$(uname -m)" = "x86_64" ]; then \
else \
echo "Unsupported architecture"; exit 1; \
fi
RUN wget https://go.dev/dl/go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
ENV PATH="/usr/local/go/bin:${PATH}"

View File

@@ -1,4 +1,4 @@
ARG GO_VERSION=1.22.12
ARG GO_VERSION=1.21
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
@@ -32,7 +32,7 @@ RUN if [ "$(uname -m)" = "x86_64" ]; then \
else \
echo "Unsupported architecture"; exit 1; \
fi
RUN wget https://go.dev/dl/go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
ENV PATH="/usr/local/go/bin:${PATH}"

View File

@@ -1,23 +0,0 @@
FROM ubuntu:24.04 AS builder
RUN apt-get update -y && apt-get upgrade -y
# Install basic packages
RUN apt-get install build-essential curl wget git pkg-config -y
# Install dev-packages
RUN apt-get install libclang-dev libssl-dev llvm -y
# Install Rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
ENV CARGO_HOME=/root/.cargo
COPY . /src
RUN cd /src/zkvm-prover && make prover
FROM ubuntu:24.04 AS runtime
COPY --from=builder /src/zkvm-prover/target/release/prover /usr/local/bin/
ENTRYPOINT ["prover"]

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
@@ -7,6 +7,7 @@ COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
@@ -18,11 +19,9 @@ RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/rollup_relayer/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer
# Pull rollup_relayer into a second stage deploy ubuntu container
# Pull rollup_relayer into a second stage deploy alpine container
FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/rollup_relayer /bin/

89
common/forks/forks.go Normal file
View File

@@ -0,0 +1,89 @@
package forks
import (
"math"
"math/big"
"sort"
"github.com/scroll-tech/go-ethereum/params"
)
// CollectSortedForkHeights returns a sorted set of block numbers at which one or more forks are activated
func CollectSortedForkHeights(config *params.ChainConfig) ([]uint64, map[uint64]bool, map[string]uint64) {
type nameFork struct {
name string
block *big.Int
}
forkHeightNameMap := make(map[uint64]string)
for _, fork := range []nameFork{
{name: "homestead", block: config.HomesteadBlock},
{name: "daoFork", block: config.DAOForkBlock},
{name: "eip150", block: config.EIP150Block},
{name: "eip155", block: config.EIP155Block},
{name: "eip158", block: config.EIP158Block},
{name: "byzantium", block: config.ByzantiumBlock},
{name: "constantinople", block: config.ConstantinopleBlock},
{name: "petersburg", block: config.PetersburgBlock},
{name: "istanbul", block: config.IstanbulBlock},
{name: "muirGlacier", block: config.MuirGlacierBlock},
{name: "berlin", block: config.BerlinBlock},
{name: "london", block: config.LondonBlock},
{name: "arrowGlacier", block: config.ArrowGlacierBlock},
{name: "archimedes", block: config.ArchimedesBlock},
{name: "shanghai", block: config.ShanghaiBlock},
{name: "bernoulli", block: config.BernoulliBlock},
{name: "curie", block: config.CurieBlock},
} {
if fork.block == nil {
continue
}
height := fork.block.Uint64()
// only keep the latest fork at each height, discard the rest
forkHeightNameMap[height] = fork.name
}
forkHeightsMap := make(map[uint64]bool)
forkNameHeightMap := make(map[string]uint64)
for height, name := range forkHeightNameMap {
forkHeightsMap[height] = true
forkNameHeightMap[name] = height
}
var forkHeights []uint64
for height := range forkHeightsMap {
forkHeights = append(forkHeights, height)
}
sort.Slice(forkHeights, func(i, j int) bool {
return forkHeights[i] < forkHeights[j]
})
return forkHeights, forkHeightsMap, forkNameHeightMap
}
// BlocksUntilFork returns the number of blocks until the next fork
// returns 0 if there is no fork scheduled for the future
func BlocksUntilFork(blockHeight uint64, forkHeights []uint64) uint64 {
for _, forkHeight := range forkHeights {
if forkHeight > blockHeight {
return forkHeight - blockHeight
}
}
return 0
}
// BlockRange returns the half-open block range [from, to) for the fork that is active at currentForkHeight
// The forkHeights slice must be sorted in increasing order
func BlockRange(currentForkHeight uint64, forkHeights []uint64) (from, to uint64) {
to = math.MaxInt64
for _, height := range forkHeights {
if currentForkHeight < height {
to = height
return
}
from = height
}
return
}
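A minimal usage sketch of these helpers, assuming a chain config with only the Bernoulli and Curie forks set (as in the tests below); the import path follows the scroll-tech/common module layout:

package main

import (
    "fmt"
    "math/big"

    "scroll-tech/common/forks"

    "github.com/scroll-tech/go-ethereum/params"
)

func main() {
    cfg := &params.ChainConfig{
        BernoulliBlock: big.NewInt(3),
        CurieBlock:     big.NewInt(4),
    }

    // Sorted activation heights plus lookup maps keyed by height and by fork name.
    heights, _, byName := forks.CollectSortedForkHeights(cfg)
    fmt.Println(heights)         // [3 4]
    fmt.Println(byName["curie"]) // 4

    // Two blocks remain until the Bernoulli fork at height 3 when the chain is at height 1.
    fmt.Println(forks.BlocksUntilFork(1, heights))

    // Bernoulli is active over the half-open range [3, 4).
    fmt.Println(forks.BlockRange(3, heights))
}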

142
common/forks/forks_test.go Normal file
View File

@@ -0,0 +1,142 @@
package forks
import (
"math"
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/params"
"github.com/stretchr/testify/require"
)
func TestCollectSortedForkBlocks(t *testing.T) {
l, m, n := CollectSortedForkHeights(&params.ChainConfig{
ArchimedesBlock: big.NewInt(0),
ShanghaiBlock: big.NewInt(3),
BernoulliBlock: big.NewInt(3),
CurieBlock: big.NewInt(4),
})
require.Equal(t, l, []uint64{
0,
3,
4,
})
require.Equal(t, map[uint64]bool{
3: true,
4: true,
0: true,
}, m)
require.Equal(t, map[string]uint64{
"archimedes": 0,
"bernoulli": 3,
"curie": 4,
}, n)
}
func TestBlocksUntilFork(t *testing.T) {
tests := map[string]struct {
block uint64
forks []uint64
expected uint64
}{
"NoFork": {
block: 44,
forks: []uint64{},
expected: 0,
},
"BeforeFork": {
block: 0,
forks: []uint64{1, 5},
expected: 1,
},
"OnFork": {
block: 1,
forks: []uint64{1, 5},
expected: 4,
},
"OnLastFork": {
block: 5,
forks: []uint64{1, 5},
expected: 0,
},
"AfterFork": {
block: 5,
forks: []uint64{1, 5},
expected: 0,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
require.Equal(t, test.expected, BlocksUntilFork(test.block, test.forks))
})
}
}
func TestBlockRange(t *testing.T) {
tests := []struct {
name string
forkHeight uint64
forkHeights []uint64
expectedFrom uint64
expectedTo uint64
}{
{
name: "ToInfinite",
forkHeight: 300,
forkHeights: []uint64{100, 200, 300},
expectedFrom: 300,
expectedTo: math.MaxInt64,
},
{
name: "To300",
forkHeight: 200,
forkHeights: []uint64{100, 200, 300},
expectedFrom: 200,
expectedTo: 300,
},
{
name: "To200",
forkHeight: 100,
forkHeights: []uint64{100, 200, 300},
expectedFrom: 100,
expectedTo: 200,
},
{
name: "To100",
forkHeight: 0,
forkHeights: []uint64{100, 200, 300},
expectedFrom: 0,
expectedTo: 100,
},
{
name: "To200-1",
forkHeight: 100,
forkHeights: []uint64{100, 200},
expectedFrom: 100,
expectedTo: 200,
},
{
name: "To2",
forkHeight: 1,
forkHeights: []uint64{1, 2},
expectedFrom: 1,
expectedTo: 2,
},
{
name: "ToInfinite-1",
forkHeight: 0,
forkHeights: []uint64{0},
expectedFrom: 0,
expectedTo: math.MaxInt64,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
from, to := BlockRange(test.forkHeight, test.forkHeights)
require.Equal(t, test.expectedFrom, from)
require.Equal(t, test.expectedTo, to)
})
}
}

View File

@@ -1,12 +1,10 @@
module scroll-tech/common
go 1.22
toolchain go1.22.2
go 1.21
require (
github.com/Masterminds/semver/v3 v3.2.1
github.com/bits-and-blooms/bitset v1.20.0
github.com/bits-and-blooms/bitset v1.12.0
github.com/docker/docker v26.1.0+incompatible
github.com/gin-contrib/pprof v1.4.0
github.com/gin-gonic/gin v1.9.1
@@ -15,8 +13,8 @@ require (
github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
github.com/stretchr/testify v1.10.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/stretchr/testify v1.9.0
github.com/testcontainers/testcontainers-go v0.30.0
github.com/testcontainers/testcontainers-go/modules/compose v0.30.0
github.com/testcontainers/testcontainers-go/modules/postgres v0.30.0
@@ -32,7 +30,7 @@ require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Microsoft/hcsshim v0.11.4 // indirect
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
github.com/aws/aws-sdk-go-v2 v1.21.2 // indirect
github.com/aws/aws-sdk-go-v2/config v1.18.45 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.43 // indirect
@@ -55,18 +53,18 @@ require (
github.com/chenzhuoyu/iasm v0.9.0 // indirect
github.com/cloudflare/cfssl v1.6.5 // indirect
github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60 // indirect
github.com/consensys/bavard v0.1.29 // indirect
github.com/consensys/gnark-crypto v0.16.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.1 // indirect
github.com/containerd/console v1.0.3 // indirect
github.com/containerd/containerd v1.7.12 // indirect
github.com/containerd/continuity v0.4.2 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/typeurl/v2 v2.1.1 // indirect
github.com/cpuguy83/dockercfg v0.3.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/distribution/reference v0.5.0 // indirect
github.com/docker/buildx v0.12.0-rc2.0.20231219140829-617f538cb315 // indirect
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible // indirect
@@ -79,7 +77,7 @@ require (
github.com/docker/go-units v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fjl/memsize v0.0.2 // indirect
github.com/fsnotify/fsevents v0.1.1 // indirect
@@ -119,9 +117,9 @@ require (
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.3.2 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.17 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/in-toto/in-toto-golang v0.5.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -136,12 +134,12 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
@@ -184,8 +182,7 @@ require (
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/scroll-tech/zktrie v0.8.2 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
github.com/shibumi/go-pathspec v1.3.0 // indirect
@@ -193,17 +190,16 @@ require (
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/cobra v1.8.1 // indirect
github.com/spf13/cobra v1.8.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.4.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.13 // indirect
github.com/supranational/blst v0.3.11 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 // indirect
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 // indirect
@@ -214,7 +210,7 @@ require (
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.45.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
@@ -231,21 +227,19 @@ require (
go.opentelemetry.io/otel/sdk/metric v1.19.0 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/mock v0.4.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/arch v0.5.0 // indirect
golang.org/x/crypto v0.32.0 // indirect
golang.org/x/crypto v0.19.0 // indirect
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/mod v0.16.0 // indirect
golang.org/x/net v0.20.0 // indirect
golang.org/x/oauth2 v0.16.0 // indirect
golang.org/x/sync v0.11.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/term v0.28.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/term v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
golang.org/x/tools v0.17.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect

View File

@@ -27,11 +27,9 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
@@ -72,8 +70,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA=
github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
@@ -121,10 +119,10 @@ github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUo
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60 h1:NlkpaLBPFr05mNJWVMH7PP4L30gFG6k4z1QpypLUSh8=
github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60/go.mod h1:bEPizBkIojlQ20pi2vNluBa58tevvj0Y18oUSHPyfdc=
github.com/consensys/bavard v0.1.29 h1:fobxIYksIQ+ZSrTJUuQgu+HIJwclrAPcdXqd7H2hh1k=
github.com/consensys/bavard v0.1.29/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo=
github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
@@ -153,10 +151,10 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E=
github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
@@ -165,8 +163,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
@@ -214,8 +212,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA=
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
@@ -381,13 +379,13 @@ github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY=
@@ -436,8 +434,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@@ -455,8 +453,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
@@ -471,9 +469,8 @@ github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0V
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
@@ -630,18 +627,16 @@ github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRr
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 h1:ka9QPuQg2u4LGipiZGsgkg3rJCo4iIUCy75FddM0GRQ=
@@ -663,8 +658,6 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spdx/tools-golang v0.5.1 h1:fJg3SVOGG+eIva9ZUBm/hvyA7PIPVFjRxUKe6fdAgwE=
github.com/spdx/tools-golang v0.5.1/go.mod h1:/DRDQuBfB37HctM29YtrX1v+bXiVmT2OpQDalRmX9aU=
@@ -674,8 +667,8 @@ github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IEx
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
@@ -705,10 +698,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E=
@@ -721,12 +714,10 @@ github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4D
github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 h1:QB54BJwA6x8QU9nHY3xJSZR2kX9bgpZekRKGkLTmEXA=
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375/go.mod h1:xRroudyp5iVtxKqZCrA6n2TLFRBf8bmnjr1UD4x+z7g=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo=
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 h1:ZT8ibgassurSISJ1Pj26NsM3vY2jxFZn63Nd/TpHmRw=
github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302/go.mod h1:9kMVqMyQ/Sx2df5LtnGG+nbrmiZzCS7V6gjW3oGHsvI=
@@ -763,9 +754,8 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300 h1:DZH5n7L3L8RxKdSyJHZt7WePgwdhHnPhQFdQSJaHF+o=
github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300/go.mod h1:mOd4yUMgn2fe2nV9KXsa9AyQBFZGzygVPovsZR+Rl5w=
github.com/zmap/zlint/v3 v3.5.0 h1:Eh2B5t6VKgVH0DFmTwOqE50POvyDhUaU9T2mJOe1vfQ=
@@ -806,15 +796,11 @@ go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y=
@@ -829,8 +815,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
@@ -840,8 +826,8 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -860,8 +846,8 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
@@ -873,8 +859,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -914,23 +900,22 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -945,8 +930,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -6,8 +6,6 @@ export RUST_BACKTRACE=full
export RUST_LOG=debug
export RUST_MIN_STACK=100000000
export PROVER_OUTPUT_DIR=test_zkp_test
export SCROLL_PROVER_ASSETS_DIR=/assets/test_assets
export DARWIN_V2_TEST_DIR=/assets
#export LD_LIBRARY_PATH=/:/usr/local/cuda/lib64
mkdir -p $PROVER_OUTPUT_DIR
@@ -15,16 +13,32 @@ mkdir -p $PROVER_OUTPUT_DIR
REPO=$(realpath ../..)
function build_test_bins() {
cd impl
cargo build --release
ln -f -s $(realpath target/release/libzkp.so) $REPO/prover/core/lib
ln -f -s $(realpath target/release/libzkp.so) $REPO/coordinator/internal/logic/verifier/lib
cd $REPO/prover
make tests_binary
go test -tags="gpu ffi" -timeout 0 -c core/prover_test.go
cd $REPO/coordinator
make libzkp
go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier
cd $REPO/common/libzkp
}
function build_test_bins_old() {
cd $REPO
cd prover
make libzkp
go test -tags="gpu ffi" -timeout 0 -c core/prover_test.go
cd ..
cd coordinator
make libzkp
go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier
cd ..
cd common/libzkp
}
build_test_bins
rm -rf $PROVER_OUTPUT_DIR/*
#rm -rf test_zkp_test/*
#rm -rf prover.log verifier.log
$REPO/prover/prover.test --exact zk_circuits_handler::darwin_v2::tests::test_circuits 2>&1 | tee prover.log
#$REPO/prover/core.test -test.v 2>&1 | tee prover.log
$REPO/coordinator/verifier.test -test.v 2>&1 | tee verifier.log

File diff suppressed because it is too large

View File

@@ -7,19 +7,34 @@ edition = "2021"
[lib]
crate-type = ["cdylib"]
[dependencies]
scroll-zkvm-prover.workspace = true
scroll-zkvm-verifier.workspace = true
[patch.crates-io]
gobuild = { git = "https://github.com/scroll-tech/gobuild.git" }
halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
#ethers-etherscan = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
#ethers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }
[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }
env_logger = "0.11.0"
[dependencies]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.1", default-features = false, features = ["parallel_syn", "scroll"] }
base64 = "0.13.0"
env_logger = "0.9.0"
libc = "0.2"
log = "0.4"
base64.workspace = true
once_cell.workspace = true
serde.workspace = true
once_cell = "1.19"
serde = "1.0"
serde_derive = "1.0"
serde_json.workspace = true
anyhow = "1"
serde_json = "1.0.66"
[profile.test]
opt-level = 3

View File

@@ -1 +1 @@
nightly-2024-12-06
nightly-2023-12-03

View File

@@ -0,0 +1,198 @@
use crate::{
types::{CheckChunkProofsResponse, ProofResult},
utils::{
c_char_to_str, c_char_to_vec, file_exists, panic_catch, string_to_c_char, vec_to_c_char,
OUTPUT_DIR,
},
};
use libc::c_char;
use prover::{
aggregator::{Prover, Verifier},
check_chunk_hashes,
consts::AGG_VK_FILENAME,
utils::{chunk_trace_to_witness_block, init_env_and_log},
BatchProof, BatchProvingTask, BlockTrace, ChunkInfo, ChunkProof,
};
use snark_verifier_sdk::verify_evm_calldata;
use std::{cell::OnceCell, env, ptr::null};
static mut PROVER: OnceCell<Prover> = OnceCell::new();
static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char, assets_dir: *const c_char) {
init_env_and_log("ffi_batch_prove");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
// The VK file must exist here; inside the prover it is treated as optional and its absence is only logged as a warning.
if !file_exists(assets_dir, &AGG_VK_FILENAME) {
panic!("{} must exist in folder {}", *AGG_VK_FILENAME, assets_dir);
}
let prover = Prover::from_dirs(params_dir, assets_dir);
PROVER.set(prover).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_batch_verifier(params_dir: *const c_char, assets_dir: *const c_char) {
init_env_and_log("ffi_batch_verify");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let verifier = Verifier::from_dirs(params_dir, assets_dir);
VERIFIER.set(verifier).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_batch_vk() -> *const c_char {
let vk_result = panic_catch(|| PROVER.get_mut().unwrap().get_vk());
vk_result
.ok()
.flatten()
.map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> *const c_char {
let check_result: Result<bool, String> = panic_catch(|| {
let chunk_proofs = c_char_to_vec(chunk_proofs);
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
.map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?;
if chunk_proofs.is_empty() {
return Err("provided chunk proofs are empty.".to_string());
}
let prover_ref = PROVER.get().expect("failed to get reference to PROVER.");
let valid = prover_ref.check_protocol_of_chunks(&chunk_proofs);
Ok(valid)
})
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
let r = match check_result {
Ok(valid) => CheckChunkProofsResponse {
ok: valid,
error: None,
},
Err(err) => CheckChunkProofsResponse {
ok: false,
error: Some(err),
},
};
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_batch_proof(
chunk_hashes: *const c_char,
chunk_proofs: *const c_char,
) -> *const c_char {
let proof_result: Result<Vec<u8>, String> = panic_catch(|| {
let chunk_hashes = c_char_to_vec(chunk_hashes);
let chunk_proofs = c_char_to_vec(chunk_proofs);
let chunk_hashes = serde_json::from_slice::<Vec<ChunkInfo>>(&chunk_hashes)
.map_err(|e| format!("failed to deserialize chunk hashes: {e:?}"))?;
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
.map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?;
if chunk_hashes.len() != chunk_proofs.len() {
return Err(format!("chunk hashes and chunk proofs lengths mismatch: chunk_hashes.len() = {}, chunk_proofs.len() = {}",
chunk_hashes.len(), chunk_proofs.len()));
}
let chunk_hashes_proofs: Vec<(_,_)> = chunk_hashes
.into_iter()
.zip(chunk_proofs.clone())
.collect();
check_chunk_hashes("", &chunk_hashes_proofs).map_err(|e| format!("failed to check chunk info: {e:?}"))?;
let batch = BatchProvingTask {
chunk_proofs
};
let proof = PROVER
.get_mut()
.expect("failed to get mutable reference to PROVER.")
.gen_agg_evm_proof(batch, None, OUTPUT_DIR.as_deref())
.map_err(|e| format!("failed to generate proof: {e:?}"))?;
serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))
})
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
let r = match proof_result {
Ok(proof_bytes) => ProofResult {
message: Some(proof_bytes),
error: None,
},
Err(err) => ProofResult {
message: None,
error: Some(err),
},
};
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
let proof = c_char_to_vec(proof);
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
let fork_name_str = c_char_to_str(fork_name);
let fork_id = match fork_name_str {
"bernoulli" => 2,
"curie" => 3,
_ => {
log::warn!("unexpected fork_name {fork_name_str}, treated as curie");
3
}
};
let verified = panic_catch(|| {
if fork_id == 2 {
// before upgrade #3 (DA Compression)
verify_evm_calldata(
include_bytes!("plonk_verifier_0.10.3.bin").to_vec(),
proof.calldata(),
)
} else {
VERIFIER.get().unwrap().verify_agg_evm_proof(proof)
}
});
verified.unwrap_or(false) as c_char
}
// This function is only used for debugging on the Go side.
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn block_traces_to_chunk_info(block_traces: *const c_char) -> *const c_char {
let block_traces = c_char_to_vec(block_traces);
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces).unwrap();
let witness_block = chunk_trace_to_witness_block(block_traces).unwrap();
let chunk_info = ChunkInfo::from_witness_block(&witness_block, false);
let chunk_info_bytes = serde_json::to_vec(&chunk_info).unwrap();
vec_to_c_char(chunk_info_bytes)
}

View File

@@ -0,0 +1,108 @@
use crate::{
types::ProofResult,
utils::{
c_char_to_str, c_char_to_vec, file_exists, panic_catch, string_to_c_char, vec_to_c_char,
OUTPUT_DIR,
},
};
use libc::c_char;
use prover::{
consts::CHUNK_VK_FILENAME,
utils::init_env_and_log,
zkevm::{Prover, Verifier},
BlockTrace, ChunkProof, ChunkProvingTask,
};
use std::{cell::OnceCell, env, ptr::null};
static mut PROVER: OnceCell<Prover> = OnceCell::new();
static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_chunk_prover(params_dir: *const c_char, assets_dir: *const c_char) {
init_env_and_log("ffi_chunk_prove");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
// The VK file must exist here; inside the prover it is treated as optional and its absence is only logged as a warning.
if !file_exists(assets_dir, &CHUNK_VK_FILENAME) {
panic!("{} must exist in folder {}", *CHUNK_VK_FILENAME, assets_dir);
}
let prover = Prover::from_dirs(params_dir, assets_dir);
PROVER.set(prover).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_chunk_verifier(params_dir: *const c_char, assets_dir: *const c_char) {
init_env_and_log("ffi_chunk_verify");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let verifier = Verifier::from_dirs(params_dir, assets_dir);
VERIFIER.set(verifier).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_chunk_vk() -> *const c_char {
let vk_result = panic_catch(|| PROVER.get_mut().unwrap().get_vk());
vk_result
.ok()
.flatten()
.map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const c_char {
let proof_result: Result<Vec<u8>, String> = panic_catch(|| {
let block_traces = c_char_to_vec(block_traces);
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces)
.map_err(|e| format!("failed to deserialize block traces: {e:?}"))?;
let chunk = ChunkProvingTask::from(block_traces);
let proof = PROVER
.get_mut()
.expect("failed to get mutable reference to PROVER.")
.gen_chunk_proof(chunk, None, None, OUTPUT_DIR.as_deref())
.map_err(|e| format!("failed to generate proof: {e:?}"))?;
serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))
})
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
let r = match proof_result {
Ok(proof_bytes) => ProofResult {
message: Some(proof_bytes),
error: None,
},
Err(err) => ProofResult {
message: None,
error: Some(err),
},
};
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_chunk_proof(proof: *const c_char) -> c_char {
let proof = c_char_to_vec(proof);
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
let verified = panic_catch(|| VERIFIER.get().unwrap().verify_chunk_proof(proof));
verified.unwrap_or(false) as c_char
}

View File

@@ -1,76 +1,4 @@
mod batch;
mod chunk;
mod types;
mod utils;
mod verifier;
use std::path::Path;
use crate::utils::{c_char_to_str, c_char_to_vec};
use libc::c_char;
use verifier::{TaskType, VerifierConfig};
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init(config: *const c_char) {
let config_str = c_char_to_str(config);
let verifier_config = serde_json::from_str::<VerifierConfig>(config_str).unwrap();
verifier::init(verifier_config);
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_chunk_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Chunk)
}
fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
let fork_name_str = c_char_to_str(fork_name);
let proof = c_char_to_vec(proof);
let verifier = verifier::get_verifier(fork_name_str);
if let Err(e) = verifier {
log::warn!("failed to get verifier, error: {:#}", e);
return 0 as c_char;
}
match verifier.unwrap().verify(task_type, proof) {
Err(e) => {
log::error!("{:?} verify failed, error: {:#}", task_type, e);
false as c_char
}
Ok(result) => result as c_char,
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Batch)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_bundle_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Bundle)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn dump_vk(fork_name: *const c_char, file: *const c_char) {
_dump_vk(fork_name, file);
}
fn _dump_vk(fork_name: *const c_char, file: *const c_char) {
let fork_name_str = c_char_to_str(fork_name);
let verifier = verifier::get_verifier(fork_name_str);
if let Ok(verifier) = verifier {
verifier.as_ref().dump_vk(Path::new(c_char_to_str(file)));
}
}
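A hypothetical cgo sketch of how a Go caller could drive these exports (init with a JSON VerifierConfig, then verify_chunk_proof); the actual wrapper under coordinator/internal/logic/verifier may differ in linking flags, header layout, and error handling:

package main

/*
// Link against the libzkp cdylib built from this crate; the library name and
// search path here are placeholders.
#cgo LDFLAGS: -lzkp -ldl -lm
#include <stdlib.h>
void init(char* config);
char verify_chunk_proof(char* proof, char* fork_name);
*/
import "C"

import (
    "fmt"
    "unsafe"
)

func main() {
    // VerifierConfig JSON matching the struct parsed by init (see verifier.rs below);
    // the params/assets paths are placeholders.
    config := C.CString(`{
        "low_version_circuit":  {"fork_name": "euclid",   "params_path": "/params", "assets_path": "/assets/low"},
        "high_version_circuit": {"fork_name": "euclidV2", "params_path": "/params", "assets_path": "/assets/high"}
    }`)
    defer C.free(unsafe.Pointer(config))
    C.init(config)

    proof := C.CString(`{"...": "..."}`) // serialized chunk proof JSON, elided
    fork := C.CString("euclidV2")
    defer C.free(unsafe.Pointer(proof))
    defer C.free(unsafe.Pointer(fork))

    ok := C.verify_chunk_proof(proof, fork) != 0
    fmt.Println("chunk proof verified:", ok)
}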

Binary file not shown.

View File

@@ -0,0 +1,22 @@
use serde::{Deserialize, Serialize};
// Represents the result of a chunk proof checking operation.
// `ok` indicates whether the proof checking was successful.
// `error` provides additional details in case the check failed.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct CheckChunkProofsResponse {
pub ok: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
}
// Encapsulates the result from generating a proof.
// `message` holds the generated proof in byte slice format.
// `error` provides additional details in case the proof generation failed.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ProofResult {
#[serde(skip_serializing_if = "Option::is_none")]
pub message: Option<Vec<u8>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
}

View File

@@ -1,9 +1,29 @@
use once_cell::sync::Lazy;
use std::{
ffi::CStr,
env,
ffi::{CStr, CString},
os::raw::c_char,
panic::{catch_unwind, AssertUnwindSafe},
path::PathBuf,
};
// Only used for debugging.
pub(crate) static OUTPUT_DIR: Lazy<Option<String>> =
Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());
/// # Safety
#[no_mangle]
pub extern "C" fn free_c_chars(ptr: *mut c_char) {
if ptr.is_null() {
log::warn!("Try to free an empty pointer!");
return;
}
unsafe {
let _ = CString::from_raw(ptr);
}
}
pub(crate) fn c_char_to_str(c: *const c_char) -> &'static str {
let cstr = unsafe { CStr::from_ptr(c) };
cstr.to_str().unwrap()
@@ -14,6 +34,21 @@ pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec<u8> {
cstr.to_bytes().to_vec()
}
pub(crate) fn string_to_c_char(string: String) -> *const c_char {
CString::new(string).unwrap().into_raw()
}
pub(crate) fn vec_to_c_char(bytes: Vec<u8>) -> *const c_char {
CString::new(bytes).unwrap().into_raw()
}
pub(crate) fn file_exists(dir: &str, filename: &str) -> bool {
let mut path = PathBuf::from(dir);
path.push(filename);
path.exists()
}
pub(crate) fn panic_catch<F: FnOnce() -> R, R>(f: F) -> Result<R, String> {
catch_unwind(AssertUnwindSafe(f)).map_err(|err| {
if let Some(s) = err.downcast_ref::<String>() {

View File

@@ -1,88 +0,0 @@
#![allow(static_mut_refs)]
mod euclid;
mod euclidv2;
use anyhow::{bail, Result};
use euclid::EuclidVerifier;
use euclidv2::EuclidV2Verifier;
use serde::{Deserialize, Serialize};
use std::{cell::OnceCell, path::Path, rc::Rc};
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum TaskType {
Chunk,
Batch,
Bundle,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct VKDump {
pub chunk_vk: String,
pub batch_vk: String,
pub bundle_vk: String,
}
pub trait ProofVerifier {
fn verify(&self, task_type: TaskType, proof: Vec<u8>) -> Result<bool>;
fn dump_vk(&self, file: &Path);
}
#[derive(Debug, Serialize, Deserialize)]
pub struct CircuitConfig {
pub fork_name: String,
pub params_path: String,
pub assets_path: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct VerifierConfig {
pub low_version_circuit: CircuitConfig,
pub high_version_circuit: CircuitConfig,
}
type HardForkName = String;
struct VerifierPair(HardForkName, Rc<Box<dyn ProofVerifier>>);
static mut VERIFIER_LOW: OnceCell<VerifierPair> = OnceCell::new();
static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();
pub fn init(config: VerifierConfig) {
let verifier = EuclidVerifier::new(&config.high_version_circuit.assets_path);
unsafe {
VERIFIER_LOW
.set(VerifierPair(
"euclid".to_string(),
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();
}
let verifier = EuclidV2Verifier::new(&config.high_version_circuit.assets_path);
unsafe {
VERIFIER_HIGH
.set(VerifierPair(
"euclidV2".to_string(),
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();
}
}
pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
unsafe {
if let Some(verifier) = VERIFIER_LOW.get() {
if verifier.0 == fork_name {
return Ok(verifier.1.clone());
}
}
if let Some(verifier) = VERIFIER_HIGH.get() {
if verifier.0 == fork_name {
return Ok(verifier.1.clone());
}
}
}
bail!("failed to get verifier, key not found, {}", fork_name)
}

View File

@@ -1,66 +0,0 @@
use super::{ProofVerifier, TaskType, VKDump};
use anyhow::Result;
use crate::utils::panic_catch;
use scroll_zkvm_prover::{BatchProof, BundleProof, ChunkProof};
use scroll_zkvm_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV1, ChunkVerifier};
use std::{fs::File, path::Path};
pub struct EuclidVerifier {
chunk_verifier: ChunkVerifier,
batch_verifier: BatchVerifier,
bundle_verifier: BundleVerifierEuclidV1,
}
impl EuclidVerifier {
pub fn new(assets_dir: &str) -> Self {
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
let config = Path::new(assets_dir).join("root-verifier-vm-config");
let exe = Path::new(assets_dir).join("root-verifier-committed-exe");
Self {
chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up chunk verifier"),
batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up batch verifier"),
bundle_verifier: BundleVerifierEuclidV1::setup(&config, &exe, &verifier_bin)
.expect("Setting up bundle verifier"),
}
}
}
impl ProofVerifier for EuclidVerifier {
fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
self.chunk_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
self.batch_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
self.bundle_verifier
.verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
}
})
.map_err(|err_str: String| anyhow::anyhow!(err_str))
}
fn dump_vk(&self, file: &Path) {
use base64::{prelude::BASE64_STANDARD, Engine};
let f = File::create(file).expect("Failed to open file to dump VK");
let dump = VKDump {
chunk_vk: BASE64_STANDARD.encode(self.chunk_verifier.get_app_vk()),
batch_vk: BASE64_STANDARD.encode(self.batch_verifier.get_app_vk()),
bundle_vk: BASE64_STANDARD.encode(self.bundle_verifier.get_app_vk()),
};
serde_json::to_writer(f, &dump).expect("Failed to dump VK");
}
}

View File

@@ -1,66 +0,0 @@
use super::{ProofVerifier, TaskType, VKDump};
use anyhow::Result;
use crate::utils::panic_catch;
use scroll_zkvm_prover::{BatchProof, BundleProof, ChunkProof};
use scroll_zkvm_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
use std::{fs::File, path::Path};
pub struct EuclidV2Verifier {
chunk_verifier: ChunkVerifier,
batch_verifier: BatchVerifier,
bundle_verifier: BundleVerifierEuclidV2,
}
impl EuclidV2Verifier {
pub fn new(assets_dir: &str) -> Self {
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
let config = Path::new(assets_dir).join("root-verifier-vm-config");
let exe = Path::new(assets_dir).join("root-verifier-committed-exe");
Self {
chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up chunk verifier"),
batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up batch verifier"),
bundle_verifier: BundleVerifierEuclidV2::setup(&config, &exe, &verifier_bin)
.expect("Setting up bundle verifier"),
}
}
}
impl ProofVerifier for EuclidV2Verifier {
fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
self.chunk_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
self.batch_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
self.bundle_verifier
.verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
}
})
.map_err(|err_str: String| anyhow::anyhow!(err_str))
}
fn dump_vk(&self, file: &Path) {
use base64::{prelude::BASE64_STANDARD, Engine};
let f = File::create(file).expect("Failed to open file to dump VK");
let dump = VKDump {
chunk_vk: BASE64_STANDARD.encode(self.chunk_verifier.get_app_vk()),
batch_vk: BASE64_STANDARD.encode(self.batch_verifier.get_app_vk()),
bundle_vk: BASE64_STANDARD.encode(self.bundle_verifier.get_app_vk()),
};
serde_json::to_writer(f, &dump).expect("Failed to dump VK");
}
}

View File

@@ -1,12 +1,15 @@
// BatchVerifier is used to:
// - Verify a batch proof
// - Verify a bundle proof
void init(char* config);
void init_batch_prover(char* params_dir, char* assets_dir);
void init_batch_verifier(char* params_dir, char* assets_dir);
char* get_batch_vk();
char* check_chunk_proofs(char* chunk_proofs);
char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
char verify_batch_proof(char* proof, char* fork_name);
char verify_bundle_proof(char* proof, char* fork_name);
void init_chunk_prover(char* params_dir, char* assets_dir);
void init_chunk_verifier(char* params_dir, char* assets_dir);
char* get_chunk_vk();
char* gen_chunk_proof(char* block_traces);
char verify_chunk_proof(char* proof);
char verify_chunk_proof(char* proof, char* fork_name);
void dump_vk(char* fork_name, char* file);
char* block_traces_to_chunk_info(char* block_traces);
void free_c_chars(char* ptr);

View File

@@ -2,7 +2,6 @@ package testcontainers
import (
"context"
"errors"
"fmt"
"log"
"os"
@@ -21,10 +20,9 @@ import (
// TestcontainerApps testcontainers struct
type TestcontainerApps struct {
postgresContainer *postgres.PostgresContainer
l2GethContainer *testcontainers.DockerContainer
poSL1Container compose.ComposeStack
web3SignerContainer *testcontainers.DockerContainer
postgresContainer *postgres.PostgresContainer
l2GethContainer *testcontainers.DockerContainer
poSL1Container compose.ComposeStack
// common time stamp in nanoseconds.
Timestamp int
@@ -113,51 +111,10 @@ func (t *TestcontainerApps) StartPoSL1Container() error {
return nil
}
func (t *TestcontainerApps) StartWeb3SignerContainer(chainId int) error {
if t.web3SignerContainer != nil && t.web3SignerContainer.IsRunning() {
return nil
}
var (
err error
rootDir string
)
if rootDir, err = findProjectRootDir(); err != nil {
return fmt.Errorf("failed to find project root directory: %v", err)
}
// web3signerconf/keyconf.yaml may contain multiple configured keys; web3signer then chooses the one corresponding to the `from` field of the tx
web3SignerConfDir := filepath.Join(rootDir, "common", "testcontainers", "web3signerconf")
req := testcontainers.ContainerRequest{
Image: "consensys/web3signer:develop",
ExposedPorts: []string{"9000/tcp"},
Cmd: []string{"--key-config-path", "/web3signerconf/", "eth1", "--chain-id", fmt.Sprintf("%d", chainId)},
Files: []testcontainers.ContainerFile{
{
HostFilePath: web3SignerConfDir,
ContainerFilePath: "/",
FileMode: 0o777,
},
},
WaitingFor: wait.ForLog("ready to handle signing requests"),
}
genericContainerReq := testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
}
container, err := testcontainers.GenericContainer(context.Background(), genericContainerReq)
if err != nil {
log.Printf("failed to start web3signer container: %s", err)
return err
}
t.web3SignerContainer, _ = container.(*testcontainers.DockerContainer)
return nil
}
// GetPoSL1EndPoint returns the endpoint of the running PoS L1 endpoint
func (t *TestcontainerApps) GetPoSL1EndPoint() (string, error) {
if t.poSL1Container == nil {
return "", errors.New("PoS L1 container is not running")
return "", fmt.Errorf("PoS L1 container is not running")
}
contrainer, err := t.poSL1Container.ServiceContainer(context.Background(), "geth")
if err != nil {
@@ -178,7 +135,7 @@ func (t *TestcontainerApps) GetPoSL1Client() (*ethclient.Client, error) {
// GetDBEndPoint returns the endpoint of the running postgres container
func (t *TestcontainerApps) GetDBEndPoint() (string, error) {
if t.postgresContainer == nil || !t.postgresContainer.IsRunning() {
return "", errors.New("postgres is not running")
return "", fmt.Errorf("postgres is not running")
}
return t.postgresContainer.ConnectionString(context.Background(), "sslmode=disable")
}
@@ -186,7 +143,7 @@ func (t *TestcontainerApps) GetDBEndPoint() (string, error) {
// GetL2GethEndPoint returns the endpoint of the running L2Geth container
func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
if t.l2GethContainer == nil || !t.l2GethContainer.IsRunning() {
return "", errors.New("l2 geth is not running")
return "", fmt.Errorf("l2 geth is not running")
}
endpoint, err := t.l2GethContainer.PortEndpoint(context.Background(), "8546/tcp", "ws")
if err != nil {
@@ -195,14 +152,6 @@ func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
return endpoint, nil
}
// GetWeb3SignerEndpoint returns the endpoint of the running web3signer container
func (t *TestcontainerApps) GetWeb3SignerEndpoint() (string, error) {
if t.web3SignerContainer == nil || !t.web3SignerContainer.IsRunning() {
return "", errors.New("web3signer is not running")
}
return t.web3SignerContainer.PortEndpoint(context.Background(), "9000/tcp", "http")
}
// GetGormDBClient returns a gorm.DB by connecting to the running postgres container
func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
endpoint, err := t.GetDBEndPoint()
@@ -251,11 +200,6 @@ func (t *TestcontainerApps) Free() {
t.poSL1Container = nil
}
}
if t.web3SignerContainer != nil && t.web3SignerContainer.IsRunning() {
if err := t.web3SignerContainer.Terminate(ctx); err != nil {
log.Printf("failed to stop web3signer container: %s", err)
}
}
}
// findProjectRootDir find project root directory
@@ -273,7 +217,7 @@ func findProjectRootDir() (string, error) {
parentDir := filepath.Dir(currentDir)
if parentDir == currentDir {
return "", errors.New("go.work file not found in any parent directory")
return "", fmt.Errorf("go.work file not found in any parent directory")
}
currentDir = parentDir

View File

@@ -44,11 +44,6 @@ func TestNewTestcontainerApps(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, ethclient)
assert.NoError(t, testApps.StartWeb3SignerContainer(1))
endpoint, err = testApps.GetWeb3SignerEndpoint()
assert.NoError(t, err)
assert.NotEmpty(t, endpoint)
// test free testcontainers
testApps.Free()
endpoint, err = testApps.GetDBEndPoint()
@@ -62,8 +57,4 @@ func TestNewTestcontainerApps(t *testing.T) {
endpoint, err = testApps.GetPoSL1EndPoint()
assert.EqualError(t, err, "PoS L1 container is not running")
assert.Empty(t, endpoint)
endpoint, err = testApps.GetWeb3SignerEndpoint()
assert.EqualError(t, err, "web3signer is not running")
assert.Empty(t, endpoint)
}

View File

@@ -1,7 +0,0 @@
type: "file-raw"
keyType: "SECP256K1"
privateKey: "0x1313131313131313131313131313131313131313131313131313131313131313"
---
type: "file-raw"
keyType: "SECP256K1"
privateKey: "0x1212121212121212121212121212121212121212121212121212121212121212"

View File

@@ -1,17 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"
[dependencies]
types-base = { path = "base", package = "scroll-zkvm-circuit-input-types-base"}
types-agg = { path = "aggregation", package = "scroll-zkvm-circuit-input-types-aggregation"}
types-chunk = { path = "chunk", package = "scroll-zkvm-circuit-input-types-chunk"}
types-batch = { path = "batch", package = "scroll-zkvm-circuit-input-types-batch"}
types-bundle = { path = "bundle", package = "scroll-zkvm-circuit-input-types-bundle"}

View File

@@ -1,24 +0,0 @@
# Input Types for circuits
A series of separate crates for the input types accepted by circuits.
These crates help decouple the circuits from other crates and keep their dependencies neat and controllable, avoiding pulling in crates that are incompatible with the openvm toolchain via indirect dependencies.
### Code structure
```
types-rs
├── base
├── circuit
├── aggregation
<following are layer-oriented crates>
├── chunk
├── batch
└── bundle
```

View File

@@ -1,14 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types-aggregation"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"
[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
rkyv.workspace = true
serde.workspace = true

View File

@@ -1,81 +0,0 @@
/// Represents an openvm program commitments and public values.
#[derive(
Clone,
Debug,
rkyv::Archive,
rkyv::Deserialize,
rkyv::Serialize,
serde::Deserialize,
serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct AggregationInput {
/// Public values.
pub public_values: Vec<u32>,
/// Represent the commitment needed to verify a root proof
pub commitment: ProgramCommitment,
}
/// Represent the commitment needed to verify a [`RootProof`].
#[derive(
Clone,
Debug,
Default,
rkyv::Archive,
rkyv::Deserialize,
rkyv::Serialize,
serde::Deserialize,
serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct ProgramCommitment {
/// The commitment to the child program exe.
pub exe: [u32; 8],
/// The commitment to the child program leaf.
pub leaf: [u32; 8],
}
impl ProgramCommitment {
pub fn deserialize(commitment_bytes: &[u8]) -> Self {
// TODO: temporarily skip deserialization if no vk is provided
if commitment_bytes.is_empty() {
return Default::default();
}
let archived_data =
rkyv::access::<ArchivedProgramCommitment, rkyv::rancor::BoxedError>(commitment_bytes)
.unwrap();
Self {
exe: archived_data.exe.map(|u32_le| u32_le.to_native()),
leaf: archived_data.leaf.map(|u32_le| u32_le.to_native()),
}
}
pub fn serialize(&self) -> Vec<u8> {
rkyv::to_bytes::<rkyv::rancor::BoxedError>(self)
.map(|v| v.to_vec())
.unwrap()
}
}
impl From<&ArchivedProgramCommitment> for ProgramCommitment {
fn from(archived: &ArchivedProgramCommitment) -> Self {
Self {
exe: archived.exe.map(|u32_le| u32_le.to_native()),
leaf: archived.leaf.map(|u32_le| u32_le.to_native()),
}
}
}
/// Number of public-input values, i.e. [u32; N].
///
/// Note that the actual value for each u32 is a byte.
pub const NUM_PUBLIC_VALUES: usize = 32;
/// Witness for an [`AggregationCircuit`][AggCircuit] that also carries proofs that are being
/// aggregated.
pub trait ProofCarryingWitness {
/// Get the root proofs from the witness.
fn get_proofs(&self) -> Vec<AggregationInput>;
}
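For illustration, a minimal sketch of round-tripping a `ProgramCommitment` through the rkyv-backed helpers above; the `types_agg` crate alias mirrors how dependent crates in this workspace import the package and is an assumption here.

```rust
// Hedged usage sketch; the `types_agg` crate alias is assumed.
use types_agg::ProgramCommitment;

fn main() {
    let commitment = ProgramCommitment {
        exe: [1u32; 8],
        leaf: [2u32; 8],
    };

    // rkyv-serialize, then access the archived form and convert back.
    let bytes = commitment.serialize();
    let recovered = ProgramCommitment::deserialize(&bytes);
    assert_eq!(recovered.exe, commitment.exe);
    assert_eq!(recovered.leaf, commitment.leaf);

    // An empty byte slice short-circuits to the default commitment,
    // matching the TODO branch in `deserialize`.
    let fallback = ProgramCommitment::deserialize(&[]);
    assert_eq!(fallback.exe, [0u32; 8]);
    assert_eq!(fallback.leaf, [0u32; 8]);
}
```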

View File

@@ -1,21 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types-base"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"
[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
alloy-serde.workspace = true
rkyv.workspace = true
serde.workspace = true
itertools.workspace = true
tiny-keccak = { workspace = true }
sha3 = "0.10.8"
sha2 = "0.10.8"
[features]
default = []

View File

@@ -1,2 +0,0 @@
pub mod public_inputs;
pub mod utils;

View File

@@ -1,81 +0,0 @@
use alloy_primitives::B256;
pub mod batch;
pub mod bundle;
pub mod chunk;
/// Defines behaviour to be implemented by types representing the public-input values of a circuit.
pub trait PublicInputs {
/// Keccak-256 digest of the public inputs. The public-input hash is revealed as public values
/// via [`openvm::io::reveal`].
fn pi_hash(&self) -> B256;
/// Validation logic between public inputs of two contiguous instances.
fn validate(&self, prev_pi: &Self);
}
#[derive(
Default,
Debug,
Copy,
Clone,
PartialEq,
Eq,
rkyv::Archive,
rkyv::Deserialize,
rkyv::Serialize,
serde::Deserialize,
serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub enum ForkName {
#[default]
EuclidV1,
EuclidV2,
}
impl From<&ArchivedForkName> for ForkName {
fn from(archived: &ArchivedForkName) -> Self {
match archived {
ArchivedForkName::EuclidV1 => ForkName::EuclidV1,
ArchivedForkName::EuclidV2 => ForkName::EuclidV2,
}
}
}
impl From<Option<&str>> for ForkName {
fn from(value: Option<&str>) -> Self {
match value {
None => Default::default(),
Some("euclidv1") => ForkName::EuclidV1,
Some("euclidv2") => ForkName::EuclidV2,
Some(s) => unreachable!("hardfork not accepted: {s}"),
}
}
}
impl From<&str> for ForkName {
fn from(value: &str) -> Self {
match value {
"euclidv1" => ForkName::EuclidV1,
"euclidv2" => ForkName::EuclidV2,
s => unreachable!("hardfork not accepted: {s}"),
}
}
}
/// Helper trait that extends [`PublicInputs`] with fork-aware variants.
pub trait MultiVersionPublicInputs {
fn pi_hash_by_fork(&self, fork_name: ForkName) -> B256;
fn validate(&self, prev_pi: &Self, fork_name: ForkName);
}
impl<T: MultiVersionPublicInputs> PublicInputs for (T, ForkName) {
fn pi_hash(&self) -> B256 {
self.0.pi_hash_by_fork(self.1)
}
fn validate(&self, prev_pi: &Self) {
assert_eq!(self.1, prev_pi.1);
self.0.validate(&prev_pi.0, self.1)
}
}
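As a hedged sketch of how the pieces above fit together: pairing any `MultiVersionPublicInputs` type with a parsed `ForkName` yields a `PublicInputs` value whose `pi_hash` dispatches to the fork-specific digest. The import paths follow the `types_base` alias used elsewhere in this workspace and are assumptions here.

```rust
// Hedged glue-code sketch; import paths are assumed.
use alloy_primitives::B256;
use types_base::public_inputs::{ForkName, MultiVersionPublicInputs, PublicInputs};

/// Compute the public-input hash of `pi` for the hard fork named by `fork`.
/// Only "euclidv1" and "euclidv2" are accepted fork names.
fn versioned_pi_hash<T: MultiVersionPublicInputs>(pi: T, fork: &str) -> B256 {
    let versioned = (pi, ForkName::from(fork));
    // The blanket impl above forwards to `pi_hash_by_fork(ForkName)`.
    versioned.pi_hash()
}
```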

View File

@@ -1,144 +0,0 @@
use alloy_primitives::B256;
use crate::{
public_inputs::{ForkName, MultiVersionPublicInputs},
utils::keccak256,
};
/// Represents public-input values for a batch.
#[derive(
Clone,
Debug,
rkyv::Archive,
rkyv::Deserialize,
rkyv::Serialize,
serde::Deserialize,
serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct BatchInfo {
/// The state root before applying the batch.
#[rkyv()]
pub parent_state_root: B256,
/// The batch hash of the parent batch.
#[rkyv()]
pub parent_batch_hash: B256,
/// The state root after applying txs in the batch.
#[rkyv()]
pub state_root: B256,
/// The batch header hash of the batch.
#[rkyv()]
pub batch_hash: B256,
/// The EIP-155 chain ID of all txs in the batch.
#[rkyv()]
pub chain_id: u64,
/// The withdraw root of the last block in the last chunk in the batch.
#[rkyv()]
pub withdraw_root: B256,
/// The L1 msg queue hash at the end of the previous batch.
#[rkyv()]
pub prev_msg_queue_hash: B256,
/// The L1 msg queue hash at the end of the current batch.
#[rkyv()]
pub post_msg_queue_hash: B256,
}
impl From<&ArchivedBatchInfo> for BatchInfo {
fn from(archived: &ArchivedBatchInfo) -> Self {
Self {
parent_state_root: archived.parent_state_root.into(),
parent_batch_hash: archived.parent_batch_hash.into(),
state_root: archived.state_root.into(),
batch_hash: archived.batch_hash.into(),
chain_id: archived.chain_id.into(),
withdraw_root: archived.withdraw_root.into(),
prev_msg_queue_hash: archived.prev_msg_queue_hash.into(),
post_msg_queue_hash: archived.post_msg_queue_hash.into(),
}
}
}
impl BatchInfo {
/// Public input hash for a batch (euclidv1 or da-codec@v6) is defined as
///
/// keccak(
/// parent state root ||
/// parent batch hash ||
/// state root ||
/// batch hash ||
/// chain id ||
/// withdraw root ||
/// )
fn pi_hash_euclidv1(&self) -> B256 {
keccak256(
std::iter::empty()
.chain(self.parent_state_root.as_slice())
.chain(self.parent_batch_hash.as_slice())
.chain(self.state_root.as_slice())
.chain(self.batch_hash.as_slice())
.chain(self.chain_id.to_be_bytes().as_slice())
.chain(self.withdraw_root.as_slice())
.cloned()
.collect::<Vec<u8>>(),
)
}
/// Public input hash for a batch (euclidv2 or da-codec@v7) is defined as
///
/// keccak(
/// parent state root ||
/// parent batch hash ||
/// state root ||
/// batch hash ||
/// chain id ||
/// withdraw root ||
/// prev msg queue hash ||
/// post msg queue hash
/// )
fn pi_hash_euclidv2(&self) -> B256 {
keccak256(
std::iter::empty()
.chain(self.parent_state_root.as_slice())
.chain(self.parent_batch_hash.as_slice())
.chain(self.state_root.as_slice())
.chain(self.batch_hash.as_slice())
.chain(self.chain_id.to_be_bytes().as_slice())
.chain(self.withdraw_root.as_slice())
.chain(self.prev_msg_queue_hash.as_slice())
.chain(self.post_msg_queue_hash.as_slice())
.cloned()
.collect::<Vec<u8>>(),
)
}
}
pub type VersionedBatchInfo = (BatchInfo, ForkName);
impl MultiVersionPublicInputs for BatchInfo {
fn pi_hash_by_fork(&self, fork_name: ForkName) -> B256 {
match fork_name {
ForkName::EuclidV1 => self.pi_hash_euclidv1(),
ForkName::EuclidV2 => self.pi_hash_euclidv2(),
}
}
/// Validate public inputs between 2 contiguous batches.
///
/// - chain id MUST match
/// - state roots MUST be chained
/// - batch hashes MUST be chained
/// - L1 msg queue hashes MUST be chained
fn validate(&self, prev_pi: &Self, fork_name: ForkName) {
assert_eq!(self.chain_id, prev_pi.chain_id);
assert_eq!(self.parent_state_root, prev_pi.state_root);
assert_eq!(self.parent_batch_hash, prev_pi.batch_hash);
assert_eq!(self.prev_msg_queue_hash, prev_pi.post_msg_queue_hash);
if fork_name == ForkName::EuclidV1 {
assert_eq!(self.prev_msg_queue_hash, B256::ZERO);
assert_eq!(prev_pi.prev_msg_queue_hash, B256::ZERO);
assert_eq!(self.post_msg_queue_hash, B256::ZERO);
assert_eq!(prev_pi.post_msg_queue_hash, B256::ZERO);
}
}
}
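To make the chaining rules concrete, here is a hedged sketch of two contiguous `BatchInfo` instances that satisfy `validate`; the import paths and example values (including the chain id) are illustrative assumptions.

```rust
// Hedged chaining example; import paths and values are assumed.
use alloy_primitives::B256;
use types_base::public_inputs::{batch::BatchInfo, ForkName, MultiVersionPublicInputs};

fn main() {
    let state_root_1 = B256::repeat_byte(0x11);
    let batch_hash_1 = B256::repeat_byte(0x22);
    let queue_hash_1 = B256::repeat_byte(0x33);

    let prev = BatchInfo {
        parent_state_root: B256::repeat_byte(0x01),
        parent_batch_hash: B256::repeat_byte(0x02),
        state_root: state_root_1,
        batch_hash: batch_hash_1,
        chain_id: 534352,
        withdraw_root: B256::ZERO,
        prev_msg_queue_hash: B256::ZERO,
        post_msg_queue_hash: queue_hash_1,
    };

    // The next batch must start exactly where the previous one ended.
    let next = BatchInfo {
        parent_state_root: state_root_1,
        parent_batch_hash: batch_hash_1,
        prev_msg_queue_hash: queue_hash_1,
        state_root: B256::repeat_byte(0x44),
        batch_hash: B256::repeat_byte(0x55),
        chain_id: 534352,
        withdraw_root: B256::ZERO,
        post_msg_queue_hash: B256::repeat_byte(0x66),
    };

    // Panics if any chaining rule (chain id, state roots, batch hashes,
    // msg queue hashes) is violated.
    next.validate(&prev, ForkName::EuclidV2);
}
```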

View File

@@ -1,149 +0,0 @@
use alloy_primitives::B256;
use crate::{
public_inputs::{ForkName, MultiVersionPublicInputs, PublicInputs},
utils::keccak256,
};
/// Represents fields required to compute the public-inputs digest of a bundle.
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct BundleInfo {
/// The EIP-155 chain ID of all txs in the bundle.
pub chain_id: u64,
/// The L1 msg queue hash at the end of the last batch in the bundle.
/// Not a phase-1 field, so it may be omitted.
#[serde(default)]
pub msg_queue_hash: B256,
/// The number of batches bundled together in the bundle.
pub num_batches: u32,
/// The last finalized on-chain state root.
pub prev_state_root: B256,
/// The last finalized on-chain batch hash.
pub prev_batch_hash: B256,
/// The state root after applying every batch in the bundle.
///
/// Upon verification of the EVM-verifiable bundle proof, this state root will be finalized
/// on-chain.
pub post_state_root: B256,
/// The batch hash of the last batch in the bundle.
///
/// Upon verification of the EVM-verifiable bundle proof, this batch hash will be finalized
/// on-chain.
pub batch_hash: B256,
/// The withdrawals root at the last block in the last chunk in the last batch in the bundle.
pub withdraw_root: B256,
}
impl BundleInfo {
/// Public input hash for a bundle (euclidv1 or da-codec@v6) is defined as
///
/// keccak(
/// chain id ||
/// num batches ||
/// prev state root ||
/// prev batch hash ||
/// post state root ||
/// batch hash ||
/// withdraw root
/// )
pub fn pi_hash_euclidv1(&self) -> B256 {
keccak256(
std::iter::empty()
.chain(self.chain_id.to_be_bytes().as_slice())
.chain(self.num_batches.to_be_bytes().as_slice())
.chain(self.prev_state_root.as_slice())
.chain(self.prev_batch_hash.as_slice())
.chain(self.post_state_root.as_slice())
.chain(self.batch_hash.as_slice())
.chain(self.withdraw_root.as_slice())
.cloned()
.collect::<Vec<u8>>(),
)
}
/// Public input hash for a bundle (euclidv2 or da-codec@v7) is defined as
///
/// keccak(
/// chain id ||
/// msg_queue_hash ||
/// num batches ||
/// prev state root ||
/// prev batch hash ||
/// post state root ||
/// batch hash ||
/// withdraw root
/// )
pub fn pi_hash_euclidv2(&self) -> B256 {
keccak256(
std::iter::empty()
.chain(self.chain_id.to_be_bytes().as_slice())
.chain(self.msg_queue_hash.as_slice())
.chain(self.num_batches.to_be_bytes().as_slice())
.chain(self.prev_state_root.as_slice())
.chain(self.prev_batch_hash.as_slice())
.chain(self.post_state_root.as_slice())
.chain(self.batch_hash.as_slice())
.chain(self.withdraw_root.as_slice())
.cloned()
.collect::<Vec<u8>>(),
)
}
pub fn pi_hash(&self, fork_name: ForkName) -> B256 {
match fork_name {
ForkName::EuclidV1 => self.pi_hash_euclidv1(),
ForkName::EuclidV2 => self.pi_hash_euclidv2(),
}
}
}
impl MultiVersionPublicInputs for BundleInfo {
fn pi_hash_by_fork(&self, fork_name: ForkName) -> B256 {
match fork_name {
ForkName::EuclidV1 => self.pi_hash_euclidv1(),
ForkName::EuclidV2 => self.pi_hash_euclidv2(),
}
}
fn validate(&self, _prev_pi: &Self, _fork_name: ForkName) {
unreachable!("bundle is the last layer and is not aggregated by any other circuit");
}
}
#[derive(Clone, Debug)]
pub struct BundleInfoV1(pub BundleInfo);
#[derive(Clone, Debug)]
pub struct BundleInfoV2(pub BundleInfo);
impl From<BundleInfo> for BundleInfoV1 {
fn from(value: BundleInfo) -> Self {
Self(value)
}
}
impl From<BundleInfo> for BundleInfoV2 {
fn from(value: BundleInfo) -> Self {
Self(value)
}
}
impl PublicInputs for BundleInfoV1 {
fn pi_hash(&self) -> B256 {
self.0.pi_hash_euclidv1()
}
fn validate(&self, _prev_pi: &Self) {
unreachable!("bundle is the last layer and is not aggregated by any other circuit");
}
}
impl PublicInputs for BundleInfoV2 {
fn pi_hash(&self) -> B256 {
self.0.pi_hash_euclidv2()
}
fn validate(&self, _prev_pi: &Self) {
unreachable!("bundle is the last layer and is not aggregated by any other circuit");
}
}

View File

@@ -1,248 +0,0 @@
use alloy_primitives::{B256, U256};
use crate::{
public_inputs::{ForkName, MultiVersionPublicInputs},
utils::keccak256,
};
/// Number of bytes used to serialise [`BlockContextV2`].
pub const SIZE_BLOCK_CTX: usize = 52;
/// Represents version 2 of the block context.
///
/// The difference between v2 and v1 is that the block-number field was removed in v2.
#[derive(
Debug,
Clone,
PartialEq,
rkyv::Archive,
rkyv::Deserialize,
rkyv::Serialize,
serde::Deserialize,
serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct BlockContextV2 {
/// The timestamp of the block.
pub timestamp: u64,
/// The base fee of the block.
pub base_fee: U256,
/// The gas limit of the block.
pub gas_limit: u64,
/// The number of transactions in the block, including both L1 msg txs and L2 txs.
pub num_txs: u16,
/// The number of L1 msg txs in the block.
pub num_l1_msgs: u16,
}
impl From<&ArchivedBlockContextV2> for BlockContextV2 {
fn from(archived: &ArchivedBlockContextV2) -> Self {
Self {
timestamp: archived.timestamp.into(),
base_fee: archived.base_fee.into(),
gas_limit: archived.gas_limit.into(),
num_txs: archived.num_txs.into(),
num_l1_msgs: archived.num_l1_msgs.into(),
}
}
}
impl From<&[u8]> for BlockContextV2 {
fn from(bytes: &[u8]) -> Self {
assert_eq!(bytes.len(), SIZE_BLOCK_CTX);
let timestamp = u64::from_be_bytes(bytes[0..8].try_into().expect("should not fail"));
let base_fee = U256::from_be_slice(&bytes[8..40]);
let gas_limit = u64::from_be_bytes(bytes[40..48].try_into().expect("should not fail"));
let num_txs = u16::from_be_bytes(bytes[48..50].try_into().expect("should not fail"));
let num_l1_msgs = u16::from_be_bytes(bytes[50..52].try_into().expect("should not fail"));
Self {
timestamp,
base_fee,
gas_limit,
num_txs,
num_l1_msgs,
}
}
}
impl BlockContextV2 {
/// Serialize the block context in packed form.
pub fn to_bytes(&self) -> Vec<u8> {
std::iter::empty()
.chain(self.timestamp.to_be_bytes())
.chain(self.base_fee.to_be_bytes::<32>())
.chain(self.gas_limit.to_be_bytes())
.chain(self.num_txs.to_be_bytes())
.chain(self.num_l1_msgs.to_be_bytes())
.collect()
}
}
/// Represents header-like information for the chunk.
#[derive(
Debug,
Clone,
rkyv::Archive,
rkyv::Deserialize,
rkyv::Serialize,
serde::Deserialize,
serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct ChunkInfo {
/// The EIP-155 chain ID for all txs in the chunk.
#[rkyv()]
pub chain_id: u64,
/// The state root before applying the chunk.
#[rkyv()]
pub prev_state_root: B256,
/// The state root after applying the chunk.
#[rkyv()]
pub post_state_root: B256,
/// The withdrawals root after applying the chunk.
#[rkyv()]
pub withdraw_root: B256,
/// Digest of L1 message txs force-included in the chunk.
/// This is a legacy field and can be omitted in newer definitions.
#[rkyv()]
#[serde(default)]
pub data_hash: B256,
/// Digest of L2 tx data flattened over all L2 txs in the chunk.
#[rkyv()]
pub tx_data_digest: B256,
/// The L1 msg queue hash at the end of the previous chunk.
#[rkyv()]
pub prev_msg_queue_hash: B256,
/// The L1 msg queue hash at the end of the current chunk.
#[rkyv()]
pub post_msg_queue_hash: B256,
/// The length of rlp encoded L2 tx bytes flattened over all L2 txs in the chunk.
#[rkyv()]
pub tx_data_length: u64,
/// The block number of the first block in the chunk.
#[rkyv()]
pub initial_block_number: u64,
/// The block contexts of the blocks in the chunk.
#[rkyv()]
pub block_ctxs: Vec<BlockContextV2>,
}
impl ChunkInfo {
/// Public input hash for a given chunk (euclidv1 or da-codec@v6) is defined as
///
/// keccak(
/// chain id ||
/// prev state root ||
/// post state root ||
/// withdraw root ||
/// chunk data hash ||
/// tx data hash
/// )
pub fn pi_hash_euclidv1(&self) -> B256 {
keccak256(
std::iter::empty()
.chain(&self.chain_id.to_be_bytes())
.chain(self.prev_state_root.as_slice())
.chain(self.post_state_root.as_slice())
.chain(self.withdraw_root.as_slice())
.chain(self.data_hash.as_slice())
.chain(self.tx_data_digest.as_slice())
.cloned()
.collect::<Vec<u8>>(),
)
}
/// Public input hash for a given chunk (euclidv2 or da-codec@v7) is defined as
///
/// keccak(
/// chain id ||
/// prev state root ||
/// post state root ||
/// withdraw root ||
/// tx data digest ||
/// prev msg queue hash ||
/// post msg queue hash ||
/// initial block number ||
/// block_ctx for block_ctx in block_ctxs
/// )
pub fn pi_hash_euclidv2(&self) -> B256 {
keccak256(
std::iter::empty()
.chain(&self.chain_id.to_be_bytes())
.chain(self.prev_state_root.as_slice())
.chain(self.post_state_root.as_slice())
.chain(self.withdraw_root.as_slice())
.chain(self.tx_data_digest.as_slice())
.chain(self.prev_msg_queue_hash.as_slice())
.chain(self.post_msg_queue_hash.as_slice())
.chain(&self.initial_block_number.to_be_bytes())
.chain(
self.block_ctxs
.iter()
.flat_map(|block_ctx| block_ctx.to_bytes())
.collect::<Vec<u8>>()
.as_slice(),
)
.cloned()
.collect::<Vec<u8>>(),
)
}
}
impl From<&ArchivedChunkInfo> for ChunkInfo {
fn from(archived: &ArchivedChunkInfo) -> Self {
Self {
chain_id: archived.chain_id.into(),
prev_state_root: archived.prev_state_root.into(),
post_state_root: archived.post_state_root.into(),
withdraw_root: archived.withdraw_root.into(),
data_hash: archived.data_hash.into(),
tx_data_digest: archived.tx_data_digest.into(),
prev_msg_queue_hash: archived.prev_msg_queue_hash.into(),
post_msg_queue_hash: archived.post_msg_queue_hash.into(),
tx_data_length: archived.tx_data_length.into(),
initial_block_number: archived.initial_block_number.into(),
block_ctxs: archived
.block_ctxs
.iter()
.map(BlockContextV2::from)
.collect(),
}
}
}
pub type VersionedChunkInfo = (ChunkInfo, ForkName);
impl MultiVersionPublicInputs for ChunkInfo {
/// Compute the public input hash for the chunk.
fn pi_hash_by_fork(&self, fork_name: ForkName) -> B256 {
match fork_name {
ForkName::EuclidV1 => {
assert_ne!(self.data_hash, B256::ZERO, "v6 must has valid data hash");
self.pi_hash_euclidv1()
}
ForkName::EuclidV2 => self.pi_hash_euclidv2(),
}
}
/// Validate public inputs between 2 contiguous chunks.
///
/// - chain id MUST match
/// - state roots MUST be chained
/// - L1 msg queue hash MUST be chained
fn validate(&self, prev_pi: &Self, fork_name: ForkName) {
assert_eq!(self.chain_id, prev_pi.chain_id);
assert_eq!(self.prev_state_root, prev_pi.post_state_root);
assert_eq!(self.prev_msg_queue_hash, prev_pi.post_msg_queue_hash);
// message queue hash is used only after euclidv2 (da-codec@v7)
if fork_name == ForkName::EuclidV1 {
assert_eq!(self.prev_msg_queue_hash, B256::ZERO);
assert_eq!(prev_pi.prev_msg_queue_hash, B256::ZERO);
assert_eq!(self.post_msg_queue_hash, B256::ZERO);
assert_eq!(prev_pi.post_msg_queue_hash, B256::ZERO);
}
}
}
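Since `BlockContextV2` is what gets packed both into the chunk public-input hash and into the v7 payload, a small hedged round-trip of its 52-byte encoding may help; it relies only on the `to_bytes`/`From<&[u8]>` pair above, with assumed import paths.

```rust
// Hedged round-trip check; import paths are assumed.
use alloy_primitives::U256;
use types_base::public_inputs::chunk::{BlockContextV2, SIZE_BLOCK_CTX};

fn main() {
    let ctx = BlockContextV2 {
        timestamp: 1_700_000_000,               // 8 bytes
        base_fee: U256::from(1_000_000_000u64), // 32 bytes
        gas_limit: 30_000_000,                  // 8 bytes
        num_txs: 12,                            // 2 bytes
        num_l1_msgs: 2,                         // 2 bytes
    };

    let bytes = ctx.to_bytes();
    assert_eq!(bytes.len(), SIZE_BLOCK_CTX); // 8 + 32 + 8 + 2 + 2 = 52

    // Decoding the packed form recovers the same context.
    assert_eq!(BlockContextV2::from(bytes.as_slice()), ctx);
}
```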

View File

@@ -1,35 +0,0 @@
use alloy_primitives::B256;
use tiny_keccak::{Hasher, Keccak};
/// Adapted from the ethers-rs utility.
///
/// Computes the Keccak-256 hash of input bytes.
///
/// Note that strings are interpreted as UTF-8 bytes.
pub fn keccak256<T: AsRef<[u8]>>(bytes: T) -> B256 {
let mut output = [0u8; 32];
let mut hasher = Keccak::v256();
hasher.update(bytes.as_ref());
hasher.finalize(&mut output);
B256::from(output)
}
pub fn keccak256_rv32<T: AsRef<[u8]>>(bytes: T) -> B256 {
use sha3::{Digest, Keccak256};
let mut output = [0u8; 32];
let mut hasher = Keccak256::new();
hasher.update(bytes.as_ref());
output.copy_from_slice(hasher.finalize().as_ref());
B256::from(output)
}
pub fn sha256_rv32<T: AsRef<[u8]>>(bytes: T) -> B256 {
use sha2::{Digest, Sha256};
let mut output = [0u8; 32];
let mut hasher = Sha256::new();
hasher.update(bytes.as_ref());
output.copy_from_slice(hasher.finalize().as_ref());
B256::from(output)
}
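Both helpers compute the same Keccak-256 function, one through `tiny-keccak` and one through the RISC-V-friendly `sha3` backend, so a hedged sanity check is simply that they agree; the `types_base` import path is assumed.

```rust
// Hedged sanity check; the `types_base` import path is assumed.
use alloy_primitives::b256;
use types_base::utils::{keccak256, keccak256_rv32};

fn main() {
    let input = b"scroll-zkvm";
    assert_eq!(keccak256(input), keccak256_rv32(input));

    // Well-known Keccak-256 test vector for the empty input.
    assert_eq!(
        keccak256(b""),
        b256!("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
    );
}
```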

View File

@@ -1,2 +0,0 @@
mod hash;
pub use hash::{keccak256, keccak256_rv32, sha256_rv32};

View File

@@ -1,21 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types-batch"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"
[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
rkyv.workspace = true
serde.workspace = true
itertools.workspace = true
vm-zstd = { workspace = true }
types-base = { path = "../base", package = "scroll-zkvm-circuit-input-types-base"}
types-agg = { path = "../aggregation", package = "scroll-zkvm-circuit-input-types-aggregation"}
[features]
default = []

View File

@@ -1,30 +0,0 @@
use alloy_primitives::B256;
pub mod v6;
pub mod v7;
pub trait BatchHeader {
/// The DA-codec version for the batch header.
fn version(&self) -> u8;
/// The incremental index of the batch.
fn index(&self) -> u64;
/// The batch header digest of the parent batch.
fn parent_batch_hash(&self) -> B256;
/// The batch header digest.
fn batch_hash(&self) -> B256;
}
/// The reference header indicates the batch header version on which the batch hash
/// should be calculated.
#[derive(Clone, Debug, rkyv::Archive, rkyv::Deserialize, rkyv::Serialize)]
#[rkyv(derive(Debug))]
pub enum ReferenceHeader {
/// Represents DA-codec v6.
V6(v6::BatchHeaderV6),
/// Represents DA-codec v7.
V7(v7::BatchHeaderV7),
}

View File

@@ -1,151 +0,0 @@
use super::BatchHeader;
use alloy_primitives::B256;
use types_base::utils::keccak256;
/// Represents the header summarising the batch of chunks as per DA-codec v6.
#[derive(
Clone,
Copy,
Debug,
Default,
rkyv::Archive,
rkyv::Deserialize,
rkyv::Serialize,
serde::Deserialize,
serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct BatchHeaderV6 {
/// The DA-codec version for the batch.
#[rkyv()]
pub version: u8,
/// The index of the batch
#[rkyv()]
pub batch_index: u64,
/// Number of L1 messages popped in the batch
#[rkyv()]
pub l1_message_popped: u64,
/// Number of total L1 messages popped after the batch
#[rkyv()]
pub total_l1_message_popped: u64,
/// The parent batch hash
#[rkyv()]
pub parent_batch_hash: B256,
/// The timestamp of the last block in this batch
#[rkyv()]
pub last_block_timestamp: u64,
/// The data hash of the batch
#[rkyv()]
pub data_hash: B256,
/// The versioned hash of the blob with this batch's data
#[rkyv()]
pub blob_versioned_hash: B256,
/// The blob data proof: z (32), y (32)
#[rkyv()]
pub blob_data_proof: [B256; 2],
}
impl BatchHeader for BatchHeaderV6 {
fn version(&self) -> u8 {
self.version
}
fn index(&self) -> u64 {
self.batch_index
}
fn parent_batch_hash(&self) -> B256 {
self.parent_batch_hash
}
/// Batch hash as per DA-codec v6:
///
/// keccak(
/// version ||
/// batch index ||
/// l1 message popped ||
/// total l1 message popped ||
/// batch data hash ||
/// versioned hash ||
/// parent batch hash ||
/// last block timestamp ||
/// z ||
/// y
/// )
fn batch_hash(&self) -> B256 {
keccak256(
std::iter::empty()
.chain(vec![self.version].as_slice())
.chain(self.batch_index.to_be_bytes().as_slice())
.chain(self.l1_message_popped.to_be_bytes().as_slice())
.chain(self.total_l1_message_popped.to_be_bytes().as_slice())
.chain(self.data_hash.as_slice())
.chain(self.blob_versioned_hash.as_slice())
.chain(self.parent_batch_hash.as_slice())
.chain(self.last_block_timestamp.to_be_bytes().as_slice())
.chain(self.blob_data_proof[0].as_slice())
.chain(self.blob_data_proof[1].as_slice())
.cloned()
.collect::<Vec<u8>>(),
)
}
}
impl BatchHeader for ArchivedBatchHeaderV6 {
fn version(&self) -> u8 {
self.version
}
fn index(&self) -> u64 {
self.batch_index.into()
}
fn parent_batch_hash(&self) -> B256 {
self.parent_batch_hash.into()
}
fn batch_hash(&self) -> B256 {
let batch_index: u64 = self.batch_index.into();
let l1_message_popped: u64 = self.l1_message_popped.into();
let total_l1_message_popped: u64 = self.total_l1_message_popped.into();
let data_hash: B256 = self.data_hash.into();
let blob_versioned_hash: B256 = self.blob_versioned_hash.into();
let parent_batch_hash: B256 = self.parent_batch_hash.into();
let last_block_timestamp: u64 = self.last_block_timestamp.into();
let blob_data_proof: [B256; 2] = self.blob_data_proof.map(|h| h.into());
keccak256(
std::iter::empty()
.chain(vec![self.version].as_slice())
.chain(batch_index.to_be_bytes().as_slice())
.chain(l1_message_popped.to_be_bytes().as_slice())
.chain(total_l1_message_popped.to_be_bytes().as_slice())
.chain(data_hash.as_slice())
.chain(blob_versioned_hash.as_slice())
.chain(parent_batch_hash.as_slice())
.chain(last_block_timestamp.to_be_bytes().as_slice())
.chain(blob_data_proof[0].as_slice())
.chain(blob_data_proof[1].as_slice())
.cloned()
.collect::<Vec<u8>>(),
)
}
}
impl From<&ArchivedBatchHeaderV6> for BatchHeaderV6 {
fn from(archived: &ArchivedBatchHeaderV6) -> Self {
Self {
version: archived.version,
batch_index: archived.batch_index.into(),
l1_message_popped: archived.l1_message_popped.into(),
total_l1_message_popped: archived.total_l1_message_popped.into(),
parent_batch_hash: archived.parent_batch_hash.into(),
last_block_timestamp: archived.last_block_timestamp.into(),
data_hash: archived.data_hash.into(),
blob_versioned_hash: archived.blob_versioned_hash.into(),
blob_data_proof: [
archived.blob_data_proof[0].into(),
archived.blob_data_proof[1].into(),
],
}
}
}

View File

@@ -1,106 +0,0 @@
use alloy_primitives::B256;
use super::BatchHeader;
use types_base::utils::keccak256;
/// Represents the header summarising the batch of chunks as per DA-codec v7.
#[derive(
Clone,
Copy,
Debug,
Default,
rkyv::Archive,
rkyv::Deserialize,
rkyv::Serialize,
serde::Deserialize,
serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct BatchHeaderV7 {
/// The DA-codec version for the batch.
#[rkyv()]
pub version: u8,
/// The index of the batch
#[rkyv()]
pub batch_index: u64,
/// The parent batch hash
#[rkyv()]
pub parent_batch_hash: B256,
/// The versioned hash of the blob with this batch's data
#[rkyv()]
pub blob_versioned_hash: B256,
}
impl BatchHeader for BatchHeaderV7 {
fn version(&self) -> u8 {
self.version
}
fn index(&self) -> u64 {
self.batch_index
}
fn parent_batch_hash(&self) -> B256 {
self.parent_batch_hash
}
/// Batch hash as per DA-codec v7:
///
/// keccak(
/// version ||
/// batch index ||
/// versioned hash ||
/// parent batch hash
/// )
fn batch_hash(&self) -> B256 {
keccak256(
std::iter::empty()
.chain(vec![self.version].as_slice())
.chain(self.batch_index.to_be_bytes().as_slice())
.chain(self.blob_versioned_hash.as_slice())
.chain(self.parent_batch_hash.as_slice())
.cloned()
.collect::<Vec<u8>>(),
)
}
}
impl BatchHeader for ArchivedBatchHeaderV7 {
fn version(&self) -> u8 {
self.version
}
fn index(&self) -> u64 {
self.batch_index.into()
}
fn parent_batch_hash(&self) -> B256 {
self.parent_batch_hash.into()
}
fn batch_hash(&self) -> B256 {
let batch_index: u64 = self.batch_index.into();
let blob_versioned_hash: B256 = self.blob_versioned_hash.into();
let parent_batch_hash: B256 = self.parent_batch_hash.into();
keccak256(
std::iter::empty()
.chain(vec![self.version].as_slice())
.chain(batch_index.to_be_bytes().as_slice())
.chain(blob_versioned_hash.as_slice())
.chain(parent_batch_hash.as_slice())
.cloned()
.collect::<Vec<u8>>(),
)
}
}
impl From<&ArchivedBatchHeaderV7> for BatchHeaderV7 {
fn from(archived: &ArchivedBatchHeaderV7) -> Self {
Self {
version: archived.version,
batch_index: archived.batch_index.into(),
parent_batch_hash: archived.parent_batch_hash.into(),
blob_versioned_hash: archived.blob_versioned_hash.into(),
}
}
}

View File

@@ -1,17 +0,0 @@
mod header;
pub use header::{
ArchivedReferenceHeader, BatchHeader, ReferenceHeader,
v6::{ArchivedBatchHeaderV6, BatchHeaderV6},
v7::{ArchivedBatchHeaderV7, BatchHeaderV7},
};
mod payload;
pub use payload::{
v6::{EnvelopeV6, PayloadV6},
v7::{EnvelopeV7, PayloadV7},
};
pub use payload::{BLOB_WIDTH, N_BLOB_BYTES, N_DATA_BYTES_PER_COEFFICIENT};
mod witness;
pub use witness::{ArchivedBatchWitness, BatchWitness, Bytes48, PointEvalWitness};

View File

@@ -1,15 +0,0 @@
pub mod v6;
pub mod v7;
/// The number of data bytes we pack into each BLS12-381 scalar. The most-significant byte is 0.
pub const N_DATA_BYTES_PER_COEFFICIENT: usize = 31;
/// The number of BLS12-381 scalar field elements that effectively represent an EIP-4844 blob.
pub const BLOB_WIDTH: usize = 4096;
/// The effective (reduced) number of bytes we can use within a blob.
///
/// EIP-4844 requires that each 32-byte chunk of bytes represents a BLS12-381 scalar field element
/// in its canonical form. As a result, we set the most-significant byte in each such chunk to 0.
/// This allows us to use only up to 31 bytes in each such chunk, hence the reduced capacity.
pub const N_BLOB_BYTES: usize = BLOB_WIDTH * N_DATA_BYTES_PER_COEFFICIENT;
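For concreteness, the effective capacity works out to 4096 scalars times 31 usable bytes each; a trivial hedged check, with the `types_batch` crate alias assumed:

```rust
// Hedged constant check; the crate alias is assumed.
use types_batch::{BLOB_WIDTH, N_BLOB_BYTES, N_DATA_BYTES_PER_COEFFICIENT};

fn main() {
    assert_eq!(N_BLOB_BYTES, BLOB_WIDTH * N_DATA_BYTES_PER_COEFFICIENT);
    assert_eq!(N_BLOB_BYTES, 126_976); // 4096 * 31 bytes of effective blob capacity
}
```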

View File

@@ -1,212 +0,0 @@
use alloy_primitives::B256;
use itertools::Itertools;
use crate::BatchHeaderV6;
use types_base::{public_inputs::chunk::ChunkInfo, utils::keccak256};
/// The default maximum number of chunks for a v6 payload.
pub const N_MAX_CHUNKS: usize = 45;
/// The number of bytes to encode the number of chunks in a batch.
const N_BYTES_NUM_CHUNKS: usize = 2;
/// The number of bytes to encode each chunk's size (u32).
const N_BYTES_CHUNK_SIZE: usize = 4;
impl From<&[u8]> for EnvelopeV6 {
fn from(blob_bytes: &[u8]) -> Self {
let is_encoded = blob_bytes[0] & 1 == 1;
Self {
is_encoded,
envelope_bytes: if blob_bytes[0] & 1 == 1 {
vm_zstd::process(&blob_bytes[1..]).unwrap().decoded_data
} else {
Vec::from(&blob_bytes[1..])
},
}
}
}
#[derive(Debug, Clone)]
pub struct EnvelopeV6 {
/// The original envelope bytes supplied.
///
/// Caching just for re-use later in challenge digest computation.
pub envelope_bytes: Vec<u8>,
/// Whether the enveloped bytes are encoded (compressed) in the envelope.
pub is_encoded: bool,
}
impl EnvelopeV6 {
/// Parse payload bytes and obtain challenge digest
pub fn challenge_digest(&self, versioned_hash: B256) -> B256 {
let payload = Payload::from(self);
payload.get_challenge_digest(versioned_hash)
}
}
impl From<&EnvelopeV6> for Payload {
fn from(envelope: &EnvelopeV6) -> Self {
Self::from_payload(&envelope.envelope_bytes)
}
}
/// Payload that describes a batch.
#[derive(Clone, Debug, Default)]
pub struct Payload {
/// Metadata that encodes the sizes of every chunk in the batch.
pub metadata_digest: B256,
/// The Keccak digests of transaction bytes for every chunk in the batch.
///
/// The `chunk_data_digest` is a part of the chunk-circuit's public input and hence used to
/// verify that the transaction bytes included in the chunk-circuit indeed match the
/// transaction bytes made available in the batch.
pub chunk_data_digests: Vec<B256>,
}
pub type PayloadV6 = Payload;
impl Payload {
/// For raw payload data (read from decompressed enveloped data), which is raw batch bytes
/// with metadata, this function segments the byte stream into chunk segments.
///
/// This method is used INSIDE the zkVM, since we cannot generate (compress) batch data within
/// the VM program.
///
/// The structure of batch bytes is as follows:
///
/// | Byte Index | Size | Hint |
/// |--------------------------------------------------------------|-------------------------------|-------------------------------------|
/// | 0 | N_BYTES_NUM_CHUNKS | Number of chunks |
/// | N_BYTES_NUM_CHUNKS | N_BYTES_CHUNK_SIZE | Size of chunks[0] |
/// | N_BYTES_NUM_CHUNKS + N_BYTES_CHUNK_SIZE | N_BYTES_CHUNK_SIZE | Size of chunks[1] |
/// | N_BYTES_NUM_CHUNKS + (i * N_BYTES_CHUNK_SIZE) | N_BYTES_CHUNK_SIZE | Size of chunks[i] |
/// | N_BYTES_NUM_CHUNKS + ((N_MAX_CHUNKS-1) * N_BYTES_CHUNK_SIZE) | N_BYTES_CHUNK_SIZE | Size of chunks[N_MAX_CHUNKS-1] |
/// | N_BYTES_NUM_CHUNKS + (N_MAX_CHUNKS * N_BYTES_CHUNK_SIZE) | Size of chunks[0] | L2 tx bytes of chunks[0] |
/// | "" + Size_of_chunks[0] | Size of chunks[1] | L2 tx bytes of chunks[1] |
/// | "" + Size_of_chunks[i-1] | Size of chunks[i] | L2 tx bytes of chunks[i] |
/// | "" + Size_of_chunks[Num_chunks-1] | Size of chunks[Num_chunks-1] | L2 tx bytes of chunks[Num_chunks-1] |
pub fn from_payload(batch_bytes_with_metadata: &[u8]) -> Self {
// Get the metadata bytes and metadata digest.
let n_bytes_metadata = Self::n_bytes_metadata();
let metadata_bytes = &batch_bytes_with_metadata[..n_bytes_metadata];
let metadata_digest = keccak256(metadata_bytes);
// The remaining bytes represent the chunk data (L2 tx bytes) segmented as chunks.
let batch_bytes = &batch_bytes_with_metadata[n_bytes_metadata..];
// The number of chunks in the batch.
let valid_chunks = metadata_bytes[..N_BYTES_NUM_CHUNKS]
.iter()
.fold(0usize, |acc, &d| acc * 256usize + d as usize);
// The size of each chunk in the batch.
let chunk_sizes = metadata_bytes[N_BYTES_NUM_CHUNKS..]
.iter()
.chunks(N_BYTES_CHUNK_SIZE)
.into_iter()
.map(|bytes| bytes.fold(0usize, |acc, &d| acc * 256usize + d as usize))
.collect::<Vec<usize>>();
// For every unused chunk, the chunk size should be set to 0.
for &unused_chunk_size in chunk_sizes.iter().skip(valid_chunks) {
assert_eq!(unused_chunk_size, 0, "unused chunk has size 0");
}
// Segment the batch bytes based on the chunk sizes.
let (segmented_batch_data, remaining_bytes) =
chunk_sizes.into_iter().take(valid_chunks).fold(
(Vec::new(), batch_bytes),
|(mut datas, rest_bytes), size| {
datas.push(Vec::from(&rest_bytes[..size]));
(datas, &rest_bytes[size..])
},
);
// After segmenting the batch data into chunks, no bytes should be left.
assert!(
remaining_bytes.is_empty(),
"chunk segmentation len must add up to the correct value"
);
// Compute the chunk data digests based on the segmented data.
let chunk_data_digests = segmented_batch_data
.iter()
.map(|bytes| B256::from(keccak256(bytes)))
.collect();
Self {
metadata_digest,
chunk_data_digests,
}
}
/// Compute the challenge digest from the blob bytes, which combines the digests of the
/// bytes in each chunk.
pub fn get_challenge_digest(&self, versioned_hash: B256) -> B256 {
keccak256(self.get_challenge_digest_preimage(versioned_hash))
}
/// The number of bytes in the payload data that represent the "payload metadata" section: a u16
/// for the number of chunks and N_MAX_CHUNKS u32 values for the chunk sizes.
const fn n_bytes_metadata() -> usize {
N_BYTES_NUM_CHUNKS + (N_MAX_CHUNKS * N_BYTES_CHUNK_SIZE)
}
/// Validate the payload contents.
pub fn validate<'a>(
&self,
header: &BatchHeaderV6,
chunk_infos: &'a [ChunkInfo],
) -> (&'a ChunkInfo, &'a ChunkInfo) {
// There should be at least 1 chunk info.
assert!(!chunk_infos.is_empty(), "at least 1 chunk info");
// Get the first and last chunks' info, to construct the batch info.
let (first_chunk, last_chunk) = (
chunk_infos.first().expect("at least one chunk in batch"),
chunk_infos.last().expect("at least one chunk in batch"),
);
for (&chunk_data_digest, chunk_info) in self.chunk_data_digests.iter().zip_eq(chunk_infos) {
assert_eq!(chunk_data_digest, chunk_info.tx_data_digest)
}
// Validate the l1-msg identifier data_hash for the batch.
let batch_data_hash_preimage = chunk_infos
.iter()
.flat_map(|chunk_info| chunk_info.data_hash.0)
.collect::<Vec<_>>();
let batch_data_hash = keccak256(batch_data_hash_preimage);
assert_eq!(batch_data_hash, header.data_hash);
(first_chunk, last_chunk)
}
/// Get the preimage for the challenge digest.
pub(crate) fn get_challenge_digest_preimage(&self, versioned_hash: B256) -> Vec<u8> {
// preimage =
// metadata_digest ||
// chunk[0].chunk_data_digest || ...
// chunk[N_SNARKS-1].chunk_data_digest ||
// blob_versioned_hash
//
// where chunk_data_digest for a padded chunk is set equal to the "last valid chunk"'s
// chunk_data_digest.
let mut preimage = self.metadata_digest.to_vec();
let last_digest = self
.chunk_data_digests
.last()
.expect("at least we have one");
for chunk_digest in self
.chunk_data_digests
.iter()
.chain(std::iter::repeat(last_digest))
.take(N_MAX_CHUNKS)
{
preimage.extend_from_slice(chunk_digest.as_slice());
}
preimage.extend_from_slice(versioned_hash.as_slice());
preimage
}
}
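To make the metadata layout above concrete, here is a hedged sketch that assembles the metadata section for a small batch and checks its fixed size; the helper below only mirrors the documented table and is not part of the crate.

```rust
// Hypothetical helper mirroring the documented v6 metadata layout.
const N_MAX_CHUNKS: usize = 45; // max chunks in a v6 payload
const N_BYTES_NUM_CHUNKS: usize = 2; // u16: number of chunks
const N_BYTES_CHUNK_SIZE: usize = 4; // u32: size of each chunk

fn build_metadata(chunk_sizes: &[u32]) -> Vec<u8> {
    assert!(chunk_sizes.len() <= N_MAX_CHUNKS);
    let mut out = Vec::with_capacity(N_BYTES_NUM_CHUNKS + N_MAX_CHUNKS * N_BYTES_CHUNK_SIZE);
    out.extend_from_slice(&(chunk_sizes.len() as u16).to_be_bytes());
    for i in 0..N_MAX_CHUNKS {
        // Unused chunk slots keep size 0, as asserted in `from_payload`.
        let size = chunk_sizes.get(i).copied().unwrap_or(0);
        out.extend_from_slice(&size.to_be_bytes());
    }
    out
}

fn main() {
    let metadata = build_metadata(&[1024, 37]);
    // 2 bytes for the chunk count + 45 * 4 bytes of chunk sizes = 182 bytes.
    assert_eq!(metadata.len(), 182);
    // The L2 tx bytes of each chunk would follow these 182 bytes.
}
```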

View File

@@ -1,256 +0,0 @@
use alloy_primitives::B256;
use crate::BatchHeaderV7;
use types_base::{
public_inputs::chunk::{BlockContextV2, ChunkInfo, SIZE_BLOCK_CTX},
utils::keccak256,
};
use super::N_BLOB_BYTES;
/// da-codec@v7
const DA_CODEC_VERSION: u8 = 7;
/// Represents the data contained within an EIP-4844 blob that is published on-chain.
///
/// The bytes following some metadata represent zstd-encoded [`PayloadV7`] if the envelope is
/// indicated as `is_encoded == true`.
#[derive(Debug, Clone)]
pub struct EnvelopeV7 {
/// The original envelope bytes supplied.
///
/// Caching just for re-use later in challenge digest computation.
pub envelope_bytes: Vec<u8>,
/// The version from da-codec, i.e. v7 in this case.
pub version: u8,
/// A single-byte boolean flag (0 or 1) denoting whether the following blob bytes
/// represent the batch in its zstd-encoded or raw form.
pub is_encoded: u8,
/// The unpadded bytes that possibly encode the [`PayloadV7`].
pub unpadded_bytes: Vec<u8>,
}
impl From<&[u8]> for EnvelopeV7 {
fn from(blob_bytes: &[u8]) -> Self {
// The number of bytes is as expected.
assert_eq!(blob_bytes.len(), N_BLOB_BYTES);
// The version of the blob encoding was as expected, i.e. da-codec@v7.
let version = blob_bytes[0];
assert_eq!(version, DA_CODEC_VERSION);
// Calculate the unpadded size of the encoded payload.
//
// It should be at most the maximum number of bytes allowed.
let unpadded_size = (blob_bytes[1] as usize) * 256 * 256
+ (blob_bytes[2] as usize) * 256
+ blob_bytes[3] as usize;
assert!(unpadded_size <= N_BLOB_BYTES - 5);
// Whether the envelope represents encoded payload or raw payload.
//
// Is a boolean.
let is_encoded = blob_bytes[4];
assert!(is_encoded <= 1);
// The padded bytes are all 0s.
for &padded_byte in blob_bytes.iter().skip(5 + unpadded_size) {
assert_eq!(padded_byte, 0);
}
Self {
version,
is_encoded,
unpadded_bytes: blob_bytes[5..(5 + unpadded_size)].to_vec(),
envelope_bytes: blob_bytes.to_vec(),
}
}
}
impl EnvelopeV7 {
/// The verification of the EIP-4844 blob is done via point-evaluation precompile
/// implemented in-circuit.
///
/// We require a random challenge point for this, and using Fiat-Shamir we compute it with
/// every byte in the blob along with the blob's versioned hash, i.e. an identifier for its KZG
/// commitment.
///
/// keccak256(
/// keccak256(envelope) ||
/// versioned hash
/// )
pub fn challenge_digest(&self, versioned_hash: B256) -> B256 {
keccak256(
std::iter::empty()
.chain(keccak256(&self.envelope_bytes))
.chain(versioned_hash.0)
.collect::<Vec<u8>>(),
)
}
}
/// Represents the batch data, eventually encoded into an [`EnvelopeV7`].
///
/// | Field | # Bytes | Type | Index |
/// |------------------------|---------|----------------|---------------|
/// | prevL1MessageQueueHash | 32 | bytes32 | 0 |
/// | postL1MessageQueueHash | 32 | bytes32 | 32 |
/// | initialL2BlockNumber | 8 | u64 | 64 |
/// | numBlocks | 2 | u16 | 72 |
/// | blockCtxs[0] | 52 | BlockContextV2 | 74 |
/// | ... blockCtxs[i] ... | 52 | BlockContextV2 | 74 + 52*i |
/// | blockCtxs[n-1] | 52 | BlockContextV2 | 74 + 52*(n-1) |
/// | l2TxsData | dynamic | bytes | 74 + 52*n |
#[derive(Debug, Clone)]
pub struct PayloadV7 {
/// The version from da-codec, i.e. v7 in this case.
///
/// Note: this is not really a part of the payload; it is simply copied from the envelope for
/// convenience.
pub version: u8,
/// Message queue hash at the end of the previous batch.
pub prev_msg_queue_hash: B256,
/// Message queue hash at the end of the current batch.
pub post_msg_queue_hash: B256,
/// The block number of the first block in the batch.
pub initial_block_number: u64,
/// The number of blocks in the batch.
pub num_blocks: u16,
/// The block contexts of each block in the batch.
pub block_contexts: Vec<BlockContextV2>,
/// The L2 tx data flattened over every tx in every block in the batch.
pub tx_data: Vec<u8>,
}
const INDEX_PREV_MSG_QUEUE_HASH: usize = 0;
const INDEX_POST_MSG_QUEUE_HASH: usize = INDEX_PREV_MSG_QUEUE_HASH + 32;
const INDEX_L2_BLOCK_NUM: usize = INDEX_POST_MSG_QUEUE_HASH + 32;
const INDEX_NUM_BLOCKS: usize = INDEX_L2_BLOCK_NUM + 8;
const INDEX_BLOCK_CTX: usize = INDEX_NUM_BLOCKS + 2;
impl From<&EnvelopeV7> for PayloadV7 {
fn from(envelope: &EnvelopeV7) -> Self {
// Conditionally decode depending on the flag set in the envelope.
let payload_bytes = if envelope.is_encoded & 1 == 1 {
vm_zstd::process(&envelope.unpadded_bytes)
.expect("zstd decode should succeed")
.decoded_data
} else {
envelope.unpadded_bytes.to_vec()
};
// Sanity check on the payload size.
assert!(payload_bytes.len() >= INDEX_BLOCK_CTX);
let num_blocks = u16::from_be_bytes(
payload_bytes[INDEX_NUM_BLOCKS..INDEX_BLOCK_CTX]
.try_into()
.expect("should not fail"),
);
assert!(payload_bytes.len() >= INDEX_BLOCK_CTX + ((num_blocks as usize) * SIZE_BLOCK_CTX));
// Deserialize the other fields.
let prev_msg_queue_hash =
B256::from_slice(&payload_bytes[INDEX_PREV_MSG_QUEUE_HASH..INDEX_POST_MSG_QUEUE_HASH]);
let post_msg_queue_hash =
B256::from_slice(&payload_bytes[INDEX_POST_MSG_QUEUE_HASH..INDEX_L2_BLOCK_NUM]);
let initial_block_number = u64::from_be_bytes(
payload_bytes[INDEX_L2_BLOCK_NUM..INDEX_NUM_BLOCKS]
.try_into()
.expect("should not fail"),
);
// Deserialize block contexts depending on the number of blocks in the batch.
let mut block_contexts = Vec::with_capacity(num_blocks as usize);
for i in 0..num_blocks {
let start = (i as usize) * SIZE_BLOCK_CTX + INDEX_BLOCK_CTX;
block_contexts.push(BlockContextV2::from(
&payload_bytes[start..(start + SIZE_BLOCK_CTX)],
));
}
// All remaining bytes are flattened L2 txs.
let tx_data =
payload_bytes[INDEX_BLOCK_CTX + ((num_blocks as usize) * SIZE_BLOCK_CTX)..].to_vec();
Self {
version: envelope.version,
prev_msg_queue_hash,
post_msg_queue_hash,
initial_block_number,
num_blocks,
block_contexts,
tx_data,
}
}
}
impl PayloadV7 {
/// Validate the payload contents.
pub fn validate<'a>(
&self,
header: &BatchHeaderV7,
chunk_infos: &'a [ChunkInfo],
) -> (&'a ChunkInfo, &'a ChunkInfo) {
// Get the first and last chunks' info, to construct the batch info.
let (first_chunk, last_chunk) = (
chunk_infos.first().expect("at least one chunk in batch"),
chunk_infos.last().expect("at least one chunk in batch"),
);
// version from payload is what's present in the on-chain batch header
assert_eq!(self.version, header.version);
// number of blocks in the batch
assert_eq!(
usize::from(self.num_blocks),
chunk_infos
.iter()
.flat_map(|chunk_info| &chunk_info.block_ctxs)
.count()
);
assert_eq!(usize::from(self.num_blocks), self.block_contexts.len());
// the block number of the first block in the batch
assert_eq!(self.initial_block_number, first_chunk.initial_block_number);
// prev message queue hash
assert_eq!(self.prev_msg_queue_hash, first_chunk.prev_msg_queue_hash);
// post message queue hash
assert_eq!(self.post_msg_queue_hash, last_chunk.post_msg_queue_hash);
// for each chunk, the tx_data_digest, i.e. keccak digest of the rlp-encoded L2 tx bytes
// flattened over every tx in the chunk, should be re-computed and matched against the
// public input of the chunk-circuit.
//
        // first check that the total size of rlp-encoded tx data, summed over every chunk in
        // the batch, is in fact the size available from the payload.
assert_eq!(
u64::try_from(self.tx_data.len()).expect("len(tx-data) is u64"),
chunk_infos
.iter()
.map(|chunk_info| chunk_info.tx_data_length)
.sum::<u64>(),
);
let mut index: usize = 0;
for chunk_info in chunk_infos.iter() {
let chunk_size = chunk_info.tx_data_length as usize;
let chunk_tx_data_digest =
keccak256(&self.tx_data.as_slice()[index..(index + chunk_size)]);
assert_eq!(chunk_tx_data_digest, chunk_info.tx_data_digest);
index += chunk_size;
}
// for each block in the batch, check that the block context matches what's provided as
// witness.
for (block_ctx, witness_block_ctx) in self.block_contexts.iter().zip(
chunk_infos
.iter()
.flat_map(|chunk_info| &chunk_info.block_ctxs),
) {
assert_eq!(block_ctx, witness_block_ctx);
}
(first_chunk, last_chunk)
}
}
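// Illustrative sketch (not part of this diff) of the per-chunk tx-data digest check in
// isolation: given chunk lengths that sum to the total tx-data length (as asserted in
// `validate` above), each chunk's digest is the keccak of its contiguous slice.
fn chunk_tx_data_digests(tx_data: &[u8], chunk_lens: &[usize]) -> Vec<B256> {
    assert_eq!(tx_data.len(), chunk_lens.iter().sum::<usize>());
    let mut index = 0;
    chunk_lens
        .iter()
        .map(|&len| {
            let digest = keccak256(&tx_data[index..index + len]);
            index += len;
            digest
        })
        .collect()
}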

View File

@@ -1,57 +0,0 @@
use crate::header::ReferenceHeader;
use types_agg::{AggregationInput, ProgramCommitment, ProofCarryingWitness};
use types_base::public_inputs::{ForkName, chunk::ChunkInfo};
/// Simply re-wraps a 48-byte value to avoid an unnecessary dependency.
pub type Bytes48 = [u8; 48];
/// Witness required for applying point evaluation.
#[derive(Clone, Debug, rkyv::Archive, rkyv::Deserialize, rkyv::Serialize)]
#[rkyv(derive(Debug))]
pub struct PointEvalWitness {
/// kzg commitment
#[rkyv()]
pub kzg_commitment: Bytes48,
/// kzg proof
#[rkyv()]
pub kzg_proof: Bytes48,
}
/// Witness to the batch circuit.
#[derive(Clone, Debug, rkyv::Archive, rkyv::Deserialize, rkyv::Serialize)]
#[rkyv(derive(Debug))]
pub struct BatchWitness {
/// Flattened root proofs from all chunks in the batch.
#[rkyv()]
pub chunk_proofs: Vec<AggregationInput>,
/// Chunk infos.
#[rkyv()]
pub chunk_infos: Vec<ChunkInfo>,
/// Blob bytes.
#[rkyv()]
pub blob_bytes: Vec<u8>,
/// Witness for point evaluation
pub point_eval_witness: PointEvalWitness,
/// Header for reference.
#[rkyv()]
pub reference_header: ReferenceHeader,
/// The code version specifying the chain spec.
#[rkyv()]
pub fork_name: ForkName,
}
impl ProofCarryingWitness for ArchivedBatchWitness {
fn get_proofs(&self) -> Vec<AggregationInput> {
self.chunk_proofs
.iter()
.map(|archived| AggregationInput {
public_values: archived
.public_values
.iter()
.map(|u32_le| u32_le.to_native())
.collect(),
commitment: ProgramCommitment::from(&archived.commitment),
})
.collect()
}
}

View File

@@ -1,21 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types-bundle"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"
[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
rkyv.workspace = true
serde.workspace = true
itertools.workspace = true
vm-zstd = { workspace = true }
types-base = { path = "../base", package = "scroll-zkvm-circuit-input-types-base"}
types-agg = { path = "../aggregation", package = "scroll-zkvm-circuit-input-types-aggregation"}
[features]
default = []

View File

@@ -1,2 +0,0 @@
mod witness;
pub use witness::{ArchivedBundleWitness, BundleWitness};

View File

@@ -1,30 +0,0 @@
use types_agg::{AggregationInput, ProgramCommitment, ProofCarryingWitness};
use types_base::public_inputs::batch::BatchInfo;
/// The witness for the bundle circuit.
#[derive(Clone, Debug, rkyv::Archive, rkyv::Deserialize, rkyv::Serialize)]
#[rkyv(derive(Debug))]
pub struct BundleWitness {
/// Batch proofs being aggregated in the bundle.
#[rkyv()]
pub batch_proofs: Vec<AggregationInput>,
/// Public-input values for the corresponding batch proofs.
#[rkyv()]
pub batch_infos: Vec<BatchInfo>,
}
impl ProofCarryingWitness for ArchivedBundleWitness {
fn get_proofs(&self) -> Vec<AggregationInput> {
self.batch_proofs
.iter()
.map(|archived| AggregationInput {
public_values: archived
.public_values
.iter()
.map(|u32_le| u32_le.to_native())
.collect(),
commitment: ProgramCommitment::from(&archived.commitment),
})
.collect()
}
}

View File

@@ -1,28 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types-chunk"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"
[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
rkyv.workspace = true
sbv-trie = { workspace = true }
sbv-core = { workspace = true }
sbv-primitives = { workspace = true }
sbv-kv = { workspace = true }
serde.workspace = true
itertools.workspace = true
openvm = { workspace = true, features = ["std"] }
openvm-rv32im-guest = { workspace = true }
openvm-custom-insn = { workspace = true }
types-base = { path = "../base", package = "scroll-zkvm-circuit-input-types-base"}
[features]
default = []
openvm = ["sbv-trie/openvm", "sbv-core/openvm", "sbv-primitives/openvm"]

View File

@@ -1,167 +0,0 @@
use sbv_core::{EvmDatabase, EvmExecutor};
use sbv_primitives::{
BlockWitness,
chainspec::{
BaseFeeParams, BaseFeeParamsKind, Chain, MAINNET,
reth_chainspec::ChainSpec,
scroll::{ScrollChainConfig, ScrollChainSpec},
},
ext::{BlockWitnessChunkExt, TxBytesHashExt},
hardforks::SCROLL_DEV_HARDFORKS,
types::{
consensus::BlockHeader,
reth::{Block, BlockWitnessRethExt, RecoveredBlock},
scroll::ChunkInfoBuilder,
},
};
use crate::{ArchivedChunkWitness, make_providers, manually_drop_on_zkvm};
use types_base::public_inputs::{
ForkName,
chunk::{BlockContextV2, ChunkInfo},
};
fn block_ctxv2_from_block(value: &RecoveredBlock<Block>) -> BlockContextV2 {
use alloy_primitives::U256;
BlockContextV2 {
timestamp: value.timestamp,
gas_limit: value.gas_limit,
base_fee: U256::from(value.base_fee_per_gas().expect("base_fee_expected")),
num_txs: u16::try_from(value.body().transactions.len()).expect("num txs u16"),
num_l1_msgs: u16::try_from(
value
.body()
.transactions
.iter()
.filter(|tx| tx.is_l1_message())
.count(),
)
.expect("num l1 msgs u16"),
}
}
type Witness = ArchivedChunkWitness;
pub fn execute(witness: &Witness) -> Result<ChunkInfo, String> {
if witness.blocks.is_empty() {
return Err("At least one witness must be provided in chunk mode".into());
}
if !witness.blocks.has_same_chain_id() {
return Err("All witnesses must have the same chain id in chunk mode".into());
}
if !witness.blocks.has_seq_block_number() {
return Err("All witnesses must have sequential block numbers in chunk mode".into());
}
// Get the blocks to build the basic chunk-info.
let blocks = manually_drop_on_zkvm!(
witness
.blocks
.iter()
.map(|w| w.build_reth_block())
.collect::<Result<Vec<RecoveredBlock<Block>>, _>>()
.map_err(|e| e.to_string())?
);
let pre_state_root = witness.blocks[0].pre_state_root;
let fork_name = ForkName::from(&witness.fork_name);
let chain = Chain::from_id(witness.blocks[0].chain_id());
// SCROLL_DEV_HARDFORKS will enable all forks
let mut hardforks = (*SCROLL_DEV_HARDFORKS).clone();
if fork_name == ForkName::EuclidV1 {
// disable EuclidV2 fork for legacy chunk
use sbv_primitives::{chainspec::ForkCondition, hardforks::ScrollHardfork};
hardforks.insert(ScrollHardfork::EuclidV2, ForkCondition::Never);
}
let inner = ChainSpec {
chain,
genesis_hash: Default::default(),
genesis: Default::default(),
genesis_header: Default::default(),
paris_block_and_final_difficulty: Default::default(),
hardforks,
deposit_contract: Default::default(),
base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()),
prune_delete_limit: 20000,
blob_params: Default::default(),
};
let config = ScrollChainConfig::mainnet();
let chain_spec: ScrollChainSpec = ScrollChainSpec { inner, config };
let (code_db, nodes_provider, block_hashes) = make_providers(&witness.blocks);
let nodes_provider = manually_drop_on_zkvm!(nodes_provider);
let prev_state_root = witness.blocks[0].pre_state_root();
let mut db = manually_drop_on_zkvm!(
EvmDatabase::new_from_root(code_db, prev_state_root, &nodes_provider, block_hashes)
.map_err(|e| format!("failed to create EvmDatabase: {}", e))?
);
for block in blocks.iter() {
let output = manually_drop_on_zkvm!(
EvmExecutor::new(std::sync::Arc::new(chain_spec.clone()), &db, block)
.execute()
.map_err(|e| format!("failed to execute block: {}", e))?
);
db.update(&nodes_provider, output.state.state.iter())
.map_err(|e| format!("failed to update db: {}", e))?;
}
let post_state_root = db.commit_changes();
let withdraw_root = db
.withdraw_root()
.map_err(|e| format!("failed to get withdraw root: {}", e))?;
let mut rlp_buffer = manually_drop_on_zkvm!(Vec::with_capacity(2048));
let (tx_data_length, tx_data_digest) = blocks
.iter()
.flat_map(|b| b.body().transactions.iter())
.tx_bytes_hash_in(rlp_buffer.as_mut());
let _ = tx_data_length;
let sbv_chunk_info = {
#[allow(unused_mut)]
let mut builder = ChunkInfoBuilder::new(&chain_spec, pre_state_root.into(), &blocks);
if fork_name == ForkName::EuclidV2 {
builder.set_prev_msg_queue_hash(witness.prev_msg_queue_hash.into());
}
builder.build(withdraw_root)
};
if post_state_root != sbv_chunk_info.post_state_root() {
return Err(format!(
"state root mismatch: expected={}, found={}",
sbv_chunk_info.post_state_root(),
post_state_root
));
}
let chunk_info = ChunkInfo {
chain_id: sbv_chunk_info.chain_id(),
prev_state_root: sbv_chunk_info.prev_state_root(),
post_state_root: sbv_chunk_info.post_state_root(),
data_hash: sbv_chunk_info
.clone()
.into_legacy()
.map(|x| x.data_hash)
.unwrap_or_default(),
withdraw_root,
tx_data_digest,
tx_data_length: u64::try_from(tx_data_length).expect("tx_data_length: u64"),
initial_block_number: blocks[0].header().number,
prev_msg_queue_hash: witness.prev_msg_queue_hash.into(),
post_msg_queue_hash: sbv_chunk_info
.into_euclid_v2()
.map(|x| x.post_msg_queue_hash)
.unwrap_or_default(),
block_ctxs: blocks.iter().map(block_ctxv2_from_block).collect(),
};
openvm::io::println(format!("withdraw_root = {:?}", withdraw_root));
openvm::io::println(format!("tx_bytes_hash = {:?}", tx_data_digest));
    // We should never touch that lazy lock, or else we introduce ~40M useless cycles.
assert!(std::sync::LazyLock::get(&MAINNET).is_none());
Ok(chunk_info)
}

Some files were not shown because too many files have changed in this diff.