Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-12 07:28:08 -05:00)

Compare commits: refactor/z ... fix/macos (72 commits)
| SHA1 |
|---|
| 8a05300ab3 |
| 5d378a015d |
| 88066d72e8 |
| 226d32f9bf |
| 2ecc42e2f5 |
| de72e2dccb |
| edb51236e2 |
| 15a23478d1 |
| 9100a0bd4a |
| 0ede0cd41f |
| 9dceae1ca2 |
| 235ba874c6 |
| 6bee33036f |
| 1985e54ab3 |
| bfc0fdd7ce |
| 426c57a5fa |
| b7fdf48c30 |
| ad0c918944 |
| 1098876183 |
| 9e520e7769 |
| de7f6e56a9 |
| 3b323198dc |
| c11e0283e8 |
| a5a7844646 |
| 7ff5b190ec |
| b297edd28d |
| 47c85d4983 |
| 1552e98b79 |
| a65b3066a3 |
| 1f2b397bbd |
| ae791a0714 |
| c012f7132d |
| 6897cc54bd |
| d21fa36803 |
| fc75299eb3 |
| 4bfcd35d0c |
| 6d62f8e5fa |
| 392ae07736 |
| db80b47820 |
| daa1387208 |
| 67b05558e2 |
| 1e447b0fef |
| f7c6ecadf4 |
| 9d94f943e5 |
| de17ad43ff |
| 4233ad928c |
| 3050ccb40f |
| 12e89201a1 |
| a0ee508bbd |
| b8909d3795 |
| b7a172a519 |
| 80807dbb75 |
| a776ca7c82 |
| ea38ae7e96 |
| 9dc57c6126 |
| 9367565a31 |
| d2f7663d26 |
| b0943b1035 |
| 5d6b5a89f4 |
| 4ee459a602 |
| 276385fd0a |
| 82fb15de3b |
| 5204ad50e0 |
| f824fb0efc |
| a55c7bdc77 |
| 47b1a037a9 |
| ae34020c34 |
| fa9fab6e98 |
| c4f869a33a |
| 0cee9a51e6 |
| 97de988228 |
| a12175dafc |
@@ -1,16 +0,0 @@
-.github
-
-.gitignore
-
-.dockerignore
-
-Dockerfile
-Dockerfile.backup
-
-.output
-
-docs
-
-openvm-clippy
-
-target
.github/workflows/common.yml (vendored, 18 changed lines)
@@ -29,7 +29,7 @@ jobs:
     steps:
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2024-12-06
+          toolchain: nightly-2025-08-18
           override: true
           components: rustfmt, clippy
       - name: Install Go
@@ -41,16 +41,12 @@ jobs:
       - name: Cache cargo
         uses: Swatinem/rust-cache@v2
         with:
-          workspaces: "common/libzkp/impl -> target"
-      - name: Setup SSH for private repos
-        uses: webfactory/ssh-agent@v0.9.0
-        with:
-          ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
-      - name: Lint
-        working-directory: 'common'
-        run: |
-          rm -rf $HOME/.cache/golangci-lint
-          make lint
+          workspaces: ". -> target"
+      # - name: Lint
+      #   working-directory: 'common'
+      #   run: |
+      #     rm -rf $HOME/.cache/golangci-lint
+      #     make lint
   goimports-lint:
     if: github.event.pull_request.draft == false
     runs-on: ubuntu-latest
.github/workflows/coordinator.yml (vendored, 4 changed lines)
@@ -33,7 +33,7 @@ jobs:
     steps:
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-12-03
+          toolchain: nightly-2025-08-18
           override: true
           components: rustfmt, clippy
       - name: Install Go
@@ -112,7 +112,7 @@ jobs:
       - name: Test coordinator packages
         working-directory: 'coordinator'
         run: |
           # go test -exec "env LD_LIBRARY_PATH=${PWD}/verifier/lib" -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="scroll-tech/coordinator" -coverprofile=coverage.txt -covermode=atomic ./...
-          make libzkp
+          go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic -tags mock_verifier ./...
       - name: Upload coverage reports to Codecov
         uses: codecov/codecov-action@v3
.github/workflows/docker.yml (vendored, 91 changed lines)
@@ -10,7 +10,8 @@ env:
 
 jobs:
   gas_oracle:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
@@ -50,12 +51,11 @@ jobs:
           push: true
           tags: |
             scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            scrolltech/${{ env.REPOSITORY }}:latest
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
 
   rollup_relayer:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
@@ -95,12 +95,55 @@ jobs:
           push: true
           tags: |
             scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            scrolltech/${{ env.REPOSITORY }}:latest
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
 
+  blob_uploader:
+    runs-on:
+      group: scroll-reth-runner-group
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ env.AWS_REGION }}
+      - name: Login to Amazon ECR
+        id: login-ecr
+        uses: aws-actions/amazon-ecr-login@v2
+      - name: check repo and create it if not exist
+        env:
+          REPOSITORY: blob-uploader
+        run: |
+          aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
+      - name: Build and push
+        uses: docker/build-push-action@v3
+        env:
+          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
+          REPOSITORY: blob-uploader
+          IMAGE_TAG: ${{ github.ref_name }}
+        with:
+          context: .
+          file: ./build/dockerfiles/blob_uploader.Dockerfile
+          platforms: linux/amd64,linux/arm64
+          push: true
+          tags: |
+            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
+            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
+
   rollup-db-cli:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
      - name: Checkout code
        uses: actions/checkout@v4
@@ -140,12 +183,11 @@ jobs:
           push: true
           tags: |
             scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            scrolltech/${{ env.REPOSITORY }}:latest
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
 
   bridgehistoryapi-fetcher:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
@@ -185,12 +227,11 @@ jobs:
           push: true
           tags: |
             scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            scrolltech/${{ env.REPOSITORY }}:latest
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
 
   bridgehistoryapi-api:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
@@ -230,12 +271,11 @@ jobs:
           push: true
           tags: |
             scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            scrolltech/${{ env.REPOSITORY }}:latest
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
 
   bridgehistoryapi-db-cli:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
@@ -275,12 +315,11 @@ jobs:
           push: true
           tags: |
             scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            scrolltech/${{ env.REPOSITORY }}:latest
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
 
   coordinator-api:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
@@ -307,13 +346,6 @@ jobs:
           REPOSITORY: coordinator-api
         run: |
           aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
-      - name: Setup SSH for private repos
-        uses: webfactory/ssh-agent@v0.9.0
-        with:
-          ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
-      - name: Run custom script
-        run: |
-          ./build/dockerfiles/coordinator-api/init-openvm.sh
       - name: Build and push
         uses: docker/build-push-action@v3
         env:
@@ -326,12 +358,11 @@ jobs:
           push: true
           tags: |
             scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            scrolltech/${{ env.REPOSITORY }}:latest
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
 
   coordinator-cron:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
@@ -371,6 +402,4 @@ jobs:
           push: true
           tags: |
             scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            scrolltech/${{ env.REPOSITORY }}:latest
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
.github/workflows/integration.yml (vendored, 1 changed line)
@@ -38,6 +38,7 @@ jobs:
           make dev_docker
           make -C rollup mock_abi
           make -C common/bytecode all
+          make -C coordinator/internal/logic/libzkp build
       - name: Run integration tests
         run: |
           go test -v -tags="mock_prover mock_verifier" -p 1 -coverprofile=coverage.txt scroll-tech/integration-test/...
.github/workflows/intermediate-docker.yml (vendored, 11 changed lines)
@@ -22,10 +22,9 @@ on:
         required: true
         type: choice
         options:
-          - nightly-2023-12-03
-          - nightly-2022-12-10
           - 1.86.0
-        default: "nightly-2023-12-03"
+          - nightly-2025-08-18
+        default: "nightly-2025-08-18"
       PYTHON_VERSION:
         description: "Python version"
         required: false
@@ -40,7 +39,8 @@ on:
         options:
           - "11.7.1"
           - "12.2.2"
-        default: "11.7.1"
+          - "12.9.1"
+        default: "12.9.1"
       CARGO_CHEF_TAG:
         description: "Cargo chef version"
         required: true
@@ -69,7 +69,8 @@ defaults:
 
 jobs:
   build-and-publish-intermediate:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
.gitignore (vendored, 1 changed line)
@@ -24,3 +24,4 @@ sftp-config.json
 *~
 
 target
+zkvm-prover/config.json
Cargo.lock (generated, 5169 changed lines): diff suppressed because it is too large.
Cargo.toml (86 changed lines)
@@ -1,19 +1,9 @@
 [workspace]
 members = [
-    "common/types-rs",
-    "common/types-rs/base",
-    "common/types-rs/aggregation",
-    "common/types-rs/chunk",
-    "common/types-rs/batch",
-    "common/types-rs/bundle",
-    "common/libzkp/impl",
-    "zkvm-prover/prover",
-    "zkvm-prover/verifier",
-    "zkvm-prover/integration",
-    "zkvm-prover/bin",
-]
-exclude = [
-    "prover"
+    "crates/libzkp",
+    "crates/l2geth",
+    "crates/libzkp_c",
+    "crates/prover-bin",
 ]
 
 resolver = "2"
@@ -24,70 +14,54 @@ edition = "2021"
 homepage = "https://scroll.io"
 readme = "README.md"
 repository = "https://github.com/scroll-tech/scroll"
-version = "4.5.8"
+version = "4.7.1"
 
 [workspace.dependencies]
-scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", package = "scroll-zkvm-prover"}
+scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.0" }
+scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.0" }
+scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.0" }
 
-openvm = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-build = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-transpiler = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-custom-insn = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-rv32im-guest = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-circuit = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-native-circuit = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-native-compiler = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-native-recursion = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-native-transpiler = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-continuations = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-sdk = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
-openvm-stark-sdk = { git = "https://github.com/openvm-org/stark-backend.git", tag = "v1.0.1" }
-
-sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade", features = ["scroll"] }
-sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade", features = ["scroll"] }
-sbv-kv = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade" }
-sbv-trie = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade" }
-sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade" }
+sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91", features = ["scroll", "rkyv"] }
+sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91" }
+sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91", features = ["scroll"] }
 
 metrics = "0.23.0"
 metrics-util = "0.17"
 metrics-tracing-context = "0.16.0"
 
-alloy = { version = "0.11", default-features = false }
-alloy-primitives = { version = "0.8", default-features = false }
-anyhow = "1.0"
+alloy = { version = "1", default-features = false }
+alloy-primitives = { version = "1.4.1", default-features = false, features = ["tiny-keccak"] }
 # also use this to trigger "serde" feature for primitives
-alloy-serde = { version = "0.8", default-features = false }
+alloy-serde = { version = "1", default-features = false }
 
+rkyv = "0.8"
 serde = { version = "1", default-features = false, features = ["derive"] }
 serde_json = { version = "1.0" }
-serde_with = "3.11.0"
+serde_derive = "1.0"
+serde_with = "3"
 itertools = "0.14"
-tiny-keccak = "2.0"
 tracing = "0.1"
 eyre = "0.6"
 bincode_v1 = { version = "1.3", package = "bincode"}
-snark-verifier-sdk = { version = "0.2.0", default-features = false, features = [
-    "loader_halo2",
-    "halo2-axiom",
-    "display",
-] }
 once_cell = "1.20"
 base64 = "0.22"
 
 #TODO: upgrade
 vm-zstd = { git = "https://github.com/scroll-tech/rust-zstd-decompressor.git", tag = "v0.1.1" }
 
-scroll-zkvm-circuit-input-types = { path = "common/types-rs"}
-scroll-zkvm-verifier = { path = "zkvm-prover/verifier"}
-scroll-zkvm-prover = { path = "zkvm-prover/prover"}
-
 [patch.crates-io]
-alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v0.8.18-euclid-upgrade" }
 ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
-tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-euclid-upgrade" }
+revm = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-bytecode = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-context = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-context-interface = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-database = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-database-interface = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-handler = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-inspector = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-interpreter = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-precompile = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-primitives = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-state = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
 
 [profile.maxperf]
 inherits = "release"
 lto = "fat"
 codegen-units = 1
Makefile (2 changed lines)
@@ -1,6 +1,6 @@
 .PHONY: fmt dev_docker build_test_docker run_test_docker clean update
 
-L2GETH_TAG=scroll-v5.8.23
+L2GETH_TAG=scroll-v5.9.7
 
 help: ## Display this help message
 	@grep -h \
@@ -28,7 +28,7 @@ We welcome community contributions to this repository. Before you submit any iss
 
 ## Prerequisites
 + Go 1.21
-+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
++ Rust (for version, see [rust-toolchain](./rust-toolchain))
 + Hardhat / Foundry
 + Docker
@@ -10,15 +10,18 @@ require (
 	github.com/go-redis/redis/v8 v8.11.5
 	github.com/pressly/goose/v3 v3.16.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950
-	github.com/stretchr/testify v1.9.0
+	github.com/scroll-tech/da-codec v0.9.0
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20251113125950-906b730d541d
+	github.com/stretchr/testify v1.10.0
 	github.com/urfave/cli/v2 v2.25.7
 	golang.org/x/sync v0.11.0
 	gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
 )
 
-replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 // It's a hotfix for the header hash incompatibility issue, pls change this with caution
+// Hotfix for header hash incompatibility issue.
+// PR: https://github.com/scroll-tech/go-ethereum/pull/1133/
+// CAUTION: Requires careful handling. When upgrading go-ethereum, ensure this fix remains up-to-date in this branch.
+replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e
 
 require (
 	dario.cat/mergo v1.0.0 // indirect
@@ -30,10 +33,10 @@ require (
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
 	github.com/chenzhuoyu/iasm v0.9.0 // indirect
-	github.com/consensys/bavard v0.1.13 // indirect
-	github.com/consensys/gnark-crypto v0.13.0 // indirect
+	github.com/consensys/bavard v0.1.27 // indirect
+	github.com/consensys/gnark-crypto v0.16.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
-	github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
+	github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
 	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
@@ -41,7 +44,7 @@ require (
 	github.com/docker/docker v26.1.0+incompatible // indirect
 	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/edsrzf/mmap-go v1.0.0 // indirect
-	github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
+	github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
 	github.com/fjl/memsize v0.0.2 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/gabriel-vasile/mimetype v1.4.2 // indirect
@@ -98,7 +101,7 @@ require (
 	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
 	github.com/sourcegraph/conc v0.3.0 // indirect
 	github.com/status-im/keycard-go v0.2.0 // indirect
-	github.com/supranational/blst v0.3.13 // indirect
+	github.com/supranational/blst v0.3.15 // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
 	github.com/tklauser/go-sysconf v0.3.14 // indirect
 	github.com/tklauser/numcpus v0.9.0 // indirect
@@ -110,7 +113,7 @@ require (
 	go.opentelemetry.io/otel/trace v1.24.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/arch v0.5.0 // indirect
-	golang.org/x/crypto v0.24.0 // indirect
+	golang.org/x/crypto v0.32.0 // indirect
 	golang.org/x/net v0.25.0 // indirect
 	golang.org/x/sys v0.30.0 // indirect
 	golang.org/x/text v0.21.0 // indirect
@@ -53,16 +53,16 @@ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ
 github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
 github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
 github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
-github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
-github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
-github.com/consensys/gnark-crypto v0.13.0 h1:VPULb/v6bbYELAPTDFINEVaMTTybV5GLxDdcjnS+4oc=
-github.com/consensys/gnark-crypto v0.13.0/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o=
+github.com/consensys/bavard v0.1.27 h1:j6hKUrGAy/H+gpNrpLU3I26n1yc+VMGmd6ID5+gAhOs=
+github.com/consensys/bavard v0.1.27/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
+github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo=
+github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
 github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
 github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
 github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
-github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
+github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
+github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
 github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -88,8 +88,8 @@ github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T
 github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E=
 github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
 github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
-github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
-github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
+github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
+github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
 github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
 github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -214,8 +214,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
-github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
+github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
+github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
 github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
 github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
@@ -309,10 +309,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54 h1:qVpsVu1J91opTn6HYeuzWcBRVhQmPR8g05i+PlOjlI4=
-github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 h1:qfOaRflvH1vtnFWloB7BveKlP/VqYgMqLJ6e9TlBJ/8=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
+github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
+github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e h1:7U1/JilCPIqJTYNgKzlfBEkRNORRwJ8+PCmdR8/XK6A=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
@@ -341,10 +341,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
-github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
+github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
 github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
@@ -387,8 +387,8 @@ golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
 golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
-golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
+golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
+golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
 golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
 golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -38,6 +38,7 @@ type FetcherConfig struct {
 	BeaconNodeAPIEndpoint  string `json:"BeaconNodeAPIEndpoint"`
 	BlobScanAPIEndpoint    string `json:"BlobScanAPIEndpoint"`
 	BlockNativeAPIEndpoint string `json:"BlockNativeAPIEndpoint"`
+	AwsS3Endpoint          string `json:"AwsS3Endpoint"`
 }
 
 // RedisConfig redis config
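The new `AwsS3Endpoint` field simply extends the JSON-decoded fetcher config. A minimal sketch of how such a config decodes, with made-up endpoint values for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// FetcherConfig mirrors the fields shown in the diff above.
type FetcherConfig struct {
	BeaconNodeAPIEndpoint  string `json:"BeaconNodeAPIEndpoint"`
	BlobScanAPIEndpoint    string `json:"BlobScanAPIEndpoint"`
	BlockNativeAPIEndpoint string `json:"BlockNativeAPIEndpoint"`
	AwsS3Endpoint          string `json:"AwsS3Endpoint"`
}

func main() {
	// Hypothetical config snippet; real deployments supply their own endpoints.
	raw := []byte(`{
		"BeaconNodeAPIEndpoint": "http://beacon.example:5052",
		"AwsS3Endpoint": "https://bucket.s3.example.amazonaws.com"
	}`)

	var cfg FetcherConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// Fields left out of the JSON stay empty, which the fetcher treats as "disabled".
	fmt.Println(cfg.AwsS3Endpoint != "")
}
```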
@@ -39,6 +39,9 @@ type L1MessageFetcher struct {
 // NewL1MessageFetcher creates a new L1MessageFetcher instance.
 func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) (*L1MessageFetcher, error) {
 	blobClient := blob_client.NewBlobClients()
+	if cfg.AwsS3Endpoint != "" {
+		blobClient.AddBlobClient(blob_client.NewAwsS3Client(cfg.AwsS3Endpoint))
+	}
 	if cfg.BeaconNodeAPIEndpoint != "" {
 		beaconNodeClient, err := blob_client.NewBeaconNodeClient(cfg.BeaconNodeAPIEndpoint)
 		if err != nil {
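Because the S3 client is registered before the beacon-node client, it is consulted first. A stripped-down sketch of the try-each-client-in-order pattern behind `NewBlobClients`/`AddBlobClient`; the interface and type names here are illustrative, not the repo's exact API:

```go
package blobfallback

import (
	"context"
	"errors"
	"fmt"
)

// BlobClient is an illustrative stand-in for the repo's blob client interface.
type BlobClient interface {
	GetBlobByVersionedHash(ctx context.Context, hash string) ([]byte, error)
}

// BlobClients tries each registered client in registration order.
type BlobClients struct {
	clients []BlobClient
}

// AddBlobClient appends a client; earlier clients take priority.
func (b *BlobClients) AddBlobClient(c BlobClient) { b.clients = append(b.clients, c) }

// GetBlobByVersionedHash returns the first successful result, falling
// through to the next client on error.
func (b *BlobClients) GetBlobByVersionedHash(ctx context.Context, hash string) ([]byte, error) {
	var lastErr error
	for _, c := range b.clients {
		blob, err := c.GetBlobByVersionedHash(ctx, hash)
		if err == nil {
			return blob, nil
		}
		lastErr = err // remember the failure and try the next client
	}
	if lastErr == nil {
		lastErr = errors.New("no blob clients configured")
	}
	return nil, fmt.Errorf("all blob clients failed: %w", lastErr)
}
```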
@@ -361,7 +361,6 @@ func getTxHistoryInfoFromBridgeBatchDepositMessage(message *orm.BridgeBatchDepos
 func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pageNum, pageSize uint64) ([]*types.TxHistoryInfo, uint64, bool, error) {
 	start := int64((pageNum - 1) * pageSize)
 	end := start + int64(pageSize) - 1
-
 	total, err := h.redis.ZCard(ctx, cacheKey).Result()
 	if err != nil {
 		log.Error("failed to get zcard result", "error", err)
@@ -372,6 +371,10 @@ func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pa
 		return nil, 0, false, nil
 	}
 
+	if start >= total {
+		return nil, 0, false, nil
+	}
+
 	values, err := h.redis.ZRevRange(ctx, cacheKey, start, end).Result()
 	if err != nil {
 		log.Error("failed to get zrange result", "error", err)
@@ -450,5 +453,6 @@ func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKe
 		log.Error("cache miss after write, expect hit", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
 		return nil, 0, err
 	}
+
 	return pagedTxs, total, nil
 }
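The new `start >= total` guard matters because redis ZREVRANGE with an out-of-range start returns an empty slice, which the caller could otherwise mistake for a valid cached-but-empty page. The check in isolation, as a small sketch with plain integers and no redis dependency:

```go
package pagecache

// pageBounds converts a 1-based page number and page size into ZRevRange
// bounds, reporting ok=false when the requested page lies past the end of
// the sorted set (treated as a cache miss rather than an empty page).
func pageBounds(pageNum, pageSize uint64, total int64) (start, end int64, ok bool) {
	start = int64((pageNum - 1) * pageSize)
	end = start + int64(pageSize) - 1
	if total == 0 || start >= total {
		return 0, 0, false
	}
	return start, end, true
}
```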
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"math/big"
+	"time"
 
 	"github.com/scroll-tech/da-codec/encoding"
 	"github.com/scroll-tech/go-ethereum/common"
@@ -252,6 +253,11 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
 	// Key: commit transaction hash
 	// Value: parent batch hashes (in order) for each processed CommitBatch event in the transaction
 	txBlobIndexMap := make(map[common.Hash][]common.Hash)
+
+	// Cache for the previous transaction to avoid duplicate fetches
+	var lastTxHash common.Hash
+	var lastTx *types.Transaction
+
 	var l1BatchEvents []*orm.BatchEvent
 	for _, vlog := range logs {
 		switch vlog.Topics[0] {
@@ -261,11 +267,28 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
 				log.Error("Failed to unpack CommitBatch event", "err", err)
 				return nil, err
 			}
-			commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
-			if err != nil || isPending {
-				log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
-				return nil, err
+
+			// Get transaction, reuse if it's the same as previous
+			var commitTx *types.Transaction
+			if lastTxHash == vlog.TxHash && lastTx != nil {
+				commitTx = lastTx
+			} else {
+				log.Debug("Fetching commit batch transaction", "txHash", vlog.TxHash.String())
+
+				// Create 10-second timeout context for transaction fetch
+				txCtx, txCancel := context.WithTimeout(ctx, 10*time.Second)
+				fetchedTx, isPending, err := client.TransactionByHash(txCtx, vlog.TxHash)
+				txCancel()
+
+				if err != nil || isPending {
+					log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
+					return nil, err
+				}
+				commitTx = fetchedTx
+				lastTxHash = vlog.TxHash
+				lastTx = commitTx
 			}
+
 			version, startBlock, endBlock, err := utils.GetBatchVersionAndBlockRangeFromCalldata(commitTx.Data())
 			if err != nil {
 				log.Error("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
@@ -305,7 +328,13 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
 				return nil, fmt.Errorf("batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex, event.BatchHash.String(), calculatedBatch.Hash().String())
 			}
 
-			blocks, err := e.getBatchBlockRangeFromBlob(ctx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
+			log.Debug("Processing blob data", "blobVersionedHash", blobVersionedHash.String(), "batchIndex", event.BatchIndex.Uint64(), "currentIndex", currentIndex)
+
+			// Create 20-second timeout context for blob processing
+			blobCtx, blobCancel := context.WithTimeout(ctx, 20*time.Second)
+			blocks, err := e.getBatchBlockRangeFromBlob(blobCtx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
+			blobCancel()
+
 			if err != nil {
 				return nil, fmt.Errorf("failed to process versioned blob, blobVersionedHash: %s, block number: %d, blob index: %d, err: %w",
 					blobVersionedHash.String(), vlog.BlockNumber, currentIndex, err)
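Two patterns recur in this hunk: a per-call `context.WithTimeout` so one slow RPC cannot stall the whole log-parsing loop, and a one-entry cache keyed on the previous transaction hash, which pays off because consecutive CommitBatch events often come from the same commit transaction. A stripped-down sketch of the combination; the fetch function is a placeholder, not the repo's API:

```go
package parser

import (
	"context"
	"time"
)

// fetchFunc is a placeholder for an RPC call such as client.TransactionByHash.
type fetchFunc func(ctx context.Context, key string) (string, error)

// cachedFetcher wraps fetch with a 10-second per-call timeout and reuses the
// previous result when the same key is requested twice in a row.
type cachedFetcher struct {
	fetch    fetchFunc
	lastKey  string
	lastVal  string
	haveLast bool
}

func (c *cachedFetcher) get(ctx context.Context, key string) (string, error) {
	if c.haveLast && c.lastKey == key {
		return c.lastVal, nil // consecutive events from the same tx: no RPC
	}
	callCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	val, err := c.fetch(callCtx, key)
	if err != nil {
		return "", err
	}
	c.lastKey, c.lastVal, c.haveLast = key, val, true
	return val, nil
}
```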
@@ -154,10 +154,10 @@ func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, s
 	db := c.db.WithContext(ctx)
 	db = db.Model(&CrossMessage{})
 	db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
-	db = db.Where("tx_status = ?", types.TxStatusTypeSent)
+	db = db.Where("tx_status in (?)", []types.TxStatusType{types.TxStatusTypeSent, types.TxStatusTypeFailedRelayed, types.TxStatusTypeRelayTxReverted})
 	db = db.Where("sender = ?", sender)
 	db = db.Order("block_timestamp desc")
-	db = db.Limit(500)
+	db = db.Limit(10000)
 	if err := db.Find(&messages).Error; err != nil {
 		return nil, fmt.Errorf("failed to get L2 claimable withdrawal messages by sender address, sender: %v, error: %w", sender, err)
 	}
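The status filter widens from a single equality to an IN list, so withdrawals whose relay failed or reverted still surface as unclaimed. With GORM, passing a slice to a `?` placeholder expands into an IN clause; a hedged sketch under that assumption (the status constants here are illustrative stand-ins for the repo's own):

```go
package orm

import "gorm.io/gorm"

type TxStatusType int

// Illustrative constants; the repo defines its own values.
const (
	TxStatusTypeSent TxStatusType = iota
	TxStatusTypeFailedRelayed
	TxStatusTypeRelayTxReverted
)

// unclaimedStatuses are all states in which a withdrawal still needs claiming.
var unclaimedStatuses = []TxStatusType{
	TxStatusTypeSent,
	TxStatusTypeFailedRelayed,
	TxStatusTypeRelayTxReverted,
}

// applyUnclaimedFilter narrows a query to unclaimed withdrawals for a sender.
// GORM expands the slice argument into "tx_status IN (?, ?, ?)".
func applyUnclaimedFilter(db *gorm.DB, sender string) *gorm.DB {
	return db.
		Where("tx_status in (?)", unclaimedStatuses).
		Where("sender = ?", sender).
		Order("block_timestamp desc").
		Limit(10000)
}
```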
@@ -1,6 +0,0 @@
-[patch."https://github.com/scroll-tech/scroll.git"]
-scroll-zkvm-circuit-input-types-base = { path = "../common/types-rs/base"}
-scroll-zkvm-circuit-input-types-aggregation = { path = "../common/types-rs/aggregation"}
-scroll-zkvm-circuit-input-types-chunk = { path = "../common/types-rs/chunk"}
-scroll-zkvm-circuit-input-types-batch = { path = "../common/types-rs/batch"}
-scroll-zkvm-circuit-input-types-bundle = { path = "../common/types-rs/bundle"}
build/common.mk (new file, 16 lines)
@@ -0,0 +1,16 @@
+UNAME_S := $(shell uname -s)
+IS_DARWIN := $(findstring Darwin,$(UNAME_S))
+
+SHLIB_EXT := so
+ifeq ($(UNAME_S),Darwin)
+    SHLIB_EXT := dylib
+endif
+
+LIB_ZKP_NAME := libzkp.$(SHLIB_EXT)
+
+define macos_codesign
+	@if [ -n "$(IS_DARWIN)" ]; then \
+		codesign --force --sign - '$(1)'; \
+		codesign --verify --deep --verbose '$(1)'; \
+	fi
+endef
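build/common.mk picks the shared-library suffix per platform and ad-hoc signs binaries only on macOS, which is what makes the fix/macos branch link `libzkp.dylib` instead of `libzkp.so`. For reference, the same dispatch expressed as a Go sketch (the codesign flags mirror the Makefile's; the package and function names are illustrative):

```go
package buildutil

import (
	"fmt"
	"os/exec"
	"runtime"
)

// shlibExt returns the shared-library suffix the Makefile would choose,
// e.g. "libzkp." + shlibExt().
func shlibExt() string {
	if runtime.GOOS == "darwin" {
		return "dylib"
	}
	return "so"
}

// macosCodesign ad-hoc signs and verifies a binary, but only on macOS,
// matching the macos_codesign define in build/common.mk.
func macosCodesign(path string) error {
	if runtime.GOOS != "darwin" {
		return nil // no-op elsewhere, like the IS_DARWIN guard
	}
	if out, err := exec.Command("codesign", "--force", "--sign", "-", path).CombinedOutput(); err != nil {
		return fmt.Errorf("codesign sign failed: %v: %s", err, out)
	}
	if out, err := exec.Command("codesign", "--verify", "--deep", "--verbose", path).CombinedOutput(); err != nil {
		return fmt.Errorf("codesign verify failed: %v: %s", err, out)
	}
	return nil
}
```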
build/dockerfiles/blob_uploader.Dockerfile (new file, 30 lines)
@@ -0,0 +1,30 @@
+# Download Go dependencies
+FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
+
+WORKDIR /src
+COPY go.work* ./
+COPY ./rollup/go.* ./rollup/
+COPY ./common/go.* ./common/
+COPY ./coordinator/go.* ./coordinator/
+COPY ./database/go.* ./database/
+COPY ./tests/integration-test/go.* ./tests/integration-test/
+COPY ./bridge-history-api/go.* ./bridge-history-api/
+RUN go mod download -x
+
+# Build blob_uploader
+FROM base as builder
+
+RUN --mount=target=. \
+    --mount=type=cache,target=/root/.cache/go-build \
+    cd /src/rollup/cmd/blob_uploader/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/blob_uploader
+
+# Pull blob_uploader into a second stage deploy ubuntu container
+FROM ubuntu:20.04
+
+RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
+
+ENV CGO_LDFLAGS="-ldl"
+
+COPY --from=builder /bin/blob_uploader /bin/
+WORKDIR /app
+ENTRYPOINT ["blob_uploader"]
build/dockerfiles/blob_uploader.Dockerfile.dockerignore (new file, 5 lines)
@@ -0,0 +1,5 @@
+assets/
+docs/
+l2geth/
+rpc-gateway/
+*target/*
@@ -1,26 +1,25 @@
 # Build libzkp dependency
-FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as chef
+FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as chef
 WORKDIR app
 
 FROM chef as planner
-COPY ./common/libzkp/impl/ .
+COPY ./crates/ ./crates/
+COPY ./Cargo.* ./
+COPY ./rust-toolchain ./
 RUN cargo chef prepare --recipe-path recipe.json
 
 FROM chef as zkp-builder
-COPY ./common/libzkp/impl/rust-toolchain ./
+COPY ./rust-toolchain ./
 COPY --from=planner /app/recipe.json recipe.json
-# run ./build/dockerfiles/coordinator-api/init-openvm.sh to get openvm-gpu
-COPY ./build/dockerfiles/coordinator-api/openvm-gpu /openvm-gpu
-COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
-COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
 RUN cargo chef cook --release --recipe-path recipe.json
 
-COPY ./common/libzkp/impl .
-RUN cargo build --release
+COPY ./crates/ ./crates/
+COPY ./Cargo.* ./
+COPY .git .git
+RUN cargo build --release -p libzkp-c
 
 
 # Download Go dependencies
-FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
+FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as base
 WORKDIR /src
 COPY go.work* ./
 COPY ./rollup/go.* ./rollup/
@@ -35,9 +34,9 @@ RUN go mod download -x
 # Build coordinator
 FROM base as builder
 COPY . .
-RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
-COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
-RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/
+COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/libzkp/lib/
+RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api && mv ./build/bin/coordinator_api /bin/coordinator_api
+RUN mv coordinator/internal/logic/libzkp/lib /bin/
 
 # Pull coordinator into a second stage deploy ubuntu container
 FROM ubuntu:20.04
@@ -4,3 +4,5 @@ docs/
 l2geth/
 rpc-gateway/
 *target/*
+
+permissionless-batches/conf/
@@ -1,24 +0,0 @@
-# openvm
-# same order and features as zkvm-prover/Cargo.toml.gpu
-[patch."ssh://git@github.com/scroll-tech/openvm-gpu.git"]
-openvm = { path = "/openvm-gpu/crates/toolchain/openvm", default-features = false }
-openvm-algebra-complex-macros = { path = "/openvm-gpu/extensions/algebra/complex-macros", default-features = false }
-openvm-algebra-guest = { path = "/openvm-gpu/extensions/algebra/guest", default-features = false }
-openvm-bigint-guest = { path = "/openvm-gpu/extensions/bigint/guest", default-features = false }
-openvm-build = { path = "/openvm-gpu/crates/toolchain/build", default-features = false }
-openvm-circuit = { path = "/openvm-gpu/crates/vm", default-features = false }
-openvm-custom-insn = { path = "/openvm-gpu/crates/toolchain/custom_insn", default-features = false }
-openvm-continuations = { path = "/openvm-gpu/crates/continuations", default-features = false }
-openvm-ecc-guest = { path = "/openvm-gpu/extensions/ecc/guest", default-features = false }
-openvm-instructions ={ path = "/openvm-gpu/crates/toolchain/instructions", default-features = false }
-openvm-keccak256-guest = { path = "/openvm-gpu/extensions/keccak256/guest", default-features = false }
-openvm-native-circuit = { path = "/openvm-gpu/extensions/native/circuit", default-features = false }
-openvm-native-compiler = { path = "/openvm-gpu/extensions/native/compiler", default-features = false }
-openvm-native-recursion = { path = "/openvm-gpu/extensions/native/recursion", default-features = false }
-openvm-native-transpiler = { path = "/openvm-gpu/extensions/native/transpiler", default-features = false }
-openvm-pairing-guest = { path = "/openvm-gpu/extensions/pairing/guest", default-features = false }
-openvm-rv32im-guest = { path = "/openvm-gpu/extensions/rv32im/guest", default-features = false }
-openvm-rv32im-transpiler = { path = "/openvm-gpu/extensions/rv32im/transpiler", default-features = false }
-openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics"] }
-openvm-sha256-guest = { path = "/openvm-gpu/extensions/sha256/guest", default-features = false }
-openvm-transpiler = { path = "/openvm-gpu/crates/toolchain/transpiler", default-features = false }
@@ -1,2 +0,0 @@
-[url "https://github.com/"]
-	insteadOf = ssh://git@github.com/
@@ -1,12 +0,0 @@
-#!/bin/bash
-set -uex
-
-OPENVM_GPU_COMMIT=dfa10b4
-
-DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
-
-# checkout openvm-gpu
-if [ ! -d $DIR/openvm-gpu ]; then
-    git clone git@github.com:scroll-tech/openvm-gpu.git $DIR/openvm-gpu
-fi
-cd $DIR/openvm-gpu && git fetch && git checkout ${OPENVM_GPU_COMMIT}
@@ -4,3 +4,5 @@ docs/
 l2geth/
 rpc-gateway/
 *target/*
+
+permissionless-batches/conf/
@@ -4,3 +4,5 @@ docs/
 l2geth/
 rpc-gateway/
 *target/*
+
+permissionless-batches/conf/
@@ -1,5 +1,8 @@
 assets/
+contracts/
 docs/
 l2geth/
 rpc-gateway/
 *target/*
+
+permissionless-batches/conf/
@@ -18,6 +18,6 @@ RUN cd /src/zkvm-prover && make prover
 
 FROM ubuntu:24.04 AS runtime
 
-COPY --from=builder /src/zkvm-prover/target/release/prover /usr/local/bin/
+COPY --from=builder /src/target/release/prover /usr/local/bin/
 
 ENTRYPOINT ["prover"]
build/dockerfiles/recovery_permissionless_batches.Dockerfile (new file, 30 lines)
@@ -0,0 +1,30 @@
+# Download Go dependencies
+FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
+
+WORKDIR /src
+COPY go.work* ./
+COPY ./rollup/go.* ./rollup/
+COPY ./common/go.* ./common/
+COPY ./coordinator/go.* ./coordinator/
+COPY ./database/go.* ./database/
+COPY ./tests/integration-test/go.* ./tests/integration-test/
+COPY ./bridge-history-api/go.* ./bridge-history-api/
+RUN go mod download -x
+
+# Build rollup_relayer
+FROM base as builder
+
+RUN --mount=target=. \
+    --mount=type=cache,target=/root/.cache/go-build \
+    cd /src/rollup/cmd/permissionless_batches/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer
+
+# Pull rollup_relayer into a second stage deploy ubuntu container
+FROM ubuntu:20.04
+
+RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
+
+ENV CGO_LDFLAGS="-ldl"
+
+COPY --from=builder /bin/rollup_relayer /bin/
+WORKDIR /app
+ENTRYPOINT ["rollup_relayer"]
@@ -0,0 +1,8 @@
+assets/
+contracts/
+docs/
+l2geth/
+rpc-gateway/
+*target/*
+
+permissionless-batches/conf/
@@ -1,5 +1,8 @@
 assets/
+contracts/
 docs/
 l2geth/
 rpc-gateway/
 *target/*
+
+permissionless-batches/conf/
common/.gitignore (vendored, 3 changed lines)
@@ -1,4 +1,3 @@
 /build/bin
 .idea
-libzkp/impl/target
-libzkp/interface/*.a
+libzkp
@@ -4,5 +4,4 @@ test:
 	go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
 
 lint: ## Lint the files - used for CI
-	GOBIN=$(PWD)/build/bin go run ../build/lint.go
-	cd libzkp/impl && cargo fmt --all -- --check && cargo clippy --release -- -D warnings
+	GOBIN=$(PWD)/build/bin go run ../build/lint.go
@@ -41,7 +41,7 @@ func (g *gormLogger) Error(_ context.Context, msg string, data ...interface{}) {
 func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
 	elapsed := time.Since(begin)
 	sql, rowsAffected := fc()
-	g.gethLogger.Debug("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
+	g.gethLogger.Trace("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
 }
 
 // InitDB init the db handler
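The gorm logger change demotes per-query logging from Debug to Trace, so routine SQL spam only appears at the most verbose setting. The shape of the hook in isolation, as a small sketch (the logger interface here is an illustrative stand-in for the geth logger):

```go
package database

import "time"

// leveledLogger is an illustrative stand-in for the geth logger used above.
type leveledLogger interface {
	Debug(msg string, kv ...interface{})
	Trace(msg string, kv ...interface{})
}

// traceQuery logs one executed statement at trace level, as the diff now does.
func traceQuery(l leveledLogger, begin time.Time, sql string, rows int64, err error) {
	elapsed := time.Since(begin)
	l.Trace("gorm", "cost", elapsed, "sql", sql, "rowsAffected", rows, "err", err)
}
```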
@@ -15,7 +15,7 @@ require (
 	github.com/modern-go/reflect2 v1.0.2
 	github.com/orcaman/concurrent-map v1.0.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
 	github.com/stretchr/testify v1.10.0
 	github.com/testcontainers/testcontainers-go v0.30.0
 	github.com/testcontainers/testcontainers-go/modules/compose v0.30.0
@@ -64,7 +64,7 @@ require (
 	github.com/containerd/typeurl/v2 v2.1.1 // indirect
 	github.com/cpuguy83/dockercfg v0.3.1 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
-	github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
+	github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
 	github.com/distribution/reference v0.5.0 // indirect
@@ -79,7 +79,7 @@ require (
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/edsrzf/mmap-go v1.0.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.10.1 // indirect
-	github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
+	github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fjl/memsize v0.0.2 // indirect
 	github.com/fsnotify/fsevents v0.1.1 // indirect
@@ -184,7 +184,7 @@ require (
 	github.com/rjeczalik/notify v0.9.1 // indirect
 	github.com/rs/cors v1.7.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 // indirect
+	github.com/scroll-tech/da-codec v0.9.0 // indirect
 	github.com/scroll-tech/zktrie v0.8.4 // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
 	github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
@@ -198,7 +198,7 @@ require (
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/spf13/viper v1.4.0 // indirect
 	github.com/status-im/keycard-go v0.2.0 // indirect
-	github.com/supranational/blst v0.3.13 // indirect
+	github.com/supranational/blst v0.3.15 // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
 	github.com/theupdateframework/notary v0.7.0 // indirect
 	github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 // indirect
@@ -155,8 +155,8 @@ github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoY
 github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
 github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
-github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
+github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
+github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
@@ -214,8 +214,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
 github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
 github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
 github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
-github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
-github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
+github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
+github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
@@ -636,10 +636,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
-github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
+github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
+github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
@@ -707,8 +707,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
-github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
+github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
+github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
 github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E=
common/libzkp/impl/Cargo.lock (generated, 7197 lines): file diff suppressed because it is too large.
@@ -1,28 +0,0 @@
[package]
name = "zkp"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["cdylib"]

[dependencies]
scroll-zkvm-prover.workspace = true
scroll-zkvm-verifier.workspace = true

env_logger = "0.11.0"
libc = "0.2"
log = "0.4"
base64.workspace = true
once_cell.workspace = true
serde.workspace = true
serde_derive = "1.0"
serde_json.workspace = true
anyhow = "1"

[profile.test]
opt-level = 3

[profile.release]
opt-level = 3
@@ -1,11 +0,0 @@
.PHONY: help fmt clippy test test-ci test-all

build:
	@cargo build --release

fmt:
	@cargo fmt --all -- --check

clippy:
	@cargo check --all-features
	@cargo clippy --release -- -D warnings
@@ -1 +0,0 @@
nightly-2024-12-06
@@ -1,76 +0,0 @@
mod utils;
mod verifier;

use std::path::Path;

use crate::utils::{c_char_to_str, c_char_to_vec};
use libc::c_char;
use verifier::{TaskType, VerifierConfig};

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init(config: *const c_char) {
    let config_str = c_char_to_str(config);
    let verifier_config = serde_json::from_str::<VerifierConfig>(config_str).unwrap();
    verifier::init(verifier_config);
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_chunk_proof(
    proof: *const c_char,
    fork_name: *const c_char,
) -> c_char {
    verify_proof(proof, fork_name, TaskType::Chunk)
}

fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
    let fork_name_str = c_char_to_str(fork_name);
    let proof = c_char_to_vec(proof);
    let verifier = verifier::get_verifier(fork_name_str);

    // Borrow here so that `verifier` can still be consumed by `unwrap` below.
    if let Err(e) = &verifier {
        log::warn!("failed to get verifier, error: {:#}", e);
        return 0 as c_char;
    }
    match verifier.unwrap().verify(task_type, proof) {
        Err(e) => {
            log::error!("{:?} verify failed, error: {:#}", task_type, e);
            false as c_char
        }
        Ok(result) => result as c_char,
    }
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(
    proof: *const c_char,
    fork_name: *const c_char,
) -> c_char {
    verify_proof(proof, fork_name, TaskType::Batch)
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_bundle_proof(
    proof: *const c_char,
    fork_name: *const c_char,
) -> c_char {
    verify_proof(proof, fork_name, TaskType::Bundle)
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn dump_vk(fork_name: *const c_char, file: *const c_char) {
    _dump_vk(fork_name, file);
}

fn _dump_vk(fork_name: *const c_char, file: *const c_char) {
    let fork_name_str = c_char_to_str(fork_name);
    let verifier = verifier::get_verifier(fork_name_str);

    if let Ok(verifier) = verifier {
        verifier.as_ref().dump_vk(Path::new(c_char_to_str(file)));
    }
}
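For reference, a minimal host-side sketch of how this C ABI might be driven. This is not part of the diff: the `extern` block mirrors the exported symbols above, the JSON shape follows `VerifierConfig`, and the paths and proof payload are placeholders.

```rust
use std::ffi::CString;
use std::os::raw::c_char;

extern "C" {
    fn init(config: *const c_char);
    fn verify_chunk_proof(proof: *const c_char, fork_name: *const c_char) -> c_char;
}

fn main() {
    // Placeholder paths; field names follow VerifierConfig/CircuitConfig above.
    let config = CString::new(
        r#"{"low_version_circuit":{"fork_name":"euclid","params_path":"./params","assets_path":"./assets/euclid"},"high_version_circuit":{"fork_name":"euclidV2","params_path":"./params","assets_path":"./assets/euclidv2"}}"#,
    )
    .unwrap();
    let proof = CString::new("{}").unwrap(); // placeholder: JSON-serialized ChunkProof
    let fork = CString::new("euclidV2").unwrap();

    unsafe {
        init(config.as_ptr());
        // Returns 1 on success, 0 on failure (see verify_proof above).
        let ok = verify_chunk_proof(proof.as_ptr(), fork.as_ptr());
        println!("chunk proof valid: {}", ok != 0);
    }
}
```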
@@ -1,27 +0,0 @@
use std::{
    ffi::CStr,
    os::raw::c_char,
    panic::{catch_unwind, AssertUnwindSafe},
};

pub(crate) fn c_char_to_str(c: *const c_char) -> &'static str {
    let cstr = unsafe { CStr::from_ptr(c) };
    cstr.to_str().unwrap()
}

pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec<u8> {
    let cstr = unsafe { CStr::from_ptr(c) };
    cstr.to_bytes().to_vec()
}

pub(crate) fn panic_catch<F: FnOnce() -> R, R>(f: F) -> Result<R, String> {
    catch_unwind(AssertUnwindSafe(f)).map_err(|err| {
        if let Some(s) = err.downcast_ref::<String>() {
            s.to_string()
        } else if let Some(s) = err.downcast_ref::<&str>() {
            s.to_string()
        } else {
            format!("unable to get panic info {err:?}")
        }
    })
}
@@ -1,88 +0,0 @@
#![allow(static_mut_refs)]

mod euclid;
mod euclidv2;

use anyhow::{bail, Result};
use euclid::EuclidVerifier;
use euclidv2::EuclidV2Verifier;
use serde::{Deserialize, Serialize};
use std::{cell::OnceCell, path::Path, rc::Rc};

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum TaskType {
    Chunk,
    Batch,
    Bundle,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct VKDump {
    pub chunk_vk: String,
    pub batch_vk: String,
    pub bundle_vk: String,
}

pub trait ProofVerifier {
    fn verify(&self, task_type: TaskType, proof: Vec<u8>) -> Result<bool>;
    fn dump_vk(&self, file: &Path);
}

#[derive(Debug, Serialize, Deserialize)]
pub struct CircuitConfig {
    pub fork_name: String,
    pub params_path: String,
    pub assets_path: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct VerifierConfig {
    pub low_version_circuit: CircuitConfig,
    pub high_version_circuit: CircuitConfig,
}

type HardForkName = String;

struct VerifierPair(HardForkName, Rc<Box<dyn ProofVerifier>>);

static mut VERIFIER_LOW: OnceCell<VerifierPair> = OnceCell::new();
static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();

pub fn init(config: VerifierConfig) {
    let verifier = EuclidVerifier::new(&config.high_version_circuit.assets_path);
    unsafe {
        VERIFIER_LOW
            .set(VerifierPair(
                "euclid".to_string(),
                Rc::new(Box::new(verifier)),
            ))
            .unwrap_unchecked();
    }

    let verifier = EuclidV2Verifier::new(&config.high_version_circuit.assets_path);
    unsafe {
        VERIFIER_HIGH
            .set(VerifierPair(
                "euclidV2".to_string(),
                Rc::new(Box::new(verifier)),
            ))
            .unwrap_unchecked();
    }
}

pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
    unsafe {
        if let Some(verifier) = VERIFIER_LOW.get() {
            if verifier.0 == fork_name {
                return Ok(verifier.1.clone());
            }
        }

        if let Some(verifier) = VERIFIER_HIGH.get() {
            if verifier.0 == fork_name {
                return Ok(verifier.1.clone());
            }
        }
    }
    bail!("failed to get verifier, key not found, {}", fork_name)
}
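A short sketch of the intended call flow for the registry above, not part of the diff. The asset paths are assumptions; the fork names are the ones registered in `init`.

```rust
fn verify_one(proof_bytes: Vec<u8>) -> anyhow::Result<bool> {
    // Assumed asset locations; the JSON shape mirrors VerifierConfig above.
    let config: VerifierConfig = serde_json::from_str(
        r#"{
            "low_version_circuit":  {"fork_name": "euclid",   "params_path": "./params", "assets_path": "./assets/euclid"},
            "high_version_circuit": {"fork_name": "euclidV2", "params_path": "./params", "assets_path": "./assets/euclidv2"}
        }"#,
    )?;
    init(config);

    // Lookup is by the hard-fork name registered in `init` ("euclid" / "euclidV2").
    let verifier = get_verifier("euclidV2")?;
    verifier.verify(TaskType::Chunk, proof_bytes)
}
```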
@@ -1,66 +0,0 @@
use super::{ProofVerifier, TaskType, VKDump};

use anyhow::Result;

use crate::utils::panic_catch;
use scroll_zkvm_prover::{BatchProof, BundleProof, ChunkProof};
use scroll_zkvm_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV1, ChunkVerifier};
use std::{fs::File, path::Path};

pub struct EuclidVerifier {
    chunk_verifier: ChunkVerifier,
    batch_verifier: BatchVerifier,
    bundle_verifier: BundleVerifierEuclidV1,
}

impl EuclidVerifier {
    pub fn new(assets_dir: &str) -> Self {
        let verifier_bin = Path::new(assets_dir).join("verifier.bin");
        let config = Path::new(assets_dir).join("root-verifier-vm-config");
        let exe = Path::new(assets_dir).join("root-verifier-committed-exe");

        Self {
            chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
                .expect("Setting up chunk verifier"),
            batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
                .expect("Setting up batch verifier"),
            bundle_verifier: BundleVerifierEuclidV1::setup(&config, &exe, &verifier_bin)
                .expect("Setting up bundle verifier"),
        }
    }
}

impl ProofVerifier for EuclidVerifier {
    fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
        panic_catch(|| match task_type {
            TaskType::Chunk => {
                let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
                self.chunk_verifier
                    .verify_proof(proof.proof.as_root_proof().unwrap())
            }
            TaskType::Batch => {
                let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
                self.batch_verifier
                    .verify_proof(proof.proof.as_root_proof().unwrap())
            }
            TaskType::Bundle => {
                let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
                self.bundle_verifier
                    .verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
            }
        })
        .map_err(|err_str: String| anyhow::anyhow!(err_str))
    }

    fn dump_vk(&self, file: &Path) {
        use base64::{prelude::BASE64_STANDARD, Engine};
        let f = File::create(file).expect("Failed to open file to dump VK");

        let dump = VKDump {
            chunk_vk: BASE64_STANDARD.encode(self.chunk_verifier.get_app_vk()),
            batch_vk: BASE64_STANDARD.encode(self.batch_verifier.get_app_vk()),
            bundle_vk: BASE64_STANDARD.encode(self.bundle_verifier.get_app_vk()),
        };
        serde_json::to_writer(f, &dump).expect("Failed to dump VK");
    }
}
@@ -1,66 +0,0 @@
use super::{ProofVerifier, TaskType, VKDump};

use anyhow::Result;

use crate::utils::panic_catch;
use scroll_zkvm_prover::{BatchProof, BundleProof, ChunkProof};
use scroll_zkvm_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
use std::{fs::File, path::Path};

pub struct EuclidV2Verifier {
    chunk_verifier: ChunkVerifier,
    batch_verifier: BatchVerifier,
    bundle_verifier: BundleVerifierEuclidV2,
}

impl EuclidV2Verifier {
    pub fn new(assets_dir: &str) -> Self {
        let verifier_bin = Path::new(assets_dir).join("verifier.bin");
        let config = Path::new(assets_dir).join("root-verifier-vm-config");
        let exe = Path::new(assets_dir).join("root-verifier-committed-exe");

        Self {
            chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
                .expect("Setting up chunk verifier"),
            batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
                .expect("Setting up batch verifier"),
            bundle_verifier: BundleVerifierEuclidV2::setup(&config, &exe, &verifier_bin)
                .expect("Setting up bundle verifier"),
        }
    }
}

impl ProofVerifier for EuclidV2Verifier {
    fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
        panic_catch(|| match task_type {
            TaskType::Chunk => {
                let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
                self.chunk_verifier
                    .verify_proof(proof.proof.as_root_proof().unwrap())
            }
            TaskType::Batch => {
                let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
                self.batch_verifier
                    .verify_proof(proof.proof.as_root_proof().unwrap())
            }
            TaskType::Bundle => {
                let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
                self.bundle_verifier
                    .verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
            }
        })
        .map_err(|err_str: String| anyhow::anyhow!(err_str))
    }

    fn dump_vk(&self, file: &Path) {
        use base64::{prelude::BASE64_STANDARD, Engine};
        let f = File::create(file).expect("Failed to open file to dump VK");

        let dump = VKDump {
            chunk_vk: BASE64_STANDARD.encode(self.chunk_verifier.get_app_vk()),
            batch_vk: BASE64_STANDARD.encode(self.batch_verifier.get_app_vk()),
            bundle_vk: BASE64_STANDARD.encode(self.bundle_verifier.get_app_vk()),
        };
        serde_json::to_writer(f, &dump).expect("Failed to dump VK");
    }
}
@@ -1,12 +0,0 @@
// BatchVerifier is used to:
// - Verify a batch proof
// - Verify a bundle proof
void init(char* config);

char verify_batch_proof(char* proof, char* fork_name);

char verify_bundle_proof(char* proof, char* fork_name);

char verify_chunk_proof(char* proof, char* fork_name);

void dump_vk(char* fork_name, char* file);
@@ -10,6 +10,7 @@ import (
	"time"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rpc"
	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/modules/compose"
	"github.com/testcontainers/testcontainers-go/modules/postgres"
@@ -220,11 +221,21 @@ func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {

// GetL2GethClient returns an ethclient by dialing the running L2Geth
func (t *TestcontainerApps) GetL2GethClient() (*ethclient.Client, error) {
	rpcCli, err := t.GetL2Client()
	if err != nil {
		return nil, err
	}
	return ethclient.NewClient(rpcCli), nil
}

// GetL2Client returns an rpc client by dialing the running L2Geth
func (t *TestcontainerApps) GetL2Client() (*rpc.Client, error) {
	endpoint, err := t.GetL2GethEndPoint()
	if err != nil {
		return nil, err
	}
-	client, err := ethclient.Dial(endpoint)
+	client, err := rpc.Dial(endpoint)
	if err != nil {
		return nil, err
	}
common/testdata/blobdata.json (vendored, new file, 4 lines): file diff suppressed because one or more lines are too long.
@@ -1,17 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"

[dependencies]
types-base = { path = "base", package = "scroll-zkvm-circuit-input-types-base" }
types-agg = { path = "aggregation", package = "scroll-zkvm-circuit-input-types-aggregation" }
types-chunk = { path = "chunk", package = "scroll-zkvm-circuit-input-types-chunk" }
types-batch = { path = "batch", package = "scroll-zkvm-circuit-input-types-batch" }
types-bundle = { path = "bundle", package = "scroll-zkvm-circuit-input-types-bundle" }
@@ -1,24 +0,0 @@
# Input Types for circuits

A series of separate crates for the input types accepted by circuits.

These crates help decouple the circuits from other crates and keep their dependencies neat and controllable, avoiding indirect dependencies on crates that are not compatible with the openvm toolchain.

### Code structure
```
types-rs
│
├── base
│
├── circuit
│
├── aggregation
│
<following are layer-oriented crates>
│
├── chunk
│
├── batch
│
└── bundle
```
@@ -1,14 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types-aggregation"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"

[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
rkyv.workspace = true
serde.workspace = true
@@ -1,81 +0,0 @@
/// Represents an openvm program's commitments and public values.
#[derive(
    Clone,
    Debug,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct AggregationInput {
    /// Public values.
    pub public_values: Vec<u32>,
    /// Represents the commitment needed to verify a root proof.
    pub commitment: ProgramCommitment,
}

/// Represents the commitment needed to verify a [`RootProof`].
#[derive(
    Clone,
    Debug,
    Default,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct ProgramCommitment {
    /// The commitment to the child program exe.
    pub exe: [u32; 8],
    /// The commitment to the child program leaf.
    pub leaf: [u32; 8],
}

impl ProgramCommitment {
    pub fn deserialize(commitment_bytes: &[u8]) -> Self {
        // TODO: temporary skip deserialize if no vk is provided
        if commitment_bytes.is_empty() {
            return Default::default();
        }

        let archived_data =
            rkyv::access::<ArchivedProgramCommitment, rkyv::rancor::BoxedError>(commitment_bytes)
                .unwrap();

        Self {
            exe: archived_data.exe.map(|u32_le| u32_le.to_native()),
            leaf: archived_data.leaf.map(|u32_le| u32_le.to_native()),
        }
    }

    pub fn serialize(&self) -> Vec<u8> {
        rkyv::to_bytes::<rkyv::rancor::BoxedError>(self)
            .map(|v| v.to_vec())
            .unwrap()
    }
}

impl From<&ArchivedProgramCommitment> for ProgramCommitment {
    fn from(archived: &ArchivedProgramCommitment) -> Self {
        Self {
            exe: archived.exe.map(|u32_le| u32_le.to_native()),
            leaf: archived.leaf.map(|u32_le| u32_le.to_native()),
        }
    }
}

/// Number of public-input values, i.e. [u32; N].
///
/// Note that the actual value for each u32 is a byte.
pub const NUM_PUBLIC_VALUES: usize = 32;

/// Witness for an [`AggregationCircuit`][AggCircuit] that also carries proofs that are being
/// aggregated.
pub trait ProofCarryingWitness {
    /// Get the root proofs from the witness.
    fn get_proofs(&self) -> Vec<AggregationInput>;
}
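A minimal round-trip check for the (de)serialization helpers above, not part of the diff; it exercises only the API shown:

```rust
#[test]
fn program_commitment_round_trip() {
    let commitment = ProgramCommitment {
        exe: [0xaa; 8],
        leaf: [0xbb; 8],
    };
    let bytes = commitment.serialize();
    let decoded = ProgramCommitment::deserialize(&bytes);
    assert_eq!(decoded.exe, commitment.exe);
    assert_eq!(decoded.leaf, commitment.leaf);

    // The empty-input escape hatch (see the TODO above) yields the default commitment.
    assert_eq!(ProgramCommitment::deserialize(&[]).exe, [0u32; 8]);
}
```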
@@ -1,21 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types-base"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"

[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
alloy-serde.workspace = true
rkyv.workspace = true
serde.workspace = true
itertools.workspace = true
tiny-keccak = { workspace = true }
sha3 = "0.10.8"
sha2 = "0.10.8"

[features]
default = []
@@ -1,2 +0,0 @@
pub mod public_inputs;
pub mod utils;
@@ -1,81 +0,0 @@
use alloy_primitives::B256;
pub mod batch;
pub mod bundle;
pub mod chunk;

/// Defines behaviour to be implemented by types representing the public-input values of a circuit.
pub trait PublicInputs {
    /// Keccak-256 digest of the public inputs. The public-input hash is revealed as public values
    /// via [`openvm::io::reveal`].
    fn pi_hash(&self) -> B256;

    /// Validation logic between public inputs of two contiguous instances.
    fn validate(&self, prev_pi: &Self);
}

#[derive(
    Default,
    Debug,
    Copy,
    Clone,
    PartialEq,
    Eq,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub enum ForkName {
    #[default]
    EuclidV1,
    EuclidV2,
}

impl From<&ArchivedForkName> for ForkName {
    fn from(archived: &ArchivedForkName) -> Self {
        match archived {
            ArchivedForkName::EuclidV1 => ForkName::EuclidV1,
            ArchivedForkName::EuclidV2 => ForkName::EuclidV2,
        }
    }
}

impl From<Option<&str>> for ForkName {
    fn from(value: Option<&str>) -> Self {
        match value {
            None => Default::default(),
            Some("euclidv1") => ForkName::EuclidV1,
            Some("euclidv2") => ForkName::EuclidV2,
            Some(s) => unreachable!("hardfork not accepted: {s}"),
        }
    }
}

impl From<&str> for ForkName {
    fn from(value: &str) -> Self {
        match value {
            "euclidv1" => ForkName::EuclidV1,
            "euclidv2" => ForkName::EuclidV2,
            s => unreachable!("hardfork not accepted: {s}"),
        }
    }
}

/// Helper trait that extends [`PublicInputs`] with fork-aware behaviour.
pub trait MultiVersionPublicInputs {
    fn pi_hash_by_fork(&self, fork_name: ForkName) -> B256;
    fn validate(&self, prev_pi: &Self, fork_name: ForkName);
}

impl<T: MultiVersionPublicInputs> PublicInputs for (T, ForkName) {
    fn pi_hash(&self) -> B256 {
        self.0.pi_hash_by_fork(self.1)
    }

    fn validate(&self, prev_pi: &Self) {
        assert_eq!(self.1, prev_pi.1);
        self.0.validate(&prev_pi.0, self.1)
    }
}
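A short sketch (not part of the diff) of how the blanket impl above is meant to be used: pairing a fork-versioned public-inputs type with its fork yields a plain `PublicInputs`.

```rust
use alloy_primitives::B256;

// Dispatches to pi_hash_by_fork(fork) via the `(T, ForkName)` blanket impl above.
fn digest_for_fork<T: MultiVersionPublicInputs>(pi: T, fork: ForkName) -> B256 {
    let versioned = (pi, fork);
    versioned.pi_hash()
}
```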
@@ -1,144 +0,0 @@
use alloy_primitives::B256;

use crate::{
    public_inputs::{ForkName, MultiVersionPublicInputs},
    utils::keccak256,
};

/// Represents public-input values for a batch.
#[derive(
    Clone,
    Debug,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct BatchInfo {
    /// The state root before applying the batch.
    #[rkyv()]
    pub parent_state_root: B256,
    /// The batch hash of the parent batch.
    #[rkyv()]
    pub parent_batch_hash: B256,
    /// The state root after applying txs in the batch.
    #[rkyv()]
    pub state_root: B256,
    /// The batch header hash of the batch.
    #[rkyv()]
    pub batch_hash: B256,
    /// The EIP-155 chain ID of all txs in the batch.
    #[rkyv()]
    pub chain_id: u64,
    /// The withdraw root of the last block in the last chunk in the batch.
    #[rkyv()]
    pub withdraw_root: B256,
    /// The L1 msg queue hash at the end of the previous batch.
    #[rkyv()]
    pub prev_msg_queue_hash: B256,
    /// The L1 msg queue hash at the end of the current batch.
    #[rkyv()]
    pub post_msg_queue_hash: B256,
}

impl From<&ArchivedBatchInfo> for BatchInfo {
    fn from(archived: &ArchivedBatchInfo) -> Self {
        Self {
            parent_state_root: archived.parent_state_root.into(),
            parent_batch_hash: archived.parent_batch_hash.into(),
            state_root: archived.state_root.into(),
            batch_hash: archived.batch_hash.into(),
            chain_id: archived.chain_id.into(),
            withdraw_root: archived.withdraw_root.into(),
            prev_msg_queue_hash: archived.prev_msg_queue_hash.into(),
            post_msg_queue_hash: archived.post_msg_queue_hash.into(),
        }
    }
}

impl BatchInfo {
    /// Public input hash for a batch (euclidv1 or da-codec@v6) is defined as
    ///
    /// keccak(
    ///     parent state root ||
    ///     parent batch hash ||
    ///     state root ||
    ///     batch hash ||
    ///     chain id ||
    ///     withdraw root
    /// )
    fn pi_hash_euclidv1(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(self.parent_state_root.as_slice())
                .chain(self.parent_batch_hash.as_slice())
                .chain(self.state_root.as_slice())
                .chain(self.batch_hash.as_slice())
                .chain(self.chain_id.to_be_bytes().as_slice())
                .chain(self.withdraw_root.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }

    /// Public input hash for a batch (euclidv2 or da-codec@v7) is defined as
    ///
    /// keccak(
    ///     parent state root ||
    ///     parent batch hash ||
    ///     state root ||
    ///     batch hash ||
    ///     chain id ||
    ///     withdraw root ||
    ///     prev msg queue hash ||
    ///     post msg queue hash
    /// )
    fn pi_hash_euclidv2(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(self.parent_state_root.as_slice())
                .chain(self.parent_batch_hash.as_slice())
                .chain(self.state_root.as_slice())
                .chain(self.batch_hash.as_slice())
                .chain(self.chain_id.to_be_bytes().as_slice())
                .chain(self.withdraw_root.as_slice())
                .chain(self.prev_msg_queue_hash.as_slice())
                .chain(self.post_msg_queue_hash.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }
}

pub type VersionedBatchInfo = (BatchInfo, ForkName);

impl MultiVersionPublicInputs for BatchInfo {
    fn pi_hash_by_fork(&self, fork_name: ForkName) -> B256 {
        match fork_name {
            ForkName::EuclidV1 => self.pi_hash_euclidv1(),
            ForkName::EuclidV2 => self.pi_hash_euclidv2(),
        }
    }

    /// Validate public inputs between 2 contiguous batches.
    ///
    /// - chain id MUST match
    /// - state roots MUST be chained
    /// - batch hashes MUST be chained
    /// - L1 msg queue hashes MUST be chained
    fn validate(&self, prev_pi: &Self, fork_name: ForkName) {
        assert_eq!(self.chain_id, prev_pi.chain_id);
        assert_eq!(self.parent_state_root, prev_pi.state_root);
        assert_eq!(self.parent_batch_hash, prev_pi.batch_hash);
        assert_eq!(self.prev_msg_queue_hash, prev_pi.post_msg_queue_hash);

        if fork_name == ForkName::EuclidV1 {
            assert_eq!(self.prev_msg_queue_hash, B256::ZERO);
            assert_eq!(prev_pi.prev_msg_queue_hash, B256::ZERO);
            assert_eq!(self.post_msg_queue_hash, B256::ZERO);
            assert_eq!(prev_pi.post_msg_queue_hash, B256::ZERO);
        }
    }
}
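A quick size sanity-check (not part of the diff) for the two preimage layouts above, at 32 bytes per B256 field plus 8 bytes for the big-endian chain id:

```rust
// euclidv1: 5 B256 fields + chain id; euclidv2 adds prev/post msg queue hashes.
const PI_PREIMAGE_LEN_EUCLIDV1: usize = 5 * 32 + 8; // 168 bytes
const PI_PREIMAGE_LEN_EUCLIDV2: usize = 7 * 32 + 8; // 232 bytes
```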
@@ -1,149 +0,0 @@
use alloy_primitives::B256;

use crate::{
    public_inputs::{ForkName, MultiVersionPublicInputs, PublicInputs},
    utils::keccak256,
};

/// Represents fields required to compute the public-inputs digest of a bundle.
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct BundleInfo {
    /// The EIP-155 chain ID of all txs in the bundle.
    pub chain_id: u64,
    /// The L1 msg queue hash at the end of the last batch in the bundle.
    /// Not a phase-1 field, so we make it omittable.
    #[serde(default)]
    pub msg_queue_hash: B256,
    /// The number of batches bundled together in the bundle.
    pub num_batches: u32,
    /// The last finalized on-chain state root.
    pub prev_state_root: B256,
    /// The last finalized on-chain batch hash.
    pub prev_batch_hash: B256,
    /// The state root after applying every batch in the bundle.
    ///
    /// Upon verification of the EVM-verifiable bundle proof, this state root will be finalized
    /// on-chain.
    pub post_state_root: B256,
    /// The batch hash of the last batch in the bundle.
    ///
    /// Upon verification of the EVM-verifiable bundle proof, this batch hash will be finalized
    /// on-chain.
    pub batch_hash: B256,
    /// The withdrawals root at the last block in the last chunk in the last batch in the bundle.
    pub withdraw_root: B256,
}

impl BundleInfo {
    /// Public input hash for a bundle (euclidv1 or da-codec@v6) is defined as
    ///
    /// keccak(
    ///     chain id ||
    ///     num batches ||
    ///     prev state root ||
    ///     prev batch hash ||
    ///     post state root ||
    ///     batch hash ||
    ///     withdraw root
    /// )
    pub fn pi_hash_euclidv1(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(self.chain_id.to_be_bytes().as_slice())
                .chain(self.num_batches.to_be_bytes().as_slice())
                .chain(self.prev_state_root.as_slice())
                .chain(self.prev_batch_hash.as_slice())
                .chain(self.post_state_root.as_slice())
                .chain(self.batch_hash.as_slice())
                .chain(self.withdraw_root.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }

    /// Public input hash for a bundle (euclidv2 or da-codec@v7) is defined as
    ///
    /// keccak(
    ///     chain id ||
    ///     msg_queue_hash ||
    ///     num batches ||
    ///     prev state root ||
    ///     prev batch hash ||
    ///     post state root ||
    ///     batch hash ||
    ///     withdraw root
    /// )
    pub fn pi_hash_euclidv2(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(self.chain_id.to_be_bytes().as_slice())
                .chain(self.msg_queue_hash.as_slice())
                .chain(self.num_batches.to_be_bytes().as_slice())
                .chain(self.prev_state_root.as_slice())
                .chain(self.prev_batch_hash.as_slice())
                .chain(self.post_state_root.as_slice())
                .chain(self.batch_hash.as_slice())
                .chain(self.withdraw_root.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }

    pub fn pi_hash(&self, fork_name: ForkName) -> B256 {
        match fork_name {
            ForkName::EuclidV1 => self.pi_hash_euclidv1(),
            ForkName::EuclidV2 => self.pi_hash_euclidv2(),
        }
    }
}

impl MultiVersionPublicInputs for BundleInfo {
    fn pi_hash_by_fork(&self, fork_name: ForkName) -> B256 {
        match fork_name {
            ForkName::EuclidV1 => self.pi_hash_euclidv1(),
            ForkName::EuclidV2 => self.pi_hash_euclidv2(),
        }
    }

    fn validate(&self, _prev_pi: &Self, _fork_name: ForkName) {
        unreachable!("bundle is the last layer and is not aggregated by any other circuit");
    }
}

#[derive(Clone, Debug)]
pub struct BundleInfoV1(pub BundleInfo);

#[derive(Clone, Debug)]
pub struct BundleInfoV2(pub BundleInfo);

impl From<BundleInfo> for BundleInfoV1 {
    fn from(value: BundleInfo) -> Self {
        Self(value)
    }
}

impl From<BundleInfo> for BundleInfoV2 {
    fn from(value: BundleInfo) -> Self {
        Self(value)
    }
}

impl PublicInputs for BundleInfoV1 {
    fn pi_hash(&self) -> B256 {
        self.0.pi_hash_euclidv1()
    }

    fn validate(&self, _prev_pi: &Self) {
        unreachable!("bundle is the last layer and is not aggregated by any other circuit");
    }
}

impl PublicInputs for BundleInfoV2 {
    fn pi_hash(&self) -> B256 {
        self.0.pi_hash_euclidv2()
    }

    fn validate(&self, _prev_pi: &Self) {
        unreachable!("bundle is the last layer and is not aggregated by any other circuit");
    }
}
@@ -1,248 +0,0 @@
use alloy_primitives::{B256, U256};

use crate::{
    public_inputs::{ForkName, MultiVersionPublicInputs},
    utils::keccak256,
};

/// Number of bytes used to serialise [`BlockContextV2`].
pub const SIZE_BLOCK_CTX: usize = 52;

/// Represents version 2 of the block context.
///
/// The difference between v2 and v1 is that the block number field has been removed since v2.
#[derive(
    Debug,
    Clone,
    PartialEq,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct BlockContextV2 {
    /// The timestamp of the block.
    pub timestamp: u64,
    /// The base fee of the block.
    pub base_fee: U256,
    /// The gas limit of the block.
    pub gas_limit: u64,
    /// The number of transactions in the block, including both L1 msg txs as well as L2 txs.
    pub num_txs: u16,
    /// The number of L1 msg txs in the block.
    pub num_l1_msgs: u16,
}

impl From<&ArchivedBlockContextV2> for BlockContextV2 {
    fn from(archived: &ArchivedBlockContextV2) -> Self {
        Self {
            timestamp: archived.timestamp.into(),
            base_fee: archived.base_fee.into(),
            gas_limit: archived.gas_limit.into(),
            num_txs: archived.num_txs.into(),
            num_l1_msgs: archived.num_l1_msgs.into(),
        }
    }
}

impl From<&[u8]> for BlockContextV2 {
    fn from(bytes: &[u8]) -> Self {
        assert_eq!(bytes.len(), SIZE_BLOCK_CTX);

        let timestamp = u64::from_be_bytes(bytes[0..8].try_into().expect("should not fail"));
        let base_fee = U256::from_be_slice(&bytes[8..40]);
        let gas_limit = u64::from_be_bytes(bytes[40..48].try_into().expect("should not fail"));
        let num_txs = u16::from_be_bytes(bytes[48..50].try_into().expect("should not fail"));
        let num_l1_msgs = u16::from_be_bytes(bytes[50..52].try_into().expect("should not fail"));

        Self {
            timestamp,
            base_fee,
            gas_limit,
            num_txs,
            num_l1_msgs,
        }
    }
}

impl BlockContextV2 {
    /// Serialize the block context in packed form.
    pub fn to_bytes(&self) -> Vec<u8> {
        std::iter::empty()
            .chain(self.timestamp.to_be_bytes())
            .chain(self.base_fee.to_be_bytes::<32>())
            .chain(self.gas_limit.to_be_bytes())
            .chain(self.num_txs.to_be_bytes())
            .chain(self.num_l1_msgs.to_be_bytes())
            .collect()
    }
}

/// Represents header-like information for the chunk.
#[derive(
    Debug,
    Clone,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct ChunkInfo {
    /// The EIP-155 chain ID for all txs in the chunk.
    #[rkyv()]
    pub chain_id: u64,
    /// The state root before applying the chunk.
    #[rkyv()]
    pub prev_state_root: B256,
    /// The state root after applying the chunk.
    #[rkyv()]
    pub post_state_root: B256,
    /// The withdrawals root after applying the chunk.
    #[rkyv()]
    pub withdraw_root: B256,
    /// Digest of L1 message txs force included in the chunk.
    /// It is a legacy field and can be omitted in the new definition.
    #[rkyv()]
    #[serde(default)]
    pub data_hash: B256,
    /// Digest of L2 tx data flattened over all L2 txs in the chunk.
    #[rkyv()]
    pub tx_data_digest: B256,
    /// The L1 msg queue hash at the end of the previous chunk.
    #[rkyv()]
    pub prev_msg_queue_hash: B256,
    /// The L1 msg queue hash at the end of the current chunk.
    #[rkyv()]
    pub post_msg_queue_hash: B256,
    /// The length of rlp encoded L2 tx bytes flattened over all L2 txs in the chunk.
    #[rkyv()]
    pub tx_data_length: u64,
    /// The block number of the first block in the chunk.
    #[rkyv()]
    pub initial_block_number: u64,
    /// The block contexts of the blocks in the chunk.
    #[rkyv()]
    pub block_ctxs: Vec<BlockContextV2>,
}

impl ChunkInfo {
    /// Public input hash for a given chunk (euclidv1 or da-codec@v6) is defined as
    ///
    /// keccak(
    ///     chain id ||
    ///     prev state root ||
    ///     post state root ||
    ///     withdraw root ||
    ///     chunk data hash ||
    ///     tx data hash
    /// )
    pub fn pi_hash_euclidv1(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(&self.chain_id.to_be_bytes())
                .chain(self.prev_state_root.as_slice())
                .chain(self.post_state_root.as_slice())
                .chain(self.withdraw_root.as_slice())
                .chain(self.data_hash.as_slice())
                .chain(self.tx_data_digest.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }

    /// Public input hash for a given chunk (euclidv2 or da-codec@v7) is defined as
    ///
    /// keccak(
    ///     chain id ||
    ///     prev state root ||
    ///     post state root ||
    ///     withdraw root ||
    ///     tx data digest ||
    ///     prev msg queue hash ||
    ///     post msg queue hash ||
    ///     initial block number ||
    ///     block_ctx for block_ctx in block_ctxs
    /// )
    pub fn pi_hash_euclidv2(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(&self.chain_id.to_be_bytes())
                .chain(self.prev_state_root.as_slice())
                .chain(self.post_state_root.as_slice())
                .chain(self.withdraw_root.as_slice())
                .chain(self.tx_data_digest.as_slice())
                .chain(self.prev_msg_queue_hash.as_slice())
                .chain(self.post_msg_queue_hash.as_slice())
                .chain(&self.initial_block_number.to_be_bytes())
                .chain(
                    self.block_ctxs
                        .iter()
                        .flat_map(|block_ctx| block_ctx.to_bytes())
                        .collect::<Vec<u8>>()
                        .as_slice(),
                )
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }
}

impl From<&ArchivedChunkInfo> for ChunkInfo {
    fn from(archived: &ArchivedChunkInfo) -> Self {
        Self {
            chain_id: archived.chain_id.into(),
            prev_state_root: archived.prev_state_root.into(),
            post_state_root: archived.post_state_root.into(),
            withdraw_root: archived.withdraw_root.into(),
            data_hash: archived.data_hash.into(),
            tx_data_digest: archived.tx_data_digest.into(),
            prev_msg_queue_hash: archived.prev_msg_queue_hash.into(),
            post_msg_queue_hash: archived.post_msg_queue_hash.into(),
            tx_data_length: archived.tx_data_length.into(),
            initial_block_number: archived.initial_block_number.into(),
            block_ctxs: archived
                .block_ctxs
                .iter()
                .map(BlockContextV2::from)
                .collect(),
        }
    }
}

pub type VersionedChunkInfo = (ChunkInfo, ForkName);

impl MultiVersionPublicInputs for ChunkInfo {
    /// Compute the public input hash for the chunk.
    fn pi_hash_by_fork(&self, fork_name: ForkName) -> B256 {
        match fork_name {
            ForkName::EuclidV1 => {
                assert_ne!(self.data_hash, B256::ZERO, "v6 must have a valid data hash");
                self.pi_hash_euclidv1()
            }
            ForkName::EuclidV2 => self.pi_hash_euclidv2(),
        }
    }

    /// Validate public inputs between 2 contiguous chunks.
    ///
    /// - chain id MUST match
    /// - state roots MUST be chained
    /// - L1 msg queue hash MUST be chained
    fn validate(&self, prev_pi: &Self, fork_name: ForkName) {
        assert_eq!(self.chain_id, prev_pi.chain_id);
        assert_eq!(self.prev_state_root, prev_pi.post_state_root);
        assert_eq!(self.prev_msg_queue_hash, prev_pi.post_msg_queue_hash);

        // message queue hash is used only after euclidv2 (da-codec@v7)
        if fork_name == ForkName::EuclidV1 {
            assert_eq!(self.prev_msg_queue_hash, B256::ZERO);
            assert_eq!(prev_pi.prev_msg_queue_hash, B256::ZERO);
            assert_eq!(self.post_msg_queue_hash, B256::ZERO);
            assert_eq!(prev_pi.post_msg_queue_hash, B256::ZERO);
        }
    }
}
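A round-trip check (not part of the diff) for the 52-byte packed encoding above; the field values are arbitrary placeholders:

```rust
#[test]
fn block_context_round_trip() {
    let ctx = BlockContextV2 {
        timestamp: 1_700_000_000,
        base_fee: U256::from(1_000_000_000u64),
        gas_limit: 30_000_000,
        num_txs: 10,
        num_l1_msgs: 2,
    };
    let bytes = ctx.to_bytes();
    // 8 (timestamp) + 32 (base fee) + 8 (gas limit) + 2 (num txs) + 2 (num l1 msgs) = 52
    assert_eq!(bytes.len(), SIZE_BLOCK_CTX);
    assert_eq!(BlockContextV2::from(bytes.as_slice()), ctx);
}
```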
@@ -1,35 +0,0 @@
use alloy_primitives::B256;
use tiny_keccak::{Hasher, Keccak};

/// From the utility of ethers-rs.
///
/// Computes the Keccak-256 hash of input bytes.
///
/// Note that strings are interpreted as UTF-8 bytes.
pub fn keccak256<T: AsRef<[u8]>>(bytes: T) -> B256 {
    let mut output = [0u8; 32];

    let mut hasher = Keccak::v256();
    hasher.update(bytes.as_ref());
    hasher.finalize(&mut output);

    B256::from(output)
}

pub fn keccak256_rv32<T: AsRef<[u8]>>(bytes: T) -> B256 {
    use sha3::{Digest, Keccak256};
    let mut output = [0u8; 32];
    let mut hasher = Keccak256::new();
    hasher.update(bytes.as_ref());
    output.copy_from_slice(hasher.finalize().as_ref());
    B256::from(output)
}

pub fn sha256_rv32<T: AsRef<[u8]>>(bytes: T) -> B256 {
    use sha2::{Digest, Sha256};
    let mut output = [0u8; 32];
    let mut hasher = Sha256::new();
    hasher.update(bytes.as_ref());
    output.copy_from_slice(hasher.finalize().as_ref());
    B256::from(output)
}
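Both keccak helpers above compute the same Keccak-256 function through different backends (tiny-keccak on the host, sha3 for the rv32 target), so they must agree; a quick equivalence check, not part of the diff:

```rust
#[test]
fn keccak_impls_agree() {
    let input = b"scroll";
    assert_eq!(keccak256(input), keccak256_rv32(input));
}
```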
@@ -1,2 +0,0 @@
mod hash;
pub use hash::{keccak256, keccak256_rv32, sha256_rv32};
@@ -1,21 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types-batch"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"

[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
rkyv.workspace = true
serde.workspace = true
itertools.workspace = true
vm-zstd = { workspace = true }

types-base = { path = "../base", package = "scroll-zkvm-circuit-input-types-base" }
types-agg = { path = "../aggregation", package = "scroll-zkvm-circuit-input-types-aggregation" }

[features]
default = []
@@ -1,30 +0,0 @@
use alloy_primitives::B256;

pub mod v6;

pub mod v7;

pub trait BatchHeader {
    /// The DA-codec version for the batch header.
    fn version(&self) -> u8;

    /// The incremental index of the batch.
    fn index(&self) -> u64;

    /// The batch header digest of the parent batch.
    fn parent_batch_hash(&self) -> B256;

    /// The batch header digest.
    fn batch_hash(&self) -> B256;
}

/// Reference header indicating the version of the batch header on which the batch hash
/// should be computed.
#[derive(Clone, Debug, rkyv::Archive, rkyv::Deserialize, rkyv::Serialize)]
#[rkyv(derive(Debug))]
pub enum ReferenceHeader {
    /// Represents DA-codec v6.
    V6(v6::BatchHeaderV6),
    /// Represents DA-codec v7.
    V7(v7::BatchHeaderV7),
}
@@ -1,151 +0,0 @@
use super::BatchHeader;
use alloy_primitives::B256;
use types_base::utils::keccak256;

/// Represents the header summarising the batch of chunks as per DA-codec v6.
#[derive(
    Clone,
    Copy,
    Debug,
    Default,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct BatchHeaderV6 {
    /// The DA-codec version for the batch.
    #[rkyv()]
    pub version: u8,
    /// The index of the batch
    #[rkyv()]
    pub batch_index: u64,
    /// Number of L1 messages popped in the batch
    #[rkyv()]
    pub l1_message_popped: u64,
    /// Number of total L1 messages popped after the batch
    #[rkyv()]
    pub total_l1_message_popped: u64,
    /// The parent batch hash
    #[rkyv()]
    pub parent_batch_hash: B256,
    /// The timestamp of the last block in this batch
    #[rkyv()]
    pub last_block_timestamp: u64,
    /// The data hash of the batch
    #[rkyv()]
    pub data_hash: B256,
    /// The versioned hash of the blob with this batch's data
    #[rkyv()]
    pub blob_versioned_hash: B256,
    /// The blob data proof: z (32), y (32)
    #[rkyv()]
    pub blob_data_proof: [B256; 2],
}

impl BatchHeader for BatchHeaderV6 {
    fn version(&self) -> u8 {
        self.version
    }

    fn index(&self) -> u64 {
        self.batch_index
    }

    fn parent_batch_hash(&self) -> B256 {
        self.parent_batch_hash
    }

    /// Batch hash as per DA-codec v6:
    ///
    /// keccak(
    ///     version ||
    ///     batch index ||
    ///     l1 message popped ||
    ///     total l1 message popped ||
    ///     batch data hash ||
    ///     versioned hash ||
    ///     parent batch hash ||
    ///     last block timestamp ||
    ///     z ||
    ///     y
    /// )
    fn batch_hash(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(vec![self.version].as_slice())
                .chain(self.batch_index.to_be_bytes().as_slice())
                .chain(self.l1_message_popped.to_be_bytes().as_slice())
                .chain(self.total_l1_message_popped.to_be_bytes().as_slice())
                .chain(self.data_hash.as_slice())
                .chain(self.blob_versioned_hash.as_slice())
                .chain(self.parent_batch_hash.as_slice())
                .chain(self.last_block_timestamp.to_be_bytes().as_slice())
                .chain(self.blob_data_proof[0].as_slice())
                .chain(self.blob_data_proof[1].as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }
}

impl BatchHeader for ArchivedBatchHeaderV6 {
    fn version(&self) -> u8 {
        self.version
    }

    fn index(&self) -> u64 {
        self.batch_index.into()
    }

    fn parent_batch_hash(&self) -> B256 {
        self.parent_batch_hash.into()
    }

    fn batch_hash(&self) -> B256 {
        let batch_index: u64 = self.batch_index.into();
        let l1_message_popped: u64 = self.l1_message_popped.into();
        let total_l1_message_popped: u64 = self.total_l1_message_popped.into();
        let data_hash: B256 = self.data_hash.into();
        let blob_versioned_hash: B256 = self.blob_versioned_hash.into();
        let parent_batch_hash: B256 = self.parent_batch_hash.into();
        let last_block_timestamp: u64 = self.last_block_timestamp.into();
        let blob_data_proof: [B256; 2] = self.blob_data_proof.map(|h| h.into());
        keccak256(
            std::iter::empty()
                .chain(vec![self.version].as_slice())
                .chain(batch_index.to_be_bytes().as_slice())
                .chain(l1_message_popped.to_be_bytes().as_slice())
                .chain(total_l1_message_popped.to_be_bytes().as_slice())
                .chain(data_hash.as_slice())
                .chain(blob_versioned_hash.as_slice())
                .chain(parent_batch_hash.as_slice())
                .chain(last_block_timestamp.to_be_bytes().as_slice())
                .chain(blob_data_proof[0].as_slice())
                .chain(blob_data_proof[1].as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }
}

impl From<&ArchivedBatchHeaderV6> for BatchHeaderV6 {
    fn from(archived: &ArchivedBatchHeaderV6) -> Self {
        Self {
            version: archived.version,
            batch_index: archived.batch_index.into(),
            l1_message_popped: archived.l1_message_popped.into(),
            total_l1_message_popped: archived.total_l1_message_popped.into(),
            parent_batch_hash: archived.parent_batch_hash.into(),
            last_block_timestamp: archived.last_block_timestamp.into(),
            data_hash: archived.data_hash.into(),
            blob_versioned_hash: archived.blob_versioned_hash.into(),
            blob_data_proof: [
                archived.blob_data_proof[0].into(),
                archived.blob_data_proof[1].into(),
            ],
        }
    }
}
@@ -1,106 +0,0 @@
use alloy_primitives::B256;

use super::BatchHeader;
use types_base::utils::keccak256;

/// Represents the header summarising the batch of chunks as per DA-codec v7.
#[derive(
    Clone,
    Copy,
    Debug,
    Default,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct BatchHeaderV7 {
    /// The DA-codec version for the batch.
    #[rkyv()]
    pub version: u8,
    /// The index of the batch
    #[rkyv()]
    pub batch_index: u64,
    /// The parent batch hash
    #[rkyv()]
    pub parent_batch_hash: B256,
    /// The versioned hash of the blob with this batch's data
    #[rkyv()]
    pub blob_versioned_hash: B256,
}

impl BatchHeader for BatchHeaderV7 {
    fn version(&self) -> u8 {
        self.version
    }

    fn index(&self) -> u64 {
        self.batch_index
    }

    fn parent_batch_hash(&self) -> B256 {
        self.parent_batch_hash
    }

    /// Batch hash as per DA-codec v7:
    ///
    /// keccak(
    ///     version ||
    ///     batch index ||
    ///     versioned hash ||
    ///     parent batch hash
    /// )
    fn batch_hash(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(vec![self.version].as_slice())
                .chain(self.batch_index.to_be_bytes().as_slice())
                .chain(self.blob_versioned_hash.as_slice())
                .chain(self.parent_batch_hash.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }
}

impl BatchHeader for ArchivedBatchHeaderV7 {
    fn version(&self) -> u8 {
        self.version
    }

    fn index(&self) -> u64 {
        self.batch_index.into()
    }

    fn parent_batch_hash(&self) -> B256 {
        self.parent_batch_hash.into()
    }

    fn batch_hash(&self) -> B256 {
        let batch_index: u64 = self.batch_index.into();
        let blob_versioned_hash: B256 = self.blob_versioned_hash.into();
        let parent_batch_hash: B256 = self.parent_batch_hash.into();
        keccak256(
            std::iter::empty()
                .chain(vec![self.version].as_slice())
                .chain(batch_index.to_be_bytes().as_slice())
                .chain(blob_versioned_hash.as_slice())
                .chain(parent_batch_hash.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }
}

impl From<&ArchivedBatchHeaderV7> for BatchHeaderV7 {
    fn from(archived: &ArchivedBatchHeaderV7) -> Self {
        Self {
            version: archived.version,
            batch_index: archived.batch_index.into(),
            parent_batch_hash: archived.parent_batch_hash.into(),
            blob_versioned_hash: archived.blob_versioned_hash.into(),
        }
    }
}
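A smoke test (not part of the diff) that rebuilds the v7 preimage by hand and checks it against `batch_hash`. The v7 preimage is only 73 bytes (1 + 8 + 32 + 32), compared to v6's 193-byte preimage; the field values below are placeholders.

```rust
#[test]
fn v7_batch_hash_preimage() {
    let header = BatchHeaderV7 {
        version: 7,
        batch_index: 42,
        parent_batch_hash: B256::ZERO,
        blob_versioned_hash: B256::ZERO,
    };
    // version (1) || batch index (8) || versioned hash (32) || parent batch hash (32)
    let preimage: Vec<u8> = std::iter::empty()
        .chain([header.version].as_slice())
        .chain(header.batch_index.to_be_bytes().as_slice())
        .chain(header.blob_versioned_hash.as_slice())
        .chain(header.parent_batch_hash.as_slice())
        .cloned()
        .collect();
    assert_eq!(preimage.len(), 73);
    assert_eq!(header.batch_hash(), keccak256(&preimage));
}
```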
@@ -1,17 +0,0 @@
mod header;
pub use header::{
    ArchivedReferenceHeader, BatchHeader, ReferenceHeader,
    v6::{ArchivedBatchHeaderV6, BatchHeaderV6},
    v7::{ArchivedBatchHeaderV7, BatchHeaderV7},
};

mod payload;
pub use payload::{
    v6::{EnvelopeV6, PayloadV6},
    v7::{EnvelopeV7, PayloadV7},
};

pub use payload::{BLOB_WIDTH, N_BLOB_BYTES, N_DATA_BYTES_PER_COEFFICIENT};

mod witness;
pub use witness::{ArchivedBatchWitness, BatchWitness, Bytes48, PointEvalWitness};
@@ -1,15 +0,0 @@
pub mod v6;
pub mod v7;

/// The number of data bytes we pack into each BLS12-381 scalar. The most-significant byte is 0.
pub const N_DATA_BYTES_PER_COEFFICIENT: usize = 31;

/// The number of BLS12-381 scalar field elements that effectively represent an EIP-4844 blob.
pub const BLOB_WIDTH: usize = 4096;

/// The effective (reduced) number of bytes we can use within a blob.
///
/// EIP-4844 requires that each 32-byte chunk of bytes represents a BLS12-381 scalar field element
/// in its canonical form. As a result, we set the most-significant byte in each such chunk to 0.
/// This allows us to use only up to 31 bytes in each such chunk, hence the reduced capacity.
pub const N_BLOB_BYTES: usize = BLOB_WIDTH * N_DATA_BYTES_PER_COEFFICIENT;
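The constants above pin the usable blob capacity; the arithmetic, as a compile-time check (not part of the diff):

```rust
// 4096 coefficients x 31 usable bytes each = 126_976 bytes (~124 KiB) per blob.
const _: () = assert!(N_BLOB_BYTES == 4096 * 31);
const _: () = assert!(N_BLOB_BYTES == 126_976);
```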
@@ -1,212 +0,0 @@
use alloy_primitives::B256;
use itertools::Itertools;

use crate::BatchHeaderV6;
use types_base::{public_inputs::chunk::ChunkInfo, utils::keccak256};

/// The default max chunks for v6 payload
pub const N_MAX_CHUNKS: usize = 45;

/// The number of bytes to encode the number of chunks in a batch.
const N_BYTES_NUM_CHUNKS: usize = 2;

/// The number of bytes to encode a chunk size (u32).
const N_BYTES_CHUNK_SIZE: usize = 4;

impl From<&[u8]> for EnvelopeV6 {
    fn from(blob_bytes: &[u8]) -> Self {
        let is_encoded = blob_bytes[0] & 1 == 1;
        Self {
            is_encoded,
            envelope_bytes: if is_encoded {
                vm_zstd::process(&blob_bytes[1..]).unwrap().decoded_data
            } else {
                Vec::from(&blob_bytes[1..])
            },
        }
    }
}

#[derive(Debug, Clone)]
pub struct EnvelopeV6 {
    /// The original envelope bytes supplied.
    ///
    /// Caching just for re-use later in challenge digest computation.
    pub envelope_bytes: Vec<u8>,
    /// Whether the enveloped bytes are encoded (compressed) in the envelope.
    pub is_encoded: bool,
}

impl EnvelopeV6 {
    /// Parse payload bytes and obtain the challenge digest.
    pub fn challenge_digest(&self, versioned_hash: B256) -> B256 {
        let payload = Payload::from(self);
        payload.get_challenge_digest(versioned_hash)
    }
}

impl From<&EnvelopeV6> for Payload {
    fn from(envelope: &EnvelopeV6) -> Self {
        Self::from_payload(&envelope.envelope_bytes)
    }
}

/// Payload that describes a batch.
#[derive(Clone, Debug, Default)]
pub struct Payload {
    /// Metadata that encodes the sizes of every chunk in the batch.
    pub metadata_digest: B256,
    /// The Keccak digests of transaction bytes for every chunk in the batch.
    ///
    /// The `chunk_data_digest` is a part of the chunk-circuit's public input and hence used to
    /// verify that the transaction bytes included in the chunk-circuit indeed match the
    /// transaction bytes made available in the batch.
    pub chunk_data_digests: Vec<B256>,
}

pub type PayloadV6 = Payload;

impl Payload {
    /// For raw payload data (read from the decompressed enveloped data), which is raw batch bytes
    /// with metadata, this function segments the byte stream into chunk segments.
    ///
    /// This method is used INSIDE OF the zkvm since we cannot generate (compress) batch data
    /// within the vm program.
    ///
    /// The structure of batch bytes is as follows:
    ///
    /// | Byte Index                                                   | Size                         | Hint                                |
    /// |--------------------------------------------------------------|------------------------------|-------------------------------------|
    /// | 0                                                            | N_BYTES_NUM_CHUNKS           | Number of chunks                    |
    /// | N_BYTES_NUM_CHUNKS                                           | N_BYTES_CHUNK_SIZE           | Size of chunks[0]                   |
    /// | N_BYTES_NUM_CHUNKS + N_BYTES_CHUNK_SIZE                      | N_BYTES_CHUNK_SIZE           | Size of chunks[1]                   |
    /// | N_BYTES_NUM_CHUNKS + (i * N_BYTES_CHUNK_SIZE)                | N_BYTES_CHUNK_SIZE           | Size of chunks[i]                   |
    /// | N_BYTES_NUM_CHUNKS + ((N_MAX_CHUNKS-1) * N_BYTES_CHUNK_SIZE) | N_BYTES_CHUNK_SIZE           | Size of chunks[N_MAX_CHUNKS-1]      |
    /// | N_BYTES_NUM_CHUNKS + (N_MAX_CHUNKS * N_BYTES_CHUNK_SIZE)     | Size of chunks[0]            | L2 tx bytes of chunks[0]            |
    /// | "" + Size_of_chunks[0]                                       | Size of chunks[1]            | L2 tx bytes of chunks[1]            |
    /// | "" + Size_of_chunks[i-1]                                     | Size of chunks[i]            | L2 tx bytes of chunks[i]            |
    /// | "" + Size_of_chunks[Num_chunks-1]                            | Size of chunks[Num_chunks-1] | L2 tx bytes of chunks[Num_chunks-1] |
    pub fn from_payload(batch_bytes_with_metadata: &[u8]) -> Self {
        // Get the metadata bytes and metadata digest.
        let n_bytes_metadata = Self::n_bytes_metadata();
        let metadata_bytes = &batch_bytes_with_metadata[..n_bytes_metadata];
        let metadata_digest = keccak256(metadata_bytes);

        // The remaining bytes represent the chunk data (L2 tx bytes) segmented as chunks.
        let batch_bytes = &batch_bytes_with_metadata[n_bytes_metadata..];

        // The number of chunks in the batch.
        let valid_chunks = metadata_bytes[..N_BYTES_NUM_CHUNKS]
            .iter()
            .fold(0usize, |acc, &d| acc * 256usize + d as usize);

        // The size of each chunk in the batch.
        let chunk_sizes = metadata_bytes[N_BYTES_NUM_CHUNKS..]
            .iter()
            .chunks(N_BYTES_CHUNK_SIZE)
            .into_iter()
            .map(|bytes| bytes.fold(0usize, |acc, &d| acc * 256usize + d as usize))
            .collect::<Vec<usize>>();

        // For every unused chunk, the chunk size should be set to 0.
        for &unused_chunk_size in chunk_sizes.iter().skip(valid_chunks) {
            assert_eq!(unused_chunk_size, 0, "unused chunk has size 0");
        }

        // Segment the batch bytes based on the chunk sizes.
        let (segmented_batch_data, remaining_bytes) =
            chunk_sizes.into_iter().take(valid_chunks).fold(
                (Vec::new(), batch_bytes),
                |(mut datas, rest_bytes), size| {
                    datas.push(Vec::from(&rest_bytes[..size]));
                    (datas, &rest_bytes[size..])
                },
            );

        // After segmenting the batch data into chunks, no bytes should be left.
        assert!(
            remaining_bytes.is_empty(),
            "chunk segmentation len must add up to the correct value"
        );

        // Compute the chunk data digests based on the segmented data.
        let chunk_data_digests = segmented_batch_data
            .iter()
            .map(|bytes| B256::from(keccak256(bytes)))
            .collect();

        Self {
            metadata_digest,
            chunk_data_digests,
        }
    }

    /// Compute the challenge digest from blob bytes, which is a combination of the digests of
    /// the bytes in each chunk.
    pub fn get_challenge_digest(&self, versioned_hash: B256) -> B256 {
        keccak256(self.get_challenge_digest_preimage(versioned_hash))
    }

    /// The number of bytes in the payload data that represent the "payload metadata" section: a
    /// u16 for the number of chunks and N_MAX_CHUNKS u32 values for the chunk sizes.
    const fn n_bytes_metadata() -> usize {
        N_BYTES_NUM_CHUNKS + (N_MAX_CHUNKS * N_BYTES_CHUNK_SIZE)
    }

    /// Validate the payload contents.
    pub fn validate<'a>(
        &self,
        header: &BatchHeaderV6,
        chunk_infos: &'a [ChunkInfo],
    ) -> (&'a ChunkInfo, &'a ChunkInfo) {
        // There should be at least 1 chunk info.
        assert!(!chunk_infos.is_empty(), "at least 1 chunk info");

        // Get the first and last chunks' info, to construct the batch info.
        let (first_chunk, last_chunk) = (
            chunk_infos.first().expect("at least one chunk in batch"),
            chunk_infos.last().expect("at least one chunk in batch"),
        );

        for (&chunk_data_digest, chunk_info) in self.chunk_data_digests.iter().zip_eq(chunk_infos) {
            assert_eq!(chunk_data_digest, chunk_info.tx_data_digest)
        }

        // Validate the l1-msg identifier data_hash for the batch.
        let batch_data_hash_preimage = chunk_infos
|
||||
.iter()
|
||||
.flat_map(|chunk_info| chunk_info.data_hash.0)
|
||||
.collect::<Vec<_>>();
|
||||
let batch_data_hash = keccak256(batch_data_hash_preimage);
|
||||
assert_eq!(batch_data_hash, header.data_hash);
|
||||
|
||||
(first_chunk, last_chunk)
|
||||
}
|
||||
|
||||
/// Get the preimage for the challenge digest.
|
||||
pub(crate) fn get_challenge_digest_preimage(&self, versioned_hash: B256) -> Vec<u8> {
|
||||
// preimage =
|
||||
// metadata_digest ||
|
||||
// chunk[0].chunk_data_digest || ...
|
||||
// chunk[N_SNARKS-1].chunk_data_digest ||
|
||||
// blob_versioned_hash
|
||||
//
|
||||
// where chunk_data_digest for a padded chunk is set equal to the "last valid chunk"'s
|
||||
// chunk_data_digest.
|
||||
let mut preimage = self.metadata_digest.to_vec();
|
||||
let last_digest = self
|
||||
.chunk_data_digests
|
||||
.last()
|
||||
.expect("at least we have one");
|
||||
for chunk_digest in self
|
||||
.chunk_data_digests
|
||||
.iter()
|
||||
.chain(std::iter::repeat(last_digest))
|
||||
.take(N_MAX_CHUNKS)
|
||||
{
|
||||
preimage.extend_from_slice(chunk_digest.as_slice());
|
||||
}
|
||||
preimage.extend_from_slice(versioned_hash.as_slice());
|
||||
preimage
|
||||
}
|
||||
}
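Since the table above fixes the metadata framing completely, host-side encoding is just the inverse of `from_payload`. The following is a minimal sketch of such an encoder; `encode_batch_payload` is a hypothetical helper (not part of this crate), and the inlined constant mirrors the ones defined above.

fn encode_batch_payload(chunks: &[Vec<u8>]) -> Vec<u8> {
    const N_MAX_CHUNKS: usize = 45;
    assert!(chunks.len() <= N_MAX_CHUNKS, "too many chunks");
    let mut out = Vec::new();
    // 2-byte big-endian chunk count (N_BYTES_NUM_CHUNKS = 2).
    out.extend_from_slice(&(chunks.len() as u16).to_be_bytes());
    // One 4-byte big-endian size slot per possible chunk (N_BYTES_CHUNK_SIZE = 4);
    // slots beyond the valid chunks stay 0, matching the decoder's assertion.
    for i in 0..N_MAX_CHUNKS {
        let size = chunks.get(i).map_or(0u32, |c| c.len() as u32);
        out.extend_from_slice(&size.to_be_bytes());
    }
    // The L2 tx bytes of every chunk, concatenated in order.
    for chunk in chunks {
        out.extend_from_slice(chunk);
    }
    out
}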
@@ -1,256 +0,0 @@
use alloy_primitives::B256;

use crate::BatchHeaderV7;
use types_base::{
    public_inputs::chunk::{BlockContextV2, ChunkInfo, SIZE_BLOCK_CTX},
    utils::keccak256,
};

use super::N_BLOB_BYTES;

/// da-codec@v7
const DA_CODEC_VERSION: u8 = 7;

/// Represents the data contained within an EIP-4844 blob that is published on-chain.
///
/// The bytes following some metadata represent the zstd-encoded [`PayloadV7`] if the envelope is
/// marked `is_encoded == true`.
#[derive(Debug, Clone)]
pub struct EnvelopeV7 {
    /// The original envelope bytes supplied.
    ///
    /// Cached for re-use later in the challenge digest computation.
    pub envelope_bytes: Vec<u8>,
    /// The version from da-codec, i.e. v7 in this case.
    pub version: u8,
    /// A single-byte boolean flag (value 0 or 1) to denote whether the following blob bytes
    /// represent the batch in its zstd-encoded or raw form.
    pub is_encoded: u8,
    /// The unpadded bytes that possibly encode the [`PayloadV7`].
    pub unpadded_bytes: Vec<u8>,
}

impl From<&[u8]> for EnvelopeV7 {
    fn from(blob_bytes: &[u8]) -> Self {
        // The number of bytes is as expected.
        assert_eq!(blob_bytes.len(), N_BLOB_BYTES);

        // The version of the blob encoding is as expected, i.e. da-codec@v7.
        let version = blob_bytes[0];
        assert_eq!(version, DA_CODEC_VERSION);

        // Calculate the unpadded size of the encoded payload.
        //
        // It should be at most the maximum number of bytes allowed.
        let unpadded_size = (blob_bytes[1] as usize) * 256 * 256
            + (blob_bytes[2] as usize) * 256
            + blob_bytes[3] as usize;
        assert!(unpadded_size <= N_BLOB_BYTES - 5);

        // Whether the envelope represents encoded payload or raw payload.
        //
        // Is a boolean.
        let is_encoded = blob_bytes[4];
        assert!(is_encoded <= 1);

        // The padded bytes are all 0s.
        for &padded_byte in blob_bytes.iter().skip(5 + unpadded_size) {
            assert_eq!(padded_byte, 0);
        }

        Self {
            version,
            is_encoded,
            unpadded_bytes: blob_bytes[5..(5 + unpadded_size)].to_vec(),
            envelope_bytes: blob_bytes.to_vec(),
        }
    }
}

impl EnvelopeV7 {
    /// The verification of the EIP-4844 blob is done via the point-evaluation precompile
    /// implemented in-circuit.
    ///
    /// We require a random challenge point for this, and using Fiat-Shamir we compute it with
    /// every byte in the blob along with the blob's versioned hash, i.e. an identifier for its
    /// KZG commitment.
    ///
    /// keccak256(
    ///     keccak256(envelope) ||
    ///     versioned hash
    /// )
    pub fn challenge_digest(&self, versioned_hash: B256) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(keccak256(&self.envelope_bytes))
                .chain(versioned_hash.0)
                .collect::<Vec<u8>>(),
        )
    }
}

/// Represents the batch data, eventually encoded into an [`EnvelopeV7`].
///
/// | Field                  | # Bytes | Type           | Index         |
/// |------------------------|---------|----------------|---------------|
/// | prevL1MessageQueueHash | 32      | bytes32        | 0             |
/// | postL1MessageQueueHash | 32      | bytes32        | 32            |
/// | initialL2BlockNumber   | 8       | u64            | 64            |
/// | numBlocks              | 2       | u16            | 72            |
/// | blockCtxs[0]           | 52      | BlockContextV2 | 74            |
/// | ... blockCtxs[i] ...   | 52      | BlockContextV2 | 74 + 52*i     |
/// | blockCtxs[n-1]         | 52      | BlockContextV2 | 74 + 52*(n-1) |
/// | l2TxsData              | dynamic | bytes          | 74 + 52*n     |
#[derive(Debug, Clone)]
pub struct PayloadV7 {
    /// The version from da-codec, i.e. v7 in this case.
    ///
    /// Note: This is not really a part of the payload, simply copied from the envelope for
    /// convenience.
    pub version: u8,
    /// Message queue hash at the end of the previous batch.
    pub prev_msg_queue_hash: B256,
    /// Message queue hash at the end of the current batch.
    pub post_msg_queue_hash: B256,
    /// The block number of the first block in the batch.
    pub initial_block_number: u64,
    /// The number of blocks in the batch.
    pub num_blocks: u16,
    /// The block contexts of each block in the batch.
    pub block_contexts: Vec<BlockContextV2>,
    /// The L2 tx data flattened over every tx in every block in the batch.
    pub tx_data: Vec<u8>,
}

const INDEX_PREV_MSG_QUEUE_HASH: usize = 0;
const INDEX_POST_MSG_QUEUE_HASH: usize = INDEX_PREV_MSG_QUEUE_HASH + 32;
const INDEX_L2_BLOCK_NUM: usize = INDEX_POST_MSG_QUEUE_HASH + 32;
const INDEX_NUM_BLOCKS: usize = INDEX_L2_BLOCK_NUM + 8;
const INDEX_BLOCK_CTX: usize = INDEX_NUM_BLOCKS + 2;

impl From<&EnvelopeV7> for PayloadV7 {
    fn from(envelope: &EnvelopeV7) -> Self {
        // Conditionally decode depending on the flag set in the envelope.
        let payload_bytes = if envelope.is_encoded & 1 == 1 {
            vm_zstd::process(&envelope.unpadded_bytes)
                .expect("zstd decode should succeed")
                .decoded_data
        } else {
            envelope.unpadded_bytes.to_vec()
        };

        // Sanity check on the payload size.
        assert!(payload_bytes.len() >= INDEX_BLOCK_CTX);
        let num_blocks = u16::from_be_bytes(
            payload_bytes[INDEX_NUM_BLOCKS..INDEX_BLOCK_CTX]
                .try_into()
                .expect("should not fail"),
        );
        assert!(payload_bytes.len() >= INDEX_BLOCK_CTX + ((num_blocks as usize) * SIZE_BLOCK_CTX));

        // Deserialize the other fields.
        let prev_msg_queue_hash =
            B256::from_slice(&payload_bytes[INDEX_PREV_MSG_QUEUE_HASH..INDEX_POST_MSG_QUEUE_HASH]);
        let post_msg_queue_hash =
            B256::from_slice(&payload_bytes[INDEX_POST_MSG_QUEUE_HASH..INDEX_L2_BLOCK_NUM]);
        let initial_block_number = u64::from_be_bytes(
            payload_bytes[INDEX_L2_BLOCK_NUM..INDEX_NUM_BLOCKS]
                .try_into()
                .expect("should not fail"),
        );

        // Deserialize block contexts depending on the number of blocks in the batch.
        let mut block_contexts = Vec::with_capacity(num_blocks as usize);
        for i in 0..num_blocks {
            let start = (i as usize) * SIZE_BLOCK_CTX + INDEX_BLOCK_CTX;
            block_contexts.push(BlockContextV2::from(
                &payload_bytes[start..(start + SIZE_BLOCK_CTX)],
            ));
        }

        // All remaining bytes are flattened L2 txs.
        let tx_data =
            payload_bytes[INDEX_BLOCK_CTX + ((num_blocks as usize) * SIZE_BLOCK_CTX)..].to_vec();

        Self {
            version: envelope.version,
            prev_msg_queue_hash,
            post_msg_queue_hash,
            initial_block_number,
            num_blocks,
            block_contexts,
            tx_data,
        }
    }
}

impl PayloadV7 {
    /// Validate the payload contents.
    pub fn validate<'a>(
        &self,
        header: &BatchHeaderV7,
        chunk_infos: &'a [ChunkInfo],
    ) -> (&'a ChunkInfo, &'a ChunkInfo) {
        // Get the first and last chunks' info, to construct the batch info.
        let (first_chunk, last_chunk) = (
            chunk_infos.first().expect("at least one chunk in batch"),
            chunk_infos.last().expect("at least one chunk in batch"),
        );

        // The version from the payload is what's present in the on-chain batch header.
        assert_eq!(self.version, header.version);

        // The number of blocks in the batch.
        assert_eq!(
            usize::from(self.num_blocks),
            chunk_infos
                .iter()
                .flat_map(|chunk_info| &chunk_info.block_ctxs)
                .count()
        );
        assert_eq!(usize::from(self.num_blocks), self.block_contexts.len());

        // The block number of the first block in the batch.
        assert_eq!(self.initial_block_number, first_chunk.initial_block_number);

        // Previous message queue hash.
        assert_eq!(self.prev_msg_queue_hash, first_chunk.prev_msg_queue_hash);

        // Post message queue hash.
        assert_eq!(self.post_msg_queue_hash, last_chunk.post_msg_queue_hash);

        // For each chunk, the tx_data_digest, i.e. the keccak digest of the rlp-encoded L2 tx
        // bytes flattened over every tx in the chunk, should be re-computed and matched against
        // the public input of the chunk-circuit.
        //
        // First check that the total size of rlp-encoded tx data flattened over all txs in the
        // chunk is in fact the size available from the payload.
        assert_eq!(
            u64::try_from(self.tx_data.len()).expect("len(tx-data) is u64"),
            chunk_infos
                .iter()
                .map(|chunk_info| chunk_info.tx_data_length)
                .sum::<u64>(),
        );
        let mut index: usize = 0;
        for chunk_info in chunk_infos.iter() {
            let chunk_size = chunk_info.tx_data_length as usize;
            let chunk_tx_data_digest =
                keccak256(&self.tx_data.as_slice()[index..(index + chunk_size)]);
            assert_eq!(chunk_tx_data_digest, chunk_info.tx_data_digest);
            index += chunk_size;
        }

        // For each block in the batch, check that the block context matches what's provided as
        // witness.
        for (block_ctx, witness_block_ctx) in self.block_contexts.iter().zip(
            chunk_infos
                .iter()
                .flat_map(|chunk_info| &chunk_info.block_ctxs),
        ) {
            assert_eq!(block_ctx, witness_block_ctx);
        }

        (first_chunk, last_chunk)
    }
}
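The decoder in `From<&[u8]> for EnvelopeV7` above implies the framing a host must produce: one version byte, a 3-byte big-endian unpadded size, a 1-byte encoding flag, the payload, and zero padding up to the blob capacity. A minimal sketch of that framing follows; it is a hypothetical helper, and the value of `N_BLOB_BYTES` used here (4096 field elements times 31 usable bytes) is an assumption, with the crate's own constant being authoritative.

fn frame_envelope_v7(payload: &[u8], is_encoded: bool) -> Vec<u8> {
    const DA_CODEC_VERSION: u8 = 7;
    const N_BLOB_BYTES: usize = 4096 * 31; // assumption: 31 usable bytes per field element
    assert!(payload.len() <= N_BLOB_BYTES - 5, "payload too large for one blob");
    let mut blob = vec![0u8; N_BLOB_BYTES];
    blob[0] = DA_CODEC_VERSION;
    // 3-byte big-endian unpadded payload size.
    let size = payload.len();
    blob[1] = (size >> 16) as u8;
    blob[2] = (size >> 8) as u8;
    blob[3] = size as u8;
    // Single-byte boolean: 1 if the payload is zstd-encoded, 0 if raw.
    blob[4] = is_encoded as u8;
    // Payload bytes; everything after remains zero padding.
    blob[5..5 + payload.len()].copy_from_slice(payload);
    blob
}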
@@ -1,57 +0,0 @@
use crate::header::ReferenceHeader;
use types_agg::{AggregationInput, ProgramCommitment, ProofCarryingWitness};
use types_base::public_inputs::{ForkName, chunk::ChunkInfo};

/// Simply re-wrap [u8; 48] to avoid an unnecessary dependency.
pub type Bytes48 = [u8; 48];

/// Witness required for applying point evaluation.
#[derive(Clone, Debug, rkyv::Archive, rkyv::Deserialize, rkyv::Serialize)]
#[rkyv(derive(Debug))]
pub struct PointEvalWitness {
    /// kzg commitment
    #[rkyv()]
    pub kzg_commitment: Bytes48,
    /// kzg proof
    #[rkyv()]
    pub kzg_proof: Bytes48,
}

/// Witness to the batch circuit.
#[derive(Clone, Debug, rkyv::Archive, rkyv::Deserialize, rkyv::Serialize)]
#[rkyv(derive(Debug))]
pub struct BatchWitness {
    /// Flattened root proofs from all chunks in the batch.
    #[rkyv()]
    pub chunk_proofs: Vec<AggregationInput>,
    /// Chunk infos.
    #[rkyv()]
    pub chunk_infos: Vec<ChunkInfo>,
    /// Blob bytes.
    #[rkyv()]
    pub blob_bytes: Vec<u8>,
    /// Witness for point evaluation.
    pub point_eval_witness: PointEvalWitness,
    /// Header for reference.
    #[rkyv()]
    pub reference_header: ReferenceHeader,
    /// The code version specifying the chain spec.
    #[rkyv()]
    pub fork_name: ForkName,
}

impl ProofCarryingWitness for ArchivedBatchWitness {
    fn get_proofs(&self) -> Vec<AggregationInput> {
        self.chunk_proofs
            .iter()
            .map(|archived| AggregationInput {
                public_values: archived
                    .public_values
                    .iter()
                    .map(|u32_le| u32_le.to_native())
                    .collect(),
                commitment: ProgramCommitment::from(&archived.commitment),
            })
            .collect()
    }
}
@@ -1,21 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types-bundle"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"

[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
rkyv.workspace = true
serde.workspace = true
itertools.workspace = true
vm-zstd = { workspace = true }

types-base = { path = "../base", package = "scroll-zkvm-circuit-input-types-base" }
types-agg = { path = "../aggregation", package = "scroll-zkvm-circuit-input-types-aggregation" }

[features]
default = []
@@ -1,2 +0,0 @@
mod witness;
pub use witness::{ArchivedBundleWitness, BundleWitness};
@@ -1,30 +0,0 @@
use types_agg::{AggregationInput, ProgramCommitment, ProofCarryingWitness};
use types_base::public_inputs::batch::BatchInfo;

/// The witness for the bundle circuit.
#[derive(Clone, Debug, rkyv::Archive, rkyv::Deserialize, rkyv::Serialize)]
#[rkyv(derive(Debug))]
pub struct BundleWitness {
    /// Batch proofs being aggregated in the bundle.
    #[rkyv()]
    pub batch_proofs: Vec<AggregationInput>,
    /// Public-input values for the corresponding batch proofs.
    #[rkyv()]
    pub batch_infos: Vec<BatchInfo>,
}

impl ProofCarryingWitness for ArchivedBundleWitness {
    fn get_proofs(&self) -> Vec<AggregationInput> {
        self.batch_proofs
            .iter()
            .map(|archived| AggregationInput {
                public_values: archived
                    .public_values
                    .iter()
                    .map(|u32_le| u32_le.to_native())
                    .collect(),
                commitment: ProgramCommitment::from(&archived.commitment),
            })
            .collect()
    }
}
@@ -1,28 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types-chunk"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"

[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
rkyv.workspace = true
sbv-trie = { workspace = true }
sbv-core = { workspace = true }
sbv-primitives = { workspace = true }
sbv-kv = { workspace = true }
serde.workspace = true
itertools.workspace = true

openvm = { workspace = true, features = ["std"] }
openvm-rv32im-guest = { workspace = true }
openvm-custom-insn = { workspace = true }

types-base = { path = "../base", package = "scroll-zkvm-circuit-input-types-base" }

[features]
default = []
openvm = ["sbv-trie/openvm", "sbv-core/openvm", "sbv-primitives/openvm"]
@@ -1,167 +0,0 @@
use sbv_core::{EvmDatabase, EvmExecutor};
use sbv_primitives::{
    BlockWitness,
    chainspec::{
        BaseFeeParams, BaseFeeParamsKind, Chain, MAINNET,
        reth_chainspec::ChainSpec,
        scroll::{ScrollChainConfig, ScrollChainSpec},
    },
    ext::{BlockWitnessChunkExt, TxBytesHashExt},
    hardforks::SCROLL_DEV_HARDFORKS,
    types::{
        consensus::BlockHeader,
        reth::{Block, BlockWitnessRethExt, RecoveredBlock},
        scroll::ChunkInfoBuilder,
    },
};

use crate::{ArchivedChunkWitness, make_providers, manually_drop_on_zkvm};
use types_base::public_inputs::{
    ForkName,
    chunk::{BlockContextV2, ChunkInfo},
};

fn block_ctxv2_from_block(value: &RecoveredBlock<Block>) -> BlockContextV2 {
    use alloy_primitives::U256;
    BlockContextV2 {
        timestamp: value.timestamp,
        gas_limit: value.gas_limit,
        base_fee: U256::from(value.base_fee_per_gas().expect("base_fee_expected")),
        num_txs: u16::try_from(value.body().transactions.len()).expect("num txs u16"),
        num_l1_msgs: u16::try_from(
            value
                .body()
                .transactions
                .iter()
                .filter(|tx| tx.is_l1_message())
                .count(),
        )
        .expect("num l1 msgs u16"),
    }
}

type Witness = ArchivedChunkWitness;

pub fn execute(witness: &Witness) -> Result<ChunkInfo, String> {
    if witness.blocks.is_empty() {
        return Err("At least one witness must be provided in chunk mode".into());
    }
    if !witness.blocks.has_same_chain_id() {
        return Err("All witnesses must have the same chain id in chunk mode".into());
    }
    if !witness.blocks.has_seq_block_number() {
        return Err("All witnesses must have sequential block numbers in chunk mode".into());
    }
    // Get the blocks to build the basic chunk-info.
    let blocks = manually_drop_on_zkvm!(
        witness
            .blocks
            .iter()
            .map(|w| w.build_reth_block())
            .collect::<Result<Vec<RecoveredBlock<Block>>, _>>()
            .map_err(|e| e.to_string())?
    );
    let pre_state_root = witness.blocks[0].pre_state_root;

    let fork_name = ForkName::from(&witness.fork_name);
    let chain = Chain::from_id(witness.blocks[0].chain_id());

    // SCROLL_DEV_HARDFORKS will enable all forks
    let mut hardforks = (*SCROLL_DEV_HARDFORKS).clone();
    if fork_name == ForkName::EuclidV1 {
        // disable EuclidV2 fork for legacy chunk
        use sbv_primitives::{chainspec::ForkCondition, hardforks::ScrollHardfork};
        hardforks.insert(ScrollHardfork::EuclidV2, ForkCondition::Never);
    }

    let inner = ChainSpec {
        chain,
        genesis_hash: Default::default(),
        genesis: Default::default(),
        genesis_header: Default::default(),
        paris_block_and_final_difficulty: Default::default(),
        hardforks,
        deposit_contract: Default::default(),
        base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()),
        prune_delete_limit: 20000,
        blob_params: Default::default(),
    };
    let config = ScrollChainConfig::mainnet();
    let chain_spec: ScrollChainSpec = ScrollChainSpec { inner, config };

    let (code_db, nodes_provider, block_hashes) = make_providers(&witness.blocks);
    let nodes_provider = manually_drop_on_zkvm!(nodes_provider);

    let prev_state_root = witness.blocks[0].pre_state_root();
    let mut db = manually_drop_on_zkvm!(
        EvmDatabase::new_from_root(code_db, prev_state_root, &nodes_provider, block_hashes)
            .map_err(|e| format!("failed to create EvmDatabase: {}", e))?
    );
    for block in blocks.iter() {
        let output = manually_drop_on_zkvm!(
            EvmExecutor::new(std::sync::Arc::new(chain_spec.clone()), &db, block)
                .execute()
                .map_err(|e| format!("failed to execute block: {}", e))?
        );
        db.update(&nodes_provider, output.state.state.iter())
            .map_err(|e| format!("failed to update db: {}", e))?;
    }

    let post_state_root = db.commit_changes();

    let withdraw_root = db
        .withdraw_root()
        .map_err(|e| format!("failed to get withdraw root: {}", e))?;

    let mut rlp_buffer = manually_drop_on_zkvm!(Vec::with_capacity(2048));
    let (tx_data_length, tx_data_digest) = blocks
        .iter()
        .flat_map(|b| b.body().transactions.iter())
        .tx_bytes_hash_in(rlp_buffer.as_mut());
    let _ = tx_data_length;

    let sbv_chunk_info = {
        #[allow(unused_mut)]
        let mut builder = ChunkInfoBuilder::new(&chain_spec, pre_state_root.into(), &blocks);
        if fork_name == ForkName::EuclidV2 {
            builder.set_prev_msg_queue_hash(witness.prev_msg_queue_hash.into());
        }
        builder.build(withdraw_root)
    };
    if post_state_root != sbv_chunk_info.post_state_root() {
        return Err(format!(
            "state root mismatch: expected={}, found={}",
            sbv_chunk_info.post_state_root(),
            post_state_root
        ));
    }

    let chunk_info = ChunkInfo {
        chain_id: sbv_chunk_info.chain_id(),
        prev_state_root: sbv_chunk_info.prev_state_root(),
        post_state_root: sbv_chunk_info.post_state_root(),
        data_hash: sbv_chunk_info
            .clone()
            .into_legacy()
            .map(|x| x.data_hash)
            .unwrap_or_default(),
        withdraw_root,
        tx_data_digest,
        tx_data_length: u64::try_from(tx_data_length).expect("tx_data_length: u64"),
        initial_block_number: blocks[0].header().number,
        prev_msg_queue_hash: witness.prev_msg_queue_hash.into(),
        post_msg_queue_hash: sbv_chunk_info
            .into_euclid_v2()
            .map(|x| x.post_msg_queue_hash)
            .unwrap_or_default(),
        block_ctxs: blocks.iter().map(block_ctxv2_from_block).collect(),
    };

    openvm::io::println(format!("withdraw_root = {:?}", withdraw_root));
    openvm::io::println(format!("tx_bytes_hash = {:?}", tx_data_digest));

    // We should never touch that lazy lock... Or else we introduce 40M useless cycles.
    assert!(std::sync::LazyLock::get(&MAINNET).is_none());

    Ok(chunk_info)
}
@@ -1,11 +0,0 @@
#![feature(lazy_get)]

mod utils;

mod witness;

pub use utils::make_providers;
pub use witness::{ArchivedChunkWitness, ChunkWitness};

mod execute;
pub use execute::execute;
@@ -1,27 +0,0 @@
use alloy_primitives::{B256, U256};
use sbv_primitives::types::{
    consensus::BlockHeader,
    reth::{Block, RecoveredBlock},
};

use types_base::public_inputs::chunk::BlockContextV2;

impl From<&RecoveredBlock<Block>> for BlockContextV2 {
    fn from(value: &RecoveredBlock<Block>) -> Self {
        Self {
            timestamp: value.timestamp,
            gas_limit: value.gas_limit,
            base_fee: U256::from(value.base_fee_per_gas().expect("base_fee_expected")),
            num_txs: u16::try_from(value.body().transactions.len()).expect("num txs u16"),
            num_l1_msgs: u16::try_from(
                value
                    .body()
                    .transactions
                    .iter()
                    .filter(|tx| tx.is_l1_message())
                    .count(),
            )
            .expect("num l1 msgs u16"),
        }
    }
}
@@ -1,48 +0,0 @@
use sbv_kv::nohash::NoHashMap;
use sbv_primitives::{B256, BlockWitness, Bytes, ext::BlockWitnessExt};
use sbv_trie::{BlockWitnessTrieExt, TrieNode};

type CodeDb = NoHashMap<B256, Bytes>;

type NodesProvider = NoHashMap<B256, TrieNode>;

type BlockHashProvider = sbv_kv::null::NullProvider;

pub fn make_providers<W: BlockWitness>(
    witnesses: &[W],
) -> (CodeDb, NodesProvider, BlockHashProvider) {
    let code_db = {
        // build code db
        let num_codes = witnesses.iter().map(|w| w.codes_iter().len()).sum();
        let mut code_db =
            NoHashMap::<B256, Bytes>::with_capacity_and_hasher(num_codes, Default::default());
        witnesses.import_codes(&mut code_db);
        code_db
    };
    let nodes_provider = {
        let num_states = witnesses.iter().map(|w| w.states_iter().len()).sum();
        let mut nodes_provider =
            NoHashMap::<B256, TrieNode>::with_capacity_and_hasher(num_states, Default::default());
        witnesses.import_nodes(&mut nodes_provider).unwrap();
        nodes_provider
    };
    let block_hashes = sbv_kv::null::NullProvider;

    (code_db, nodes_provider, block_hashes)
}

#[macro_export]
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64")))]
macro_rules! manually_drop_on_zkvm {
    ($e:expr) => {
        std::mem::ManuallyDrop::new($e)
    };
}

#[macro_export]
#[cfg(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64"))]
macro_rules! manually_drop_on_zkvm {
    ($e:expr) => {
        $e
    };
}
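The two cfg-gated definitions above make the same call site behave differently per target: on host architectures the expression passes through unchanged and is dropped normally, while on the zkVM target it is wrapped in `std::mem::ManuallyDrop`, so destructors never run and their cycles are never paid. A rough usage sketch, assuming the macro is in scope:

fn demo() {
    // On x86/x86_64/aarch64 this is just the Vec and it is freed at scope
    // exit; on the zkVM it becomes ManuallyDrop<Vec<u8>>, skipping the
    // pointless deallocation work inside the VM.
    let buffer = manually_drop_on_zkvm!(vec![0u8; 1024]);
    // ManuallyDrop derefs to the inner value, so both variants read the same.
    assert_eq!(buffer.len(), 1024);
}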
@@ -1,71 +0,0 @@
use alloy_primitives::B256;
use sbv_primitives::types::BlockWitness;
use std::collections::HashSet;

use types_base::public_inputs::ForkName;

/// The witness type accepted by the chunk-circuit.
#[derive(
    Clone,
    Debug,
    serde::Deserialize,
    serde::Serialize,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct ChunkWitness {
    /// The block witness for each block in the chunk.
    pub blocks: Vec<BlockWitness>,
    /// The on-chain rolling L1 message queue hash before enqueueing any L1 msg tx from the chunk.
    pub prev_msg_queue_hash: B256,
    /// The code version specifying the chain spec.
    pub fork_name: ForkName,
}

impl ChunkWitness {
    pub fn new(blocks: &[BlockWitness], prev_msg_queue_hash: B256, fork_name: ForkName) -> Self {
        let num_codes = blocks.iter().map(|w| w.codes.len()).sum();
        let num_states = blocks.iter().map(|w| w.states.len()).sum();
        let mut codes = HashSet::with_capacity(num_codes);
        let mut states = HashSet::with_capacity(num_states);

        let blocks = blocks
            .iter()
            .map(|block| BlockWitness {
                chain_id: block.chain_id,
                header: block.header.clone(),
                pre_state_root: block.pre_state_root,
                transaction: block.transaction.clone(),
                withdrawals: block.withdrawals.clone(),
                states: block
                    .states
                    .iter()
                    .filter(|s| states.insert(*s))
                    .cloned()
                    .collect(),
                codes: block
                    .codes
                    .iter()
                    .filter(|c| codes.insert(*c))
                    .cloned()
                    .collect(),
            })
            .collect();

        Self {
            blocks,
            prev_msg_queue_hash,
            fork_name,
        }
    }

    pub fn new_v1(blocks: &[BlockWitness]) -> Self {
        Self::new(blocks, Default::default(), ForkName::EuclidV1)
    }

    pub fn new_v2(blocks: &[BlockWitness], prev_msg_queue_hash: B256) -> Self {
        Self::new(blocks, prev_msg_queue_hash, ForkName::EuclidV2)
    }
}
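Note how `ChunkWitness::new` above deduplicates states and codes across blocks: `HashSet::insert` returns `true` only for a value not already present, so the `filter` keeps exactly the first occurrence of each state node or bytecode. A minimal, self-contained illustration of that idiom:

use std::collections::HashSet;

fn main() {
    let items = ["a", "b", "a", "c", "b"];
    let mut seen = HashSet::new();
    // insert() is true only the first time a value is seen, so repeats drop out.
    let deduped: Vec<_> = items.iter().filter(|s| seen.insert(**s)).collect();
    assert_eq!(deduped, vec![&"a", &"b", &"c"]);
}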
@@ -1,21 +0,0 @@
// re-export for a compatible interface with old circuit/types for prover

pub mod bundle {
    pub use types_base::public_inputs::bundle::{BundleInfo, BundleInfoV1, BundleInfoV2};
    pub use types_bundle::*;
}

pub mod batch {
    pub use types_base::public_inputs::batch::{ArchivedBatchInfo, BatchInfo, VersionedBatchInfo};
    pub use types_batch::*;
}

pub mod chunk {
    pub use types_base::public_inputs::chunk::{
        ArchivedChunkInfo, BlockContextV2, ChunkInfo, SIZE_BLOCK_CTX, VersionedChunkInfo,
    };
    pub use types_chunk::*;
}

pub use types_agg;
pub use types_base::{public_inputs, utils};
@@ -326,3 +326,53 @@ func (s TxStatus) String() string {
        return fmt.Sprintf("Unknown TxStatus (%d)", int32(s))
    }
}

// BlobUploadStatus represents the status of a blob upload
type BlobUploadStatus int

const (
    // BlobUploadStatusUndefined indicates an undefined status
    BlobUploadStatusUndefined BlobUploadStatus = iota
    // BlobUploadStatusPending indicates a pending upload status
    BlobUploadStatusPending
    // BlobUploadStatusUploaded indicates a successful upload status
    BlobUploadStatusUploaded
    // BlobUploadStatusFailed indicates a failed upload status
    BlobUploadStatusFailed
)

func (s BlobUploadStatus) String() string {
    switch s {
    case BlobUploadStatusPending:
        return "BlobUploadStatusPending"
    case BlobUploadStatusUploaded:
        return "BlobUploadStatusUploaded"
    case BlobUploadStatusFailed:
        return "BlobUploadStatusFailed"
    default:
        return fmt.Sprintf("Unknown BlobUploadStatus (%d)", int32(s))
    }
}

// BlobStoragePlatform represents the platform a blob is uploaded to
type BlobStoragePlatform int

const (
    // BlobStoragePlatformUndefined indicates an undefined platform
    BlobStoragePlatformUndefined BlobStoragePlatform = iota
    // BlobStoragePlatformS3 represents AWS S3
    BlobStoragePlatformS3
    // BlobStoragePlatformArweave represents the storage blockchain Arweave
    BlobStoragePlatformArweave
)

func (s BlobStoragePlatform) String() string {
    switch s {
    case BlobStoragePlatformS3:
        return "BlobStoragePlatformS3"
    case BlobStoragePlatformArweave:
        return "BlobStoragePlatformArweave"
    default:
        return fmt.Sprintf("Unknown BlobStoragePlatform (%d)", int32(s))
    }
}
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
{"metadata":{"bundle_info":{"chain_id":333333,"msg_queue_hash":"0x0101010101010101010101010101010101010101010101010101010101010101","num_batches":2,"prev_state_root":"0x5302a56cbbec7d14d48d592b805d4ec3c7011439dfaa90d44deee02a9326d203","prev_batch_hash":"0xabacadaeaf000000000000000000000000000000000000000000000000000000","post_state_root":"0xaf6696afb2e11052490051f0f9f6444be6e9f5bb82beb3c3dae846cfa59ed6e0","batch_hash":"0xf0ee5d6b9cd739eb1ff816a58486af8b08d42a8c50d6e5998e7a3947c7aae2a9","withdraw_root":"0x0000000000000000000000000000000000000000000000000000000000000000"},"bundle_pi_hash":"0x2028510c403837c6ed77660fd92814ba61d7b746e7268cc8dfc14d163d45e6bd"},"proof":{"proof":"CfpNiL6UpegsK3VcoAj9ey5daMbZDFiF1XpCKvrOeN0MPPLNCDrllJL/gN0E3qmq20kGLYpBQ8aZ3sgUrxpSyA+9GKK8NhZoIM75adOnV8AYCLXpmxfS81MxIai/+ghxDIUvJQJVgWKJPsMQp4lO/Qltc4eCNWeoR2jHua/VzSASQXDDQ5ozD6i448TVkjKiyEcHwFFMMuOebFUzDc85hA4AJGM1T9bPl5VVQkEpijdNF+1lzUfi27U0XRQbYz8aE8hiCLxR8Z2bHg65dvfa+TsaDI8oAlz33Q1yIadZBtceKsH53P5u6vwWp0dQvw8DGNv8G5zvsayHPNCvy4xz8hRT3E4G0Ome8voqqOxrc/A8u2fE6LoXKswvU6Uquv+LHwGMbTugRvQ0BBXlLQ29Hvj18rDzS6ll0OnEcRiaaEkGOZy7Kq1PGiF7ZxMZsJYCbhyPgg4TKpesYDUJygEN0iGNX90dmyzGLTTgJATMYBGD2U+XP/T+UOMbxFTl3TFNHWlCPhEAu5LBwZ0pD3XV1xNW1iUqwTSfg7Qz1SOUYkot10Q8EAKeXk3hluHK+nSQhOMfWC4tnvfQdMqepfymwwArzA/9GMA/Two7yuzgCz7vHb+56YKPZiDrh4cqSvpVI92hCF8GWHaTqWDR0fikx2Y7GLX8YBM3Rx8reQE+LYYGEJHJzD4cIc0MKiuet605ZPSAaKpb8JM2EgrCAfw+QAhBiwXQ3HOQkrt17tzqNJH7IeHF761v43D9w+IeqvetKEgYXEH3fHmN00dLV2Uws8C4956qze+SG81ScnZzbrIeiO9lnmUXSFzrL40K+3NqCZcFnfLhVidyEJepzJi50yOK5BUJdMFdNtvHtprICqLKyb7aRg39qoZ7RqyJTg5nAjQQBGelvRu/AN6zdyxja73Jo5gEovdIiMybi/IhfMwKGWgiRaOGxyHx9KZ/ZA/w7r3rce6vuDsUhk5hsgVj4wUW3BqoZ8iRIH8X6AjK1xli+S/HfgAkfmUVwNNBOcgYEcrqEbswsfYKOcoFn71DISLK0jmB44LTNyGxoWBMpIAOf/gGhQSNk0ojd4n4UXxShsqmJ57Kudw/mGimMm+Crhr5asxeiFH0eJNBgUEXDuveqE1d20UTRJ1UJ/hZGomsDLebTojSTtsMLWTtx/4Mqg+g3Odte1WKN6CgxF4kGRcW2tE3D1jiBys5FTHMAhmka3mUBwlciT7syDWBDlYVuSmwppCghdBMQfQL4s3Uh0vRG28LkU+UXcwYXwh3UK6cA1bBnKfAa9k7P5BuMxVh8p6he6EZr0kGNjKGPSxuVxgczO/C32GP+HVVsWlIMNmgB4GeMHIN3yJampOrLZIMlQuP9d9kOicvRia1ge5sFtT+Vmthnp1F7sR3P+ADB/WxKSxVbiLaVBo+zm/rZbyM9vU0CVLD69lzPC6xKcFkxewlWJU6o7rOz1qzh47fT+8qUcVYfpCSEtT/U8eX2JFnXCb0PPXWivofI28tnsuS8GjwUiOyzCoxxuIEOyz1HNRXBcO2dSKR2qM41zUs0btA2JkA3hTVW8YWn8czHxrZyooooaumzbUPQBOqO3fewnLLyQ9etBcjZJ8Xm/B1EBk9cRPWDjgx5Hq8C0soA+EsoNoaSQJu67HuFTRd/OWvKSliCoj1XVcqBobnJWmTU7kAgi73pMaq/G4ot2rRFSL9MbkJgHCyxBkrl9nkCVUJC5GphsrDS5P5/bmRS3iTNdxiXAzdwOIQqJpEO54oN+3CHZuZuUOgCcWTI3uxWq/gBDJrBTsv8EUqtNQJve0qwIh2PUuJl5DIqF0CvswN649gywc=","instances":"AAAAAAAAAAAAAAAAAAAAAAAAAAAApvhdIlw19IwSvukAAAAAAAAAAAAAAAAAAAAAAAAAAAAl72fyrHk3TaguHQAAAAAAAAAAAAAAAAAAAAAAAAAAAAALh9HvEG69AvDlAAAAAAAAAAAAAAAAAAAAAAAAAAAAkGY9R6S+t36FIrAAAAAAAAAAAAAAAAAAAAAAAAAAAACoNqt7QwZoXUpj/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAdaREhypq22OmnAAAAAAAAAAAAAAAAAAAAAAAAAAAAOXf2Vj0jGD1q4xQAAAAAAAAAAAAAAAAAAAAAAAAAAADZYAdKTg7m4hBHGgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAll4nXKE0us1IMAAAAAAAAAAAAAAAAAAAAAAAAAAAAFfnJ8YXlwczTsyEAAAAAAAAAAAAAAAAAAAAAAAAAAAArXqULkWYvNST9PQAAAAAAAAAAAAAAAAAAAAAAAAAAAAArqteSdJMySnbMAC5TUWus+SXtvRWUNmCSMiMb4aZvb4hpJ5yXqjtih6gAIn9WQUOx/Z/rbbdComU0hCSwKwrewQgB3KolXKensAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA7QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
B3AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC6AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC3AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA5wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADfAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAWAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAL0="},"vk":"AhYAAAAABAAAAD2PumVP6pqldS0PKWW8Q4IvnE/rvtm5/2fXvG196sYhKtVFtg+WFGYJrU+eMUKZVjPurMpM8kbYiXvE18bnsU4Nu8s47Xabxy0EViND1dzsu5HicdAWl0xG5C+VpO2faJdK4nGwtD4WHtbdqWY72nSY5aKSDxAYO85vLy+9cJZlQsMNQlhTi/2q9PYQpC4D3Uf8E+yZ7gvLhd6cFdErlg4Oq/nthQkfxPAarVYLUFNGW80SgIloMDhutrky34D+Csw8T9j5UXpHz3K/2yuVSXK6OvMG4/058TXG09qKgXYP","git_version":"9f48bc4"}
File diff suppressed because one or more lines are too long
@@ -10,13 +10,6 @@ import (
    "github.com/scroll-tech/go-ethereum/common/hexutil"
)

const (
    EuclidFork   = "euclid"
    EuclidV2Fork = "euclidV2"

    EuclidV2ForkNameForProver = "euclidv2"
)

// ProofType represents the type of task.
type ProofType uint8

@@ -46,10 +39,12 @@

// ChunkTaskDetail is a type containing ChunkTask detail for chunk task.
type ChunkTaskDetail struct {
    // use one of the string of EuclidFork / EuclidV2Fork
    Version uint8 `json:"version"`
    // use one of the string of "euclidv1" / "euclidv2"
    ForkName         string        `json:"fork_name"`
    BlockHashes      []common.Hash `json:"block_hashes"`
    PrevMsgQueueHash common.Hash   `json:"prev_msg_queue_hash"`
    PostMsgQueueHash common.Hash   `json:"post_msg_queue_hash"`
}

// it is a hex-encoded big integer with a fixed length of 48 bytes
@@ -97,40 +92,59 @@ func (e *Byte48) UnmarshalJSON(input []byte) error {

// BatchTaskDetail is a type containing BatchTask detail.
type BatchTaskDetail struct {
    // use one of the string of EuclidFork / EuclidV2Fork
    ForkName        string              `json:"fork_name"`
    ChunkInfos      []*ChunkInfo        `json:"chunk_infos"`
    ChunkProofs     []*OpenVMChunkProof `json:"chunk_proofs"`
    BatchHeader     interface{}         `json:"batch_header"`
    BlobBytes       []byte              `json:"blob_bytes"`
    KzgProof        Byte48              `json:"kzg_proof,omitempty"`
    KzgCommitment   Byte48              `json:"kzg_commitment,omitempty"`
    ChallengeDigest common.Hash         `json:"challenge_digest,omitempty"`
    Version uint8 `json:"version"`
    // use one of the string of "euclidv1" / "euclidv2"
    ForkName      string              `json:"fork_name"`
    ChunkProofs   []*OpenVMChunkProof `json:"chunk_proofs"`
    BatchHeader   interface{}         `json:"batch_header"`
    BlobBytes     []byte              `json:"blob_bytes"`
    KzgProof      *Byte48             `json:"kzg_proof,omitempty"`
    KzgCommitment *Byte48             `json:"kzg_commitment,omitempty"`
    // ChallengeDigest should be a common.Hash type if it is not nil
    ChallengeDigest interface{} `json:"challenge_digest,omitempty"`
}

// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
type BundleTaskDetail struct {
    // use one of the string of EuclidFork / EuclidV2Fork
    Version uint8 `json:"version"`
    // use one of the string of "euclidv1" / "euclidv2"
    ForkName    string              `json:"fork_name"`
    BatchProofs []*OpenVMBatchProof `json:"batch_proofs"`
    BundleInfo  *OpenVMBundleInfo   `json:"bundle_info,omitempty"`
}

type RawBytes []byte

func (r RawBytes) MarshalJSON() ([]byte, error) {
    if r == nil {
        return []byte("null"), nil
    }
    // Marshal the []byte as a JSON array of numbers
    rn := make([]uint16, len(r))
    for i := range r {
        rn[i] = uint16(r[i])
    }
    return json.Marshal(rn)
}

// ChunkInfo is for calculating pi_hash for chunk
type ChunkInfo struct {
    ChainID       uint64      `json:"chain_id"`
    PrevStateRoot common.Hash `json:"prev_state_root"`
    PostStateRoot common.Hash `json:"post_state_root"`
    WithdrawRoot  common.Hash `json:"withdraw_root"`
    DataHash      common.Hash `json:"data_hash"`
    IsPadding     bool        `json:"is_padding"`
    TxBytes       []byte      `json:"tx_bytes"`
    ChainID       uint64      `json:"chain_id"`
    PrevStateRoot common.Hash `json:"prev_state_root"`
    PostStateRoot common.Hash `json:"post_state_root"`
    WithdrawRoot  common.Hash `json:"withdraw_root"`
    DataHash      common.Hash `json:"data_hash"`
    IsPadding     bool        `json:"is_padding"`
    // TxBytes []byte `json:"tx_bytes"`
    TxBytesHash        common.Hash      `json:"tx_data_digest"`
    PrevMsgQueueHash   common.Hash      `json:"prev_msg_queue_hash"`
    PostMsgQueueHash   common.Hash      `json:"post_msg_queue_hash"`
    TxDataLength       uint64           `json:"tx_data_length"`
    InitialBlockNumber uint64           `json:"initial_block_number"`
    BlockCtxs          []BlockContextV2 `json:"block_ctxs"`
    PrevBlockhash      common.Hash      `json:"prev_blockhash"`
    PostBlockhash      common.Hash      `json:"post_blockhash"`
    EncryptionKey      RawBytes         `json:"encryption_key"`
}

// BlockContextV2 is the block context for euclid v2
@@ -142,10 +156,18 @@ type BlockContextV2 struct {
    NumL1Msgs uint16 `json:"num_l1_msgs"`
}

// Metric data carried with OpenVMProof
type OpenVMProofStat struct {
    TotalCycle         uint64 `json:"total_cycles"`
    ExecutionTimeMills uint64 `json:"execution_time_mills"`
    ProvingTimeMills   uint64 `json:"proving_time_mills"`
}

// Proof for a flattened VM proof
type OpenVMProof struct {
    Proof        []byte `json:"proofs"`
    PublicValues []byte `json:"public_values"`
    Proof        []byte           `json:"proofs"`
    PublicValues []byte           `json:"public_values"`
    Stat         *OpenVMProofStat `json:"stat,omitempty"`
}

// Proof for a flattened EVM proof
@@ -157,7 +179,8 @@ type OpenVMEvmProof struct {
// OpenVMChunkProof includes the proof info that are required for chunk verification and rollup.
type OpenVMChunkProof struct {
    MetaData struct {
        ChunkInfo *ChunkInfo `json:"chunk_info"`
        ChunkInfo    *ChunkInfo `json:"chunk_info"`
        TotalGasUsed uint64     `json:"chunk_total_gas"`
    } `json:"metadata"`

    VmProof *OpenVMProof `json:"proof"`
@@ -184,6 +207,7 @@ type OpenVMBatchInfo struct {
    ChainID          uint64      `json:"chain_id"`
    PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
    PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
    EncryptionKey    RawBytes    `json:"encryption_key"`
}

// BatchProof includes the proof info that are required for batch verification and rollup.
@@ -244,6 +268,7 @@ type OpenVMBundleInfo struct {
    PrevBatchHash common.Hash `json:"prev_batch_hash"`
    BatchHash     common.Hash `json:"batch_hash"`
    MsgQueueHash  common.Hash `json:"msg_queue_hash"`
    EncryptionKey RawBytes    `json:"encryption_key"`
}

// OpenVMBundleProof includes the proof info that are required for verification of a bundle of batch proofs.
23
common/utils/blob.go
Normal file
@@ -0,0 +1,23 @@
package utils

import (
    "crypto/sha256"
    "fmt"

    "github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)

// CalculateVersionedBlobHash calculates the kzg4844 versioned blob hash from a blob
func CalculateVersionedBlobHash(blob kzg4844.Blob) ([32]byte, error) {
    // calculate the kzg4844 commitment from the blob
    commit, err := kzg4844.BlobToCommitment(&blob)
    if err != nil {
        return [32]byte{}, fmt.Errorf("failed to get blob commitment, err: %w", err)
    }

    // calculate the kzg4844 versioned blob hash from the blob commitment
    hasher := sha256.New()
    vh := kzg4844.CalcBlobHashV1(hasher, &commit)

    return vh, nil
}
51
common/utils/blob_test.go
Normal file
@@ -0,0 +1,51 @@
package utils

import (
    "encoding/hex"
    "encoding/json"
    "os"
    "testing"

    "github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)

type BlobData struct {
    VersionedBlobHash string `json:"versionedBlobHash"`
    BlobData          string `json:"blobData"`
}

// TestCalculateVersionedBlobHash tests the CalculateVersionedBlobHash function
func TestCalculateVersionedBlobHash(t *testing.T) {
    // Read the test data
    data, err := os.ReadFile("../testdata/blobdata.json")
    if err != nil {
        t.Fatalf("Failed to read blobdata.json: %v", err)
    }

    var blobData BlobData
    if err := json.Unmarshal(data, &blobData); err != nil {
        t.Fatalf("Failed to parse blobdata.json: %v", err)
    }

    blobBytes, err := hex.DecodeString(blobData.BlobData)
    if err != nil {
        t.Fatalf("Failed to decode blob data: %v", err)
    }

    // Convert []byte to kzg4844.Blob
    var blob kzg4844.Blob
    copy(blob[:], blobBytes)

    // Calculate the hash
    calculatedHashBytes, err := CalculateVersionedBlobHash(blob)
    if err != nil {
        t.Fatalf("Failed to calculate versioned blob hash: %v", err)
    }

    calculatedHash := hex.EncodeToString(calculatedHashBytes[:])

    if calculatedHash != blobData.VersionedBlobHash {
        t.Fatalf("Hash mismatch: got %s, want %s", calculatedHash, blobData.VersionedBlobHash)
    }
}
@@ -5,7 +5,7 @@ import (
    "runtime/debug"
)

var tag = "v4.5.10"
var tag = "v4.7.5"

var commit = func() string {
    if info, ok := debug.ReadBuildInfo(); ok {
@@ -23,7 +23,7 @@ var commit = func() string {
    return "000000"
}()

// ZkVersion is the commit-id of common/libzkp/impl/cargo.lock/scroll-prover and halo2, concatenated by a "-"
// ZkVersion is the commit-id of cargo.lock/zkvm-prover and openvm, concatenated by a "-"
// The default `000000-000000` is set for integration test, and will be overwritten by coordinator's & prover's actual compilations (see their Makefiles).
var ZkVersion = "000000-000000"
2
coordinator/.gitignore
vendored
@@ -1,3 +1,5 @@
/build/bin
.idea
internal/logic/verifier/lib
libzkp.so
libzkp.dylib
@@ -1,27 +1,35 @@
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator
include ../build/common.mk

.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator libzkp

IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
LIBZKP_PATH=./internal/logic/libzkp/lib/$(LIB_ZKP_NAME)

ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
    ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
    HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
    ZKVM_VERSION=$(shell grep -m 1 "zkvm-prover?" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
    OPENVM_VERSION=$(shell grep -m 1 "openvm.git" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
    ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
    HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
    ZKVM_VERSION=$(shell grep -m 1 "zkvm-prover?" ../Cargo.lock | cut -d "\#" -f2 | cut -c-7)
    OPENVM_VERSION=$(shell grep -m 1 "openvm.git" ../Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif

ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
ZK_VERSION=${ZKVM_VERSION}-${OPENVM_VERSION}

test:
    go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...

libzkp:
    cd ../common/libzkp/impl && cargo clean && cargo build --release && cp ./target/release/libzkp.so ../interface/
    rm -rf ./internal/logic/verifier/lib && cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
$(LIBZKP_PATH):
    $(MAKE) -C ./internal/logic/libzkp build

coordinator_api: libzkp ## Builds the Coordinator api instance.
clean_libzkp:
    $(MAKE) -C ./internal/logic/libzkp clean

libzkp: clean_libzkp $(LIBZKP_PATH)

coordinator_api: $(LIBZKP_PATH) ## Builds the Coordinator api instance.
    go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
    $(call macos_codesign,$(PWD)/build/bin/coordinator_api)

coordinator_cron:
    go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron
@@ -29,8 +37,21 @@ coordinator_cron:
coordinator_tool:
    go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool

coordinator_api_skip_libzkp:
    go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
localsetup: coordinator_api ## Local setup: build coordinator_api, copy config, and setup releases
    mkdir -p build/bin/conf
    @echo "Copying configuration files..."
    @if [ -f "$(PWD)/conf/config.template.json" ]; then \
        SRC="$(PWD)/conf/config.template.json"; \
    else \
        SRC="$(CURDIR)/conf/config.json"; \
    fi; \
    cp -fL "$$SRC" "$(CURDIR)/build/bin/conf/config.template.json"
    @echo "Setting up releases..."
    cd $(CURDIR)/build && bash setup_releases.sh


#coordinator_api_skip_libzkp:
#    go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api

mock_coordinator_api: ## Builds the mocked Coordinator instance.
    go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_api ./cmd/api
@@ -38,15 +59,15 @@ mock_coordinator_api: ## Builds the mocked Coordinator instance.
mock_coordinator_cron: ## Builds the mocked Coordinator instance.
    go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron

test-verifier: libzkp
test-verifier: $(LIBZKP_PATH)
    go test -tags ffi -timeout 0 -v ./internal/logic/verifier

test-gpu-verifier: libzkp
test-gpu-verifier: $(LIBZKP_PATH)
    go test -tags="gpu ffi" -timeout 0 -v ./internal/logic/verifier

lint: ## Lint the files - used for CI
    cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
    GOBIN=$(PWD)/build/bin go run ../build/lint.go
    cd ../ && cargo fmt --all -- --check && cargo clippy --release -- -D warnings

clean: ## Empty out the bin folder
    @rm -rf build/bin
@@ -10,6 +10,8 @@ See [monorepo prerequisites](../README.md#prerequisites).

## Build

Using Go version 1.22

```bash
make clean
make coordinator_api
72
coordinator/build/setup_releases.sh
Normal file
@@ -0,0 +1,72 @@
#!/bin/bash

# release version
if [ -z "${SCROLL_ZKVM_VERSION}" ]; then
    echo "SCROLL_ZKVM_VERSION not set"
    exit 1
fi

# default fork name from env or "galileo"
SCROLL_FORK_NAME="${SCROLL_FORK_NAME:-galileo}"

# set ASSET_DIR by reading from config.json
CONFIG_FILE="bin/conf/config.template.json"
if [ ! -f "$CONFIG_FILE" ]; then
    echo "Config file $CONFIG_FILE not found"
    exit 1
fi

# get the number of verifiers in the array
VERIFIER_COUNT=$(jq -r '.prover_manager.verifier.verifiers | length' "$CONFIG_FILE")

if [ "$VERIFIER_COUNT" = "null" ] || [ "$VERIFIER_COUNT" -eq 0 ]; then
    echo "No verifiers found in config file"
    exit 1
fi

echo "Found $VERIFIER_COUNT verifier(s) in config"

# iterate through each verifier entry
for ((i=0; i<$VERIFIER_COUNT; i++)); do
    # extract assets_path for current verifier
    ASSETS_PATH=$(jq -r ".prover_manager.verifier.verifiers[$i].assets_path" "$CONFIG_FILE")
    FORK_NAME=$(jq -r ".prover_manager.verifier.verifiers[$i].fork_name" "$CONFIG_FILE")

    # skip if this verifier's fork doesn't match the target fork
    if [ "$FORK_NAME" != "$SCROLL_FORK_NAME" ]; then
        echo "Expect $SCROLL_FORK_NAME, skip current fork ($FORK_NAME)"
        continue
    fi

    if [ "$ASSETS_PATH" = "null" ]; then
        echo "Warning: Could not find assets_path for verifier $i, skipping..."
        continue
    fi

    echo "Processing verifier $i ($FORK_NAME): assets_path=$ASSETS_PATH"

    # check if it's an absolute path (starts with /)
    if [[ "$ASSETS_PATH" = /* ]]; then
        # absolute path, use as is
        ASSET_DIR="$ASSETS_PATH"
    else
        # relative path, prefix with "bin/"
        ASSET_DIR="bin/$ASSETS_PATH"
    fi

    echo "Using ASSET_DIR: $ASSET_DIR"

    # create directory if it doesn't exist
    mkdir -p "$ASSET_DIR"

    # assets for verifier-only mode
    echo "Downloading assets for $FORK_NAME to $ASSET_DIR..."
    wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/verifier.bin -O ${ASSET_DIR}/verifier.bin
    wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/root_verifier_vk -O ${ASSET_DIR}/root_verifier_vk
    wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/openVmVk.json -O ${ASSET_DIR}/openVmVk.json

    echo "Completed downloading assets for $FORK_NAME"
    echo "---"
done

echo "All verifier assets downloaded successfully"
@@ -90,10 +90,12 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
    cfg.ProverManager = &coordinatorConfig.ProverManager{
        ProversPerSession: 1,
        Verifier: &coordinatorConfig.VerifierConfig{
            HighVersionCircuit: &coordinatorConfig.CircuitConfig{
                AssetsPath: "",
                ForkName: "euclidV2",
                MinProverVersion: "v4.4.89",
            MinProverVersion: "v4.4.89",
            Verifiers: []coordinatorConfig.AssetConfig{
                {
                    AssetsPath: "",
                    ForkName: "galileo",
                },
            },
        },
        BatchCollectionTimeSec: 60,
@@ -19,6 +19,7 @@ import (
)

var app *cli.App
var cfg *config.Config

func init() {
	// Set up coordinator app info.
@@ -29,16 +30,29 @@ func init() {
	app.Version = version.Version
	app.Flags = append(app.Flags, utils.CommonFlags...)
	app.Before = func(ctx *cli.Context) error {
		return utils.LogSetup(ctx)
		if err := utils.LogSetup(ctx); err != nil {
			return err
		}

		cfgFile := ctx.String(utils.ConfigFileFlag.Name)
		var err error
		cfg, err = config.NewConfig(cfgFile)
		if err != nil {
			log.Crit("failed to load config file", "config file", cfgFile, "error", err)
		}
		return nil
	}
	// sub commands
	app.Commands = []*cli.Command{
		{
			Name:   "verify",
			Usage:  "verify a proof, specified by [forkname] <type> <proof path>",
			Action: verify,
		},
	}
}

func action(ctx *cli.Context) error {
	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}
	db, err := database.InitDB(cfg.DB)
	if err != nil {
		log.Crit("failed to init db connection", "err", err)
109
coordinator/cmd/tool/verify.go
Normal file
@@ -0,0 +1,109 @@
package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"scroll-tech/coordinator/internal/logic/verifier"

	"scroll-tech/common/types/message"

	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"
)

func verify(cCtx *cli.Context) error {
	var forkName, proofType, proofPath string
	if cCtx.Args().Len() <= 2 {
		forkName = cfg.ProverManager.Verifier.Verifiers[0].ForkName
		proofType = cCtx.Args().First()
		proofPath = cCtx.Args().Get(1)
	} else {
		forkName = cCtx.Args().First()
		proofType = cCtx.Args().Get(1)
		proofPath = cCtx.Args().Get(2)
	}
	log.Info("verify proof", "in", proofPath, "type", proofType, "forkName", forkName)

	// Load the content of the proof file
	data, err := os.ReadFile(filepath.Clean(proofPath))
	if err != nil {
		return fmt.Errorf("error reading file: %w", err)
	}

	vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier, cfg.L2.ValidiumMode)
	if err != nil {
		return err
	}

	var ret bool
	switch strings.ToLower(proofType) {
	case "chunk":
		proof := &message.OpenVMChunkProof{}
		if err := json.Unmarshal(data, proof); err != nil {
			return err
		}
		vk, ok := vf.ChunkVk[forkName]
		if !ok {
			return fmt.Errorf("no vk loaded for fork %s", forkName)
		}
		if len(proof.Vk) != 0 {
			if !bytes.Equal(proof.Vk, vk) {
				return fmt.Errorf("vk mismatch: expected %s, got %s",
					base64.StdEncoding.EncodeToString(vk),
					base64.StdEncoding.EncodeToString(proof.Vk),
				)
			}
		} else {
			proof.Vk = vk
		}

		ret, err = vf.VerifyChunkProof(proof, forkName)
	case "batch":
		proof := &message.OpenVMBatchProof{}
		if err := json.Unmarshal(data, proof); err != nil {
			return err
		}
		vk, ok := vf.BatchVk[forkName]
		if !ok {
			return fmt.Errorf("no vk loaded for fork %s", forkName)
		}
		if len(proof.Vk) != 0 {
			if !bytes.Equal(proof.Vk, vk) {
				return fmt.Errorf("vk mismatch: expected %s, got %s",
					base64.StdEncoding.EncodeToString(vk),
					base64.StdEncoding.EncodeToString(proof.Vk),
				)
			}
		} else {
			proof.Vk = vk
		}

		ret, err = vf.VerifyBatchProof(proof, forkName)
	case "bundle":
		proof := &message.OpenVMBundleProof{}
		if err := json.Unmarshal(data, proof); err != nil {
			return err
		}
		vk, ok := vf.BundleVk[forkName]
		if !ok {
			return fmt.Errorf("no vk loaded for fork %s", forkName)
		}
		proof.Vk = vk

		ret, err = vf.VerifyBundleProof(proof, forkName)
	default:
		return fmt.Errorf("unsupported proof type %s", proofType)
	}

	if err != nil {
		return err
	}
	log.Info("verified", "ret", ret)
	return nil
}
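An illustrative invocation of the new verify subcommand. The binary name and file paths are assumptions (the tool lives under `coordinator/cmd/tool`, but this diff does not show how it is built or named); the argument order follows the Usage string above:

```bash
# With an explicit fork name: verify <forkname> <type> <proof path>
./build/bin/coordinator_tool --config ./conf/config.json verify galileo chunk ./proofs/chunk_proof.json

# Fork name omitted: falls back to the first entry in verifier.verifiers
./build/bin/coordinator_tool --config ./conf/config.json verify batch ./proofs/batch_proof.json
```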
@@ -7,11 +7,18 @@
    "batch_collection_time_sec": 180,
    "chunk_collection_time_sec": 180,
    "verifier": {
      "high_version_circuit": {
        "assets_path": "assets",
        "fork_name": "euclidV2",
        "min_prover_version": "v4.4.45"
      }
      "min_prover_version": "v4.4.45",
      "verifiers": [
        {
          "features": "legacy_witness:openvm_13",
          "assets_path": "assets_feynman",
          "fork_name": "feynman"
        },
        {
          "assets_path": "assets",
          "fork_name": "galileo"
        }
      ]
    }
  },
  "db": {
@@ -21,7 +28,10 @@
    "maxIdleNum": 20
  },
  "l2": {
    "chain_id": 111
    "chain_id": 111,
    "l2geth": {
      "endpoint": "not needed to be specified for mocking"
    }
  },
  "auth": {
    "secret": "prover secret key",
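A small sketch of how setup_releases.sh above resolves assets from this config shape; the sample file path is the one hard-coded in the script, and the expected output assumes the exact "verifiers" array shown in this hunk:

```bash
# List each fork and its assets_path the way setup_releases.sh reads them.
jq -r '.prover_manager.verifier.verifiers[] | "\(.fork_name) -> \(.assets_path)"' \
  bin/conf/config.template.json
# Expected output:
#   feynman -> assets_feynman
#   galileo -> assets
```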
Some files were not shown because too many files have changed in this diff.