Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 23:18:07 -05:00)

Compare commits: refactor/z...tools/get- (190 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 634cb73ff2 | |
| | dced0c6a82 | |
| | f045984c99 | |
| | 38af170acc | |
| | c65622b7f6 | |
| | 80af42695d | |
| | 4c4cff0ca4 | |
| | 0df9ede841 | |
| | d1138653e0 | |
| | 1572680566 | |
| | 8f4fc9af95 | |
| | 917b143557 | |
| | c4849251c6 | |
| | 9bb768e454 | |
| | 9033471930 | |
| | 886af073c1 | |
| | 2b318ec7c7 | |
| | 4c2a75576f | |
| | 2a8330c346 | |
| | d82e109360 | |
| | 42bfcb84d7 | |
| | 83c0a0870c | |
| | 78a458daa5 | |
| | 01d0e48e9a | |
| | 5cfb8b6a69 | |
| | b59db732c3 | |
| | 899476731d | |
| | 1bec964097 | |
| | b73acca200 | |
| | 77dceaea35 | |
| | d0cb8b9aa5 | |
| | ed057286d9 | |
| | b3e46673f6 | |
| | 2fb27ceb3d | |
| | e3332885ed | |
| | 3ee2d2b39c | |
| | 4b21c79443 | |
| | c6f0299373 | |
| | 3454c6c670 | |
| | 901693a2c0 | |
| | 0bb53140f5 | |
| | 09790c4448 | |
| | ae212a919a | |
| | 9b5c42e9d9 | |
| | 60877d3c16 | |
| | 07d1621310 | |
| | 11afeb1354 | |
| | cf41048c0a | |
| | 77d63226c5 | |
| | 135073c0ad | |
| | bab0e4f8d6 | |
| | 2d620ddf4f | |
| | 8befb84910 | |
| | 4822d38aba | |
| | cb87c7aedd | |
| | 3a3db5fe32 | |
| | b4546af434 | |
| | 459941d942 | |
| | 9f480e5397 | |
| | 7d4ff80edf | |
| | 5869bfd825 | |
| | 12a262ad99 | |
| | 7d5b77a36c | |
| | 5f8bb53dce | |
| | 87e1235c7f | |
| | 86e6555a54 | |
| | e3b17a0740 | |
| | ef9e25f14c | |
| | 0fc28cb511 | |
| | ad2e94e190 | |
| | 2846ecffa5 | |
| | 0e82c63ac4 | |
| | 9996af6227 | |
| | 8cf087c63b | |
| | b984341991 | |
| | 7486236a7a | |
| | a6ed321666 | |
| | 8db4e5c77d | |
| | 5cf8cda8a7 | |
| | bcc6b0f7e0 | |
| | fe6451b76c | |
| | be88ef6c39 | |
| | 64368f9a79 | |
| | f288179451 | |
| | b8c7ec2b22 | |
| | 88da49383c | |
| | 1ea9acafa3 | |
| | c743efd99e | |
| | 2d40f0f942 | |
| | fcbaa674c6 | |
| | 110083c6c8 | |
| | b3c1df7557 | |
| | 893bf18d62 | |
| | 7ec6d478b3 | |
| | eacdc78ba7 | |
| | 2cc9f65852 | |
| | af381223f3 | |
| | bb6ee2c932 | |
| | e99a8515b9 | |
| | 38b3239c6b | |
| | d987931e30 | |
| | 90d15637eb | |
| | 4d677b344b | |
| | d57e6b0e7b | |
| | 9b462e4c98 | |
| | c9f6e8c6e1 | |
| | 867307d576 | |
| | 20dffe4ea5 | |
| | 57d50b7183 | |
| | 7a70e374b8 | |
| | 0799dd48f2 | |
| | 224546e380 | |
| | 95adcc378f | |
| | 47219f2d86 | |
| | ab7038c0a7 | |
| | d79aaef35a | |
| | da963313b6 | |
| | f27ddb7f8e | |
| | 94bee1903a | |
| | b7e7d1a1f1 | |
| | f1ea4b315c | |
| | 8b08a57f63 | |
| | a868bc1531 | |
| | 101cc46bd9 | |
| | 9f4c9ee150 | |
| | 03c63a62cf | |
| | b30f4d0b00 | |
| | 4333d51bef | |
| | 82dd5e0e5e | |
| | f91c999005 | |
| | c8b614fd2f | |
| | a1c4562432 | |
| | d6674e8a3d | |
| | 55b32e1c0c | |
| | 8ea431514d | |
| | 26a49cb2a3 | |
| | e27ab5a396 | |
| | 554a233928 | |
| | 673777fe63 | |
| | 7353f30ff6 | |
| | eb5758b693 | |
| | 47a6c23b1f | |
| | 081d28988d | |
| | 782e019f9c | |
| | 89ede0d315 | |
| | a55de1fc09 | |
| | ed394a6369 | |
| | 121ce09c80 | |
| | 0125dd62a6 | |
| | bb9d404e85 | |
| | e1a0bab452 | |
| | 50ebf179fd | |
| | 01fa3b34a7 | |
| | 2e9827a750 | |
| | 867fda6952 | |
| | fbc14ac91b | |
| | 37924b0ae7 | |
| | 8b57dd6381 | |
| | f13863e542 | |
| | d3acd6b510 | |
| | 83c73f8458 | |
| | bf084368c5 | |
| | d503d4a990 | |
| | ac17696171 | |
| | b424cef816 | |
| | e5ad9c618d | |
| | 848d3a6827 | |
| | 2bd0655fda | |
| | f01af24908 | |
| | 2de45f0d54 | |
| | c3a3bad800 | |
| | 9412c7ff3a | |
| | 5f2295043e | |
| | 69a80d4a4a | |
| | 8db5339c1f | |
| | 99c0a9fac5 | |
| | f4e17bcca6 | |
| | e713424e5c | |
| | 2efbbd7d77 | |
| | 310abdd543 | |
| | 5a479c3a08 | |
| | 783b965deb | |
| | 182f8e307c | |
| | b460d4a717 | |
| | 421afe9c30 | |
| | ca8d930bd6 | |
| | 940fde0cbf | |
| | 78c99636dc | |
| | 0c0c417829 | |
| | 41606fe7d7 | |
```diff
@@ -1,16 +0,0 @@
-.github
-.gitignore
-.dockerignore
-Dockerfile
-Dockerfile.backup
-.output
-docs
-openvm-clippy
-target
```
.github/workflows/common.yml (vendored): 4 changes

```diff
@@ -42,10 +42,6 @@ jobs:
         uses: Swatinem/rust-cache@v2
         with:
           workspaces: "common/libzkp/impl -> target"
-      - name: Setup SSH for private repos
-        uses: webfactory/ssh-agent@v0.9.0
-        with:
-          ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
      - name: Lint
        working-directory: 'common'
        run: |
```
.github/workflows/docker.yml (vendored): 7 changes

```diff
@@ -307,13 +307,6 @@ jobs:
           REPOSITORY: coordinator-api
         run: |
           aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
-      - name: Setup SSH for private repos
-        uses: webfactory/ssh-agent@v0.9.0
-        with:
-          ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
-      - name: Run custom script
-        run: |
-          ./build/dockerfiles/coordinator-api/init-openvm.sh
      - name: Build and push
        uses: docker/build-push-action@v3
        env:
```
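The `aws ecr` line retained above is an ensure-exists idiom worth spelling out. A minimal standalone form, with the repository name taken from the step above and the `--region` flag omitted (assumes a default region is configured):

```bash
# describe-repositories exits non-zero when the repo is missing; `&& :` turns
# success into a no-op, so `||` runs create-repository only on that failure.
REPOSITORY=coordinator-api
aws ecr describe-repositories --repository-names "$REPOSITORY" \
  && : \
  || aws ecr create-repository --repository-name "$REPOSITORY"
```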
.github/workflows/intermediate-docker.yml (vendored): 2 changes

```diff
@@ -24,7 +24,6 @@ on:
       options:
         - nightly-2023-12-03
         - nightly-2022-12-10
         - 1.86.0
       default: "nightly-2023-12-03"
     PYTHON_VERSION:
       description: "Python version"
@@ -48,7 +47,6 @@ on:
       type: choice
       options:
         - 0.1.41
         - 0.1.71
     BASE_IMAGE:
       description: "which intermediate image you want to update"
       required: true
```
.github/workflows/prover.yml (vendored, new file): 99 changes

```diff
@@ -0,0 +1,99 @@
+name: Prover
+
+on:
+  push:
+    branches:
+      - main
+      - staging
+      - develop
+      - alpha
+    paths:
+      - 'prover/**'
+      - '.github/workflows/prover.yml'
+  pull_request:
+    types:
+      - opened
+      - reopened
+      - synchronize
+      - ready_for_review
+    paths:
+      - 'prover/**'
+      - '.github/workflows/prover.yml'
+
+defaults:
+  run:
+    working-directory: 'prover'
+
+jobs:
+  skip_check:
+    runs-on: ubuntu-latest
+    outputs:
+      should_skip: ${{ steps.skip_check.outputs.should_skip }}
+    steps:
+      - id: skip_check
+        uses: fkirc/skip-duplicate-actions@v5
+        with:
+          cancel_others: 'true'
+          concurrent_skipping: 'same_content_newer'
+          paths_ignore: '["**/README.md"]'
+
+  fmt:
+    needs: [skip_check]
+    if: |
+      github.event.pull_request.draft == false &&
+      (github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
+    runs-on: ubuntu-latest
+    timeout-minutes: 15
+    steps:
+      - uses: actions/checkout@v4
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: nightly-2023-12-03
+          components: rustfmt
+      - name: Cargo cache
+        uses: Swatinem/rust-cache@v2
+        with:
+          workspaces: "prover -> target"
+      - name: Cargo check
+        run: cargo check --all-features
+      - name: Cargo fmt
+        run: cargo fmt --all -- --check
+
+  clippy:
+    needs: [skip_check, fmt]
+    if: |
+      github.event.pull_request.draft == false &&
+      (github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
+    runs-on: ubuntu-latest
+    timeout-minutes: 30
+    steps:
+      - uses: actions/checkout@v4
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: nightly-2023-12-03
+          components: clippy
+      - name: Cargo cache
+        uses: Swatinem/rust-cache@v2
+        with:
+          workspaces: "prover -> target"
+      - name: Run clippy
+        run: cargo clippy --all-features --all-targets -- -D warnings
+
+  compile:
+    needs: [skip_check, clippy]
+    if: |
+      github.event.pull_request.draft == false &&
+      (github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: nightly-2023-12-03
+      - name: Cache cargo
+        uses: Swatinem/rust-cache@v2
+        with:
+          workspaces: "prover -> target"
+      - name: Test
+        run: |
+          make prover
```
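The new workflow gates PRs on format, lint, and a build, chained via `needs:` so a clippy failure never wastes a compile job. A plausible local equivalent of the same three gates, assuming the pinned nightly toolchain is installed via rustup and the repo's `make prover` target exists as referenced above:

```bash
cd prover
cargo +nightly-2023-12-03 check --all-features
cargo +nightly-2023-12-03 fmt --all -- --check
cargo +nightly-2023-12-03 clippy --all-features --all-targets -- -D warnings
make prover   # what the `compile` job's Test step runs
```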
```diff
@@ -11,7 +11,7 @@ Please note that this project is released with a [Contributor Code of Conduct][c
 
 ## Contribute to Scroll
 
-Did you know there are many ways of contributing to Scroll? If you are looking to contribute to by adding Scroll to existing Dev Tools or by doing integrations please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. This repository covers the Scroll infrastructure and smart contracts, if you want to contribute to these areas continue reading this document.
+Did you know there are many ways of contributing to Scroll? If you are looking to contribute to by adding Scroll to existing Dev Tools or by doing integrations please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. If you are looking to contribute to Scroll's Halo2 zkEVM circuits please refer to the [zkEVM circuits](https://github.com/scroll-tech/zkevm-circuits) repo. This repository covers the Scroll infrastructure and smart contracts, if you want to contribute to these areas continue reading this document.
 
 ## Issues and PRs
```
Cargo.toml: 93 changes

```diff
@@ -1,93 +0,0 @@
-[workspace]
-members = [
-    "common/types-rs",
-    "common/types-rs/base",
-    "common/types-rs/aggregation",
-    "common/types-rs/chunk",
-    "common/types-rs/batch",
-    "common/types-rs/bundle",
-    "common/libzkp/impl",
-    "zkvm-prover/prover",
-    "zkvm-prover/verifier",
-    "zkvm-prover/integration",
-    "zkvm-prover/bin",
-]
-exclude = [
-    "prover"
-]
-
-resolver = "2"
-
-[workspace.package]
-authors = ["Scroll developers"]
-edition = "2021"
-homepage = "https://scroll.io"
-readme = "README.md"
-repository = "https://github.com/scroll-tech/scroll"
-version = "4.5.8"
-
-[workspace.dependencies]
-scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", package = "scroll-zkvm-prover"}
-
-openvm = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-build = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-transpiler = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-custom-insn = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-rv32im-guest = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-circuit = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-native-circuit = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-native-compiler = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-native-recursion = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-native-transpiler = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-continuations = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false }
-openvm-sdk = { git = "https://github.com/openvm-org/openvm.git", rev = "a0ae88f", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
-openvm-stark-sdk = { git = "https://github.com/openvm-org/stark-backend.git", tag = "v1.0.1" }
-
-sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade", features = ["scroll"] }
-sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade", features = ["scroll"] }
-sbv-kv = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade" }
-sbv-trie = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade" }
-sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade" }
-
-metrics = "0.23.0"
-metrics-util = "0.17"
-metrics-tracing-context = "0.16.0"
-
-alloy = { version = "0.11", default-features = false }
-alloy-primitives = { version = "0.8", default-features = false }
-# also use this to trigger "serde" feature for primitives
-alloy-serde = { version = "0.8", default-features = false }
-
-rkyv = "0.8"
-serde = { version = "1", default-features = false, features = ["derive"] }
-serde_json = { version = "1.0" }
-serde_with = "3.11.0"
-itertools = "0.14"
-tiny-keccak = "2.0"
-tracing = "0.1"
-eyre = "0.6"
-bincode_v1 = { version = "1.3", package = "bincode"}
-snark-verifier-sdk = { version = "0.2.0", default-features = false, features = [
-    "loader_halo2",
-    "halo2-axiom",
-    "display",
-] }
-once_cell = "1.20"
-base64 = "0.22"
-
-#TODO: upgrade
-vm-zstd = { git = "https://github.com/scroll-tech/rust-zstd-decompressor.git", tag = "v0.1.1" }
-
-scroll-zkvm-circuit-input-types = { path = "common/types-rs"}
-scroll-zkvm-verifier = { path = "zkvm-prover/verifier"}
-scroll-zkvm-prover = { path = "zkvm-prover/prover"}
-
-[patch.crates-io]
-alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v0.8.18-euclid-upgrade" }
-ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
-tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-euclid-upgrade" }
-
-[profile.maxperf]
-inherits = "release"
-lto = "fat"
-codegen-units = 1
```
```diff
@@ -1,6 +0,0 @@
-[patch."https://github.com/scroll-tech/scroll.git"]
-scroll-zkvm-circuit-input-types-base = { path = "../common/types-rs/base"}
-scroll-zkvm-circuit-input-types-aggregation = { path = "../common/types-rs/aggregation"}
-scroll-zkvm-circuit-input-types-chunk = { path = "../common/types-rs/chunk"}
-scroll-zkvm-circuit-input-types-batch = { path = "../common/types-rs/batch"}
-scroll-zkvm-circuit-input-types-bundle = { path = "../common/types-rs/bundle"}
```
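Both deleted manifests rely on Cargo's `[patch]` mechanism: `[patch.crates-io]` swaps a registry crate for a fork, and `[patch."<git-url>"]` (as in the file just above) redirects a git dependency to local paths. A sketch of how one might confirm which source actually won resolution; the command is standard Cargo, the package name is taken from the manifests above, and the output line is illustrative:

```console
$ cargo tree -i alloy-primitives
# The inverted tree prints the resolved source, e.g. something like:
# alloy-primitives v0.8.18 (https://github.com/scroll-tech/alloy-core?branch=v0.8.18-euclid-upgrade#...)
```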
```diff
@@ -9,10 +9,6 @@ RUN cargo chef prepare --recipe-path recipe.json
 FROM chef as zkp-builder
 COPY ./common/libzkp/impl/rust-toolchain ./
 COPY --from=planner /app/recipe.json recipe.json
-# run ./build/dockerfiles/coordinator-api/init-openvm.sh to get openvm-gpu
-COPY ./build/dockerfiles/coordinator-api/openvm-gpu /openvm-gpu
-COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
-COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
 RUN cargo chef cook --release --recipe-path recipe.json
 
 COPY ./common/libzkp/impl .
```
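The surrounding Dockerfile uses the cargo-chef two-stage pattern: a planner stage distills the dependency graph into a recipe, and the builder cooks that recipe before any source is copied, so dependency layers stay cached across source edits. A minimal self-contained sketch of the pattern, not this repo's actual Dockerfile; `chef` is assumed to be a Rust image with cargo-chef preinstalled:

```dockerfile
FROM chef AS planner
COPY . .
RUN cargo chef prepare --recipe-path recipe.json    # records the dependency graph only

FROM chef AS builder
COPY --from=planner /app/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json   # builds deps into a cached layer
COPY . .                                            # source changes don't bust the layer above
RUN cargo build --release
```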
```diff
@@ -1,24 +0,0 @@
-# openvm
-# same order and features as zkvm-prover/Cargo.toml.gpu
-[patch."ssh://git@github.com/scroll-tech/openvm-gpu.git"]
-openvm = { path = "/openvm-gpu/crates/toolchain/openvm", default-features = false }
-openvm-algebra-complex-macros = { path = "/openvm-gpu/extensions/algebra/complex-macros", default-features = false }
-openvm-algebra-guest = { path = "/openvm-gpu/extensions/algebra/guest", default-features = false }
-openvm-bigint-guest = { path = "/openvm-gpu/extensions/bigint/guest", default-features = false }
-openvm-build = { path = "/openvm-gpu/crates/toolchain/build", default-features = false }
-openvm-circuit = { path = "/openvm-gpu/crates/vm", default-features = false }
-openvm-custom-insn = { path = "/openvm-gpu/crates/toolchain/custom_insn", default-features = false }
-openvm-continuations = { path = "/openvm-gpu/crates/continuations", default-features = false }
-openvm-ecc-guest = { path = "/openvm-gpu/extensions/ecc/guest", default-features = false }
-openvm-instructions ={ path = "/openvm-gpu/crates/toolchain/instructions", default-features = false }
-openvm-keccak256-guest = { path = "/openvm-gpu/extensions/keccak256/guest", default-features = false }
-openvm-native-circuit = { path = "/openvm-gpu/extensions/native/circuit", default-features = false }
-openvm-native-compiler = { path = "/openvm-gpu/extensions/native/compiler", default-features = false }
-openvm-native-recursion = { path = "/openvm-gpu/extensions/native/recursion", default-features = false }
-openvm-native-transpiler = { path = "/openvm-gpu/extensions/native/transpiler", default-features = false }
-openvm-pairing-guest = { path = "/openvm-gpu/extensions/pairing/guest", default-features = false }
-openvm-rv32im-guest = { path = "/openvm-gpu/extensions/rv32im/guest", default-features = false }
-openvm-rv32im-transpiler = { path = "/openvm-gpu/extensions/rv32im/transpiler", default-features = false }
-openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics"] }
-openvm-sha256-guest = { path = "/openvm-gpu/extensions/sha256/guest", default-features = false }
-openvm-transpiler = { path = "/openvm-gpu/crates/toolchain/transpiler", default-features = false }
```
```diff
@@ -1,2 +0,0 @@
-[url "https://github.com/"]
-	insteadOf = ssh://git@github.com/
```
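This deleted gitconfig rewrote SSH GitHub URLs to HTTPS at fetch time via git's `url.<base>.insteadOf`; combined with the `[patch."ssh://..."]` table above, it let the same manifests resolve inside the container without SSH credentials. The equivalent imperative form, standard git and shown only for illustration:

```bash
git config --global url."https://github.com/".insteadOf ssh://git@github.com/
# After this, a fetch of ssh://git@github.com/scroll-tech/... transparently hits HTTPS.
```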
```diff
@@ -1,12 +0,0 @@
-#!/bin/bash
-set -uex
-
-OPENVM_GPU_COMMIT=dfa10b4
-
-DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
-
-# checkout openvm-gpu
-if [ ! -d $DIR/openvm-gpu ]; then
-  git clone git@github.com:scroll-tech/openvm-gpu.git $DIR/openvm-gpu
-fi
-cd $DIR/openvm-gpu && git fetch && git checkout ${OPENVM_GPU_COMMIT}
```
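The deleted helper is a clone-once-then-pin script: idempotent because the clone is guarded by the directory check, and reproducible because the checkout targets a fixed commit. A hypothetical HTTPS variant with the unquoted-path pitfalls fixed, not part of this change:

```bash
#!/bin/bash
set -uex

OPENVM_GPU_COMMIT=dfa10b4
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)

# Quote $DIR so paths with spaces survive; clone over HTTPS instead of SSH.
[ -d "$DIR/openvm-gpu" ] || git clone https://github.com/scroll-tech/openvm-gpu.git "$DIR/openvm-gpu"
git -C "$DIR/openvm-gpu" fetch origin
git -C "$DIR/openvm-gpu" checkout "$OPENVM_GPU_COMMIT"
```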
common/libzkp/impl/Cargo.lock (generated): 246 changes

```diff
@@ -2852,8 +2852,8 @@ dependencies = [
 
 [[package]]
 name = "openvm"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "bytemuck",
  "num-bigint 0.4.6",
@@ -2865,8 +2865,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-algebra-circuit"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "derive-new 0.6.0",
  "derive_more 1.0.0",
@@ -2894,7 +2894,7 @@ dependencies = [
 [[package]]
 name = "openvm-algebra-complex-macros"
 version = "0.1.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "openvm-macros-common",
  "quote",
@@ -2903,8 +2903,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-algebra-guest"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "halo2curves-axiom",
  "num-bigint 0.4.6",
@@ -2916,8 +2916,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-algebra-moduli-macros"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "openvm-macros-common",
  "quote",
@@ -2926,8 +2926,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-algebra-transpiler"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "openvm-algebra-guest",
  "openvm-instructions",
@@ -2940,8 +2940,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-bigint-circuit"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "derive-new 0.6.0",
  "derive_more 1.0.0",
@@ -2962,8 +2962,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-bigint-guest"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "num-bigint 0.4.6",
  "num-traits",
@@ -2976,8 +2976,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-bigint-transpiler"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "openvm-bigint-guest",
  "openvm-instructions",
@@ -2991,8 +2991,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-build"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "cargo_metadata",
  "eyre",
@@ -3003,8 +3003,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-circuit"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "backtrace",
  "cfg-if",
@@ -3034,8 +3034,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-circuit-derive"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "itertools 0.14.0",
  "quote",
@@ -3044,8 +3044,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-circuit-primitives"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "derive-new 0.6.0",
  "itertools 0.14.0",
@@ -3059,8 +3059,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-circuit-primitives-derive"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "itertools 0.14.0",
  "quote",
@@ -3069,8 +3069,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-continuations"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "derivative",
  "openvm-circuit",
@@ -3085,7 +3085,7 @@ dependencies = [
 [[package]]
 name = "openvm-custom-insn"
 version = "0.1.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -3094,8 +3094,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-ecc-circuit"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "derive-new 0.6.0",
  "derive_more 1.0.0",
@@ -3125,8 +3125,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-ecc-guest"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "ecdsa",
  "elliptic-curve",
@@ -3150,8 +3150,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-ecc-sw-macros"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "openvm-macros-common",
  "quote",
@@ -3160,8 +3160,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-ecc-transpiler"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "openvm-ecc-guest",
  "openvm-instructions",
@@ -3174,8 +3174,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-instructions"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "backtrace",
  "derive-new 0.6.0",
@@ -3191,8 +3191,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-instructions-derive"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "quote",
  "syn 2.0.98",
@@ -3200,8 +3200,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-keccak256-circuit"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "derive-new 0.6.0",
  "derive_more 1.0.0",
@@ -3226,8 +3226,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-keccak256-guest"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "openvm-platform",
  "tiny-keccak",
@@ -3235,8 +3235,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-keccak256-transpiler"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "openvm-instructions",
  "openvm-instructions-derive",
@@ -3249,16 +3249,16 @@ dependencies = [
 
 [[package]]
 name = "openvm-macros-common"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "syn 2.0.98",
 ]
 
 [[package]]
 name = "openvm-mod-circuit-builder"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "itertools 0.14.0",
  "num-bigint 0.4.6",
@@ -3276,8 +3276,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-native-circuit"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "derive-new 0.6.0",
  "derive_more 1.0.0",
@@ -3303,8 +3303,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-native-compiler"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "backtrace",
  "itertools 0.14.0",
@@ -3327,8 +3327,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-native-compiler-derive"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "quote",
  "syn 2.0.98",
@@ -3336,8 +3336,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-native-recursion"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "cfg-if",
  "itertools 0.14.0",
@@ -3364,8 +3364,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-pairing-circuit"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "derive-new 0.6.0",
  "derive_more 1.0.0",
@@ -3394,8 +3394,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-pairing-guest"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "group 0.13.0",
  "halo2curves-axiom",
@@ -3420,8 +3420,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-pairing-transpiler"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "openvm-instructions",
  "openvm-instructions-derive",
@@ -3434,8 +3434,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-platform"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "getrandom 0.2.15",
  "libm",
@@ -3445,8 +3445,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-poseidon2-air"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "derivative",
  "lazy_static",
@@ -3462,8 +3462,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-rv32-adapters"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "derive-new 0.6.0",
  "itertools 0.14.0",
@@ -3482,8 +3482,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-rv32im-circuit"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "derive-new 0.6.0",
  "derive_more 1.0.0",
@@ -3505,8 +3505,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-rv32im-guest"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "openvm-custom-insn",
  "strum_macros 0.26.4",
@@ -3514,8 +3514,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-rv32im-transpiler"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "openvm-instructions",
  "openvm-instructions-derive",
@@ -3530,8 +3530,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-sdk"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "async-trait",
  "bitcode",
@@ -3577,8 +3577,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-sha256-air"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "openvm-circuit-primitives",
  "openvm-stark-backend",
@@ -3588,8 +3588,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-sha256-circuit"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "derive-new 0.6.0",
  "derive_more 1.0.0",
@@ -3611,8 +3611,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-sha256-guest"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "openvm-platform",
  "sha2",
@@ -3620,8 +3620,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-sha256-transpiler"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "openvm-instructions",
  "openvm-instructions-derive",
@@ -3634,8 +3634,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-stark-backend"
-version = "1.0.0"
-source = "git+https://github.com/openvm-org/stark-backend.git?tag=v1.0.0#884f8e6aabf72bde00dc51f1f1121277bff73b1e"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/stark-backend.git?rev=b051e8978da9c829a76b262abf4a9736c8d1681e#b051e8978da9c829a76b262abf4a9736c8d1681e"
 dependencies = [
  "bitcode",
  "cfg-if",
@@ -3660,8 +3660,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-stark-sdk"
-version = "1.0.0"
-source = "git+https://github.com/openvm-org/stark-backend.git?tag=v1.0.0#884f8e6aabf72bde00dc51f1f1121277bff73b1e"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/stark-backend.git?rev=b051e8978da9c829a76b262abf4a9736c8d1681e#b051e8978da9c829a76b262abf4a9736c8d1681e"
 dependencies = [
  "derivative",
  "derive_more 0.99.19",
@@ -3695,8 +3695,8 @@ dependencies = [
 
 [[package]]
 name = "openvm-transpiler"
-version = "1.0.0"
-source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
+version = "1.0.0-rc.2"
+source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
 dependencies = [
  "elf",
  "eyre",
@@ -3737,7 +3737,7 @@ dependencies = [
 [[package]]
 name = "p3-air"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "p3-field",
  "p3-matrix",
@@ -3746,7 +3746,7 @@ dependencies = [
 [[package]]
 name = "p3-baby-bear"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "p3-field",
  "p3-mds",
@@ -3760,7 +3760,7 @@ dependencies = [
 [[package]]
 name = "p3-blake3"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "blake3",
  "p3-symmetric",
@@ -3770,7 +3770,7 @@ dependencies = [
 [[package]]
 name = "p3-bn254-fr"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "ff 0.13.0",
  "halo2curves",
@@ -3785,7 +3785,7 @@ dependencies = [
 [[package]]
 name = "p3-challenger"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "p3-field",
  "p3-maybe-rayon",
@@ -3797,7 +3797,7 @@ dependencies = [
 [[package]]
 name = "p3-commit"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "itertools 0.14.0",
  "p3-challenger",
@@ -3811,7 +3811,7 @@ dependencies = [
 [[package]]
 name = "p3-dft"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "itertools 0.14.0",
  "p3-field",
@@ -3824,7 +3824,7 @@ dependencies = [
 [[package]]
 name = "p3-field"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "itertools 0.14.0",
  "num-bigint 0.4.6",
@@ -3841,7 +3841,7 @@ dependencies = [
 [[package]]
 name = "p3-fri"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "itertools 0.14.0",
  "p3-challenger",
@@ -3860,7 +3860,7 @@ dependencies = [
 [[package]]
 name = "p3-goldilocks"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "num-bigint 0.4.6",
  "p3-dft",
@@ -3877,7 +3877,7 @@ dependencies = [
 [[package]]
 name = "p3-interpolation"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "p3-field",
  "p3-matrix",
@@ -3888,7 +3888,7 @@ dependencies = [
 [[package]]
 name = "p3-keccak"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "itertools 0.14.0",
  "p3-field",
@@ -3900,7 +3900,7 @@ dependencies = [
 [[package]]
 name = "p3-keccak-air"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "p3-air",
  "p3-field",
@@ -3914,7 +3914,7 @@ dependencies = [
 [[package]]
 name = "p3-matrix"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "itertools 0.14.0",
  "p3-field",
@@ -3929,7 +3929,7 @@ dependencies = [
 [[package]]
 name = "p3-maybe-rayon"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "rayon",
 ]
@@ -3937,7 +3937,7 @@ dependencies = [
 [[package]]
 name = "p3-mds"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "itertools 0.14.0",
  "p3-dft",
@@ -3951,7 +3951,7 @@ dependencies = [
 [[package]]
 name = "p3-merkle-tree"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "itertools 0.14.0",
  "p3-commit",
@@ -3968,7 +3968,7 @@ dependencies = [
 [[package]]
 name = "p3-monty-31"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "itertools 0.14.0",
  "num-bigint 0.4.6",
@@ -3989,7 +3989,7 @@ dependencies = [
 [[package]]
 name = "p3-poseidon"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "p3-field",
  "p3-mds",
@@ -4000,7 +4000,7 @@ dependencies = [
 [[package]]
 name = "p3-poseidon2"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "gcd",
  "p3-field",
@@ -4012,7 +4012,7 @@ dependencies = [
 [[package]]
 name = "p3-poseidon2-air"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "p3-air",
  "p3-field",
@@ -4028,7 +4028,7 @@ dependencies = [
 [[package]]
 name = "p3-symmetric"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "itertools 0.14.0",
  "p3-field",
@@ -4038,7 +4038,7 @@ dependencies = [
 [[package]]
 name = "p3-uni-stark"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "itertools 0.14.0",
  "p3-air",
@@ -4056,7 +4056,7 @@ dependencies = [
 [[package]]
 name = "p3-util"
 version = "0.1.0"
-source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
+source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
 dependencies = [
  "serde",
 ]
@@ -5674,7 +5674,7 @@ dependencies = [
 [[package]]
 name = "scroll-zkvm-circuit-input-types"
 version = "0.2.0"
-source = "git+https://github.com/scroll-tech/zkvm-prover.git?tag=v0.3.0#fcc09d1517e9d254284820fe66b087331e4b1bf4"
+source = "git+https://github.com/scroll-tech/zkvm-prover.git?tag=v0.2.0#5854f8179d14f2afa489a499290e0ff6a953b314"
 dependencies = [
  "alloy-primitives",
  "alloy-serde 0.8.3",
@@ -5696,8 +5696,8 @@ dependencies = [
 
 [[package]]
 name = "scroll-zkvm-prover"
-version = "0.3.0"
-source = "git+https://github.com/scroll-tech/zkvm-prover.git?tag=v0.3.0#fcc09d1517e9d254284820fe66b087331e4b1bf4"
+version = "0.2.0"
+source = "git+https://github.com/scroll-tech/zkvm-prover.git?tag=v0.2.0#5854f8179d14f2afa489a499290e0ff6a953b314"
 dependencies = [
  "alloy-primitives",
  "base64 0.22.1",
@@ -5732,8 +5732,8 @@ dependencies = [
 
 [[package]]
 name = "scroll-zkvm-verifier"
-version = "0.3.0"
-source = "git+https://github.com/scroll-tech/zkvm-prover.git?tag=v0.3.0#fcc09d1517e9d254284820fe66b087331e4b1bf4"
+version = "0.2.0"
+source = "git+https://github.com/scroll-tech/zkvm-prover.git?tag=v0.2.0#5854f8179d14f2afa489a499290e0ff6a953b314"
 dependencies = [
  "bincode",
  "eyre",
```
```diff
@@ -7,19 +7,25 @@ edition = "2021"
 [lib]
 crate-type = ["cdylib"]
 
-[dependencies]
-scroll-zkvm-prover.workspace = true
-scroll-zkvm-verifier.workspace = true
+[patch.crates-io]
+# patched add rkyv support & MSRV 1.77
+alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v0.8.21" }
+ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
+tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-openvm-v1.0.0-rc.1" }
 
-env_logger = "0.11.0"
+[dependencies]
+euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.2.0", package = "scroll-zkvm-prover" }
+euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.2.0", package = "scroll-zkvm-verifier" }
+
+base64 = "0.13.0"
+env_logger = "0.9.0"
 libc = "0.2"
 log = "0.4"
-base64.workspace = true
-once_cell.workspace = true
-serde.workspace = true
-serde_json.workspace = true
-anyhow = "1"
+once_cell = "1.19"
+serde = "1.0"
+serde_derive = "1.0"
+serde_json = "1.0.66"
+anyhow = "1.0.86"
 
 [profile.test]
 opt-level = 3
```
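The manifest change above swaps workspace-inherited dependencies for self-contained pins, matching the removal of the root Cargo.toml earlier in this diff. For readers unfamiliar with the mechanism, a hedged sketch of the two forms (crate name illustrative):

```toml
# Workspace inheritance (the removed form): the version is declared once in the
# workspace root's [workspace.dependencies] and members merely opt in; the member
# cannot build outside the workspace.
[dependencies]
serde.workspace = true

# Pinned form (the added form): the member carries its own version and builds
# standalone, at the cost of duplicating version choices across crates.
# serde = "1.0"
```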
```diff
@@ -26,6 +26,12 @@ pub unsafe extern "C" fn verify_chunk_proof(
 
 fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
     let fork_name_str = c_char_to_str(fork_name);
+    // Skip verification for darwinV2 as we can't host darwinV2 and euclid verifiers on the same
+    // binary.
+    if fork_name_str == "darwinV2" {
+        return true as c_char;
+    }
+
     let proof = c_char_to_vec(proof);
     let verifier = verifier::get_verifier(fork_name_str);
```
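The helpers used here, `c_char_to_str` and `c_char_to_vec`, are defined elsewhere in `common/libzkp`; their actual implementations are not part of this diff. A self-contained sketch of what such C-boundary conversions typically look like, hypothetical and for orientation only:

```rust
use std::ffi::CStr;
use std::os::raw::c_char;

// Borrow the NUL-terminated C string and view it as &str.
unsafe fn c_char_to_str<'a>(input: *const c_char) -> &'a str {
    CStr::from_ptr(input).to_str().expect("invalid UTF-8 across FFI")
}

// Copy the C string's bytes (without the trailing NUL) into an owned buffer.
unsafe fn c_char_to_vec(input: *const c_char) -> Vec<u8> {
    CStr::from_ptr(input).to_bytes().to_vec()
}

// The result travels back as a c_char: `true as c_char` is 1, `false as c_char` is 0,
// which is why the darwinV2 short-circuit above can simply `return true as c_char`.
```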
```diff
@@ -3,8 +3,8 @@ use super::{ProofVerifier, TaskType, VKDump};
 use anyhow::Result;
 
 use crate::utils::panic_catch;
-use scroll_zkvm_prover::{BatchProof, BundleProof, ChunkProof};
-use scroll_zkvm_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV1, ChunkVerifier};
+use euclid_prover::{BatchProof, BundleProof, ChunkProof};
+use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV1, ChunkVerifier};
 use std::{fs::File, path::Path};
 
 pub struct EuclidVerifier {
@@ -53,13 +53,12 @@ impl ProofVerifier for EuclidVerifier {
     }
 
     fn dump_vk(&self, file: &Path) {
-        use base64::{prelude::BASE64_STANDARD, Engine};
         let f = File::create(file).expect("Failed to open file to dump VK");
 
         let dump = VKDump {
-            chunk_vk: BASE64_STANDARD.encode(self.chunk_verifier.get_app_vk()),
-            batch_vk: BASE64_STANDARD.encode(self.batch_verifier.get_app_vk()),
-            bundle_vk: BASE64_STANDARD.encode(self.bundle_verifier.get_app_vk()),
+            chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
+            batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
+            bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
         };
         serde_json::to_writer(f, &dump).expect("Failed to dump VK");
     }
```
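The `dump_vk` change above (repeated for the EuclidV2 verifier below) is a move between two generations of the `base64` crate API: 0.21+ requires an explicit engine, while 0.13 exposed free functions, matching the `base64 = "0.13.0"` pin added earlier. Both styles side by side, illustrative only since a single build uses one crate version:

```rust
// base64 0.21+ style (the removed lines):
use base64::{prelude::BASE64_STANDARD, Engine};
let s_new = BASE64_STANDARD.encode(b"verifying key bytes");

// base64 0.13 style (the added lines):
let s_old = base64::encode(b"verifying key bytes");

// Both use the standard alphabet with padding, so the outputs are identical.
```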
@@ -3,8 +3,8 @@ use super::{ProofVerifier, TaskType, VKDump};
use anyhow::Result;

use crate::utils::panic_catch;
use scroll_zkvm_prover::{BatchProof, BundleProof, ChunkProof};
use scroll_zkvm_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
use std::{fs::File, path::Path};

pub struct EuclidV2Verifier {
@@ -53,13 +53,12 @@ impl ProofVerifier for EuclidV2Verifier {
}

fn dump_vk(&self, file: &Path) {
    use base64::{prelude::BASE64_STANDARD, Engine};
    let f = File::create(file).expect("Failed to open file to dump VK");

    let dump = VKDump {
        chunk_vk: BASE64_STANDARD.encode(self.chunk_verifier.get_app_vk()),
        batch_vk: BASE64_STANDARD.encode(self.batch_verifier.get_app_vk()),
        bundle_vk: BASE64_STANDARD.encode(self.bundle_verifier.get_app_vk()),
        chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
        batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
        bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
    };
    serde_json::to_writer(f, &dump).expect("Failed to dump VK");
}
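Both dump_vk hunks above toggle between the two generations of the base64 crate's API: the free function `base64::encode` (deprecated since base64 0.21) and the `Engine`-based `BASE64_STANDARD.encode`. A minimal sketch of the two styles side by side, assuming base64 0.21+ for the engine variant:

```rust
use base64::{prelude::BASE64_STANDARD, Engine};

// base64 >= 0.21: encoding goes through an explicit Engine.
fn encode_vk(vk_bytes: &[u8]) -> String {
    BASE64_STANDARD.encode(vk_bytes)
    // base64 <= 0.13 equivalent (now deprecated):
    // base64::encode(vk_bytes)
}
```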
@@ -1,17 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"

[dependencies]
types-base = { path = "base", package = "scroll-zkvm-circuit-input-types-base" }
types-agg = { path = "aggregation", package = "scroll-zkvm-circuit-input-types-aggregation" }
types-chunk = { path = "chunk", package = "scroll-zkvm-circuit-input-types-chunk" }
types-batch = { path = "batch", package = "scroll-zkvm-circuit-input-types-batch" }
types-bundle = { path = "bundle", package = "scroll-zkvm-circuit-input-types-bundle" }
@@ -1,24 +0,0 @@
# Input Types for circuits

A series of separate crates for the input types accepted by circuits.

These crates help decouple the circuits from other crates and keep their dependencies neat and controllable, avoiding indirect dependencies on crates that are incompatible with the openvm toolchain.

### Code structure
```
types-rs
│
├── base
│
├── circuit
│
├── aggregation
│
<following are layer-oriented crates>
│
├── chunk
│
├── batch
│
└── bundle
```
@@ -1,14 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types-aggregation"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"

[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
rkyv.workspace = true
serde.workspace = true
@@ -1,81 +0,0 @@
/// Represents an openvm program's commitments and public values.
#[derive(
    Clone,
    Debug,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct AggregationInput {
    /// Public values.
    pub public_values: Vec<u32>,
    /// Represents the commitment needed to verify a root proof.
    pub commitment: ProgramCommitment,
}

/// Represents the commitment needed to verify a [`RootProof`].
#[derive(
    Clone,
    Debug,
    Default,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct ProgramCommitment {
    /// The commitment to the child program exe.
    pub exe: [u32; 8],
    /// The commitment to the child program leaf.
    pub leaf: [u32; 8],
}

impl ProgramCommitment {
    pub fn deserialize(commitment_bytes: &[u8]) -> Self {
        // TODO: temporarily skip deserialization if no vk is provided
        if commitment_bytes.is_empty() {
            return Default::default();
        }

        let archived_data =
            rkyv::access::<ArchivedProgramCommitment, rkyv::rancor::BoxedError>(commitment_bytes)
                .unwrap();

        Self {
            exe: archived_data.exe.map(|u32_le| u32_le.to_native()),
            leaf: archived_data.leaf.map(|u32_le| u32_le.to_native()),
        }
    }

    pub fn serialize(&self) -> Vec<u8> {
        rkyv::to_bytes::<rkyv::rancor::BoxedError>(self)
            .map(|v| v.to_vec())
            .unwrap()
    }
}

impl From<&ArchivedProgramCommitment> for ProgramCommitment {
    fn from(archived: &ArchivedProgramCommitment) -> Self {
        Self {
            exe: archived.exe.map(|u32_le| u32_le.to_native()),
            leaf: archived.leaf.map(|u32_le| u32_le.to_native()),
        }
    }
}

/// Number of public-input values, i.e. [u32; N].
///
/// Note that the actual value for each u32 is a byte.
pub const NUM_PUBLIC_VALUES: usize = 32;

/// Witness for an [`AggregationCircuit`][AggCircuit] that also carries proofs that are being
/// aggregated.
pub trait ProofCarryingWitness {
    /// Get the root proofs from the witness.
    fn get_proofs(&self) -> Vec<AggregationInput>;
}
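For context, `serialize` and `deserialize` above round-trip the commitment through rkyv's archived little-endian representation. A hypothetical caller-side check (test values assumed, using only the methods defined above):

```rust
// Hypothetical round trip; the values are placeholders.
let commitment = ProgramCommitment { exe: [1u32; 8], leaf: [2u32; 8] };
let bytes = commitment.serialize();
let recovered = ProgramCommitment::deserialize(&bytes);
assert_eq!(recovered.exe, commitment.exe);
assert_eq!(recovered.leaf, commitment.leaf);
```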
@@ -1,21 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types-base"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"

[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
alloy-serde.workspace = true
rkyv.workspace = true
serde.workspace = true
itertools.workspace = true
tiny-keccak = { workspace = true }
sha3 = "0.10.8"
sha2 = "0.10.8"

[features]
default = []
@@ -1,2 +0,0 @@
pub mod public_inputs;
pub mod utils;
@@ -1,81 +0,0 @@
use alloy_primitives::B256;
pub mod batch;
pub mod bundle;
pub mod chunk;

/// Defines behaviour to be implemented by types representing the public-input values of a circuit.
pub trait PublicInputs {
    /// Keccak-256 digest of the public inputs. The public-input hash is revealed as public values
    /// via [`openvm::io::reveal`].
    fn pi_hash(&self) -> B256;

    /// Validation logic between public inputs of two contiguous instances.
    fn validate(&self, prev_pi: &Self);
}

#[derive(
    Default,
    Debug,
    Copy,
    Clone,
    PartialEq,
    Eq,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub enum ForkName {
    #[default]
    EuclidV1,
    EuclidV2,
}

impl From<&ArchivedForkName> for ForkName {
    fn from(archived: &ArchivedForkName) -> Self {
        match archived {
            ArchivedForkName::EuclidV1 => ForkName::EuclidV1,
            ArchivedForkName::EuclidV2 => ForkName::EuclidV2,
        }
    }
}

impl From<Option<&str>> for ForkName {
    fn from(value: Option<&str>) -> Self {
        match value {
            None => Default::default(),
            Some("euclidv1") => ForkName::EuclidV1,
            Some("euclidv2") => ForkName::EuclidV2,
            Some(s) => unreachable!("hardfork not accepted: {s}"),
        }
    }
}

impl From<&str> for ForkName {
    fn from(value: &str) -> Self {
        match value {
            "euclidv1" => ForkName::EuclidV1,
            "euclidv2" => ForkName::EuclidV2,
            s => unreachable!("hardfork not accepted: {s}"),
        }
    }
}

/// Helper trait extending [`PublicInputs`] with fork-aware variants.
pub trait MultiVersionPublicInputs {
    fn pi_hash_by_fork(&self, fork_name: ForkName) -> B256;
    fn validate(&self, prev_pi: &Self, fork_name: ForkName);
}

impl<T: MultiVersionPublicInputs> PublicInputs for (T, ForkName) {
    fn pi_hash(&self) -> B256 {
        self.0.pi_hash_by_fork(self.1)
    }

    fn validate(&self, prev_pi: &Self) {
        assert_eq!(self.1, prev_pi.1);
        self.0.validate(&prev_pi.0, self.1)
    }
}
@@ -1,144 +0,0 @@
use alloy_primitives::B256;

use crate::{
    public_inputs::{ForkName, MultiVersionPublicInputs},
    utils::keccak256,
};

/// Represents public-input values for a batch.
#[derive(
    Clone,
    Debug,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct BatchInfo {
    /// The state root before applying the batch.
    #[rkyv()]
    pub parent_state_root: B256,
    /// The batch hash of the parent batch.
    #[rkyv()]
    pub parent_batch_hash: B256,
    /// The state root after applying txs in the batch.
    #[rkyv()]
    pub state_root: B256,
    /// The batch header hash of the batch.
    #[rkyv()]
    pub batch_hash: B256,
    /// The EIP-155 chain ID of all txs in the batch.
    #[rkyv()]
    pub chain_id: u64,
    /// The withdraw root of the last block in the last chunk in the batch.
    #[rkyv()]
    pub withdraw_root: B256,
    /// The L1 msg queue hash at the end of the previous batch.
    #[rkyv()]
    pub prev_msg_queue_hash: B256,
    /// The L1 msg queue hash at the end of the current batch.
    #[rkyv()]
    pub post_msg_queue_hash: B256,
}

impl From<&ArchivedBatchInfo> for BatchInfo {
    fn from(archived: &ArchivedBatchInfo) -> Self {
        Self {
            parent_state_root: archived.parent_state_root.into(),
            parent_batch_hash: archived.parent_batch_hash.into(),
            state_root: archived.state_root.into(),
            batch_hash: archived.batch_hash.into(),
            chain_id: archived.chain_id.into(),
            withdraw_root: archived.withdraw_root.into(),
            prev_msg_queue_hash: archived.prev_msg_queue_hash.into(),
            post_msg_queue_hash: archived.post_msg_queue_hash.into(),
        }
    }
}

impl BatchInfo {
    /// Public input hash for a batch (euclidv1 or da-codec@v6) is defined as
    ///
    /// keccak(
    ///     parent state root ||
    ///     parent batch hash ||
    ///     state root ||
    ///     batch hash ||
    ///     chain id ||
    ///     withdraw root
    /// )
    fn pi_hash_euclidv1(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(self.parent_state_root.as_slice())
                .chain(self.parent_batch_hash.as_slice())
                .chain(self.state_root.as_slice())
                .chain(self.batch_hash.as_slice())
                .chain(self.chain_id.to_be_bytes().as_slice())
                .chain(self.withdraw_root.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }

    /// Public input hash for a batch (euclidv2 or da-codec@v7) is defined as
    ///
    /// keccak(
    ///     parent state root ||
    ///     parent batch hash ||
    ///     state root ||
    ///     batch hash ||
    ///     chain id ||
    ///     withdraw root ||
    ///     prev msg queue hash ||
    ///     post msg queue hash
    /// )
    fn pi_hash_euclidv2(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(self.parent_state_root.as_slice())
                .chain(self.parent_batch_hash.as_slice())
                .chain(self.state_root.as_slice())
                .chain(self.batch_hash.as_slice())
                .chain(self.chain_id.to_be_bytes().as_slice())
                .chain(self.withdraw_root.as_slice())
                .chain(self.prev_msg_queue_hash.as_slice())
                .chain(self.post_msg_queue_hash.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }
}

pub type VersionedBatchInfo = (BatchInfo, ForkName);

impl MultiVersionPublicInputs for BatchInfo {
    fn pi_hash_by_fork(&self, fork_name: ForkName) -> B256 {
        match fork_name {
            ForkName::EuclidV1 => self.pi_hash_euclidv1(),
            ForkName::EuclidV2 => self.pi_hash_euclidv2(),
        }
    }

    /// Validate public inputs between 2 contiguous batches.
    ///
    /// - chain id MUST match
    /// - state roots MUST be chained
    /// - batch hashes MUST be chained
    /// - L1 msg queue hashes MUST be chained
    fn validate(&self, prev_pi: &Self, fork_name: ForkName) {
        assert_eq!(self.chain_id, prev_pi.chain_id);
        assert_eq!(self.parent_state_root, prev_pi.state_root);
        assert_eq!(self.parent_batch_hash, prev_pi.batch_hash);
        assert_eq!(self.prev_msg_queue_hash, prev_pi.post_msg_queue_hash);

        if fork_name == ForkName::EuclidV1 {
            assert_eq!(self.prev_msg_queue_hash, B256::ZERO);
            assert_eq!(prev_pi.prev_msg_queue_hash, B256::ZERO);
            assert_eq!(self.post_msg_queue_hash, B256::ZERO);
            assert_eq!(prev_pi.post_msg_queue_hash, B256::ZERO);
        }
    }
}
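As a quick sanity check on the two layouts above: the euclidv1 preimage packs four 32-byte hashes, the u64 chain id, and the withdraw root, while euclidv2 appends the two L1 msg queue hashes. A hypothetical length check (not part of the original crate):

```rust
// euclidv1: parent_state_root + parent_batch_hash + state_root + batch_hash
// (4 * 32 bytes) + chain_id (8 bytes) + withdraw_root (32 bytes) = 168 bytes.
assert_eq!(4 * 32 + 8 + 32, 168);
// euclidv2 appends prev/post msg queue hashes (2 * 32 bytes), giving 232 bytes.
assert_eq!(168 + 2 * 32, 232);
```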
@@ -1,149 +0,0 @@
use alloy_primitives::B256;

use crate::{
    public_inputs::{ForkName, MultiVersionPublicInputs, PublicInputs},
    utils::keccak256,
};

/// Represents fields required to compute the public-inputs digest of a bundle.
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct BundleInfo {
    /// The EIP-155 chain ID of all txs in the bundle.
    pub chain_id: u64,
    /// The L1 msg queue hash at the end of the last batch in the bundle.
    /// Not a phase-1 field, so it may be omitted.
    #[serde(default)]
    pub msg_queue_hash: B256,
    /// The number of batches bundled together in the bundle.
    pub num_batches: u32,
    /// The last finalized on-chain state root.
    pub prev_state_root: B256,
    /// The last finalized on-chain batch hash.
    pub prev_batch_hash: B256,
    /// The state root after applying every batch in the bundle.
    ///
    /// Upon verification of the EVM-verifiable bundle proof, this state root will be finalized
    /// on-chain.
    pub post_state_root: B256,
    /// The batch hash of the last batch in the bundle.
    ///
    /// Upon verification of the EVM-verifiable bundle proof, this batch hash will be finalized
    /// on-chain.
    pub batch_hash: B256,
    /// The withdrawals root at the last block in the last chunk in the last batch in the bundle.
    pub withdraw_root: B256,
}

impl BundleInfo {
    /// Public input hash for a bundle (euclidv1 or da-codec@v6) is defined as
    ///
    /// keccak(
    ///     chain id ||
    ///     num batches ||
    ///     prev state root ||
    ///     prev batch hash ||
    ///     post state root ||
    ///     batch hash ||
    ///     withdraw root
    /// )
    pub fn pi_hash_euclidv1(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(self.chain_id.to_be_bytes().as_slice())
                .chain(self.num_batches.to_be_bytes().as_slice())
                .chain(self.prev_state_root.as_slice())
                .chain(self.prev_batch_hash.as_slice())
                .chain(self.post_state_root.as_slice())
                .chain(self.batch_hash.as_slice())
                .chain(self.withdraw_root.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }

    /// Public input hash for a bundle (euclidv2 or da-codec@v7) is defined as
    ///
    /// keccak(
    ///     chain id ||
    ///     msg_queue_hash ||
    ///     num batches ||
    ///     prev state root ||
    ///     prev batch hash ||
    ///     post state root ||
    ///     batch hash ||
    ///     withdraw root
    /// )
    pub fn pi_hash_euclidv2(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(self.chain_id.to_be_bytes().as_slice())
                .chain(self.msg_queue_hash.as_slice())
                .chain(self.num_batches.to_be_bytes().as_slice())
                .chain(self.prev_state_root.as_slice())
                .chain(self.prev_batch_hash.as_slice())
                .chain(self.post_state_root.as_slice())
                .chain(self.batch_hash.as_slice())
                .chain(self.withdraw_root.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }

    pub fn pi_hash(&self, fork_name: ForkName) -> B256 {
        match fork_name {
            ForkName::EuclidV1 => self.pi_hash_euclidv1(),
            ForkName::EuclidV2 => self.pi_hash_euclidv2(),
        }
    }
}

impl MultiVersionPublicInputs for BundleInfo {
    fn pi_hash_by_fork(&self, fork_name: ForkName) -> B256 {
        match fork_name {
            ForkName::EuclidV1 => self.pi_hash_euclidv1(),
            ForkName::EuclidV2 => self.pi_hash_euclidv2(),
        }
    }

    fn validate(&self, _prev_pi: &Self, _fork_name: ForkName) {
        unreachable!("bundle is the last layer and is not aggregated by any other circuit");
    }
}

#[derive(Clone, Debug)]
pub struct BundleInfoV1(pub BundleInfo);

#[derive(Clone, Debug)]
pub struct BundleInfoV2(pub BundleInfo);

impl From<BundleInfo> for BundleInfoV1 {
    fn from(value: BundleInfo) -> Self {
        Self(value)
    }
}

impl From<BundleInfo> for BundleInfoV2 {
    fn from(value: BundleInfo) -> Self {
        Self(value)
    }
}

impl PublicInputs for BundleInfoV1 {
    fn pi_hash(&self) -> B256 {
        self.0.pi_hash_euclidv1()
    }

    fn validate(&self, _prev_pi: &Self) {
        unreachable!("bundle is the last layer and is not aggregated by any other circuit");
    }
}

impl PublicInputs for BundleInfoV2 {
    fn pi_hash(&self) -> B256 {
        self.0.pi_hash_euclidv2()
    }

    fn validate(&self, _prev_pi: &Self) {
        unreachable!("bundle is the last layer and is not aggregated by any other circuit");
    }
}
@@ -1,248 +0,0 @@
use alloy_primitives::{B256, U256};

use crate::{
    public_inputs::{ForkName, MultiVersionPublicInputs},
    utils::keccak256,
};

/// Number of bytes used to serialise [`BlockContextV2`].
pub const SIZE_BLOCK_CTX: usize = 52;

/// Represents version 2 of the block context.
///
/// The difference between v2 and v1 is that the block number field has been removed since v2.
#[derive(
    Debug,
    Clone,
    PartialEq,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct BlockContextV2 {
    /// The timestamp of the block.
    pub timestamp: u64,
    /// The base fee of the block.
    pub base_fee: U256,
    /// The gas limit of the block.
    pub gas_limit: u64,
    /// The number of transactions in the block, including both L1 msg txs as well as L2 txs.
    pub num_txs: u16,
    /// The number of L1 msg txs in the block.
    pub num_l1_msgs: u16,
}

impl From<&ArchivedBlockContextV2> for BlockContextV2 {
    fn from(archived: &ArchivedBlockContextV2) -> Self {
        Self {
            timestamp: archived.timestamp.into(),
            base_fee: archived.base_fee.into(),
            gas_limit: archived.gas_limit.into(),
            num_txs: archived.num_txs.into(),
            num_l1_msgs: archived.num_l1_msgs.into(),
        }
    }
}

impl From<&[u8]> for BlockContextV2 {
    fn from(bytes: &[u8]) -> Self {
        assert_eq!(bytes.len(), SIZE_BLOCK_CTX);

        let timestamp = u64::from_be_bytes(bytes[0..8].try_into().expect("should not fail"));
        let base_fee = U256::from_be_slice(&bytes[8..40]);
        let gas_limit = u64::from_be_bytes(bytes[40..48].try_into().expect("should not fail"));
        let num_txs = u16::from_be_bytes(bytes[48..50].try_into().expect("should not fail"));
        let num_l1_msgs = u16::from_be_bytes(bytes[50..52].try_into().expect("should not fail"));

        Self {
            timestamp,
            base_fee,
            gas_limit,
            num_txs,
            num_l1_msgs,
        }
    }
}

impl BlockContextV2 {
    /// Serialize the block context in packed form.
    pub fn to_bytes(&self) -> Vec<u8> {
        std::iter::empty()
            .chain(self.timestamp.to_be_bytes())
            .chain(self.base_fee.to_be_bytes::<32>())
            .chain(self.gas_limit.to_be_bytes())
            .chain(self.num_txs.to_be_bytes())
            .chain(self.num_l1_msgs.to_be_bytes())
            .collect()
    }
}
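Since `From<&[u8]>` and `to_bytes` above are mutual inverses over the packed 52-byte layout (8 + 32 + 8 + 2 + 2), a hypothetical round-trip check with placeholder values would be:

```rust
use alloy_primitives::U256;

let ctx = BlockContextV2 {
    timestamp: 1_700_000_000,               // placeholder values
    base_fee: U256::from(1_000_000_000u64),
    gas_limit: 30_000_000,
    num_txs: 10,
    num_l1_msgs: 2,
};
let bytes = ctx.to_bytes();
assert_eq!(bytes.len(), SIZE_BLOCK_CTX); // 52 bytes
assert_eq!(BlockContextV2::from(bytes.as_slice()), ctx);
```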
/// Represents header-like information for the chunk.
#[derive(
    Debug,
    Clone,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct ChunkInfo {
    /// The EIP-155 chain ID for all txs in the chunk.
    #[rkyv()]
    pub chain_id: u64,
    /// The state root before applying the chunk.
    #[rkyv()]
    pub prev_state_root: B256,
    /// The state root after applying the chunk.
    #[rkyv()]
    pub post_state_root: B256,
    /// The withdrawals root after applying the chunk.
    #[rkyv()]
    pub withdraw_root: B256,
    /// Digest of L1 message txs force included in the chunk.
    /// It is a legacy field and can be omitted in the new definition.
    #[rkyv()]
    #[serde(default)]
    pub data_hash: B256,
    /// Digest of L2 tx data flattened over all L2 txs in the chunk.
    #[rkyv()]
    pub tx_data_digest: B256,
    /// The L1 msg queue hash at the end of the previous chunk.
    #[rkyv()]
    pub prev_msg_queue_hash: B256,
    /// The L1 msg queue hash at the end of the current chunk.
    #[rkyv()]
    pub post_msg_queue_hash: B256,
    /// The length of rlp-encoded L2 tx bytes flattened over all L2 txs in the chunk.
    #[rkyv()]
    pub tx_data_length: u64,
    /// The block number of the first block in the chunk.
    #[rkyv()]
    pub initial_block_number: u64,
    /// The block contexts of the blocks in the chunk.
    #[rkyv()]
    pub block_ctxs: Vec<BlockContextV2>,
}

impl ChunkInfo {
    /// Public input hash for a given chunk (euclidv1 or da-codec@v6) is defined as
    ///
    /// keccak(
    ///     chain id ||
    ///     prev state root ||
    ///     post state root ||
    ///     withdraw root ||
    ///     chunk data hash ||
    ///     tx data hash
    /// )
    pub fn pi_hash_euclidv1(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(&self.chain_id.to_be_bytes())
                .chain(self.prev_state_root.as_slice())
                .chain(self.post_state_root.as_slice())
                .chain(self.withdraw_root.as_slice())
                .chain(self.data_hash.as_slice())
                .chain(self.tx_data_digest.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }

    /// Public input hash for a given chunk (euclidv2 or da-codec@v7) is defined as
    ///
    /// keccak(
    ///     chain id ||
    ///     prev state root ||
    ///     post state root ||
    ///     withdraw root ||
    ///     tx data digest ||
    ///     prev msg queue hash ||
    ///     post msg queue hash ||
    ///     initial block number ||
    ///     block_ctx for block_ctx in block_ctxs
    /// )
    pub fn pi_hash_euclidv2(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(&self.chain_id.to_be_bytes())
                .chain(self.prev_state_root.as_slice())
                .chain(self.post_state_root.as_slice())
                .chain(self.withdraw_root.as_slice())
                .chain(self.tx_data_digest.as_slice())
                .chain(self.prev_msg_queue_hash.as_slice())
                .chain(self.post_msg_queue_hash.as_slice())
                .chain(&self.initial_block_number.to_be_bytes())
                .chain(
                    self.block_ctxs
                        .iter()
                        .flat_map(|block_ctx| block_ctx.to_bytes())
                        .collect::<Vec<u8>>()
                        .as_slice(),
                )
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }
}

impl From<&ArchivedChunkInfo> for ChunkInfo {
    fn from(archived: &ArchivedChunkInfo) -> Self {
        Self {
            chain_id: archived.chain_id.into(),
            prev_state_root: archived.prev_state_root.into(),
            post_state_root: archived.post_state_root.into(),
            withdraw_root: archived.withdraw_root.into(),
            data_hash: archived.data_hash.into(),
            tx_data_digest: archived.tx_data_digest.into(),
            prev_msg_queue_hash: archived.prev_msg_queue_hash.into(),
            post_msg_queue_hash: archived.post_msg_queue_hash.into(),
            tx_data_length: archived.tx_data_length.into(),
            initial_block_number: archived.initial_block_number.into(),
            block_ctxs: archived
                .block_ctxs
                .iter()
                .map(BlockContextV2::from)
                .collect(),
        }
    }
}

pub type VersionedChunkInfo = (ChunkInfo, ForkName);

impl MultiVersionPublicInputs for ChunkInfo {
    /// Compute the public input hash for the chunk.
    fn pi_hash_by_fork(&self, fork_name: ForkName) -> B256 {
        match fork_name {
            ForkName::EuclidV1 => {
                assert_ne!(self.data_hash, B256::ZERO, "v6 must have a valid data hash");
                self.pi_hash_euclidv1()
            }
            ForkName::EuclidV2 => self.pi_hash_euclidv2(),
        }
    }

    /// Validate public inputs between 2 contiguous chunks.
    ///
    /// - chain id MUST match
    /// - state roots MUST be chained
    /// - L1 msg queue hash MUST be chained
    fn validate(&self, prev_pi: &Self, fork_name: ForkName) {
        assert_eq!(self.chain_id, prev_pi.chain_id);
        assert_eq!(self.prev_state_root, prev_pi.post_state_root);
        assert_eq!(self.prev_msg_queue_hash, prev_pi.post_msg_queue_hash);

        // The message queue hash is used only from euclidv2 (da-codec@v7) onwards.
        if fork_name == ForkName::EuclidV1 {
            assert_eq!(self.prev_msg_queue_hash, B256::ZERO);
            assert_eq!(prev_pi.prev_msg_queue_hash, B256::ZERO);
            assert_eq!(self.post_msg_queue_hash, B256::ZERO);
            assert_eq!(prev_pi.post_msg_queue_hash, B256::ZERO);
        }
    }
}
@@ -1,35 +0,0 @@
use alloy_primitives::B256;
use tiny_keccak::{Hasher, Keccak};

/// From the utilities of ethers-rs.
///
/// Computes the Keccak-256 hash of input bytes.
///
/// Note that strings are interpreted as UTF-8 bytes.
pub fn keccak256<T: AsRef<[u8]>>(bytes: T) -> B256 {
    let mut output = [0u8; 32];

    let mut hasher = Keccak::v256();
    hasher.update(bytes.as_ref());
    hasher.finalize(&mut output);

    B256::from(output)
}

pub fn keccak256_rv32<T: AsRef<[u8]>>(bytes: T) -> B256 {
    use sha3::{Digest, Keccak256};
    let mut output = [0u8; 32];
    let mut hasher = Keccak256::new();
    hasher.update(bytes.as_ref());
    output.copy_from_slice(hasher.finalize().as_ref());
    B256::from(output)
}

pub fn sha256_rv32<T: AsRef<[u8]>>(bytes: T) -> B256 {
    use sha2::{Digest, Sha256};
    let mut output = [0u8; 32];
    let mut hasher = Sha256::new();
    hasher.update(bytes.as_ref());
    output.copy_from_slice(hasher.finalize().as_ref());
    B256::from(output)
}
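`keccak256` (tiny-keccak) and `keccak256_rv32` (sha3) above compute the same Keccak-256 digest; two backends presumably coexist because different crates are patched or accelerated for the openvm guest target. A hypothetical equivalence check:

```rust
// Both backends must agree on any input.
let input = b"scroll";
assert_eq!(keccak256(input), keccak256_rv32(input));
```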
@@ -1,2 +0,0 @@
mod hash;
pub use hash::{keccak256, keccak256_rv32, sha256_rv32};
@@ -1,21 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types-batch"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"

[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
rkyv.workspace = true
serde.workspace = true
itertools.workspace = true
vm-zstd = { workspace = true }

types-base = { path = "../base", package = "scroll-zkvm-circuit-input-types-base" }
types-agg = { path = "../aggregation", package = "scroll-zkvm-circuit-input-types-aggregation" }

[features]
default = []
@@ -1,30 +0,0 @@
use alloy_primitives::B256;

pub mod v6;

pub mod v7;

pub trait BatchHeader {
    /// The DA-codec version for the batch header.
    fn version(&self) -> u8;

    /// The incremental index of the batch.
    fn index(&self) -> u64;

    /// The batch header digest of the parent batch.
    fn parent_batch_hash(&self) -> B256;

    /// The batch header digest.
    fn batch_hash(&self) -> B256;
}

/// The reference header indicates the version of the batch header on which the batch hash
/// calculation should be based.
#[derive(Clone, Debug, rkyv::Archive, rkyv::Deserialize, rkyv::Serialize)]
#[rkyv(derive(Debug))]
pub enum ReferenceHeader {
    /// Represents DA-codec v6.
    V6(v6::BatchHeaderV6),
    /// Represents DA-codec v7.
    V7(v7::BatchHeaderV7),
}
@@ -1,151 +0,0 @@
use super::BatchHeader;
use alloy_primitives::B256;
use types_base::utils::keccak256;

/// Represents the header summarising the batch of chunks as per DA-codec v6.
#[derive(
    Clone,
    Copy,
    Debug,
    Default,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct BatchHeaderV6 {
    /// The DA-codec version for the batch.
    #[rkyv()]
    pub version: u8,
    /// The index of the batch.
    #[rkyv()]
    pub batch_index: u64,
    /// Number of L1 messages popped in the batch.
    #[rkyv()]
    pub l1_message_popped: u64,
    /// Number of total L1 messages popped after the batch.
    #[rkyv()]
    pub total_l1_message_popped: u64,
    /// The parent batch hash.
    #[rkyv()]
    pub parent_batch_hash: B256,
    /// The timestamp of the last block in this batch.
    #[rkyv()]
    pub last_block_timestamp: u64,
    /// The data hash of the batch.
    #[rkyv()]
    pub data_hash: B256,
    /// The versioned hash of the blob with this batch's data.
    #[rkyv()]
    pub blob_versioned_hash: B256,
    /// The blob data proof: z (32), y (32).
    #[rkyv()]
    pub blob_data_proof: [B256; 2],
}

impl BatchHeader for BatchHeaderV6 {
    fn version(&self) -> u8 {
        self.version
    }

    fn index(&self) -> u64 {
        self.batch_index
    }

    fn parent_batch_hash(&self) -> B256 {
        self.parent_batch_hash
    }

    /// Batch hash as per DA-codec v6:
    ///
    /// keccak(
    ///     version ||
    ///     batch index ||
    ///     l1 message popped ||
    ///     total l1 message popped ||
    ///     batch data hash ||
    ///     versioned hash ||
    ///     parent batch hash ||
    ///     last block timestamp ||
    ///     z ||
    ///     y
    /// )
    fn batch_hash(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(vec![self.version].as_slice())
                .chain(self.batch_index.to_be_bytes().as_slice())
                .chain(self.l1_message_popped.to_be_bytes().as_slice())
                .chain(self.total_l1_message_popped.to_be_bytes().as_slice())
                .chain(self.data_hash.as_slice())
                .chain(self.blob_versioned_hash.as_slice())
                .chain(self.parent_batch_hash.as_slice())
                .chain(self.last_block_timestamp.to_be_bytes().as_slice())
                .chain(self.blob_data_proof[0].as_slice())
                .chain(self.blob_data_proof[1].as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }
}

impl BatchHeader for ArchivedBatchHeaderV6 {
    fn version(&self) -> u8 {
        self.version
    }

    fn index(&self) -> u64 {
        self.batch_index.into()
    }

    fn parent_batch_hash(&self) -> B256 {
        self.parent_batch_hash.into()
    }

    fn batch_hash(&self) -> B256 {
        let batch_index: u64 = self.batch_index.into();
        let l1_message_popped: u64 = self.l1_message_popped.into();
        let total_l1_message_popped: u64 = self.total_l1_message_popped.into();
        let data_hash: B256 = self.data_hash.into();
        let blob_versioned_hash: B256 = self.blob_versioned_hash.into();
        let parent_batch_hash: B256 = self.parent_batch_hash.into();
        let last_block_timestamp: u64 = self.last_block_timestamp.into();
        let blob_data_proof: [B256; 2] = self.blob_data_proof.map(|h| h.into());
        keccak256(
            std::iter::empty()
                .chain(vec![self.version].as_slice())
                .chain(batch_index.to_be_bytes().as_slice())
                .chain(l1_message_popped.to_be_bytes().as_slice())
                .chain(total_l1_message_popped.to_be_bytes().as_slice())
                .chain(data_hash.as_slice())
                .chain(blob_versioned_hash.as_slice())
                .chain(parent_batch_hash.as_slice())
                .chain(last_block_timestamp.to_be_bytes().as_slice())
                .chain(blob_data_proof[0].as_slice())
                .chain(blob_data_proof[1].as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }
}

impl From<&ArchivedBatchHeaderV6> for BatchHeaderV6 {
    fn from(archived: &ArchivedBatchHeaderV6) -> Self {
        Self {
            version: archived.version,
            batch_index: archived.batch_index.into(),
            l1_message_popped: archived.l1_message_popped.into(),
            total_l1_message_popped: archived.total_l1_message_popped.into(),
            parent_batch_hash: archived.parent_batch_hash.into(),
            last_block_timestamp: archived.last_block_timestamp.into(),
            data_hash: archived.data_hash.into(),
            blob_versioned_hash: archived.blob_versioned_hash.into(),
            blob_data_proof: [
                archived.blob_data_proof[0].into(),
                archived.blob_data_proof[1].into(),
            ],
        }
    }
}
@@ -1,106 +0,0 @@
use alloy_primitives::B256;

use super::BatchHeader;
use types_base::utils::keccak256;

/// Represents the header summarising the batch of chunks as per DA-codec v7.
#[derive(
    Clone,
    Copy,
    Debug,
    Default,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
    serde::Deserialize,
    serde::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct BatchHeaderV7 {
    /// The DA-codec version for the batch.
    #[rkyv()]
    pub version: u8,
    /// The index of the batch.
    #[rkyv()]
    pub batch_index: u64,
    /// The parent batch hash.
    #[rkyv()]
    pub parent_batch_hash: B256,
    /// The versioned hash of the blob with this batch's data.
    #[rkyv()]
    pub blob_versioned_hash: B256,
}

impl BatchHeader for BatchHeaderV7 {
    fn version(&self) -> u8 {
        self.version
    }

    fn index(&self) -> u64 {
        self.batch_index
    }

    fn parent_batch_hash(&self) -> B256 {
        self.parent_batch_hash
    }

    /// Batch hash as per DA-codec v7:
    ///
    /// keccak(
    ///     version ||
    ///     batch index ||
    ///     versioned hash ||
    ///     parent batch hash
    /// )
    fn batch_hash(&self) -> B256 {
        keccak256(
            std::iter::empty()
                .chain(vec![self.version].as_slice())
                .chain(self.batch_index.to_be_bytes().as_slice())
                .chain(self.blob_versioned_hash.as_slice())
                .chain(self.parent_batch_hash.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }
}

impl BatchHeader for ArchivedBatchHeaderV7 {
    fn version(&self) -> u8 {
        self.version
    }

    fn index(&self) -> u64 {
        self.batch_index.into()
    }

    fn parent_batch_hash(&self) -> B256 {
        self.parent_batch_hash.into()
    }

    fn batch_hash(&self) -> B256 {
        let batch_index: u64 = self.batch_index.into();
        let blob_versioned_hash: B256 = self.blob_versioned_hash.into();
        let parent_batch_hash: B256 = self.parent_batch_hash.into();
        keccak256(
            std::iter::empty()
                .chain(vec![self.version].as_slice())
                .chain(batch_index.to_be_bytes().as_slice())
                .chain(blob_versioned_hash.as_slice())
                .chain(parent_batch_hash.as_slice())
                .cloned()
                .collect::<Vec<u8>>(),
        )
    }
}

impl From<&ArchivedBatchHeaderV7> for BatchHeaderV7 {
    fn from(archived: &ArchivedBatchHeaderV7) -> Self {
        Self {
            version: archived.version,
            batch_index: archived.batch_index.into(),
            parent_batch_hash: archived.parent_batch_hash.into(),
            blob_versioned_hash: archived.blob_versioned_hash.into(),
        }
    }
}
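As a usage sketch, the v7 batch hash above covers a 73-byte preimage (1 + 8 + 32 + 32); a hypothetical call with placeholder values (note `batch_hash` comes from the `BatchHeader` trait):

```rust
use alloy_primitives::B256;

let header = BatchHeaderV7 {
    version: 7,
    batch_index: 42,                 // placeholder
    parent_batch_hash: B256::ZERO,   // placeholder
    blob_versioned_hash: B256::ZERO, // placeholder
};
// keccak(version || batch index || versioned hash || parent batch hash)
let digest = header.batch_hash();
assert_ne!(digest, B256::ZERO);
```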
@@ -1,17 +0,0 @@
mod header;
pub use header::{
    ArchivedReferenceHeader, BatchHeader, ReferenceHeader,
    v6::{ArchivedBatchHeaderV6, BatchHeaderV6},
    v7::{ArchivedBatchHeaderV7, BatchHeaderV7},
};

mod payload;
pub use payload::{
    v6::{EnvelopeV6, PayloadV6},
    v7::{EnvelopeV7, PayloadV7},
};

pub use payload::{BLOB_WIDTH, N_BLOB_BYTES, N_DATA_BYTES_PER_COEFFICIENT};

mod witness;
pub use witness::{ArchivedBatchWitness, BatchWitness, Bytes48, PointEvalWitness};
@@ -1,15 +0,0 @@
pub mod v6;
pub mod v7;

/// The number of data bytes we pack into each BLS12-381 scalar. The most-significant byte is 0.
pub const N_DATA_BYTES_PER_COEFFICIENT: usize = 31;

/// The number of BLS12-381 scalar field elements that effectively represent an EIP-4844 blob.
pub const BLOB_WIDTH: usize = 4096;

/// The effective (reduced) number of bytes we can use within a blob.
///
/// EIP-4844 requires that each 32-byte chunk of bytes represent a BLS12-381 scalar field element
/// in its canonical form. As a result, we set the most-significant byte in each such chunk to 0.
/// This allows us to use only up to 31 bytes in each such chunk, hence the reduced capacity.
pub const N_BLOB_BYTES: usize = BLOB_WIDTH * N_DATA_BYTES_PER_COEFFICIENT;
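Concretely, the constants above yield a usable blob capacity of 4096 × 31 = 126,976 bytes; a hypothetical compile-time check:

```rust
// Usable capacity after reserving the most-significant byte of each scalar.
const _: () = assert!(N_BLOB_BYTES == 4096 * 31);
const _: () = assert!(N_BLOB_BYTES == 126_976);
```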
@@ -1,212 +0,0 @@
use alloy_primitives::B256;
use itertools::Itertools;

use crate::BatchHeaderV6;
use types_base::{public_inputs::chunk::ChunkInfo, utils::keccak256};

/// The default maximum number of chunks for a v6 payload.
pub const N_MAX_CHUNKS: usize = 45;

/// The number of bytes to encode the number of chunks in a batch.
const N_BYTES_NUM_CHUNKS: usize = 2;

/// The number of bytes to encode a chunk size (u32).
const N_BYTES_CHUNK_SIZE: usize = 4;

impl From<&[u8]> for EnvelopeV6 {
    fn from(blob_bytes: &[u8]) -> Self {
        let is_encoded = blob_bytes[0] & 1 == 1;
        Self {
            is_encoded,
            envelope_bytes: if is_encoded {
                vm_zstd::process(&blob_bytes[1..]).unwrap().decoded_data
            } else {
                Vec::from(&blob_bytes[1..])
            },
        }
    }
}

#[derive(Debug, Clone)]
pub struct EnvelopeV6 {
    /// The original envelope bytes supplied.
    ///
    /// Cached just for re-use later in the challenge digest computation.
    pub envelope_bytes: Vec<u8>,
    /// Whether the enveloped bytes are encoded (compressed) in the envelope.
    pub is_encoded: bool,
}

impl EnvelopeV6 {
    /// Parse the payload bytes and obtain the challenge digest.
    pub fn challenge_digest(&self, versioned_hash: B256) -> B256 {
        let payload = Payload::from(self);
        payload.get_challenge_digest(versioned_hash)
    }
}

impl From<&EnvelopeV6> for Payload {
    fn from(envelope: &EnvelopeV6) -> Self {
        Self::from_payload(&envelope.envelope_bytes)
    }
}

/// Payload that describes a batch.
#[derive(Clone, Debug, Default)]
pub struct Payload {
    /// Metadata that encodes the sizes of every chunk in the batch.
    pub metadata_digest: B256,
    /// The Keccak digests of transaction bytes for every chunk in the batch.
    ///
    /// The `chunk_data_digest` is a part of the chunk-circuit's public input and hence used to
    /// verify that the transaction bytes included in the chunk-circuit indeed match the
    /// transaction bytes made available in the batch.
    pub chunk_data_digests: Vec<B256>,
}

pub type PayloadV6 = Payload;

impl Payload {
    /// For raw payload data (read from the decompressed enveloped data), i.e. raw batch bytes
    /// with metadata, this function segments the byte stream into chunk segments.
    ///
    /// This method is used INSIDE the zkVM, since we cannot generate (compress) batch data
    /// within the VM program.
    ///
    /// The structure of batch bytes is as follows:
    ///
    /// | Byte Index                                                   | Size                         | Hint                                |
    /// |--------------------------------------------------------------|------------------------------|-------------------------------------|
    /// | 0                                                            | N_BYTES_NUM_CHUNKS           | Number of chunks                    |
    /// | N_BYTES_NUM_CHUNKS                                           | N_BYTES_CHUNK_SIZE           | Size of chunks[0]                   |
    /// | N_BYTES_NUM_CHUNKS + N_BYTES_CHUNK_SIZE                      | N_BYTES_CHUNK_SIZE           | Size of chunks[1]                   |
    /// | N_BYTES_NUM_CHUNKS + (i * N_BYTES_CHUNK_SIZE)                | N_BYTES_CHUNK_SIZE           | Size of chunks[i]                   |
    /// | N_BYTES_NUM_CHUNKS + ((N_MAX_CHUNKS-1) * N_BYTES_CHUNK_SIZE) | N_BYTES_CHUNK_SIZE           | Size of chunks[N_MAX_CHUNKS-1]      |
    /// | N_BYTES_NUM_CHUNKS + (N_MAX_CHUNKS * N_BYTES_CHUNK_SIZE)     | Size of chunks[0]            | L2 tx bytes of chunks[0]            |
    /// | "" + Size of chunks[0]                                       | Size of chunks[1]            | L2 tx bytes of chunks[1]            |
    /// | "" + Size of chunks[i-1]                                     | Size of chunks[i]            | L2 tx bytes of chunks[i]            |
    /// | "" + Size of chunks[Num_chunks-1]                            | Size of chunks[Num_chunks-1] | L2 tx bytes of chunks[Num_chunks-1] |
    pub fn from_payload(batch_bytes_with_metadata: &[u8]) -> Self {
        // Get the metadata bytes and metadata digest.
        let n_bytes_metadata = Self::n_bytes_metadata();
        let metadata_bytes = &batch_bytes_with_metadata[..n_bytes_metadata];
        let metadata_digest = keccak256(metadata_bytes);

        // The remaining bytes represent the chunk data (L2 tx bytes) segmented as chunks.
        let batch_bytes = &batch_bytes_with_metadata[n_bytes_metadata..];

        // The number of chunks in the batch.
        let valid_chunks = metadata_bytes[..N_BYTES_NUM_CHUNKS]
            .iter()
            .fold(0usize, |acc, &d| acc * 256usize + d as usize);

        // The size of each chunk in the batch.
        let chunk_sizes = metadata_bytes[N_BYTES_NUM_CHUNKS..]
            .iter()
            .chunks(N_BYTES_CHUNK_SIZE)
            .into_iter()
            .map(|bytes| bytes.fold(0usize, |acc, &d| acc * 256usize + d as usize))
            .collect::<Vec<usize>>();

        // For every unused chunk, the chunk size should be set to 0.
        for &unused_chunk_size in chunk_sizes.iter().skip(valid_chunks) {
            assert_eq!(unused_chunk_size, 0, "unused chunk has size 0");
        }

        // Segment the batch bytes based on the chunk sizes.
        let (segmented_batch_data, remaining_bytes) =
            chunk_sizes.into_iter().take(valid_chunks).fold(
                (Vec::new(), batch_bytes),
                |(mut datas, rest_bytes), size| {
                    datas.push(Vec::from(&rest_bytes[..size]));
                    (datas, &rest_bytes[size..])
                },
            );

        // After segmenting the batch data into chunks, no bytes should be left.
        assert!(
            remaining_bytes.is_empty(),
            "chunk segmentation len must add up to the correct value"
        );

        // Compute the chunk data digests based on the segmented data.
        let chunk_data_digests = segmented_batch_data
            .iter()
            .map(|bytes| B256::from(keccak256(bytes)))
            .collect();

        Self {
            metadata_digest,
            chunk_data_digests,
        }
    }

    /// Compute the challenge digest from the blob bytes, which is a combination of the digests
    /// of the bytes in each chunk.
    pub fn get_challenge_digest(&self, versioned_hash: B256) -> B256 {
        keccak256(self.get_challenge_digest_preimage(versioned_hash))
    }

    /// The number of bytes in the payload data representing the "payload metadata" section:
    /// a u16 for the number of chunks and N_MAX_CHUNKS u32 values for the chunk sizes.
    const fn n_bytes_metadata() -> usize {
        N_BYTES_NUM_CHUNKS + (N_MAX_CHUNKS * N_BYTES_CHUNK_SIZE)
    }

    /// Validate the payload contents.
    pub fn validate<'a>(
        &self,
        header: &BatchHeaderV6,
        chunk_infos: &'a [ChunkInfo],
    ) -> (&'a ChunkInfo, &'a ChunkInfo) {
        // There should be at least 1 chunk info.
        assert!(!chunk_infos.is_empty(), "at least 1 chunk info");

        // Get the first and last chunks' info, to construct the batch info.
        let (first_chunk, last_chunk) = (
            chunk_infos.first().expect("at least one chunk in batch"),
            chunk_infos.last().expect("at least one chunk in batch"),
        );

        for (&chunk_data_digest, chunk_info) in self.chunk_data_digests.iter().zip_eq(chunk_infos) {
            assert_eq!(chunk_data_digest, chunk_info.tx_data_digest)
        }

        // Validate the l1-msg identifier data_hash for the batch.
        let batch_data_hash_preimage = chunk_infos
            .iter()
            .flat_map(|chunk_info| chunk_info.data_hash.0)
            .collect::<Vec<_>>();
        let batch_data_hash = keccak256(batch_data_hash_preimage);
        assert_eq!(batch_data_hash, header.data_hash);

        (first_chunk, last_chunk)
    }

    /// Get the preimage for the challenge digest.
    pub(crate) fn get_challenge_digest_preimage(&self, versioned_hash: B256) -> Vec<u8> {
        // preimage =
        //     metadata_digest ||
        //     chunk[0].chunk_data_digest || ...
        //     chunk[N_SNARKS-1].chunk_data_digest ||
        //     blob_versioned_hash
        //
        // where chunk_data_digest for a padded chunk is set equal to the "last valid chunk"'s
        // chunk_data_digest.
        let mut preimage = self.metadata_digest.to_vec();
        let last_digest = self
            .chunk_data_digests
            .last()
            .expect("at least we have one");
        for chunk_digest in self
            .chunk_data_digests
            .iter()
            .chain(std::iter::repeat(last_digest))
            .take(N_MAX_CHUNKS)
        {
            preimage.extend_from_slice(chunk_digest.as_slice());
        }
        preimage.extend_from_slice(versioned_hash.as_slice());
        preimage
    }
}
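The metadata section described in the table above is fixed-size: 2 bytes for the chunk count plus 45 × 4 bytes of per-chunk sizes, i.e. 182 bytes precede any tx data. A hypothetical check mirroring `n_bytes_metadata`:

```rust
// N_BYTES_NUM_CHUNKS (2) + N_MAX_CHUNKS (45) * N_BYTES_CHUNK_SIZE (4) = 182.
assert_eq!(N_BYTES_NUM_CHUNKS + N_MAX_CHUNKS * N_BYTES_CHUNK_SIZE, 182);
```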
@@ -1,256 +0,0 @@
|
||||
use alloy_primitives::B256;
|
||||
|
||||
use crate::BatchHeaderV7;
|
||||
use types_base::{
|
||||
public_inputs::chunk::{BlockContextV2, ChunkInfo, SIZE_BLOCK_CTX},
|
||||
utils::keccak256,
|
||||
};
|
||||
|
||||
use super::N_BLOB_BYTES;
|
||||
|
||||
/// da-codec@v7
|
||||
const DA_CODEC_VERSION: u8 = 7;
|
||||
|
||||
/// Represents the data contained within an EIP-4844 blob that is published on-chain.
|
||||
///
|
||||
/// The bytes following some metadata represent zstd-encoded [`PayloadV7`] if the envelope is
|
||||
/// indicated as `is_encoded == true`.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct EnvelopeV7 {
|
||||
/// The original envelope bytes supplied.
|
||||
///
|
||||
/// Caching just for re-use later in challenge digest computation.
|
||||
pub envelope_bytes: Vec<u8>,
|
||||
/// The version from da-codec, i.e. v7 in this case.
|
||||
pub version: u8,
|
||||
/// A single byte boolean flag (value is 0 or 1) to denote whether or not the following blob
|
||||
/// bytes represent a batch in its zstd-encoded or raw form.
|
||||
pub is_encoded: u8,
|
||||
/// The unpadded bytes that possibly encode the [`PayloadV7`].
|
||||
pub unpadded_bytes: Vec<u8>,
|
||||
}
|
||||
|
||||
impl From<&[u8]> for EnvelopeV7 {
|
||||
fn from(blob_bytes: &[u8]) -> Self {
|
||||
// The number of bytes is as expected.
|
||||
assert_eq!(blob_bytes.len(), N_BLOB_BYTES);
|
||||
|
||||
// The version of the blob encoding was as expected, i.e. da-codec@v7.
|
||||
let version = blob_bytes[0];
|
||||
assert_eq!(version, DA_CODEC_VERSION);
|
||||
|
||||
// Calculate the unpadded size of the encoded payload.
|
||||
//
|
||||
// It should be at most the maximum number of bytes allowed.
|
||||
let unpadded_size = (blob_bytes[1] as usize) * 256 * 256
|
||||
+ (blob_bytes[2] as usize) * 256
|
||||
+ blob_bytes[3] as usize;
|
||||
assert!(unpadded_size <= N_BLOB_BYTES - 5);
|
||||
|
||||
// Whether the envelope represents encoded payload or raw payload.
|
||||
//
|
||||
// Is a boolean.
|
||||
let is_encoded = blob_bytes[4];
|
||||
assert!(is_encoded <= 1);
|
||||
|
||||
// The padded bytes are all 0s.
|
||||
for &padded_byte in blob_bytes.iter().skip(5 + unpadded_size) {
|
||||
assert_eq!(padded_byte, 0);
|
||||
}
|
||||
|
||||
Self {
|
||||
version,
|
||||
is_encoded,
|
||||
unpadded_bytes: blob_bytes[5..(5 + unpadded_size)].to_vec(),
|
||||
envelope_bytes: blob_bytes.to_vec(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl EnvelopeV7 {
|
||||
/// The verification of the EIP-4844 blob is done via point-evaluation precompile
|
||||
/// implemented in-circuit.
|
||||
///
|
||||
/// We require a random challenge point for this, and using Fiat-Shamir we compute it with
|
||||
/// every byte in the blob along with the blob's versioned hash, i.e. an identifier for its KZG
|
||||
/// commitment.
|
||||
///
|
||||
/// keccak256(
|
||||
/// keccak256(envelope) ||
|
||||
/// versioned hash
|
||||
/// )
|
||||
pub fn challenge_digest(&self, versioned_hash: B256) -> B256 {
|
||||
keccak256(
|
||||
std::iter::empty()
|
||||
.chain(keccak256(&self.envelope_bytes))
|
||||
.chain(versioned_hash.0)
|
||||
.collect::<Vec<u8>>(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents the batch data, eventually encoded into an [`EnvelopeV7`].
|
||||
///
|
||||
/// | Field | # Bytes | Type | Index |
|
||||
/// |------------------------|---------|----------------|---------------|
|
||||
/// | prevL1MessageQueueHash | 32 | bytes32 | 0 |
|
||||
/// | postL1MessageQueueHash | 32 | bytes32 | 32 |
|
||||
/// | initialL2BlockNumber | 8 | u64 | 64 |
|
||||
/// | numBlocks | 2 | u16 | 72 |
|
||||
/// | blockCtxs[0] | 52 | BlockContextV2 | 74 |
|
||||
/// | ... blockCtxs[i] ... | 52 | BlockContextV2 | 74 + 52*i |
|
||||
/// | blockCtxs[n-1] | 52 | BlockContextV2 | 74 + 52*(n-1) |
|
||||
/// | l2TxsData | dynamic | bytes | 74 + 52*n |
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct PayloadV7 {
|
||||
/// The version from da-codec, i.e. v7 in this case.
|
||||
///
|
||||
/// Note: This is not really a part of payload, simply coopied from the envelope for
|
||||
/// convenience.
|
||||
pub version: u8,
|
||||
/// Message queue hash at the end of the previous batch.
|
||||
pub prev_msg_queue_hash: B256,
|
||||
/// Message queue hash at the end of the current batch.
|
||||
pub post_msg_queue_hash: B256,
|
||||
/// The block number of the first block in the batch.
|
||||
pub initial_block_number: u64,
|
||||
/// The number of blocks in the batch.
|
||||
pub num_blocks: u16,
|
||||
/// The block contexts of each block in the batch.
|
||||
pub block_contexts: Vec<BlockContextV2>,
|
||||
/// The L2 tx data flattened over every tx in every block in the batch.
|
||||
pub tx_data: Vec<u8>,
|
||||
}
|
||||
|
||||
const INDEX_PREV_MSG_QUEUE_HASH: usize = 0;
|
||||
const INDEX_POST_MSG_QUEUE_HASH: usize = INDEX_PREV_MSG_QUEUE_HASH + 32;
|
||||
const INDEX_L2_BLOCK_NUM: usize = INDEX_POST_MSG_QUEUE_HASH + 32;
|
||||
const INDEX_NUM_BLOCKS: usize = INDEX_L2_BLOCK_NUM + 8;
|
||||
const INDEX_BLOCK_CTX: usize = INDEX_NUM_BLOCKS + 2;
|
||||
|
||||
impl From<&EnvelopeV7> for PayloadV7 {
|
||||
fn from(envelope: &EnvelopeV7) -> Self {
|
||||
// Conditionally decode depending on the flag set in the envelope.
|
||||
let payload_bytes = if envelope.is_encoded & 1 == 1 {
|
||||
vm_zstd::process(&envelope.unpadded_bytes)
|
||||
.expect("zstd decode should succeed")
|
||||
.decoded_data
|
||||
} else {
|
||||
envelope.unpadded_bytes.to_vec()
|
||||
};
|
||||
|
||||
// Sanity check on the payload size.
|
||||
assert!(payload_bytes.len() >= INDEX_BLOCK_CTX);
|
||||
let num_blocks = u16::from_be_bytes(
|
||||
payload_bytes[INDEX_NUM_BLOCKS..INDEX_BLOCK_CTX]
|
||||
.try_into()
|
||||
.expect("should not fail"),
|
||||
);
|
||||
assert!(payload_bytes.len() >= INDEX_BLOCK_CTX + ((num_blocks as usize) * SIZE_BLOCK_CTX));
|
||||
|
||||
// Deserialize the other fields.
|
||||
let prev_msg_queue_hash =
|
||||
B256::from_slice(&payload_bytes[INDEX_PREV_MSG_QUEUE_HASH..INDEX_POST_MSG_QUEUE_HASH]);
|
||||
let post_msg_queue_hash =
|
||||
B256::from_slice(&payload_bytes[INDEX_POST_MSG_QUEUE_HASH..INDEX_L2_BLOCK_NUM]);
|
||||
let initial_block_number = u64::from_be_bytes(
|
||||
payload_bytes[INDEX_L2_BLOCK_NUM..INDEX_NUM_BLOCKS]
|
||||
.try_into()
|
||||
.expect("should not fail"),
|
||||
);
|
||||
|
||||
// Deserialize block contexts depending on the number of blocks in the batch.
|
||||
let mut block_contexts = Vec::with_capacity(num_blocks as usize);
|
||||
for i in 0..num_blocks {
|
||||
let start = (i as usize) * SIZE_BLOCK_CTX + INDEX_BLOCK_CTX;
|
||||
block_contexts.push(BlockContextV2::from(
|
||||
&payload_bytes[start..(start + SIZE_BLOCK_CTX)],
|
||||
));
|
||||
}
|
||||
|
||||
// All remaining bytes are flattened L2 txs.
|
||||
let tx_data =
|
||||
payload_bytes[INDEX_BLOCK_CTX + ((num_blocks as usize) * SIZE_BLOCK_CTX)..].to_vec();
|
||||
|
||||
Self {
|
||||
version: envelope.version,
|
||||
prev_msg_queue_hash,
|
||||
post_msg_queue_hash,
|
||||
initial_block_number,
|
||||
num_blocks,
|
||||
block_contexts,
|
||||
tx_data,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PayloadV7 {
    /// Validate the payload contents.
    pub fn validate<'a>(
        &self,
        header: &BatchHeaderV7,
        chunk_infos: &'a [ChunkInfo],
    ) -> (&'a ChunkInfo, &'a ChunkInfo) {
        // Get the first and last chunks' info, to construct the batch info.
        let (first_chunk, last_chunk) = (
            chunk_infos.first().expect("at least one chunk in batch"),
            chunk_infos.last().expect("at least one chunk in batch"),
        );

        // version from payload is what's present in the on-chain batch header
        assert_eq!(self.version, header.version);

        // number of blocks in the batch
        assert_eq!(
            usize::from(self.num_blocks),
            chunk_infos
                .iter()
                .flat_map(|chunk_info| &chunk_info.block_ctxs)
                .count()
        );
        assert_eq!(usize::from(self.num_blocks), self.block_contexts.len());

        // the block number of the first block in the batch
        assert_eq!(self.initial_block_number, first_chunk.initial_block_number);

        // prev message queue hash
        assert_eq!(self.prev_msg_queue_hash, first_chunk.prev_msg_queue_hash);

        // post message queue hash
        assert_eq!(self.post_msg_queue_hash, last_chunk.post_msg_queue_hash);

        // for each chunk, the tx_data_digest, i.e. keccak digest of the rlp-encoded L2 tx bytes
        // flattened over every tx in the chunk, should be re-computed and matched against the
        // public input of the chunk-circuit.
        //
        // first check that the total size of rlp-encoded tx data flattened over all txs in the
        // chunk is in fact the size available from the payload.
        assert_eq!(
            u64::try_from(self.tx_data.len()).expect("len(tx-data) is u64"),
            chunk_infos
                .iter()
                .map(|chunk_info| chunk_info.tx_data_length)
                .sum::<u64>(),
        );
        let mut index: usize = 0;
        for chunk_info in chunk_infos.iter() {
            let chunk_size = chunk_info.tx_data_length as usize;
            let chunk_tx_data_digest =
                keccak256(&self.tx_data.as_slice()[index..(index + chunk_size)]);
            assert_eq!(chunk_tx_data_digest, chunk_info.tx_data_digest);
            index += chunk_size;
        }

        // for each block in the batch, check that the block context matches what's provided as
        // witness.
        for (block_ctx, witness_block_ctx) in self.block_contexts.iter().zip(
            chunk_infos
                .iter()
                .flat_map(|chunk_info| &chunk_info.block_ctxs),
        ) {
            assert_eq!(block_ctx, witness_block_ctx);
        }

        (first_chunk, last_chunk)
    }
}
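The digest loop above is the heart of the payload/chunk cross-check: self.tx_data is treated as the concatenation of every chunk's RLP-encoded tx bytes, partitioned by each chunk's tx_data_length, and each slice's keccak must equal the chunk circuit's public tx_data_digest. A standalone sketch of that partition-and-digest invariant, using a hypothetical helper (keccak256 and B256 as in alloy-primitives):

    use alloy_primitives::{keccak256, B256};

    /// Hypothetical helper: split `tx_data` into per-chunk slices by length
    /// and keccak-hash each slice. Returns None if the lengths do not cover
    /// `tx_data` exactly (mirrors the total-length assert_eq! in validate()).
    fn chunk_digests(tx_data: &[u8], chunk_lens: &[usize]) -> Option<Vec<B256>> {
        if chunk_lens.iter().sum::<usize>() != tx_data.len() {
            return None;
        }
        let mut index = 0;
        Some(
            chunk_lens
                .iter()
                .map(|&len| {
                    let digest = keccak256(&tx_data[index..index + len]);
                    index += len;
                    digest
                })
                .collect(),
        )
    }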
@@ -1,57 +0,0 @@
use crate::header::ReferenceHeader;
use types_agg::{AggregationInput, ProgramCommitment, ProofCarryingWitness};
use types_base::public_inputs::{ForkName, chunk::ChunkInfo};

/// Simple re-wrap of 48 bytes, to avoid an unnecessary dependency.
pub type Bytes48 = [u8; 48];

/// Witness required for applying point evaluation.
#[derive(Clone, Debug, rkyv::Archive, rkyv::Deserialize, rkyv::Serialize)]
#[rkyv(derive(Debug))]
pub struct PointEvalWitness {
    /// kzg commitment
    #[rkyv()]
    pub kzg_commitment: Bytes48,
    /// kzg proof
    #[rkyv()]
    pub kzg_proof: Bytes48,
}

/// Witness to the batch circuit.
#[derive(Clone, Debug, rkyv::Archive, rkyv::Deserialize, rkyv::Serialize)]
#[rkyv(derive(Debug))]
pub struct BatchWitness {
    /// Flattened root proofs from all chunks in the batch.
    #[rkyv()]
    pub chunk_proofs: Vec<AggregationInput>,
    /// Chunk infos.
    #[rkyv()]
    pub chunk_infos: Vec<ChunkInfo>,
    /// Blob bytes.
    #[rkyv()]
    pub blob_bytes: Vec<u8>,
    /// Witness for point evaluation.
    pub point_eval_witness: PointEvalWitness,
    /// Header for reference.
    #[rkyv()]
    pub reference_header: ReferenceHeader,
    /// The fork name specifying the chain spec.
    #[rkyv()]
    pub fork_name: ForkName,
}

impl ProofCarryingWitness for ArchivedBatchWitness {
    fn get_proofs(&self) -> Vec<AggregationInput> {
        self.chunk_proofs
            .iter()
            .map(|archived| AggregationInput {
                public_values: archived
                    .public_values
                    .iter()
                    .map(|u32_le| u32_le.to_native())
                    .collect(),
                commitment: ProgramCommitment::from(&archived.commitment),
            })
            .collect()
    }
}
@@ -1,21 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types-bundle"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"

[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
rkyv.workspace = true
serde.workspace = true
itertools.workspace = true
vm-zstd = { workspace = true }

types-base = { path = "../base", package = "scroll-zkvm-circuit-input-types-base" }
types-agg = { path = "../aggregation", package = "scroll-zkvm-circuit-input-types-aggregation" }

[features]
default = []
@@ -1,2 +0,0 @@
mod witness;
pub use witness::{ArchivedBundleWitness, BundleWitness};
@@ -1,30 +0,0 @@
use types_agg::{AggregationInput, ProgramCommitment, ProofCarryingWitness};
use types_base::public_inputs::batch::BatchInfo;

/// The witness for the bundle circuit.
#[derive(Clone, Debug, rkyv::Archive, rkyv::Deserialize, rkyv::Serialize)]
#[rkyv(derive(Debug))]
pub struct BundleWitness {
    /// Batch proofs being aggregated in the bundle.
    #[rkyv()]
    pub batch_proofs: Vec<AggregationInput>,
    /// Public-input values for the corresponding batch proofs.
    #[rkyv()]
    pub batch_infos: Vec<BatchInfo>,
}

impl ProofCarryingWitness for ArchivedBundleWitness {
    fn get_proofs(&self) -> Vec<AggregationInput> {
        self.batch_proofs
            .iter()
            .map(|archived| AggregationInput {
                public_values: archived
                    .public_values
                    .iter()
                    .map(|u32_le| u32_le.to_native())
                    .collect(),
                commitment: ProgramCommitment::from(&archived.commitment),
            })
            .collect()
    }
}
@@ -1,28 +0,0 @@
[package]
name = "scroll-zkvm-circuit-input-types-chunk"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
readme.workspace = true
repository.workspace = true
version = "0.2.0"

[dependencies]
alloy-primitives = { workspace = true, default-features = false, features = ["std", "map-hashbrown", "map-fxhash", "rkyv"] }
rkyv.workspace = true
sbv-trie = { workspace = true }
sbv-core = { workspace = true }
sbv-primitives = { workspace = true }
sbv-kv = { workspace = true }
serde.workspace = true
itertools.workspace = true

openvm = { workspace = true, features = ["std"] }
openvm-rv32im-guest = { workspace = true }
openvm-custom-insn = { workspace = true }

types-base = { path = "../base", package = "scroll-zkvm-circuit-input-types-base" }

[features]
default = []
openvm = ["sbv-trie/openvm", "sbv-core/openvm", "sbv-primitives/openvm"]
@@ -1,167 +0,0 @@
use sbv_core::{EvmDatabase, EvmExecutor};
use sbv_primitives::{
    BlockWitness,
    chainspec::{
        BaseFeeParams, BaseFeeParamsKind, Chain, MAINNET,
        reth_chainspec::ChainSpec,
        scroll::{ScrollChainConfig, ScrollChainSpec},
    },
    ext::{BlockWitnessChunkExt, TxBytesHashExt},
    hardforks::SCROLL_DEV_HARDFORKS,
    types::{
        consensus::BlockHeader,
        reth::{Block, BlockWitnessRethExt, RecoveredBlock},
        scroll::ChunkInfoBuilder,
    },
};

use crate::{ArchivedChunkWitness, make_providers, manually_drop_on_zkvm};
use types_base::public_inputs::{
    ForkName,
    chunk::{BlockContextV2, ChunkInfo},
};

fn block_ctxv2_from_block(value: &RecoveredBlock<Block>) -> BlockContextV2 {
    use alloy_primitives::U256;
    BlockContextV2 {
        timestamp: value.timestamp,
        gas_limit: value.gas_limit,
        base_fee: U256::from(value.base_fee_per_gas().expect("base_fee_expected")),
        num_txs: u16::try_from(value.body().transactions.len()).expect("num txs u16"),
        num_l1_msgs: u16::try_from(
            value
                .body()
                .transactions
                .iter()
                .filter(|tx| tx.is_l1_message())
                .count(),
        )
        .expect("num l1 msgs u16"),
    }
}

type Witness = ArchivedChunkWitness;

pub fn execute(witness: &Witness) -> Result<ChunkInfo, String> {
    if witness.blocks.is_empty() {
        return Err("At least one witness must be provided in chunk mode".into());
    }
    if !witness.blocks.has_same_chain_id() {
        return Err("All witnesses must have the same chain id in chunk mode".into());
    }
    if !witness.blocks.has_seq_block_number() {
        return Err("All witnesses must have sequential block numbers in chunk mode".into());
    }
    // Get the blocks to build the basic chunk-info.
    let blocks = manually_drop_on_zkvm!(
        witness
            .blocks
            .iter()
            .map(|w| w.build_reth_block())
            .collect::<Result<Vec<RecoveredBlock<Block>>, _>>()
            .map_err(|e| e.to_string())?
    );
    let pre_state_root = witness.blocks[0].pre_state_root;

    let fork_name = ForkName::from(&witness.fork_name);
    let chain = Chain::from_id(witness.blocks[0].chain_id());

    // SCROLL_DEV_HARDFORKS will enable all forks
    let mut hardforks = (*SCROLL_DEV_HARDFORKS).clone();
    if fork_name == ForkName::EuclidV1 {
        // disable EuclidV2 fork for legacy chunk
        use sbv_primitives::{chainspec::ForkCondition, hardforks::ScrollHardfork};
        hardforks.insert(ScrollHardfork::EuclidV2, ForkCondition::Never);
    }

    let inner = ChainSpec {
        chain,
        genesis_hash: Default::default(),
        genesis: Default::default(),
        genesis_header: Default::default(),
        paris_block_and_final_difficulty: Default::default(),
        hardforks,
        deposit_contract: Default::default(),
        base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()),
        prune_delete_limit: 20000,
        blob_params: Default::default(),
    };
    let config = ScrollChainConfig::mainnet();
    let chain_spec: ScrollChainSpec = ScrollChainSpec { inner, config };

    let (code_db, nodes_provider, block_hashes) = make_providers(&witness.blocks);
    let nodes_provider = manually_drop_on_zkvm!(nodes_provider);

    let prev_state_root = witness.blocks[0].pre_state_root();
    let mut db = manually_drop_on_zkvm!(
        EvmDatabase::new_from_root(code_db, prev_state_root, &nodes_provider, block_hashes)
            .map_err(|e| format!("failed to create EvmDatabase: {}", e))?
    );
    for block in blocks.iter() {
        let output = manually_drop_on_zkvm!(
            EvmExecutor::new(std::sync::Arc::new(chain_spec.clone()), &db, block)
                .execute()
                .map_err(|e| format!("failed to execute block: {}", e))?
        );
        db.update(&nodes_provider, output.state.state.iter())
            .map_err(|e| format!("failed to update db: {}", e))?;
    }

    let post_state_root = db.commit_changes();

    let withdraw_root = db
        .withdraw_root()
        .map_err(|e| format!("failed to get withdraw root: {}", e))?;

    let mut rlp_buffer = manually_drop_on_zkvm!(Vec::with_capacity(2048));
    let (tx_data_length, tx_data_digest) = blocks
        .iter()
        .flat_map(|b| b.body().transactions.iter())
        .tx_bytes_hash_in(rlp_buffer.as_mut());
    let _ = tx_data_length;

    let sbv_chunk_info = {
        #[allow(unused_mut)]
        let mut builder = ChunkInfoBuilder::new(&chain_spec, pre_state_root.into(), &blocks);
        if fork_name == ForkName::EuclidV2 {
            builder.set_prev_msg_queue_hash(witness.prev_msg_queue_hash.into());
        }
        builder.build(withdraw_root)
    };
    if post_state_root != sbv_chunk_info.post_state_root() {
        return Err(format!(
            "state root mismatch: expected={}, found={}",
            sbv_chunk_info.post_state_root(),
            post_state_root
        ));
    }

    let chunk_info = ChunkInfo {
        chain_id: sbv_chunk_info.chain_id(),
        prev_state_root: sbv_chunk_info.prev_state_root(),
        post_state_root: sbv_chunk_info.post_state_root(),
        data_hash: sbv_chunk_info
            .clone()
            .into_legacy()
            .map(|x| x.data_hash)
            .unwrap_or_default(),
        withdraw_root,
        tx_data_digest,
        tx_data_length: u64::try_from(tx_data_length).expect("tx_data_length: u64"),
        initial_block_number: blocks[0].header().number,
        prev_msg_queue_hash: witness.prev_msg_queue_hash.into(),
        post_msg_queue_hash: sbv_chunk_info
            .into_euclid_v2()
            .map(|x| x.post_msg_queue_hash)
            .unwrap_or_default(),
        block_ctxs: blocks.iter().map(block_ctxv2_from_block).collect(),
    };

    openvm::io::println(format!("withdraw_root = {:?}", withdraw_root));
    openvm::io::println(format!("tx_bytes_hash = {:?}", tx_data_digest));

    // We should never touch that lazy lock... Or else we introduce 40M useless cycles.
    assert!(std::sync::LazyLock::get(&MAINNET).is_none());

    Ok(chunk_info)
}
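The final assertion above is worth a note: std::sync::LazyLock::get returns Some only if the lazy value has already been initialized, and it never forces initialization itself, so it works as a cheap guard that the expensive MAINNET chain spec was never constructed inside the guest. A standalone sketch of the pattern, with EXPENSIVE standing in for MAINNET:

    // Standalone sketch of the LazyLock guard pattern used above. On the
    // nightly toolchain this crate pins, LazyLock::get still required
    // #![feature(lazy_get)] (see lib.rs below); it is available in later
    // stable Rust releases.
    use std::sync::LazyLock;

    static EXPENSIVE: LazyLock<Vec<u64>> = LazyLock::new(|| (0..1_000_000).collect());

    fn main() {
        // get() does not initialize: None means nobody has paid the init cost yet.
        assert!(LazyLock::get(&EXPENSIVE).is_none());

        let _len = EXPENSIVE.len(); // first deref runs the init closure

        assert!(LazyLock::get(&EXPENSIVE).is_some());
    }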
@@ -1,11 +0,0 @@
#![feature(lazy_get)]

mod utils;

mod witness;

pub use utils::make_providers;
pub use witness::{ArchivedChunkWitness, ChunkWitness};

mod execute;
pub use execute::execute;
@@ -1,27 +0,0 @@
use alloy_primitives::{B256, U256};
use sbv_primitives::types::{
    consensus::BlockHeader,
    reth::{Block, RecoveredBlock},
};

use types_base::public_inputs::chunk::BlockContextV2;

impl From<&RecoveredBlock<Block>> for BlockContextV2 {
    fn from(value: &RecoveredBlock<Block>) -> Self {
        Self {
            timestamp: value.timestamp,
            gas_limit: value.gas_limit,
            base_fee: U256::from(value.base_fee_per_gas().expect("base_fee_expected")),
            num_txs: u16::try_from(value.body().transactions.len()).expect("num txs u16"),
            num_l1_msgs: u16::try_from(
                value
                    .body()
                    .transactions
                    .iter()
                    .filter(|tx| tx.is_l1_message())
                    .count(),
            )
            .expect("num l1 msgs u16"),
        }
    }
}
@@ -1,48 +0,0 @@
use sbv_kv::nohash::NoHashMap;
use sbv_primitives::{B256, BlockWitness, Bytes, ext::BlockWitnessExt};
use sbv_trie::{BlockWitnessTrieExt, TrieNode};

type CodeDb = NoHashMap<B256, Bytes>;

type NodesProvider = NoHashMap<B256, TrieNode>;

type BlockHashProvider = sbv_kv::null::NullProvider;

pub fn make_providers<W: BlockWitness>(
    witnesses: &[W],
) -> (CodeDb, NodesProvider, BlockHashProvider) {
    let code_db = {
        // build code db
        let num_codes = witnesses.iter().map(|w| w.codes_iter().len()).sum();
        let mut code_db =
            NoHashMap::<B256, Bytes>::with_capacity_and_hasher(num_codes, Default::default());
        witnesses.import_codes(&mut code_db);
        code_db
    };
    let nodes_provider = {
        let num_states = witnesses.iter().map(|w| w.states_iter().len()).sum();
        let mut nodes_provider =
            NoHashMap::<B256, TrieNode>::with_capacity_and_hasher(num_states, Default::default());
        witnesses.import_nodes(&mut nodes_provider).unwrap();
        nodes_provider
    };
    let block_hashes = sbv_kv::null::NullProvider;

    (code_db, nodes_provider, block_hashes)
}

#[macro_export]
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64")))]
macro_rules! manually_drop_on_zkvm {
    ($e:expr) => {
        std::mem::ManuallyDrop::new($e)
    };
}

#[macro_export]
#[cfg(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64"))]
macro_rules! manually_drop_on_zkvm {
    ($e:expr) => {
        $e
    };
}
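The two cfg-gated variants above implement one idea: inside the zkVM guest (any non-host target_arch), running destructors is wasted cycles, since the guest's memory is discarded when the program ends; large values are therefore wrapped in ManuallyDrop to leak them deliberately, while on host architectures the expression passes through unchanged so tests keep normal drop semantics. A minimal sketch of the effect, assuming the value's Drop is what we want to skip:

    use std::mem::ManuallyDrop;

    fn main() {
        let big = vec![0u8; 1 << 20];

        // Wrapping in ManuallyDrop keeps Deref access but suppresses the drop glue.
        let big = ManuallyDrop::new(big);
        assert_eq!(big.len(), 1 << 20); // still usable through Deref

        // `big` goes out of scope here without freeing its buffer. On a zkVM
        // guest that is fine (memory dies with the program) and saves the
        // cycles the deallocation walk would otherwise cost.
    }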
@@ -1,71 +0,0 @@
use alloy_primitives::B256;
use sbv_primitives::types::BlockWitness;
use std::collections::HashSet;

use types_base::public_inputs::ForkName;

/// The witness type accepted by the chunk-circuit.
#[derive(
    Clone,
    Debug,
    serde::Deserialize,
    serde::Serialize,
    rkyv::Archive,
    rkyv::Deserialize,
    rkyv::Serialize,
)]
#[rkyv(derive(Debug))]
pub struct ChunkWitness {
    /// The block witness for each block in the chunk.
    pub blocks: Vec<BlockWitness>,
    /// The on-chain rolling L1 message queue hash before enqueueing any L1 msg tx from the chunk.
    pub prev_msg_queue_hash: B256,
    /// The fork name specifying the chain spec.
    pub fork_name: ForkName,
}

impl ChunkWitness {
    pub fn new(blocks: &[BlockWitness], prev_msg_queue_hash: B256, fork_name: ForkName) -> Self {
        let num_codes = blocks.iter().map(|w| w.codes.len()).sum();
        let num_states = blocks.iter().map(|w| w.states.len()).sum();
        let mut codes = HashSet::with_capacity(num_codes);
        let mut states = HashSet::with_capacity(num_states);

        let blocks = blocks
            .iter()
            .map(|block| BlockWitness {
                chain_id: block.chain_id,
                header: block.header.clone(),
                pre_state_root: block.pre_state_root,
                transaction: block.transaction.clone(),
                withdrawals: block.withdrawals.clone(),
                states: block
                    .states
                    .iter()
                    .filter(|s| states.insert(*s))
                    .cloned()
                    .collect(),
                codes: block
                    .codes
                    .iter()
                    .filter(|c| codes.insert(*c))
                    .cloned()
                    .collect(),
            })
            .collect();

        Self {
            blocks,
            prev_msg_queue_hash,
            fork_name,
        }
    }

    pub fn new_v1(blocks: &[BlockWitness]) -> Self {
        Self::new(blocks, Default::default(), ForkName::EuclidV1)
    }

    pub fn new_v2(blocks: &[BlockWitness], prev_msg_queue_hash: B256) -> Self {
        Self::new(blocks, prev_msg_queue_hash, ForkName::EuclidV2)
    }
}
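ChunkWitness::new relies on HashSet::insert returning false for a value that is already present, so filtering on insert keeps only the first occurrence of each state node and bytecode across the whole chunk, shrinking the serialized witness. A tiny standalone illustration of the idiom:

    use std::collections::HashSet;

    fn main() {
        let per_block = vec![vec!["a", "b"], vec!["b", "c"], vec!["a", "d"]];

        // insert() returns true only for first occurrences, so
        // filter(|x| seen.insert(*x)) deduplicates across blocks
        // while preserving order.
        let mut seen = HashSet::new();
        let deduped: Vec<Vec<&str>> = per_block
            .into_iter()
            .map(|block| block.into_iter().filter(|x| seen.insert(*x)).collect())
            .collect();

        assert_eq!(deduped, vec![vec!["a", "b"], vec!["c"], vec!["d"]]);
    }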
@@ -1,21 +0,0 @@
// re-export for a compatible interface with old circuit/types for prover

pub mod bundle {
    pub use types_base::public_inputs::bundle::{BundleInfo, BundleInfoV1, BundleInfoV2};
    pub use types_bundle::*;
}

pub mod batch {
    pub use types_base::public_inputs::batch::{ArchivedBatchInfo, BatchInfo, VersionedBatchInfo};
    pub use types_batch::*;
}

pub mod chunk {
    pub use types_base::public_inputs::chunk::{
        ArchivedChunkInfo, BlockContextV2, ChunkInfo, SIZE_BLOCK_CTX, VersionedChunkInfo,
    };
    pub use types_chunk::*;
}

pub use types_agg;
pub use types_base::{public_inputs, utils};
@@ -276,8 +276,8 @@ const (
	SenderTypeFinalizeBatch
	// SenderTypeL1GasOracle indicates a sender from L2 responsible for updating L1 gas prices.
	SenderTypeL1GasOracle
	// SenderTypeL2GasOracleDeprecated indicates a sender from L1 responsible for updating L2 gas prices, which is deprecated.
	SenderTypeL2GasOracleDeprecated
	// SenderTypeL2GasOracle indicates a sender from L1 responsible for updating L2 gas prices.
	SenderTypeL2GasOracle
)

// String returns a string representation of the SenderType.
@@ -289,8 +289,8 @@ func (t SenderType) String() string {
		return "SenderTypeFinalizeBatch"
	case SenderTypeL1GasOracle:
		return "SenderTypeL1GasOracle"
	case SenderTypeL2GasOracleDeprecated:
		return "SenderTypeL2GasOracleDeprecated"
	case SenderTypeL2GasOracle:
		return "SenderTypeL2GasOracle"
	default:
		return fmt.Sprintf("Unknown SenderType (%d)", int32(t))
	}
@@ -173,9 +173,9 @@ func TestSenderType(t *testing.T) {
			"SenderTypeL1GasOracle",
		},
		{
			"SenderTypeL2GasOracleDeprecated",
			SenderTypeL2GasOracleDeprecated,
			"SenderTypeL2GasOracleDeprecated",
			"SenderTypeL2GasOracle",
			SenderTypeL2GasOracle,
			"SenderTypeL2GasOracle",
		},
		{
			"Invalid Value",
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
{"metadata":{"bundle_info":{"chain_id":333333,"msg_queue_hash":"0x0101010101010101010101010101010101010101010101010101010101010101","num_batches":2,"prev_state_root":"0x5302a56cbbec7d14d48d592b805d4ec3c7011439dfaa90d44deee02a9326d203","prev_batch_hash":"0xabacadaeaf000000000000000000000000000000000000000000000000000000","post_state_root":"0xaf6696afb2e11052490051f0f9f6444be6e9f5bb82beb3c3dae846cfa59ed6e0","batch_hash":"0xf0ee5d6b9cd739eb1ff816a58486af8b08d42a8c50d6e5998e7a3947c7aae2a9","withdraw_root":"0x0000000000000000000000000000000000000000000000000000000000000000"},"bundle_pi_hash":"0x2028510c403837c6ed77660fd92814ba61d7b746e7268cc8dfc14d163d45e6bd"},"proof":{"proof":"CfpNiL6UpegsK3VcoAj9ey5daMbZDFiF1XpCKvrOeN0MPPLNCDrllJL/gN0E3qmq20kGLYpBQ8aZ3sgUrxpSyA+9GKK8NhZoIM75adOnV8AYCLXpmxfS81MxIai/+ghxDIUvJQJVgWKJPsMQp4lO/Qltc4eCNWeoR2jHua/VzSASQXDDQ5ozD6i448TVkjKiyEcHwFFMMuOebFUzDc85hA4AJGM1T9bPl5VVQkEpijdNF+1lzUfi27U0XRQbYz8aE8hiCLxR8Z2bHg65dvfa+TsaDI8oAlz33Q1yIadZBtceKsH53P5u6vwWp0dQvw8DGNv8G5zvsayHPNCvy4xz8hRT3E4G0Ome8voqqOxrc/A8u2fE6LoXKswvU6Uquv+LHwGMbTugRvQ0BBXlLQ29Hvj18rDzS6ll0OnEcRiaaEkGOZy7Kq1PGiF7ZxMZsJYCbhyPgg4TKpesYDUJygEN0iGNX90dmyzGLTTgJATMYBGD2U+XP/T+UOMbxFTl3TFNHWlCPhEAu5LBwZ0pD3XV1xNW1iUqwTSfg7Qz1SOUYkot10Q8EAKeXk3hluHK+nSQhOMfWC4tnvfQdMqepfymwwArzA/9GMA/Two7yuzgCz7vHb+56YKPZiDrh4cqSvpVI92hCF8GWHaTqWDR0fikx2Y7GLX8YBM3Rx8reQE+LYYGEJHJzD4cIc0MKiuet605ZPSAaKpb8JM2EgrCAfw+QAhBiwXQ3HOQkrt17tzqNJH7IeHF761v43D9w+IeqvetKEgYXEH3fHmN00dLV2Uws8C4956qze+SG81ScnZzbrIeiO9lnmUXSFzrL40K+3NqCZcFnfLhVidyEJepzJi50yOK5BUJdMFdNtvHtprICqLKyb7aRg39qoZ7RqyJTg5nAjQQBGelvRu/AN6zdyxja73Jo5gEovdIiMybi/IhfMwKGWgiRaOGxyHx9KZ/ZA/w7r3rce6vuDsUhk5hsgVj4wUW3BqoZ8iRIH8X6AjK1xli+S/HfgAkfmUVwNNBOcgYEcrqEbswsfYKOcoFn71DISLK0jmB44LTNyGxoWBMpIAOf/gGhQSNk0ojd4n4UXxShsqmJ57Kudw/mGimMm+Crhr5asxeiFH0eJNBgUEXDuveqE1d20UTRJ1UJ/hZGomsDLebTojSTtsMLWTtx/4Mqg+g3Odte1WKN6CgxF4kGRcW2tE3D1jiBys5FTHMAhmka3mUBwlciT7syDWBDlYVuSmwppCghdBMQfQL4s3Uh0vRG28LkU+UXcwYXwh3UK6cA1bBnKfAa9k7P5BuMxVh8p6he6EZr0kGNjKGPSxuVxgczO/C32GP+HVVsWlIMNmgB4GeMHIN3yJampOrLZIMlQuP9d9kOicvRia1ge5sFtT+Vmthnp1F7sR3P+ADB/WxKSxVbiLaVBo+zm/rZbyM9vU0CVLD69lzPC6xKcFkxewlWJU6o7rOz1qzh47fT+8qUcVYfpCSEtT/U8eX2JFnXCb0PPXWivofI28tnsuS8GjwUiOyzCoxxuIEOyz1HNRXBcO2dSKR2qM41zUs0btA2JkA3hTVW8YWn8czHxrZyooooaumzbUPQBOqO3fewnLLyQ9etBcjZJ8Xm/B1EBk9cRPWDjgx5Hq8C0soA+EsoNoaSQJu67HuFTRd/OWvKSliCoj1XVcqBobnJWmTU7kAgi73pMaq/G4ot2rRFSL9MbkJgHCyxBkrl9nkCVUJC5GphsrDS5P5/bmRS3iTNdxiXAzdwOIQqJpEO54oN+3CHZuZuUOgCcWTI3uxWq/gBDJrBTsv8EUqtNQJve0qwIh2PUuJl5DIqF0CvswN649gywc=","instances":"AAAAAAAAAAAAAAAAAAAAAAAAAAAApvhdIlw19IwSvukAAAAAAAAAAAAAAAAAAAAAAAAAAAAl72fyrHk3TaguHQAAAAAAAAAAAAAAAAAAAAAAAAAAAAALh9HvEG69AvDlAAAAAAAAAAAAAAAAAAAAAAAAAAAAkGY9R6S+t36FIrAAAAAAAAAAAAAAAAAAAAAAAAAAAACoNqt7QwZoXUpj/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAdaREhypq22OmnAAAAAAAAAAAAAAAAAAAAAAAAAAAAOXf2Vj0jGD1q4xQAAAAAAAAAAAAAAAAAAAAAAAAAAADZYAdKTg7m4hBHGgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAll4nXKE0us1IMAAAAAAAAAAAAAAAAAAAAAAAAAAAAFfnJ8YXlwczTsyEAAAAAAAAAAAAAAAAAAAAAAAAAAAArXqULkWYvNST9PQAAAAAAAAAAAAAAAAAAAAAAAAAAAAArqteSdJMySnbMAC5TUWus+SXtvRWUNmCSMiMb4aZvb4hpJ5yXqjtih6gAIn9WQUOx/Z/rbbdComU0hCSwKwrewQgB3KolXKensAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA7QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
B3AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC6AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC3AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA5wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADfAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAWAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAL0="},"vk":"AhYAAAAABAAAAD2PumVP6pqldS0PKWW8Q4IvnE/rvtm5/2fXvG196sYhKtVFtg+WFGYJrU+eMUKZVjPurMpM8kbYiXvE18bnsU4Nu8s47Xabxy0EViND1dzsu5HicdAWl0xG5C+VpO2faJdK4nGwtD4WHtbdqWY72nSY5aKSDxAYO85vLy+9cJZlQsMNQlhTi/2q9PYQpC4D3Uf8E+yZ7gvLhd6cFdErlg4Oq/nthQkfxPAarVYLUFNGW80SgIloMDhutrky34D+Csw8T9j5UXpHz3K/2yuVSXK6OvMG4/058TXG09qKgXYP","git_version":"9f48bc4"}
File diff suppressed because one or more lines are too long
@@ -14,6 +14,7 @@ const (
	EuclidFork = "euclid"
	EuclidV2Fork = "euclidV2"

	EuclidForkNameForProver = "euclidv1"
	EuclidV2ForkNameForProver = "euclidv2"
)
@@ -98,22 +99,22 @@ func (e *Byte48) UnmarshalJSON(input []byte) error {
// BatchTaskDetail is a type containing BatchTask detail.
type BatchTaskDetail struct {
	// use one of the strings EuclidFork / EuclidV2Fork
	ForkName string `json:"fork_name"`
	ChunkInfos []*ChunkInfo `json:"chunk_infos"`
	ChunkProofs []*OpenVMChunkProof `json:"chunk_proofs"`
	BatchHeader interface{} `json:"batch_header"`
	BlobBytes []byte `json:"blob_bytes"`
	KzgProof Byte48 `json:"kzg_proof,omitempty"`
	KzgCommitment Byte48 `json:"kzg_commitment,omitempty"`
	ChallengeDigest common.Hash `json:"challenge_digest,omitempty"`
	ForkName string `json:"fork_name"`
	ChunkInfos []*ChunkInfo `json:"chunk_infos"`
	ChunkProofs []ChunkProof `json:"chunk_proofs"`
	BatchHeader interface{} `json:"batch_header"`
	BlobBytes []byte `json:"blob_bytes"`
	KzgProof Byte48 `json:"kzg_proof,omitempty"`
	KzgCommitment Byte48 `json:"kzg_commitment,omitempty"`
	ChallengeDigest common.Hash `json:"challenge_digest,omitempty"`
}

// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
type BundleTaskDetail struct {
	// use one of the strings EuclidFork / EuclidV2Fork
	ForkName string `json:"fork_name"`
	BatchProofs []*OpenVMBatchProof `json:"batch_proofs"`
	BundleInfo *OpenVMBundleInfo `json:"bundle_info,omitempty"`
	ForkName string `json:"fork_name"`
	BatchProofs []BatchProof `json:"batch_proofs"`
	BundleInfo *OpenVMBundleInfo `json:"bundle_info,omitempty"`
}

// ChunkInfo is for calculating pi_hash for chunk
@@ -142,6 +143,157 @@ type BlockContextV2 struct {
	NumL1Msgs uint16 `json:"num_l1_msgs"`
}

// SubCircuitRowUsage tracing info added in v0.11.0rc8
type SubCircuitRowUsage struct {
	Name string `json:"name"`
	RowNumber uint64 `json:"row_number"`
}

// ChunkProof
type ChunkProof interface {
	Proof() []byte
}

// NewChunkProof creates a new ChunkProof instance.
func NewChunkProof(hardForkName string) ChunkProof {
	switch hardForkName {
	case EuclidFork, EuclidV2Fork:
		return &OpenVMChunkProof{}
	default:
		return &Halo2ChunkProof{}
	}
}

// Halo2ChunkProof includes the proof info that is required for chunk verification and rollup.
type Halo2ChunkProof struct {
	StorageTrace []byte `json:"storage_trace,omitempty"`
	Protocol []byte `json:"protocol"`
	RawProof []byte `json:"proof"`
	Instances []byte `json:"instances"`
	Vk []byte `json:"vk"`
	// cross-reference between coordinator computation and prover computation
	ChunkInfo *ChunkInfo `json:"chunk_info,omitempty"`
	GitVersion string `json:"git_version,omitempty"`
	RowUsages []SubCircuitRowUsage `json:"row_usages,omitempty"`
}

// Proof returns the proof bytes of a ChunkProof
func (ap *Halo2ChunkProof) Proof() []byte {
	return ap.RawProof
}

// BatchProof
type BatchProof interface {
	SanityCheck() error
	Proof() []byte
}

// NewBatchProof creates a new BatchProof instance.
func NewBatchProof(hardForkName string) BatchProof {
	switch hardForkName {
	case EuclidFork, EuclidV2Fork:
		return &OpenVMBatchProof{}
	default:
		return &Halo2BatchProof{}
	}
}

// Halo2BatchProof includes the proof info that is required for batch verification and rollup.
type Halo2BatchProof struct {
	Protocol []byte `json:"protocol"`
	RawProof []byte `json:"proof"`
	Instances []byte `json:"instances"`
	Vk []byte `json:"vk"`
	// cross-reference between coordinator computation and prover computation
	BatchHash common.Hash `json:"batch_hash"`
	GitVersion string `json:"git_version,omitempty"`
}

// Proof returns the proof bytes of a BatchProof
func (ap *Halo2BatchProof) Proof() []byte {
	return ap.RawProof
}

// SanityCheck checks whether a BatchProof is in a legal format
func (ap *Halo2BatchProof) SanityCheck() error {
	if ap == nil {
		return errors.New("agg_proof is nil")
	}

	if len(ap.RawProof) == 0 {
		return errors.New("proof not ready")
	}

	if len(ap.RawProof)%32 != 0 {
		return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.RawProof))
	}

	if len(ap.Instances) == 0 {
		return errors.New("instance not ready")
	}

	if len(ap.Vk) == 0 {
		return errors.New("vk not ready")
	}

	return nil
}

// BundleProof
type BundleProof interface {
	SanityCheck() error
	Proof() []byte
}

// NewBundleProof creates a new BundleProof instance.
func NewBundleProof(hardForkName string) BundleProof {
	switch hardForkName {
	case EuclidFork, EuclidV2Fork:
		return &OpenVMBundleProof{}
	default:
		return &Halo2BundleProof{}
	}
}

// Halo2BundleProof includes the proof info that is required for verification of a bundle of batch proofs.
type Halo2BundleProof struct {
	RawProof []byte `json:"proof"`
	Instances []byte `json:"instances"`
	Vk []byte `json:"vk"`
	// cross-reference between coordinator computation and prover computation
	GitVersion string `json:"git_version,omitempty"`
}

// Proof returns the proof bytes of a BundleProof
func (ap *Halo2BundleProof) Proof() []byte {
	return ap.RawProof
}

// SanityCheck checks whether a BundleProof is in a legal format
func (ap *Halo2BundleProof) SanityCheck() error {
	if ap == nil {
		return errors.New("agg_proof is nil")
	}

	if len(ap.RawProof) == 0 {
		return errors.New("proof not ready")
	}

	if len(ap.RawProof)%32 != 0 {
		return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.RawProof))
	}

	if len(ap.Instances) == 0 {
		return errors.New("instance not ready")
	}

	if len(ap.Vk) == 0 {
		return errors.New("vk not ready")
	}

	return nil
}

// Proof for flattened VM proof
type OpenVMProof struct {
	Proof []byte `json:"proofs"`
@@ -20,12 +20,9 @@ var (
	}
	// RollupRelayerFlags contains flags only used in rollup-relayer
	RollupRelayerFlags = []cli.Flag{
		&ImportGenesisFlag,
		&MinCodecVersionFlag,
	}
	// ProposerToolFlags contains flags only used in proposer tool
	ProposerToolFlags = []cli.Flag{
		&StartL2BlockFlag,
	}
	// ConfigFileFlag load json type config file.
	ConfigFileFlag = cli.StringFlag{
		Name: "config",
@@ -76,6 +73,12 @@ var (
		Category: "METRICS",
		Value: 6060,
	}
	// ImportGenesisFlag import genesis batch during startup
	ImportGenesisFlag = cli.BoolFlag{
		Name: "import-genesis",
		Usage: "Import genesis batch into L1 contract during startup",
		Value: false,
	}
	// ServicePortFlag is the port the service will listen on
	ServicePortFlag = cli.IntFlag{
		Name: "service.port",
@@ -94,10 +97,4 @@ var (
		Usage: "Minimum required codec version for the chunk/batch/bundle proposers",
		Required: true,
	}
	// StartL2BlockFlag indicates the start L2 block number for proposer tool
	StartL2BlockFlag = cli.Uint64Flag{
		Name: "start-l2-block",
		Usage: "Start L2 block number for proposer tool",
		Value: 0,
	}
)
@@ -5,7 +5,7 @@ import (
	"runtime/debug"
)

var tag = "v4.5.10"
var tag = "v4.5.0"

var commit = func() string {
	if info, ok := debug.ReadBuildInfo(); ok {
@@ -90,10 +90,18 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
	cfg.ProverManager = &coordinatorConfig.ProverManager{
		ProversPerSession: 1,
		Verifier: &coordinatorConfig.VerifierConfig{
			HighVersionCircuit: &coordinatorConfig.CircuitConfig{
				MockMode: true,
			LowVersionCircuit: &coordinatorConfig.CircuitConfig{
				ParamsPath: "",
				AssetsPath: "",
				ForkName: "euclidV2",
				MinProverVersion: "v4.4.89",
				ForkName: "darwin",
				MinProverVersion: "v4.2.0",
			},
			HighVersionCircuit: &coordinatorConfig.CircuitConfig{
				ParamsPath: "",
				AssetsPath: "",
				ForkName: "darwinV2",
				MinProverVersion: "v4.3.0",
			},
		},
		BatchCollectionTimeSec: 60,
@@ -62,14 +62,14 @@ func action(ctx *cli.Context) error {
		return fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", taskID)
	}

	var batchProofs []*message.OpenVMBatchProof
	var batchProofs []message.BatchProof
	for _, batch := range batches {
		var proof message.OpenVMBatchProof
		proof := message.NewBatchProof("darwinV2")
		if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
			log.Error("failed to unmarshal batch proof")
			return fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, taskID, batch.Hash)
		}
		batchProofs = append(batchProofs, &proof)
		batchProofs = append(batchProofs, proof)
	}

	taskDetail := message.BundleTaskDetail{
@@ -7,9 +7,17 @@
	"batch_collection_time_sec": 180,
	"chunk_collection_time_sec": 180,
	"verifier": {
		"high_version_circuit": {
			"mock_mode": true,
		"low_version_circuit": {
			"params_path": "params",
			"assets_path": "assets",
			"fork_name": "euclidV2",
			"fork_name": "darwin",
			"min_prover_version": "v4.4.43"
		},
		"high_version_circuit": {
			"params_path": "params",
			"assets_path": "assets",
			"fork_name": "darwinV2",
			"min_prover_version": "v4.4.45"
		}
	}
@@ -18,6 +18,8 @@ require (
	gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)

replace github.com/scroll-tech/da-codec => github.com/scroll-tech/da-codec v0.1.3-0.20250327153440-cd3e5728df9c

require (
	github.com/bytedance/sonic v1.10.1 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
@@ -177,8 +177,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/da-codec v0.1.3-0.20250327153440-cd3e5728df9c h1:MCbuwFynRgxQeoyXwt/wUAPo3vfb61rMWxqADE2he4A=
github.com/scroll-tech/da-codec v0.1.3-0.20250327153440-cd3e5728df9c/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
@@ -51,6 +51,7 @@ type Config struct {

// CircuitConfig circuit items.
type CircuitConfig struct {
	ParamsPath string `json:"params_path"`
	AssetsPath string `json:"assets_path"`
	ForkName string `json:"fork_name"`
	MinProverVersion string `json:"min_prover_version"`
@@ -58,6 +59,8 @@ type CircuitConfig struct {

// VerifierConfig load zk verifier config.
type VerifierConfig struct {
	MockMode bool `json:"mock_mode"`
	LowVersionCircuit *CircuitConfig `json:"low_version_circuit"`
	HighVersionCircuit *CircuitConfig `json:"high_version_circuit"`
}
@@ -15,18 +15,15 @@ func TestConfig(t *testing.T) {
	"prover_manager": {
		"provers_per_session": 1,
		"session_attempts": 5,
		"external_prover_threshold": 32,
		"bundle_collection_time_sec": 180,
		"batch_collection_time_sec": 180,
		"chunk_collection_time_sec": 180,
		"verifier": {
			"high_version_circuit": {
				"assets_path": "assets",
				"fork_name": "euclidV2",
				"min_prover_version": "v4.4.45"
			}
			"mock_mode": true,
			"params_path": "",
			"agg_vk_path": ""
		},
		"max_verifier_workers": 4
		"max_verifier_workers": 4,
		"min_prover_version": "v1.0.0"
	},
	"db": {
		"driver_name": "postgres",
@@ -26,7 +26,7 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
		panic("proof receiver new verifier failure")
	}

	log.Info("verifier created", "openVmVerifier", vf.OpenVMVkMap)
	log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap, "bundleVerifier", vf.BundleVkMap, "openVmVerifier", vf.OpenVMVkMap)

	Auth = NewAuthController(db, cfg, vf)
	GetTask = NewGetTaskController(cfg, chainCfg, db, reg)
@@ -22,6 +22,9 @@ import (
type LoginLogic struct {
	cfg *config.Config
	challengeOrm *orm.Challenge
	chunkVks map[string]struct{}
	batchVKs map[string]struct{}
	bundleVks map[string]struct{}

	openVmVks map[string]struct{}
@@ -31,14 +34,28 @@ type LoginLogic struct {
// NewLoginLogic new a LoginLogic
func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
	proverVersionHardForkMap := make(map[string][]string)
	if version.CheckScrollRepoVersion(cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
		log.Error("config file error, low verifier min_prover_version should not be more than high verifier min_prover_version",
			"low verifier min_prover_version", cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion,
			"high verifier min_prover_version", cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion)
		panic("verifier config file error")
	}

	var highHardForks []string
	highHardForks = append(highHardForks, cfg.ProverManager.Verifier.HighVersionCircuit.ForkName)
	if cfg.ProverManager.Verifier.HighVersionCircuit.ForkName != message.EuclidFork && cfg.ProverManager.Verifier.HighVersionCircuit.ForkName != message.EuclidV2Fork {
		highHardForks = append(highHardForks, cfg.ProverManager.Verifier.LowVersionCircuit.ForkName)
	}
	highHardForks = append(highHardForks, message.EuclidFork, message.EuclidV2Fork)
	proverVersionHardForkMap[cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion] = highHardForks

	proverVersionHardForkMap[cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion] = []string{cfg.ProverManager.Verifier.LowVersionCircuit.ForkName}

	return &LoginLogic{
		cfg: cfg,
		chunkVks: vf.ChunkVKMap,
		batchVKs: vf.BatchVKMap,
		bundleVks: vf.BundleVkMap,
		openVmVks: vf.OpenVMVkMap,
		challengeOrm: orm.NewChallenge(db),
		proverVersionHardForkMap: proverVersionHardForkMap,
@@ -58,25 +75,46 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
		return errors.New("auth message verify failure")
	}

	if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
		return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion, login.Message.ProverVersion)
	if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion) {
		return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
			l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, login.Message.ProverVersion)
	}

	vks := make(map[string]struct{})
	for vk := range l.openVmVks {
		vks[vk] = struct{}{}
	}

	for _, vk := range login.Message.VKs {
		if _, ok := vks[vk]; !ok {
			log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
				"prover_version", login.Message.ProverVersion, "message", login.Message)
			if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
				return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
					version.Version, login.Message.ProverVersion)
	if len(login.Message.ProverTypes) > 0 {
		vks := make(map[string]struct{})
		for _, proverType := range login.Message.ProverTypes {
			switch proverType {
			case types.ProverTypeChunk:
				for vk := range l.chunkVks {
					vks[vk] = struct{}{}
				}
			case types.ProverTypeBatch:
				for vk := range l.batchVKs {
					vks[vk] = struct{}{}
				}
				for vk := range l.bundleVks {
					vks[vk] = struct{}{}
				}
			case types.ProverTypeOpenVM:
				for vk := range l.openVmVks {
					vks[vk] = struct{}{}
				}
			default:
				log.Error("invalid prover_type", "value", proverType, "prover name", login.Message.ProverName, "prover_version", login.Message.ProverVersion)
			}
		}

		for _, vk := range login.Message.VKs {
			if _, ok := vks[vk]; !ok {
				log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
					"prover_version", login.Message.ProverVersion, "message", login.Message)
				if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
					return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
						version.Version, login.Message.ProverVersion)
				}
				// if the prover reports the same prover version
				return errors.New("incompatible vk. please check your params files or config files")
			}
			// if the prover reports the same prover version
			return errors.New("incompatible vk. please check your params files or config files")
		}
	}
@@ -197,27 +197,34 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
		return nil, fmt.Errorf("no chunk found for batch task id:%s", task.TaskID)
	}

	var chunkProofs []*message.OpenVMChunkProof
	var chunkProofs []message.ChunkProof
	var chunkInfos []*message.ChunkInfo
	for _, chunk := range chunks {
		var proof message.OpenVMChunkProof
		proof := message.NewChunkProof(hardForkName)
		if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
			return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, task.TaskID, chunk.Hash)
		}
		chunkProofs = append(chunkProofs, &proof)
		chunkProofs = append(chunkProofs, proof)

		chunkInfo := message.ChunkInfo{
			ChainID: bp.cfg.L2.ChainID,
			PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
			PostStateRoot: common.HexToHash(chunk.StateRoot),
			WithdrawRoot: common.HexToHash(chunk.WithdrawRoot),
			DataHash: common.HexToHash(chunk.Hash),
			PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
			PostMsgQueueHash: common.HexToHash(chunk.PostL1MessageQueueHash),
			IsPadding: false,
			InitialBlockNumber: proof.MetaData.ChunkInfo.InitialBlockNumber,
			BlockCtxs: proof.MetaData.ChunkInfo.BlockCtxs,
			TxDataLength: proof.MetaData.ChunkInfo.TxDataLength,
			ChainID: bp.cfg.L2.ChainID,
			PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
			PostStateRoot: common.HexToHash(chunk.StateRoot),
			WithdrawRoot: common.HexToHash(chunk.WithdrawRoot),
			DataHash: common.HexToHash(chunk.Hash),
			PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
			PostMsgQueueHash: common.HexToHash(chunk.PostL1MessageQueueHash),
			IsPadding: false,
		}
		if halo2Proof, ok := proof.(*message.Halo2ChunkProof); ok {
			if halo2Proof.ChunkInfo != nil {
				chunkInfo.TxBytes = halo2Proof.ChunkInfo.TxBytes
			}
		}
		if openvmProof, ok := proof.(*message.OpenVMChunkProof); ok {
			chunkInfo.InitialBlockNumber = openvmProof.MetaData.ChunkInfo.InitialBlockNumber
			chunkInfo.BlockCtxs = openvmProof.MetaData.ChunkInfo.BlockCtxs
			chunkInfo.TxDataLength = openvmProof.MetaData.ChunkInfo.TxDataLength
		}
		chunkInfos = append(chunkInfos, &chunkInfo)
	}
@@ -251,7 +258,7 @@ func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *or
	}
}

func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []*message.OpenVMChunkProof, hardForkName string) (*message.BatchTaskDetail, error) {
func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []message.ChunkProof, hardForkName string) (*message.BatchTaskDetail, error) {
	taskDetail := &message.BatchTaskDetail{
		ChunkInfos: chunkInfos,
		ChunkProofs: chunkProofs,
@@ -259,9 +266,8 @@ func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*

	if hardForkName == message.EuclidV2Fork {
		taskDetail.ForkName = message.EuclidV2ForkNameForProver
	} else {
		log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
		return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
	} else if hardForkName == message.EuclidFork {
		taskDetail.ForkName = message.EuclidForkNameForProver
	}

	dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
@@ -200,13 +200,13 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
		return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
	}

	var batchProofs []*message.OpenVMBatchProof
	var batchProofs []message.BatchProof
	for _, batch := range batches {
		var proof message.OpenVMBatchProof
		proof := message.NewBatchProof(hardForkName)
		if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
			return nil, fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, task.TaskID, batch.Hash)
		}
		batchProofs = append(batchProofs, &proof)
		batchProofs = append(batchProofs, proof)
	}

	taskDetail := message.BundleTaskDetail{
@@ -215,9 +215,8 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov

	if hardForkName == message.EuclidV2Fork {
		taskDetail.ForkName = message.EuclidV2ForkNameForProver
	} else {
		log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
		return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
	} else if hardForkName == message.EuclidFork {
		taskDetail.ForkName = message.EuclidForkNameForProver
	}

	taskDetail.BundleInfo = &message.OpenVMBundleInfo{
@@ -228,7 +227,10 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
		NumBatches: uint32(len(batches)),
		PrevBatchHash: common.HexToHash(batches[0].ParentBatchHash),
		BatchHash: common.HexToHash(batches[len(batches)-1].Hash),
		MsgQueueHash: common.HexToHash(batches[len(batches)-1].PostL1MessageQueueHash),
	}

	if hardForkName == message.EuclidV2Fork {
		taskDetail.BundleInfo.MsgQueueHash = common.HexToHash(batches[len(batches)-1].PostL1MessageQueueHash)
	}

	batchProofsBytes, err := json.Marshal(taskDetail)
@@ -195,9 +195,8 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove

	if hardForkName == message.EuclidV2Fork {
		taskDetail.ForkName = message.EuclidV2ForkNameForProver
	} else {
		log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
		return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
	} else if hardForkName == message.EuclidFork {
		taskDetail.ForkName = message.EuclidForkNameForProver
	}

	var err error
@@ -171,19 +171,19 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor

	switch message.ProofType(proofParameter.TaskType) {
	case message.ProofTypeChunk:
		chunkProof := &message.OpenVMChunkProof{}
		chunkProof := message.NewChunkProof(hardForkName)
		if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &chunkProof); unmarshalErr != nil {
			return unmarshalErr
		}
		success, verifyErr = m.verifier.VerifyChunkProof(chunkProof, hardForkName)
	case message.ProofTypeBatch:
		batchProof := &message.OpenVMBatchProof{}
		batchProof := message.NewBatchProof(hardForkName)
		if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &batchProof); unmarshalErr != nil {
			return unmarshalErr
		}
		success, verifyErr = m.verifier.VerifyBatchProof(batchProof, hardForkName)
	case message.ProofTypeBundle:
		bundleProof := &message.OpenVMBundleProof{}
		bundleProof := message.NewBundleProof(hardForkName)
		if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &bundleProof); unmarshalErr != nil {
			return unmarshalErr
		}
@@ -10,26 +10,31 @@ import (

// NewVerifier sets up a mock verifier.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
	return &Verifier{cfg: cfg, OpenVMVkMap: map[string]struct{}{"mock_vk": {}}}, nil
	batchVKMap := map[string]struct{}{"mock_vk": {}}
	chunkVKMap := map[string]struct{}{"mock_vk": {}}
	return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
}

// VerifyChunkProof returns a mock verification result for a ChunkProof.
func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName string) (bool, error) {
	if proof.VmProof != nil && string(proof.VmProof.Proof) == InvalidTestProof {
func (v *Verifier) VerifyChunkProof(proof message.ChunkProof, forkName string) (bool, error) {
	if string(proof.Proof()) == InvalidTestProof {
		return false, nil
	}
	return true, nil
}

// VerifyBatchProof returns a mock verification result for a BatchProof.
func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName string) (bool, error) {
	if proof.VmProof != nil && string(proof.VmProof.Proof) == InvalidTestProof {
func (v *Verifier) VerifyBatchProof(proof message.BatchProof, forkName string) (bool, error) {
	if string(proof.Proof()) == InvalidTestProof {
		return false, nil
	}
	return true, nil
}

// VerifyBundleProof returns a mock verification result for a BundleProof.
func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName string) (bool, error) {
func (v *Verifier) VerifyBundleProof(proof message.BundleProof, forkName string) (bool, error) {
	if string(proof.Proof()) == InvalidTestProof {
		return false, nil
	}
	return true, nil
}
@@ -7,8 +7,11 @@ import (
// InvalidTestProof invalid proof used in tests
const InvalidTestProof = "this is a invalid proof"

// Verifier represents a rust ffi to a verifier.
// Verifier represents a rust ffi to a halo2 verifier.
type Verifier struct {
	cfg *config.VerifierConfig
	ChunkVKMap map[string]struct{}
	BatchVKMap map[string]struct{}
	BundleVkMap map[string]struct{}
	OpenVMVkMap map[string]struct{}
}
@@ -30,12 +30,14 @@ import (
// in `*config.CircuitConfig` being changed
type rustCircuitConfig struct {
	ForkName string `json:"fork_name"`
	ParamsPath string `json:"params_path"`
	AssetsPath string `json:"assets_path"`
}

func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
	return &rustCircuitConfig{
		ForkName: cfg.ForkName,
		ParamsPath: cfg.ParamsPath,
		AssetsPath: cfg.AssetsPath,
	}
}
@@ -44,11 +46,13 @@ func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
// Defining a brand new struct here eliminates side effects in case fields
// in `*config.VerifierConfig` are changed
type rustVerifierConfig struct {
	LowVersionCircuit *rustCircuitConfig `json:"low_version_circuit"`
	HighVersionCircuit *rustCircuitConfig `json:"high_version_circuit"`
}

func newRustVerifierConfig(cfg *config.VerifierConfig) *rustVerifierConfig {
	return &rustVerifierConfig{
		LowVersionCircuit: newRustCircuitConfig(cfg.LowVersionCircuit),
		HighVersionCircuit: newRustCircuitConfig(cfg.HighVersionCircuit),
	}
}
@@ -61,6 +65,19 @@ type rustVkDump struct {

// NewVerifier sets up a rust ffi to call verify.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
	if cfg.MockMode {
		chunkVKMap := map[string]struct{}{"mock_vk": {}}
		batchVKMap := map[string]struct{}{"mock_vk": {}}
		bundleVKMap := map[string]struct{}{"mock_vk": {}}
		openVMVkMap := map[string]struct{}{"mock_vk": {}}
		return &Verifier{
			cfg: cfg,
			ChunkVKMap: chunkVKMap,
			BatchVKMap: batchVKMap,
			BundleVkMap: bundleVKMap,
			OpenVMVkMap: openVMVkMap,
		}, nil
	}
	verifierConfig := newRustVerifierConfig(cfg)
	configBytes, err := json.Marshal(verifierConfig)
	if err != nil {
@@ -76,9 +93,16 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {

	v := &Verifier{
		cfg: cfg,
		ChunkVKMap: make(map[string]struct{}),
		BatchVKMap: make(map[string]struct{}),
		BundleVkMap: make(map[string]struct{}),
		OpenVMVkMap: make(map[string]struct{}),
	}

	if err := v.loadLowVersionVKs(cfg); err != nil {
		return nil, err
	}

	if err := v.loadOpenVMVks(message.EuclidFork); err != nil {
		return nil, err
	}
@@ -87,11 +111,21 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
		return nil, err
	}

	v.loadDarwinVKs()

	return v, nil
}

// VerifyBatchProof verifies a ZkProof by marshaling it and sending it to the Verifier.
func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName string) (bool, error) {
// VerifyBatchProof verifies a ZkProof by marshaling it and sending it to the Halo2 Verifier.
func (v *Verifier) VerifyBatchProof(proof message.BatchProof, forkName string) (bool, error) {
	if v.cfg.MockMode {
		log.Info("Mock mode, batch verifier disabled")
		if string(proof.Proof()) == InvalidTestProof {
			return false, nil
		}
		return true, nil

	}
	buf, err := json.Marshal(proof)
	if err != nil {
		return false, err
@@ -109,8 +143,16 @@ func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName st
	return verified != 0, nil
}

// VerifyChunkProof verifies a ZkProof by marshaling it and sending it to the Verifier.
func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName string) (bool, error) {
// VerifyChunkProof verifies a ZkProof by marshaling it and sending it to the Halo2 Verifier.
func (v *Verifier) VerifyChunkProof(proof message.ChunkProof, forkName string) (bool, error) {
	if v.cfg.MockMode {
		log.Info("Mock mode, verifier disabled")
		if string(proof.Proof()) == InvalidTestProof {
			return false, nil
		}
		return true, nil

	}
	buf, err := json.Marshal(proof)
	if err != nil {
		return false, err
@@ -129,7 +171,15 @@ func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName st
	}

// VerifyBundleProof verifies a ZkProof for a bundle of batches, by marshaling it and verifying it via the EVM verifier.
func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName string) (bool, error) {
func (v *Verifier) VerifyBundleProof(proof message.BundleProof, forkName string) (bool, error) {
	if v.cfg.MockMode {
		log.Info("Mock mode, verifier disabled")
		if string(proof.Proof()) == InvalidTestProof {
			return false, nil
		}
		return true, nil

	}
	buf, err := json.Marshal(proof)
	if err != nil {
		return false, err
@@ -159,6 +209,32 @@ func (v *Verifier) readVK(filePat string) (string, error) {
|
||||
return base64.StdEncoding.EncodeToString(byt), nil
|
||||
}
|
||||
|
||||
// load low version vks, current is darwin
|
||||
func (v *Verifier) loadLowVersionVKs(cfg *config.VerifierConfig) error {
|
||||
bundleVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_bundle.vkey"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
batchVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_batch.vkey"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
chunkVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_chunk.vkey"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.BundleVkMap[bundleVK] = struct{}{}
|
||||
v.BatchVKMap[batchVK] = struct{}{}
|
||||
v.ChunkVKMap[chunkVK] = struct{}{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *Verifier) loadDarwinVKs() {
|
||||
v.BundleVkMap["AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD5dsp1rEy7PSqiIFikkkOPqKokLW2mZSwCbtKdkfLQcvTxARUwHSe4iZe27PRJ5WWaLqtRV1+x6+pSVKtcPtaV4kE7v2YJRf0582hxiAF0IBaOoREdpyNfA2a9cvhWb2TMaPrUYP9EDQ7CUiW1FQzxbjGc95ua2htscnpU7d9S5stHWzKb7okkCG7bTIL9aG6qTQo2YXW7n3H3Ir47oVJB7IKrUzKGvI5Wmanh2zpZOJ9Qm4/wY24cT7cJz+Ux6wAg=="] = struct{}{}
|
||||
v.BatchVKMap["AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD1DEjW4Kell67H07wazT5DdzrSh4+amh+cmosQHp9p9snFypyoBGt3UHtoJGQBZlywZWDS9ht5pnaEoGBdaKcQk+lFb+WxTiId0KOAa0mafTZTQw8yToy57Jple64qzlRu1dux30tZZGuerLN1CKzg5Xl2iOpMK+l87jCINwVp5cUtF/XrvhBbU7onKh3KBiy99iUqVyA3Y6iiIZhGKWBSuSA4bNgDYIoVkqjHpdL35aEShoRO6pNXt7rDzxFoPzH0JuPI54nE4OhVrzZXwtkAEosxVa/fszcE092FH+HhhtxZBYe/KEzwdISU9TOPdId3UF/UMYC0MiYOlqffVTgAg="] = struct{}{}
|
||||
v.ChunkVKMap["AAAAGQAAAATyWEABRbJ6hQQ5/zLX1gTasr7349minA9rSgMS6gDeHwZKqikRiO3md+pXjjxMHnKQtmXYgMXhJSvlmZ+Ws+cheuly2X1RuNQzcZuRImaKPR9LJsVZYsXfJbuqdKX8p0Gj8G83wMJOmTzNVUyUol0w0lTU+CEiTpHOnxBsTF3EWaW3s1u4ycOgWt1c9M6s7WmaBZLYgAWYCunO5CLCLApNGbCASeck/LuSoedEri5u6HccCKU2khG6zl6W07jvYSbDVLJktbjRiHv+/HQix+K14j8boo8Z/unhpwXCsPxkQA=="] = struct{}{}
|
||||
}
|
||||
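A hypothetical membership check (an assumption, not code from this diff) showing how such VK sets are typically consumed: a verifying key reported by a prover is accepted only if it is present in the corresponding map.

	// reportedVK is the base64 VK a prover sends at login (hypothetical name).
	if _, ok := v.BatchVKMap[reportedVK]; !ok {
		return fmt.Errorf("unrecognized batch vk: %s", reportedVK)
	}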

func (v *Verifier) loadOpenVMVks(forkName string) error {
	tempFile := path.Join(os.TempDir(), "openVmVk.json")
	defer func() {

@@ -29,9 +29,17 @@ func TestFFI(t *testing.T) {
	as := assert.New(t)

	cfg := &config.VerifierConfig{
		MockMode: false,
		LowVersionCircuit: &config.CircuitConfig{
			ParamsPath:       *paramsPath,
			AssetsPath:       *assetsPathLo,
			ForkName:         "darwin",
			MinProverVersion: "",
		},
		HighVersionCircuit: &config.CircuitConfig{
			ParamsPath:       *paramsPath,
			AssetsPath:       *assetsPathHi,
			ForkName:         "euclidV2",
			ForkName:         "darwinV2",
			MinProverVersion: "",
		},
	}
@@ -40,43 +48,43 @@
	as.NoError(err)

	chunkProof1 := readChunkProof(*chunkProofPath1, as)
	chunkOk1, err := v.VerifyChunkProof(chunkProof1, "euclidV2")
	chunkOk1, err := v.VerifyChunkProof(chunkProof1, "darwinV2")
	as.NoError(err)
	as.True(chunkOk1)
	t.Log("Verified chunk proof 1")

	chunkProof2 := readChunkProof(*chunkProofPath2, as)
	chunkOk2, err := v.VerifyChunkProof(chunkProof2, "euclidV2")
	chunkOk2, err := v.VerifyChunkProof(chunkProof2, "darwinV2")
	as.NoError(err)
	as.True(chunkOk2)
	t.Log("Verified chunk proof 2")

	batchProof := readBatchProof(*batchProofPath, as)
	batchOk, err := v.VerifyBatchProof(batchProof, "euclidV2")
	batchOk, err := v.VerifyBatchProof(batchProof, "darwinV2")
	as.NoError(err)
	as.True(batchOk)
	t.Log("Verified batch proof")
}

func readBatchProof(filePat string, as *assert.Assertions) *types.OpenVMBatchProof {
func readBatchProof(filePat string, as *assert.Assertions) types.BatchProof {
	f, err := os.Open(filePat)
	as.NoError(err)
	byt, err := io.ReadAll(f)
	as.NoError(err)

	proof := &types.OpenVMBatchProof{}
	proof := &types.Halo2BatchProof{}
	as.NoError(json.Unmarshal(byt, proof))

	return proof
}

func readChunkProof(filePat string, as *assert.Assertions) *types.OpenVMChunkProof {
func readChunkProof(filePat string, as *assert.Assertions) types.ChunkProof {
	f, err := os.Open(filePat)
	as.NoError(err)
	byt, err := io.ReadAll(f)
	as.NoError(err)

	proof := &types.OpenVMChunkProof{}
	proof := &types.Halo2ChunkProof{}
	as.NoError(json.Unmarshal(byt, proof))

	return proof
}
173  coordinator/internal/orm/script/main.go  Normal file
@@ -0,0 +1,173 @@
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"math/big"
	"os"
	"strconv"
	"strings"

	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"

	"scroll-tech/common/database"
	"scroll-tech/common/types/message"
	"scroll-tech/coordinator/internal/orm"
)

func main() {
	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
	glogger.Verbosity(log.LvlInfo)
	log.Root().SetHandler(glogger)

	if len(os.Args) < 2 {
		log.Crit("no batch index range provided")
		return
	}

	indexRange := os.Args[1]
	indices := strings.Split(indexRange, "-")
	if len(indices) != 2 {
		log.Crit("invalid batch index range format. Use start-end", "providedRange", indexRange)
		return
	}

	startIndex, err := strconv.Atoi(indices[0])
	endIndex, err2 := strconv.Atoi(indices[1])
	if err != nil || err2 != nil || startIndex > endIndex {
		log.Crit("invalid batch index range", "start", indices[0], "end", indices[1], "err", err, "err2", err2)
		return
	}

	db, err := database.InitDB(&database.Config{
		DriverName: "postgres",
		DSN:        os.Getenv("DB_DSN"),
		MaxOpenNum: 200,
		MaxIdleNum: 20,
	})
	if err != nil {
		log.Crit("failed to init db", "err", err)
	}
	defer func() {
		if deferErr := database.CloseDB(db); deferErr != nil {
			log.Error("failed to close db", "err", err)
		}
	}()

	for i := startIndex; i <= endIndex; i++ {
		batchIndex := uint64(i)
		resultBytes, err := getBatchTask(db, batchIndex)
		if err != nil {
			log.Crit("failed to get batch task", "batchIndex", batchIndex, "err", err)
			continue
		}

		outputFilename := fmt.Sprintf("batch_task_%d.json", batchIndex)
		if err = os.WriteFile(outputFilename, resultBytes, 0644); err != nil {
			log.Crit("failed to write output file", "filename", outputFilename, "err", err)
		}
	}
}

func getBatchTask(db *gorm.DB, batchIndex uint64) ([]byte, error) {
	batch, err := orm.NewBatch(db).GetBatchByIndex(context.Background(), batchIndex)
	if err != nil {
		err = fmt.Errorf("failed to get batch hash by index: %d err: %w ", batchIndex, err)
		return nil, err
	}

	chunks, err := orm.NewChunk(db).GetChunksByBatchHash(context.Background(), batch.Hash)
	if err != nil {
		err = fmt.Errorf("failed to get chunk proofs for batch task id: %s err: %w ", batch.Hash, err)
		return nil, err
	}

	var chunkProofs []message.ChunkProof
	var chunkInfos []*message.ChunkInfo
	for _, chunk := range chunks {
		fmt.Println("chunk index: ", chunk.Index)
		fmt.Print("chunk proof: ", chunk.Proof)
		proof := message.NewChunkProof("euclid")
		if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
			return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, batch.Hash, chunk.Hash)
		}
		chunkProofs = append(chunkProofs, proof)

		chunkInfo := message.ChunkInfo{
			ChainID:          534351,
			PrevStateRoot:    common.HexToHash(chunk.ParentChunkStateRoot),
			PostStateRoot:    common.HexToHash(chunk.StateRoot),
			WithdrawRoot:     common.HexToHash(chunk.WithdrawRoot),
			DataHash:         common.HexToHash(chunk.Hash),
			PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
			PostMsgQueueHash: common.HexToHash(chunk.PostL1MessageQueueHash),
			IsPadding:        false,
		}
		if openvmProof, ok := proof.(*message.OpenVMChunkProof); ok {
			chunkInfo.InitialBlockNumber = openvmProof.MetaData.ChunkInfo.InitialBlockNumber
			chunkInfo.BlockCtxs = openvmProof.MetaData.ChunkInfo.BlockCtxs
			chunkInfo.TxDataLength = openvmProof.MetaData.ChunkInfo.TxDataLength
		}
		chunkInfos = append(chunkInfos, &chunkInfo)
	}

	taskDetail, err := getBatchTaskDetail(batch, chunkInfos, chunkProofs)
	if err != nil {
		return nil, fmt.Errorf("failed to get batch task detail, taskID:%s err:%w", batch.Hash, err)
	}

	chunkProofsBytes, err := json.MarshalIndent(taskDetail, "", " ")
	if err != nil {
		return nil, fmt.Errorf("failed to marshal chunk proofs, taskID:%s err:%w", batch.Hash, err)
	}

	return chunkProofsBytes, nil
}

func getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []message.ChunkProof) (*message.BatchTaskDetail, error) {
	taskDetail := &message.BatchTaskDetail{
		ChunkInfos:  chunkInfos,
		ChunkProofs: chunkProofs,
	}

	dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
	switch dbBatchCodecVersion {
	case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6:
	default:
		return taskDetail, nil
	}

	if dbBatchCodecVersion >= encoding.CodecV7 {
		taskDetail.ForkName = message.EuclidV2ForkNameForProver
	} else {
		taskDetail.ForkName = message.EuclidForkNameForProver
	}

	codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
	if err != nil {
		return nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
	}

	batchHeader, decodeErr := codec.NewDABatchFromBytes(dbBatch.BatchHeader)
	if decodeErr != nil {
		return nil, fmt.Errorf("failed to decode batch header version %d: %w", dbBatch.CodecVersion, decodeErr)
	}
	taskDetail.BatchHeader = batchHeader
	taskDetail.BlobBytes = dbBatch.BlobBytes

	challengeDigest, kzgCommitment, kzgProof, err := codec.BlobDataProofFromBlobBytes(dbBatch.BlobBytes)
	if err != nil {
		return nil, fmt.Errorf("failed to get challenge digest from blob bytes, taskID: %s, err: %w", dbBatch.Hash, err)
	}

	taskDetail.ChallengeDigest = challengeDigest
	taskDetail.KzgProof = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(kzgProof[:]))}
	taskDetail.KzgCommitment = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(kzgCommitment[:]))}

	return taskDetail, nil
}
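Usage note (inferred from the code above, not documented in the diff): the script takes one inclusive batch-index range as its single argument and reads the database DSN from the environment, e.g. `DB_DSN="postgres://..." go run ./coordinator/internal/orm/script 100-110`, writing one `batch_task_<index>.json` file per batch into the working directory.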
@@ -22,7 +22,7 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
			ProverVersion:      "v0.0.1",
			Challenge:          "abcdef",
			ProverProviderType: ProverProviderTypeInternal,
			ProverTypes:        []ProverType{ProverTypeOpenVM},
			ProverTypes:        []ProverType{ProverTypeBatch},
			VKs:                []string{"vk1", "vk2"},
		},
		PublicKey: publicKeyHex,
@@ -64,7 +64,7 @@ func TestGenerateSignature(t *testing.T) {
			ProverVersion:      "v4.4.45-37af5ef5-38a68e2-1c5093c",
			Challenge:          "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjQ4Mzg0ODUsIm9yaWdfaWF0IjoxNzI0ODM0ODg1LCJyYW5kb20iOiJ6QmdNZGstNGc4UzNUNTFrVEFsYk1RTXg2TGJ4SUs4czY3ejM2SlNuSFlJPSJ9.x9PvihhNx2w4_OX5uCrv8QJCNYVQkIi-K2k8XFXYmik",
			ProverProviderType: ProverProviderTypeInternal,
			ProverTypes:        []ProverType{ProverTypeOpenVM},
			ProverTypes:        []ProverType{ProverTypeChunk},
			VKs:                []string{"mock_vk"},
		},
		PublicKey: publicKeyHex,

@@ -2,6 +2,7 @@ package types

import (
	"fmt"

	"scroll-tech/common/types/message"
)

@@ -20,10 +21,10 @@ type ProverType uint8

func (r ProverType) String() string {
	switch r {
	case ProverTypeChunkDeprecated:
		return "prover type chunk (deprecated)"
	case ProverTypeBatchDeprecated:
		return "prover type batch (deprecated)"
	case ProverTypeChunk:
		return "prover type chunk"
	case ProverTypeBatch:
		return "prover type batch"
	case ProverTypeOpenVM:
		return "prover type openvm"
	default:
@@ -34,10 +35,10 @@ func (r ProverType) String() string {
const (
	// ProverTypeUndefined is an unknown prover type
	ProverTypeUndefined ProverType = iota
	// ProverTypeChunk signals it's a chunk prover, which can prove chunk_tasks, which is deprecated
	ProverTypeChunkDeprecated
	// ProverTypeBatch signals it's a batch prover, which can prove batch_tasks and bundle_tasks, which is deprecated
	ProverTypeBatchDeprecated
	// ProverTypeChunk signals it's a chunk prover, which can prove chunk_tasks
	ProverTypeChunk
	// ProverTypeBatch signals it's a batch prover, which can prove batch_tasks and bundle_tasks
	ProverTypeBatch
	// ProverTypeOpenVM
	ProverTypeOpenVM
)
@@ -46,9 +47,9 @@ const (
func MakeProverType(proofType message.ProofType) ProverType {
	switch proofType {
	case message.ProofTypeChunk:
		return ProverTypeChunkDeprecated
		return ProverTypeChunk
	case message.ProofTypeBatch, message.ProofTypeBundle:
		return ProverTypeBatchDeprecated
		return ProverTypeBatch
	default:
		return ProverTypeUndefined
	}
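A small sketch of the remapped behavior (names taken from the diff above; the call site itself is hypothetical):

	// Previously this returned ProverTypeChunkDeprecated; after the change it
	// returns the active ProverTypeChunk value.
	pt := MakeProverType(message.ProofTypeChunk)
	fmt.Println(pt.String()) // "prover type chunk"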
@@ -67,7 +67,7 @@ func randomURL() string {
	return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}

func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string, forks []string) (*cron.Collector, *http.Server) {
	var err error
	db, err = testApps.GetGormDBClient()

@@ -84,10 +84,18 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
		ProverManager: &config.ProverManager{
			ProversPerSession: proversPerSession,
			Verifier: &config.VerifierConfig{
				HighVersionCircuit: &config.CircuitConfig{
				MockMode: true,
				LowVersionCircuit: &config.CircuitConfig{
					ParamsPath:       "",
					AssetsPath:       "",
					ForkName:         "euclidV2",
					MinProverVersion: "v4.4.89",
					ForkName:         "homestead",
					MinProverVersion: "v4.2.0",
				},
				HighVersionCircuit: &config.CircuitConfig{
					ParamsPath:       "",
					AssetsPath:       "",
					ForkName:         "bernoulli",
					MinProverVersion: "v4.3.0",
				},
			},
			BatchCollectionTimeSec: 10,
@@ -101,17 +109,20 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
		},
	}

	var chainConf params.ChainConfig
	for _, forkName := range forks {
		switch forkName {
		case "bernoulli":
			chainConf.BernoulliBlock = big.NewInt(100)
		case "homestead":
			chainConf.HomesteadBlock = big.NewInt(0)
		}
	}

	proofCollector := cron.NewCollector(context.Background(), db, conf, nil)

	router := gin.New()
	api.InitController(conf, &params.ChainConfig{
		BernoulliBlock: big.NewInt(0),
		CurieBlock:     big.NewInt(0),
		DarwinTime:     new(uint64),
		DarwinV2Time:   new(uint64),
		EuclidTime:     new(uint64),
		EuclidV2Time:   new(uint64),
	}, db, nil)
	api.InitController(conf, &chainConf, db, nil)
	route.Route(router, conf, nil)
	srv := &http.Server{
		Addr: coordinatorURL,
@@ -131,7 +142,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
func setEnv(t *testing.T) {
	var err error

	version.Version = "v4.4.89"
	version.Version = "v4.2.0"

	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
	glogger.Verbosity(log.LvlInfo)
@@ -187,7 +198,7 @@ func TestApis(t *testing.T) {
func testHandshake(t *testing.T) {
	// Setup coordinator and http server.
	coordinatorURL := randomURL()
	proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
	proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"homestead"})
	defer func() {
		proofCollector.Stop()
		assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -200,7 +211,7 @@ func testFailedHandshake(t *testing.T) {
func testFailedHandshake(t *testing.T) {
	// Setup coordinator and http server.
	coordinatorURL := randomURL()
	proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
	proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"homestead"})
	defer func() {
		proofCollector.Stop()
	}()
@@ -218,7 +229,7 @@

func testGetTaskBlocked(t *testing.T) {
	coordinatorURL := randomURL()
	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
	defer func() {
		collector.Stop()
		assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -262,7 +273,7 @@

func testOutdatedProverVersion(t *testing.T) {
	coordinatorURL := randomURL()
	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
	defer func() {
		collector.Stop()
		assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -274,12 +285,14 @@
	batchProver := newMockProver(t, "prover_batch_test", coordinatorURL, message.ProofTypeBatch, "v1.999.999")
	assert.True(t, chunkProver.healthCheckSuccess(t))

	expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.89, actual version: %s", chunkProver.proverVersion)
	expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
		conf.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, chunkProver.proverVersion)
	code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
	assert.Equal(t, types.ErrJWTCommonErr, code)
	assert.Equal(t, expectedErr, errors.New(errMsg))

	expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.89, actual version: %s", batchProver.proverVersion)
	expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
		conf.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, batchProver.proverVersion)
	code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
	assert.Equal(t, types.ErrJWTCommonErr, code)
	assert.Equal(t, expectedErr, errors.New(errMsg))
@@ -287,7 +300,7 @@

func testValidProof(t *testing.T) {
	coordinatorURL := randomURL()
	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
	defer func() {
		collector.Stop()
		assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -370,7 +383,7 @@
func testInvalidProof(t *testing.T) {
	// Setup coordinator and ws server.
	coordinatorURL := randomURL()
	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"darwinV2"})
	defer func() {
		collector.Stop()
		assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -458,7 +471,7 @@
func testProofGeneratedFailed(t *testing.T) {
	// Setup coordinator and ws server.
	coordinatorURL := randomURL()
	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"darwinV2"})
	defer func() {
		collector.Stop()
		assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -559,7 +572,7 @@
func testTimeoutProof(t *testing.T) {
	// Setup coordinator and ws server.
	coordinatorURL := randomURL()
	collector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
	collector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"darwinV2"})
	defer func() {
		collector.Stop()
		assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -582,9 +595,7 @@
	assert.NoError(t, err)
	err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 100, batch.Hash)
	assert.NoError(t, err)
	encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{}, MetaData: struct {
		ChunkInfo *message.ChunkInfo `json:"chunk_info"`
	}{ChunkInfo: &message.ChunkInfo{}}})
	encodeData, err := json.Marshal(message.Halo2ChunkProof{})
	assert.NoError(t, err)
	assert.NotEmpty(t, encodeData)
	err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbChunk.Hash, encodeData, types.ProvingTaskUnassigned, 1)

@@ -207,16 +207,14 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
	}

	var proof []byte
	switch message.ProofType(proverTaskSchema.TaskType) {
	case message.ProofTypeChunk:
		encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{}, MetaData: struct {
			ChunkInfo *message.ChunkInfo `json:"chunk_info"`
		}{ChunkInfo: &message.ChunkInfo{}}})
	switch proverTaskSchema.TaskType {
	case int(message.ProofTypeChunk):
		encodeData, err := json.Marshal(message.Halo2ChunkProof{})
		assert.NoError(t, err)
		assert.NotEmpty(t, encodeData)
		proof = encodeData
	case message.ProofTypeBatch:
		encodeData, err := json.Marshal(message.OpenVMBatchProof{VmProof: &message.OpenVMProof{}})
	case int(message.ProofTypeBatch):
		encodeData, err := json.Marshal(message.Halo2BatchProof{})
		assert.NoError(t, err)
		assert.NotEmpty(t, encodeData)
		proof = encodeData
@@ -225,14 +223,16 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
	if proofStatus == verifiedFailed {
		switch proverTaskSchema.TaskType {
		case int(message.ProofTypeChunk):
			encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)}, MetaData: struct {
				ChunkInfo *message.ChunkInfo `json:"chunk_info"`
			}{ChunkInfo: &message.ChunkInfo{}}})
			chunkProof := message.Halo2ChunkProof{}
			chunkProof.RawProof = []byte(verifier.InvalidTestProof)
			encodeData, err := json.Marshal(&chunkProof)
			assert.NoError(t, err)
			assert.NotEmpty(t, encodeData)
			proof = encodeData
		case int(message.ProofTypeBatch):
			encodeData, err := json.Marshal(&message.OpenVMBatchProof{VmProof: &message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)}})
			batchProof := message.Halo2BatchProof{}
			batchProof.RawProof = []byte(verifier.InvalidTestProof)
			encodeData, err := json.Marshal(&batchProof)
			assert.NoError(t, err)
			assert.NotEmpty(t, encodeData)
			proof = encodeData
@@ -1357,8 +1357,7 @@ github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b h1:5H6V6yba
github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b/go.mod h1:48uxaqVgpD8ulH8p+nrBtfeLHZ9tX82bVVdPNkW3rPE=
github.com/scroll-tech/da-codec v0.1.3-0.20250227072756-a1482833595f h1:YYbhuUwjowqI4oyXtECRofck7Fyj18e1tcRjuQlZpJE=
github.com/scroll-tech/da-codec v0.1.3-0.20250227072756-a1482833595f/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/da-codec v0.1.3-0.20250327153440-cd3e5728df9c/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod h1:swB5NSp8pKNDuYsTxfR08bHS6L56i119PBx8fxvV8Cs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=
5854  prover/Cargo.lock  generated  Normal file
File diff suppressed because it is too large
50  prover/Cargo.toml  Normal file
@@ -0,0 +1,50 @@
[package]
name = "prover"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html


[patch.crates-io]
ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }
[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }


[dependencies]
anyhow = "1.0"
log = "0.4"
env_logger = "0.11.3"
serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116"
futures = "0.3.30"

ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover_darwin = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.12.2", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
prover_darwin_v2 = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.13.1", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "160db6c"}
base64 = "0.13.1"
reqwest = { version = "0.12.4", features = ["gzip"] }
reqwest-middleware = "0.3"
reqwest-retry = "0.5"
once_cell = "1.19.0"
hex = "0.4.3"
tiny-keccak = { version = "2.0.0", features = ["sha3", "keccak"] }
rand = "0.8.5"
eth-keystore = "0.5.0"
rlp = "0.5.2"
tokio = "1.37.0"
async-trait = "0.1"
sled = "0.34.7"
http = "1.1.0"
clap = { version = "4.5", features = ["derive"] }
ctor = "0.2.8"
@@ -1,22 +1,22 @@
.PHONY: prover lint tests_binary

ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
PLONKY3_VERSION=$(shell grep -m 1 "Plonky3.git" ./Cargo.lock | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
PLONKY3_VERSION=$(shell grep -m 1 "Plonky3.git" ./Cargo.lock | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif

ZKVM_VERSION=$(shell ./print_high_zkvm_version.sh)
ifeq (${ZKVM_VERSION},)
$(error ZKVM_VERSION not set)
ZKEVM_VERSION=$(shell ./print_high_zkevm_version.sh)
ifeq (${ZKEVM_VERSION},)
$(error ZKEVM_VERSION not set)
else
$(info ZKVM_VERSION is ${ZKVM_VERSION})
$(info ZKEVM_VERSION is ${ZKEVM_VERSION})
endif

ZKVM_COMMIT=$(shell echo ${ZKVM_VERSION} | cut -d " " -f2)
$(info ZKVM_COMMIT is ${ZKVM_COMMIT})
ZKEVM_COMMIT=$(shell echo ${ZKEVM_VERSION} | cut -d " " -f2)
$(info ZKEVM_COMMIT is ${ZKEVM_COMMIT})

PLONKY3_GPU_VERSION=$(shell ./print_plonky3gpu_version.sh | sed -n '2p')
HALO2_GPU_VERSION=$(shell ./print_halo2gpu_version.sh | sed -n '2p')

GIT_REV=$(shell git rev-parse --short HEAD)
GO_TAG=$(shell grep "var tag = " ../common/version/version.go | cut -d "\"" -f2)
@@ -27,12 +27,12 @@ else
$(info GO_TAG is ${GO_TAG})
endif

ifeq (${PLONKY3_GPU_VERSION},)
# use plonky3 with CPU
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_VERSION}
ifeq (${HALO2_GPU_VERSION},)
# use halo2_proofs with CPU
ZK_VERSION=${ZKEVM_COMMIT}-${HALO2_VERSION}
else
# use halo2_gpu
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION}
ZK_VERSION=${ZKEVM_COMMIT}-${HALO2_GPU_VERSION}
endif

prover:
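Reading note (an observation on the hunk above, not text from the repo): the renames are symmetric, ZKVM_VERSION/ZKVM_COMMIT become ZKEVM_VERSION/ZKEVM_COMMIT and the Plonky3 variables become Halo2 ones, so ZK_VERSION is now composed of the zkevm-circuits commit plus the halo2 (or halo2_gpu) commit extracted from Cargo.lock.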
30  prover/config.json  Normal file
@@ -0,0 +1,30 @@
{
    "sdk_config": {
        "prover_name_prefix": "prover-1",
        "keys_dir": "keys",
        "coordinator": {
            "base_url": "http://localhost:8555",
            "retry_count": 10,
            "retry_wait_time_sec": 10,
            "connection_timeout_sec": 30
        },
        "l2geth": {
            "endpoint": "http://localhost:9999"
        },
        "prover": {
            "circuit_types": [1,2,3],
            "circuit_version": "v0.13.1"
        },
        "db_path": "unique-db-path-for-prover-1"
    },
    "low_version_circuit": {
        "hard_fork_name": "darwin",
        "params_path": "params",
        "assets_path": "assets"
    },
    "high_version_circuit": {
        "hard_fork_name": "darwinV2",
        "params_path": "params",
        "assets_path": "assets"
    }
}
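A note on the prover block (an inference from get_prover_type in prover/src/utils.rs further down, not stated in the config itself): the circuit_types values 1, 2 and 3 presumably denote chunk, batch and bundle tasks; chunk maps to the chunk prover type while batch and bundle both map to the batch prover type, so this config enables both provers.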
21  prover/print_halo2gpu_version.sh  Executable file
@@ -0,0 +1,21 @@
#!/bin/bash

config_file="$HOME/.cargo/config"

if [ ! -e "$config_file" ]; then
	exit 0
fi

if [[ $(head -n 1 "$config_file") == "#"* ]]; then
	exit 0
fi

halo2gpu_path=$(grep -Po '(?<=paths = \[")([^"]*)' $config_file)

pushd $halo2gpu_path

commit_hash=$(git log --pretty=format:%h -n 1)
echo "${commit_hash:0:7}"

popd
10  prover/print_high_zkevm_version.sh  Executable file
@@ -0,0 +1,10 @@
#!/bin/bash
set -ue

higher_zkevm_item=`grep "zkevm-circuits.git" ./Cargo.lock | sort | uniq | awk -F "[#=]" '{print $3" "$4}' | sort -k 1 | tail -n 1`

higher_version=`echo $higher_zkevm_item | awk '{print $1}'`

higher_commit=`echo $higher_zkevm_item | cut -d ' ' -f2 | cut -c-7`

echo "$higher_version $higher_commit"
1  prover/rust-toolchain  Normal file
@@ -0,0 +1 @@
nightly-2023-12-03
9  prover/rustfmt.toml  Normal file
@@ -0,0 +1,9 @@
edition = "2021"
|
||||
|
||||
comment_width = 100
|
||||
imports_granularity = "Crate"
|
||||
max_width = 100
|
||||
newline_style = "Unix"
|
||||
# normalize_comments = true
|
||||
reorder_imports = true
|
||||
wrap_comments = true
|
||||
51  prover/src/config.rs  Normal file
@@ -0,0 +1,51 @@
use anyhow::{bail, Result};

static SCROLL_PROVER_ASSETS_DIR_ENV_NAME: &str = "SCROLL_PROVER_ASSETS_DIR";
static mut SCROLL_PROVER_ASSETS_DIRS: Vec<String> = vec![];

#[derive(Debug)]
pub struct AssetsDirEnvConfig {}

impl AssetsDirEnvConfig {
    pub fn init() -> Result<()> {
        let value = std::env::var(SCROLL_PROVER_ASSETS_DIR_ENV_NAME)?;
        let dirs: Vec<&str> = value.split(',').collect();
        if dirs.len() != 2 {
            bail!("env variable SCROLL_PROVER_ASSETS_DIR value must be 2 parts separated by comma.")
        }
        unsafe {
            SCROLL_PROVER_ASSETS_DIRS = dirs.into_iter().map(|s| s.to_string()).collect();
            log::info!(
                "init SCROLL_PROVER_ASSETS_DIRS: {:?}",
                SCROLL_PROVER_ASSETS_DIRS
            );
        }
        Ok(())
    }

    pub fn enable_first() {
        unsafe {
            log::info!(
                "set env {SCROLL_PROVER_ASSETS_DIR_ENV_NAME} to {}",
                &SCROLL_PROVER_ASSETS_DIRS[0]
            );
            std::env::set_var(
                SCROLL_PROVER_ASSETS_DIR_ENV_NAME,
                &SCROLL_PROVER_ASSETS_DIRS[0],
            );
        }
    }

    pub fn enable_second() {
        unsafe {
            log::info!(
                "set env {SCROLL_PROVER_ASSETS_DIR_ENV_NAME} to {}",
                &SCROLL_PROVER_ASSETS_DIRS[1]
            );
            std::env::set_var(
                SCROLL_PROVER_ASSETS_DIR_ENV_NAME,
                &SCROLL_PROVER_ASSETS_DIRS[1],
            );
        }
    }
}
76  prover/src/main.rs  Normal file
@@ -0,0 +1,76 @@
#![feature(lazy_cell)]
#![feature(core_intrinsics)]

mod config;
mod prover;
mod types;
mod utils;
mod zk_circuits_handler;

use clap::{ArgAction, Parser};
use prover::{LocalProver, LocalProverConfig};
use scroll_proving_sdk::{
    prover::ProverBuilder,
    utils::{get_version, init_tracing},
};
use tokio::runtime;
use utils::get_prover_type;

#[derive(Parser, Debug)]
#[command(disable_version_flag = true)]
struct Args {
    /// Path of config file
    #[arg(long = "config", default_value = "conf/config.json")]
    config_file: String,

    /// Version of this prover
    #[arg(short, long, action = ArgAction::SetTrue)]
    version: bool,

    /// Path of log file
    #[arg(long = "log.file")]
    log_file: Option<String>,
}

fn main() -> anyhow::Result<()> {
    let rt = runtime::Builder::new_multi_thread()
        .thread_stack_size(16 * 1024 * 1024) // Set stack size to 16MB
        .enable_all()
        .build()
        .expect("Failed to create Tokio runtime");

    rt.block_on(async {
        init_tracing();

        let args = Args::parse();

        if args.version {
            println!("version is {}", get_version());
            std::process::exit(0);
        }

        let cfg = LocalProverConfig::from_file(args.config_file)?;
        let sdk_config = cfg.sdk_config.clone();
        let mut prover_types = vec![];
        sdk_config
            .prover
            .circuit_types
            .iter()
            .for_each(|circuit_type| {
                if let Some(pt) = get_prover_type(*circuit_type) {
                    if !prover_types.contains(&pt) {
                        prover_types.push(pt);
                    }
                }
            });
        let local_prover = LocalProver::new(cfg, prover_types);
        let prover = ProverBuilder::new(sdk_config)
            .with_proving_service(Box::new(local_prover))
            .build()
            .await?;

        prover.run().await;

        Ok(())
    })
}
192  prover/src/prover.rs  Normal file
@@ -0,0 +1,192 @@
use crate::{
    types::ProverType,
    utils::get_prover_type,
    zk_circuits_handler::{CircuitsHandler, CircuitsHandlerProvider},
};
use anyhow::{anyhow, Result};
use async_trait::async_trait;
use scroll_proving_sdk::{
    config::Config as SdkConfig,
    prover::{
        proving_service::{
            GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
            QueryTaskResponse, TaskStatus,
        },
        ProvingService,
    },
};
use serde::{Deserialize, Serialize};
use std::{
    fs::File,
    sync::{Arc, Mutex},
    time::{SystemTime, UNIX_EPOCH},
};
use tokio::{runtime::Handle, sync::RwLock, task::JoinHandle};

#[derive(Clone, Serialize, Deserialize)]
pub struct LocalProverConfig {
    pub sdk_config: SdkConfig,
    pub high_version_circuit: CircuitConfig,
    pub low_version_circuit: CircuitConfig,
}

impl LocalProverConfig {
    pub fn from_reader<R>(reader: R) -> Result<Self>
    where
        R: std::io::Read,
    {
        serde_json::from_reader(reader).map_err(|e| anyhow!(e))
    }

    pub fn from_file(file_name: String) -> Result<Self> {
        let file = File::open(file_name)?;
        Self::from_reader(&file)
    }
}

#[derive(Clone, Serialize, Deserialize)]
pub struct CircuitConfig {
    pub hard_fork_name: String,
    pub params_path: String,
    pub assets_path: String,
}

pub struct LocalProver {
    config: LocalProverConfig,
    prover_types: Vec<ProverType>,
    circuits_handler_provider: RwLock<CircuitsHandlerProvider>,
    next_task_id: Arc<Mutex<u64>>,
    current_task: Arc<Mutex<Option<JoinHandle<Result<String>>>>>,
}

#[async_trait]
impl ProvingService for LocalProver {
    fn is_local(&self) -> bool {
        true
    }
    async fn get_vks(&self, req: GetVkRequest) -> GetVkResponse {
        let mut prover_types = vec![];
        req.circuit_types.iter().for_each(|circuit_type| {
            if let Some(pt) = get_prover_type(*circuit_type) {
                if !prover_types.contains(&pt) {
                    prover_types.push(pt);
                }
            }
        });

        let vks = self
            .circuits_handler_provider
            .read()
            .await
            .init_vks(&self.config, prover_types)
            .await;
        GetVkResponse { vks, error: None }
    }
    async fn prove(&self, req: ProveRequest) -> ProveResponse {
        let handler = self
            .circuits_handler_provider
            .write()
            .await
            .get_circuits_handler(&req.hard_fork_name, self.prover_types.clone())
            .expect("failed to get circuit handler");

        match self.do_prove(req, handler).await {
            Ok(resp) => resp,
            Err(e) => ProveResponse {
                status: TaskStatus::Failed,
                error: Some(format!("failed to request proof: {}", e)),
                ..Default::default()
            },
        }
    }

    async fn query_task(&self, req: QueryTaskRequest) -> QueryTaskResponse {
        let handle = self.current_task.lock().unwrap().take();
        if let Some(handle) = handle {
            if handle.is_finished() {
                return match handle.await {
                    Ok(Ok(proof)) => QueryTaskResponse {
                        task_id: req.task_id,
                        status: TaskStatus::Success,
                        proof: Some(proof),
                        ..Default::default()
                    },
                    Ok(Err(e)) => QueryTaskResponse {
                        task_id: req.task_id,
                        status: TaskStatus::Failed,
                        error: Some(format!("proving task failed: {}", e)),
                        ..Default::default()
                    },
                    Err(e) => QueryTaskResponse {
                        task_id: req.task_id,
                        status: TaskStatus::Failed,
                        error: Some(format!("proving task panicked: {}", e)),
                        ..Default::default()
                    },
                };
            } else {
                *self.current_task.lock().unwrap() = Some(handle);
                return QueryTaskResponse {
                    task_id: req.task_id,
                    status: TaskStatus::Proving,
                    ..Default::default()
                };
            }
        }
        // If no handle is found
        QueryTaskResponse {
            task_id: req.task_id,
            status: TaskStatus::Failed,
            error: Some("no proving task is running".to_string()),
            ..Default::default()
        }
    }
}

impl LocalProver {
    pub fn new(config: LocalProverConfig, prover_types: Vec<ProverType>) -> Self {
        let circuits_handler_provider = CircuitsHandlerProvider::new(config.clone())
            .expect("failed to create circuits handler provider");

        Self {
            config,
            prover_types,
            circuits_handler_provider: RwLock::new(circuits_handler_provider),
            next_task_id: Arc::new(Mutex::new(0)),
            current_task: Arc::new(Mutex::new(None)),
        }
    }

    async fn do_prove(
        &self,
        req: ProveRequest,
        handler: Arc<Box<dyn CircuitsHandler>>,
    ) -> Result<ProveResponse> {
        let task_id = {
            let mut next_task_id = self.next_task_id.lock().unwrap();
            *next_task_id += 1;
            *next_task_id
        };

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let created_at = duration.as_secs() as f64 + duration.subsec_nanos() as f64 * 1e-9;

        let req_clone = req.clone();
        let handle = Handle::current();
        let task_handle =
            tokio::task::spawn_blocking(move || handle.block_on(handler.get_proof_data(req_clone)));

        *self.current_task.lock().unwrap() = Some(task_handle);

        Ok(ProveResponse {
            task_id: task_id.to_string(),
            circuit_type: req.circuit_type,
            circuit_version: req.circuit_version,
            hard_fork_name: req.hard_fork_name,
            status: TaskStatus::Proving,
            created_at,
            input: Some(req.input),
            ..Default::default()
        })
    }
}
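Design note (inferred from the code above, not stated in the diff): LocalProver deliberately tracks at most one in-flight task. prove() bumps next_task_id, spawns get_proof_data on a blocking thread via spawn_blocking, and parks the JoinHandle in current_task; query_task() takes the handle, reports Proving while the task is unfinished (putting the handle back), and maps the Ok/Err/panic outcomes onto Success or Failed once it completes.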
153  prover/src/types.rs  Normal file
@@ -0,0 +1,153 @@
use ethers_core::types::H256;
use serde::{Deserialize, Deserializer, Serialize, Serializer};

use scroll_proving_sdk::prover::types::CircuitType;

pub type CommonHash = H256;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ProverType {
    Chunk,
    Batch,
}

impl ProverType {
    fn from_u8(v: u8) -> Self {
        match v {
            1 => ProverType::Chunk,
            2 => ProverType::Batch,
            _ => {
                panic!("invalid prover_type")
            }
        }
    }
}

impl Serialize for ProverType {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            ProverType::Chunk => serializer.serialize_u8(1),
            ProverType::Batch => serializer.serialize_u8(2),
        }
    }
}

impl<'de> Deserialize<'de> for ProverType {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let v: u8 = u8::deserialize(deserializer)?;
        Ok(ProverType::from_u8(v))
    }
}

#[derive(Serialize, Deserialize, Default)]
pub struct Task {
    #[serde(rename = "type", default)]
    pub task_type: CircuitType,
    pub task_data: String,
    #[serde(default)]
    pub hard_fork_name: String,
}

#[derive(Serialize, Deserialize, Default)]
pub struct ProofDetail {
    pub id: String,
    #[serde(rename = "type", default)]
    pub proof_type: CircuitType,
    pub proof_data: String,
    pub error: String,
}

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofFailureType {
    Undefined,
    Panic,
    NoPanic,
}

impl ProofFailureType {
    fn from_u8(v: u8) -> Self {
        match v {
            1 => ProofFailureType::Panic,
            2 => ProofFailureType::NoPanic,
            _ => ProofFailureType::Undefined,
        }
    }
}

impl Serialize for ProofFailureType {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            ProofFailureType::Undefined => serializer.serialize_u8(0),
            ProofFailureType::Panic => serializer.serialize_u8(1),
            ProofFailureType::NoPanic => serializer.serialize_u8(2),
        }
    }
}

impl<'de> Deserialize<'de> for ProofFailureType {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let v: u8 = u8::deserialize(deserializer)?;
        Ok(ProofFailureType::from_u8(v))
    }
}

impl Default for ProofFailureType {
    fn default() -> Self {
        Self::Undefined
    }
}

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofStatus {
    Ok,
    Error,
}

impl ProofStatus {
    fn from_u8(v: u8) -> Self {
        match v {
            0 => ProofStatus::Ok,
            _ => ProofStatus::Error,
        }
    }
}

impl Serialize for ProofStatus {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            ProofStatus::Ok => serializer.serialize_u8(0),
            ProofStatus::Error => serializer.serialize_u8(1),
        }
    }
}

impl<'de> Deserialize<'de> for ProofStatus {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let v: u8 = u8::deserialize(deserializer)?;
        Ok(ProofStatus::from_u8(v))
    }
}

impl Default for ProofStatus {
    fn default() -> Self {
        Self::Ok
    }
}
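Note (an observation, not text from the diff): the hand-written Serialize/Deserialize impls pin ProverType, ProofFailureType and ProofStatus to a compact u8 wire format (for example ProverType::Chunk = 1, ProverType::Batch = 2) instead of serde's default string tags, so the values travel through JSON as plain integers.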
18  prover/src/utils.rs  Normal file
@@ -0,0 +1,18 @@
use crate::types::ProverType;
use scroll_proving_sdk::prover::types::CircuitType;

pub fn get_circuit_types(prover_type: ProverType) -> Vec<CircuitType> {
    match prover_type {
        ProverType::Chunk => vec![CircuitType::Chunk],
        ProverType::Batch => vec![CircuitType::Batch, CircuitType::Bundle],
    }
}

pub fn get_prover_type(task_type: CircuitType) -> Option<ProverType> {
    match task_type {
        CircuitType::Undefined => None,
        CircuitType::Chunk => Some(ProverType::Chunk),
        CircuitType::Batch => Some(ProverType::Batch),
        CircuitType::Bundle => Some(ProverType::Batch),
    }
}
165  prover/src/zk_circuits_handler.rs  Normal file
@@ -0,0 +1,165 @@
mod common;
mod darwin;
mod darwin_v2;

use crate::{
    config::AssetsDirEnvConfig, prover::LocalProverConfig, types::ProverType,
    utils::get_circuit_types,
};
use anyhow::{bail, Result};
use async_trait::async_trait;
use darwin::DarwinHandler;
use darwin_v2::DarwinV2Handler;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, CircuitType};
use std::{collections::HashMap, sync::Arc};

type HardForkName = String;

pub mod utils {
    pub fn encode_vk(vk: Vec<u8>) -> String {
        base64::encode(vk)
    }
}

#[async_trait]
pub trait CircuitsHandler: Send + Sync {
    async fn get_vk(&self, task_type: CircuitType) -> Option<Vec<u8>>;

    async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String>;
}

type CircuitsHandlerBuilder = fn(
    prover_types: Vec<ProverType>,
    config: &LocalProverConfig,
) -> Result<Box<dyn CircuitsHandler>>;

pub struct CircuitsHandlerProvider {
    config: LocalProverConfig,
    circuits_handler_builder_map: HashMap<HardForkName, CircuitsHandlerBuilder>,
    current_fork_name: Option<HardForkName>,
    current_circuit: Option<Arc<Box<dyn CircuitsHandler>>>,
}

impl CircuitsHandlerProvider {
    pub fn new(config: LocalProverConfig) -> Result<Self> {
        let mut m: HashMap<HardForkName, CircuitsHandlerBuilder> = HashMap::new();

        if let Err(e) = AssetsDirEnvConfig::init() {
            panic!("AssetsDirEnvConfig init failed: {:#}", e);
        }

        fn handler_builder(
            prover_types: Vec<ProverType>,
            config: &LocalProverConfig,
        ) -> Result<Box<dyn CircuitsHandler>> {
            log::info!(
                "now init zk circuits handler, hard_fork_name: {}",
                &config.low_version_circuit.hard_fork_name
            );
            AssetsDirEnvConfig::enable_first();
            DarwinHandler::new(
                prover_types,
                &config.low_version_circuit.params_path,
                &config.low_version_circuit.assets_path,
            )
            .map(|handler| Box::new(handler) as Box<dyn CircuitsHandler>)
        }
        m.insert(
            config.low_version_circuit.hard_fork_name.clone(),
            handler_builder,
        );

        fn next_handler_builder(
            prover_types: Vec<ProverType>,
            config: &LocalProverConfig,
        ) -> Result<Box<dyn CircuitsHandler>> {
            log::info!(
                "now init zk circuits handler, hard_fork_name: {}",
                &config.high_version_circuit.hard_fork_name
            );
            AssetsDirEnvConfig::enable_second();
            DarwinV2Handler::new(
                prover_types,
                &config.high_version_circuit.params_path,
                &config.high_version_circuit.assets_path,
            )
            .map(|handler| Box::new(handler) as Box<dyn CircuitsHandler>)
        }

        m.insert(
            config.high_version_circuit.hard_fork_name.clone(),
            next_handler_builder,
        );

        let provider = CircuitsHandlerProvider {
            config,
            circuits_handler_builder_map: m,
            current_fork_name: None,
            current_circuit: None,
        };

        Ok(provider)
    }

    pub fn get_circuits_handler(
        &mut self,
        hard_fork_name: &String,
        prover_types: Vec<ProverType>,
    ) -> Result<Arc<Box<dyn CircuitsHandler>>> {
        match &self.current_fork_name {
            Some(fork_name) if fork_name == hard_fork_name => {
                log::info!("get circuits handler from cache");
                if let Some(handler) = &self.current_circuit {
                    Ok(handler.clone())
                } else {
                    bail!("missing cached handler, there must be something wrong.")
                }
            }
            _ => {
                log::info!(
                    "failed to get circuits handler from cache, create a new one: {hard_fork_name}"
                );
                if let Some(builder) = self.circuits_handler_builder_map.get(hard_fork_name) {
                    log::info!("building circuits handler for {hard_fork_name}");
                    let handler = builder(prover_types, &self.config)
                        .expect("failed to build circuits handler");
                    self.current_fork_name = Some(hard_fork_name.clone());
                    let arc_handler = Arc::new(handler);
                    self.current_circuit = Some(arc_handler.clone());
                    Ok(arc_handler)
                } else {
                    bail!("missing builder, there must be something wrong.")
                }
            }
        }
    }

    pub async fn init_vks(
        &self,
        config: &LocalProverConfig,
        prover_types: Vec<ProverType>,
    ) -> Vec<String> {
        let mut vks = Vec::new();
        for (hard_fork_name, build) in self.circuits_handler_builder_map.iter() {
            let handler =
                build(prover_types.clone(), config).expect("failed to build circuits handler");

            for prover_type in prover_types.iter() {
                for task_type in get_circuit_types(*prover_type).into_iter() {
                    let vk = handler
                        .get_vk(task_type)
                        .await
                        .map_or("".to_string(), utils::encode_vk);
                    log::info!(
                        "vk for {hard_fork_name}, is {vk}, task_type: {:?}",
                        task_type
                    );
                    if !vk.is_empty() {
                        vks.push(vk)
                    }
                }
            }
        }
        vks
    }
}
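Design note (an observation about the code above, not text from the repo): CircuitsHandlerProvider caches exactly one live handler, keyed by the currently active hard fork. Requesting a different fork name rebuilds the handler through the registered builder and swaps the SCROLL_PROVER_ASSETS_DIR environment variable via enable_first/enable_second, so only one circuit's parameters and assets are resident at a time.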
33  prover/src/zk_circuits_handler/common.rs  Normal file
@@ -0,0 +1,33 @@
use std::{collections::BTreeMap, rc::Rc};

use crate::types::ProverType;

use once_cell::sync::OnceCell;

use halo2_proofs::{halo2curves::bn256::Bn256, poly::kzg::commitment::ParamsKZG};

static mut PARAMS_MAP: OnceCell<Rc<BTreeMap<u32, ParamsKZG<Bn256>>>> = OnceCell::new();

pub fn get_params_map_instance<'a, F>(load_params_func: F) -> &'a BTreeMap<u32, ParamsKZG<Bn256>>
where
    F: FnOnce() -> BTreeMap<u32, ParamsKZG<Bn256>>,
{
    unsafe {
        PARAMS_MAP.get_or_init(|| {
            let params_map = load_params_func();
            Rc::new(params_map)
        })
    }
}

pub fn get_degrees<F>(prover_types: &std::collections::HashSet<ProverType>, f: F) -> Vec<u32>
where
    F: FnMut(&ProverType) -> Vec<u32>,
{
    prover_types
        .iter()
        .flat_map(f)
        .collect::<std::collections::HashSet<u32>>()
        .into_iter()
        .collect()
}
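Two notes on this file. The `static mut` plus `unsafe` dance exists because `Rc` is not `Sync`, so the params cache cannot live in a plain `static`; in practice it is a load-once, process-wide cache. And `get_degrees` is just flat-map-then-dedupe; a standalone sketch below, with a toy enum in place of `ProverType`:

```
use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Kind { Chunk, Batch } // toy stand-in for ProverType

// Same shape as get_degrees: flat-map each kind to its degrees, dedupe via a set.
fn degrees(kinds: &HashSet<Kind>, f: impl FnMut(&Kind) -> Vec<u32>) -> Vec<u32> {
    kinds.iter().flat_map(f).collect::<HashSet<u32>>().into_iter().collect()
}

fn main() {
    let kinds: HashSet<Kind> = [Kind::Chunk, Kind::Batch].into_iter().collect();
    let mut ds = degrees(&kinds, |k| match k {
        Kind::Chunk => vec![20, 26],
        Kind::Batch => vec![26, 21, 26],
    });
    ds.sort();
    assert_eq!(ds, vec![20, 21, 26]); // duplicates collapsed; order is unspecified before the sort
}
```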
prover/src/zk_circuits_handler/darwin.rs (new file, 401 lines)
@@ -0,0 +1,401 @@
use super::{common::*, CircuitsHandler};
use crate::types::ProverType;
use anyhow::{bail, Context, Ok, Result};
use async_trait::async_trait;
use once_cell::sync::Lazy;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, CircuitType};
use serde::Deserialize;
use tokio::sync::RwLock;

use crate::types::CommonHash;
use std::env;

use prover_darwin::{
    aggregator::Prover as BatchProver,
    check_chunk_hashes,
    common::Prover as CommonProver,
    config::{AGG_DEGREES, ZKEVM_DEGREES},
    zkevm::Prover as ChunkProver,
    BatchProof, BatchProvingTask, BlockTrace, BundleProof, BundleProvingTask, ChunkInfo,
    ChunkProof, ChunkProvingTask,
};

// Only used for debugging.
static OUTPUT_DIR: Lazy<Option<String>> = Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());

#[derive(Debug, Clone, Deserialize)]
pub struct BatchTaskDetail {
    pub chunk_infos: Vec<ChunkInfo>,
    #[serde(flatten)]
    pub batch_proving_task: BatchProvingTask,
}
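An aside on the `#[serde(flatten)]` above: it lets the coordinator send one flat JSON object in which the `BatchProvingTask` fields sit at the same level as `chunk_infos`, rather than nested under a separate key. A toy round-trip, using stand-in types of my own rather than the real ones:

```
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct InnerTask {
    batch_header: String, // stand-in field
}

#[derive(Debug, Deserialize)]
struct TaskDetail {
    chunk_infos: Vec<u32>, // stand-in for Vec<ChunkInfo>
    #[serde(flatten)]
    inner: InnerTask,
}

fn main() {
    // Note: no nesting under an "inner" key, thanks to #[serde(flatten)].
    let json = r#"{ "chunk_infos": [1, 2], "batch_header": "0xabc" }"#;
    let task: TaskDetail = serde_json::from_str(json).unwrap();
    assert_eq!(task.chunk_infos, vec![1, 2]);
    assert_eq!(task.inner.batch_header, "0xabc");
}
```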
type BundleTaskDetail = BundleProvingTask;

#[derive(Debug, Clone, Deserialize)]
pub struct ChunkTaskDetail {
    pub block_hashes: Vec<CommonHash>,
}

#[derive(Default)]
pub struct DarwinHandler {
    chunk_prover: Option<RwLock<ChunkProver<'static>>>,
    batch_prover: Option<RwLock<BatchProver<'static>>>,
}

impl DarwinHandler {
    pub fn new_multi(
        prover_types: Vec<ProverType>,
        params_dir: &str,
        assets_dir: &str,
    ) -> Result<Self> {
        // `std::any::type_name` is the stable API; the original `std::intrinsics::type_name`
        // compiles only on nightly with the `core_intrinsics` feature.
        let class_name = std::any::type_name::<Self>();
        let prover_types_set = prover_types
            .into_iter()
            .collect::<std::collections::HashSet<ProverType>>();
        let mut handler = Self {
            batch_prover: None,
            chunk_prover: None,
        };
        let degrees: Vec<u32> = get_degrees(&prover_types_set, |prover_type| match prover_type {
            ProverType::Chunk => ZKEVM_DEGREES.clone(),
            ProverType::Batch => AGG_DEGREES.clone(),
        });
        let params_map = get_params_map_instance(|| {
            log::info!(
                "calling get_params_map from {}, prover_types: {:?}, degrees: {:?}",
                class_name,
                prover_types_set,
                degrees
            );
            CommonProver::load_params_map(params_dir, &degrees)
        });
        for prover_type in prover_types_set {
            match prover_type {
                ProverType::Chunk => {
                    handler.chunk_prover = Some(RwLock::new(ChunkProver::from_params_and_assets(
                        params_map, assets_dir,
                    )));
                }
                ProverType::Batch => {
                    handler.batch_prover = Some(RwLock::new(BatchProver::from_params_and_assets(
                        params_map, assets_dir,
                    )))
                }
            }
        }
        Ok(handler)
    }

    pub fn new(prover_types: Vec<ProverType>, params_dir: &str, assets_dir: &str) -> Result<Self> {
        Self::new_multi(prover_types, params_dir, assets_dir)
    }

    async fn gen_chunk_proof_raw(&self, chunk_trace: Vec<BlockTrace>) -> Result<ChunkProof> {
        if let Some(prover) = self.chunk_prover.as_ref() {
            let chunk = ChunkProvingTask::from(chunk_trace);

            let chunk_proof =
                prover
                    .write()
                    .await
                    .gen_chunk_proof(chunk, None, None, self.get_output_dir())?;

            return Ok(chunk_proof);
        }
        unreachable!("please check errors in proof_type logic")
    }

    async fn gen_chunk_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let chunk_traces: Vec<BlockTrace> = serde_json::from_str(&prove_request.input)?;
        let chunk_proof = self.gen_chunk_proof_raw(chunk_traces).await?;
        Ok(serde_json::to_string(&chunk_proof)?)
    }

    async fn gen_batch_proof_raw(&self, batch_task_detail: BatchTaskDetail) -> Result<BatchProof> {
        if let Some(prover) = self.batch_prover.as_ref() {
            let chunk_hashes_proofs: Vec<(ChunkInfo, ChunkProof)> = batch_task_detail
                .chunk_infos
                .clone()
                .into_iter()
                .zip(batch_task_detail.batch_proving_task.chunk_proofs.clone())
                .collect();

            let chunk_proofs: Vec<ChunkProof> =
                chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();

            let is_valid = prover.read().await.check_protocol_of_chunks(&chunk_proofs);

            if !is_valid {
                bail!("non-matching chunk protocol")
            }
            check_chunk_hashes("", &chunk_hashes_proofs).context("failed to check chunk info")?;
            let batch_proof = prover.write().await.gen_batch_proof(
                batch_task_detail.batch_proving_task,
                None,
                self.get_output_dir(),
            )?;

            return Ok(batch_proof);
        }
        unreachable!("please check errors in proof_type logic")
    }

    async fn gen_batch_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let batch_task_detail: BatchTaskDetail = serde_json::from_str(&prove_request.input)?;
        let batch_proof = self.gen_batch_proof_raw(batch_task_detail).await?;
        Ok(serde_json::to_string(&batch_proof)?)
    }

    async fn gen_bundle_proof_raw(
        &self,
        bundle_task_detail: BundleTaskDetail,
    ) -> Result<BundleProof> {
        if let Some(prover) = self.batch_prover.as_ref() {
            let bundle_proof = prover.write().await.gen_bundle_proof(
                bundle_task_detail,
                None,
                self.get_output_dir(),
            )?;

            return Ok(bundle_proof);
        }
        unreachable!("please check errors in proof_type logic")
    }

    async fn gen_bundle_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let bundle_task_detail: BundleTaskDetail = serde_json::from_str(&prove_request.input)?;
        let bundle_proof = self.gen_bundle_proof_raw(bundle_task_detail).await?;
        Ok(serde_json::to_string(&bundle_proof)?)
    }

    fn get_output_dir(&self) -> Option<&str> {
        OUTPUT_DIR.as_deref()
    }
}

#[async_trait]
impl CircuitsHandler for DarwinHandler {
    async fn get_vk(&self, task_type: CircuitType) -> Option<Vec<u8>> {
        match task_type {
            CircuitType::Chunk => self.chunk_prover.as_ref().unwrap().read().await.get_vk(),
            CircuitType::Batch => self
                .batch_prover
                .as_ref()
                .unwrap()
                .read()
                .await
                .get_batch_vk(),
            CircuitType::Bundle => self
                .batch_prover
                .as_ref()
                .unwrap()
                .read()
                .await
                .get_bundle_vk(),
            _ => unreachable!(),
        }
    }

    async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
        match prove_request.circuit_type {
            CircuitType::Chunk => self.gen_chunk_proof(prove_request).await,
            CircuitType::Batch => self.gen_batch_proof(prove_request).await,
            CircuitType::Bundle => self.gen_bundle_proof(prove_request).await,
            _ => unreachable!(),
        }
    }
}
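To make the dispatch above concrete, here is a toy version of the `get_proof_data` flow: the request carries a circuit type plus a JSON `input` payload, and each arm deserializes its own task shape (for chunks, a JSON array of traces). `ToyRequest` is a stand-in; only the `circuit_type` and `input` fields mirror the real `ProveRequest`.

```
use serde_json::json;

#[derive(Debug, Clone, Copy)]
enum Circuit { Chunk, Batch, Bundle }

struct ToyRequest {
    circuit_type: Circuit,
    input: String,
}

fn proof_data(req: &ToyRequest) -> Result<String, serde_json::Error> {
    match req.circuit_type {
        // Chunk input is a JSON array of block traces (here, plain numbers).
        Circuit::Chunk => {
            let traces: Vec<u64> = serde_json::from_str(&req.input)?;
            Ok(format!("chunk proof over {} traces", traces.len()))
        }
        // Batch and bundle inputs are single JSON task objects.
        Circuit::Batch | Circuit::Bundle => {
            let task: serde_json::Value = serde_json::from_str(&req.input)?;
            Ok(format!("proof for task {task}"))
        }
    }
}

fn main() {
    let req = ToyRequest {
        circuit_type: Circuit::Chunk,
        input: json!([1, 2, 3]).to_string(),
    };
    println!("{}", proof_data(&req).unwrap());
}
```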
// =================================== tests module ========================================

#[cfg(test)]
mod tests {
    use super::*;
    use crate::zk_circuits_handler::utils::encode_vk;
    use prover_darwin::utils::chunk_trace_to_witness_block;
    use scroll_proving_sdk::utils::init_tracing;
    use std::{path::PathBuf, sync::LazyLock};

    #[ctor::ctor]
    fn init() {
        init_tracing();
        log::info!("logger initialized");
    }

    static DEFAULT_WORK_DIR: &str = "/assets";
    static WORK_DIR: LazyLock<String> = LazyLock::new(|| {
        std::env::var("DARWIN_TEST_DIR")
            .unwrap_or(String::from(DEFAULT_WORK_DIR))
            .trim_end_matches('/')
            .to_string()
    });
    static PARAMS_PATH: LazyLock<String> = LazyLock::new(|| format!("{}/test_params", *WORK_DIR));
    static ASSETS_PATH: LazyLock<String> = LazyLock::new(|| format!("{}/test_assets", *WORK_DIR));
    static PROOF_DUMP_PATH: LazyLock<String> =
        LazyLock::new(|| format!("{}/proof_data", *WORK_DIR));
    static BATCH_DIR_PATH: LazyLock<String> =
        LazyLock::new(|| format!("{}/traces/batch_24", *WORK_DIR));
    static BATCH_VK_PATH: LazyLock<String> =
        LazyLock::new(|| format!("{}/test_assets/vk_batch.vkey", *WORK_DIR));
    static CHUNK_VK_PATH: LazyLock<String> =
        LazyLock::new(|| format!("{}/test_assets/vk_chunk.vkey", *WORK_DIR));

    #[test]
    fn it_works() {
        let result = true;
        assert!(result);
    }

    #[tokio::test]
    async fn test_circuits() -> Result<()> {
        let bi_handler = DarwinHandler::new_multi(
            vec![ProverType::Chunk, ProverType::Batch],
            &PARAMS_PATH,
            &ASSETS_PATH,
        )?;

        let chunk_handler = bi_handler;
        let chunk_vk = chunk_handler.get_vk(CircuitType::Chunk).await.unwrap();

        check_vk(CircuitType::Chunk, chunk_vk, "chunk vk must be available");
        let chunk_dir_paths = get_chunk_dir_paths()?;
        log::info!("chunk_dir_paths, {:?}", chunk_dir_paths);
        let mut chunk_infos = vec![];
        let mut chunk_proofs = vec![];
        for (id, chunk_path) in chunk_dir_paths.into_iter().enumerate() {
            let chunk_id = format!("chunk_proof{}", id + 1);
            log::info!("start to process {chunk_id}");
            let chunk_trace = read_chunk_trace(chunk_path)?;

            let chunk_info = traces_to_chunk_info(chunk_trace.clone())?;
            chunk_infos.push(chunk_info);

            log::info!("start to prove {chunk_id}");
            let chunk_proof = chunk_handler.gen_chunk_proof_raw(chunk_trace).await?;
            let proof_data = serde_json::to_string(&chunk_proof)?;
            dump_proof(chunk_id, proof_data)?;
            chunk_proofs.push(chunk_proof);
        }

        let batch_handler = chunk_handler;
        let batch_vk = batch_handler.get_vk(CircuitType::Batch).await.unwrap();
        check_vk(CircuitType::Batch, batch_vk, "batch vk must be available");
        let batch_task_detail = make_batch_task_detail(chunk_infos, chunk_proofs);
        log::info!("start to prove batch");
        let batch_proof = batch_handler.gen_batch_proof_raw(batch_task_detail).await?;
        let proof_data = serde_json::to_string(&batch_proof)?;
        dump_proof("batch_proof".to_string(), proof_data)?;

        Ok(())
    }

    fn make_batch_task_detail(_: Vec<ChunkInfo>, _: Vec<ChunkProof>) -> BatchTaskDetail {
        todo!();
        // BatchTaskDetail {
        //     chunk_infos,
        //     batch_proving_task: BatchProvingTask {
        //         parent_batch_hash: todo!(),
        //         parent_state_root: todo!(),
        //         batch_header: todo!(),
        //         chunk_proofs,
        //     },
        // }
    }

    fn check_vk(proof_type: CircuitType, vk: Vec<u8>, info: &str) {
        log::info!("check_vk, {:?}", proof_type);
        let vk_from_file = read_vk(proof_type).unwrap();
        assert_eq!(vk_from_file, encode_vk(vk), "{info}")
    }

    fn read_vk(proof_type: CircuitType) -> Result<String> {
        log::info!("read_vk, {:?}", proof_type);
        let vk_file = match proof_type {
            CircuitType::Chunk => CHUNK_VK_PATH.clone(),
            CircuitType::Batch => BATCH_VK_PATH.clone(),
            CircuitType::Bundle => todo!(),
            CircuitType::Undefined => unreachable!(),
        };

        let data = std::fs::read(vk_file)?;
        Ok(encode_vk(data))
    }

    fn read_chunk_trace(path: PathBuf) -> Result<Vec<BlockTrace>> {
        log::info!("read_chunk_trace, {:?}", path);
        let mut chunk_trace: Vec<BlockTrace> = vec![];

        fn read_block_trace(file: &PathBuf) -> Result<BlockTrace> {
            let f = std::fs::File::open(file)?;
            Ok(serde_json::from_reader(&f)?)
        }

        if path.is_dir() {
            let entries = std::fs::read_dir(&path)?;
            let mut files: Vec<String> = entries
                .into_iter()
                .filter_map(|e| {
                    if e.is_err() {
                        return None;
                    }
                    let entry = e.unwrap();
                    if entry.path().is_dir() {
                        return None;
                    }
                    if let Result::Ok(file_name) = entry.file_name().into_string() {
                        Some(file_name)
                    } else {
                        None
                    }
                })
                .collect();
            files.sort();

            log::info!("files in chunk {:?} is {:?}", path, files);
            for file in files {
                let block_trace = read_block_trace(&path.join(file))?;
                chunk_trace.push(block_trace);
            }
        } else {
            let block_trace = read_block_trace(&path)?;
            chunk_trace.push(block_trace);
        }
        Ok(chunk_trace)
    }

    fn get_chunk_dir_paths() -> Result<Vec<PathBuf>> {
        let batch_path = PathBuf::from(BATCH_DIR_PATH.clone());
        let entries = std::fs::read_dir(&batch_path)?;
        let mut files: Vec<String> = entries
            .filter_map(|e| {
                if e.is_err() {
                    return None;
                }
                let entry = e.unwrap();
                if entry.path().is_dir() {
                    if let Result::Ok(file_name) = entry.file_name().into_string() {
                        Some(file_name)
                    } else {
                        None
                    }
                } else {
                    None
                }
            })
            .collect();
        files.sort();
        log::info!("files in batch {:?} is {:?}", batch_path, files);
        Ok(files.into_iter().map(|f| batch_path.join(f)).collect())
    }

    fn traces_to_chunk_info(chunk_trace: Vec<BlockTrace>) -> Result<ChunkInfo> {
        let witness_block = chunk_trace_to_witness_block(chunk_trace)?;
        Ok(ChunkInfo::from_witness_block(&witness_block, false))
    }

    fn dump_proof(id: String, proof_data: String) -> Result<()> {
        let dump_path = PathBuf::from(PROOF_DUMP_PATH.clone());
        Ok(std::fs::write(dump_path.join(id), proof_data)?)
    }
}
prover/src/zk_circuits_handler/darwin_v2.rs (new file, 459 lines)
@@ -0,0 +1,459 @@
use super::{common::*, CircuitsHandler};
use crate::types::ProverType;
use anyhow::{bail, Context, Ok, Result};
use async_trait::async_trait;
use once_cell::sync::Lazy;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, CircuitType};
use serde::Deserialize;
use tokio::sync::RwLock;

use crate::types::CommonHash;
use std::env;

use prover_darwin_v2::{
    aggregator::Prover as BatchProver,
    check_chunk_hashes,
    common::Prover as CommonProver,
    config::{AGG_DEGREES, ZKEVM_DEGREES},
    zkevm::Prover as ChunkProver,
    BatchProof, BatchProvingTask, BlockTrace, BundleProof, BundleProvingTask, ChunkInfo,
    ChunkProof, ChunkProvingTask,
};

// Only used for debugging.
static OUTPUT_DIR: Lazy<Option<String>> = Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());

#[derive(Debug, Clone, Deserialize)]
pub struct BatchTaskDetail {
    pub chunk_infos: Vec<ChunkInfo>,
    #[serde(flatten)]
    pub batch_proving_task: BatchProvingTask,
}

type BundleTaskDetail = BundleProvingTask;

#[derive(Debug, Clone, Deserialize)]
pub struct ChunkTaskDetail {
    pub block_hashes: Vec<CommonHash>,
}

#[derive(Default)]
pub struct DarwinV2Handler {
    chunk_prover: Option<RwLock<ChunkProver<'static>>>,
    batch_prover: Option<RwLock<BatchProver<'static>>>,
}

impl DarwinV2Handler {
    pub fn new_multi(
        prover_types: Vec<ProverType>,
        params_dir: &str,
        assets_dir: &str,
    ) -> Result<Self> {
        // Stable spelling of type_name; see the same fix in darwin.rs.
        let class_name = std::any::type_name::<Self>();
        let prover_types_set = prover_types
            .into_iter()
            .collect::<std::collections::HashSet<ProverType>>();
        let mut handler = Self {
            batch_prover: None,
            chunk_prover: None,
        };
        let degrees: Vec<u32> = get_degrees(&prover_types_set, |prover_type| match prover_type {
            ProverType::Chunk => ZKEVM_DEGREES.clone(),
            ProverType::Batch => AGG_DEGREES.clone(),
        });
        let params_map = get_params_map_instance(|| {
            log::info!(
                "calling get_params_map from {}, prover_types: {:?}, degrees: {:?}",
                class_name,
                prover_types_set,
                degrees
            );
            CommonProver::load_params_map(params_dir, &degrees)
        });
        for prover_type in prover_types_set {
            match prover_type {
                ProverType::Chunk => {
                    handler.chunk_prover = Some(RwLock::new(ChunkProver::from_params_and_assets(
                        params_map, assets_dir,
                    )));
                }
                ProverType::Batch => {
                    handler.batch_prover = Some(RwLock::new(BatchProver::from_params_and_assets(
                        params_map, assets_dir,
                    )))
                }
            }
        }
        Ok(handler)
    }

    pub fn new(prover_types: Vec<ProverType>, params_dir: &str, assets_dir: &str) -> Result<Self> {
        Self::new_multi(prover_types, params_dir, assets_dir)
    }

    async fn gen_chunk_proof_raw(&self, chunk_trace: Vec<BlockTrace>) -> Result<ChunkProof> {
        if let Some(prover) = self.chunk_prover.as_ref() {
            let chunk = ChunkProvingTask::from(chunk_trace);

            let chunk_proof =
                prover
                    .write()
                    .await
                    .gen_chunk_proof(chunk, None, None, self.get_output_dir())?;

            return Ok(chunk_proof);
        }
        unreachable!("please check errors in proof_type logic")
    }

    async fn gen_chunk_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let chunk_traces: Vec<BlockTrace> = serde_json::from_str(&prove_request.input)?;
        let chunk_proof = self.gen_chunk_proof_raw(chunk_traces).await?;
        Ok(serde_json::to_string(&chunk_proof)?)
    }

    async fn gen_batch_proof_raw(&self, batch_task_detail: BatchTaskDetail) -> Result<BatchProof> {
        if let Some(prover) = self.batch_prover.as_ref() {
            let chunk_hashes_proofs: Vec<(ChunkInfo, ChunkProof)> = batch_task_detail
                .chunk_infos
                .clone()
                .into_iter()
                .zip(batch_task_detail.batch_proving_task.chunk_proofs.clone())
                .collect();

            let chunk_proofs: Vec<ChunkProof> =
                chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();

            let is_valid = prover.write().await.check_protocol_of_chunks(&chunk_proofs);

            if !is_valid {
                bail!("non-matching chunk protocol")
            }
            check_chunk_hashes("", &chunk_hashes_proofs).context("failed to check chunk info")?;
            let batch_proof = prover.write().await.gen_batch_proof(
                batch_task_detail.batch_proving_task,
                None,
                self.get_output_dir(),
            )?;

            return Ok(batch_proof);
        }
        unreachable!("please check errors in proof_type logic")
    }

    async fn gen_batch_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let batch_task_detail: BatchTaskDetail = serde_json::from_str(&prove_request.input)?;
        let batch_proof = self.gen_batch_proof_raw(batch_task_detail).await?;
        Ok(serde_json::to_string(&batch_proof)?)
    }

    async fn gen_bundle_proof_raw(
        &self,
        bundle_task_detail: BundleTaskDetail,
    ) -> Result<BundleProof> {
        if let Some(prover) = self.batch_prover.as_ref() {
            let bundle_proof = prover.write().await.gen_bundle_proof(
                bundle_task_detail,
                None,
                self.get_output_dir(),
            )?;

            return Ok(bundle_proof);
        }
        unreachable!("please check errors in proof_type logic")
    }

    async fn gen_bundle_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let bundle_task_detail: BundleTaskDetail = serde_json::from_str(&prove_request.input)?;
        let bundle_proof = self.gen_bundle_proof_raw(bundle_task_detail).await?;
        Ok(serde_json::to_string(&bundle_proof)?)
    }

    fn get_output_dir(&self) -> Option<&str> {
        OUTPUT_DIR.as_deref()
    }
}

#[async_trait]
impl CircuitsHandler for DarwinV2Handler {
    async fn get_vk(&self, task_type: CircuitType) -> Option<Vec<u8>> {
        match task_type {
            CircuitType::Chunk => self.chunk_prover.as_ref().unwrap().read().await.get_vk(),
            CircuitType::Batch => self
                .batch_prover
                .as_ref()
                .unwrap()
                .read()
                .await
                .get_batch_vk(),
            CircuitType::Bundle => self
                .batch_prover
                .as_ref()
                .unwrap()
                .read()
                .await
                .get_bundle_vk(),
            _ => unreachable!(),
        }
    }

    async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
        match prove_request.circuit_type {
            CircuitType::Chunk => self.gen_chunk_proof(prove_request).await,
            CircuitType::Batch => self.gen_batch_proof(prove_request).await,
            CircuitType::Bundle => self.gen_bundle_proof(prove_request).await,
            _ => unreachable!(),
        }
    }
}

// =================================== tests module ========================================

#[cfg(test)]
mod tests {
    use super::*;
    use crate::zk_circuits_handler::utils::encode_vk;
    use ethers_core::types::H256;
    use prover_darwin_v2::{
        aggregator::eip4844, utils::chunk_trace_to_witness_block, BatchData, BatchHeader,
        MAX_AGG_SNARKS,
    };
    use scroll_proving_sdk::utils::init_tracing;
    use std::{path::PathBuf, sync::LazyLock};

    #[ctor::ctor]
    fn init() {
        init_tracing();
        log::info!("logger initialized");
    }

    static DEFAULT_WORK_DIR: &str = "/assets";
    static WORK_DIR: LazyLock<String> = LazyLock::new(|| {
        std::env::var("DARWIN_V2_TEST_DIR")
            .unwrap_or(String::from(DEFAULT_WORK_DIR))
            .trim_end_matches('/')
            .to_string()
    });
    static PARAMS_PATH: LazyLock<String> = LazyLock::new(|| format!("{}/test_params", *WORK_DIR));
    static ASSETS_PATH: LazyLock<String> = LazyLock::new(|| format!("{}/test_assets", *WORK_DIR));
    static PROOF_DUMP_PATH: LazyLock<String> =
        LazyLock::new(|| format!("{}/proof_data", *WORK_DIR));
    static BATCH_DIR_PATH: LazyLock<String> =
        LazyLock::new(|| format!("{}/traces/batch_24", *WORK_DIR));
    static BATCH_VK_PATH: LazyLock<String> =
        LazyLock::new(|| format!("{}/test_assets/vk_batch.vkey", *WORK_DIR));
    static CHUNK_VK_PATH: LazyLock<String> =
        LazyLock::new(|| format!("{}/test_assets/vk_chunk.vkey", *WORK_DIR));

    #[test]
    fn it_works() {
        let result = true;
        assert!(result);
    }

    #[tokio::test]
    async fn test_circuits() -> Result<()> {
        let bi_handler = DarwinV2Handler::new_multi(
            vec![ProverType::Chunk, ProverType::Batch],
            &PARAMS_PATH,
            &ASSETS_PATH,
        )?;

        let chunk_handler = bi_handler;
        let chunk_vk = chunk_handler.get_vk(CircuitType::Chunk).await.unwrap();

        check_vk(CircuitType::Chunk, chunk_vk, "chunk vk must be available");
        let chunk_dir_paths = get_chunk_dir_paths()?;
        log::info!("chunk_dir_paths, {:?}", chunk_dir_paths);
        let mut chunk_traces = vec![];
        let mut chunk_infos = vec![];
        let mut chunk_proofs = vec![];
        for (id, chunk_path) in chunk_dir_paths.into_iter().enumerate() {
            let chunk_id = format!("chunk_proof{}", id + 1);
            log::info!("start to process {chunk_id}");
            let chunk_trace = read_chunk_trace(chunk_path)?;
            chunk_traces.push(chunk_trace.clone());
            let chunk_info = traces_to_chunk_info(chunk_trace.clone())?;
            chunk_infos.push(chunk_info);

            log::info!("start to prove {chunk_id}");
            let chunk_proof = chunk_handler.gen_chunk_proof_raw(chunk_trace).await?;
            let proof_data = serde_json::to_string(&chunk_proof)?;
            dump_proof(chunk_id, proof_data)?;
            chunk_proofs.push(chunk_proof);
        }

        let batch_handler = chunk_handler;
        let batch_vk = batch_handler.get_vk(CircuitType::Batch).await.unwrap();
        check_vk(CircuitType::Batch, batch_vk, "batch vk must be available");
        let batch_task_detail = make_batch_task_detail(chunk_traces, chunk_proofs, None);
        log::info!("start to prove batch");
        let batch_proof = batch_handler.gen_batch_proof_raw(batch_task_detail).await?;
        let proof_data = serde_json::to_string(&batch_proof)?;
        dump_proof("batch_proof".to_string(), proof_data)?;

        Ok(())
    }

    // copied from https://github.com/scroll-tech/scroll-prover/blob/main/integration/src/prove.rs
    fn get_blob_from_chunks(chunks: &[ChunkInfo]) -> Vec<u8> {
        let num_chunks = chunks.len();

        let padded_chunk =
            ChunkInfo::mock_padded_chunk_info_for_testing(chunks.last().as_ref().unwrap());
        let chunks_with_padding = [
            chunks.to_vec(),
            vec![padded_chunk; MAX_AGG_SNARKS - num_chunks],
        ]
        .concat();
        let batch_data = BatchData::<{ MAX_AGG_SNARKS }>::new(chunks.len(), &chunks_with_padding);
        let batch_bytes = batch_data.get_batch_data_bytes();
        let blob_bytes = eip4844::get_blob_bytes(&batch_bytes);
        log::info!("blob_bytes len {}", blob_bytes.len());
        blob_bytes
    }
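The chunk padding in `get_blob_from_chunks` reduces to simple slice arithmetic: extend the real chunks with copies of a mock chunk until exactly `MAX_AGG_SNARKS` entries are present. A standalone sketch follows; the constant's value here is a placeholder assumption of mine, the real one lives in `prover_darwin_v2`.

```
// Assumed value for illustration only; the real constant is defined upstream.
const MAX_AGG_SNARKS: usize = 45;

// Pad `chunks` with clones of `mock` up to MAX_AGG_SNARKS entries.
// Like the original, this assumes chunks.len() <= MAX_AGG_SNARKS
// and panics on underflow otherwise.
fn pad<T: Clone>(chunks: &[T], mock: T) -> Vec<T> {
    [chunks.to_vec(), vec![mock; MAX_AGG_SNARKS - chunks.len()]].concat()
}

fn main() {
    let chunks = vec!["c1", "c2", "c3"];
    let padded = pad(&chunks, "mock");
    assert_eq!(padded.len(), MAX_AGG_SNARKS);
    assert_eq!(&padded[..3], ["c1", "c2", "c3"]); // real chunks first, mocks after
}
```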
    // TODO: chunk_infos can be extracted from chunk_proofs.
    // Still needed?
    fn make_batch_task_detail(
        chunk_traces: Vec<Vec<BlockTrace>>,
        chunk_proofs: Vec<ChunkProof>,
        last_batcher_header: Option<BatchHeader<{ MAX_AGG_SNARKS }>>,
    ) -> BatchTaskDetail {
        // dummy parent batch hash
        let dummy_parent_batch_hash = H256([
            0xab, 0xac, 0xad, 0xae, 0xaf, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0,
        ]);
        let chunk_infos: Vec<_> = chunk_proofs.iter().map(|p| p.chunk_info.clone()).collect();

        let l1_message_popped = chunk_traces
            .iter()
            .flatten()
            .map(|chunk| chunk.num_l1_txs())
            .sum();
        let last_block_timestamp = chunk_traces.last().map_or(0, |block_traces| {
            block_traces
                .last()
                .map_or(0, |block_trace| block_trace.header.timestamp.as_u64())
        });

        let blob_bytes = get_blob_from_chunks(&chunk_infos);
        let batch_header = BatchHeader::construct_from_chunks(
            last_batcher_header.map_or(4, |header| header.version),
            last_batcher_header.map_or(123, |header| header.batch_index + 1),
            l1_message_popped,
            last_batcher_header.map_or(l1_message_popped, |header| {
                header.total_l1_message_popped + l1_message_popped
            }),
            last_batcher_header.map_or(dummy_parent_batch_hash, |header| header.batch_hash()),
            last_block_timestamp,
            &chunk_infos,
            &blob_bytes,
        );
        BatchTaskDetail {
            chunk_infos,
            batch_proving_task: BatchProvingTask {
                chunk_proofs,
                batch_header,
                blob_bytes,
            },
        }
    }

    fn check_vk(proof_type: CircuitType, vk: Vec<u8>, info: &str) {
        log::info!("check_vk, {:?}", proof_type);
        let vk_from_file = read_vk(proof_type).unwrap();
        assert_eq!(vk_from_file, encode_vk(vk), "{info}")
    }

    fn read_vk(proof_type: CircuitType) -> Result<String> {
        log::info!("read_vk, {:?}", proof_type);
        let vk_file = match proof_type {
            CircuitType::Chunk => CHUNK_VK_PATH.clone(),
            CircuitType::Batch => BATCH_VK_PATH.clone(),
            CircuitType::Bundle => todo!(),
            CircuitType::Undefined => unreachable!(),
        };

        let data = std::fs::read(vk_file)?;
        Ok(encode_vk(data))
    }

    fn read_chunk_trace(path: PathBuf) -> Result<Vec<BlockTrace>> {
        log::info!("read_chunk_trace, {:?}", path);
        let mut chunk_trace: Vec<BlockTrace> = vec![];

        fn read_block_trace(file: &PathBuf) -> Result<BlockTrace> {
            let f = std::fs::File::open(file)?;
            Ok(serde_json::from_reader(&f)?)
        }

        if path.is_dir() {
            let entries = std::fs::read_dir(&path)?;
            let mut files: Vec<String> = entries
                .into_iter()
                .filter_map(|e| {
                    if e.is_err() {
                        return None;
                    }
                    let entry = e.unwrap();
                    if entry.path().is_dir() {
                        return None;
                    }
                    if let Result::Ok(file_name) = entry.file_name().into_string() {
                        Some(file_name)
                    } else {
                        None
                    }
                })
                .collect();
            files.sort();

            log::info!("files in chunk {:?} is {:?}", path, files);
            for file in files {
                let block_trace = read_block_trace(&path.join(file))?;
                chunk_trace.push(block_trace);
            }
        } else {
            let block_trace = read_block_trace(&path)?;
            chunk_trace.push(block_trace);
        }
        Ok(chunk_trace)
    }

    fn get_chunk_dir_paths() -> Result<Vec<PathBuf>> {
        let batch_path = PathBuf::from(BATCH_DIR_PATH.clone());
        let entries = std::fs::read_dir(&batch_path)?;
        let mut files: Vec<String> = entries
            .filter_map(|e| {
                if e.is_err() {
                    return None;
                }
                let entry = e.unwrap();
                if entry.path().is_dir() {
                    if let Result::Ok(file_name) = entry.file_name().into_string() {
                        Some(file_name)
                    } else {
                        None
                    }
                } else {
                    None
                }
            })
            .collect();
        files.sort();
        log::info!("files in batch {:?} is {:?}", batch_path, files);
        Ok(files.into_iter().map(|f| batch_path.join(f)).collect())
    }

    fn traces_to_chunk_info(chunk_trace: Vec<BlockTrace>) -> Result<ChunkInfo> {
        let witness_block = chunk_trace_to_witness_block(chunk_trace)?;
        Ok(ChunkInfo::from_witness_block(&witness_block, false))
    }

    fn dump_proof(id: String, proof_data: String) -> Result<()> {
        let dump_path = PathBuf::from(PROOF_DUMP_PATH.clone());
        Ok(std::fs::write(dump_path.join(id), proof_data)?)
    }
}
@@ -33,45 +33,3 @@ make rollup_bins
./build/bin/gas_oracle --config ./conf/config.json
./build/bin/rollup_relayer --config ./conf/config.json
```

## Proposer Tool

The Proposer Tool replays historical blocks with custom configurations (e.g., future hardfork configs, custom chunk/batch/bundle proposer configs) to generate chunks/batches/bundles, helping test parameter changes before a protocol upgrade.

You can:

1. Enable different hardforks in the genesis configuration.
2. Set custom chunk-proposer, batch-proposer, and bundle-proposer parameters.
3. Analyze the resulting metrics (blob size, block count, transaction count, gas usage).

## How to run the proposer tool?

### Set the configs

1. Set the genesis config to enable the desired hardforks in [`proposer-tool-genesis.json`](./proposer-tool-genesis.json); a hedged sketch of the relevant fields follows this list.
2. Set the proposer config in [`proposer-tool-config.json`](./proposer-tool-config.json) for data analysis.
3. Set `start-l2-block` in the launch command of the proposer tool in [`docker-compose-proposer-tool.yml`](./docker-compose-proposer-tool.yml) to the block number you want to start from. The default is `0`, which means starting from the genesis block.
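For step 1, hardforks are scheduled via the `config` object of the genesis file. The fragment below is only an illustration of the general shape; the exact field names and values are assumptions on my part, so defer to the actual [`proposer-tool-genesis.json`](./proposer-tool-genesis.json):

```
{
  "config": {
    "chainId": 534352,
    "bernoulliBlock": 0,
    "curieBlock": 0,
    "darwinTime": 0,
    "darwinV2Time": 0
  }
}
```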
### Start the proposer tool using docker-compose

Prerequisite: an RPC URL of an archive L2 node. The default URL in [`proposer-tool-config.json`](./proposer-tool-config.json) is `https://rpc.scroll.io`.

```
cd rollup
DOCKER_BUILDKIT=1 docker-compose -f docker-compose-proposer-tool.yml up -d
```

> Note: Port 5432 of the database is mapped to the host machine, so you can use `psql` or any other database client to connect to it.

> The DSN for the database is `postgres://postgres:postgres@db:5432/scroll?sslmode=disable`.
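For example, assuming the default credentials and the 5432 port mapping above, you can connect from the host machine with:

```
psql "postgres://postgres:postgres@localhost:5432/scroll?sslmode=disable"
```

(Inside the compose network, use the `db` hostname from the DSN instead of `localhost`.)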
### Reset env

```
docker-compose -f docker-compose-proposer-tool.yml down -v
```

If you need to rebuild the images, you must remove the old ones first:

```
docker images | grep rollup | awk '{print $3}' | xargs docker rmi -f
```
@@ -10,6 +10,7 @@ import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/log"
    "github.com/scroll-tech/go-ethereum/params"
    "github.com/scroll-tech/go-ethereum/rpc"
    "github.com/urfave/cli/v2"

@@ -71,12 +72,22 @@ func action(ctx *cli.Context) error {
        log.Crit("failed to connect l1 geth", "config file", cfgFile, "error", err)
    }

    // Init l2geth connection
    l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
    if err != nil {
        log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
    }

    l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, db, registry)

    l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, relayer.ServiceTypeL1GasOracle, registry)
    if err != nil {
        log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err)
    }
    l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, false /* initGenesis */, relayer.ServiceTypeL2GasOracle, registry)
    if err != nil {
        log.Crit("failed to create new l2 relayer", "config file", cfgFile, "error", err)
    }
    // Start the L1 watcher process.
    go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
        // Fetch the latest block number to decrease the delay when fetching gas prices

@@ -95,6 +106,7 @@ func action(ctx *cli.Context) error {

    // Start the L1 and L2 gas-price-oracle relayer loops.
    go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
    go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)

    // All gas-oracle relayer loops have been started.
    log.Info("Start gas-oracle successfully", "version", version.Version)
Some files were not shown because too many files have changed in this diff.