Compare commits


45 Commits

Author SHA1 Message Date
Morty
1552e98b79 fix(bridge-history+rollup-relayer): update da-codec to prevent zstd deadlock (#1724)
Co-authored-by: yiweichi <yiweichi@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2025-08-25 16:23:01 +08:00
Péter Garamvölgyi
a65b3066a3 fix: remove unnecessary logs (#1725) 2025-08-22 15:02:20 +02:00
Morty
1f2b397bbd feat(bridge-history): add aws s3 blob client (#1716)
Co-authored-by: yiweichi <yiweichi@users.noreply.github.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-08-12 14:59:44 +08:00
colin
ae791a0714 fix(rollup-relayer): sanity checks (#1720) 2025-08-12 14:57:02 +08:00
colin
c012f7132d feat(rollup-relayer): add sanity checks before committing and finalizing (#1714)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-08-11 17:49:29 +08:00
Jonas Theis
6897cc54bd feat(permissionless batches): batch production toolkit and operator recovery (#1555)
Signed-off-by: noelwei <fan@scroll.io>
Co-authored-by: Ömer Faruk Irmak <omerfirmak@gmail.com>
Co-authored-by: noelwei <fan@scroll.io>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Morty <70688412+yiweichi@users.noreply.github.com>
Co-authored-by: omerfirmak <omerfirmak@users.noreply.github.com>
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: kunxian xia <xiakunxian130@gmail.com>
Co-authored-by: Velaciela <git.rover@outlook.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: Morty <yiweichi1@gmail.com>
2025-08-04 12:37:31 +08:00
georgehao
d21fa36803 change l2watcher from w.GetBlockByNumberOrHash to BlockByNumber (#1715) 2025-07-31 18:29:50 +08:00
colin
fc75299eb3 fix(gas-oracle): nonce too low when resubmission (#1712) 2025-07-30 14:50:41 +08:00
Morty
4bfcd35d0c fix(bridge-history): update dependency go-ethereum version (#1713) 2025-07-30 14:32:12 +08:00
colin
6d62f8e5fa fix(gas-oracle): typos in config file example (#1711) 2025-07-28 18:12:50 +08:00
Morty
392ae07736 feat(blob-uploader): support codec v8 (#1707) 2025-07-24 01:34:46 +08:00
colin
db80b47820 fix(rollup-relayer): upgrade boundary message queue hash initialization (#1706) 2025-07-23 18:51:56 +08:00
Zhang Zhuo
daa1387208 circuit-0.5.2 (#1705)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2025-07-23 14:16:16 +08:00
Zhang Zhuo
67b05558e2 upgrade circuit to 0.5.2 (#1703)
Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2025-07-23 10:52:08 +08:00
Ho
1e447b0fef [Fix] building failure in gpu image (#1702) 2025-07-21 20:26:39 +08:00
georgehao
f7c6ecadf4 bump to v4.5.31 (#1700) 2025-07-18 16:41:59 +08:00
Ho
9d94f943e5 [Upgrade] feynman 0.5.0rc1 (#1699) 2025-07-18 15:57:31 +08:00
Morty
de17ad43ff fix(blob-uploader): orm function InsertOrUpdateBlobUpload and s3 bucket region configuration (#1679)
Co-authored-by: yiweichi <yiweichi@users.noreply.github.com>
2025-07-16 18:36:27 +08:00
colin
4233ad928c feat(rollup-relayer): support Validium (#1693)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2025-07-09 15:02:54 +08:00
georgehao
3050ccb40f update feynman prover makefile (#1691) 2025-07-05 10:33:08 +08:00
Ho
12e89201a1 feat: upgrading for feynman (#1690)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
2025-07-04 16:59:48 +08:00
colin
a0ee508bbd fix(bridge-history): commit batch txns and blobs fetching (#1689) 2025-07-04 01:50:52 +08:00
georgehao
b8909d3795 update intermediate docker runs on (#1688) 2025-07-03 21:52:24 +08:00
Ho
b7a172a519 feat: upgrade zkvm-prover to feynman fork (#1686)
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
2025-07-03 18:48:33 +08:00
georgehao
80807dbb75 Feat/upgrade intermedidate (#1687) 2025-07-03 10:08:03 +08:00
georgehao
a776ca7c82 refactor: remove unused check (#1685) 2025-07-03 09:21:33 +08:00
Ho
ea38ae7e96 Refactor/zkvm 3 (#1684) 2025-07-01 06:39:27 +08:00
colin
9dc57c6126 feat(rollup-relayer): support codecv8 (#1681)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-06-26 21:17:56 +08:00
Ho
9367565a31 [Refactor] Universal task (#1680)
Co-authored-by: georgehao <haohongfan@gmail.com>
2025-06-25 14:42:51 +08:00
Ho
d2f7663d26 [Refactor] For universal prover task (the update on coordinator) (#1682)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2025-06-19 14:54:08 +09:00
Morty
b0943b1035 refactor(rollup-relayer): simplify construct commit batch payload (#1673) 2025-06-18 21:37:29 +08:00
Morty
5d6b5a89f4 feat(blob-uploader): blob_upload table add batch hash (#1677) 2025-06-11 18:21:14 +08:00
colin
4ee459a602 fix(gas-oracle & rollup-relayer): blob base fee calculation based on excessBlobGas field (#1676)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-06-10 17:38:21 +08:00
Morty
276385fd0a feat: add blob storage service (#1672)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: yiweichi <yiweichi@users.noreply.github.com>
2025-06-10 17:16:16 +08:00
colin
82fb15de3b fix(rollup-relayer): add transaction support to insert l2 blocks (#1674) 2025-06-05 21:27:27 +08:00
colin
5204ad50e0 fix(bridge-history): unclaimed withdrawals (#1671) 2025-05-28 00:54:27 +08:00
colin
f824fb0efc fix(coordinator): remove initialize euclid VKs (#1669) 2025-05-26 15:42:26 +08:00
colin
a55c7bdc77 refactor(coordinator): remove outdated logic (#1668)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-05-26 14:32:39 +08:00
Alejandro Ranchal-Pedrosa
47b1a037a9 Fix bug sequencer submission strategy and log commit price (#1664)
Co-authored-by: ranchalp <ranchalp@users.noreply.github.com>
2025-05-23 10:02:49 +01:00
Alejandro Ranchal-Pedrosa
ae34020c34 perf(relayer): submission strategy fix logs, use blocktime for submission strategy and log metrics. (#1663)
Co-authored-by: ranchalp <ranchalp@users.noreply.github.com>
Co-authored-by: Jonas Theis <4181434+jonastheis@users.noreply.github.com>
2025-05-22 20:38:10 +08:00
Jonas Theis
fa9fab6e98 fix(relayer): ProcessPendingBatches (#1661)
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2025-05-21 18:23:04 +02:00
colin
c4f869a33a refactor(sender): remove fallback gas limit (#1662) 2025-05-21 22:02:27 +08:00
colin
0cee9a51e6 refactor(rollup-relayer): simplify logic post-Euclid (#1658)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-05-21 17:59:49 +08:00
colin
97de988228 feat: coordinator and prover support v0.4.2 (#1660)
Co-authored-by: Velaciela <git.rover@outlook.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: Zhang Zhuo <mycinbrin@gmail.com>
2025-05-20 14:10:52 +08:00
Alejandro Ranchal-Pedrosa
a12175dafc perf(relayer): add sequencer submission strategy with blob‐fee history and target price (#1659)
Co-authored-by: jonastheis <4181434+jonastheis@users.noreply.github.com>
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
2025-05-19 22:32:28 +02:00
214 changed files with 22745 additions and 12178 deletions


@@ -29,7 +29,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-12-06
toolchain: nightly-2025-02-14
override: true
components: rustfmt, clippy
- name: Install Go
@@ -41,16 +41,12 @@ jobs:
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "common/libzkp/impl -> target"
- name: Setup SSH for private repos
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
- name: Lint
working-directory: 'common'
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
workspaces: ". -> target"
# - name: Lint
# working-directory: 'common'
# run: |
# rm -rf $HOME/.cache/golangci-lint
# make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest


@@ -33,7 +33,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-12-03
toolchain: nightly-2025-02-14
override: true
components: rustfmt, clippy
- name: Install Go
@@ -112,7 +112,7 @@ jobs:
- name: Test coordinator packages
working-directory: 'coordinator'
run: |
# go test -exec "env LD_LIBRARY_PATH=${PWD}/verifier/lib" -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="scroll-tech/coordinator" -coverprofile=coverage.txt -covermode=atomic ./...
make libzkp
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic -tags mock_verifier ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3


@@ -10,7 +10,8 @@ env:
jobs:
gas_oracle:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -55,7 +56,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
rollup_relayer:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -99,8 +101,55 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
blob_uploader:
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: blob-uploader
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: blob-uploader
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/blob_uploader.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
rollup-db-cli:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -145,7 +194,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-fetcher:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -190,7 +240,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-api:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -235,7 +286,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-db-cli:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -280,7 +332,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
coordinator-api:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -307,13 +360,6 @@ jobs:
REPOSITORY: coordinator-api
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Setup SSH for private repos
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
- name: Run custom script
run: |
./build/dockerfiles/coordinator-api/init-openvm.sh
- name: Build and push
uses: docker/build-push-action@v3
env:
@@ -331,7 +377,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
coordinator-cron:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4


@@ -38,6 +38,7 @@ jobs:
make dev_docker
make -C rollup mock_abi
make -C common/bytecode all
make -C coordinator/internal/logic/libzkp build
- name: Run integration tests
run: |
go test -v -tags="mock_prover mock_verifier" -p 1 -coverprofile=coverage.txt scroll-tech/integration-test/...


@@ -25,6 +25,7 @@ on:
- nightly-2023-12-03
- nightly-2022-12-10
- 1.86.0
- nightly-2025-02-14
default: "nightly-2023-12-03"
PYTHON_VERSION:
description: "Python version"
@@ -69,7 +70,8 @@ defaults:
jobs:
build-and-publish-intermediate:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4

.gitignore

@@ -24,3 +24,4 @@ sftp-config.json
*~
target
zkvm-prover/config.json

Cargo.lock (generated, new file)

File diff suppressed because it is too large

Cargo.toml (new file)

@@ -0,0 +1,68 @@
[workspace]
members = [
"crates/libzkp",
"crates/l2geth",
"crates/libzkp_c",
"crates/prover-bin",
]
resolver = "2"
[workspace.package]
authors = ["Scroll developers"]
edition = "2021"
homepage = "https://scroll.io"
readme = "README.md"
repository = "https://github.com/scroll-tech/scroll"
version = "4.5.8"
[workspace.dependencies]
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1", package = "scroll-zkvm-prover" }
scroll-zkvm-verifier-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1", package = "scroll-zkvm-verifier" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1" }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/openvm-1.3", features = ["scroll"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/openvm-1.3" }
metrics = "0.23.0"
metrics-util = "0.17"
metrics-tracing-context = "0.16.0"
anyhow = "1.0"
alloy = { version = "1", default-features = false }
alloy-primitives = { version = "1.2", default-features = false, features = ["tiny-keccak"] }
# also use this to trigger "serde" feature for primitives
alloy-serde = { version = "1", default-features = false }
serde = { version = "1", default-features = false, features = ["derive"] }
serde_json = { version = "1.0" }
serde_derive = "1.0"
serde_with = "3.11.0"
itertools = "0.14"
tiny-keccak = "2.0"
tracing = "0.1"
eyre = "0.6"
once_cell = "1.20"
base64 = "0.22"
[patch.crates-io]
revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-handler = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-precompile = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.15.0" }
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v1.2.0" }
[profile.maxperf]
inherits = "release"
lto = "fat"
codegen-units = 1


@@ -28,7 +28,7 @@ We welcome community contributions to this repository. Before you submit any iss
## Prerequisites
+ Go 1.21
+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
+ Rust (for version, see [rust-toolchain](./rust-toolchain))
+ Hardhat / Foundry
+ Docker


@@ -10,15 +10,15 @@ require (
github.com/go-redis/redis/v8 v8.11.5
github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950
github.com/scroll-tech/da-codec v0.1.3-0.20250825071838-cddc263e5ef6
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.11.0
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 // It's a hotfix for the header hash incompatibility issue, pls change this with caution
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9 // It's a hotfix for the header hash incompatibility issue, pls change this with caution
require (
dario.cat/mergo v1.0.0 // indirect


@@ -309,10 +309,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54 h1:qVpsVu1J91opTn6HYeuzWcBRVhQmPR8g05i+PlOjlI4=
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 h1:qfOaRflvH1vtnFWloB7BveKlP/VqYgMqLJ6e9TlBJ/8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/da-codec v0.1.3-0.20250825071838-cddc263e5ef6 h1:EiLZgoRfGgF0QzZ1jGSlo60A6W7g9L/8XLWMhwwtKJ0=
github.com/scroll-tech/da-codec v0.1.3-0.20250825071838-cddc263e5ef6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9 h1:u371VK8eOU2Z/0SVf5KDI3eJc8msHSpJbav4do/8n38=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=


@@ -38,6 +38,7 @@ type FetcherConfig struct {
BeaconNodeAPIEndpoint string `json:"BeaconNodeAPIEndpoint"`
BlobScanAPIEndpoint string `json:"BlobScanAPIEndpoint"`
BlockNativeAPIEndpoint string `json:"BlockNativeAPIEndpoint"`
AwsS3Endpoint string `json:"AwsS3Endpoint"`
}
// RedisConfig redis config


@@ -39,6 +39,9 @@ type L1MessageFetcher struct {
// NewL1MessageFetcher creates a new L1MessageFetcher instance.
func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) (*L1MessageFetcher, error) {
blobClient := blob_client.NewBlobClients()
if cfg.AwsS3Endpoint != "" {
blobClient.AddBlobClient(blob_client.NewAwsS3Client(cfg.AwsS3Endpoint))
}
if cfg.BeaconNodeAPIEndpoint != "" {
beaconNodeClient, err := blob_client.NewBeaconNodeClient(cfg.BeaconNodeAPIEndpoint)
if err != nil {
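
For context, the new field threads an optional S3 blob mirror into the bridge-history fetcher. A minimal sketch of the configuration side, assuming only the struct fields shown above (the endpoint value is illustrative, not a real bucket):

	// Hypothetical fetcher configuration; only the blob-source fields are shown.
	cfg := &config.FetcherConfig{
		BeaconNodeAPIEndpoint: "http://localhost:5052",
		AwsS3Endpoint:         "https://example-bucket.s3.us-east-1.amazonaws.com", // empty string skips the S3 client
	}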


@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"math/big"
"time"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
@@ -252,6 +253,11 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
// Key: commit transaction hash
// Value: parent batch hashes (in order) for each processed CommitBatch event in the transaction
txBlobIndexMap := make(map[common.Hash][]common.Hash)
// Cache for the previous transaction to avoid duplicate fetches
var lastTxHash common.Hash
var lastTx *types.Transaction
var l1BatchEvents []*orm.BatchEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
@@ -261,11 +267,28 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
log.Error("Failed to unpack CommitBatch event", "err", err)
return nil, err
}
commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
if err != nil || isPending {
log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
return nil, err
// Get transaction, reuse if it's the same as previous
var commitTx *types.Transaction
if lastTxHash == vlog.TxHash && lastTx != nil {
commitTx = lastTx
} else {
log.Debug("Fetching commit batch transaction", "txHash", vlog.TxHash.String())
// Create 10-second timeout context for transaction fetch
txCtx, txCancel := context.WithTimeout(ctx, 10*time.Second)
fetchedTx, isPending, err := client.TransactionByHash(txCtx, vlog.TxHash)
txCancel()
if err != nil || isPending {
log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
return nil, err
}
commitTx = fetchedTx
lastTxHash = vlog.TxHash
lastTx = commitTx
}
version, startBlock, endBlock, err := utils.GetBatchVersionAndBlockRangeFromCalldata(commitTx.Data())
if err != nil {
log.Error("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
@@ -305,7 +328,13 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
return nil, fmt.Errorf("batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex, event.BatchHash.String(), calculatedBatch.Hash().String())
}
blocks, err := e.getBatchBlockRangeFromBlob(ctx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
log.Debug("Processing blob data", "blobVersionedHash", blobVersionedHash.String(), "batchIndex", event.BatchIndex.Uint64(), "currentIndex", currentIndex)
// Create 20-second timeout context for blob processing
blobCtx, blobCancel := context.WithTimeout(ctx, 20*time.Second)
blocks, err := e.getBatchBlockRangeFromBlob(blobCtx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
blobCancel()
if err != nil {
return nil, fmt.Errorf("failed to process versioned blob, blobVersionedHash: %s, block number: %d, blob index: %d, err: %w",
blobVersionedHash.String(), vlog.BlockNumber, currentIndex, err)
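
The two changes above share one shape: consecutive CommitBatch events from the same L1 transaction reuse the already-fetched transaction, and each remote call is bounded by its own timeout context. A condensed sketch of that pattern, not the repository's exact code:

	// Memoized, timeout-bounded fetch (illustrative).
	if vlog.TxHash != lastTxHash || lastTx == nil {
		txCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
		tx, isPending, err := client.TransactionByHash(txCtx, vlog.TxHash)
		cancel() // release the timer now; defer would accumulate inside the loop
		if err != nil || isPending {
			return nil, err
		}
		lastTx, lastTxHash = tx, vlog.TxHash
	}
	commitTx := lastTx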


@@ -154,7 +154,7 @@ func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, s
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("tx_status = ?", types.TxStatusTypeSent)
db = db.Where("tx_status in (?)", []types.TxStatusType{types.TxStatusTypeSent, types.TxStatusTypeFailedRelayed, types.TxStatusTypeRelayTxReverted})
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
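
The widened filter changes what counts as unclaimed: an L2 withdrawal is now reported not only while its relay is merely sent, but also after a failed relay or a reverted relay transaction, so such withdrawals can be retried. The filter in isolation, with the statuses taken from the diff above:

	db = db.Where("tx_status IN (?)", []types.TxStatusType{
		types.TxStatusTypeSent,
		types.TxStatusTypeFailedRelayed,
		types.TxStatusTypeRelayTxReverted,
	})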


@@ -0,0 +1,30 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
# Build blob_uploader
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/blob_uploader/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/blob_uploader
# Pull blob_uploader into a second stage deploy ubuntu container
FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/blob_uploader /bin/
WORKDIR /app
ENTRYPOINT ["blob_uploader"]


@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*


@@ -1,26 +1,25 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as chef
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as chef
WORKDIR app
FROM chef as planner
COPY ./common/libzkp/impl/ .
COPY ./crates/ ./crates/
COPY ./Cargo.* ./
COPY ./rust-toolchain ./
RUN cargo chef prepare --recipe-path recipe.json
FROM chef as zkp-builder
COPY ./common/libzkp/impl/rust-toolchain ./
COPY ./rust-toolchain ./
COPY --from=planner /app/recipe.json recipe.json
# run ./build/dockerfiles/coordinator-api/init-openvm.sh to get openvm-gpu
COPY ./build/dockerfiles/coordinator-api/openvm-gpu /openvm-gpu
COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
RUN cargo chef cook --release --recipe-path recipe.json
COPY ./common/libzkp/impl .
RUN cargo build --release
COPY ./crates/ ./crates/
COPY ./Cargo.* ./
COPY .git .git
RUN cargo build --release -p libzkp-c
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
@@ -35,9 +34,9 @@ RUN go mod download -x
# Build coordinator
FROM base as builder
COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/libzkp/lib/
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api && mv ./build/bin/coordinator_api /bin/coordinator_api
RUN mv coordinator/internal/logic/libzkp/lib /bin/
# Pull coordinator into a second stage deploy ubuntu container
FROM ubuntu:20.04


@@ -4,3 +4,5 @@ docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/


@@ -1,24 +0,0 @@
# openvm
# same order and features as zkvm-prover/Cargo.toml.gpu
[patch."ssh://git@github.com/scroll-tech/openvm-gpu.git"]
openvm = { path = "/openvm-gpu/crates/toolchain/openvm", default-features = false }
openvm-algebra-complex-macros = { path = "/openvm-gpu/extensions/algebra/complex-macros", default-features = false }
openvm-algebra-guest = { path = "/openvm-gpu/extensions/algebra/guest", default-features = false }
openvm-bigint-guest = { path = "/openvm-gpu/extensions/bigint/guest", default-features = false }
openvm-build = { path = "/openvm-gpu/crates/toolchain/build", default-features = false }
openvm-circuit = { path = "/openvm-gpu/crates/vm", default-features = false }
openvm-custom-insn = { path = "/openvm-gpu/crates/toolchain/custom_insn", default-features = false }
openvm-continuations = { path = "/openvm-gpu/crates/continuations", default-features = false }
openvm-ecc-guest = { path = "/openvm-gpu/extensions/ecc/guest", default-features = false }
openvm-instructions ={ path = "/openvm-gpu/crates/toolchain/instructions", default-features = false }
openvm-keccak256-guest = { path = "/openvm-gpu/extensions/keccak256/guest", default-features = false }
openvm-native-circuit = { path = "/openvm-gpu/extensions/native/circuit", default-features = false }
openvm-native-compiler = { path = "/openvm-gpu/extensions/native/compiler", default-features = false }
openvm-native-recursion = { path = "/openvm-gpu/extensions/native/recursion", default-features = false }
openvm-native-transpiler = { path = "/openvm-gpu/extensions/native/transpiler", default-features = false }
openvm-pairing-guest = { path = "/openvm-gpu/extensions/pairing/guest", default-features = false }
openvm-rv32im-guest = { path = "/openvm-gpu/extensions/rv32im/guest", default-features = false }
openvm-rv32im-transpiler = { path = "/openvm-gpu/extensions/rv32im/transpiler", default-features = false }
openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics"] }
openvm-sha256-guest = { path = "/openvm-gpu/extensions/sha256/guest", default-features = false }
openvm-transpiler = { path = "/openvm-gpu/crates/toolchain/transpiler", default-features = false }


@@ -1,2 +0,0 @@
[url "https://github.com/"]
insteadOf = ssh://git@github.com/


@@ -1,12 +0,0 @@
#!/bin/bash
set -uex
OPENVM_GPU_COMMIT=dfa10b4
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# checkout openvm-gpu
if [ ! -d $DIR/openvm-gpu ]; then
git clone git@github.com:scroll-tech/openvm-gpu.git $DIR/openvm-gpu
fi
cd $DIR/openvm-gpu && git fetch && git checkout ${OPENVM_GPU_COMMIT}


@@ -4,3 +4,5 @@ docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/


@@ -4,3 +4,5 @@ docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/


@@ -1,5 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
*target/*
permissionless-batches/conf/


@@ -18,6 +18,6 @@ RUN cd /src/zkvm-prover && make prover
FROM ubuntu:24.04 AS runtime
COPY --from=builder /src/zkvm-prover/target/release/prover /usr/local/bin/
COPY --from=builder /src/target/release/prover /usr/local/bin/
ENTRYPOINT ["prover"]


@@ -0,0 +1,30 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
# Build rollup_relayer
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/permissionless_batches/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer
# Pull rollup_relayer into a second stage deploy ubuntu container
FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/rollup_relayer /bin/
WORKDIR /app
ENTRYPOINT ["rollup_relayer"]


@@ -0,0 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/


@@ -1,5 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
*target/*
permissionless-batches/conf/

common/.gitignore

@@ -1,4 +1,3 @@
/build/bin
.idea
libzkp/impl/target
libzkp/interface/*.a
libzkp


@@ -4,5 +4,4 @@ test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
lint: ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
cd libzkp/impl && cargo fmt --all -- --check && cargo clippy --release -- -D warnings
GOBIN=$(PWD)/build/bin go run ../build/lint.go


@@ -41,7 +41,7 @@ func (g *gormLogger) Error(_ context.Context, msg string, data ...interface{}) {
func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
elapsed := time.Since(begin)
sql, rowsAffected := fc()
g.gethLogger.Debug("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
g.gethLogger.Trace("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
}
// InitDB init the db handler


@@ -15,7 +15,7 @@ require (
github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
github.com/scroll-tech/go-ethereum v1.10.14-0.20250625112225-a67863c65587
github.com/stretchr/testify v1.10.0
github.com/testcontainers/testcontainers-go v0.30.0
github.com/testcontainers/testcontainers-go/modules/compose v0.30.0
@@ -184,7 +184,7 @@ require (
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 // indirect
github.com/scroll-tech/da-codec v0.1.3-0.20250825071838-cddc263e5ef6 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect


@@ -636,10 +636,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/da-codec v0.1.3-0.20250825071838-cddc263e5ef6 h1:EiLZgoRfGgF0QzZ1jGSlo60A6W7g9L/8XLWMhwwtKJ0=
github.com/scroll-tech/da-codec v0.1.3-0.20250825071838-cddc263e5ef6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250625112225-a67863c65587 h1:wG1+gb+K4iLtxAHhiAreMdIjP5x9hB64duraN2+u1QU=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250625112225-a67863c65587/go.mod h1:YyfB2AyAtphlbIuDQgaxc2b9mo0zE4EBA1+qtXvzlmg=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=

File diff suppressed because it is too large


@@ -1,34 +0,0 @@
[package]
name = "zkp"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["cdylib"]
[patch.crates-io]
# patched add rkyv support & MSRV 1.77
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v0.8.21" }
ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-openvm-v1.0.0-rc.1" }
[dependencies]
euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.3.0", package = "scroll-zkvm-prover" }
euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.3.0", package = "scroll-zkvm-verifier" }
base64 = "0.13.0"
env_logger = "0.9.0"
libc = "0.2"
log = "0.4"
once_cell = "1.19"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0.66"
anyhow = "1.0.86"
[profile.test]
opt-level = 3
[profile.release]
opt-level = 3


@@ -1,11 +0,0 @@
.PHONY: help fmt clippy test test-ci test-all
build:
@cargo build --release
fmt:
@cargo fmt --all -- --check
clippy:
@cargo check --all-features
@cargo clippy --release -- -D warnings


@@ -1 +0,0 @@
nightly-2024-12-06


@@ -1,76 +0,0 @@
mod utils;
mod verifier;
use std::path::Path;
use crate::utils::{c_char_to_str, c_char_to_vec};
use libc::c_char;
use verifier::{TaskType, VerifierConfig};
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init(config: *const c_char) {
let config_str = c_char_to_str(config);
let verifier_config = serde_json::from_str::<VerifierConfig>(config_str).unwrap();
verifier::init(verifier_config);
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_chunk_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Chunk)
}
fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
let fork_name_str = c_char_to_str(fork_name);
let proof = c_char_to_vec(proof);
let verifier = verifier::get_verifier(fork_name_str);
if let Err(e) = verifier {
log::warn!("failed to get verifier, error: {:#}", e);
return 0 as c_char;
}
match verifier.unwrap().verify(task_type, proof) {
Err(e) => {
log::error!("{:?} verify failed, error: {:#}", task_type, e);
false as c_char
}
Ok(result) => result as c_char,
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Batch)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_bundle_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Bundle)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn dump_vk(fork_name: *const c_char, file: *const c_char) {
_dump_vk(fork_name, file);
}
fn _dump_vk(fork_name: *const c_char, file: *const c_char) {
let fork_name_str = c_char_to_str(fork_name);
let verifier = verifier::get_verifier(fork_name_str);
if let Ok(verifier) = verifier {
verifier.as_ref().dump_vk(Path::new(c_char_to_str(file)));
}
}


@@ -1,27 +0,0 @@
use std::{
ffi::CStr,
os::raw::c_char,
panic::{catch_unwind, AssertUnwindSafe},
};
pub(crate) fn c_char_to_str(c: *const c_char) -> &'static str {
let cstr = unsafe { CStr::from_ptr(c) };
cstr.to_str().unwrap()
}
pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec<u8> {
let cstr = unsafe { CStr::from_ptr(c) };
cstr.to_bytes().to_vec()
}
pub(crate) fn panic_catch<F: FnOnce() -> R, R>(f: F) -> Result<R, String> {
catch_unwind(AssertUnwindSafe(f)).map_err(|err| {
if let Some(s) = err.downcast_ref::<String>() {
s.to_string()
} else if let Some(s) = err.downcast_ref::<&str>() {
s.to_string()
} else {
format!("unable to get panic info {err:?}")
}
})
}


@@ -1,88 +0,0 @@
#![allow(static_mut_refs)]
mod euclid;
mod euclidv2;
use anyhow::{bail, Result};
use euclid::EuclidVerifier;
use euclidv2::EuclidV2Verifier;
use serde::{Deserialize, Serialize};
use std::{cell::OnceCell, path::Path, rc::Rc};
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum TaskType {
Chunk,
Batch,
Bundle,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct VKDump {
pub chunk_vk: String,
pub batch_vk: String,
pub bundle_vk: String,
}
pub trait ProofVerifier {
fn verify(&self, task_type: TaskType, proof: Vec<u8>) -> Result<bool>;
fn dump_vk(&self, file: &Path);
}
#[derive(Debug, Serialize, Deserialize)]
pub struct CircuitConfig {
pub fork_name: String,
pub params_path: String,
pub assets_path: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct VerifierConfig {
pub low_version_circuit: CircuitConfig,
pub high_version_circuit: CircuitConfig,
}
type HardForkName = String;
struct VerifierPair(HardForkName, Rc<Box<dyn ProofVerifier>>);
static mut VERIFIER_LOW: OnceCell<VerifierPair> = OnceCell::new();
static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();
pub fn init(config: VerifierConfig) {
let verifier = EuclidVerifier::new(&config.high_version_circuit.assets_path);
unsafe {
VERIFIER_LOW
.set(VerifierPair(
"euclid".to_string(),
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();
}
let verifier = EuclidV2Verifier::new(&config.high_version_circuit.assets_path);
unsafe {
VERIFIER_HIGH
.set(VerifierPair(
"euclidV2".to_string(),
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();
}
}
pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
unsafe {
if let Some(verifier) = VERIFIER_LOW.get() {
if verifier.0 == fork_name {
return Ok(verifier.1.clone());
}
}
if let Some(verifier) = VERIFIER_HIGH.get() {
if verifier.0 == fork_name {
return Ok(verifier.1.clone());
}
}
}
bail!("failed to get verifier, key not found, {}", fork_name)
}


@@ -1,65 +0,0 @@
use super::{ProofVerifier, TaskType, VKDump};
use anyhow::Result;
use crate::utils::panic_catch;
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV1, ChunkVerifier};
use std::{fs::File, path::Path};
pub struct EuclidVerifier {
chunk_verifier: ChunkVerifier,
batch_verifier: BatchVerifier,
bundle_verifier: BundleVerifierEuclidV1,
}
impl EuclidVerifier {
pub fn new(assets_dir: &str) -> Self {
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
let config = Path::new(assets_dir).join("root-verifier-vm-config");
let exe = Path::new(assets_dir).join("root-verifier-committed-exe");
Self {
chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up chunk verifier"),
batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up batch verifier"),
bundle_verifier: BundleVerifierEuclidV1::setup(&config, &exe, &verifier_bin)
.expect("Setting up bundle verifier"),
}
}
}
impl ProofVerifier for EuclidVerifier {
fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
self.chunk_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
self.batch_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
self.bundle_verifier
.verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
}
})
.map_err(|err_str: String| anyhow::anyhow!(err_str))
}
fn dump_vk(&self, file: &Path) {
let f = File::create(file).expect("Failed to open file to dump VK");
let dump = VKDump {
chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
};
serde_json::to_writer(f, &dump).expect("Failed to dump VK");
}
}


@@ -1,65 +0,0 @@
use super::{ProofVerifier, TaskType, VKDump};
use anyhow::Result;
use crate::utils::panic_catch;
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
use std::{fs::File, path::Path};
pub struct EuclidV2Verifier {
chunk_verifier: ChunkVerifier,
batch_verifier: BatchVerifier,
bundle_verifier: BundleVerifierEuclidV2,
}
impl EuclidV2Verifier {
pub fn new(assets_dir: &str) -> Self {
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
let config = Path::new(assets_dir).join("root-verifier-vm-config");
let exe = Path::new(assets_dir).join("root-verifier-committed-exe");
Self {
chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up chunk verifier"),
batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up batch verifier"),
bundle_verifier: BundleVerifierEuclidV2::setup(&config, &exe, &verifier_bin)
.expect("Setting up bundle verifier"),
}
}
}
impl ProofVerifier for EuclidV2Verifier {
fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
self.chunk_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
self.batch_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
self.bundle_verifier
.verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
}
})
.map_err(|err_str: String| anyhow::anyhow!(err_str))
}
fn dump_vk(&self, file: &Path) {
let f = File::create(file).expect("Failed to open file to dump VK");
let dump = VKDump {
chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
};
serde_json::to_writer(f, &dump).expect("Failed to dump VK");
}
}


@@ -1,12 +0,0 @@
// BatchVerifier is used to:
// - Verify a batch proof
// - Verify a bundle proof
void init(char* config);
char verify_batch_proof(char* proof, char* fork_name);
char verify_bundle_proof(char* proof, char* fork_name);
char verify_chunk_proof(char* proof, char* fork_name);
void dump_vk(char* fork_name, char* file);

common/testdata/blobdata.json (new file)

File diff suppressed because one or more lines are too long


@@ -326,3 +326,53 @@ func (s TxStatus) String() string {
return fmt.Sprintf("Unknown TxStatus (%d)", int32(s))
}
}
// BlobUploadStatus represents the status of a blob upload
type BlobUploadStatus int
const (
// BlobUploadStatusUndefined indicates an undefined status
BlobUploadStatusUndefined BlobUploadStatus = iota
// BlobUploadStatusPending indicates a pending upload status
BlobUploadStatusPending
// BlobUploadStatusUploaded indicates a successful upload status
BlobUploadStatusUploaded
// BlobUploadStatusFailed indicates a failed upload status
BlobUploadStatusFailed
)
func (s BlobUploadStatus) String() string {
switch s {
case BlobUploadStatusPending:
return "BlobUploadStatusPending"
case BlobUploadStatusUploaded:
return "BlobUploadStatusUploaded"
case BlobUploadStatusFailed:
return "BlobUploadStatusFailed"
default:
return fmt.Sprintf("Unknown BlobUploadStatus (%d)", int32(s))
}
}
// BlobStoragePlatform represents the platform a blob upload to
type BlobStoragePlatform int
const (
// BlobStoragePlatformUndefined indicates an undefined platform
BlobStoragePlatformUndefined BlobStoragePlatform = iota
// BlobStoragePlatformS3 represents AWS S3
BlobStoragePlatformS3
// BlobStoragePlatformArweave represents storage blockchain Arweave
BlobStoragePlatformArweave
)
func (s BlobStoragePlatform) String() string {
switch s {
case BlobStoragePlatformS3:
return "BlobStoragePlatformS3"
case BlobStoragePlatformArweave:
return "BlobStoragePlatformArweave"
default:
return fmt.Sprintf("Unknown BlobStoragePlatform (%d)", int32(s))
}
}
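
A small usage sketch for the new enums, relying only on the String() methods defined above (both types satisfy fmt.Stringer, so they print their names directly):

	status, platform := BlobUploadStatusPending, BlobStoragePlatformS3
	fmt.Printf("blob upload is %s on %s\n", status, platform)
	// prints: blob upload is BlobUploadStatusPending on BlobStoragePlatformS3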


@@ -10,13 +10,6 @@ import (
"github.com/scroll-tech/go-ethereum/common/hexutil"
)
const (
EuclidFork = "euclid"
EuclidV2Fork = "euclidV2"
EuclidV2ForkNameForProver = "euclidv2"
)
// ProofType represents the type of task.
type ProofType uint8
@@ -46,7 +39,7 @@ const (
// ChunkTaskDetail is a type containing ChunkTask detail for chunk task.
type ChunkTaskDetail struct {
// use one of the string of EuclidFork / EuclidV2Fork
// use one of the string of "euclidv1" / "euclidv2"
ForkName string `json:"fork_name"`
BlockHashes []common.Hash `json:"block_hashes"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
@@ -97,7 +90,7 @@ func (e *Byte48) UnmarshalJSON(input []byte) error {
// BatchTaskDetail is a type containing BatchTask detail.
type BatchTaskDetail struct {
// use one of the string of EuclidFork / EuclidV2Fork
// use one of the string of "euclidv1" / "euclidv2"
ForkName string `json:"fork_name"`
ChunkInfos []*ChunkInfo `json:"chunk_infos"`
ChunkProofs []*OpenVMChunkProof `json:"chunk_proofs"`
@@ -110,7 +103,7 @@ type BatchTaskDetail struct {
// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
type BundleTaskDetail struct {
// use one of the string of EuclidFork / EuclidV2Fork
// use one of the string of "euclidv1" / "euclidv2"
ForkName string `json:"fork_name"`
BatchProofs []*OpenVMBatchProof `json:"batch_proofs"`
BundleInfo *OpenVMBundleInfo `json:"bundle_info,omitempty"`

common/utils/blob.go (new file)

@@ -0,0 +1,23 @@
package utils
import (
"crypto/sha256"
"fmt"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)
// CalculateVersionedBlobHash calculate the kzg4844 versioned blob hash from a blob
func CalculateVersionedBlobHash(blob kzg4844.Blob) ([32]byte, error) {
// calculate kzg4844 commitment from blob
commit, err := kzg4844.BlobToCommitment(&blob)
if err != nil {
return [32]byte{}, fmt.Errorf("failed to get blob commitment, err: %w", err)
}
// calculate kzg4844 versioned blob hash from blob commitment
hasher := sha256.New()
vh := kzg4844.CalcBlobHashV1(hasher, &commit)
return vh, nil
}
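
kzg4844.CalcBlobHashV1 implements the EIP-4844 versioned hash: the SHA-256 of the KZG commitment with the first byte replaced by the version 0x01, so the helper's output should match the versioned hash recorded on L1 for the blob. An illustrative in-package call (a real blob would carry batch payload rather than zeros):

	var blob kzg4844.Blob
	vh, err := CalculateVersionedBlobHash(blob)
	if err != nil {
		panic(err) // sketch only; real callers propagate the error
	}
	fmt.Printf("versioned blob hash: 0x%x\n", vh) // first byte is the version, 0x01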

common/utils/blob_test.go (new file)

@@ -0,0 +1,51 @@
package utils
import (
"encoding/hex"
"encoding/json"
"os"
"testing"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)
type BlobData struct {
VersionedBlobHash string `json:"versionedBlobHash"`
BlobData string `json:"blobData"`
}
// TestCalculateVersionedBlobHash tests the CalculateVersionedBlobHash function
func TestCalculateVersionedBlobHash(t *testing.T) {
// Read the test data
data, err := os.ReadFile("../testdata/blobdata.json")
if err != nil {
t.Fatalf("Failed to read blobdata.json: %v", err)
}
var blobData BlobData
if err := json.Unmarshal(data, &blobData); err != nil {
t.Fatalf("Failed to parse blobdata.json: %v", err)
}
blobBytes, err := hex.DecodeString(blobData.BlobData)
if err != nil {
t.Fatalf("Failed to decode blob data: %v", err)
}
// Convert []byte to kzg4844.Blob
var blob kzg4844.Blob
copy(blob[:], blobBytes)
// Calculate the hash
calculatedHashBytes, err := CalculateVersionedBlobHash(blob)
if err != nil {
t.Fatalf("Failed to calculate versioned blob hash: %v", err)
}
calculatedHash := hex.EncodeToString(calculatedHashBytes[:])
if calculatedHash != blobData.VersionedBlobHash {
t.Fatalf("Hash mismatch: got %s, want %s", calculatedHash, blobData.VersionedBlobHash)
}
}


@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.5.10"
var tag = "v4.5.42"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -23,7 +23,7 @@ var commit = func() string {
return "000000"
}()
// ZkVersion is commit-id of common/libzkp/impl/cargo.lock/scroll-prover and halo2, contacted by a "-"
// ZkVersion is commit-id of cargo.lock/zkvm-prover and openvm, contacted by a "-"
// The default `000000-000000` is set for integration test, and will be overwritten by coordinator's & prover's actual compilations (see their Makefiles).
var ZkVersion = "000000-000000"


@@ -1,3 +1,4 @@
/build/bin
.idea
internal/logic/verifier/lib
internal/libzkp/lib/libzkp.so


@@ -1,26 +1,31 @@
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator libzkp
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
LIBZKP_PATH=./internal/logic/libzkp/lib/libzkp.so
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
ZKVM_VERSION=$(shell grep -m 1 "zkvm-prover?" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
OPENVM_VERSION=$(shell grep -m 1 "openvm.git" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
ZKVM_VERSION=$(shell grep -m 1 "zkvm-prover?" ../Cargo.lock | cut -d "\#" -f2 | cut -c-7)
OPENVM_VERSION=$(shell grep -m 1 "openvm.git" ../Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
ZK_VERSION=${ZKVM_VERSION}-${OPENVM_VERSION}
test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
libzkp:
cd ../common/libzkp/impl && cargo clean && cargo build --release && cp ./target/release/libzkp.so ../interface/
rm -rf ./internal/logic/verifier/lib && cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
$(LIBZKP_PATH):
$(MAKE) -C ./internal/logic/libzkp build
coordinator_api: libzkp ## Builds the Coordinator api instance.
clean_libzkp:
$(MAKE) -C ./internal/logic/libzkp clean
libzkp: clean_libzkp $(LIBZKP_PATH)
coordinator_api: $(LIBZKP_PATH) ## Builds the Coordinator api instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
coordinator_cron:
@@ -29,8 +34,8 @@ coordinator_cron:
coordinator_tool:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool
coordinator_api_skip_libzkp:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
#coordinator_api_skip_libzkp:
# go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
mock_coordinator_api: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_api ./cmd/api
@@ -38,15 +43,15 @@ mock_coordinator_api: ## Builds the mocked Coordinator instance.
mock_coordinator_cron: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron
test-verifier: libzkp
test-verifier: $(LIBZKP_PATH)
go test -tags ffi -timeout 0 -v ./internal/logic/verifier
test-gpu-verifier: libzkp
test-gpu-verifier: $(LIBZKP_PATH)
go test -tags="gpu ffi" -timeout 0 -v ./internal/logic/verifier
lint: ## Lint the files - used for CI
cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
GOBIN=$(PWD)/build/bin go run ../build/lint.go
cd ../ && cargo fmt --all -- --check && cargo clippy --release -- -D warnings
clean: ## Empty out the bin folder
@rm -rf build/bin


@@ -90,12 +90,12 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
cfg.ProverManager = &coordinatorConfig.ProverManager{
ProversPerSession: 1,
Verifier: &coordinatorConfig.VerifierConfig{
HighVersionCircuit: &coordinatorConfig.CircuitConfig{
AssetsPath: "",
ForkName: "euclidV2",
MinProverVersion: "v4.4.89",
MinProverVersion: "v4.4.89",
Verifiers: []coordinatorConfig.AssetConfig{{
AssetsPath: "",
ForkName: "feynman",
},
},
}},
BatchCollectionTimeSec: 60,
ChunkCollectionTimeSec: 60,
SessionAttempts: 10,


@@ -19,6 +19,7 @@ import (
)
var app *cli.App
var cfg *config.Config
func init() {
// Set up coordinator app info.
@@ -29,16 +30,29 @@ func init() {
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
if err := utils.LogSetup(ctx); err != nil {
return err
}
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
var err error
cfg, err = config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
return nil
}
// sub commands
app.Commands = []*cli.Command{
{
Name: "verify",
Usage: "verify an proof, specified by [forkname] <type> <proof path>",
Action: verify,
},
}
}
func action(ctx *cli.Context) error {
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
db, err := database.InitDB(cfg.DB)
if err != nil {
log.Crit("failed to init db connection", "err", err)


@@ -0,0 +1,109 @@
package main
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/common/types/message"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
)
func verify(cCtx *cli.Context) error {
var forkName, proofType, proofPath string
if cCtx.Args().Len() <= 2 {
forkName = cfg.ProverManager.Verifier.Verifiers[0].ForkName
proofType = cCtx.Args().First()
proofPath = cCtx.Args().Get(1)
} else {
forkName = cCtx.Args().First()
proofType = cCtx.Args().Get(1)
proofPath = cCtx.Args().Get(2)
}
log.Info("verify proof", "in", proofPath, "type", proofType, "forkName", forkName)
// Load the content of the proof file
data, err := os.ReadFile(filepath.Clean(proofPath))
if err != nil {
return fmt.Errorf("error reading file: %w", err)
}
vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier)
if err != nil {
return err
}
var ret bool
switch strings.ToLower(proofType) {
case "chunk":
proof := &message.OpenVMChunkProof{}
if err := json.Unmarshal(data, proof); err != nil {
return err
}
vk, ok := vf.ChunkVk[forkName]
if !ok {
return fmt.Errorf("no vk loaded for fork %s", forkName)
}
if len(proof.Vk) != 0 {
if !bytes.Equal(proof.Vk, vk) {
return fmt.Errorf("unmatch vk with expected: expected %s, get %s",
base64.StdEncoding.EncodeToString(vk),
base64.StdEncoding.EncodeToString(proof.Vk),
)
}
} else {
proof.Vk = vk
}
ret, err = vf.VerifyChunkProof(proof, forkName)
case "batch":
proof := &message.OpenVMBatchProof{}
if err := json.Unmarshal(data, proof); err != nil {
return err
}
vk, ok := vf.BatchVk[forkName]
if !ok {
return fmt.Errorf("no vk loaded for fork %s", forkName)
}
if len(proof.Vk) != 0 {
if !bytes.Equal(proof.Vk, vk) {
return fmt.Errorf("unmatch vk with expected: expected %s, get %s",
base64.StdEncoding.EncodeToString(vk),
base64.StdEncoding.EncodeToString(proof.Vk),
)
}
} else {
proof.Vk = vk
}
ret, err = vf.VerifyBatchProof(proof, forkName)
case "bundle":
proof := &message.OpenVMBundleProof{}
if err := json.Unmarshal(data, proof); err != nil {
return err
}
vk, ok := vf.BundleVk[forkName]
if !ok {
return fmt.Errorf("no vk loaded for fork %s", forkName)
}
proof.Vk = vk
ret, err = vf.VerifyBundleProof(proof, forkName)
default:
return fmt.Errorf("unsupport proof type %s", proofType)
}
if err != nil {
return err
}
log.Info("verified:", "ret", ret)
return nil
}
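
For reference, a minimal sketch of driving the verifier programmatically, mirroring the "chunk" branch of verify() above. The package paths follow this diff; the config and proof file paths are placeholders.

package main

import (
	"encoding/json"
	"log"
	"os"

	"scroll-tech/common/types/message"
	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/logic/verifier"
)

func main() {
	cfg, err := config.NewConfig("conf/config.json") // placeholder path
	if err != nil {
		log.Fatalf("load config: %v", err)
	}
	vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier)
	if err != nil {
		log.Fatalf("init verifier: %v", err)
	}
	data, err := os.ReadFile("chunk_proof.json") // placeholder path
	if err != nil {
		log.Fatalf("read proof: %v", err)
	}
	proof := &message.OpenVMChunkProof{}
	if err := json.Unmarshal(data, proof); err != nil {
		log.Fatalf("decode proof: %v", err)
	}
	ok, err := vf.VerifyChunkProof(proof, "feynman")
	if err != nil {
		log.Fatalf("verify: %v", err)
	}
	log.Printf("verified: %v", ok)
}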

View File

@@ -7,11 +7,17 @@
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"verifier": {
"high_version_circuit": {
"assets_path": "assets",
"fork_name": "euclidV2",
"min_prover_version": "v4.4.45"
}
"min_prover_version": "v4.4.45",
"verifiers": [
{
"assets_path": "assets",
"fork_name": "euclidV2"
},
{
"assets_path": "assets",
"fork_name": "feynman"
}
]
}
},
"db": {
@@ -21,7 +27,10 @@
"maxIdleNum": 20
},
"l2": {
"chain_id": 111
"chain_id": 111,
"l2geth": {
"endpoint": "not need to specified for mocking"
}
},
"auth": {
"secret": "prover secret key",

View File

@@ -9,8 +9,8 @@ require (
github.com/google/uuid v1.6.0
github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
github.com/scroll-tech/da-codec v0.1.3-0.20250825071838-cddc263e5ef6
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7
github.com/shopspring/decimal v1.3.1
github.com/stretchr/testify v1.10.0
github.com/urfave/cli/v2 v2.25.7
@@ -46,6 +46,7 @@ require (
)
require (
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.20.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
@@ -55,28 +56,57 @@ require (
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
github.com/fjl/memsize v0.0.2 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/gorilla/websocket v1.4.2 // indirect
github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.3.2 // indirect
github.com/huin/goupnp v1.0.2 // indirect
github.com/iden3/go-iden3-crypto v0.0.17 // indirect
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/tsdb v0.7.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
github.com/supranational/blst v0.3.13 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/crypto v0.32.0 // indirect
golang.org/x/sync v0.11.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)

View File

@@ -1,12 +1,18 @@
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/appleboy/gin-jwt/v2 v2.9.1 h1:l29et8iLW6omcHltsOP6LLk4s3v4g2FbFs0koxGWVZs=
github.com/appleboy/gin-jwt/v2 v2.9.1/go.mod h1:jwcPZJ92uoC9nOUTOKWoN/f6JZOgMSKlFSHw5/FrRUk=
github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4=
github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
@@ -24,6 +30,9 @@ github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1
github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM=
github.com/bytedance/sonic v1.10.1 h1:7a1wuFXL1cMy7a3f7/VFcEtriuXQnUBhtoVfOZiaysc=
github.com/bytedance/sonic v1.10.1/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
@@ -45,16 +54,32 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
@@ -73,19 +98,31 @@ github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7N
github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.0/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -93,13 +130,24 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI=
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
@@ -115,6 +163,7 @@ github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
@@ -129,14 +178,22 @@ github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
@@ -145,51 +202,76 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/da-codec v0.1.3-0.20250825071838-cddc263e5ef6 h1:EiLZgoRfGgF0QzZ1jGSlo60A6W7g9L/8XLWMhwwtKJ0=
github.com/scroll-tech/da-codec v0.1.3-0.20250825071838-cddc263e5ef6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7 h1:1rN1qocsQlOyk1VCpIEF1J5pfQbLAi1pnMZSLQS37jQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -216,6 +298,8 @@ github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPD
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
@@ -227,11 +311,16 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y=
golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
@@ -240,7 +329,10 @@ golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -250,13 +342,25 @@ golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -269,36 +373,56 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -28,10 +28,16 @@ type ProverManager struct {
BundleCollectionTimeSec int `json:"bundle_collection_time_sec"`
}
// l2geth client configuration items
type L2Endpoint struct {
Url string `json:"endpoint"`
}
// L2 loads l2geth configuration items.
type L2 struct {
// l2geth chain_id.
ChainID uint64 `json:"chain_id"`
ChainID uint64 `json:"chain_id"`
Endpoint *L2Endpoint `json:"l2geth"`
}
// Auth provides the auth coordinator
@@ -49,16 +55,17 @@ type Config struct {
Auth *Auth `json:"auth"`
}
// CircuitConfig circuit items.
type CircuitConfig struct {
AssetsPath string `json:"assets_path"`
ForkName string `json:"fork_name"`
MinProverVersion string `json:"min_prover_version"`
// AssetConfig contains the assets configured for each fork; the default vk file name is "OpenVmVk.json".
type AssetConfig struct {
AssetsPath string `json:"assets_path"`
ForkName string `json:"fork_name"`
Vkfile string `json:"vk_file,omitempty"`
}
// VerifierConfig loads the zk verifier config.
type VerifierConfig struct {
HighVersionCircuit *CircuitConfig `json:"high_version_circuit"`
MinProverVersion string `json:"min_prover_version"`
Verifiers []AssetConfig `json:"verifiers"`
}
// NewConfig returns a new instance of Config.
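
Given the new Verifiers list, resolving per-fork vk file paths could look like the sketch below; vkPaths is a hypothetical helper, and the default file name comes from the AssetConfig comment above.

package config

import "path/filepath"

// vkPaths is a hypothetical helper mapping each configured fork to the full
// path of its verification-key file, applying the documented default name.
func vkPaths(vc *VerifierConfig) map[string]string {
	paths := make(map[string]string, len(vc.Verifiers))
	for _, ac := range vc.Verifiers {
		vk := ac.Vkfile
		if vk == "" {
			vk = "OpenVmVk.json" // default per the AssetConfig comment
		}
		paths[ac.ForkName] = filepath.Join(ac.AssetsPath, vk)
	}
	return paths
}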

View File

@@ -20,11 +20,11 @@ func TestConfig(t *testing.T) {
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"verifier": {
"high_version_circuit": {
"min_prover_version": "v4.4.45",
"verifiers": [{
"assets_path": "assets",
"fork_name": "euclidV2",
"min_prover_version": "v4.4.45"
}
"fork_name": "feynman"
}]
},
"max_verifier_workers": 4
},

View File

@@ -1,12 +1,15 @@
package api
import (
"encoding/json"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/libzkp"
"scroll-tech/coordinator/internal/logic/verifier"
)
@@ -28,7 +31,18 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
log.Info("verifier created", "openVmVerifier", vf.OpenVMVkMap)
// TODO: enable this when the libzkp has been updated
l2cfg := cfg.L2.Endpoint
if l2cfg == nil {
panic("l2geth is not specified")
}
l2cfgBytes, err := json.Marshal(l2cfg)
if err != nil {
panic(err)
}
libzkp.InitL2geth(string(l2cfgBytes))
Auth = NewAuthController(db, cfg, vf)
GetTask = NewGetTaskController(cfg, chainCfg, db, reg)
GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg)
SubmitProof = NewSubmitProofController(cfg, chainCfg, db, vf, reg)
}
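
The JSON handed to libzkp.InitL2geth above is just the marshaled L2Endpoint from the config diff. A standalone sketch of the payload shape; the endpoint URL is a placeholder.

package main

import (
	"encoding/json"
	"fmt"
)

// L2Endpoint mirrors the struct from the config diff in this changeset.
type L2Endpoint struct {
	Url string `json:"endpoint"`
}

func main() {
	b, err := json.Marshal(&L2Endpoint{Url: "http://localhost:8545"}) // placeholder URL
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // prints: {"endpoint":"http://localhost:8545"}
}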

View File

@@ -17,6 +17,7 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provertask"
"scroll-tech/coordinator/internal/logic/verifier"
coordinatorType "scroll-tech/coordinator/internal/types"
)
@@ -25,13 +26,15 @@ type GetTaskController struct {
proverTasks map[message.ProofType]provertask.ProverTask
getTaskAccessCounter *prometheus.CounterVec
l2syncer *l2Syncer
}
// NewGetTaskController creates a get-prover-task controller
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, reg)
bundleProverTask := provertask.NewBundleProverTask(cfg, chainCfg, db, reg)
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, verifier *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, verifier.ChunkVk, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, verifier.BatchVk, reg)
bundleProverTask := provertask.NewBundleProverTask(cfg, chainCfg, db, verifier.BundleVk, reg)
ptc := &GetTaskController{
proverTasks: make(map[message.ProofType]provertask.ProverTask),
@@ -44,6 +47,13 @@ func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *
ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask
ptc.proverTasks[message.ProofTypeBatch] = batchProverTask
ptc.proverTasks[message.ProofTypeBundle] = bundleProverTask
if syncer, err := createL2Syncer(cfg); err != nil {
log.Crit("can not init l2 syncer", "err", err)
} else {
ptc.l2syncer = syncer
}
return ptc
}
@@ -78,6 +88,17 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
return
}
if getTaskParameter.ProverHeight == 0 {
// fall back to the internal l2geth to fill in the prover height
if blk, err := ptc.l2syncer.getLatestBlockNumber(ctx); err == nil {
getTaskParameter.ProverHeight = blk
} else {
nerr := fmt.Errorf("inner l2geth failure, err:%w", err)
types.RenderFailure(ctx, types.InternalServerError, nerr)
return
}
}
proofType := ptc.proofType(&getTaskParameter)
proverTask, isExist := ptc.proverTasks[proofType]
if !isExist {

View File

@@ -0,0 +1,71 @@
//go:build !mock_verifier
package api
import (
"errors"
"fmt"
"sync"
"time"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/coordinator/internal/config"
)
type l2Syncer struct {
l2gethClient *ethclient.Client
lastBlockNumber struct {
sync.RWMutex
data uint64
t time.Time
}
}
func createL2Syncer(cfg *config.Config) (*l2Syncer, error) {
if cfg.L2 == nil || cfg.L2.Endpoint == nil {
return nil, fmt.Errorf("l2 endpoint is not set in config")
} else {
l2gethClient, err := ethclient.Dial(cfg.L2.Endpoint.Url)
if err != nil {
return nil, fmt.Errorf("dial l2geth endpoint fail, err: %s", err)
}
return &l2Syncer{
l2gethClient: l2gethClient,
}, nil
}
}
// getLatestBlockNumber gets the latest block number, using cache if available and not expired
func (syncer *l2Syncer) getLatestBlockNumber(ctx *gin.Context) (uint64, error) {
// First check if we have a cached value that's still valid
syncer.lastBlockNumber.RLock()
if !syncer.lastBlockNumber.t.IsZero() && time.Since(syncer.lastBlockNumber.t) < time.Second*10 {
blockNumber := syncer.lastBlockNumber.data
syncer.lastBlockNumber.RUnlock()
return blockNumber, nil
}
syncer.lastBlockNumber.RUnlock()
// If not cached or expired, fetch from the client
if syncer.l2gethClient == nil {
return 0, errors.New("L2 geth client not initialized")
}
blockNumber, err := syncer.l2gethClient.BlockNumber(ctx)
if err != nil {
return 0, fmt.Errorf("failed to get latest block number: %w", err)
}
// Update the cache
syncer.lastBlockNumber.Lock()
syncer.lastBlockNumber.data = blockNumber
syncer.lastBlockNumber.t = time.Now()
syncer.lastBlockNumber.Unlock()
log.Debug("updated block height reference", "height", blockNumber)
return blockNumber, nil
}
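
The method above is a TTL cache behind an RWMutex; note that two goroutines racing past the RUnlock may both hit the client, which is harmless duplicate work for a 10-second cache. The same pattern in isolation, as a hypothetical generic helper:

package cache

import (
	"sync"
	"time"
)

// ttlValue is a hypothetical generic form of the l2Syncer cache above.
type ttlValue[T any] struct {
	mu  sync.RWMutex
	val T
	at  time.Time
	ttl time.Duration
}

// get returns the cached value if still fresh, otherwise calls fetch and
// caches the result. Concurrent callers may fetch redundantly, as in l2Syncer.
func (c *ttlValue[T]) get(fetch func() (T, error)) (T, error) {
	c.mu.RLock()
	if !c.at.IsZero() && time.Since(c.at) < c.ttl {
		v := c.val
		c.mu.RUnlock()
		return v, nil
	}
	c.mu.RUnlock()

	v, err := fetch()
	if err != nil {
		var zero T
		return zero, err
	}

	c.mu.Lock()
	c.val, c.at = v, time.Now()
	c.mu.Unlock()
	return v, nil
}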

View File

@@ -0,0 +1,20 @@
//go:build mock_verifier
package api
import (
"scroll-tech/coordinator/internal/config"
"github.com/gin-gonic/gin"
)
type l2Syncer struct{}
func createL2Syncer(_ *config.Config) (*l2Syncer, error) {
return &l2Syncer{}, nil
}
// getLatestBlockNumber returns a fixed block height under the mock_verifier build tag
func (syncer *l2Syncer) getLatestBlockNumber(_ *gin.Context) (uint64, error) {
return 99999994, nil
}

View File

@@ -9,7 +9,6 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
@@ -32,10 +31,11 @@ type LoginLogic struct {
func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
proverVersionHardForkMap := make(map[string][]string)
var highHardForks []string
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.HighVersionCircuit.ForkName)
highHardForks = append(highHardForks, message.EuclidFork, message.EuclidV2Fork)
proverVersionHardForkMap[cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion] = highHardForks
var hardForks []string
for _, cfg := range cfg.ProverManager.Verifier.Verifiers {
hardForks = append(hardForks, cfg.ForkName)
}
proverVersionHardForkMap[cfg.ProverManager.Verifier.MinProverVersion] = hardForks
return &LoginLogic{
cfg: cfg,
@@ -58,8 +58,8 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
return errors.New("auth message verify failure")
}
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion, login.Message.ProverVersion)
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.MinProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.MinProverVersion, login.Message.ProverVersion)
}
vks := make(map[string]struct{})

View File

@@ -0,0 +1,17 @@
.PHONY: build fmt clean clippy
build:
@cargo build --release -p libzkp-c
@mkdir -p lib
@cp -f ../../../../target/release/libzkp.so lib/
fmt:
@cargo fmt --all -- --check
clean:
@cargo clean --release -p libzkp -p libzkp-c -p l2geth
@rm -f lib/libzkp.so
clippy:
@cargo check --release --all-features
@cargo clippy --release -- -D warnings

View File

@@ -20,7 +20,7 @@ function build_test_bins() {
cd $REPO/coordinator
make libzkp
go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier
cd $REPO/common/libzkp
cd $REPO/coordinator/internal/logic/libzkp
}
build_test_bins

View File

@@ -0,0 +1,142 @@
package libzkp
/*
#cgo LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib -Wl,-rpath=${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#include <stdlib.h>
#include "libzkp.h"
*/
import "C" //nolint:typecheck
import (
"fmt"
"os"
"strings"
"unsafe"
"scroll-tech/common/types/message"
)
func init() {
C.init_tracing()
}
// goToCString converts a Go string to a newly allocated C string; free it with freeCString
func goToCString(s string) *C.char {
return C.CString(s)
}
// Helper function to free C string
func freeCString(s *C.char) {
C.free(unsafe.Pointer(s))
}
// Initialize the verifier
func InitVerifier(configJSON string) {
cConfig := goToCString(configJSON)
defer freeCString(cConfig)
C.init_verifier(cConfig)
}
// Verify a chunk proof
func VerifyChunkProof(proofData, forkName string) bool {
cProof := goToCString(proofData)
cForkName := goToCString(strings.ToLower(forkName))
defer freeCString(cProof)
defer freeCString(cForkName)
result := C.verify_chunk_proof(cProof, cForkName)
return result != 0
}
// Verify a batch proof
func VerifyBatchProof(proofData, forkName string) bool {
cProof := goToCString(proofData)
cForkName := goToCString(strings.ToLower(forkName))
defer freeCString(cProof)
defer freeCString(cForkName)
result := C.verify_batch_proof(cProof, cForkName)
return result != 0
}
// Verify a bundle proof
func VerifyBundleProof(proofData, forkName string) bool {
cProof := goToCString(proofData)
cForkName := goToCString(strings.ToLower(forkName))
defer freeCString(cProof)
defer freeCString(cForkName)
result := C.verify_bundle_proof(cProof, cForkName)
return result != 0
}
// TaskType enum values matching the Rust enum
const (
TaskTypeChunk = 0
TaskTypeBatch = 1
TaskTypeBundle = 2
)
func fromMessageTaskType(taskType int) int {
switch message.ProofType(taskType) {
case message.ProofTypeChunk:
return TaskTypeChunk
case message.ProofTypeBatch:
return TaskTypeBatch
case message.ProofTypeBundle:
return TaskTypeBundle
default:
panic(fmt.Sprintf("unsupported proof type: %d", taskType))
}
}
// Generate a universal task
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte) (bool, string, string, []byte) {
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk)
}
// Generate wrapped proof
func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {
cProofJSON := goToCString(proofJSON)
cMetadata := goToCString(metadata)
defer freeCString(cProofJSON)
defer freeCString(cMetadata)
// Create a C array from Go slice
var cVkData *C.char
if len(vkData) > 0 {
cVkData = (*C.char)(unsafe.Pointer(&vkData[0]))
}
resultPtr := C.gen_wrapped_proof(cProofJSON, cMetadata, cVkData, C.size_t(len(vkData)))
if resultPtr == nil {
return ""
}
// Convert result to Go string and free C memory
result := C.GoString(resultPtr)
C.release_string(resultPtr)
return result
}
// Dumps a verification key to a file
func DumpVk(forkName, filePath string) error {
cForkName := goToCString(strings.ToLower(forkName))
cFilePath := goToCString(filePath)
defer freeCString(cForkName)
defer freeCString(cFilePath)
// Call the C function to dump the verification key
C.dump_vk(cForkName, cFilePath)
// Check if the file was created successfully
// Note: The C function doesn't return an error code, so we check if the file exists
if _, err := os.Stat(filePath); os.IsNotExist(err) {
return fmt.Errorf("failed to dump verification key: file %s was not created", filePath)
}
return nil
}
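
A hypothetical caller of the wrappers above; it assumes libzkp.so sits on the rpath baked in by the cgo LDFLAGS directives, and the file paths are placeholders.

package main

import (
	"fmt"
	"os"

	"scroll-tech/coordinator/internal/logic/libzkp"
)

func main() {
	cfg, err := os.ReadFile("verifier_config.json") // placeholder path
	if err != nil {
		panic(err)
	}
	libzkp.InitVerifier(string(cfg))

	proof, err := os.ReadFile("chunk_proof.json") // placeholder path
	if err != nil {
		panic(err)
	}
	fmt.Println("chunk proof ok:", libzkp.VerifyChunkProof(string(proof), "feynman"))
}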

View File

@@ -0,0 +1,57 @@
// Verifier is used to:
// - Verify a batch proof
// - Verify a bundle proof
// - Verify a chunk proof
#ifndef LIBZKP_H
#define LIBZKP_H
#include <stddef.h> // For size_t
// Init log tracing
void init_tracing();
// Initialize the verifier with configuration
void init_verifier(char* config);
// Initialize the l2geth with configuration
void init_l2geth(char* config);
// Verify proofs - returns non-zero for success, zero for failure
char verify_batch_proof(char* proof, char* fork_name);
char verify_bundle_proof(char* proof, char* fork_name);
char verify_chunk_proof(char* proof, char* fork_name);
// Dump verification key to file
void dump_vk(char* fork_name, char* file);
// The result struct to hold data from handling a proving task
typedef struct {
char ok;
char* universal_task;
char* metadata;
char expected_pi_hash[32];
} HandlingResult;
// Generate a universal task based on task type and input JSON
// Returns a struct containing task data, metadata, and expected proof hash
HandlingResult gen_universal_task(
int task_type,
char* task,
char* fork_name,
const unsigned char* expected_vk,
size_t expected_vk_len
);
// Release memory allocated for a HandlingResult returned by gen_universal_task
void release_task_result(HandlingResult result);
// Generate a wrapped proof from the universal prover output and metadata
// Returns a JSON string containing the wrapped proof, or NULL on error
// The caller must call release_string on the returned pointer when done
char* gen_wrapped_proof(char* proof_json, char* metadata, char* vk, size_t vk_len);
// Release memory allocated for a string returned by gen_wrapped_proof
void release_string(char* string_ptr);
#endif /* LIBZKP_H */

View File

@@ -0,0 +1,45 @@
//go:build mock_verifier
package libzkp
import (
"encoding/json"
"fmt"
"scroll-tech/common/types/message"
"github.com/scroll-tech/go-ethereum/common"
)
func InitL2geth(configJSON string) {
}
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte) (bool, string, string, []byte) {
fmt.Printf("call mocked generate universal task %d, taskJson %s\n", taskType, taskJSON)
var metadata interface{}
switch taskType {
case TaskTypeChunk:
metadata = struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}
case TaskTypeBatch:
metadata = struct {
BatchInfo *message.OpenVMBatchInfo `json:"batch_info"`
BatchHash common.Hash `json:"batch_hash"`
}{BatchInfo: &message.OpenVMBatchInfo{}}
case TaskTypeBundle:
metadata = struct {
BundleInfo *message.OpenVMBundleInfo `json:"bundle_info"`
BundlePIHash common.Hash `json:"bundle_pi_hash"`
}{BundleInfo: &message.OpenVMBundleInfo{}}
}
encodeData, err := json.Marshal(metadata)
if err != nil {
fmt.Println("mock encoding json fail:", err)
return false, "", "", nil
}
return true, "UniversalTask data is not parsed", string(encodeData), []byte{0}
}

View File

@@ -0,0 +1,51 @@
//go:build !mock_verifier
package libzkp
/*
#include <stdlib.h>
#include "libzkp.h"
*/
import "C" //nolint:typecheck
import "unsafe"
// Initialize the handler for universal task
func InitL2geth(configJSON string) {
cConfig := goToCString(configJSON)
defer freeCString(cConfig)
C.init_l2geth(cConfig)
}
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte) (bool, string, string, []byte) {
cTask := goToCString(taskJSON)
cForkName := goToCString(forkName)
defer freeCString(cTask)
defer freeCString(cForkName)
// Create a C array from Go slice
var cVk *C.uchar
if len(expectedVk) > 0 {
cVk = (*C.uchar)(unsafe.Pointer(&expectedVk[0]))
}
result := C.gen_universal_task(C.int(taskType), cTask, cForkName, cVk, C.size_t(len(expectedVk)))
defer C.release_task_result(result)
// Check if the operation was successful
if result.ok == 0 {
return false, "", "", nil
}
// Convert C strings to Go strings
universalTask := C.GoString(result.universal_task)
metadata := C.GoString(result.metadata)
// Convert C array to Go slice
piHash := make([]byte, 32)
for i := 0; i < 32; i++ {
piHash[i] = byte(result.expected_pi_hash[i])
}
return true, universalTask, metadata, piHash
}
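
A hypothetical call site for the exported GenerateUniversalTask; the task JSON is a placeholder, and expectedVk is left nil here purely for illustration.

package main

import (
	"fmt"

	"scroll-tech/common/types/message"
	"scroll-tech/coordinator/internal/logic/libzkp"
)

func main() {
	// taskJSON is a placeholder; a real task payload comes from the coordinator.
	ok, task, metadata, piHash := libzkp.GenerateUniversalTask(
		int(message.ProofTypeChunk), `{}`, "feynman", nil)
	if !ok {
		fmt.Println("task generation failed")
		return
	}
	fmt.Printf("task=%s metadata=%s pi_hash=%x\n", task, metadata, piHash)
}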

View File

@@ -36,12 +36,13 @@ type BatchProverTask struct {
}
// NewBatchProverTask creates a new batch prover task
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProverTask {
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, expectedVk map[string][]byte, reg prometheus.Registerer) *BatchProverTask {
bp := &BatchProverTask{
BaseProverTask: BaseProverTask{
db: db,
cfg: cfg,
chainCfg: chainCfg,
expectedVk: expectedVk,
blockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
@@ -83,10 +84,37 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < 5; i++ {
var getTaskError error
var tmpBatchTask *orm.Batch
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeBatch) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
tmpBatchTask, getTaskError = bp.batchOrm.GetBatchByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
log.Error("failed to get batch has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBatchTask == nil {
// if the assigned batch was dropped, assigning another would cause too many issues
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped batch. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpBatchTask, getTaskError = bp.batchOrm.GetBatchByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected batch", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBatchTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}
if tmpBatchTask == nil {
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
}
// Why fetch again here? To support assigning one task to multiple provers, we also need to assign `ProvingTaskAssigned`
@@ -114,29 +142,32 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
// Don't dispatch the same failing job to the same prover
proverTasks, getFailedTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBatch, tmpBatchTask.Hash, 2)
if getFailedTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBatch.String(), "task ID", tmpBatchTask.Hash, "error", getFailedTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBatchTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
// we simply pick the batch that was already assigned, so there is no need to update attempts or re-check earlier failures
if taskCtx.hasAssignedTask == nil {
// Don't dispatch the same failing job to the same prover
proverTasks, getFailedTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBatch, tmpBatchTask.Hash, 2)
if getFailedTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBatch.String(), "task ID", tmpBatchTask.Hash, "error", getFailedTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBatchTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
}
}
}
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}
}
batchTask = tmpBatchTask
@@ -149,31 +180,51 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
log.Info("start batch proof generation session", "task_id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
proverTask := orm.ProverTask{
TaskID: batchTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// UTC time is required here; see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
var proverTask *orm.ProverTask
if taskCtx.hasAssignedTask == nil {
proverTask = &orm.ProverTask{
TaskID: batchTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// UTC time is required here; see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
}
} else {
proverTask = taskCtx.hasAssignedTask
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, batchTask, hardForkName)
taskMsg, err := bp.formatProverTask(ctx.Copy(), proverTask, batchTask, hardForkName)
if err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("format prover task failure", "task_id", batchTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
if getTaskParameter.Universal {
var metadata []byte
taskMsg, metadata, err = bp.applyUniversal(taskMsg)
if err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("Generate universal prover task failure", "task_id", batchTask.Hash, "type", "batch", "err", err)
return nil, ErrCoordinatorInternalFailure
}
proverTask.Metadata = metadata
}
// Store session info.
if taskCtx.hasAssignedTask == nil {
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), proverTask); err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
}
// note: for newly created tasks, the UUID is set as a side effect of InsertProverTask
taskMsg.UUID = proverTask.UUID.String()
bp.batchTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
bp.batchTaskGetTaskProver.With(prometheus.Labels{
@@ -233,7 +284,6 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
}
taskMsg := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBatch),
TaskData: string(chunkProofsBytes),
@@ -255,18 +305,12 @@ func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*
taskDetail := &message.BatchTaskDetail{
ChunkInfos: chunkInfos,
ChunkProofs: chunkProofs,
}
if hardForkName == message.EuclidV2Fork {
taskDetail.ForkName = message.EuclidV2ForkNameForProver
} else {
log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
ForkName: hardForkName,
}
dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
switch dbBatchCodecVersion {
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7:
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7, encoding.CodecV8:
default:
return taskDetail, nil
}
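The same three-way precedence is introduced for batch, bundle, and chunk assignment alike; distilled into a standalone sketch (getByHash/getUnassigned are hypothetical stand-ins for the ORM calls above):

package main

import "fmt"

type task struct{ hash string }

// selectTask mirrors the new Assign flow: (1) a task already assigned to this
// prover wins; (2) an explicitly requested task_id is honored next; (3) only
// then does the coordinator fall back to picking any assignable task.
func selectTask(assignedID, requestedID string,
	getByHash func(string) *task, getUnassigned func() *task) (*task, error) {
	if assignedID != "" {
		t := getByHash(assignedID)
		if t == nil {
			return nil, fmt.Errorf("assigned task %s was dropped", assignedID)
		}
		return t, nil
	}
	if requestedID != "" {
		if t := getByHash(requestedID); t != nil {
			return t, nil
		}
		return nil, fmt.Errorf("expected task %s has already been dropped", requestedID)
	}
	return getUnassigned(), nil
}

func main() {
	byHash := func(h string) *task { return &task{hash: h} }
	unassigned := func() *task { return &task{hash: "next-assignable"} }
	t, _ := selectTask("", "0xabc", byHash, unassigned)
	fmt.Println(t.hash) // 0xabc
}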

View File

@@ -33,12 +33,13 @@ type BundleProverTask struct {
}
// NewBundleProverTask creates a new bundle collector
func NewBundleProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BundleProverTask {
func NewBundleProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, expectedVk map[string][]byte, reg prometheus.Registerer) *BundleProverTask {
bp := &BundleProverTask{
BaseProverTask: BaseProverTask{
db: db,
chainCfg: chainCfg,
cfg: cfg,
expectedVk: expectedVk,
blockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
@@ -81,10 +82,37 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
for i := 0; i < 5; i++ {
var getTaskError error
var tmpBundleTask *orm.Bundle
tmpBundleTask, getTaskError = bp.bundleOrm.GetAssignedBundle(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned bundle proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeBundle) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
tmpBundleTask, getTaskError = bp.bundleOrm.GetBundleByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
log.Error("failed to get bundle has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBundleTask == nil {
// if the assigned bundle was dropped, assigning another would cause too many issues
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped bundle. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpBundleTask, getTaskError = bp.bundleOrm.GetBundleByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected bundle", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBundleTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}
if tmpBundleTask == nil {
tmpBundleTask, getTaskError = bp.bundleOrm.GetAssignedBundle(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned bundle proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
}
// Why fetch again here? To support assigning one task to multiple provers, we also need to assign `ProvingTaskAssigned`
@@ -112,31 +140,33 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
return nil, nil
}
// Don't dispatch the same failing job to the same prover
proverTasks, getTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBundle, tmpBundleTask.Hash, 2)
if getTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBundle.String(), "task ID", tmpBundleTask.Hash, "error", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty bundle, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBundleTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
// we simply pick the bundle that was already assigned, so there is no need to update attempts or re-check earlier failures
if taskCtx.hasAssignedTask == nil {
// Don't dispatch the same failing job to the same prover
proverTasks, getTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBundle, tmpBundleTask.Hash, 2)
if getTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBundle.String(), "task ID", tmpBundleTask.Hash, "error", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty bundle, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBundleTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
}
}
rowsAffected, updateAttemptsErr := bp.bundleOrm.UpdateBundleAttempts(ctx.Copy(), tmpBundleTask.Hash, tmpBundleTask.ActiveAttempts, tmpBundleTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update bundle attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}
}
rowsAffected, updateAttemptsErr := bp.bundleOrm.UpdateBundleAttempts(ctx.Copy(), tmpBundleTask.Hash, tmpBundleTask.ActiveAttempts, tmpBundleTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update bundle attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}
bundleTask = tmpBundleTask
break
}
@@ -147,31 +177,52 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
}
log.Info("start bundle proof generation session", "task index", bundleTask.Index, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
proverTask := orm.ProverTask{
TaskID: bundleTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBundle),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// UTC time is required here; see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
var proverTask *orm.ProverTask
if taskCtx.hasAssignedTask == nil {
proverTask = &orm.ProverTask{
TaskID: bundleTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBundle),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// UTC time is required here; see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
}
} else {
proverTask = taskCtx.hasAssignedTask
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("insert bundle prover task info fail", "task_id", bundleTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, hardForkName)
taskMsg, err := bp.formatProverTask(ctx.Copy(), proverTask, hardForkName)
if err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("format bundle prover task failure", "task_id", bundleTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
if getTaskParameter.Universal {
var metadata []byte
taskMsg, metadata, err = bp.applyUniversal(taskMsg)
if err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("Generate universal prover task failure", "task_id", bundleTask.Hash, "type", "bundle", "err", err)
return nil, ErrCoordinatorInternalFailure
}
// bundle proofs require a SNARK
taskMsg.UseSnark = true
proverTask.Metadata = metadata
}
// Store session info.
if taskCtx.hasAssignedTask == nil {
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), proverTask); err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("insert bundle prover task info fail", "task_id", bundleTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
}
// note: for newly created tasks, the UUID is set as a side effect of InsertProverTask
taskMsg.UUID = proverTask.UUID.String()
bp.bundleTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
bp.bundleTaskGetTaskProver.With(prometheus.Labels{
@@ -195,9 +246,14 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
return nil, fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", task.TaskID)
}
parentBatch, err := bp.batchOrm.GetBatchByHash(ctx, batches[0].ParentBatchHash)
if err != nil {
return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
var prevStateRoot common.Hash
// this is common in test cases: the first batch has an empty parent
if batches[0].Index > 1 {
parentBatch, err := bp.batchOrm.GetBatchByHash(ctx, batches[0].ParentBatchHash)
if err != nil {
return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
}
prevStateRoot = common.HexToHash(parentBatch.StateRoot)
}
var batchProofs []*message.OpenVMBatchProof
@@ -211,18 +267,12 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
taskDetail := message.BundleTaskDetail{
BatchProofs: batchProofs,
}
if hardForkName == message.EuclidV2Fork {
taskDetail.ForkName = message.EuclidV2ForkNameForProver
} else {
log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
ForkName: hardForkName,
}
taskDetail.BundleInfo = &message.OpenVMBundleInfo{
ChainID: bp.cfg.L2.ChainID,
PrevStateRoot: common.HexToHash(parentBatch.StateRoot),
PrevStateRoot: prevStateRoot,
PostStateRoot: common.HexToHash(batches[len(batches)-1].StateRoot),
WithdrawRoot: common.HexToHash(batches[len(batches)-1].WithdrawRoot),
NumBatches: uint32(len(batches)),
@@ -237,7 +287,6 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
}
taskMsg := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBundle),
TaskData: string(batchProofsBytes),
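Condensed, the prev-state-root handling above skips the parent lookup for the first batch and falls back to the zero hash; a sketch (illustrative names; common.Hash is go-ethereum's 32-byte hash type):

// zeroOrParentRoot returns the zero hash for the first batch (index <= 1)
// and the parent's state root otherwise.
func zeroOrParentRoot(firstBatchIndex uint64, parentStateRoot string) common.Hash {
	var prev common.Hash // defaults to the zero hash
	if firstBatchIndex > 1 {
		prev = common.HexToHash(parentStateRoot)
	}
	return prev
}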

View File

@@ -33,12 +33,13 @@ type ChunkProverTask struct {
}
// NewChunkProverTask creates a new chunk prover task
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProverTask {
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, expectedVk map[string][]byte, reg prometheus.Registerer) *ChunkProverTask {
cp := &ChunkProverTask{
BaseProverTask: BaseProverTask{
db: db,
cfg: cfg,
chainCfg: chainCfg,
expectedVk: expectedVk,
chunkOrm: orm.NewChunk(db),
blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db),
@@ -79,12 +80,39 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < 5; i++ {
var getTaskError error
var tmpChunkTask *orm.Chunk
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight)
if getTaskError != nil {
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeChunk) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
log.Debug("retrieved assigned task chunk", "taskID", taskCtx.hasAssignedTask.TaskID, "prover", taskCtx.ProverName)
tmpChunkTask, getTaskError = cp.chunkOrm.GetChunkByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
log.Error("failed to get chunk has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpChunkTask == nil {
// if the assigned chunk was dropped, assigning another would cause too many issues
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped chunk. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpChunkTask, getTaskError = cp.chunkOrm.GetChunkByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected chunk", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpChunkTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}
if tmpChunkTask == nil {
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight)
if getTaskError != nil {
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
}
// Why fetch again here? To support assigning one task to multiple provers, we also need to assign a `ProvingTaskAssigned`
// chunk to a prover. But using `proving_status in (1, 2)` would not use the postgres index, so the SQL needs to be split.
if tmpChunkTask == nil {
@@ -110,31 +138,33 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
// Don't dispatch the same failing job to the same prover
proverTasks, getFailedTaskError := cp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeChunk, tmpChunkTask.Hash, 2)
if getFailedTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeChunk.String(), "task ID", tmpChunkTask.Hash, "error", getFailedTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty chunk, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpChunkTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
// we simply pick the chunk that was already assigned, so there is no need to update attempts or re-check earlier failures
if taskCtx.hasAssignedTask == nil {
// Don't dispatch the same failing job to the same prover
proverTasks, getFailedTaskError := cp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeChunk, tmpChunkTask.Hash, 2)
if getFailedTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeChunk.String(), "task ID", tmpChunkTask.Hash, "error", getFailedTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty chunk, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpChunkTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
}
}
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}
}
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}
chunkTask = tmpChunkTask
break
}
@@ -145,31 +175,51 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
log.Info("start chunk generation session", "task_id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
proverTask := orm.ProverTask{
TaskID: chunkTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// UTC time is required here; see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
var proverTask *orm.ProverTask
if taskCtx.hasAssignedTask == nil {
proverTask = &orm.ProverTask{
TaskID: chunkTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// UTC time is required here; see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
}
} else {
proverTask = taskCtx.hasAssignedTask
}
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("insert chunk prover task fail", "task_id", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, chunkTask, hardForkName)
taskMsg, err := cp.formatProverTask(ctx.Copy(), proverTask, chunkTask, hardForkName)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("format prover task failure", "task_id", chunkTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
if getTaskParameter.Universal {
var metadata []byte
taskMsg, metadata, err = cp.applyUniversal(taskMsg)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("Generate universal prover task failure", "task_id", chunkTask.Hash, "type", "chunk", "err", err)
return nil, ErrCoordinatorInternalFailure
}
proverTask.Metadata = metadata
}
if taskCtx.hasAssignedTask == nil {
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), proverTask); err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("insert chunk prover task fail", "task_id", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
}
// note: for newly created tasks, the UUID is set as a side effect of InsertProverTask
taskMsg.UUID = proverTask.UUID.String()
cp.chunkTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
cp.chunkTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
@@ -184,20 +234,14 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
// Get block hashes.
blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)
if dbErr != nil || len(blockHashes) == 0 {
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%w", task.TaskID, dbErr)
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%v", task.TaskID, dbErr)
}
var taskDetailBytes []byte
taskDetail := message.ChunkTaskDetail{
BlockHashes: blockHashes,
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
}
if hardForkName == message.EuclidV2Fork {
taskDetail.ForkName = message.EuclidV2ForkNameForProver
} else {
log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
ForkName: hardForkName,
}
var err error
@@ -207,7 +251,6 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
}
proverTaskSchema := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeChunk),
TaskData: string(taskDetailBytes),

View File

@@ -16,6 +16,7 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/libzkp"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
@@ -37,9 +38,10 @@ type ProverTask interface {
// BaseProverTask is a base prover task containing a series of common functions
type BaseProverTask struct {
cfg *config.Config
chainCfg *params.ChainConfig
db *gorm.DB
cfg *config.Config
chainCfg *params.ChainConfig
db *gorm.DB
expectedVk map[string][]byte
batchOrm *orm.Batch
chunkOrm *orm.Chunk
@@ -56,10 +58,11 @@ type proverTaskContext struct {
ProverProviderType uint8
HardForkNames map[string]struct{}
taskType message.ProofType
chunkTask *orm.Chunk
batchTask *orm.Batch
bundleTask *orm.Bundle
taskType message.ProofType
chunkTask *orm.Chunk
batchTask *orm.Batch
bundleTask *orm.Bundle
hasAssignedTask *orm.ProverTask
}
// hardForkName gets the chunk/batch/bundle hard fork name
@@ -174,17 +177,30 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, e
return nil, fmt.Errorf("public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", publicKey, proverName, proverVersion)
}
isAssigned, err := b.proverTaskOrm.IsProverAssigned(ctx.Copy(), publicKey.(string))
assigned, err := b.proverTaskOrm.IsProverAssigned(ctx.Copy(), publicKey.(string))
if err != nil {
return nil, fmt.Errorf("failed to check if prover %s is assigned a task, err: %w", publicKey.(string), err)
}
if isAssigned {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", publicKey, proverName, proverVersion)
}
ptc.hasAssignedTask = assigned
return &ptc, nil
}
func (b *BaseProverTask) applyUniversal(schema *coordinatorType.GetTaskSchema) (*coordinatorType.GetTaskSchema, []byte, error) {
expectedVk, ok := b.expectedVk[schema.HardForkName]
if !ok {
return nil, nil, fmt.Errorf("no expectedVk found from hardfork %s", schema.HardForkName)
}
ok, uTaskData, metadata, _ := libzkp.GenerateUniversalTask(schema.TaskType, schema.TaskData, schema.HardForkName, expectedVk)
if !ok {
return nil, nil, fmt.Errorf("can not generate universal task, see coordinator log for the reason")
}
schema.TaskData = uTaskData
return schema, []byte(metadata), nil
}
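applyUniversal relies on libzkp.GenerateUniversalTask, whose Go-side shape matches the generateUniversalTask wrapper shown earlier; a usage sketch under that assumption:

ok, uTaskData, metadata, piHash := libzkp.GenerateUniversalTask(
	schema.TaskType, schema.TaskData, schema.HardForkName, expectedVk)
if !ok {
	// the Rust side logs the concrete failure reason
	return nil, nil, fmt.Errorf("cannot generate universal task; see coordinator log for the reason")
}
_ = piHash // the expected public-input hash is not used by applyUniversal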
func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
getTaskCounterInitOnce.Do(func() {
getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{

View File

@@ -19,6 +19,8 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/libzkp"
"scroll-tech/coordinator/internal/logic/provertask"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
@@ -69,6 +71,10 @@ type ProofReceiverLogic struct {
validateFailureProverTaskStatusNotOk prometheus.Counter
validateFailureProverTaskTimeout prometheus.Counter
validateFailureProverTaskHaveVerifier prometheus.Counter
ChunkTask provertask.ProverTask
BundleTask provertask.ProverTask
BatchTask provertask.ProverTask
}
// NewSubmitProofReceiverLogic create a proof receiver logic
@@ -168,6 +174,28 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
if getHardForkErr != nil {
return ErrGetHardForkNameFailed
}
if proofParameter.Universal {
if len(proverTask.Metadata) == 0 {
return errors.New("cannot re-wrap proof: no metadata was recorded in advance")
}
var expectedVk []byte
switch message.ProofType(proofParameter.TaskType) {
case message.ProofTypeChunk:
expectedVk = m.verifier.ChunkVk[hardForkName]
case message.ProofTypeBatch:
expectedVk = m.verifier.BatchVk[hardForkName]
case message.ProofTypeBundle:
expectedVk = m.verifier.BundleVk[hardForkName]
}
if len(expectedVk) == 0 {
return errors.New("no vk matches the current hard fork; check your config")
}
proofParameter.Proof = libzkp.GenerateWrappedProof(proofParameter.Proof, string(proverTask.Metadata), expectedVk)
if proofParameter.Proof == "" {
return errors.New("cannot re-wrap proof; see coordinator log for the reason")
}
}
switch message.ProofType(proofParameter.TaskType) {
case message.ProofTypeChunk:

View File

@@ -10,7 +10,13 @@ import (
// NewVerifier Sets up a mock verifier.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
return &Verifier{cfg: cfg, OpenVMVkMap: map[string]struct{}{"mock_vk": {}}}, nil
return &Verifier{
cfg: cfg,
OpenVMVkMap: map[string]struct{}{"mock_vk": {}},
ChunkVk: map[string][]byte{"euclidV2": []byte("mock_vk")},
BatchVk: map[string][]byte{"euclidV2": []byte("mock_vk")},
BundleVk: map[string][]byte{},
}, nil
}
// VerifyChunkProof return a mock verification result for a ChunkProof.

View File

@@ -11,4 +11,7 @@ const InvalidTestProof = "this is an invalid proof"
type Verifier struct {
cfg *config.VerifierConfig
OpenVMVkMap map[string]struct{}
ChunkVk map[string][]byte
BatchVk map[string][]byte
BundleVk map[string][]byte
}

View File

@@ -2,30 +2,25 @@
package verifier
/*
#cgo LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#include <stdlib.h>
#include "./lib/libzkp.h"
*/
import "C" //nolint:typecheck
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"os"
"path"
"unsafe"
"path/filepath"
"strings"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/libzkp"
)
// This struct maps to `CircuitConfig` in common/libzkp/impl/src/verifier.rs
// This struct maps to `CircuitConfig` in libzkp/src/verifier.rs
// Defining a brand-new struct here eliminates side effects in case fields
// in `*config.CircuitConfig` are changed
type rustCircuitConfig struct {
@@ -33,24 +28,28 @@ type rustCircuitConfig struct {
AssetsPath string `json:"assets_path"`
}
func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
func newRustCircuitConfig(cfg config.AssetConfig) *rustCircuitConfig {
return &rustCircuitConfig{
ForkName: cfg.ForkName,
AssetsPath: cfg.AssetsPath,
}
}
// This struct maps to `VerifierConfig` in common/libzkp/impl/src/verifier.rs
// This struct maps to `VerifierConfig` in coordinator/internal/logic/libzkp/src/verifier.rs
// Defining a brand-new struct here eliminates side effects in case fields
// in `*config.VerifierConfig` are changed
type rustVerifierConfig struct {
HighVersionCircuit *rustCircuitConfig `json:"high_version_circuit"`
Circuits []*rustCircuitConfig `json:"circuits"`
}
func newRustVerifierConfig(cfg *config.VerifierConfig) *rustVerifierConfig {
return &rustVerifierConfig{
HighVersionCircuit: newRustCircuitConfig(cfg.HighVersionCircuit),
out := &rustVerifierConfig{}
for _, cfg := range cfg.Verifiers {
out.Circuits = append(out.Circuits, newRustCircuitConfig(cfg))
}
return out
}
type rustVkDump struct {
@@ -67,24 +66,20 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
return nil, err
}
configStr := C.CString(string(configBytes))
defer func() {
C.free(unsafe.Pointer(configStr))
}()
C.init(configStr)
libzkp.InitVerifier(string(configBytes))
v := &Verifier{
cfg: cfg,
OpenVMVkMap: make(map[string]struct{}),
ChunkVk: make(map[string][]byte),
BatchVk: make(map[string][]byte),
BundleVk: make(map[string][]byte),
}
if err := v.loadOpenVMVks(message.EuclidFork); err != nil {
return nil, err
}
if err := v.loadOpenVMVks(message.EuclidV2Fork); err != nil {
return nil, err
for _, cfg := range cfg.Verifiers {
if err := v.loadOpenVMVks(cfg); err != nil {
return nil, err
}
}
return v, nil
@@ -98,15 +93,7 @@ func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName st
}
log.Info("Start to verify batch proof", "forkName", forkName)
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
verified := C.verify_batch_proof(proofStr, forkNameStr)
return verified != 0, nil
return libzkp.VerifyBatchProof(string(buf), forkName), nil
}
// VerifyChunkProof Verify a ZkProof by marshaling it and sending it to the Verifier.
@@ -117,15 +104,8 @@ func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName st
}
log.Info("Start to verify chunk proof", "forkName", forkName)
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
verified := C.verify_chunk_proof(proofStr, forkNameStr)
return verified != 0, nil
return libzkp.VerifyChunkProof(string(buf), forkName), nil
}
// VerifyBundleProof Verify a ZkProof for a bundle of batches, by marshaling it and verifying it via the EVM verifier.
@@ -135,46 +115,29 @@ func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName
return false, err
}
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
log.Info("Start to verify bundle proof ...")
verified := C.verify_bundle_proof(proofStr, forkNameStr)
return verified != 0, nil
return libzkp.VerifyBundleProof(string(buf), forkName), nil
}
func (v *Verifier) readVK(filePat string) (string, error) {
f, err := os.Open(filePat)
if err != nil {
return "", err
/*
add vks of incompatible circuit apps here to avoid using them unexpectedly
25/07/15: 0.5.0rc0 is no longer compatible due to a breaking change
*/
const blocked_vks = `
rSJNNBpsxBdKlstbIIU/aYc7bHau98Qb2yjZMc5PmDhmGOolp5kYRbvF/VcWcO5HN5ujGs6S00W8pZcCoNQRLQ==,
2Lo7Cebm6SFtcsYXipkcMxIBmVY7UpoMXik/Msm7t2nyvi9EaNGsSnDnaCurscYEF+IcdjPUtVtY9EcD7IKwWg==,
D6YFHwTLZF/U2zpYJPQ3LwJZRm85yA5Vq2iFBqd3Mk4iwOUpS8sbOp3vg2+NDxhhKphgYpuUlykpdsoRhEt+cw==,
`
func (v *Verifier) loadOpenVMVks(cfg config.AssetConfig) error {
vkFileName := cfg.Vkfile
if vkFileName == "" {
vkFileName = "openVmVk.json"
}
byt, err := io.ReadAll(f)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(byt), nil
}
vkFile := filepath.Join(cfg.AssetsPath, vkFileName)
func (v *Verifier) loadOpenVMVks(forkName string) error {
tempFile := path.Join(os.TempDir(), "openVmVk.json")
defer func() {
if err := os.Remove(tempFile); err != nil {
log.Error("failed to remove temp file", "err", err)
}
}()
forkNameCStr := C.CString(forkName)
defer C.free(unsafe.Pointer(forkNameCStr))
tempFileCStr := C.CString(tempFile)
defer C.free(unsafe.Pointer(tempFileCStr))
C.dump_vk(forkNameCStr, tempFileCStr)
f, err := os.Open(tempFile)
f, err := os.Open(filepath.Clean(vkFile))
if err != nil {
return err
}
@@ -187,8 +150,36 @@ func (v *Verifier) loadOpenVMVks(forkName string) error {
if err := json.Unmarshal(byt, &dump); err != nil {
return err
}
if strings.Contains(blocked_vks, dump.Chunk) {
return fmt.Errorf("loaded blocked chunk vk %s", dump.Chunk)
}
if strings.Contains(blocked_vks, dump.Batch) {
return fmt.Errorf("loaded blocked batch vk %s", dump.Batch)
}
if strings.Contains(blocked_vks, dump.Bundle) {
return fmt.Errorf("loaded blocked bundle vk %s", dump.Bundle)
}
v.OpenVMVkMap[dump.Chunk] = struct{}{}
v.OpenVMVkMap[dump.Batch] = struct{}{}
v.OpenVMVkMap[dump.Bundle] = struct{}{}
log.Info("Load vks", "from", cfg.AssetsPath, "chunk", dump.Chunk, "batch", dump.Batch, "bundle", dump.Bundle)
decodedBytes, err := base64.StdEncoding.DecodeString(dump.Chunk)
if err != nil {
return err
}
v.ChunkVk[cfg.ForkName] = decodedBytes
decodedBytes, err = base64.StdEncoding.DecodeString(dump.Batch)
if err != nil {
return err
}
v.BatchVk[cfg.ForkName] = decodedBytes
decodedBytes, err = base64.StdEncoding.DecodeString(dump.Bundle)
if err != nil {
return err
}
v.BundleVk[cfg.ForkName] = decodedBytes
return nil
}
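The loader expects openVmVk.json (or cfg.Vkfile) to carry one base64-encoded vk per proof layer; a sketch of the shape, with JSON tag names assumed lower-case since rustVkDump's definition is elided from this diff:

// hypothetical mirror of rustVkDump; the tag names are an assumption
type vkDump struct {
	Chunk  string `json:"chunk"`  // base64-encoded chunk vk
	Batch  string `json:"batch"`  // base64-encoded batch vk
	Bundle string `json:"bundle"` // base64-encoded bundle vk
}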

View File

@@ -11,7 +11,7 @@ import (
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
)
@@ -29,11 +29,11 @@ func TestFFI(t *testing.T) {
as := assert.New(t)
cfg := &config.VerifierConfig{
HighVersionCircuit: &config.CircuitConfig{
AssetsPath: *assetsPathHi,
ForkName: "euclidV2",
MinProverVersion: "",
},
MinProverVersion: "",
Verifiers: []config.AssetConfig{{
AssetsPath: *assetsPathHi,
ForkName: "euclidV2",
}},
}
v, err := NewVerifier(cfg)
@@ -58,25 +58,25 @@ func TestFFI(t *testing.T) {
t.Log("Verified batch proof")
}
func readBatchProof(filePat string, as *assert.Assertions) *types.OpenVMBatchProof {
func readBatchProof(filePat string, as *assert.Assertions) *message.OpenVMBatchProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)
proof := &types.OpenVMBatchProof{}
proof := &message.OpenVMBatchProof{}
as.NoError(json.Unmarshal(byt, proof))
return proof
}
func readChunkProof(filePat string, as *assert.Assertions) *types.OpenVMChunkProof {
func readChunkProof(filePat string, as *assert.Assertions) *message.OpenVMChunkProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)
proof := &types.OpenVMChunkProof{}
proof := &message.OpenVMChunkProof{}
as.NoError(json.Unmarshal(byt, proof))
return proof

View File

@@ -37,6 +37,7 @@ type ProverTask struct {
FailureType int16 `json:"failure_type" gorm:"column:failure_type;default:0"`
Reward decimal.Decimal `json:"reward" gorm:"column:reward;default:0;type:decimal(78)"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
Metadata []byte `json:"metadata" gorm:"column:metadata;default:NULL"`
AssignedAt time.Time `json:"assigned_at" gorm:"assigned_at"`
// metadata
@@ -56,17 +57,17 @@ func (*ProverTask) TableName() string {
}
// IsProverAssigned returns the task currently assigned to the prover with the given public key, or nil if there is none.
func (o *ProverTask) IsProverAssigned(ctx context.Context, publicKey string) (bool, error) {
func (o *ProverTask) IsProverAssigned(ctx context.Context, publicKey string) (*ProverTask, error) {
db := o.db.WithContext(ctx)
var task ProverTask
err := db.Where("prover_public_key = ? AND proving_status = ?", publicKey, types.ProverAssigned).First(&task).Error
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return false, nil
return nil, nil
}
return false, err
return nil, err
}
return true, nil
return &task, nil
}
// GetProverTasks get prover tasks
@@ -268,6 +269,24 @@ func (o *ProverTask) UpdateProverTaskProvingStatusAndFailureType(ctx context.Con
return nil
}
// UpdateProverTaskAssignedTime updates the assigned_at time of a specific ProverTask record.
func (o *ProverTask) UpdateProverTaskAssignedTime(ctx context.Context, uuid uuid.UUID, t time.Time, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("uuid = ?", uuid)
updates := make(map[string]interface{})
updates["assigned_at"] = t
if err := db.Updates(updates).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskAssignedTime error: %w, uuid:%s, status: %v", err, uuid, t)
}
return nil
}
// UpdateProverTaskFailureType update the prover task failure type
func (o *ProverTask) UpdateProverTaskFailureType(ctx context.Context, uuid uuid.UUID, failureType types.ProverTaskFailureType, dbTX ...*gorm.DB) error {
db := o.db

View File

@@ -4,6 +4,8 @@ package types
type GetTaskParameter struct {
ProverHeight uint64 `form:"prover_height" json:"prover_height"`
TaskTypes []int `form:"task_types" json:"task_types"`
TaskID string `form:"task_id,omitempty" json:"task_id,omitempty"`
Universal bool `form:"universal,omitempty" json:"universal,omitempty"`
}
// GetTaskSchema the schema data return to prover for get prover task
@@ -11,6 +13,7 @@ type GetTaskSchema struct {
UUID string `json:"uuid"`
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
UseSnark bool `json:"use_snark,omitempty"`
TaskData string `json:"task_data"`
HardForkName string `json:"hard_fork_name"`
}
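Put together, a prover opting into the new flow would send a get_task body like the following (a sketch in the style of the integration tests further below; the task_id value is an illustrative placeholder):

body := map[string]interface{}{
	"prover_height": 100,
	"task_types":    []int{int(message.ProofTypeChunk)},
	"task_id":       "0x...", // optional: pin a specific task
	"universal":     true,    // request universal task data
}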

View File

@@ -2,6 +2,7 @@ package types
import (
"fmt"
"scroll-tech/common/types/message"
)

View File

@@ -7,6 +7,7 @@ type SubmitProofParameter struct {
TaskType int `form:"task_type" json:"task_type" binding:"required"`
Status int `form:"status" json:"status"`
Proof string `form:"proof" json:"proof"`
Universal bool `form:"universal,omitempty" json:"universal,omitempty"`
FailureType int `form:"failure_type" json:"failure_type"`
FailureMsg string `form:"failure_msg" json:"failure_msg"`
}

View File

@@ -79,16 +79,17 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
tokenTimeout = 60
conf = &config.Config{
L2: &config.L2{
ChainID: 111,
ChainID: 111,
Endpoint: &config.L2Endpoint{},
},
ProverManager: &config.ProverManager{
ProversPerSession: proversPerSession,
Verifier: &config.VerifierConfig{
HighVersionCircuit: &config.CircuitConfig{
AssetsPath: "",
ForkName: "euclidV2",
MinProverVersion: "v4.4.89",
},
MinProverVersion: "v4.4.89",
Verifiers: []config.AssetConfig{{
AssetsPath: "",
ForkName: "euclidV2",
}},
},
BatchCollectionTimeSec: 10,
ChunkCollectionTimeSec: 10,

View File

@@ -161,7 +161,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_types": []int{int(proofType)}}).
SetBody(map[string]interface{}{"universal": true, "prover_height": 100, "task_types": []int{int(proofType)}}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err)
@@ -191,7 +191,7 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType)}).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "universal": true}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err)
@@ -207,32 +207,33 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
}
var proof []byte
switch message.ProofType(proverTaskSchema.TaskType) {
case message.ProofTypeChunk:
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{}, MetaData: struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case message.ProofTypeBatch:
encodeData, err := json.Marshal(message.OpenVMBatchProof{VmProof: &message.OpenVMProof{}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
}
if proofStatus == verifiedFailed {
switch proverTaskSchema.TaskType {
case int(message.ProofTypeChunk):
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)}, MetaData: struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}})
if proofStatus != verifiedFailed {
switch message.ProofType(proverTaskSchema.TaskType) {
case message.ProofTypeChunk:
fallthrough
case message.ProofTypeBatch:
encodeData, err := json.Marshal(&message.OpenVMProof{})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case int(message.ProofTypeBatch):
encodeData, err := json.Marshal(&message.OpenVMBatchProof{VmProof: &message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)}})
case message.ProofTypeBundle:
encodeData, err := json.Marshal(&message.OpenVMEvmProof{})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
}
} else {
// in "verifiedFailed" status, we purpose the mockprover submit proof but not valid
switch message.ProofType(proverTaskSchema.TaskType) {
case message.ProofTypeChunk:
fallthrough
case message.ProofTypeBatch:
encodeData, err := json.Marshal(&message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case message.ProofTypeBundle:
encodeData, err := json.Marshal(&message.OpenVMEvmProof{Proof: []byte(verifier.InvalidTestProof)})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
@@ -240,11 +241,12 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
}
submitProof := types.SubmitProofParameter{
UUID: proverTaskSchema.UUID,
TaskID: proverTaskSchema.TaskID,
TaskType: proverTaskSchema.TaskType,
Status: int(proofMsgStatus),
Proof: string(proof),
UUID: proverTaskSchema.UUID,
TaskID: proverTaskSchema.TaskID,
TaskType: proverTaskSchema.TaskType,
Status: int(proofMsgStatus),
Proof: string(proof),
Universal: true,
}
token, authErrCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(message.ProofType(proverTaskSchema.TaskType))})

View File

@@ -0,0 +1,45 @@
[patch."https://github.com/openvm-org/openvm.git"]
openvm-build = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-circuit = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-continuations = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-instructions = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-circuit = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-compiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-recursion = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-rv32im-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
openvm-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
"nightly-features",
], tag = "v0.2.1" }
p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }

File diff suppressed because it is too large

View File

@@ -0,0 +1,21 @@
.PHONY: build update clean
ZKVM_COMMIT ?= freebuild
PLONKY3_GPU_VERSION=$(shell ./print_plonky3gpu_version.sh | sed -n '2p')
$(info PLONKY3_GPU_VERSION is ${PLONKY3_GPU_VERSION})
GIT_REV ?= $(shell git rev-parse --short HEAD)
GO_TAG ?= $(shell grep "var tag = " ../../common/version/version.go | cut -d "\"" -f2)
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION}
$(info ZK_GPU_VERSION is ${ZK_VERSION})
clean:
cargo clean -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock
# build the GPU prover; never touch the lock file
build:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock
# update Cargo.lock when the override config has been updated
#update:
# GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock

View File

@@ -1,6 +1,6 @@
#!/bin/bash
config_file=~/.cargo/config.toml
config_file=.cargo/config.toml
plonky3_gpu_path=$(grep 'path.*plonky3-gpu' "$config_file" | cut -d'"' -f2 | head -n 1)
plonky3_gpu_path=$(dirname "$plonky3_gpu_path")

crates/l2geth/Cargo.toml Normal file
View File

@@ -0,0 +1,26 @@
[package]
name = "l2geth"
version.workspace = true
edition.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
tokio = {version = "1", features = ["rt-multi-thread"]}
async-trait = "0.1"
url = ">=2.5.3"
libzkp = { path = "../libzkp" }
alloy = { workspace = true, features = ["provider-http", "transport-http", "reqwest", "reqwest-rustls-tls", "json-rpc"] }
sbv-primitives = { workspace = true, features = ["scroll"] }
sbv-utils = { workspace = true, features = ["scroll"] }
eyre.workspace = true
base64.workspace = true
serde.workspace = true
serde_derive.workspace = true
serde_json = { workspace = true, features = ["raw_value"] }
tracing.workspace = true

crates/l2geth/src/lib.rs Normal file

@@ -0,0 +1,19 @@
pub mod rpc_client;
pub use rpc_client::RpcConfig;
use std::sync::{Arc, OnceLock};
static GLOBAL_L2GETH_CLI: OnceLock<Arc<rpc_client::RpcClientCore>> = OnceLock::new();
pub fn init(config: &str) -> eyre::Result<()> {
let cfg: RpcConfig = serde_json::from_str(config)?;
GLOBAL_L2GETH_CLI.get_or_init(|| Arc::new(rpc_client::RpcClientCore::create(&cfg).unwrap()));
Ok(())
}
pub fn get_client() -> rpc_client::RpcClient<'static> {
GLOBAL_L2GETH_CLI
.get()
.expect("must has been inited")
.get_client()
}
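As a usage note: `init` is expected to be called exactly once with a JSON config before any `get_client` call. A minimal sketch of a hypothetical caller (the endpoint URL is a placeholder):

// Hypothetical caller of the crate above; the endpoint URL is a placeholder.
fn main() -> eyre::Result<()> {
    // `init` parses the JSON into `RpcConfig`; any field left out
    // (workers, max_concurrency, retry knobs) takes its serde default.
    l2geth::init(r#"{"endpoint": "http://localhost:8545"}"#)?;
    // `get_client` hands out a cheap `Copy` handle borrowing the global
    // provider and runtime; it panics if `init` was never called.
    let _client = l2geth::get_client();
    Ok(())
}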


@@ -0,0 +1,245 @@
use alloy::{
providers::{Provider, ProviderBuilder, RootProvider},
rpc::client::ClientBuilder,
transports::layers::RetryBackoffLayer,
};
use eyre::Result;
use libzkp::tasks::ChunkInterpreter;
use sbv_primitives::types::Network;
use serde::{Deserialize, Serialize};
fn default_max_retry() -> u32 {
10
}
fn default_backoff() -> u64 {
100
}
fn default_cups() -> u64 {
100
}
fn default_workers() -> usize {
4
}
fn default_max_concurrency() -> usize {
10
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct RpcConfig {
#[serde(alias = "endpoint")]
pub rpc_url: String,
// The worker threads used in the runtime, default 4
#[serde(default = "default_workers")]
pub workers: usize,
// The blocking threads to handle rpc tasks, default 10
#[serde(default = "default_max_concurrency")]
pub max_concurrency: usize,
// Retry parameters
#[serde(default = "default_max_retry")]
pub max_retry: u32,
// backoff duration in milliseconds, default 100ms
#[serde(default = "default_backoff")]
pub backoff: u64,
// compute units per second: default 100
#[serde(default = "default_cups")]
pub cups: u64,
}
/// An RPC client core which carries its own async runtime,
/// so it can run in blocking mode (i.e. inside a dynamic library without a global entry point)
pub struct RpcClientCore {
/// rpc provider
provider: RootProvider<Network>,
rt: tokio::runtime::Runtime,
}
#[derive(Clone, Copy)]
pub struct RpcClient<'a> {
provider: &'a RootProvider<Network>,
handle: &'a tokio::runtime::Handle,
}
impl RpcClientCore {
pub fn create(config: &RpcConfig) -> Result<Self> {
let rpc = url::Url::parse(&config.rpc_url)?;
tracing::info!("Using RPC: {}", rpc);
// note we MUST use a multi-threaded runtime since there is no main thread driving it;
// for each call in our methods we can acquire a handle of the runtime to resolve one or more
// async tasks
let rt = tokio::runtime::Builder::new_multi_thread()
.worker_threads(config.workers)
.max_blocking_threads(config.max_concurrency)
.enable_all()
.build()?;
let retry_layer = RetryBackoffLayer::new(config.max_retry, config.backoff, config.cups);
let client = ClientBuilder::default().layer(retry_layer).http(rpc);
Ok(Self {
provider: ProviderBuilder::<_, _, Network>::default().connect_client(client),
rt,
})
}
pub fn get_client(&self) -> RpcClient {
RpcClient {
provider: &self.provider,
handle: self.rt.handle(),
}
}
}
impl ChunkInterpreter for RpcClient<'_> {
fn try_fetch_block_witness(
&self,
block_hash: sbv_primitives::B256,
prev_witness: Option<&sbv_primitives::types::BlockWitness>,
) -> Result<sbv_primitives::types::BlockWitness> {
async fn fetch_witness_async(
provider: &RootProvider<Network>,
block_hash: sbv_primitives::B256,
prev_witness: Option<&sbv_primitives::types::BlockWitness>,
) -> Result<sbv_primitives::types::BlockWitness> {
use sbv_utils::{rpc::ProviderExt, witness::WitnessBuilder};
let chain_id = provider.get_chain_id().await?;
let block = provider
.get_block_by_hash(block_hash)
.full()
.await?
.ok_or_else(|| eyre::eyre!("Block {block_hash} not found"))?;
let number = block.header.number;
let parent_hash = block.header.parent_hash;
if number == 0 {
eyre::bail!("no number in header or use block 0");
}
let mut witness_builder = WitnessBuilder::new()
.block(block)
.chain_id(chain_id)
.execution_witness(provider.debug_execution_witness(number.into()).await?);
let prev_state_root = match prev_witness {
Some(witness) => {
if witness.header.number != number - 1 {
eyre::bail!(
"the ref witness is not the previous block, expected {} get {}",
number - 1,
witness.header.number,
);
}
witness.header.state_root
}
None => {
let parent_block = provider
.get_block_by_hash(parent_hash)
.await?
.expect("parent block should exist");
parent_block.header.state_root
}
};
witness_builder = witness_builder.prev_state_root(prev_state_root);
Ok(witness_builder.build()?)
}
tracing::debug!("fetch witness for {block_hash}");
self.handle
.block_on(fetch_witness_async(self.provider, block_hash, prev_witness))
}
fn try_fetch_storage_node(
&self,
node_hash: sbv_primitives::B256,
) -> Result<sbv_primitives::Bytes> {
async fn fetch_storage_node_async(
provider: &RootProvider<Network>,
node_hash: sbv_primitives::B256,
) -> Result<sbv_primitives::Bytes> {
let ret = provider
.client()
.request::<_, sbv_primitives::Bytes>("debug_dbGet", (node_hash,))
.await?;
Ok(ret)
}
tracing::debug!("fetch storage node for {node_hash}");
self.handle
.block_on(fetch_storage_node_async(self.provider, node_hash))
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloy::primitives::hex;
use sbv_primitives::B256;
use std::env;
fn create_config_from_env() -> RpcConfig {
let endpoint =
env::var("L2GETH_ENDPOINT").expect("L2GETH_ENDPOINT environment variable must be set");
let config_json = format!(r#"{{"endpoint": "{}"}}"#, endpoint);
serde_json::from_str(&config_json).expect("Failed to parse RPC config")
}
#[test]
#[ignore = "Requires L2GETH_ENDPOINT environment variable"]
fn test_try_fetch_block_witness() {
let config = create_config_from_env();
let client_core = RpcClientCore::create(&config).expect("Failed to create RPC client");
let client = client_core.get_client();
// (latest - 1) block as of 2025-06-15
let block_hash = B256::from(
hex::const_decode_to_array(
b"0x9535a6970bc4db9031749331a214e35ed8c8a3f585f6f456d590a0bc780a1368",
)
.unwrap(),
);
// expects success against a live endpoint that serves this block
let wit1 = client
.try_fetch_block_witness(block_hash, None)
.expect("should success");
// latest block as of 2025-06-15
let block_hash = B256::from(
hex::const_decode_to_array(
b"0xd47088cdb6afc68aa082e633bb7da9340d29c73841668afacfb9c1e66e557af0",
)
.unwrap(),
);
let wit2 = client
.try_fetch_block_witness(block_hash, Some(&wit1))
.expect("should success");
println!("{}", serde_json::to_string_pretty(&wit2).unwrap());
}
#[test]
#[ignore = "Requires L2GETH_ENDPOINT environment variable"]
fn test_try_fetch_storage_node() {
let config = create_config_from_env();
let client_core = RpcClientCore::create(&config).expect("Failed to create RPC client");
let client = client_core.get_client();
// the root node (state root) of the block in the unit test above
let node_hash = B256::from(
hex::const_decode_to_array(
b"0xb9e67403a2eb35afbb0475fe942918cf9a330a1d7532704c24554506be62b27c",
)
.unwrap(),
);
// expects success against a live endpoint that can serve this node
let node = client
.try_fetch_storage_node(node_hash)
.expect("should success");
println!("{}", serde_json::to_string_pretty(&node).unwrap());
}
}
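For reference, only the `endpoint` (alias of `rpc_url`) field is mandatory; everything else falls back to the `default_*` helpers at the top of this file. A small sketch of a test one could add alongside the ones above to confirm the defaults:

#[test]
fn test_config_defaults() {
    // A minimal config: only the endpoint is given.
    let cfg: RpcConfig = serde_json::from_str(r#"{"endpoint": "http://localhost:8545"}"#)
        .expect("minimal config should parse");
    // Values mirror the `default_*` functions defined above.
    assert_eq!(cfg.workers, 4);
    assert_eq!(cfg.max_concurrency, 10);
    assert_eq!(cfg.max_retry, 10);
    assert_eq!(cfg.backoff, 100); // milliseconds
    assert_eq!(cfg.cups, 100);
}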

crates/libzkp/Cargo.toml Normal file

@@ -0,0 +1,24 @@
[package]
name = "libzkp"
version.workspace = true
edition.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
scroll-zkvm-types.workspace = true
scroll-zkvm-verifier-euclid.workspace = true
alloy-primitives.workspace = true # suppress the effect of "native-keccak"
sbv-primitives = { workspace = true, features = ["scroll-compress-ratio", "scroll"] }
base64.workspace = true
serde.workspace = true
serde_derive.workspace = true
serde_json = { workspace = true, features = ["raw_value"] }
tracing.workspace = true
eyre.workspace = true
git-version = "0.3.5"
serde_stacker = "0.1"
regex = "1.11"
c-kzg = { version = "2.0", features = ["serde"] }

crates/libzkp/src/lib.rs Normal file

@@ -0,0 +1,145 @@
pub mod proofs;
pub mod tasks;
pub mod verifier;
pub use verifier::{TaskType, VerifierConfig};
mod utils;
use sbv_primitives::B256;
use scroll_zkvm_types::util::vec_as_base64;
use serde::{Deserialize, Serialize};
use serde_json::value::RawValue;
use std::path::Path;
use tasks::chunk_interpreter::{ChunkInterpreter, TryFromWithInterpreter};
/// Turn the coordinator's chunk task into a JSON string for the formal chunk proving
/// task (with full witnesses)
pub fn checkout_chunk_task(
task_json: &str,
interpreter: impl ChunkInterpreter,
) -> eyre::Result<String> {
let chunk_task = serde_json::from_str::<tasks::ChunkTask>(task_json)?;
let ret = serde_json::to_string(&tasks::ChunkProvingTask::try_from_with_interpret(
chunk_task,
interpreter,
)?)?;
Ok(ret)
}
/// Generate the required artifacts for proving tasks.
/// Returns (pi_hash, metadata, task)
pub fn gen_universal_task(
task_type: i32,
task_json: &str,
fork_name_str: &str,
expected_vk: &[u8],
interpreter: Option<impl ChunkInterpreter>,
) -> eyre::Result<(B256, String, String)> {
use proofs::*;
use tasks::*;
/// Wrapper for metadata
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(untagged)]
enum AnyMetaData {
Chunk(ChunkProofMetadata),
Batch(BatchProofMetadata),
Bundle(BundleProofMetadata),
}
let (pi_hash, metadata, mut u_task) = match task_type {
x if x == TaskType::Chunk as i32 => {
let mut task = serde_json::from_str::<ChunkProvingTask>(task_json)?;
// normalize the fork name field in the task
task.fork_name = task.fork_name.to_lowercase();
// always respect the fork_name_str being passed (which has been normalized);
// if the fork_name wrapped in the task does not match, consider it a malformed task
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in chunk task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
let (pi_hash, metadata, u_task) = utils::panic_catch(move || {
gen_universal_chunk_task(task, fork_name_str.into(), interpreter)
})
.map_err(|e| eyre::eyre!("caught panic in chunk task: {e}"))??;
(pi_hash, AnyMetaData::Chunk(metadata), u_task)
}
x if x == TaskType::Batch as i32 => {
let mut task = serde_json::from_str::<BatchProvingTask>(task_json)?;
task.fork_name = task.fork_name.to_lowercase();
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in batch task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
let (pi_hash, metadata, u_task) =
utils::panic_catch(move || gen_universal_batch_task(task, fork_name_str.into()))
.map_err(|e| eyre::eyre!("caught panic in batch task: {e}"))??;
(pi_hash, AnyMetaData::Batch(metadata), u_task)
}
x if x == TaskType::Bundle as i32 => {
let mut task = serde_json::from_str::<BundleProvingTask>(task_json)?;
task.fork_name = task.fork_name.to_lowercase();
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in bundle task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
let (pi_hash, metadata, u_task) =
utils::panic_catch(move || gen_universal_bundle_task(task, fork_name_str.into()))
.map_err(|e| eyre::eyre!("caught panic in bundle task: {e}"))??;
(pi_hash, AnyMetaData::Bundle(metadata), u_task)
}
_ => return Err(eyre::eyre!("unrecognized task type {task_type}")),
};
u_task.vk = Vec::from(expected_vk);
Ok((
pi_hash,
serde_json::to_string(&metadata)?,
serde_json::to_string(&u_task)?,
))
}
/// Helper to rearrange the proof returned by the universal prover into the corresponding wrapped proof
pub fn gen_wrapped_proof(proof_json: &str, metadata: &str, vk: &[u8]) -> eyre::Result<String> {
#[derive(Serialize)]
struct RearrangeWrappedProofJson<'a> {
#[serde(borrow)]
pub metadata: &'a RawValue,
#[serde(borrow)]
pub proof: &'a RawValue,
#[serde(with = "vec_as_base64", default)]
pub vk: Vec<u8>,
pub git_version: String,
}
let re_arrange = RearrangeWrappedProofJson {
metadata: serde_json::from_str(metadata)?,
proof: serde_json::from_str(proof_json)?,
vk: vk.to_vec(),
git_version: utils::short_git_version(),
};
let ret = serde_json::to_string(&re_arrange)?;
Ok(ret)
}
/// init verifier
pub fn verifier_init(config: &str) -> eyre::Result<()> {
let cfg: VerifierConfig = serde_json::from_str(config)?;
verifier::init(cfg);
Ok(())
}
/// verify proof
pub fn verify_proof(proof: Vec<u8>, fork_name: &str, task_type: TaskType) -> eyre::Result<bool> {
let verifier = verifier::get_verifier(fork_name)?;
let ret = verifier.lock().unwrap().verify(task_type, &proof)?;
Ok(ret)
}
/// dump vk
pub fn dump_vk(fork_name: &str, file: &str) -> eyre::Result<()> {
let verifier = verifier::get_verifier(fork_name)?;
verifier.lock().unwrap().dump_vk(Path::new(file));
Ok(())
}
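Taken together, the exported functions above outline the host-side flow: build a universal task, run the prover, re-wrap the returned proof, and verify it. A hedged sketch of that glue, as it might live in a crate depending on both `libzkp` and `l2geth`; `run_universal_prover` is a hypothetical stand-in for the actual prover invocation:

// Hypothetical stand-in for the external universal prover.
fn run_universal_prover(_universal_task_json: &str) -> eyre::Result<String> {
    unimplemented!("hand the task to the universal prover and return its proof JSON")
}

fn prove_and_verify(task_json: &str, fork_name: &str, vk: &[u8]) -> eyre::Result<bool> {
    // 1. Normalize the coordinator task into a universal proving task;
    //    the l2geth client acts as the interpreter for missing witnesses.
    let (pi_hash, metadata, u_task) = libzkp::gen_universal_task(
        libzkp::TaskType::Chunk as i32,
        task_json,
        fork_name,
        vk,
        Some(l2geth::get_client()),
    )?;
    tracing::info!("expected pi hash: {pi_hash}");
    // 2. Run the prover (outside this crate).
    let proof_json = run_universal_prover(&u_task)?;
    // 3. Re-wrap the raw proof into the coordinator-facing format.
    let wrapped = libzkp::gen_wrapped_proof(&proof_json, &metadata, vk)?;
    // 4. Verify the wrapped proof bytes through the verifier registered
    //    via `verifier_init` (assumed to have been called beforehand).
    libzkp::verify_proof(wrapped.into_bytes(), fork_name, libzkp::TaskType::Chunk)
}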

crates/libzkp/src/proofs.rs Normal file

@@ -0,0 +1,340 @@
use std::path::Path;
use crate::utils::short_git_version;
use eyre::Result;
use sbv_primitives::B256;
use scroll_zkvm_types::{
batch::BatchInfo,
bundle::BundleInfo,
chunk::ChunkInfo,
proof::{EvmProof, OpenVmEvmProof, ProofEnum, RootProof},
public_inputs::{ForkName, MultiVersionPublicInputs},
types_agg::{AggregationInput, ProgramCommitment},
util::vec_as_base64,
};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
/// A wrapper around the actual inner proof.
#[derive(Clone, Serialize, Deserialize)]
pub struct WrappedProof<Metadata> {
/// Generic metadata carried by a proof.
pub metadata: Metadata,
/// The inner proof, either a [`RootProof`] or [`EvmProof`] depending on the
/// [`crate::ProverType`].
pub proof: ProofEnum,
/// Represents the verifying key in serialized form. The purpose of including the verifying key
/// along with the proof is to allow a verifier-only mode to identify the source of proof
/// generation.
///
/// For [`RootProof`] the verifying key is denoted by the digest of the VM's program.
///
/// For [`EvmProof`] it's the raw bytes of the halo2 circuit's `VerifyingKey`.
///
/// We encode the vk in base64 format during JSON serialization.
#[serde(with = "vec_as_base64", default)]
pub vk: Vec<u8>,
/// Represents the git ref for `zkvm-prover` that was used to construct the proof.
///
/// This is useful for debugging.
pub git_version: String,
}
pub trait AsRootProof {
fn as_root_proof(&self) -> &RootProof;
}
pub trait AsEvmProof {
fn as_evm_proof(&self) -> &EvmProof;
}
pub trait IntoEvmProof {
fn into_evm_proof(self) -> OpenVmEvmProof;
}
/// Alias for convenience.
pub type ChunkProof = WrappedProof<ChunkProofMetadata>;
/// Alias for convenience.
pub type BatchProof = WrappedProof<BatchProofMetadata>;
/// Alias for convenience.
pub type BundleProof = WrappedProof<BundleProofMetadata>;
impl AsRootProof for ChunkProof {
fn as_root_proof(&self) -> &RootProof {
self.proof
.as_root_proof()
.expect("batch proof use root proof")
}
}
impl AsRootProof for BatchProof {
fn as_root_proof(&self) -> &RootProof {
self.proof
.as_root_proof()
.expect("batch proof use root proof")
}
}
impl AsEvmProof for BundleProof {
fn as_evm_proof(&self) -> &EvmProof {
self.proof
.as_evm_proof()
.expect("bundle proof use evm proof")
}
}
impl IntoEvmProof for BundleProof {
fn into_evm_proof(self) -> OpenVmEvmProof {
self.proof
.as_evm_proof()
.expect("bundle proof use evm proof")
.clone()
.into()
}
}
/// Trait to enable operations in metadata
pub trait ProofMetadata: Serialize + DeserializeOwned + std::fmt::Debug {
type PublicInputs: MultiVersionPublicInputs;
fn pi_hash_info(&self) -> &Self::PublicInputs;
fn new_proof<P: Into<ProofEnum>>(self, proof: P, vk: Option<&[u8]>) -> WrappedProof<Self> {
WrappedProof {
metadata: self,
proof: proof.into(),
vk: vk.map(Vec::from).unwrap_or_default(),
git_version: short_git_version(),
}
}
}
pub trait PersistableProof: Sized {
/// Read and deserialize the proof.
fn from_json<P: AsRef<Path>>(path_proof: P) -> Result<Self>;
/// Serialize the proof and dump it at the given path.
fn dump<P: AsRef<Path>>(&self, path_proof: P) -> Result<()>;
}
/// Metadata attached to [`ChunkProof`].
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ChunkProofMetadata {
/// The chunk information describing the list of blocks contained within the chunk.
pub chunk_info: ChunkInfo,
}
impl ProofMetadata for ChunkProofMetadata {
type PublicInputs = ChunkInfo;
fn pi_hash_info(&self) -> &Self::PublicInputs {
&self.chunk_info
}
}
/// Metadata attached to [`BatchProof`].
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BatchProofMetadata {
/// The batch information describing the list of chunks.
pub batch_info: BatchInfo,
/// The [`scroll_zkvm_types::batch::BatchHeader`]'s digest.
pub batch_hash: B256,
}
impl ProofMetadata for BatchProofMetadata {
type PublicInputs = BatchInfo;
fn pi_hash_info(&self) -> &Self::PublicInputs {
&self.batch_info
}
}
/// Metadata attached to [`BundleProof`].
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BundleProofMetadata {
/// The bundle information describing the list of batches to be finalised on-chain.
pub bundle_info: BundleInfo,
/// The public-input digest for the bundle.
pub bundle_pi_hash: B256,
}
impl ProofMetadata for BundleProofMetadata {
type PublicInputs = BundleInfo;
fn pi_hash_info(&self) -> &Self::PublicInputs {
&self.bundle_info
}
}
impl<Metadata> From<&WrappedProof<Metadata>> for AggregationInput {
fn from(value: &WrappedProof<Metadata>) -> Self {
Self {
public_values: value.proof.public_values(),
commitment: ProgramCommitment::deserialize(&value.vk),
}
}
}
impl<Metadata: ProofMetadata> WrappedProof<Metadata> {
/// Sanity checks on the wrapped proof:
///
/// - pi_hash computed in host does in fact match pi_hash computed in guest
pub fn pi_hash_check(&self, fork_name: ForkName) -> bool {
let proof_pi = self.proof.public_values();
let expected_pi = self
.metadata
.pi_hash_info()
.pi_hash_by_fork(fork_name)
.0
.as_ref()
.iter()
.map(|&v| v as u32)
.collect::<Vec<_>>();
let ret = expected_pi == proof_pi;
if !ret {
tracing::warn!("pi mismatch: expected={expected_pi:?}, found={proof_pi:?}");
}
ret
}
}
impl<Metadata: ProofMetadata> PersistableProof for WrappedProof<Metadata> {
fn from_json<P: AsRef<Path>>(path_proof: P) -> Result<Self> {
crate::utils::read_json_deep(path_proof)
}
fn dump<P: AsRef<Path>>(&self, path_proof: P) -> Result<()> {
crate::utils::write_json(path_proof, &self)
}
}
#[cfg(test)]
mod tests {
use base64::{prelude::BASE64_STANDARD, Engine};
use sbv_primitives::B256;
use scroll_zkvm_types::{bundle::BundleInfo, proof::EvmProof, public_inputs::ForkName};
use super::*;
#[test]
fn test_roundtrip() -> eyre::Result<()> {
macro_rules! assert_roundtrip {
($fd:expr, $proof:ident) => {
let proof_str_expected =
std::fs::read_to_string(std::path::Path::new("./testdata").join($fd))?;
let proof = serde_json::from_str::<$proof>(&proof_str_expected)?;
let proof_str_got = serde_json::to_string(&proof)?;
assert_eq!(proof_str_got, proof_str_expected);
};
}
assert_roundtrip!("chunk-proof.json", ChunkProof);
assert_roundtrip!("batch-proof.json", BatchProof);
assert_roundtrip!("bundle-proof.json", BundleProof);
Ok(())
}
#[test]
fn test_dummy_proof() -> eyre::Result<()> {
// 1. Metadata
let metadata = {
let bundle_info = BundleInfo {
chain_id: 12345,
num_batches: 12,
prev_state_root: B256::repeat_byte(1),
prev_batch_hash: B256::repeat_byte(2),
post_state_root: B256::repeat_byte(3),
batch_hash: B256::repeat_byte(4),
withdraw_root: B256::repeat_byte(5),
msg_queue_hash: B256::repeat_byte(6),
};
let bundle_pi_hash = bundle_info.pi_hash(ForkName::EuclidV1);
BundleProofMetadata {
bundle_info,
bundle_pi_hash,
}
};
// 2. Proof
let (proof, proof_base64) = {
let proof = std::iter::empty()
.chain(std::iter::repeat_n(1, 1))
.chain(std::iter::repeat_n(2, 2))
.chain(std::iter::repeat_n(3, 3))
.chain(std::iter::repeat_n(4, 4))
.chain(std::iter::repeat_n(5, 5))
.chain(std::iter::repeat_n(6, 6))
.chain(std::iter::repeat_n(7, 7))
.chain(std::iter::repeat_n(8, 8))
.chain(std::iter::repeat_n(9, 9))
.collect::<Vec<u8>>();
let proof_base64 = BASE64_STANDARD.encode(&proof);
(proof, proof_base64)
};
// 3. Instances
let (instances, instances_base64) = {
// LE: [0x56, 0x34, 0x12, 0x00, 0x00, ..., 0x00]
// LE: [0x32, 0x54, 0x76, 0x98, 0x00, ..., 0x00]
let instances = std::iter::empty()
.chain(std::iter::repeat_n(0x00, 29))
.chain(std::iter::once(0x12))
.chain(std::iter::once(0x34))
.chain(std::iter::once(0x56))
.chain(std::iter::repeat_n(0x00, 28))
.chain(std::iter::once(0x98))
.chain(std::iter::once(0x76))
.chain(std::iter::once(0x54))
.chain(std::iter::once(0x32))
.collect::<Vec<u8>>();
let instances_base64 = BASE64_STANDARD.encode(&instances);
(instances, instances_base64)
};
// 4. VK
let (vk, vk_base64) = {
let vk = std::iter::empty()
.chain(std::iter::repeat_n(1, 9))
.chain(std::iter::repeat_n(2, 8))
.chain(std::iter::repeat_n(3, 7))
.chain(std::iter::repeat_n(4, 6))
.chain(std::iter::repeat_n(5, 5))
.chain(std::iter::repeat_n(6, 4))
.chain(std::iter::repeat_n(7, 3))
.chain(std::iter::repeat_n(8, 2))
.chain(std::iter::repeat_n(9, 1))
.collect::<Vec<u8>>();
let vk_base64 = BASE64_STANDARD.encode(&vk);
(vk, vk_base64)
};
let evm_proof = EvmProof { instances, proof };
let bundle_proof = metadata.new_proof(evm_proof, Some(vk.as_slice()));
let bundle_proof_json = serde_json::to_value(&bundle_proof)?;
assert_eq!(
bundle_proof_json.get("proof").unwrap(),
&serde_json::json!({
"proof": proof_base64,
"instances": instances_base64,
}),
);
assert_eq!(
bundle_proof_json.get("vk").unwrap(),
&serde_json::Value::String(vk_base64),
);
let bundle_proof_de = serde_json::from_value::<BundleProof>(bundle_proof_json)?;
assert_eq!(
bundle_proof_de.proof.as_evm_proof(),
bundle_proof.proof.as_evm_proof()
);
assert_eq!(bundle_proof_de.vk, bundle_proof.vk);
Ok(())
}
}
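As a usage note, `pi_hash_check` is the host-side guard that the metadata and the guest-computed public values agree. A minimal sketch of a helper as it might sit in this module (the proof path is a placeholder, and this module's imports are assumed to be in scope):

// Hypothetical helper; the path is a placeholder.
fn sanity_check_chunk_proof(path: &str) -> Result<()> {
    let proof = ChunkProof::from_json(path)?;
    // Recompute the public-input hash from the metadata and compare it
    // against the public values carried by the inner proof.
    if !proof.pi_hash_check(ForkName::EuclidV1) {
        eyre::bail!("pi hash mismatch for proof at {path}");
    }
    Ok(())
}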


@@ -0,0 +1,93 @@
pub mod batch;
pub mod bundle;
pub mod chunk;
pub mod chunk_interpreter;
pub use batch::BatchProvingTask;
pub use bundle::BundleProvingTask;
pub use chunk::{ChunkProvingTask, ChunkTask};
pub use chunk_interpreter::ChunkInterpreter;
pub use scroll_zkvm_types::task::ProvingTask;
use crate::{
proofs::{self, BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata},
utils::panic_catch,
};
use sbv_primitives::B256;
use scroll_zkvm_types::public_inputs::{ForkName, MultiVersionPublicInputs};
fn check_aggregation_proofs<Metadata>(
proofs: &[proofs::WrappedProof<Metadata>],
fork_name: ForkName,
) -> eyre::Result<()>
where
Metadata: proofs::ProofMetadata,
{
panic_catch(|| {
for w in proofs.windows(2) {
w[1].metadata
.pi_hash_info()
.validate(w[0].metadata.pi_hash_info(), fork_name);
}
})
.map_err(|e| eyre::eyre!("Chunk data validation failed: {}", e))?;
Ok(())
}
/// Generate the required artifacts for chunk proving
pub fn gen_universal_chunk_task(
mut task: ChunkProvingTask,
fork_name: ForkName,
interpreter: Option<impl ChunkInterpreter>,
) -> eyre::Result<(B256, ChunkProofMetadata, ProvingTask)> {
if let Some(interpreter) = interpreter {
task.prepare_task_via_interpret(interpreter)?;
}
let chunk_info = task.precheck_and_build_metadata()?;
let proving_task = task.try_into()?;
let expected_pi_hash = chunk_info.pi_hash_by_fork(fork_name);
Ok((
expected_pi_hash,
ChunkProofMetadata { chunk_info },
proving_task,
))
}
/// Generate the required artifacts for batch proving
pub fn gen_universal_batch_task(
task: BatchProvingTask,
fork_name: ForkName,
) -> eyre::Result<(B256, BatchProofMetadata, ProvingTask)> {
let batch_info = task.precheck_and_build_metadata()?;
let proving_task = task.try_into()?;
let expected_pi_hash = batch_info.pi_hash_by_fork(fork_name);
Ok((
expected_pi_hash,
BatchProofMetadata {
batch_info,
batch_hash: expected_pi_hash,
},
proving_task,
))
}
/// Generate the required artifacts for bundle proving
pub fn gen_universal_bundle_task(
task: BundleProvingTask,
fork_name: ForkName,
) -> eyre::Result<(B256, BundleProofMetadata, ProvingTask)> {
let bundle_info = task.precheck_and_build_metadata()?;
let proving_task = task.try_into()?;
let expected_pi_hash = bundle_info.pi_hash_by_fork(fork_name);
Ok((
expected_pi_hash,
BundleProofMetadata {
bundle_info,
bundle_pi_hash: expected_pi_hash,
},
proving_task,
))
}
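A small usage sketch of the batch path, as a hypothetical caller inside this crate (the task JSON would come from the coordinator; `ForkName::EuclidV1` is just an example fork):

// Hypothetical driver; `task_json` is a coordinator payload.
fn build_batch_task(task_json: &str) -> eyre::Result<(B256, ProvingTask)> {
    let task: BatchProvingTask = serde_json::from_str(task_json)?;
    let (pi_hash, metadata, proving_task) =
        gen_universal_batch_task(task, ForkName::EuclidV1)?;
    // As constructed above, the batch metadata records the expected
    // pi hash as its batch hash.
    assert_eq!(metadata.batch_hash, pi_hash);
    Ok((pi_hash, proving_task))
}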

Some files were not shown because too many files have changed in this diff