Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-11 23:18:07 -05:00

Compare commits
54 Commits
| SHA1 |
|---|
| 8a05300ab3 |
| 5d378a015d |
| 88066d72e8 |
| 226d32f9bf |
| 2ecc42e2f5 |
| de72e2dccb |
| edb51236e2 |
| 15a23478d1 |
| 9100a0bd4a |
| 0ede0cd41f |
| 9dceae1ca2 |
| 235ba874c6 |
| 6bee33036f |
| 1985e54ab3 |
| bfc0fdd7ce |
| 426c57a5fa |
| b7fdf48c30 |
| ad0c918944 |
| 1098876183 |
| 9e520e7769 |
| de7f6e56a9 |
| 3b323198dc |
| c11e0283e8 |
| a5a7844646 |
| 7ff5b190ec |
| b297edd28d |
| 47c85d4983 |
| 1552e98b79 |
| a65b3066a3 |
| 1f2b397bbd |
| ae791a0714 |
| c012f7132d |
| 6897cc54bd |
| d21fa36803 |
| fc75299eb3 |
| 4bfcd35d0c |
| 6d62f8e5fa |
| 392ae07736 |
| db80b47820 |
| daa1387208 |
| 67b05558e2 |
| 1e447b0fef |
| f7c6ecadf4 |
| 9d94f943e5 |
| de17ad43ff |
| 4233ad928c |
| 3050ccb40f |
| 12e89201a1 |
| a0ee508bbd |
| b8909d3795 |
| b7a172a519 |
| 80807dbb75 |
| a776ca7c82 |
| ea38ae7e96 |
.github/workflows/common.yml — 2 changes (vendored)

@@ -29,7 +29,7 @@ jobs:
     steps:
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2024-12-06
+          toolchain: nightly-2025-08-18
           override: true
           components: rustfmt, clippy
       - name: Install Go
.github/workflows/coordinator.yml — 2 changes (vendored)

@@ -33,7 +33,7 @@ jobs:
     steps:
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-12-03
+          toolchain: nightly-2025-08-18
           override: true
           components: rustfmt, clippy
       - name: Install Go
.github/workflows/docker.yml — 87 changes (vendored)

@@ -10,7 +10,8 @@ env:
 
 jobs:
   gas_oracle:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
        uses: actions/checkout@v4
@@ -50,12 +51,11 @@ jobs:
           push: true
           tags: |
             scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            scrolltech/${{ env.REPOSITORY }}:latest
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
 
   rollup_relayer:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
        uses: actions/checkout@v4
@@ -95,12 +95,11 @@ jobs:
           push: true
           tags: |
             scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            scrolltech/${{ env.REPOSITORY }}:latest
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
 
   blob_uploader:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
        uses: actions/checkout@v4
@@ -140,12 +139,11 @@ jobs:
           push: true
           tags: |
             scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            scrolltech/${{ env.REPOSITORY }}:latest
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
 
   rollup-db-cli:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
        uses: actions/checkout@v4
@@ -185,12 +183,11 @@ jobs:
           push: true
           tags: |
             scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            scrolltech/${{ env.REPOSITORY }}:latest
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
 
   bridgehistoryapi-fetcher:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
        uses: actions/checkout@v4
@@ -230,12 +227,11 @@ jobs:
           push: true
           tags: |
             scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            scrolltech/${{ env.REPOSITORY }}:latest
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
 
   bridgehistoryapi-api:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
        uses: actions/checkout@v4
@@ -275,12 +271,11 @@ jobs:
           push: true
           tags: |
             scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            scrolltech/${{ env.REPOSITORY }}:latest
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
 
   bridgehistoryapi-db-cli:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
        uses: actions/checkout@v4
@@ -320,12 +315,11 @@ jobs:
           push: true
           tags: |
             scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            scrolltech/${{ env.REPOSITORY }}:latest
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
 
   coordinator-api:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
        uses: actions/checkout@v4
@@ -352,48 +346,6 @@ jobs:
           REPOSITORY: coordinator-api
         run: |
           aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
-      - name: Setup SSH for repositories and clone them
-        run: |
-          mkdir -p ~/.ssh
-          chmod 700 ~/.ssh
-
-          # Setup for plonky3-gpu
-          echo "${{ secrets.PLONKY3_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/plonky3_gpu_key
-          chmod 600 ~/.ssh/plonky3_gpu_key
-          eval "$(ssh-agent -s)" > /dev/null
-          ssh-add ~/.ssh/plonky3_gpu_key 2>/dev/null
-          ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts 2>/dev/null
-          echo "Loaded plonky3-gpu key"
-
-          # Clone plonky3-gpu repository
-          ./build/dockerfiles/coordinator-api/clone_plonky3_gpu.sh
-
-          # Setup for openvm-stark-gpu
-          echo "${{ secrets.OPENVM_STARK_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/openvm_stark_gpu_key
-          chmod 600 ~/.ssh/openvm_stark_gpu_key
-          eval "$(ssh-agent -s)" > /dev/null
-          ssh-add ~/.ssh/openvm_stark_gpu_key 2>/dev/null
-          echo "Loaded openvm-stark-gpu key"
-
-          # Clone openvm-stark-gpu repository
-          ./build/dockerfiles/coordinator-api/clone_openvm_stark_gpu.sh
-
-          # Setup for openvm-gpu
-          echo "${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/openvm_gpu_key
-          chmod 600 ~/.ssh/openvm_gpu_key
-          eval "$(ssh-agent -s)" > /dev/null
-          ssh-add ~/.ssh/openvm_gpu_key 2>/dev/null
-          echo "Loaded openvm-gpu key"
-
-          # Clone openvm-gpu repository
-          ./build/dockerfiles/coordinator-api/clone_openvm_gpu.sh
-
-          # Show number of loaded keys
-          echo "Number of loaded keys: $(ssh-add -l | wc -l)"
-
-      - name: Checkout specific commits
-        run: |
-          ./build/dockerfiles/coordinator-api/checkout_all.sh
       - name: Build and push
         uses: docker/build-push-action@v3
         env:
@@ -406,12 +358,11 @@ jobs:
           push: true
           tags: |
             scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            scrolltech/${{ env.REPOSITORY }}:latest
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
 
   coordinator-cron:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
        uses: actions/checkout@v4
@@ -451,6 +402,4 @@ jobs:
           push: true
           tags: |
             scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            scrolltech/${{ env.REPOSITORY }}:latest
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
.github/workflows/intermediate-docker.yml — 11 changes (vendored)

@@ -22,10 +22,9 @@ on:
         required: true
         type: choice
         options:
-          - nightly-2023-12-03
-          - nightly-2022-12-10
           - 1.86.0
-        default: "nightly-2023-12-03"
+          - nightly-2025-08-18
+        default: "nightly-2025-08-18"
       PYTHON_VERSION:
         description: "Python version"
         required: false
@@ -40,7 +39,8 @@ on:
         options:
           - "11.7.1"
           - "12.2.2"
-        default: "11.7.1"
+          - "12.9.1"
+        default: "12.9.1"
       CARGO_CHEF_TAG:
         description: "Cargo chef version"
         required: true
@@ -69,7 +69,8 @@ defaults:
 
 jobs:
   build-and-publish-intermediate:
-    runs-on: ubuntu-latest
+    runs-on:
+      group: scroll-reth-runner-group
     steps:
       - name: Checkout code
        uses: actions/checkout@v4
.gitignore — 1 change (vendored)

@@ -24,3 +24,4 @@ sftp-config.json
 *~
 
 target
+zkvm-prover/config.json
Cargo.lock — 3618 changes (generated)

(File diff suppressed because it is too large.)
Cargo.toml — 47 changes

@@ -14,53 +14,54 @@ edition = "2021"
 homepage = "https://scroll.io"
 readme = "README.md"
 repository = "https://github.com/scroll-tech/scroll"
-version = "4.5.8"
+version = "4.7.1"
 
 [workspace.dependencies]
-scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "29c99de", package = "scroll-zkvm-prover" }
-scroll-zkvm-verifier-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "29c99de", package = "scroll-zkvm-verifier" }
-scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "29c99de" }
+scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.0" }
+scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.0" }
+scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.0" }
 
-sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade", features = ["scroll"] }
-sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade" }
+sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91", features = ["scroll", "rkyv"] }
+sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91" }
+sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91", features = ["scroll"] }
 
 metrics = "0.23.0"
 metrics-util = "0.17"
 metrics-tracing-context = "0.16.0"
 
 anyhow = "1.0"
-alloy = { version = "0.11", default-features = false }
-alloy-primitives = { version = "0.8", default-features = false }
+alloy = { version = "1", default-features = false }
+alloy-primitives = { version = "1.4.1", default-features = false, features = ["tiny-keccak"] }
 # also use this to trigger "serde" feature for primitives
-alloy-serde = { version = "0.8", default-features = false }
+alloy-serde = { version = "1", default-features = false }
 
 rkyv = "0.8"
 serde = { version = "1", default-features = false, features = ["derive"] }
 serde_json = { version = "1.0" }
 serde_derive = "1.0"
-serde_with = "3.11.0"
+serde_with = "3"
 itertools = "0.14"
 tiny-keccak = "2.0"
 tracing = "0.1"
 eyre = "0.6"
 bincode_v1 = { version = "1.3", package = "bincode"}
 snark-verifier-sdk = { version = "0.2.0", default-features = false, features = [
     "loader_halo2",
     "halo2-axiom",
     "display",
 ] }
 once_cell = "1.20"
 base64 = "0.22"
 
 #TODO: upgrade when Feynman
 vm-zstd = { git = "https://github.com/scroll-tech/rust-zstd-decompressor.git", tag = "v0.1.1" }
 
 [patch.crates-io]
-alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v0.8.18-euclid-upgrade" }
 ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
 tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-euclid-upgrade" }
+revm = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-bytecode = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-context = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-context-interface = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-database = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-database-interface = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-handler = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-inspector = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-interpreter = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-precompile = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-primitives = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-state = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
 
 [profile.maxperf]
 inherits = "release"
 lto = "fat"
 codegen-units = 1
Makefile — 2 changes

@@ -1,6 +1,6 @@
 .PHONY: fmt dev_docker build_test_docker run_test_docker clean update
 
-L2GETH_TAG=scroll-v5.8.23
+L2GETH_TAG=scroll-v5.9.7
 
 help: ## Display this help message
 	@grep -h \
go.mod

@@ -10,15 +10,18 @@ require (
 	github.com/go-redis/redis/v8 v8.11.5
 	github.com/pressly/goose/v3 v3.16.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c
-	github.com/stretchr/testify v1.9.0
+	github.com/scroll-tech/da-codec v0.9.0
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20251113125950-906b730d541d
+	github.com/stretchr/testify v1.10.0
 	github.com/urfave/cli/v2 v2.25.7
 	golang.org/x/sync v0.11.0
 	gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
 )
 
-replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c // It's a hotfix for the header hash incompatibility issue, pls change this with caution
+// Hotfix for header hash incompatibility issue.
+// PR: https://github.com/scroll-tech/go-ethereum/pull/1133/
+// CAUTION: Requires careful handling. When upgrading go-ethereum, ensure this fix remains up-to-date in this branch.
+replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e
 
 require (
 	dario.cat/mergo v1.0.0 // indirect
@@ -30,10 +33,10 @@ require (
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
 	github.com/chenzhuoyu/iasm v0.9.0 // indirect
-	github.com/consensys/bavard v0.1.13 // indirect
-	github.com/consensys/gnark-crypto v0.13.0 // indirect
+	github.com/consensys/bavard v0.1.27 // indirect
+	github.com/consensys/gnark-crypto v0.16.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
-	github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
+	github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
 	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
@@ -41,7 +44,7 @@ require (
 	github.com/docker/docker v26.1.0+incompatible // indirect
 	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/edsrzf/mmap-go v1.0.0 // indirect
-	github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
+	github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
 	github.com/fjl/memsize v0.0.2 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/gabriel-vasile/mimetype v1.4.2 // indirect
@@ -98,7 +101,7 @@ require (
 	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
 	github.com/sourcegraph/conc v0.3.0 // indirect
 	github.com/status-im/keycard-go v0.2.0 // indirect
-	github.com/supranational/blst v0.3.13 // indirect
+	github.com/supranational/blst v0.3.15 // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
 	github.com/tklauser/go-sysconf v0.3.14 // indirect
 	github.com/tklauser/numcpus v0.9.0 // indirect
@@ -110,7 +113,7 @@ require (
 	go.opentelemetry.io/otel/trace v1.24.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/arch v0.5.0 // indirect
-	golang.org/x/crypto v0.24.0 // indirect
+	golang.org/x/crypto v0.32.0 // indirect
 	golang.org/x/net v0.25.0 // indirect
 	golang.org/x/sys v0.30.0 // indirect
 	golang.org/x/text v0.21.0 // indirect
go.sum

@@ -53,16 +53,16 @@ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ
 github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
 github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
 github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
-github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
-github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
-github.com/consensys/gnark-crypto v0.13.0 h1:VPULb/v6bbYELAPTDFINEVaMTTybV5GLxDdcjnS+4oc=
-github.com/consensys/gnark-crypto v0.13.0/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o=
+github.com/consensys/bavard v0.1.27 h1:j6hKUrGAy/H+gpNrpLU3I26n1yc+VMGmd6ID5+gAhOs=
+github.com/consensys/bavard v0.1.27/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
+github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo=
+github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
 github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
 github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
 github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
-github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
+github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
+github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
 github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -88,8 +88,8 @@ github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T
 github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E=
 github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
 github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
-github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
-github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
+github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
+github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
 github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
 github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -214,8 +214,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
-github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
+github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
+github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
 github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
 github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
@@ -309,10 +309,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6 h1:vb2XLvQwCf+F/ifP6P/lfeiQrHY6+Yb/E3R4KHXLqSE=
-github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c h1:IpEBKM6O+xOK2qZVZztGxcobFXkKMb5hAkBEVzfXjVg=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
+github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
+github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e h1:7U1/JilCPIqJTYNgKzlfBEkRNORRwJ8+PCmdR8/XK6A=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
@@ -341,10 +341,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
-github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
+github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
 github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
@@ -387,8 +387,8 @@ golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
 golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
-golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
+golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
+golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
 golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
 golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -38,6 +38,7 @@ type FetcherConfig struct {
 	BeaconNodeAPIEndpoint  string `json:"BeaconNodeAPIEndpoint"`
 	BlobScanAPIEndpoint    string `json:"BlobScanAPIEndpoint"`
 	BlockNativeAPIEndpoint string `json:"BlockNativeAPIEndpoint"`
+	AwsS3Endpoint          string `json:"AwsS3Endpoint"`
 }
 
 // RedisConfig redis config
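For reference, the new AwsS3Endpoint field flows in through the fetcher's JSON config like the existing endpoints. A minimal, self-contained sketch of how the tag maps (the struct is trimmed to the fields shown above, and the endpoint values are placeholders, not real configuration):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the FetcherConfig fields shown in the diff above.
type FetcherConfig struct {
	BeaconNodeAPIEndpoint  string `json:"BeaconNodeAPIEndpoint"`
	BlobScanAPIEndpoint    string `json:"BlobScanAPIEndpoint"`
	BlockNativeAPIEndpoint string `json:"BlockNativeAPIEndpoint"`
	AwsS3Endpoint          string `json:"AwsS3Endpoint"`
}

func main() {
	// Hypothetical config snippet; the URLs are placeholders.
	raw := `{"BeaconNodeAPIEndpoint": "http://beacon:5052", "AwsS3Endpoint": "https://example-bucket.s3.amazonaws.com"}`
	var cfg FetcherConfig
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	// Non-empty, so the fetcher would register an S3 blob client.
	fmt.Println(cfg.AwsS3Endpoint)
}
```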
@@ -39,6 +39,9 @@ type L1MessageFetcher struct {
 // NewL1MessageFetcher creates a new L1MessageFetcher instance.
 func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) (*L1MessageFetcher, error) {
 	blobClient := blob_client.NewBlobClients()
+	if cfg.AwsS3Endpoint != "" {
+		blobClient.AddBlobClient(blob_client.NewAwsS3Client(cfg.AwsS3Endpoint))
+	}
 	if cfg.BeaconNodeAPIEndpoint != "" {
 		beaconNodeClient, err := blob_client.NewBeaconNodeClient(cfg.BeaconNodeAPIEndpoint)
 		if err != nil {
```
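The constructor registers blob sources one after another; since the S3 branch now comes before the beacon-node branch, registration order presumably puts S3 first in line, with later clients acting as fallbacks. A minimal sketch of that fallback shape — the interface and method names below are stand-ins, not the actual blob_client package API:

```go
package blobclient

import (
	"context"
	"errors"
	"fmt"
)

// BlobClient is a minimal stand-in for a single blob source (S3, beacon node, ...).
type BlobClient interface {
	GetBlobByVersionedHash(ctx context.Context, hash string) ([]byte, error)
}

// BlobClients tries each registered client in registration order until one succeeds.
type BlobClients struct {
	clients []BlobClient
}

func (b *BlobClients) AddBlobClient(c BlobClient) { b.clients = append(b.clients, c) }

func (b *BlobClients) GetBlobByVersionedHash(ctx context.Context, hash string) ([]byte, error) {
	var lastErr error
	for _, c := range b.clients {
		blob, err := c.GetBlobByVersionedHash(ctx, hash)
		if err == nil {
			return blob, nil
		}
		lastErr = err // remember the failure and fall through to the next source
	}
	if lastErr == nil {
		lastErr = errors.New("no blob clients configured")
	}
	return nil, fmt.Errorf("all blob clients failed: %w", lastErr)
}
```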
@@ -361,7 +361,6 @@ func getTxHistoryInfoFromBridgeBatchDepositMessage(message *orm.BridgeBatchDepos
 func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pageNum, pageSize uint64) ([]*types.TxHistoryInfo, uint64, bool, error) {
 	start := int64((pageNum - 1) * pageSize)
 	end := start + int64(pageSize) - 1
-
 	total, err := h.redis.ZCard(ctx, cacheKey).Result()
 	if err != nil {
 		log.Error("failed to get zcard result", "error", err)
@@ -372,6 +371,10 @@ func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pa
 		return nil, 0, false, nil
 	}
 
+	if start >= total {
+		return nil, 0, false, nil
+	}
+
 	values, err := h.redis.ZRevRange(ctx, cacheKey, start, end).Result()
 	if err != nil {
 		log.Error("failed to get zrange result", "error", err)
@@ -450,5 +453,6 @@ func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKe
 		log.Error("cache miss after write, expect hit", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
 		return nil, 0, err
 	}
+
 	return pagedTxs, total, nil
 }
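The new start >= total guard rejects pages that fall entirely past the end of the cached sorted set before issuing the range query; without it, an out-of-range page would return an empty slice and be indistinguishable from a genuinely empty result. A standalone sketch of the same pagination contract against the go-redis/v8 client pinned in go.mod above (function and variable names are illustrative):

```go
package cache

import (
	"context"

	"github.com/go-redis/redis/v8"
)

// PageFromZSet reads one page of a sorted set, newest first, and reports
// via the bool whether the cache could serve the request at all.
func PageFromZSet(ctx context.Context, rdb *redis.Client, key string, pageNum, pageSize uint64) ([]string, int64, bool, error) {
	start := int64((pageNum - 1) * pageSize)
	end := start + int64(pageSize) - 1

	total, err := rdb.ZCard(ctx, key).Result()
	if err != nil {
		return nil, 0, false, err
	}
	if total == 0 {
		return nil, 0, false, nil // nothing cached: caller falls back to the database
	}
	// Guard pages past the end of the set, mirroring the change above.
	if start >= total {
		return nil, 0, false, nil
	}

	values, err := rdb.ZRevRange(ctx, key, start, end).Result()
	if err != nil {
		return nil, 0, false, err
	}
	return values, total, true, nil
}
```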
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"math/big"
+	"time"
 
 	"github.com/scroll-tech/da-codec/encoding"
 	"github.com/scroll-tech/go-ethereum/common"
@@ -252,6 +253,11 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
 	// Key: commit transaction hash
 	// Value: parent batch hashes (in order) for each processed CommitBatch event in the transaction
 	txBlobIndexMap := make(map[common.Hash][]common.Hash)
+
+	// Cache for the previous transaction to avoid duplicate fetches
+	var lastTxHash common.Hash
+	var lastTx *types.Transaction
+
 	var l1BatchEvents []*orm.BatchEvent
 	for _, vlog := range logs {
 		switch vlog.Topics[0] {
@@ -261,11 +267,28 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
 				log.Error("Failed to unpack CommitBatch event", "err", err)
 				return nil, err
 			}
-			commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
-			if err != nil || isPending {
-				log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
-				return nil, err
+
+			// Get transaction, reuse if it's the same as previous
+			var commitTx *types.Transaction
+			if lastTxHash == vlog.TxHash && lastTx != nil {
+				commitTx = lastTx
+			} else {
+				log.Debug("Fetching commit batch transaction", "txHash", vlog.TxHash.String())
+
+				// Create 10-second timeout context for transaction fetch
+				txCtx, txCancel := context.WithTimeout(ctx, 10*time.Second)
+				fetchedTx, isPending, err := client.TransactionByHash(txCtx, vlog.TxHash)
+				txCancel()
+
+				if err != nil || isPending {
+					log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
+					return nil, err
+				}
+				commitTx = fetchedTx
+				lastTxHash = vlog.TxHash
+				lastTx = commitTx
 			}
+
 			version, startBlock, endBlock, err := utils.GetBatchVersionAndBlockRangeFromCalldata(commitTx.Data())
 			if err != nil {
 				log.Error("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
@@ -305,7 +328,13 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
 				return nil, fmt.Errorf("batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex, event.BatchHash.String(), calculatedBatch.Hash().String())
 			}
 
-			blocks, err := e.getBatchBlockRangeFromBlob(ctx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
+			log.Debug("Processing blob data", "blobVersionedHash", blobVersionedHash.String(), "batchIndex", event.BatchIndex.Uint64(), "currentIndex", currentIndex)
+
+			// Create 20-second timeout context for blob processing
+			blobCtx, blobCancel := context.WithTimeout(ctx, 20*time.Second)
+			blocks, err := e.getBatchBlockRangeFromBlob(blobCtx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
+			blobCancel()
+
 			if err != nil {
 				return nil, fmt.Errorf("failed to process versioned blob, blobVersionedHash: %s, block number: %d, blob index: %d, err: %w",
 					blobVersionedHash.String(), vlog.BlockNumber, currentIndex, err)
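Two things changed in this loop: the last fetched transaction is memoized (CommitBatch events from the same L1 transaction arrive back to back, so consecutive lookups usually hit the cache), and each RPC call now runs under its own deadline so a single slow request cannot stall the parser while the parent context still governs overall cancellation. A generic sketch of the per-call deadline wrapper, under the assumption that this is the intent of the pattern (names are illustrative):

```go
package parser

import (
	"context"
	"time"
)

// fetchWithTimeout bounds a single RPC call with its own deadline. The
// parent ctx still propagates cancellation; d only caps this one call.
func fetchWithTimeout[T any](ctx context.Context, d time.Duration, fetch func(context.Context) (T, error)) (T, error) {
	callCtx, cancel := context.WithTimeout(ctx, d)
	defer cancel() // release the timer as soon as the call returns
	return fetch(callCtx)
}
```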
@@ -157,7 +157,7 @@ func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, s
 	db = db.Where("tx_status in (?)", []types.TxStatusType{types.TxStatusTypeSent, types.TxStatusTypeFailedRelayed, types.TxStatusTypeRelayTxReverted})
 	db = db.Where("sender = ?", sender)
 	db = db.Order("block_timestamp desc")
-	db = db.Limit(500)
+	db = db.Limit(10000)
 	if err := db.Find(&messages).Error; err != nil {
 		return nil, fmt.Errorf("failed to get L2 claimable withdrawal messages by sender address, sender: %v, error: %w", sender, err)
 	}
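The only functional change here is the row cap, but the chained-builder shape is worth seeing end to end: filters, ordering, and the limit are all accumulated on the *gorm.DB before Find executes the SQL. A hedged sketch with gorm (the table and column names below are illustrative, not the actual schema):

```go
package orm

import (
	"context"
	"fmt"

	"gorm.io/gorm"
)

// latestBySender builds the query step by step, then runs it with Find.
func latestBySender(ctx context.Context, db *gorm.DB, sender string, limit int) ([]map[string]interface{}, error) {
	var rows []map[string]interface{}
	q := db.WithContext(ctx).Table("cross_message_v2") // illustrative table name
	q = q.Where("sender = ?", sender)
	q = q.Order("block_timestamp desc")
	q = q.Limit(limit) // the cap raised from 500 to 10000 in the change above
	if err := q.Find(&rows).Error; err != nil {
		return nil, fmt.Errorf("query failed for sender %s: %w", sender, err)
	}
	return rows, nil
}
```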
build/common.mk — new file (16 lines)

@@ -0,0 +1,16 @@
+UNAME_S := $(shell uname -s)
+IS_DARWIN := $(findstring Darwin,$(UNAME_S))
+
+SHLIB_EXT := so
+ifeq ($(UNAME_S),Darwin)
+SHLIB_EXT := dylib
+endif
+
+LIB_ZKP_NAME := libzkp.$(SHLIB_EXT)
+
+define macos_codesign
+	@if [ -n "$(IS_DARWIN)" ]; then \
+		codesign --force --sign - '$(1)'; \
+		codesign --verify --deep --verbose '$(1)'; \
+	fi
+endef
coordinator-api Dockerfile

@@ -1,9 +1,9 @@
 # Build libzkp dependency
-FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.21-rust-nightly-2023-12-03 as chef
+FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as chef
 WORKDIR app
 
 FROM chef as planner
-COPY ./crates ./
+COPY ./crates/ ./crates/
 COPY ./Cargo.* ./
 COPY ./rust-toolchain ./
 RUN cargo chef prepare --recipe-path recipe.json
@@ -11,21 +11,15 @@ RUN cargo chef prepare --recipe-path recipe.json
 FROM chef as zkp-builder
 COPY ./rust-toolchain ./
 COPY --from=planner /app/recipe.json recipe.json
-# run scripts to get openvm-gpu
-COPY ./build/dockerfiles/coordinator-api/plonky3-gpu /plonky3-gpu
-COPY ./build/dockerfiles/coordinator-api/openvm-stark-gpu /openvm-stark-gpu
-COPY ./build/dockerfiles/coordinator-api/openvm-gpu /openvm-gpu
-COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
-COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
 RUN cargo chef cook --release --recipe-path recipe.json
 
-COPY ./crates ./
+COPY ./crates/ ./crates/
 COPY ./Cargo.* ./
 COPY .git .git
 RUN cargo build --release -p libzkp-c
 
 
 # Download Go dependencies
-FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.21-rust-nightly-2023-12-03 as base
+FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as base
 WORKDIR /src
 COPY go.work* ./
 COPY ./rollup/go.* ./rollup/
@@ -45,7 +39,7 @@ RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_a
 RUN mv coordinator/internal/logic/libzkp/lib /bin/
 
 # Pull coordinator into a second stage deploy ubuntu container
-FROM nvidia/cuda:11.7.1-runtime-ubuntu22.04
+FROM ubuntu:20.04
 ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/lib
 ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
 # ENV CHAIN_ID=534353
@@ -4,3 +4,5 @@ docs/
 l2geth/
 rpc-gateway/
 *target/*
+
+permissionless-batches/conf/
Deleted: build/dockerfiles/coordinator-api/checkout_all.sh

@@ -1,17 +0,0 @@
-#!/bin/bash
-set -uex
-
-PLONKY3_GPU_COMMIT=261b322 # v0.2.0
-OPENVM_STARK_GPU_COMMIT=3082234 # PR#48
-OPENVM_GPU_COMMIT=8094b4f # branch: patch-v1.2.0
-
-DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
-
-# checkout plonky3-gpu
-cd $DIR/plonky3-gpu && git checkout ${PLONKY3_GPU_COMMIT}
-
-# checkout openvm-stark-gpu
-cd $DIR/openvm-stark-gpu && git checkout ${OPENVM_STARK_GPU_COMMIT}
-
-# checkout openvm-gpu
-cd $DIR/openvm-gpu && git checkout ${OPENVM_GPU_COMMIT}
Deleted: build/dockerfiles/coordinator-api/clone_openvm_gpu.sh

@@ -1,10 +0,0 @@
-#!/bin/bash
-set -uex
-
-DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
-
-# clone openvm-gpu if not exists
-if [ ! -d $DIR/openvm-gpu ]; then
-    git clone git@github.com:scroll-tech/openvm-gpu.git $DIR/openvm-gpu
-fi
-cd $DIR/openvm-gpu && git fetch --all --force
Deleted: build/dockerfiles/coordinator-api/clone_openvm_stark_gpu.sh

@@ -1,10 +0,0 @@
-#!/bin/bash
-set -uex
-
-DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
-
-# clone openvm-stark-gpu if not exists
-if [ ! -d $DIR/openvm-stark-gpu ]; then
-    git clone git@github.com:scroll-tech/openvm-stark-gpu.git $DIR/openvm-stark-gpu
-fi
-cd $DIR/openvm-stark-gpu && git fetch --all --force
Deleted: build/dockerfiles/coordinator-api/clone_plonky3_gpu.sh

@@ -1,10 +0,0 @@
-#!/bin/bash
-set -uex
-
-DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
-
-# clone plonky3-gpu if not exists
-if [ ! -d $DIR/plonky3-gpu ]; then
-    git clone git@github.com:scroll-tech/plonky3-gpu.git $DIR/plonky3-gpu
-fi
-cd $DIR/plonky3-gpu && git fetch --all --force
Deleted: build/dockerfiles/coordinator-api/config.toml

@@ -1,92 +0,0 @@
-# openvm
-# same order and features as zkvm-prover/Cargo.toml.gpu
-[patch."ssh://git@github.com/scroll-tech/openvm-gpu.git"]
-openvm = { path = "/openvm-gpu/crates/toolchain/openvm", default-features = false }
-openvm-algebra-complex-macros = { path = "/openvm-gpu/extensions/algebra/complex-macros", default-features = false }
-openvm-algebra-guest = { path = "/openvm-gpu/extensions/algebra/guest", default-features = false }
-openvm-bigint-guest = { path = "/openvm-gpu/extensions/bigint/guest", default-features = false }
-openvm-build = { path = "/openvm-gpu/crates/toolchain/build", default-features = false }
-openvm-circuit = { path = "/openvm-gpu/crates/vm", default-features = false }
-openvm-custom-insn = { path = "/openvm-gpu/crates/toolchain/custom_insn", default-features = false }
-openvm-continuations = { path = "/openvm-gpu/crates/continuations", default-features = false }
-openvm-ecc-guest = { path = "/openvm-gpu/extensions/ecc/guest", default-features = false }
-openvm-instructions ={ path = "/openvm-gpu/crates/toolchain/instructions", default-features = false }
-openvm-keccak256-guest = { path = "/openvm-gpu/extensions/keccak256/guest", default-features = false }
-openvm-native-circuit = { path = "/openvm-gpu/extensions/native/circuit", default-features = false }
-openvm-native-compiler = { path = "/openvm-gpu/extensions/native/compiler", default-features = false }
-openvm-native-recursion = { path = "/openvm-gpu/extensions/native/recursion", default-features = false }
-openvm-native-transpiler = { path = "/openvm-gpu/extensions/native/transpiler", default-features = false }
-openvm-pairing-guest = { path = "/openvm-gpu/extensions/pairing/guest", default-features = false }
-openvm-rv32im-guest = { path = "/openvm-gpu/extensions/rv32im/guest", default-features = false }
-openvm-rv32im-transpiler = { path = "/openvm-gpu/extensions/rv32im/transpiler", default-features = false }
-openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
-openvm-sha256-guest = { path = "/openvm-gpu/extensions/sha256/guest", default-features = false }
-openvm-transpiler = { path = "/openvm-gpu/crates/toolchain/transpiler", default-features = false }
-
-# stark-backend
-[patch."https://github.com/openvm-org/stark-backend.git"]
-openvm-stark-backend = { path = "/openvm-stark-gpu/crates/stark-backend", features = ["gpu"] }
-openvm-stark-sdk = { path = "/openvm-stark-gpu/crates/stark-sdk", features = ["gpu"] }
-
-[patch."ssh://git@github.com/scroll-tech/openvm-stark-gpu.git"]
-openvm-stark-backend = { path = "/openvm-stark-gpu/crates/stark-backend", features = ["gpu"] }
-openvm-stark-sdk = { path = "/openvm-stark-gpu/crates/stark-sdk", features = ["gpu"] }
-
-# plonky3
-[patch."https://github.com/Plonky3/Plonky3.git"]
-p3-air = { path = "/plonky3-gpu/air" }
-p3-field = { path = "/plonky3-gpu/field" }
-p3-commit = { path = "/plonky3-gpu/commit" }
-p3-matrix = { path = "/plonky3-gpu/matrix" }
-p3-baby-bear = { path = "/plonky3-gpu/baby-bear" }
-p3-koala-bear = { path = "/plonky3-gpu/koala-bear" }
-p3-util = { path = "/plonky3-gpu/util" }
-p3-challenger = { path = "/plonky3-gpu/challenger" }
-p3-dft = { path = "/plonky3-gpu/dft" }
-p3-fri = { path = "/plonky3-gpu/fri" }
-p3-goldilocks = { path = "/plonky3-gpu/goldilocks" }
-p3-keccak = { path = "/plonky3-gpu/keccak" }
-p3-keccak-air = { path = "/plonky3-gpu/keccak-air" }
-p3-blake3 = { path = "/plonky3-gpu/blake3" }
-p3-mds = { path = "/plonky3-gpu/mds" }
-p3-monty-31 = { path = "/plonky3-gpu/monty-31" }
-p3-merkle-tree = { path = "/plonky3-gpu/merkle-tree" }
-p3-poseidon = { path = "/plonky3-gpu/poseidon" }
-p3-poseidon2 = { path = "/plonky3-gpu/poseidon2" }
-p3-poseidon2-air = { path = "/plonky3-gpu/poseidon2-air" }
-p3-symmetric = { path = "/plonky3-gpu/symmetric" }
-p3-uni-stark = { path = "/plonky3-gpu/uni-stark" }
-p3-maybe-rayon = { path = "/plonky3-gpu/maybe-rayon" }
-p3-bn254-fr = { path = "/plonky3-gpu/bn254-fr" }
-
-# gpu crates
-[patch."ssh://git@github.com/scroll-tech/plonky3-gpu.git"]
-p3-gpu-base = { path = "/plonky3-gpu/gpu-base" }
-p3-gpu-build = { path = "/plonky3-gpu/gpu-build" }
-p3-gpu-field = { path = "/plonky3-gpu/gpu-field" }
-p3-gpu-backend = { path = "/plonky3-gpu/gpu-backend" }
-p3-gpu-module = { path = "/plonky3-gpu/gpu-module" }
-p3-air = { path = "/plonky3-gpu/air" }
-p3-field = { path = "/plonky3-gpu/field" }
-p3-commit = { path = "/plonky3-gpu/commit" }
-p3-matrix = { path = "/plonky3-gpu/matrix" }
-p3-baby-bear = { path = "/plonky3-gpu/baby-bear" }
-p3-koala-bear = { path = "/plonky3-gpu/koala-bear" }
-p3-util = { path = "/plonky3-gpu/util" }
-p3-challenger = { path = "/plonky3-gpu/challenger" }
-p3-dft = { path = "/plonky3-gpu/dft" }
-p3-fri = { path = "/plonky3-gpu/fri" }
-p3-goldilocks = { path = "/plonky3-gpu/goldilocks" }
-p3-keccak = { path = "/plonky3-gpu/keccak" }
-p3-keccak-air = { path = "/plonky3-gpu/keccak-air" }
-p3-blake3 = { path = "/plonky3-gpu/blake3" }
-p3-mds = { path = "/plonky3-gpu/mds" }
-p3-monty-31 = { path = "/plonky3-gpu/monty-31" }
-p3-merkle-tree = { path = "/plonky3-gpu/merkle-tree" }
-p3-poseidon = { path = "/plonky3-gpu/poseidon" }
-p3-poseidon2 = { path = "/plonky3-gpu/poseidon2" }
-p3-poseidon2-air = { path = "/plonky3-gpu/poseidon2-air" }
-p3-symmetric = { path = "/plonky3-gpu/symmetric" }
-p3-uni-stark = { path = "/plonky3-gpu/uni-stark" }
-p3-maybe-rayon = { path = "/plonky3-gpu/maybe-rayon" }
-p3-bn254-fr = { path = "/plonky3-gpu/bn254-fr" }
Deleted: build/dockerfiles/coordinator-api/gitconfig

@@ -1,2 +0,0 @@
-[url "https://github.com/"]
-	insteadOf = ssh://git@github.com/
@@ -4,3 +4,5 @@ docs/
 l2geth/
 rpc-gateway/
 *target/*
+
+permissionless-batches/conf/
@@ -4,3 +4,5 @@ docs/
 l2geth/
 rpc-gateway/
 *target/*
+
+permissionless-batches/conf/
@@ -1,5 +1,8 @@
 assets/
+contracts/
 docs/
 l2geth/
 rpc-gateway/
 *target/*
+
+permissionless-batches/conf/
build/dockerfiles/recovery_permissionless_batches.Dockerfile — new file (30 lines)

@@ -0,0 +1,30 @@
+# Download Go dependencies
+FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
+
+WORKDIR /src
+COPY go.work* ./
+COPY ./rollup/go.* ./rollup/
+COPY ./common/go.* ./common/
+COPY ./coordinator/go.* ./coordinator/
+COPY ./database/go.* ./database/
+COPY ./tests/integration-test/go.* ./tests/integration-test/
+COPY ./bridge-history-api/go.* ./bridge-history-api/
+RUN go mod download -x
+
+# Build rollup_relayer
+FROM base as builder
+
+RUN --mount=target=. \
+    --mount=type=cache,target=/root/.cache/go-build \
+    cd /src/rollup/cmd/permissionless_batches/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer
+
+# Pull rollup_relayer into a second stage deploy ubuntu container
+FROM ubuntu:20.04
+
+RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
+
+ENV CGO_LDFLAGS="-ldl"
+
+COPY --from=builder /bin/rollup_relayer /bin/
+WORKDIR /app
+ENTRYPOINT ["rollup_relayer"]
@@ -0,0 +1,8 @@
+assets/
+contracts/
+docs/
+l2geth/
+rpc-gateway/
+*target/*
+
+permissionless-batches/conf/
@@ -1,5 +1,8 @@
 assets/
+contracts/
 docs/
 l2geth/
 rpc-gateway/
 *target/*
+
+permissionless-batches/conf/
common/.gitignore — 3 changes (vendored)

@@ -1,4 +1,3 @@
 /build/bin
 .idea
-libzkp/impl/target
-libzkp/interface/*.a
+libzkp
@@ -4,5 +4,4 @@ test:
 	go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
 
 lint: ## Lint the files - used for CI
-	GOBIN=$(PWD)/build/bin go run ../build/lint.go
-	cd libzkp/impl && cargo fmt --all -- --check && cargo clippy --release -- -D warnings
+	GOBIN=$(PWD)/build/bin go run ../build/lint.go
go.mod

@@ -15,7 +15,7 @@ require (
 	github.com/modern-go/reflect2 v1.0.2
 	github.com/orcaman/concurrent-map v1.0.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
 	github.com/stretchr/testify v1.10.0
 	github.com/testcontainers/testcontainers-go v0.30.0
 	github.com/testcontainers/testcontainers-go/modules/compose v0.30.0
@@ -64,7 +64,7 @@ require (
 	github.com/containerd/typeurl/v2 v2.1.1 // indirect
 	github.com/cpuguy83/dockercfg v0.3.1 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
-	github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
+	github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
 	github.com/distribution/reference v0.5.0 // indirect
@@ -79,7 +79,7 @@ require (
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/edsrzf/mmap-go v1.0.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.10.1 // indirect
-	github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
+	github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fjl/memsize v0.0.2 // indirect
 	github.com/fsnotify/fsevents v0.1.1 // indirect
@@ -184,7 +184,7 @@ require (
 	github.com/rjeczalik/notify v0.9.1 // indirect
 	github.com/rs/cors v1.7.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 // indirect
+	github.com/scroll-tech/da-codec v0.9.0 // indirect
 	github.com/scroll-tech/zktrie v0.8.4 // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
 	github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
@@ -198,7 +198,7 @@ require (
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/spf13/viper v1.4.0 // indirect
 	github.com/status-im/keycard-go v0.2.0 // indirect
-	github.com/supranational/blst v0.3.13 // indirect
+	github.com/supranational/blst v0.3.15 // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
 	github.com/theupdateframework/notary v0.7.0 // indirect
 	github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 // indirect
go.sum

@@ -155,8 +155,8 @@ github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoY
 github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
 github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
-github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
+github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
+github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
@@ -214,8 +214,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
 github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
 github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
 github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
-github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
-github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
+github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
+github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
@@ -636,10 +636,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
-github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
+github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
+github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
@@ -707,8 +707,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
-github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
+github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
+github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
 github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E=
@@ -10,6 +10,7 @@ import (
 	"time"
 
 	"github.com/scroll-tech/go-ethereum/ethclient"
+	"github.com/scroll-tech/go-ethereum/rpc"
 	"github.com/testcontainers/testcontainers-go"
 	"github.com/testcontainers/testcontainers-go/modules/compose"
 	"github.com/testcontainers/testcontainers-go/modules/postgres"
@@ -220,11 +221,21 @@ func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
 
 // GetL2GethClient returns a ethclient by dialing running L2Geth
 func (t *TestcontainerApps) GetL2GethClient() (*ethclient.Client, error) {
+	rpcCli, err := t.GetL2Client()
+	if err != nil {
+		return nil, err
+	}
+	return ethclient.NewClient(rpcCli), nil
+}
+
+// GetL2Client returns a rpc client by dialing running L2Geth
+func (t *TestcontainerApps) GetL2Client() (*rpc.Client, error) {
 	endpoint, err := t.GetL2GethEndPoint()
 	if err != nil {
 		return nil, err
 	}
-	client, err := ethclient.Dial(endpoint)
+	client, err := rpc.Dial(endpoint)
 	if err != nil {
 		return nil, err
 	}
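Splitting GetL2Client out of GetL2GethClient leaves both API levels reachable from one dialed connection: the typed ethclient for standard queries, and the raw rpc.Client for custom JSON-RPC methods. A sketch of the pattern using the same go-ethereum packages (the helper names below are illustrative):

```go
package testutil

import (
	"context"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rpc"
)

// dialBoth dials once and returns both the raw and the typed client.
func dialBoth(endpoint string) (*rpc.Client, *ethclient.Client, error) {
	rpcCli, err := rpc.Dial(endpoint)
	if err != nil {
		return nil, nil, err
	}
	// ethclient.NewClient wraps the existing connection; no second dial.
	return rpcCli, ethclient.NewClient(rpcCli), nil
}

// chainIDHex shows a call only the raw client can make directly.
func chainIDHex(ctx context.Context, rpcCli *rpc.Client) (string, error) {
	var result string
	err := rpcCli.CallContext(ctx, &result, "eth_chainId")
	return result, err
}
```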
@@ -10,12 +10,6 @@ import (
 	"github.com/scroll-tech/go-ethereum/common/hexutil"
 )
 
-const (
-	EuclidV2Fork = "euclidV2"
-
-	EuclidV2ForkNameForProver = "euclidv2"
-)
-
 // ProofType represents the type of task.
 type ProofType uint8
 
@@ -45,10 +39,12 @@ const (
 
 // ChunkTaskDetail is a type containing ChunkTask detail for chunk task.
 type ChunkTaskDetail struct {
+	Version uint8 `json:"version"`
 	// use one of the string of "euclidv1" / "euclidv2"
 	ForkName         string        `json:"fork_name"`
 	BlockHashes      []common.Hash `json:"block_hashes"`
 	PrevMsgQueueHash common.Hash   `json:"prev_msg_queue_hash"`
 	PostMsgQueueHash common.Hash   `json:"post_msg_queue_hash"`
 }
 
 // it is a hex encoded big with fixed length on 48 bytes
@@ -96,40 +92,59 @@ func (e *Byte48) UnmarshalJSON(input []byte) error {
|
||||
|
||||
// BatchTaskDetail is a type containing BatchTask detail.
|
||||
type BatchTaskDetail struct {
|
||||
Version uint8 `json:"version"`
|
||||
// use one of the string of "euclidv1" / "euclidv2"
|
||||
ForkName string `json:"fork_name"`
|
||||
ChunkInfos []*ChunkInfo `json:"chunk_infos"`
|
||||
ChunkProofs []*OpenVMChunkProof `json:"chunk_proofs"`
|
||||
BatchHeader interface{} `json:"batch_header"`
|
||||
BlobBytes []byte `json:"blob_bytes"`
|
||||
KzgProof Byte48 `json:"kzg_proof,omitempty"`
|
||||
KzgCommitment Byte48 `json:"kzg_commitment,omitempty"`
|
||||
ChallengeDigest common.Hash `json:"challenge_digest,omitempty"`
|
||||
ForkName string `json:"fork_name"`
|
||||
ChunkProofs []*OpenVMChunkProof `json:"chunk_proofs"`
|
||||
BatchHeader interface{} `json:"batch_header"`
|
||||
BlobBytes []byte `json:"blob_bytes"`
|
||||
KzgProof *Byte48 `json:"kzg_proof,omitempty"`
|
||||
KzgCommitment *Byte48 `json:"kzg_commitment,omitempty"`
|
||||
// ChallengeDigest should be a common.Hash type if it is not nil
|
||||
ChallengeDigest interface{} `json:"challenge_digest,omitempty"`
|
||||
}
|
||||
|
||||
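The switch from Byte48 to *Byte48 (and from common.Hash to interface{} for ChallengeDigest) is what makes the omitempty tags effective: encoding/json never treats a fixed-size array or struct value as "empty", so the old value-typed fields were always serialized, whereas a nil pointer or nil interface really is dropped. A minimal sketch of the difference, using a stand-in [48]byte type rather than the real Byte48 codec:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// byte48 is a stand-in for the real Byte48; only the omitempty
// behaviour is illustrated here, not the actual hex codec.
type byte48 [48]byte

type withValue struct {
	KzgProof byte48 `json:"kzg_proof,omitempty"`
}

type withPointer struct {
	KzgProof *byte48 `json:"kzg_proof,omitempty"`
}

func main() {
	v, _ := json.Marshal(withValue{})
	p, _ := json.Marshal(withPointer{})
	fmt.Println(string(v)) // kzg_proof is still emitted: omitempty never drops array values
	fmt.Println(string(p)) // {} — a nil pointer is actually omitted
}
```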
// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
type BundleTaskDetail struct {
	Version uint8 `json:"version"`
	// use one of the strings "euclidv1" / "euclidv2"
	ForkName string `json:"fork_name"`
	BatchProofs []*OpenVMBatchProof `json:"batch_proofs"`
	BundleInfo *OpenVMBundleInfo `json:"bundle_info,omitempty"`
}

type RawBytes []byte

func (r RawBytes) MarshalJSON() ([]byte, error) {
	if r == nil {
		return []byte("null"), nil
	}
	// Marshal the []byte as a JSON array of numbers
	rn := make([]uint16, len(r))
	for i := range r {
		rn[i] = uint16(r[i])
	}
	return json.Marshal(rn)
}

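The custom marshaller matters because encoding/json serializes a plain []byte as a base64 string, while RawBytes emits an array of numbers (the uint16 widening just keeps each element a JSON number). A small self-contained comparison, reusing the definition above:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type RawBytes []byte

func (r RawBytes) MarshalJSON() ([]byte, error) {
	if r == nil {
		return []byte("null"), nil
	}
	rn := make([]uint16, len(r))
	for i := range r {
		rn[i] = uint16(r[i])
	}
	return json.Marshal(rn)
}

func main() {
	plain, _ := json.Marshal([]byte{1, 255})
	raw, _ := json.Marshal(RawBytes{1, 255})
	fmt.Println(string(plain)) // "Af8=" — the default base64 encoding for []byte
	fmt.Println(string(raw))   // [1,255] — a numeric array
}
```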
// ChunkInfo is for calculating pi_hash for chunk
type ChunkInfo struct {
	ChainID uint64 `json:"chain_id"`
	PrevStateRoot common.Hash `json:"prev_state_root"`
	PostStateRoot common.Hash `json:"post_state_root"`
	WithdrawRoot common.Hash `json:"withdraw_root"`
	DataHash common.Hash `json:"data_hash"`
	IsPadding bool `json:"is_padding"`
	TxBytes []byte `json:"tx_bytes"`
	ChainID uint64 `json:"chain_id"`
	PrevStateRoot common.Hash `json:"prev_state_root"`
	PostStateRoot common.Hash `json:"post_state_root"`
	WithdrawRoot common.Hash `json:"withdraw_root"`
	DataHash common.Hash `json:"data_hash"`
	IsPadding bool `json:"is_padding"`
	// TxBytes []byte `json:"tx_bytes"`
	TxBytesHash common.Hash `json:"tx_data_digest"`
	PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
	PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
	TxDataLength uint64 `json:"tx_data_length"`
	InitialBlockNumber uint64 `json:"initial_block_number"`
	BlockCtxs []BlockContextV2 `json:"block_ctxs"`
	PrevBlockhash common.Hash `json:"prev_blockhash"`
	PostBlockhash common.Hash `json:"post_blockhash"`
	EncryptionKey RawBytes `json:"encryption_key"`
}

// BlockContextV2 is the block context for euclid v2
@@ -141,10 +156,18 @@ type BlockContextV2 struct {
	NumL1Msgs uint16 `json:"num_l1_msgs"`
}

// Metric data carried with OpenVMProof
type OpenVMProofStat struct {
	TotalCycle uint64 `json:"total_cycles"`
	ExecutionTimeMills uint64 `json:"execution_time_mills"`
	ProvingTimeMills uint64 `json:"proving_time_mills"`
}

// Proof for flattened VM proof
type OpenVMProof struct {
	Proof []byte `json:"proofs"`
	PublicValues []byte `json:"public_values"`
	Proof []byte `json:"proofs"`
	PublicValues []byte `json:"public_values"`
	Stat *OpenVMProofStat `json:"stat,omitempty"`
}

// Proof for flattened EVM proof
@@ -156,7 +179,8 @@ type OpenVMEvmProof struct {
// OpenVMChunkProof includes the proof info that are required for chunk verification and rollup.
type OpenVMChunkProof struct {
	MetaData struct {
		ChunkInfo *ChunkInfo `json:"chunk_info"`
		ChunkInfo *ChunkInfo `json:"chunk_info"`
		TotalGasUsed uint64 `json:"chunk_total_gas"`
	} `json:"metadata"`

	VmProof *OpenVMProof `json:"proof"`
@@ -183,6 +207,7 @@ type OpenVMBatchInfo struct {
	ChainID uint64 `json:"chain_id"`
	PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
	PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
	EncryptionKey RawBytes `json:"encryption_key"`
}

// BatchProof includes the proof info that are required for batch verification and rollup.
@@ -243,6 +268,7 @@ type OpenVMBundleInfo struct {
	PrevBatchHash common.Hash `json:"prev_batch_hash"`
	BatchHash common.Hash `json:"batch_hash"`
	MsgQueueHash common.Hash `json:"msg_queue_hash"`
	EncryptionKey RawBytes `json:"encryption_key"`
}

// OpenVMBundleProof includes the proof info that are required for verification of a bundle of batch proofs.

@@ -5,7 +5,7 @@ import (
	"runtime/debug"
)

var tag = "v4.5.25"
var tag = "v4.7.5"

var commit = func() string {
	if info, ok := debug.ReadBuildInfo(); ok {

3	coordinator/.gitignore	vendored
@@ -1,4 +1,5 @@
/build/bin
.idea
internal/logic/verifier/lib
internal/libzkp/lib/libzkp.so
libzkp.so
libzkp.dylib

@@ -1,8 +1,10 @@
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator
include ../build/common.mk

.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator libzkp

IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
LIBZKP_PATH=./internal/logic/libzkp/lib/libzkp.so
LIBZKP_PATH=./internal/logic/libzkp/lib/$(LIB_ZKP_NAME)

ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZKVM_VERSION=$(shell grep -m 1 "zkvm-prover?" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
@@ -27,6 +29,7 @@ libzkp: clean_libzkp $(LIBZKP_PATH)

coordinator_api: $(LIBZKP_PATH) ## Builds the Coordinator api instance.
	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
	$(call macos_codesign,$(PWD)/build/bin/coordinator_api)

coordinator_cron:
	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron
@@ -34,6 +37,19 @@ coordinator_cron:
coordinator_tool:
	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool

localsetup: coordinator_api ## Local setup: build coordinator_api, copy config, and setup releases
	mkdir -p build/bin/conf
	@echo "Copying configuration files..."
	@if [ -f "$(PWD)/conf/config.template.json" ]; then \
		SRC="$(PWD)/conf/config.template.json"; \
	else \
		SRC="$(CURDIR)/conf/config.json"; \
	fi; \
	cp -fL "$$SRC" "$(CURDIR)/build/bin/conf/config.template.json"
	@echo "Setting up releases..."
	cd $(CURDIR)/build && bash setup_releases.sh


#coordinator_api_skip_libzkp:
#	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api

@@ -51,6 +67,7 @@ test-gpu-verifier: $(LIBZKP_PATH)

lint: ## Lint the files - used for CI
	GOBIN=$(PWD)/build/bin go run ../build/lint.go
	cd ../ && cargo fmt --all -- --check && cargo clippy --release -- -D warnings

clean: ## Empty out the bin folder
	@rm -rf build/bin

@@ -10,6 +10,8 @@ See [monorepo prerequisites](../README.md#prerequisites).

## Build

Using Go version 1.22

```bash
make clean
make coordinator_api

72	coordinator/build/setup_releases.sh	Normal file
@@ -0,0 +1,72 @@
#!/bin/bash

# release version
if [ -z "${SCROLL_ZKVM_VERSION}" ]; then
	echo "SCROLL_ZKVM_VERSION not set"
	exit 1
fi

# default fork name from env or "galileo"
SCROLL_FORK_NAME="${SCROLL_FORK_NAME:-galileo}"

# set ASSET_DIR by reading from config.json
CONFIG_FILE="bin/conf/config.template.json"
if [ ! -f "$CONFIG_FILE" ]; then
	echo "Config file $CONFIG_FILE not found"
	exit 1
fi

# get the number of verifiers in the array
VERIFIER_COUNT=$(jq -r '.prover_manager.verifier.verifiers | length' "$CONFIG_FILE")

if [ "$VERIFIER_COUNT" = "null" ] || [ "$VERIFIER_COUNT" -eq 0 ]; then
	echo "No verifiers found in config file"
	exit 1
fi

echo "Found $VERIFIER_COUNT verifier(s) in config"

# iterate through each verifier entry
for ((i=0; i<$VERIFIER_COUNT; i++)); do
	# extract assets_path for current verifier
	ASSETS_PATH=$(jq -r ".prover_manager.verifier.verifiers[$i].assets_path" "$CONFIG_FILE")
	FORK_NAME=$(jq -r ".prover_manager.verifier.verifiers[$i].fork_name" "$CONFIG_FILE")

	# skip if this verifier's fork doesn't match the target fork
	if [ "$FORK_NAME" != "$SCROLL_FORK_NAME" ]; then
		echo "Expect $SCROLL_FORK_NAME, skip current fork ($FORK_NAME)"
		continue
	fi

	if [ "$ASSETS_PATH" = "null" ]; then
		echo "Warning: Could not find assets_path for verifier $i, skipping..."
		continue
	fi

	echo "Processing verifier $i ($FORK_NAME): assets_path=$ASSETS_PATH"

	# check if it's an absolute path (starts with /)
	if [[ "$ASSETS_PATH" = /* ]]; then
		# absolute path, use as is
		ASSET_DIR="$ASSETS_PATH"
	else
		# relative path, prefix with "bin/"
		ASSET_DIR="bin/$ASSETS_PATH"
	fi

	echo "Using ASSET_DIR: $ASSET_DIR"

	# create directory if it doesn't exist
	mkdir -p "$ASSET_DIR"

	# assets for verifier-only mode
	echo "Downloading assets for $FORK_NAME to $ASSET_DIR..."
	wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/verifier.bin -O ${ASSET_DIR}/verifier.bin
	wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/root_verifier_vk -O ${ASSET_DIR}/root_verifier_vk
	wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/openVmVk.json -O ${ASSET_DIR}/openVmVk.json

	echo "Completed downloading assets for $FORK_NAME"
	echo "---"
done

echo "All verifier assets downloaded successfully"
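As wired up in the Makefile's localsetup target earlier in this diff, the script runs from coordinator/build (`cd build && bash setup_releases.sh`) after the config template has been copied into build/bin/conf, so `SCROLL_ZKVM_VERSION` must be exported beforehand; `SCROLL_FORK_NAME` falls back to "galileo", and only verifiers whose `fork_name` matches it have their verifier.bin, root_verifier_vk and openVmVk.json downloaded into the configured `assets_path`.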
@@ -90,10 +90,12 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
	cfg.ProverManager = &coordinatorConfig.ProverManager{
		ProversPerSession: 1,
		Verifier: &coordinatorConfig.VerifierConfig{
			HighVersionCircuit: &coordinatorConfig.CircuitConfig{
				AssetsPath: "",
				ForkName: "euclidV2",
				MinProverVersion: "v4.4.89",
			MinProverVersion: "v4.4.89",
			Verifiers: []coordinatorConfig.AssetConfig{
				{
					AssetsPath: "",
					ForkName: "galileo",
				},
			},
		},
		BatchCollectionTimeSec: 60,

@@ -19,6 +19,7 @@ import (
)

var app *cli.App
var cfg *config.Config

func init() {
	// Set up coordinator app info.
@@ -29,16 +30,29 @@ func init() {
	app.Version = version.Version
	app.Flags = append(app.Flags, utils.CommonFlags...)
	app.Before = func(ctx *cli.Context) error {
		return utils.LogSetup(ctx)
		if err := utils.LogSetup(ctx); err != nil {
			return err
		}

		cfgFile := ctx.String(utils.ConfigFileFlag.Name)
		var err error
		cfg, err = config.NewConfig(cfgFile)
		if err != nil {
			log.Crit("failed to load config file", "config file", cfgFile, "error", err)
		}
		return nil
	}
	// sub commands
	app.Commands = []*cli.Command{
		{
			Name: "verify",
			Usage: "verify a proof, specified by [forkname] <type> <proof path>",
			Action: verify,
		},
	}
}

func action(ctx *cli.Context) error {
	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}
	db, err := database.InitDB(cfg.DB)
	if err != nil {
		log.Crit("failed to init db connection", "err", err)

109	coordinator/cmd/tool/verify.go	Normal file
@@ -0,0 +1,109 @@
package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"scroll-tech/coordinator/internal/logic/verifier"

	"scroll-tech/common/types/message"

	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"
)

func verify(cCtx *cli.Context) error {
	var forkName, proofType, proofPath string
	if cCtx.Args().Len() <= 2 {
		forkName = cfg.ProverManager.Verifier.Verifiers[0].ForkName
		proofType = cCtx.Args().First()
		proofPath = cCtx.Args().Get(1)
	} else {
		forkName = cCtx.Args().First()
		proofType = cCtx.Args().Get(1)
		proofPath = cCtx.Args().Get(2)
	}
	log.Info("verify proof", "in", proofPath, "type", proofType, "forkName", forkName)

	// Load the content of the proof file
	data, err := os.ReadFile(filepath.Clean(proofPath))
	if err != nil {
		return fmt.Errorf("error reading file: %w", err)
	}

	vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier, cfg.L2.ValidiumMode)
	if err != nil {
		return err
	}

	var ret bool
	switch strings.ToLower(proofType) {
	case "chunk":
		proof := &message.OpenVMChunkProof{}
		if err := json.Unmarshal(data, proof); err != nil {
			return err
		}
		vk, ok := vf.ChunkVk[forkName]
		if !ok {
			return fmt.Errorf("no vk loaded for fork %s", forkName)
		}
		if len(proof.Vk) != 0 {
			if !bytes.Equal(proof.Vk, vk) {
				return fmt.Errorf("vk mismatch: expected %s, got %s",
					base64.StdEncoding.EncodeToString(vk),
					base64.StdEncoding.EncodeToString(proof.Vk),
				)
			}
		} else {
			proof.Vk = vk
		}

		ret, err = vf.VerifyChunkProof(proof, forkName)
	case "batch":
		proof := &message.OpenVMBatchProof{}
		if err := json.Unmarshal(data, proof); err != nil {
			return err
		}
		vk, ok := vf.BatchVk[forkName]
		if !ok {
			return fmt.Errorf("no vk loaded for fork %s", forkName)
		}
		if len(proof.Vk) != 0 {
			if !bytes.Equal(proof.Vk, vk) {
				return fmt.Errorf("vk mismatch: expected %s, got %s",
					base64.StdEncoding.EncodeToString(vk),
					base64.StdEncoding.EncodeToString(proof.Vk),
				)
			}
		} else {
			proof.Vk = vk
		}

		ret, err = vf.VerifyBatchProof(proof, forkName)
	case "bundle":
		proof := &message.OpenVMBundleProof{}
		if err := json.Unmarshal(data, proof); err != nil {
			return err
		}
		vk, ok := vf.BundleVk[forkName]
		if !ok {
			return fmt.Errorf("no vk loaded for fork %s", forkName)
		}
		proof.Vk = vk

		ret, err = vf.VerifyBundleProof(proof, forkName)
	default:
		return fmt.Errorf("unsupported proof type %s", proofType)
	}

	if err != nil {
		return err
	}
	log.Info("verified:", "ret", ret)
	return nil
}
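Given the verify subcommand registered in main.go above, the tool is invoked as `coordinator_tool <config flag> verify [forkname] <type> <proof path>`: with only two positional arguments the fork name defaults to the first entry in `prover_manager.verifier.verifiers`, and `<type>` is one of chunk, batch or bundle. The config flag itself is whatever utils.ConfigFileFlag defines; its exact CLI name is not shown in this diff.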
@@ -7,11 +7,18 @@
    "batch_collection_time_sec": 180,
    "chunk_collection_time_sec": 180,
    "verifier": {
      "high_version_circuit": {
        "assets_path": "assets",
        "fork_name": "euclidV2",
        "min_prover_version": "v4.4.45"
      }
      "min_prover_version": "v4.4.45",
      "verifiers": [
        {
          "features": "legacy_witness:openvm_13",
          "assets_path": "assets_feynman",
          "fork_name": "feynman"
        },
        {
          "assets_path": "assets",
          "fork_name": "galileo"
        }
      ]
    }
  },
  "db": {
@@ -21,7 +28,10 @@
    "maxIdleNum": 20
  },
  "l2": {
    "chain_id": 111
    "chain_id": 111,
    "l2geth": {
      "endpoint": "not need to specified for mocking"
    }
  },
  "auth": {
    "secret": "prover secret key",

@@ -9,8 +9,8 @@ require (
	github.com/google/uuid v1.6.0
	github.com/mitchellh/mapstructure v1.5.0
	github.com/prometheus/client_golang v1.19.0
	github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6
	github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7
	github.com/scroll-tech/da-codec v0.9.0
	github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
	github.com/shopspring/decimal v1.3.1
	github.com/stretchr/testify v1.10.0
	github.com/urfave/cli/v2 v2.25.7
@@ -46,6 +46,7 @@ require (
)

require (
	github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/bits-and-blooms/bitset v1.20.0 // indirect
	github.com/btcsuite/btcd v0.20.1-beta // indirect
@@ -53,30 +54,59 @@ require (
	github.com/consensys/bavard v0.1.29 // indirect
	github.com/consensys/gnark-crypto v0.16.0 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
	github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
	github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
	github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
	github.com/edsrzf/mmap-go v1.0.0 // indirect
	github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
	github.com/fjl/memsize v0.0.2 // indirect
	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
	github.com/go-ole/go-ole v1.3.0 // indirect
	github.com/go-stack/stack v1.8.1 // indirect
	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
	github.com/gorilla/websocket v1.4.2 // indirect
	github.com/hashicorp/go-bexpr v0.1.10 // indirect
	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
	github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
	github.com/holiman/uint256 v1.3.2 // indirect
	github.com/huin/goupnp v1.0.2 // indirect
	github.com/iden3/go-iden3-crypto v0.0.17 // indirect
	github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 // indirect
	github.com/klauspost/compress v1.17.9 // indirect
	github.com/mattn/go-colorable v0.1.8 // indirect
	github.com/mattn/go-runewidth v0.0.15 // indirect
	github.com/mitchellh/pointerstructure v1.2.0 // indirect
	github.com/mmcloughlin/addchain v0.4.0 // indirect
	github.com/olekukonko/tablewriter v0.0.5 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/prometheus/client_model v0.5.0 // indirect
	github.com/prometheus/common v0.48.0 // indirect
	github.com/prometheus/procfs v0.12.0 // indirect
	github.com/prometheus/tsdb v0.7.1 // indirect
	github.com/rivo/uniseg v0.4.4 // indirect
	github.com/rjeczalik/notify v0.9.1 // indirect
	github.com/rs/cors v1.7.0 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/scroll-tech/zktrie v0.8.4 // indirect
	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
	github.com/supranational/blst v0.3.13 // indirect
	github.com/sourcegraph/conc v0.3.0 // indirect
	github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
	github.com/supranational/blst v0.3.15 // indirect
	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
	github.com/tklauser/go-sysconf v0.3.14 // indirect
	github.com/tklauser/numcpus v0.9.0 // indirect
	github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
	github.com/yusufpapurcu/wmi v1.2.4 // indirect
	go.uber.org/atomic v1.7.0 // indirect
	go.uber.org/multierr v1.9.0 // indirect
	golang.org/x/crypto v0.32.0 // indirect
	golang.org/x/sync v0.11.0 // indirect
	golang.org/x/sys v0.30.0 // indirect
	golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
	gopkg.in/urfave/cli.v1 v1.20.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	rsc.io/tmplfunc v0.0.3 // indirect
)

@@ -1,12 +1,18 @@
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/appleboy/gin-jwt/v2 v2.9.1 h1:l29et8iLW6omcHltsOP6LLk4s3v4g2FbFs0koxGWVZs=
github.com/appleboy/gin-jwt/v2 v2.9.1/go.mod h1:jwcPZJ92uoC9nOUTOKWoN/f6JZOgMSKlFSHw5/FrRUk=
github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4=
github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
@@ -24,6 +30,9 @@ github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1
github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM=
github.com/bytedance/sonic v1.10.1 h1:7a1wuFXL1cMy7a3f7/VFcEtriuXQnUBhtoVfOZiaysc=
github.com/bytedance/sonic v1.10.1/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
@@ -38,23 +47,39 @@ github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw
github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
@@ -73,19 +98,31 @@ github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7N
github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.0/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -93,13 +130,24 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI=
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
@@ -115,6 +163,7 @@ github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
@@ -129,14 +178,22 @@ github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
@@ -145,51 +202,76 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6 h1:vb2XLvQwCf+F/ifP6P/lfeiQrHY6+Yb/E3R4KHXLqSE=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7 h1:1rN1qocsQlOyk1VCpIEF1J5pfQbLAi1pnMZSLQS37jQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -200,8 +282,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
@@ -216,6 +298,8 @@ github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPD
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
@@ -227,11 +311,16 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y=
golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
@@ -240,7 +329,10 @@ golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -250,13 +342,25 @@ golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -269,36 +373,56 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

@@ -36,8 +36,9 @@ type L2Endpoint struct {
// L2 loads l2geth configuration items.
type L2 struct {
	// l2geth chain_id.
	ChainID  uint64      `json:"chain_id"`
	Endpoint *L2Endpoint `json:"l2geth"`
	ChainID      uint64      `json:"chain_id"`
	Endpoint     *L2Endpoint `json:"l2geth"`
	ValidiumMode bool        `json:"validium_mode"`
}

// Auth provides the auth coordinator
@@ -47,24 +48,34 @@ type Auth struct {
	LoginExpireDurationSec int `json:"login_expire_duration_sec"`
}

// The sequencer-controlled data
type Sequencer struct {
	DecryptionKey string `json:"decryption_key"`
}

// Config loads configuration items.
type Config struct {
	ProverManager *ProverManager   `json:"prover_manager"`
	DB            *database.Config `json:"db"`
	L2            *L2              `json:"l2"`
	Auth          *Auth            `json:"auth"`
	Sequencer     *Sequencer       `json:"sequencer"`
}

// CircuitConfig circuit items.
type CircuitConfig struct {
// AssetConfig contains the assets configured for each fork; the default vk file name is "OpenVmVk.json".
type AssetConfig struct {
	AssetsPath string `json:"assets_path"`
	Version    uint8  `json:"version,omitempty"`
	ForkName   string `json:"fork_name"`
	MinProverVersion string `json:"min_prover_version"`
	Vkfile           string `json:"vk_file,omitempty"`
	MinProverVersion string `json:"min_prover_version,omitempty"`
	Features         string `json:"features,omitempty"`
}

// VerifierConfig loads the zk verifier config.
type VerifierConfig struct {
	HighVersionCircuit *CircuitConfig `json:"high_version_circuit"`
	MinProverVersion   string         `json:"min_prover_version"`
	Verifiers          []AssetConfig  `json:"verifiers"`
}

// NewConfig returns a new instance of Config.

@@ -20,11 +20,11 @@ func TestConfig(t *testing.T) {
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"verifier": {
"high_version_circuit": {
"min_prover_version": "v4.4.45",
"verifiers": [{
"assets_path": "assets",
"fork_name": "euclidV2",
"min_prover_version": "v4.4.45"
}
"fork_name": "galileo"
}]
},
"max_verifier_workers": 4
},
@@ -35,13 +35,17 @@ func TestConfig(t *testing.T) {
"maxIdleNum": 20
},
"l2": {
"chain_id": 111
"chain_id": 111,
"validium_mode": false
},
"auth": {
"secret": "prover secret key",
"challenge_expire_duration_sec": 3600,
"login_expire_duration_sec": 3600
}
},
"sequencer": {
"decryption_key": "sequencer decryption key"
}
}`

t.Run("Success Case", func(t *testing.T) {
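The fixture above interleaves the old and new JSON, which makes the final shape hard to read. A minimal, self-contained sketch of decoding just the fields introduced here (the struct names below are hypothetical stand-ins, not the coordinator's real types):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed, hypothetical copies of the config types, limited to the new
// validium_mode flag and the new sequencer section.
type l2Config struct {
	ChainID      uint64 `json:"chain_id"`
	ValidiumMode bool   `json:"validium_mode"`
}

type sequencerConfig struct {
	DecryptionKey string `json:"decryption_key"`
}

type coordinatorConfig struct {
	L2        *l2Config        `json:"l2"`
	Sequencer *sequencerConfig `json:"sequencer"`
}

func main() {
	raw := `{
		"l2": {"chain_id": 111, "validium_mode": false},
		"sequencer": {"decryption_key": "sequencer decryption key"}
	}`
	var cfg coordinatorConfig
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.L2.ChainID, cfg.L2.ValidiumMode, cfg.Sequencer.DecryptionKey)
}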
@@ -1,12 +1,15 @@
package api

import (
	"encoding/json"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/params"
	"gorm.io/gorm"

	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/logic/libzkp"
	"scroll-tech/coordinator/internal/logic/verifier"
)

@@ -21,7 +24,9 @@ var (

// InitController inits Controller with database
func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) {
	vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier)
	validiumMode := cfg.L2.ValidiumMode

	vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier, validiumMode)
	if err != nil {
		panic("proof receiver new verifier failure")
	}
@@ -29,7 +34,7 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
	log.Info("verifier created", "openVmVerifier", vf.OpenVMVkMap)

	// TODO: enable this when the libzkp has been updated
	/*l2cfg := cfg.L2.Endpoint
	l2cfg := cfg.L2.Endpoint
	if l2cfg == nil {
		panic("l2geth is not specified")
	}
@@ -37,9 +42,9 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
	if err != nil {
		panic(err)
	}
	libzkp.InitL2geth(string(l2cfgBytes))*/
	libzkp.InitL2geth(string(l2cfgBytes))

	Auth = NewAuthController(db, cfg, vf)
	GetTask = NewGetTaskController(cfg, chainCfg, db, reg)
	GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg)
	SubmitProof = NewSubmitProofController(cfg, chainCfg, db, vf, reg)
}

@@ -17,6 +17,7 @@ import (

	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/logic/provertask"
	"scroll-tech/coordinator/internal/logic/verifier"
	coordinatorType "scroll-tech/coordinator/internal/types"
)

@@ -25,13 +26,15 @@ type GetTaskController struct {
	proverTasks map[message.ProofType]provertask.ProverTask

	getTaskAccessCounter *prometheus.CounterVec

	l2syncer *l2Syncer
}

// NewGetTaskController create a get prover task controller
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *GetTaskController {
	chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, reg)
	batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, reg)
	bundleProverTask := provertask.NewBundleProverTask(cfg, chainCfg, db, reg)
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, verifier *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
	chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, verifier.ChunkVk, reg)
	batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, verifier.BatchVk, reg)
	bundleProverTask := provertask.NewBundleProverTask(cfg, chainCfg, db, verifier.BundleVk, reg)

	ptc := &GetTaskController{
		proverTasks: make(map[message.ProofType]provertask.ProverTask),
@@ -44,6 +47,13 @@ func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *
	ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask
	ptc.proverTasks[message.ProofTypeBatch] = batchProverTask
	ptc.proverTasks[message.ProofTypeBundle] = bundleProverTask

	if syncer, err := createL2Syncer(cfg); err != nil {
		log.Crit("can not init l2 syncer", "err", err)
	} else {
		ptc.l2syncer = syncer
	}

	return ptc
}

@@ -78,6 +88,17 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
		return
	}

	if getTaskParameter.ProverHeight == 0 {
		// help update the prover height with internal l2geth
		if blk, err := ptc.l2syncer.getLatestBlockNumber(ctx); err == nil {
			getTaskParameter.ProverHeight = blk
		} else {
			nerr := fmt.Errorf("inner l2geth failure, err:%w", err)
			types.RenderFailure(ctx, types.InternalServerError, nerr)
			return
		}
	}

	proofType := ptc.proofType(&getTaskParameter)
	proverTask, isExist := ptc.proverTasks[proofType]
	if !isExist {

coordinator/internal/controller/api/l2_syncer.go (new file, 71 lines)
@@ -0,0 +1,71 @@
//go:build !mock_verifier

package api

import (
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/log"

	"scroll-tech/coordinator/internal/config"
)

type l2Syncer struct {
	l2gethClient    *ethclient.Client
	lastBlockNumber struct {
		sync.RWMutex
		data uint64
		t    time.Time
	}
}

func createL2Syncer(cfg *config.Config) (*l2Syncer, error) {
	if cfg.L2 == nil || cfg.L2.Endpoint == nil {
		return nil, fmt.Errorf("l2 endpoint is not set in config")
	} else {
		l2gethClient, err := ethclient.Dial(cfg.L2.Endpoint.Url)
		if err != nil {
			return nil, fmt.Errorf("dial l2geth endpoint fail, err: %s", err)
		}
		return &l2Syncer{
			l2gethClient: l2gethClient,
		}, nil
	}
}

// getLatestBlockNumber gets the latest block number, using cache if available and not expired
func (syncer *l2Syncer) getLatestBlockNumber(ctx *gin.Context) (uint64, error) {
	// First check if we have a cached value that's still valid
	syncer.lastBlockNumber.RLock()
	if !syncer.lastBlockNumber.t.IsZero() && time.Since(syncer.lastBlockNumber.t) < time.Second*10 {
		blockNumber := syncer.lastBlockNumber.data
		syncer.lastBlockNumber.RUnlock()
		return blockNumber, nil
	}
	syncer.lastBlockNumber.RUnlock()

	// If not cached or expired, fetch from the client
	if syncer.l2gethClient == nil {
		return 0, errors.New("L2 geth client not initialized")
	}

	blockNumber, err := syncer.l2gethClient.BlockNumber(ctx)
	if err != nil {
		return 0, fmt.Errorf("failed to get latest block number: %w", err)
	}

	// Update the cache
	syncer.lastBlockNumber.Lock()
	syncer.lastBlockNumber.data = blockNumber
	syncer.lastBlockNumber.t = time.Now()
	syncer.lastBlockNumber.Unlock()

	log.Debug("updated block height reference", "height", blockNumber)
	return blockNumber, nil
}

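The syncer above keeps the last block number behind a sync.RWMutex with a ten-second TTL, so concurrent GetTasks calls mostly hit the cache instead of l2geth. A minimal, self-contained sketch of the same read-through cache pattern (the names here are illustrative, not part of the coordinator):

package main

import (
	"fmt"
	"sync"
	"time"
)

// cachedValue is a TTL-guarded cache, mirroring the lastBlockNumber field above.
type cachedValue struct {
	mu   sync.RWMutex
	data uint64
	t    time.Time
}

// get returns the cached value while it is fresh, otherwise refreshes it via fetch.
func (c *cachedValue) get(ttl time.Duration, fetch func() (uint64, error)) (uint64, error) {
	c.mu.RLock()
	if !c.t.IsZero() && time.Since(c.t) < ttl {
		v := c.data
		c.mu.RUnlock()
		return v, nil
	}
	c.mu.RUnlock()

	v, err := fetch()
	if err != nil {
		return 0, err
	}

	c.mu.Lock()
	c.data, c.t = v, time.Now()
	c.mu.Unlock()
	return v, nil
}

func main() {
	var c cachedValue
	v, _ := c.get(10*time.Second, func() (uint64, error) { return 42, nil })
	fmt.Println(v) // fetched once; calls within the next 10s return the cached value
}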
coordinator/internal/controller/api/mock_syncer.go (new file, 20 lines)
@@ -0,0 +1,20 @@
//go:build mock_verifier

package api

import (
	"scroll-tech/coordinator/internal/config"

	"github.com/gin-gonic/gin"
)

type l2Syncer struct{}

func createL2Syncer(_ *config.Config) (*l2Syncer, error) {
	return &l2Syncer{}, nil
}

// getLatestBlockNumber returns a fixed block number under the mock_verifier build tag
func (syncer *l2Syncer) getLatestBlockNumber(_ *gin.Context) (uint64, error) {
	return 99999994, nil
}

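The two files above are alternatives selected at compile time: the real syncer builds under !mock_verifier and the stub replaces it when the tag is set, so the rest of the package never knows which it got. A minimal sketch of the same mechanism, under assumed file and tag names (demo_mock is hypothetical, not a tag used by this repo):

// file: source_real.go
//go:build !demo_mock

package demo

// Source reports which implementation was compiled in.
func Source() string { return "real" }

// file: source_mock.go
//go:build demo_mock

package demo

// Source reports which implementation was compiled in.
func Source() string { return "mock" }

// `go build ./...` compiles source_real.go; `go build -tags demo_mock ./...`
// swaps in source_mock.go behind the identical API.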
@@ -24,16 +24,16 @@ type LoginLogic struct {

	openVmVks map[string]struct{}

	proverVersionHardForkMap map[string][]string
	proverVersionHardForkMap map[string]string
}

// NewLoginLogic new a LoginLogic
func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
	proverVersionHardForkMap := make(map[string][]string)
	proverVersionHardForkMap := make(map[string]string)

	var highHardForks []string
	highHardForks = append(highHardForks, cfg.ProverManager.Verifier.HighVersionCircuit.ForkName)
	proverVersionHardForkMap[cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion] = highHardForks
	for _, cfg := range cfg.ProverManager.Verifier.Verifiers {
		proverVersionHardForkMap[cfg.ForkName] = cfg.MinProverVersion
	}

	return &LoginLogic{
		cfg: cfg,
@@ -56,8 +56,8 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
		return errors.New("auth message verify failure")
	}

	if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
		return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion, login.Message.ProverVersion)
	if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.MinProverVersion) {
		return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.MinProverVersion, login.Message.ProverVersion)
	}

	vks := make(map[string]struct{})
@@ -99,9 +99,15 @@ func (l *LoginLogic) ProverHardForkName(login *types.LoginParameter) (string, er
	}

	proverVersion := proverVersionSplits[0]
	if hardForkNames, ok := l.proverVersionHardForkMap[proverVersion]; ok {
		return strings.Join(hardForkNames, ","), nil
	var hardForkNames []string
	for n, minVersion := range l.proverVersionHardForkMap {
		if minVersion == "" || version.CheckScrollRepoVersion(proverVersion, minVersion) {
			hardForkNames = append(hardForkNames, n)
		}
	}
	if len(hardForkNames) == 0 {
		return "", fmt.Errorf("invalid prover prover_version:%s", login.Message.ProverVersion)
	}

	return "", fmt.Errorf("invalid prover prover_version:%s", login.Message.ProverVersion)
	return strings.Join(hardForkNames, ","), nil
}

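The rewritten ProverHardForkName collects every fork whose configured minimum prover version is satisfied, instead of a one-to-one version-to-forks lookup. A minimal sketch of that selection rule; CheckScrollRepoVersion's real comparison lives in the scroll-tech version package, and using golang.org/x/mod/semver here is an assumption for illustration:

package main

import (
	"fmt"
	"sort"
	"strings"

	"golang.org/x/mod/semver"
)

// forksFor returns all fork names whose minimum prover version is met;
// an empty minimum means the fork is open to every prover version.
func forksFor(proverVersion string, minByFork map[string]string) []string {
	var names []string
	for fork, min := range minByFork {
		if min == "" || semver.Compare(proverVersion, min) >= 0 {
			names = append(names, fork)
		}
	}
	sort.Strings(names) // deterministic output for the joined string
	return names
}

func main() {
	minByFork := map[string]string{"euclidV2": "v4.4.45", "galileo": "v4.5.0"}
	fmt.Println(strings.Join(forksFor("v4.4.50", minByFork), ",")) // euclidV2
	fmt.Println(strings.Join(forksFor("v4.5.1", minByFork), ","))  // euclidV2,galileo
}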
@@ -1,16 +1,20 @@
.PHONY: help fmt clippy test test-ci test-all
include ../../../../build/common.mk

.PHONY: help fmt clippy test test-ci test-all clean build

all: build

build:
	@cargo build --release -p libzkp-c
	@mkdir -p lib
	@cp -f ../../../../target/release/libzkp.so lib/
	@cp -f ../../../../target/release/$(LIB_ZKP_NAME) lib/

fmt:
	@cargo fmt --all -- --check

clean:
	@cargo clean --release -p libzkp -p libzkp-c -p l2geth
	@rm -f lib/libzkp.so
	@rm -f lib/$(LIB_ZKP_NAME)

clippy:
	@cargo check --release --all-features

@@ -1,7 +1,8 @@
package libzkp

/*
#cgo LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib -Wl,-rpath=${SRCDIR}/lib
#cgo linux LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib -Wl,-rpath=${SRCDIR}/lib
#cgo darwin LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib -Wl,-rpath,${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#include <stdlib.h>
#include "libzkp.h"
@@ -11,11 +12,16 @@ import "C" //nolint:typecheck
import (
	"fmt"
	"os"
	"strings"
	"unsafe"

	"scroll-tech/common/types/message"
)

func init() {
	C.init_tracing()
}

// Helper function to convert Go string to C string and handle cleanup
func goToCString(s string) *C.char {
	return C.CString(s)
@@ -34,18 +40,10 @@ func InitVerifier(configJSON string) {
	C.init_verifier(cConfig)
}

// Initialize the verifier
func InitL2geth(configJSON string) {
	cConfig := goToCString(configJSON)
	defer freeCString(cConfig)

	C.init_l2geth(cConfig)
}

// Verify a chunk proof
func VerifyChunkProof(proofData, forkName string) bool {
	cProof := goToCString(proofData)
	cForkName := goToCString(forkName)
	cForkName := goToCString(strings.ToLower(forkName))
	defer freeCString(cProof)
	defer freeCString(cForkName)

@@ -56,7 +54,7 @@ func VerifyChunkProof(proofData, forkName string) bool {
// Verify a batch proof
func VerifyBatchProof(proofData, forkName string) bool {
	cProof := goToCString(proofData)
	cForkName := goToCString(forkName)
	cForkName := goToCString(strings.ToLower(forkName))
	defer freeCString(cProof)
	defer freeCString(cForkName)

@@ -67,7 +65,7 @@ func VerifyBatchProof(proofData, forkName string) bool {
// Verify a bundle proof
func VerifyBundleProof(proofData, forkName string) bool {
	cProof := goToCString(proofData)
	cForkName := goToCString(forkName)
	cForkName := goToCString(strings.ToLower(forkName))
	defer freeCString(cProof)
	defer freeCString(cForkName)

@@ -96,8 +94,8 @@ func fromMessageTaskType(taskType int) int {
}

// Generate a universal task
func GenerateUniversalTask(taskType int, taskJSON, forkName string) (bool, string, string, []byte) {
	return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, forkName)
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
	return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}

// Generate wrapped proof
@@ -127,7 +125,7 @@ func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {

// Dumps a verification key to a file
func DumpVk(forkName, filePath string) error {
	cForkName := goToCString(forkName)
	cForkName := goToCString(strings.ToLower(forkName))
	cFilePath := goToCString(filePath)
	defer freeCString(cForkName)
	defer freeCString(cFilePath)
@@ -143,3 +141,20 @@ func DumpVk(forkName, filePath string) error {

	return nil
}

// UniversalTaskCompatibilityFix calls the universal task compatibility fix function
func UniversalTaskCompatibilityFix(taskJSON string) (string, error) {
	cTaskJSON := goToCString(taskJSON)
	defer freeCString(cTaskJSON)

	resultPtr := C.univ_task_compatibility_fix(cTaskJSON)
	if resultPtr == nil {
		return "", fmt.Errorf("univ_task_compatibility_fix failed")
	}

	// Convert result to Go string and free C memory
	result := C.GoString(resultPtr)
	C.release_string(resultPtr)

	return result, nil
}

@@ -8,6 +8,9 @@

#include <stddef.h> // For size_t

// Init log tracing
void init_tracing();

// Initialize the verifier with configuration
void init_verifier(char* config);

@@ -32,7 +35,15 @@ typedef struct {

// Generate a universal task based on task type and input JSON
// Returns a struct containing task data, metadata, and expected proof hash
HandlingResult gen_universal_task(int task_type, char* task, char* fork_name);
HandlingResult gen_universal_task(
    int task_type,
    char* task,
    char* fork_name,
    const unsigned char* expected_vk,
    size_t expected_vk_len,
    const unsigned char* decryption_key,
    size_t decryption_key_len
);

// Release memory allocated for a HandlingResult returned by gen_universal_task
void release_task_result(HandlingResult result);
@@ -45,4 +56,7 @@ char* gen_wrapped_proof(char* proof_json, char* metadata, char* vk, size_t vk_le
// Release memory allocated for a string returned by gen_wrapped_proof
void release_string(char* string_ptr);

// Universal task compatibility fix function
char* univ_task_compatibility_fix(char* task_json);

#endif /* LIBZKP_H */

@@ -11,7 +11,10 @@ import (
	"github.com/scroll-tech/go-ethereum/common"
)

func generateUniversalTask(taskType int, taskJSON, forkName string) (bool, string, string, []byte) {
func InitL2geth(configJSON string) {
}

func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {

	fmt.Printf("call mocked generate universal task %d, taskJson %s\n", taskType, taskJSON)
	var metadata interface{}

@@ -7,14 +7,35 @@ package libzkp
#include "libzkp.h"
*/
import "C" //nolint:typecheck
import "unsafe"

func generateUniversalTask(taskType int, taskJSON, forkName string) (bool, string, string, []byte) {
// InitL2geth initializes the l2geth handler
func InitL2geth(configJSON string) {
	cConfig := goToCString(configJSON)
	defer freeCString(cConfig)

	C.init_l2geth(cConfig)
}

func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
	cTask := goToCString(taskJSON)
	cForkName := goToCString(forkName)
	defer freeCString(cTask)
	defer freeCString(cForkName)

	result := C.gen_universal_task(C.int(taskType), cTask, cForkName)
	// Create a C array from Go slice
	var cVk *C.uchar
	if len(expectedVk) > 0 {
		cVk = (*C.uchar)(unsafe.Pointer(&expectedVk[0]))
	}

	// Create a C array from Go slice
	var cDk *C.uchar
	if len(decryptionKey) > 0 {
		cDk = (*C.uchar)(unsafe.Pointer(&decryptionKey[0]))
	}

	result := C.gen_universal_task(C.int(taskType), cTask, cForkName, cVk, C.size_t(len(expectedVk)), cDk, C.size_t(len(decryptionKey)))
	defer C.release_task_result(result)

	// Check if the operation was successful

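The implementation above passes Go byte slices to C as pointer-plus-length pairs via unsafe.Pointer, guarding the empty-slice case where &slice[0] would panic. A self-contained sketch of the same pattern against a stand-in C function (sum_bytes is hypothetical, standing in for gen_universal_task's buffer parameters):

package main

/*
#include <stddef.h>
// Stand-in for a C function that takes a byte buffer and its length.
static unsigned int sum_bytes(const unsigned char* p, size_t n) {
    unsigned int s = 0;
    for (size_t i = 0; i < n; i++) s += p[i];
    return s;
}
*/
import "C"

import (
	"fmt"
	"unsafe"
)

func main() {
	buf := []byte{1, 2, 3}
	var cp *C.uchar
	if len(buf) > 0 {
		// The pointer stays valid only for the duration of the cgo call;
		// the Go runtime pins the slice backing array across it.
		cp = (*C.uchar)(unsafe.Pointer(&buf[0]))
	}
	fmt.Println(C.sum_bytes(cp, C.size_t(len(buf)))) // prints 6
}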
@@ -36,12 +36,13 @@ type BatchProverTask struct {
}

// NewBatchProverTask new a batch collector
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProverTask {
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, expectedVk map[string][]byte, reg prometheus.Registerer) *BatchProverTask {
	bp := &BatchProverTask{
		BaseProverTask: BaseProverTask{
			db:         db,
			cfg:        cfg,
			chainCfg:   chainCfg,
			expectedVk: expectedVk,
			blockOrm:   orm.NewL2Block(db),
			chunkOrm:   orm.NewChunk(db),
			batchOrm:   orm.NewBatch(db),
@@ -83,10 +84,37 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
	for i := 0; i < 5; i++ {
		var getTaskError error
		var tmpBatchTask *orm.Batch
		tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
		if getTaskError != nil {
			log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
			return nil, ErrCoordinatorInternalFailure

		if taskCtx.hasAssignedTask != nil {
			if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeBatch) {
				return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
			}

			tmpBatchTask, getTaskError = bp.batchOrm.GetBatchByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
			if getTaskError != nil {
				log.Error("failed to get batch has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
				return nil, ErrCoordinatorInternalFailure
			} else if tmpBatchTask == nil {
				// if the assigned batch was dropped, assigning another one would cause too many issues
				return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped batch. ProverName: %s, ProverVersion: %s",
					taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
			}
		} else if getTaskParameter.TaskID != "" {
			tmpBatchTask, getTaskError = bp.batchOrm.GetBatchByHash(ctx.Copy(), getTaskParameter.TaskID)
			if getTaskError != nil {
				log.Error("failed to get expected batch", "taskID", getTaskParameter.TaskID, "err", getTaskError)
				return nil, ErrCoordinatorInternalFailure
			} else if tmpBatchTask == nil {
				return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
			}
		}

		if tmpBatchTask == nil {
			tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
			if getTaskError != nil {
				log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
				return nil, ErrCoordinatorInternalFailure
			}
		}

		// Why fetch again? To support assigning one task to multiple provers, batches in `ProvingTaskAssigned` status must be assignable too
@@ -114,29 +142,32 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
			return nil, nil
		}

		// Don't dispatch the same failing job to the same prover
		proverTasks, getFailedTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBatch, tmpBatchTask.Hash, 2)
		if getFailedTaskError != nil {
			log.Error("failed to get prover tasks", "proof type", message.ProofTypeBatch.String(), "task ID", tmpBatchTask.Hash, "error", getFailedTaskError)
			return nil, ErrCoordinatorInternalFailure
		}
		for i := 0; i < len(proverTasks); i++ {
			if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
				taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
				log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBatchTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
				return nil, nil
		// we simply pick the batch that was already assigned, so don't bother updating attempts or re-checking earlier failures
		if taskCtx.hasAssignedTask == nil {
			// Don't dispatch the same failing job to the same prover
			proverTasks, getFailedTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBatch, tmpBatchTask.Hash, 2)
			if getFailedTaskError != nil {
				log.Error("failed to get prover tasks", "proof type", message.ProofTypeBatch.String(), "task ID", tmpBatchTask.Hash, "error", getFailedTaskError)
				return nil, ErrCoordinatorInternalFailure
			}
			for i := 0; i < len(proverTasks); i++ {
				if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
					taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
					log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBatchTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
					return nil, nil
				}
			}
		}

		rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
		if updateAttemptsErr != nil {
			log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
			return nil, ErrCoordinatorInternalFailure
		}
			rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
			if updateAttemptsErr != nil {
				log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
				return nil, ErrCoordinatorInternalFailure
			}

		if rowsAffected == 0 {
			time.Sleep(100 * time.Millisecond)
			continue
			if rowsAffected == 0 {
				time.Sleep(100 * time.Millisecond)
				continue
			}
		}

		batchTask = tmpBatchTask
@@ -149,19 +180,24 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
	}

	log.Info("start batch proof generation session", "task_id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
	proverTask := orm.ProverTask{
		TaskID:          batchTask.Hash,
		ProverPublicKey: taskCtx.PublicKey,
		TaskType:        int16(message.ProofTypeBatch),
		ProverName:      taskCtx.ProverName,
		ProverVersion:   taskCtx.ProverVersion,
		ProvingStatus:   int16(types.ProverAssigned),
		FailureType:     int16(types.ProverTaskFailureTypeUndefined),
		// why UTC time is needed here: see scroll/common/database/db.go
		AssignedAt: utils.NowUTC(),
	var proverTask *orm.ProverTask
	if taskCtx.hasAssignedTask == nil {
		proverTask = &orm.ProverTask{
			TaskID:          batchTask.Hash,
			ProverPublicKey: taskCtx.PublicKey,
			TaskType:        int16(message.ProofTypeBatch),
			ProverName:      taskCtx.ProverName,
			ProverVersion:   taskCtx.ProverVersion,
			ProvingStatus:   int16(types.ProverAssigned),
			FailureType:     int16(types.ProverTaskFailureTypeUndefined),
			// why UTC time is needed here: see scroll/common/database/db.go
			AssignedAt: utils.NowUTC(),
		}
	} else {
		proverTask = taskCtx.hasAssignedTask
	}

	taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, batchTask, hardForkName)
	taskMsg, err := bp.formatProverTask(ctx.Copy(), proverTask, batchTask, hardForkName)
	if err != nil {
		bp.recoverActiveAttempts(ctx, batchTask)
		log.Error("format prover task failure", "task_id", batchTask.Hash, "err", err)
@@ -169,20 +205,31 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
	}
	if getTaskParameter.Universal {
		var metadata []byte

		taskMsg, metadata, err = bp.applyUniversal(taskMsg)
		if err != nil {
			bp.recoverActiveAttempts(ctx, batchTask)
			log.Error("Generate universal prover task failure", "task_id", batchTask.Hash, "type", "batch")
			log.Error("Generate universal prover task failure", "task_id", batchTask.Hash, "type", "batch", "err", err)
			return nil, ErrCoordinatorInternalFailure
		}
		proverTask.Metadata = metadata

		if isCompatibilityFixingVersion(taskCtx.ProverVersion) {
			log.Info("Apply compatibility fixing for prover", "version", taskCtx.ProverVersion)
			if err := fixCompatibility(taskMsg); err != nil {
				log.Error("apply compatibility failure", "err", err)
				return nil, ErrCoordinatorInternalFailure
			}
		}
	}

	// Store session info.
	if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
		bp.recoverActiveAttempts(ctx, batchTask)
		log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
		return nil, ErrCoordinatorInternalFailure
	if taskCtx.hasAssignedTask == nil {
		if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), proverTask); err != nil {
			bp.recoverActiveAttempts(ctx, batchTask)
			log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
			return nil, ErrCoordinatorInternalFailure
		}
	}
	// notice uuid is set as a side effect of InsertProverTask
	taskMsg.UUID = proverTask.UUID.String()
@@ -210,36 +257,21 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
	}

	var chunkProofs []*message.OpenVMChunkProof
	var chunkInfos []*message.ChunkInfo
	// var chunkInfos []*message.ChunkInfo
	for _, chunk := range chunks {
		var proof message.OpenVMChunkProof
		if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
			return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, task.TaskID, chunk.Hash)
		}
		chunkProofs = append(chunkProofs, &proof)

		chunkInfo := message.ChunkInfo{
			ChainID:            bp.cfg.L2.ChainID,
			PrevStateRoot:      common.HexToHash(chunk.ParentChunkStateRoot),
			PostStateRoot:      common.HexToHash(chunk.StateRoot),
			WithdrawRoot:       common.HexToHash(chunk.WithdrawRoot),
			DataHash:           common.HexToHash(chunk.Hash),
			PrevMsgQueueHash:   common.HexToHash(chunk.PrevL1MessageQueueHash),
			PostMsgQueueHash:   common.HexToHash(chunk.PostL1MessageQueueHash),
			IsPadding:          false,
			InitialBlockNumber: proof.MetaData.ChunkInfo.InitialBlockNumber,
			BlockCtxs:          proof.MetaData.ChunkInfo.BlockCtxs,
			TxDataLength:       proof.MetaData.ChunkInfo.TxDataLength,
		}
		chunkInfos = append(chunkInfos, &chunkInfo)
	}

	taskDetail, err := bp.getBatchTaskDetail(batch, chunkInfos, chunkProofs, hardForkName)
	taskDetail, err := bp.getBatchTaskDetail(batch, chunkProofs, hardForkName)
	if err != nil {
		return nil, fmt.Errorf("failed to get batch task detail, taskID:%s err:%w", task.TaskID, err)
	}

	chunkProofsBytes, err := json.Marshal(taskDetail)
	taskBytesWithchunkProofs, err := json.Marshal(taskDetail)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal chunk proofs, taskID:%s err:%w", task.TaskID, err)
	}
@@ -247,7 +279,7 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
	taskMsg := &coordinatorType.GetTaskSchema{
		TaskID:       task.TaskID,
		TaskType:     int(message.ProofTypeBatch),
		TaskData:     string(chunkProofsBytes),
		TaskData:     string(taskBytesWithchunkProofs),
		HardForkName: hardForkName,
	}

@@ -262,44 +294,59 @@ func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *or
	}
}

func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []*message.OpenVMChunkProof, hardForkName string) (*message.BatchTaskDetail, error) {
	taskDetail := &message.BatchTaskDetail{
		ChunkInfos:  chunkInfos,
		ChunkProofs: chunkProofs,
	}

	if hardForkName == message.EuclidV2Fork {
		taskDetail.ForkName = message.EuclidV2ForkNameForProver
	} else {
		log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
		return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
	}

	dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
	switch dbBatchCodecVersion {
	case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7, encoding.CodecV8:
	default:
		return taskDetail, nil
	}

	codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkProofs []*message.OpenVMChunkProof, hardForkName string) (*message.BatchTaskDetail, error) {
	// Get the version byte.
	version, err := bp.version(hardForkName)
	if err != nil {
		return nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
		return nil, fmt.Errorf("failed to decode version byte: %w", err)
	}

	batchHeader, decodeErr := codec.NewDABatchFromBytes(dbBatch.BatchHeader)
	if decodeErr != nil {
		return nil, fmt.Errorf("failed to decode batch header version %d: %w", dbBatch.CodecVersion, decodeErr)
	taskDetail := &message.BatchTaskDetail{
		Version:     version,
		ChunkProofs: chunkProofs,
		ForkName:    hardForkName,
	}
	taskDetail.BatchHeader = batchHeader

	taskDetail.BlobBytes = dbBatch.BlobBytes
	taskDetail.ChallengeDigest = common.HexToHash(dbBatch.ChallengeDigest)
	// Memory layout of `BlobDataProof`: used in Codec.BlobDataProofForPointEvaluation()
	// | z       | y       | kzg_commitment | kzg_proof |
	// |---------|---------|----------------|-----------|
	// | bytes32 | bytes32 | bytes48        | bytes48   |
	taskDetail.KzgProof = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[112:160]))}
	taskDetail.KzgCommitment = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[64:112]))}
	if !bp.validiumMode() {
		dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
		switch dbBatchCodecVersion {
		case 0:
			log.Warn("the codec version is 0, if it is not under integration test we have encountered an error here")
			return taskDetail, nil
		case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7, encoding.CodecV8, encoding.CodecV9:
		default:
			return nil, fmt.Errorf("Unsupported codec version <%d>", dbBatchCodecVersion)
		}

		codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
		if err != nil {
			return nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
		}

		batchHeader, decodeErr := codec.NewDABatchFromBytes(dbBatch.BatchHeader)
		if decodeErr != nil {
			return nil, fmt.Errorf("failed to decode batch header version %d: %w", dbBatch.CodecVersion, decodeErr)
		}
		taskDetail.BatchHeader = batchHeader

		taskDetail.ChallengeDigest = common.HexToHash(dbBatch.ChallengeDigest)
		// Memory layout of `BlobDataProof`: used in Codec.BlobDataProofForPointEvaluation()
		// | z       | y       | kzg_commitment | kzg_proof |
		// |---------|---------|----------------|-----------|
		// | bytes32 | bytes32 | bytes48        | bytes48   |
		taskDetail.KzgProof = &message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[112:160]))}
		taskDetail.KzgCommitment = &message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[64:112]))}
	} else {
		log.Info("Apply validium mode for batch proving task")
		codec := cutils.FromVersion(version)
		batchHeader, decodeErr := codec.DABatchForTaskFromBytes(dbBatch.BatchHeader)
		if decodeErr != nil {
			return nil, fmt.Errorf("failed to decode batch header version %d: %w", dbBatch.CodecVersion, decodeErr)
		}
		batchHeader.SetHash(common.HexToHash(dbBatch.Hash))
		taskDetail.BatchHeader = batchHeader
	}

	return taskDetail, nil
}

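The KzgProof and KzgCommitment slices above follow the BlobDataProof layout documented in the comment: z and y occupy the first 64 bytes, the commitment bytes [64:112], and the proof bytes [112:160]. A self-contained sketch of that slicing with a bounds check (splitBlobDataProof is a hypothetical helper, not part of the coordinator):

package main

import (
	"encoding/hex"
	"fmt"
)

// Offsets follow the documented layout:
// | z (32) | y (32) | kzg_commitment (48) | kzg_proof (48) | = 160 bytes total.
func splitBlobDataProof(p []byte) (z, y, commitment, proof []byte, err error) {
	if len(p) < 160 {
		return nil, nil, nil, nil, fmt.Errorf("blob data proof too short: %d bytes", len(p))
	}
	return p[0:32], p[32:64], p[64:112], p[112:160], nil
}

func main() {
	buf := make([]byte, 160)
	for i := range buf {
		buf[i] = byte(i)
	}
	_, _, commitment, proof, _ := splitBlobDataProof(buf)
	fmt.Println(hex.EncodeToString(commitment[:4])) // 40414243
	fmt.Println(hex.EncodeToString(proof[:4]))      // 70717273
}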
@@ -33,12 +33,13 @@ type BundleProverTask struct {
}

// NewBundleProverTask new a bundle collector
func NewBundleProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BundleProverTask {
func NewBundleProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, expectedVk map[string][]byte, reg prometheus.Registerer) *BundleProverTask {
	bp := &BundleProverTask{
		BaseProverTask: BaseProverTask{
			db:         db,
			chainCfg:   chainCfg,
			cfg:        cfg,
			expectedVk: expectedVk,
			blockOrm:   orm.NewL2Block(db),
			chunkOrm:   orm.NewChunk(db),
			batchOrm:   orm.NewBatch(db),
@@ -81,10 +82,37 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
	for i := 0; i < 5; i++ {
		var getTaskError error
		var tmpBundleTask *orm.Bundle
		tmpBundleTask, getTaskError = bp.bundleOrm.GetAssignedBundle(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
		if getTaskError != nil {
			log.Error("failed to get assigned bundle proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
			return nil, ErrCoordinatorInternalFailure

		if taskCtx.hasAssignedTask != nil {
			if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeBundle) {
				return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
			}

			tmpBundleTask, getTaskError = bp.bundleOrm.GetBundleByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
			if getTaskError != nil {
				log.Error("failed to get bundle has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
				return nil, ErrCoordinatorInternalFailure
			} else if tmpBundleTask == nil {
				// if the assigned bundle was dropped, assigning another one would cause too many issues
				return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped bundle. ProverName: %s, ProverVersion: %s",
					taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
			}
		} else if getTaskParameter.TaskID != "" {
			tmpBundleTask, getTaskError = bp.bundleOrm.GetBundleByHash(ctx.Copy(), getTaskParameter.TaskID)
			if getTaskError != nil {
				log.Error("failed to get expected bundle", "taskID", getTaskParameter.TaskID, "err", getTaskError)
				return nil, ErrCoordinatorInternalFailure
			} else if tmpBundleTask == nil {
				return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
			}
		}

		if tmpBundleTask == nil {
			tmpBundleTask, getTaskError = bp.bundleOrm.GetAssignedBundle(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
			if getTaskError != nil {
				log.Error("failed to get assigned bundle proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
				return nil, ErrCoordinatorInternalFailure
			}
		}

		// Why fetch again? To support assigning one task to multiple provers, bundles in `ProvingTaskAssigned` status must be assignable too
@@ -112,31 +140,33 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
			return nil, nil
		}

		// Don't dispatch the same failing job to the same prover
		proverTasks, getTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBundle, tmpBundleTask.Hash, 2)
		if getTaskError != nil {
			log.Error("failed to get prover tasks", "proof type", message.ProofTypeBundle.String(), "task ID", tmpBundleTask.Hash, "error", getTaskError)
			return nil, ErrCoordinatorInternalFailure
		}
		for i := 0; i < len(proverTasks); i++ {
			if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
				taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
				log.Debug("get empty bundle, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBundleTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
				return nil, nil
		// we simply pick the bundle that was already assigned, so don't bother updating attempts or re-checking earlier failures
		if taskCtx.hasAssignedTask == nil {
			// Don't dispatch the same failing job to the same prover
			proverTasks, getTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBundle, tmpBundleTask.Hash, 2)
			if getTaskError != nil {
				log.Error("failed to get prover tasks", "proof type", message.ProofTypeBundle.String(), "task ID", tmpBundleTask.Hash, "error", getTaskError)
				return nil, ErrCoordinatorInternalFailure
			}
			for i := 0; i < len(proverTasks); i++ {
				if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
					taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
					log.Debug("get empty bundle, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBundleTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
					return nil, nil
				}
			}

			rowsAffected, updateAttemptsErr := bp.bundleOrm.UpdateBundleAttempts(ctx.Copy(), tmpBundleTask.Hash, tmpBundleTask.ActiveAttempts, tmpBundleTask.TotalAttempts)
			if updateAttemptsErr != nil {
				log.Error("failed to update bundle attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
				return nil, ErrCoordinatorInternalFailure
			}

			if rowsAffected == 0 {
				time.Sleep(100 * time.Millisecond)
				continue
			}
		}

		rowsAffected, updateAttemptsErr := bp.bundleOrm.UpdateBundleAttempts(ctx.Copy(), tmpBundleTask.Hash, tmpBundleTask.ActiveAttempts, tmpBundleTask.TotalAttempts)
		if updateAttemptsErr != nil {
			log.Error("failed to update bundle attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
			return nil, ErrCoordinatorInternalFailure
		}

		if rowsAffected == 0 {
			time.Sleep(100 * time.Millisecond)
			continue
		}

		bundleTask = tmpBundleTask
		break
	}
@@ -147,19 +177,24 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
	}

	log.Info("start bundle proof generation session", "task index", bundleTask.Index, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
	proverTask := orm.ProverTask{
		TaskID:          bundleTask.Hash,
		ProverPublicKey: taskCtx.PublicKey,
		TaskType:        int16(message.ProofTypeBundle),
		ProverName:      taskCtx.ProverName,
		ProverVersion:   taskCtx.ProverVersion,
		ProvingStatus:   int16(types.ProverAssigned),
		FailureType:     int16(types.ProverTaskFailureTypeUndefined),
		// why UTC time is needed here: see scroll/common/database/db.go
		AssignedAt: utils.NowUTC(),
	var proverTask *orm.ProverTask
	if taskCtx.hasAssignedTask == nil {
		proverTask = &orm.ProverTask{
			TaskID:          bundleTask.Hash,
			ProverPublicKey: taskCtx.PublicKey,
			TaskType:        int16(message.ProofTypeBundle),
			ProverName:      taskCtx.ProverName,
			ProverVersion:   taskCtx.ProverVersion,
			ProvingStatus:   int16(types.ProverAssigned),
			FailureType:     int16(types.ProverTaskFailureTypeUndefined),
			// why UTC time is needed here: see scroll/common/database/db.go
			AssignedAt: utils.NowUTC(),
		}
	} else {
		proverTask = taskCtx.hasAssignedTask
	}

	taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, hardForkName)
	taskMsg, err := bp.formatProverTask(ctx.Copy(), proverTask, hardForkName)
	if err != nil {
		bp.recoverActiveAttempts(ctx, bundleTask)
		log.Error("format bundle prover task failure", "task_id", bundleTask.Hash, "err", err)
@@ -170,19 +205,29 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
		taskMsg, metadata, err = bp.applyUniversal(taskMsg)
		if err != nil {
			bp.recoverActiveAttempts(ctx, bundleTask)
			log.Error("Generate universal prover task failure", "task_id", bundleTask.Hash, "type", "bundle")
			log.Error("Generate universal prover task failure", "task_id", bundleTask.Hash, "type", "bundle", "err", err)
			return nil, ErrCoordinatorInternalFailure
		}
		// bundle proofs require a snark
		taskMsg.UseSnark = true
		proverTask.Metadata = metadata

		if isCompatibilityFixingVersion(taskCtx.ProverVersion) {
			log.Info("Apply compatibility fixing for prover", "version", taskCtx.ProverVersion)
			if err := fixCompatibility(taskMsg); err != nil {
				log.Error("apply compatibility failure", "err", err)
				return nil, ErrCoordinatorInternalFailure
			}
		}
	}

	// Store session info.
	if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
		bp.recoverActiveAttempts(ctx, bundleTask)
		log.Error("insert bundle prover task info fail", "task_id", bundleTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
		return nil, ErrCoordinatorInternalFailure
	if taskCtx.hasAssignedTask == nil {
		if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), proverTask); err != nil {
			bp.recoverActiveAttempts(ctx, bundleTask)
			log.Error("insert bundle prover task info fail", "task_id", bundleTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
			return nil, ErrCoordinatorInternalFailure
		}
	}
	// notice uuid is set as a side effect of InsertProverTask
	taskMsg.UUID = proverTask.UUID.String()
@@ -209,9 +254,14 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
		return nil, fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", task.TaskID)
	}

	parentBatch, err := bp.batchOrm.GetBatchByHash(ctx, batches[0].ParentBatchHash)
	if err != nil {
		return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
	var prevStateRoot common.Hash
	// this would be common in test cases: the first batch has empty parent
	if batches[0].Index > 1 {
		parentBatch, err := bp.batchOrm.GetBatchByHash(ctx, batches[0].ParentBatchHash)
		if err != nil {
			return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
		}
		prevStateRoot = common.HexToHash(parentBatch.StateRoot)
	}

	var batchProofs []*message.OpenVMBatchProof
@@ -223,20 +273,21 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
		batchProofs = append(batchProofs, &proof)
	}

	taskDetail := message.BundleTaskDetail{
		BatchProofs: batchProofs,
	// Get the version byte.
	version, err := bp.version(hardForkName)
	if err != nil {
		return nil, fmt.Errorf("failed to decode version byte: %w", err)
	}

	if hardForkName == message.EuclidV2Fork {
		taskDetail.ForkName = message.EuclidV2ForkNameForProver
	} else {
		log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
		return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
	taskDetail := message.BundleTaskDetail{
		Version:     version,
		BatchProofs: batchProofs,
		ForkName:    hardForkName,
	}

	taskDetail.BundleInfo = &message.OpenVMBundleInfo{
		ChainID:       bp.cfg.L2.ChainID,
		PrevStateRoot: common.HexToHash(parentBatch.StateRoot),
		PrevStateRoot: prevStateRoot,
		PostStateRoot: common.HexToHash(batches[len(batches)-1].StateRoot),
		WithdrawRoot:  common.HexToHash(batches[len(batches)-1].WithdrawRoot),
		NumBatches:    uint32(len(batches)),

@@ -33,12 +33,13 @@ type ChunkProverTask struct {
|
||||
}
|
||||
|
||||
// NewChunkProverTask new a chunk prover task
|
||||
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProverTask {
|
||||
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, expectedVk map[string][]byte, reg prometheus.Registerer) *ChunkProverTask {
|
||||
cp := &ChunkProverTask{
|
||||
BaseProverTask: BaseProverTask{
|
||||
db: db,
|
||||
cfg: cfg,
|
||||
chainCfg: chainCfg,
|
||||
expectedVk: expectedVk,
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
blockOrm: orm.NewL2Block(db),
|
||||
proverTaskOrm: orm.NewProverTask(db),
|
||||
@@ -79,12 +80,39 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
for i := 0; i < 5; i++ {
|
||||
var getTaskError error
|
||||
var tmpChunkTask *orm.Chunk
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
|
||||
if taskCtx.hasAssignedTask != nil {
|
||||
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeChunk) {
|
||||
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
|
||||
}
|
||||
|
||||
log.Debug("retrieved assigned task chunk", "taskID", taskCtx.hasAssignedTask.TaskID, "prover", taskCtx.ProverName)
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetChunkByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get chunk has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
} else if tmpChunkTask == nil {
|
||||
// if the assigned chunk dropped, there would be too much issue to assign another
|
||||
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped chunk. ProverName: %s, ProverVersion: %s",
|
||||
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
|
||||
}
|
||||
} else if getTaskParameter.TaskID != "" {
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetChunkByHash(ctx.Copy(), getTaskParameter.TaskID)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get expected chunk", "taskID", getTaskParameter.TaskID, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
} else if tmpChunkTask == nil {
|
||||
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
|
||||
}
|
||||
}
|
||||
|
||||
if tmpChunkTask == nil {
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
}
|
||||
// Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned`
|
||||
// chunk to prover. But use `proving_status in (1, 2)` will not use the postgres index. So need split the sql.
|
||||
if tmpChunkTask == nil {
|
||||
@@ -110,31 +138,33 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Don't dispatch the same failing job to the same prover
|
||||
proverTasks, getFailedTaskError := cp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeChunk, tmpChunkTask.Hash, 2)
|
||||
if getFailedTaskError != nil {
|
||||
log.Error("failed to get prover tasks", "proof type", message.ProofTypeChunk.String(), "task ID", tmpChunkTask.Hash, "error", getFailedTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
for i := 0; i < len(proverTasks); i++ {
|
||||
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
|
||||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
|
||||
log.Debug("get empty chunk, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpChunkTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
|
||||
return nil, nil
|
||||
// we simply pick the chunk that has already been assigned, so there is no need to update attempts or re-check earlier failures
|
||||
if taskCtx.hasAssignedTask == nil {
|
||||
// Don't dispatch the same failing job to the same prover
|
||||
proverTasks, getFailedTaskError := cp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeChunk, tmpChunkTask.Hash, 2)
|
||||
if getFailedTaskError != nil {
|
||||
log.Error("failed to get prover tasks", "proof type", message.ProofTypeChunk.String(), "task ID", tmpChunkTask.Hash, "error", getFailedTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
for i := 0; i < len(proverTasks); i++ {
|
||||
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
|
||||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
|
||||
log.Debug("get empty chunk, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpChunkTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
|
||||
if updateAttemptsErr != nil {
|
||||
log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
if rowsAffected == 0 {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
|
||||
if updateAttemptsErr != nil {
|
||||
log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
if rowsAffected == 0 {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
|
||||
chunkTask = tmpChunkTask
|
||||
break
|
||||
}
|
||||
@@ -145,19 +175,24 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
}
|
||||
|
||||
log.Info("start chunk generation session", "task_id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
|
||||
proverTask := orm.ProverTask{
|
||||
TaskID: chunkTask.Hash,
|
||||
ProverPublicKey: taskCtx.PublicKey,
|
||||
TaskType: int16(message.ProofTypeChunk),
|
||||
ProverName: taskCtx.ProverName,
|
||||
ProverVersion: taskCtx.ProverVersion,
|
||||
ProvingStatus: int16(types.ProverAssigned),
|
||||
FailureType: int16(types.ProverTaskFailureTypeUndefined),
|
||||
// UTC time is required here; see scroll/common/database/db.go
|
||||
AssignedAt: utils.NowUTC(),
|
||||
var proverTask *orm.ProverTask
|
||||
if taskCtx.hasAssignedTask == nil {
|
||||
proverTask = &orm.ProverTask{
|
||||
TaskID: chunkTask.Hash,
|
||||
ProverPublicKey: taskCtx.PublicKey,
|
||||
TaskType: int16(message.ProofTypeChunk),
|
||||
ProverName: taskCtx.ProverName,
|
||||
ProverVersion: taskCtx.ProverVersion,
|
||||
ProvingStatus: int16(types.ProverAssigned),
|
||||
FailureType: int16(types.ProverTaskFailureTypeUndefined),
|
||||
// UTC time is required here; see scroll/common/database/db.go
|
||||
AssignedAt: utils.NowUTC(),
|
||||
}
|
||||
} else {
|
||||
proverTask = taskCtx.hasAssignedTask
|
||||
}
|
||||
|
||||
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, chunkTask, hardForkName)
|
||||
taskMsg, err := cp.formatProverTask(ctx.Copy(), proverTask, chunkTask, hardForkName)
|
||||
if err != nil {
|
||||
cp.recoverActiveAttempts(ctx, chunkTask)
|
||||
log.Error("format prover task failure", "task_id", chunkTask.Hash, "err", err)
|
||||
@@ -169,16 +204,18 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
taskMsg, metadata, err = cp.applyUniversal(taskMsg)
|
||||
if err != nil {
|
||||
cp.recoverActiveAttempts(ctx, chunkTask)
|
||||
log.Error("Generate universal prover task failure", "task_id", chunkTask.Hash, "type", "chunk")
|
||||
log.Error("Generate universal prover task failure", "task_id", chunkTask.Hash, "type", "chunk", "err", err)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
proverTask.Metadata = metadata
|
||||
}
|
||||
|
||||
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
|
||||
cp.recoverActiveAttempts(ctx, chunkTask)
|
||||
log.Error("insert chunk prover task fail", "task_id", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
if taskCtx.hasAssignedTask == nil {
|
||||
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), proverTask); err != nil {
|
||||
cp.recoverActiveAttempts(ctx, chunkTask)
|
||||
log.Error("insert chunk prover task fail", "task_id", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
}
|
||||
// notice uuid is set as a side effect of InsertProverTask
|
||||
taskMsg.UUID = proverTask.UUID.String()
|
||||
@@ -197,23 +234,24 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
|
||||
// Get block hashes.
|
||||
blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)
|
||||
if dbErr != nil || len(blockHashes) == 0 {
|
||||
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%w", task.TaskID, dbErr)
|
||||
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%v", task.TaskID, dbErr)
|
||||
}
|
||||
|
||||
// Get the version byte.
|
||||
version, err := cp.version(hardForkName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode version byte: %w", err)
|
||||
}
|
||||
|
||||
var taskDetailBytes []byte
|
||||
taskDetail := message.ChunkTaskDetail{
|
||||
Version: version,
|
||||
BlockHashes: blockHashes,
|
||||
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
|
||||
PostMsgQueueHash: common.HexToHash(chunk.PostL1MessageQueueHash),
|
||||
ForkName: hardForkName,
|
||||
}
|
||||
|
||||
if hardForkName == message.EuclidV2Fork {
|
||||
taskDetail.ForkName = message.EuclidV2ForkNameForProver
|
||||
} else {
|
||||
log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
|
||||
return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
|
||||
}
|
||||
|
||||
var err error
|
||||
taskDetailBytes, err = json.Marshal(taskDetail)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal block hashes hash:%s, err:%w", task.TaskID, err)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package provertask
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
@@ -14,11 +15,13 @@ import (
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/version"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/logic/libzkp"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
"scroll-tech/coordinator/internal/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -38,9 +41,10 @@ type ProverTask interface {
|
||||
|
||||
// BaseProverTask is a base prover task that contains a series of shared helper functions
|
||||
type BaseProverTask struct {
|
||||
cfg *config.Config
|
||||
chainCfg *params.ChainConfig
|
||||
db *gorm.DB
|
||||
cfg *config.Config
|
||||
chainCfg *params.ChainConfig
|
||||
db *gorm.DB
|
||||
expectedVk map[string][]byte
|
||||
|
||||
batchOrm *orm.Batch
|
||||
chunkOrm *orm.Chunk
|
||||
@@ -57,10 +61,22 @@ type proverTaskContext struct {
|
||||
ProverProviderType uint8
|
||||
HardForkNames map[string]struct{}
|
||||
|
||||
taskType message.ProofType
|
||||
chunkTask *orm.Chunk
|
||||
batchTask *orm.Batch
|
||||
bundleTask *orm.Bundle
|
||||
taskType message.ProofType
|
||||
chunkTask *orm.Chunk
|
||||
batchTask *orm.Batch
|
||||
bundleTask *orm.Bundle
|
||||
hasAssignedTask *orm.ProverTask
|
||||
}
|
||||
|
||||
func (b *BaseProverTask) version(hardForkName string) (uint8, error) {
|
||||
return utils.Version(hardForkName, b.validiumMode())
|
||||
}
|
||||
|
||||
// validiumMode induces different behavior in task generation:
|
||||
// + skip the point_evaluation part in batch task
|
||||
// + encode batch header with codec in utils instead of da-codec
|
||||
func (b *BaseProverTask) validiumMode() bool {
|
||||
return b.cfg.L2.ValidiumMode
|
||||
}
|
||||
|
||||
// hardForkName gets the chunk/batch/bundle hard fork name
|
||||
@@ -175,19 +191,31 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, e
|
||||
return nil, fmt.Errorf("public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", publicKey, proverName, proverVersion)
|
||||
}
|
||||
|
||||
isAssigned, err := b.proverTaskOrm.IsProverAssigned(ctx.Copy(), publicKey.(string))
|
||||
assigned, err := b.proverTaskOrm.IsProverAssigned(ctx.Copy(), publicKey.(string))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check if prover %s is assigned a task, err: %w", publicKey.(string), err)
|
||||
}
|
||||
|
||||
if isAssigned {
|
||||
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", publicKey, proverName, proverVersion)
|
||||
}
|
||||
ptc.hasAssignedTask = assigned
|
||||
return &ptc, nil
|
||||
}
|
||||
|
||||
func (b *BaseProverTask) applyUniversal(schema *coordinatorType.GetTaskSchema) (*coordinatorType.GetTaskSchema, []byte, error) {
|
||||
ok, uTaskData, metadata, _ := libzkp.GenerateUniversalTask(schema.TaskType, schema.TaskData, schema.HardForkName)
|
||||
expectedVk, ok := b.expectedVk[schema.HardForkName]
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("no expectedVk found from hardfork %s", schema.HardForkName)
|
||||
}
|
||||
|
||||
var decryptionKey []byte
|
||||
if b.cfg.L2.ValidiumMode {
|
||||
var err error
|
||||
decryptionKey, err = hex.DecodeString(b.cfg.Sequencer.DecryptionKey)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("sequencer decryption key hex-decoding failed")
|
||||
}
|
||||
}
|
||||
|
||||
ok, uTaskData, metadata, _ := libzkp.GenerateUniversalTask(schema.TaskType, schema.TaskData, schema.HardForkName, expectedVk, decryptionKey)
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("can not generate universal task, see coordinator log for the reason")
|
||||
}
|
||||
@@ -196,6 +224,23 @@ func (b *BaseProverTask) applyUniversal(schema *coordinatorType.GetTaskSchema) (
|
||||
return schema, []byte(metadata), nil
|
||||
}
|
||||
|
||||
const CompatibilityVersion = "4.5.43"
|
||||
|
||||
func isCompatibilityFixingVersion(ver string) bool {
|
||||
return !version.CheckScrollRepoVersion(ver, CompatibilityVersion)
|
||||
}
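// Illustrative semantics (an assumption: CheckScrollRepoVersion is taken to
// report whether ver >= CompatibilityVersion under semver-style comparison),
// so provers older than 4.5.43 receive the compatibility-fixed payload:
//
//	isCompatibilityFixingVersion("v4.5.40") // true  -> run fixCompatibility
//	isCompatibilityFixingVersion("v4.5.45") // false -> send the task unchanged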
|
||||
|
||||
func fixCompatibility(schema *coordinatorType.GetTaskSchema) error {
|
||||
|
||||
fixedTask, err := libzkp.UniversalTaskCompatibilityFix(schema.TaskData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
schema.TaskData = fixedTask
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
|
||||
getTaskCounterInitOnce.Do(func() {
|
||||
getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{
|
||||
|
||||
@@ -71,6 +71,9 @@ type ProofReceiverLogic struct {
|
||||
validateFailureProverTaskStatusNotOk prometheus.Counter
|
||||
validateFailureProverTaskTimeout prometheus.Counter
|
||||
validateFailureProverTaskHaveVerifier prometheus.Counter
|
||||
proverSpeed *prometheus.GaugeVec
|
||||
provingTime prometheus.Gauge
|
||||
evmCyclePerGas prometheus.Gauge
|
||||
|
||||
ChunkTask provertask.ProverTask
|
||||
BundleTask provertask.ProverTask
|
||||
@@ -79,6 +82,7 @@ type ProofReceiverLogic struct {
|
||||
|
||||
// NewSubmitProofReceiverLogic create a proof receiver logic
|
||||
func NewSubmitProofReceiverLogic(cfg *config.ProverManager, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *ProofReceiverLogic {
|
||||
|
||||
return &ProofReceiverLogic{
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
batchOrm: orm.NewBatch(db),
|
||||
@@ -133,6 +137,18 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, chainCfg *params.Cha
|
||||
Name: "coordinator_validate_failure_submit_have_been_verifier",
|
||||
Help: "Total number of submit proof validate failure proof have been verifier.",
|
||||
}),
|
||||
evmCyclePerGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "evm_circuit_cycle_per_gas",
|
||||
Help: "VM cycles cost for a gas unit cost in evm execution",
|
||||
}),
|
||||
provingTime: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "chunk_proving_time",
|
||||
Help: "Wall clock time for chunk proving in second",
|
||||
}),
|
||||
proverSpeed: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "prover_speed",
|
||||
Help: "Cycle against running time of prover (in mhz)",
|
||||
}, []string{"type", "phase"}),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -178,7 +194,20 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
|
||||
if len(proverTask.Metadata) == 0 {
|
||||
return errors.New("can not re-wrapping proof: no metadata has been recorded in advance")
|
||||
}
|
||||
proofParameter.Proof = libzkp.GenerateWrappedProof(proofParameter.Proof, string(proverTask.Metadata), []byte{})
|
||||
var expected_vk []byte
|
||||
switch message.ProofType(proofParameter.TaskType) {
|
||||
case message.ProofTypeChunk:
|
||||
expected_vk = m.verifier.ChunkVk[hardForkName]
|
||||
case message.ProofTypeBatch:
|
||||
expected_vk = m.verifier.BatchVk[hardForkName]
|
||||
case message.ProofTypeBundle:
|
||||
expected_vk = m.verifier.BundleVk[hardForkName]
|
||||
}
|
||||
if len(expected_vk) == 0 {
|
||||
return errors.New("no vk specified match current hard fork, check your config")
|
||||
}
|
||||
|
||||
proofParameter.Proof = libzkp.GenerateWrappedProof(proofParameter.Proof, string(proverTask.Metadata), expected_vk)
|
||||
if proofParameter.Proof == "" {
|
||||
return errors.New("can not re-wrapping proof, see coordinator log for reason")
|
||||
}
|
||||
@@ -191,12 +220,34 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
|
||||
return unmarshalErr
|
||||
}
|
||||
success, verifyErr = m.verifier.VerifyChunkProof(chunkProof, hardForkName)
|
||||
if stat := chunkProof.VmProof.Stat; stat != nil {
|
||||
if g, _ := m.proverSpeed.GetMetricWithLabelValues("chunk", "exec"); g != nil && stat.ExecutionTimeMills > 0 {
|
||||
g.Set(float64(stat.TotalCycle) / float64(stat.ExecutionTimeMills*1000))
|
||||
}
|
||||
if g, _ := m.proverSpeed.GetMetricWithLabelValues("chunk", "proving"); g != nil && stat.ProvingTimeMills > 0 {
|
||||
g.Set(float64(stat.TotalCycle) / float64(stat.ProvingTimeMills*1000))
|
||||
}
|
||||
if chunkProof.MetaData.TotalGasUsed > 0 {
|
||||
cycle_per_gas := float64(stat.TotalCycle) / float64(chunkProof.MetaData.TotalGasUsed)
|
||||
m.evmCyclePerGas.Set(cycle_per_gas)
|
||||
}
|
||||
m.provingTime.Set(float64(stat.ProvingTimeMills) / 1000)
|
||||
}
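// Unit check for the speed gauges above (a reviewer note, not code from this
// change): TotalCycle / (timeMills * 1000) is cycles per microsecond, i.e.
// MHz, matching the "prover_speed ... (in mhz)" help text. For example,
// 3_000_000_000 cycles / (60_000 ms * 1000) = 50 MHz.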
|
||||
|
||||
case message.ProofTypeBatch:
|
||||
batchProof := &message.OpenVMBatchProof{}
|
||||
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &batchProof); unmarshalErr != nil {
|
||||
return unmarshalErr
|
||||
}
|
||||
success, verifyErr = m.verifier.VerifyBatchProof(batchProof, hardForkName)
|
||||
if stat := batchProof.VmProof.Stat; stat != nil {
|
||||
if g, _ := m.proverSpeed.GetMetricWithLabelValues("batch", "exec"); g != nil && stat.ExecutionTimeMills > 0 {
|
||||
g.Set(float64(stat.TotalCycle) / float64(stat.ExecutionTimeMills*1000))
|
||||
}
|
||||
if g, _ := m.proverSpeed.GetMetricWithLabelValues("batch", "proving"); g != nil && stat.ProvingTimeMills > 0 {
|
||||
g.Set(float64(stat.TotalCycle) / float64(stat.ProvingTimeMills*1000))
|
||||
}
|
||||
}
|
||||
case message.ProofTypeBundle:
|
||||
bundleProof := &message.OpenVMBundleProof{}
|
||||
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &bundleProof); unmarshalErr != nil {
|
||||
|
||||
@@ -9,8 +9,14 @@ import (
|
||||
)
|
||||
|
||||
// NewVerifier Sets up a mock verifier.
|
||||
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
|
||||
return &Verifier{cfg: cfg, OpenVMVkMap: map[string]struct{}{"mock_vk": {}}}, nil
|
||||
func NewVerifier(cfg *config.VerifierConfig, _ bool) (*Verifier, error) {
|
||||
return &Verifier{
|
||||
cfg: cfg,
|
||||
OpenVMVkMap: map[string]struct{}{"mock_vk": {}},
|
||||
ChunkVk: map[string][]byte{"euclidV2": []byte("mock_vk")},
|
||||
BatchVk: map[string][]byte{"euclidV2": []byte("mock_vk")},
|
||||
BundleVk: map[string][]byte{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// VerifyChunkProof return a mock verification result for a ChunkProof.
|
||||
|
||||
@@ -11,4 +11,7 @@ const InvalidTestProof = "this is a invalid proof"
|
||||
type Verifier struct {
|
||||
cfg *config.VerifierConfig
|
||||
OpenVMVkMap map[string]struct{}
|
||||
ChunkVk map[string][]byte
|
||||
BatchVk map[string][]byte
|
||||
BundleVk map[string][]byte
|
||||
}
|
||||
|
||||
@@ -4,11 +4,14 @@ package verifier
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
@@ -16,34 +19,54 @@ import (
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/logic/libzkp"
|
||||
"scroll-tech/coordinator/internal/utils"
|
||||
)
|
||||
|
||||
// This struct maps to `CircuitConfig` in libzkp/impl/src/verifier.rs
|
||||
// This struct maps to `CircuitConfig` in libzkp/src/verifier.rs
|
||||
// Defining a brand-new struct here eliminates side effects in case fields
// in `*config.CircuitConfig` are changed
|
||||
type rustCircuitConfig struct {
|
||||
Version uint `json:"version"`
|
||||
ForkName string `json:"fork_name"`
|
||||
AssetsPath string `json:"assets_path"`
|
||||
Features string `json:"features,omitempty"`
|
||||
}
|
||||
|
||||
func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
|
||||
var validiumMode bool
|
||||
|
||||
func newRustCircuitConfig(cfg config.AssetConfig) *rustCircuitConfig {
|
||||
ver := cfg.Version
|
||||
if ver == 0 {
|
||||
var err error
|
||||
ver, err = utils.Version(cfg.ForkName, validiumMode)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
return &rustCircuitConfig{
|
||||
ForkName: cfg.ForkName,
|
||||
Version: uint(ver),
|
||||
AssetsPath: cfg.AssetsPath,
|
||||
ForkName: cfg.ForkName,
|
||||
Features: cfg.Features,
|
||||
}
|
||||
}
|
||||
|
||||
// This struct maps to `VerifierConfig` in coordinator/internal/logic/libzkp/impl/src/verifier.rs
|
||||
// This struct maps to `VerifierConfig` in coordinator/internal/logic/libzkp/src/verifier.rs
|
||||
// Defining a brand-new struct here eliminates side effects in case fields
// in `*config.VerifierConfig` are changed
|
||||
type rustVerifierConfig struct {
|
||||
HighVersionCircuit *rustCircuitConfig `json:"high_version_circuit"`
|
||||
Circuits []*rustCircuitConfig `json:"circuits"`
|
||||
}
|
||||
|
||||
func newRustVerifierConfig(cfg *config.VerifierConfig) *rustVerifierConfig {
|
||||
return &rustVerifierConfig{
|
||||
HighVersionCircuit: newRustCircuitConfig(cfg.HighVersionCircuit),
|
||||
|
||||
out := &rustVerifierConfig{}
|
||||
|
||||
for _, cfg := range cfg.Verifiers {
|
||||
out.Circuits = append(out.Circuits, newRustCircuitConfig(cfg))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
type rustVkDump struct {
|
||||
@@ -53,7 +76,8 @@ type rustVkDump struct {
|
||||
}
|
||||
|
||||
// NewVerifier Sets up a rust ffi to call verify.
|
||||
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
|
||||
func NewVerifier(cfg *config.VerifierConfig, useValidiumMode bool) (*Verifier, error) {
|
||||
validiumMode = useValidiumMode
|
||||
verifierConfig := newRustVerifierConfig(cfg)
|
||||
configBytes, err := json.Marshal(verifierConfig)
|
||||
if err != nil {
|
||||
@@ -65,10 +89,15 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
|
||||
v := &Verifier{
|
||||
cfg: cfg,
|
||||
OpenVMVkMap: make(map[string]struct{}),
|
||||
ChunkVk: make(map[string][]byte),
|
||||
BatchVk: make(map[string][]byte),
|
||||
BundleVk: make(map[string][]byte),
|
||||
}
|
||||
|
||||
if err := v.loadOpenVMVks(message.EuclidV2Fork); err != nil {
|
||||
return nil, err
|
||||
for _, cfg := range cfg.Verifiers {
|
||||
if err := v.loadOpenVMVks(cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return v, nil
|
||||
@@ -108,27 +137,42 @@ func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName
|
||||
return libzkp.VerifyBundleProof(string(buf), forkName), nil
|
||||
}
|
||||
|
||||
func (v *Verifier) ReadVK(filePat string) (string, error) {
|
||||
/*
|
||||
add vks of incompatible circuit apps here to avoid using them unexpectedly
25/07/15: 0.5.0rc0 is no longer compatible due to a breaking change
|
||||
*/
|
||||
const blocked_vks = `
|
||||
rSJNNBpsxBdKlstbIIU/aYc7bHau98Qb2yjZMc5PmDhmGOolp5kYRbvF/VcWcO5HN5ujGs6S00W8pZcCoNQRLQ==,
|
||||
2Lo7Cebm6SFtcsYXipkcMxIBmVY7UpoMXik/Msm7t2nyvi9EaNGsSnDnaCurscYEF+IcdjPUtVtY9EcD7IKwWg==,
|
||||
D6YFHwTLZF/U2zpYJPQ3LwJZRm85yA5Vq2iFBqd3Mk4iwOUpS8sbOp3vg2+NDxhhKphgYpuUlykpdsoRhEt+cw==,
|
||||
`
|
||||
|
||||
f, err := os.Open(filepath.Clean(filePat))
|
||||
if err != nil {
|
||||
return "", err
|
||||
// decodeVkString tries to decode s as hex and, if that fails, as base64.
|
||||
func decodeVkString(s string) ([]byte, error) {
|
||||
// Try hex decoding first
|
||||
if b, err := hex.DecodeString(s); err == nil {
|
||||
return b, nil
|
||||
}
|
||||
byt, err := io.ReadAll(f)
|
||||
// Fallback to base64 decoding
|
||||
b, err := base64.StdEncoding.DecodeString(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(byt), nil
|
||||
if len(b) == 0 {
|
||||
return nil, fmt.Errorf("decode vk string %s fail (empty bytes)", s)
|
||||
}
|
||||
return b, nil
|
||||
}
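// Illustrative usage (an assumed example, not part of this change): both
// encodings round-trip to the same raw bytes, so vk dumps may carry either
// hex or base64 strings.
//
//	vk1, _ := decodeVkString("deadbeef")                              // hex path
//	vk2, _ := decodeVkString(base64.StdEncoding.EncodeToString(vk1)) // base64 fallback
//	// bytes.Equal(vk1, vk2) == true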
|
||||
|
||||
func (v *Verifier) loadOpenVMVks(forkName string) error {
|
||||
tempFile := path.Join(os.TempDir(), "openVmVk.json")
|
||||
err := libzkp.DumpVk(forkName, tempFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
func (v *Verifier) loadOpenVMVks(cfg config.AssetConfig) error {
|
||||
|
||||
f, err := os.Open(filepath.Clean(tempFile))
|
||||
vkFileName := cfg.Vkfile
|
||||
if vkFileName == "" {
|
||||
vkFileName = "openVmVk.json"
|
||||
}
|
||||
vkFile := path.Join(cfg.AssetsPath, vkFileName)
|
||||
|
||||
f, err := os.Open(filepath.Clean(vkFile))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -141,8 +185,36 @@ func (v *Verifier) loadOpenVMVks(forkName string) error {
|
||||
if err := json.Unmarshal(byt, &dump); err != nil {
|
||||
return err
|
||||
}
|
||||
if strings.Contains(blocked_vks, dump.Chunk) {
|
||||
return fmt.Errorf("loaded blocked chunk vk %s", dump.Chunk)
|
||||
}
|
||||
if strings.Contains(blocked_vks, dump.Batch) {
|
||||
return fmt.Errorf("loaded blocked batch vk %s", dump.Batch)
|
||||
}
|
||||
if strings.Contains(blocked_vks, dump.Bundle) {
|
||||
return fmt.Errorf("loaded blocked bundle vk %s", dump.Bundle)
|
||||
}
|
||||
|
||||
v.OpenVMVkMap[dump.Chunk] = struct{}{}
|
||||
v.OpenVMVkMap[dump.Batch] = struct{}{}
|
||||
v.OpenVMVkMap[dump.Bundle] = struct{}{}
|
||||
log.Info("Load vks", "from", cfg.AssetsPath, "chunk", dump.Chunk, "batch", dump.Batch, "bundle", dump.Bundle)
|
||||
|
||||
decodedBytes, err := decodeVkString(dump.Chunk)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.ChunkVk[cfg.ForkName] = decodedBytes
|
||||
decodedBytes, err = decodeVkString(dump.Batch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.BatchVk[cfg.ForkName] = decodedBytes
|
||||
decodedBytes, err = decodeVkString(dump.Bundle)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.BundleVk[cfg.ForkName] = decodedBytes
|
||||
|
||||
return nil
|
||||
}
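// Shape of the vk dump file parsed above, inferred from the rustVkDump fields
// used here (the exact JSON key casing is an assumption):
//
//	{
//	  "chunk":  "<hex or base64 encoded vk>",
//	  "batch":  "<hex or base64 encoded vk>",
//	  "bundle": "<hex or base64 encoded vk>"
//	}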
|
||||
|
||||
@@ -29,11 +29,11 @@ func TestFFI(t *testing.T) {
|
||||
as := assert.New(t)
|
||||
|
||||
cfg := &config.VerifierConfig{
|
||||
HighVersionCircuit: &config.CircuitConfig{
|
||||
AssetsPath: *assetsPathHi,
|
||||
ForkName: "euclidV2",
|
||||
MinProverVersion: "",
|
||||
},
|
||||
MinProverVersion: "",
|
||||
Verifiers: []config.AssetConfig{{
|
||||
AssetsPath: *assetsPathHi,
|
||||
ForkName: "euclidV2",
|
||||
}},
|
||||
}
|
||||
|
||||
v, err := NewVerifier(cfg)
|
||||
|
||||
@@ -57,17 +57,17 @@ func (*ProverTask) TableName() string {
|
||||
}
|
||||
|
||||
// IsProverAssigned returns the in-flight ProverTask assigned to the prover with the given public key, or nil if there is none.
|
||||
func (o *ProverTask) IsProverAssigned(ctx context.Context, publicKey string) (bool, error) {
|
||||
func (o *ProverTask) IsProverAssigned(ctx context.Context, publicKey string) (*ProverTask, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
var task ProverTask
|
||||
err := db.Where("prover_public_key = ? AND proving_status = ?", publicKey, types.ProverAssigned).First(&task).Error
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return false, nil
|
||||
return nil, nil
|
||||
}
|
||||
return false, err
|
||||
return nil, err
|
||||
}
|
||||
return true, nil
|
||||
return &task, nil
|
||||
}
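// Illustrative caller sketch (assumed; it mirrors how checkParameter consumes
// the new return value): returning the task itself instead of a bare bool lets
// the coordinator re-deliver an in-flight assignment rather than reject it.
//
//	assigned, err := proverTaskOrm.IsProverAssigned(ctx, publicKey)
//	if err != nil {
//	    return nil, err
//	}
//	if assigned != nil {
//	    ptc.hasAssignedTask = assigned // hand the same task back to the prover
//	}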
|
||||
|
||||
// GetProverTasks get prover tasks
|
||||
@@ -269,6 +269,24 @@ func (o *ProverTask) UpdateProverTaskProvingStatusAndFailureType(ctx context.Con
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateProverTaskAssignedTime updates the assigned_at time of a specific ProverTask record.
|
||||
func (o *ProverTask) UpdateProverTaskAssignedTime(ctx context.Context, uuid uuid.UUID, t time.Time, dbTX ...*gorm.DB) error {
|
||||
db := o.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&ProverTask{})
|
||||
db = db.Where("uuid = ?", uuid)
|
||||
|
||||
updates := make(map[string]interface{})
|
||||
updates["assigned_at"] = t
|
||||
if err := db.Updates(updates).Error; err != nil {
|
||||
return fmt.Errorf("ProverTask.UpdateProverTaskAssignedTime error: %w, uuid:%s, status: %v", err, uuid, t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateProverTaskFailureType update the prover task failure type
|
||||
func (o *ProverTask) UpdateProverTaskFailureType(ctx context.Context, uuid uuid.UUID, failureType types.ProverTaskFailureType, dbTX ...*gorm.DB) error {
|
||||
db := o.db
|
||||
|
||||
91
coordinator/internal/utils/codec_validium.go
Normal file
@@ -0,0 +1,91 @@
package utils

import (
	"encoding/binary"
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
)

type CodecVersion uint8

const (
	daBatchValidiumEncodedLength = 137
)

type DABatch interface {
	SetHash(common.Hash)
}

type daBatchValidiumV1 struct {
	Version         CodecVersion `json:"version"`
	BatchIndex      uint64       `json:"batch_index"`
	ParentBatchHash common.Hash  `json:"parent_batch_hash"`
	PostStateRoot   common.Hash  `json:"post_state_root"`
	WithDrawRoot    common.Hash  `json:"withdraw_root"`
	Commitment      common.Hash  `json:"commitment"`
}

type daBatchValidium struct {
	V1        *daBatchValidiumV1 `json:"V1,omitempty"`
	BatchHash common.Hash        `json:"batch_hash"`
}

func (da *daBatchValidium) SetHash(h common.Hash) {
	da.BatchHash = h
}

func FromVersion(v uint8) CodecVersion {
	return CodecVersion(v & STFVersionMask)
}
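// Illustrative decode path (an assumed usage example, not part of this change):
// the low 6 bits of the header's first byte select the codec, which then
// decodes the raw 137-byte validium header.
//
//	codec := FromVersion(rawHeader[0])
//	batch, err := codec.DABatchForTaskFromBytes(rawHeader)
//	if err != nil {
//	    // handle malformed header
//	}
//	batch.SetHash(computedHash) // hash computation is left to the caller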

func (c CodecVersion) DABatchForTaskFromBytes(b []byte) (DABatch, error) {
	switch c {
	case 1:
		if v1, err := decodeDABatchV1(b); err == nil {
			return &daBatchValidium{
				V1: v1,
			}, nil
		} else {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("unknown codec type %d", c)
	}
}

func decodeDABatchV1(data []byte) (*daBatchValidiumV1, error) {
	if len(data) != daBatchValidiumEncodedLength {
		return nil, fmt.Errorf("invalid data length for daBatchValidiumV1, expected %d bytes but got %d", daBatchValidiumEncodedLength, len(data))
	}

	const (
		versionSize = 1
		indexSize   = 8
		hashSize    = 32
	)

	// Offsets (same as encodeBatchHeaderValidium)
	versionOffset := 0
	indexOffset := versionOffset + versionSize
	parentHashOffset := indexOffset + indexSize
	stateRootOffset := parentHashOffset + hashSize
	withdrawRootOffset := stateRootOffset + hashSize
	commitmentOffset := withdrawRootOffset + hashSize

	version := CodecVersion(data[versionOffset])
	batchIndex := binary.BigEndian.Uint64(data[indexOffset : indexOffset+indexSize])
	parentBatchHash := common.BytesToHash(data[parentHashOffset : parentHashOffset+hashSize])
	postStateRoot := common.BytesToHash(data[stateRootOffset : stateRootOffset+hashSize])
	withdrawRoot := common.BytesToHash(data[withdrawRootOffset : withdrawRootOffset+hashSize])
	commitment := common.BytesToHash(data[commitmentOffset : commitmentOffset+hashSize])

	return &daBatchValidiumV1{
		Version:         version,
		BatchIndex:      batchIndex,
		ParentBatchHash: parentBatchHash,
		PostStateRoot:   postStateRoot,
		WithDrawRoot:    withdrawRoot,
		Commitment:      commitment,
	}, nil
}
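// Byte layout of the 137-byte validium batch header decoded above
// (1 + 8 + 4*32 = 137; offsets mirror encodeBatchHeaderValidium):
//
//	[0]        version          (1 byte)
//	[1:9]      batch index      (8 bytes, big-endian)
//	[9:41]     parent batch hash
//	[41:73]    post state root
//	[73:105]   withdraw root
//	[105:137]  commitment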
40
coordinator/internal/utils/version.go
Normal file
@@ -0,0 +1,40 @@
package utils

import (
	"errors"
	"strings"
)

const (
	DomainOffset   = 6
	STFVersionMask = (1 << DomainOffset) - 1
)

// Version gets the version byte for the chain instance.
//
// TODO: This is not foolproof and does not cover all scenarios.
func Version(hardForkName string, ValidiumMode bool) (uint8, error) {
	var domain, stfVersion uint8

	if ValidiumMode {
		domain = 1
		stfVersion = 1
	} else {
		domain = 0
		switch canonicalName := strings.ToLower(hardForkName); canonicalName {
		case "euclidv1":
			stfVersion = 6
		case "euclidv2":
			stfVersion = 7
		case "feynman":
			stfVersion = 8
		case "galileo":
			stfVersion = 9
		default:
			return 0, errors.New("unknown fork name " + canonicalName)
		}
	}

	return (domain << DomainOffset) + stfVersion, nil
}
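// Illustrative round-trip (assumed usage): the packed byte splits back into
// its domain bit and STF version with the masks above; FromVersion is defined
// in codec_validium.go in this package.
//
//	v, _ := Version("feynman", false) // domain=0, stfVersion=8 -> v == 8
//	stf := FromVersion(v)             // v & STFVersionMask == 8
//	domain := v >> DomainOffset       // 0 = rollup mode, 1 = validium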
@@ -79,16 +79,17 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
|
||||
tokenTimeout = 60
|
||||
conf = &config.Config{
|
||||
L2: &config.L2{
|
||||
ChainID: 111,
|
||||
ChainID: 111,
|
||||
Endpoint: &config.L2Endpoint{},
|
||||
},
|
||||
ProverManager: &config.ProverManager{
|
||||
ProversPerSession: proversPerSession,
|
||||
Verifier: &config.VerifierConfig{
|
||||
HighVersionCircuit: &config.CircuitConfig{
|
||||
AssetsPath: "",
|
||||
ForkName: "euclidV2",
|
||||
MinProverVersion: "v4.4.89",
|
||||
},
|
||||
MinProverVersion: "v4.4.89",
|
||||
Verifiers: []config.AssetConfig{{
|
||||
AssetsPath: "",
|
||||
ForkName: "euclidV2",
|
||||
}},
|
||||
},
|
||||
BatchCollectionTimeSec: 10,
|
||||
ChunkCollectionTimeSec: 10,
|
||||
@@ -131,7 +132,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
|
||||
func setEnv(t *testing.T) {
|
||||
var err error
|
||||
|
||||
version.Version = "v4.4.89"
|
||||
version.Version = "v4.5.45"
|
||||
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
|
||||
glogger.Verbosity(log.LvlInfo)
|
||||
@@ -583,7 +584,8 @@ func testTimeoutProof(t *testing.T) {
|
||||
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 100, batch.Hash)
|
||||
assert.NoError(t, err)
|
||||
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{}, MetaData: struct {
|
||||
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
|
||||
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
|
||||
TotalGasUsed uint64 `json:"chunk_total_gas"`
|
||||
}{ChunkInfo: &message.ChunkInfo{}}})
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, encodeData)
|
||||
|
||||
@@ -13,6 +13,7 @@ libzkp = { path = "../libzkp" }
|
||||
alloy = { workspace = true, features = ["provider-http", "transport-http", "reqwest", "reqwest-rustls-tls", "json-rpc"] }
|
||||
sbv-primitives = { workspace = true, features = ["scroll"] }
|
||||
sbv-utils = { workspace = true, features = ["scroll"] }
|
||||
sbv-core = { workspace = true, features = ["scroll"] }
|
||||
|
||||
eyre.workspace = true
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ pub fn init(config: &str) -> eyre::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_client() -> rpc_client::RpcClient<'static> {
|
||||
pub fn get_client() -> impl libzkp::tasks::ChunkInterpreter {
|
||||
GLOBAL_L2GETH_CLI
|
||||
.get()
|
||||
.expect("must has been inited")
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
use alloy::{
|
||||
providers::{Provider, ProviderBuilder, RootProvider},
|
||||
providers::{Provider, ProviderBuilder},
|
||||
rpc::client::ClientBuilder,
|
||||
transports::layers::RetryBackoffLayer,
|
||||
};
|
||||
use eyre::Result;
|
||||
use libzkp::tasks::ChunkInterpreter;
|
||||
use sbv_primitives::types::Network;
|
||||
use sbv_primitives::types::{consensus::TxL1Message, Network};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
fn default_max_retry() -> u32 {
|
||||
@@ -49,13 +49,13 @@ pub struct RpcConfig {
|
||||
/// so it can be run in block mode (i.e. inside dynamic library without a global entry)
|
||||
pub struct RpcClientCore {
|
||||
/// rpc prover
|
||||
provider: RootProvider<Network>,
|
||||
client: alloy::rpc::client::RpcClient,
|
||||
rt: tokio::runtime::Runtime,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
pub struct RpcClient<'a> {
|
||||
provider: &'a RootProvider<Network>,
|
||||
pub struct RpcClient<'a, T: Provider<Network>> {
|
||||
provider: T,
|
||||
handle: &'a tokio::runtime::Handle,
|
||||
}
|
||||
|
||||
@@ -75,76 +75,78 @@ impl RpcClientCore {
|
||||
let retry_layer = RetryBackoffLayer::new(config.max_retry, config.backoff, config.cups);
|
||||
let client = ClientBuilder::default().layer(retry_layer).http(rpc);
|
||||
|
||||
Ok(Self {
|
||||
provider: ProviderBuilder::<_, _, Network>::default().on_client(client),
|
||||
rt,
|
||||
})
|
||||
Ok(Self { client, rt })
|
||||
}
|
||||
|
||||
pub fn get_client(&self) -> RpcClient {
|
||||
pub fn get_client(&self) -> RpcClient<'_, impl Provider<Network>> {
|
||||
RpcClient {
|
||||
provider: &self.provider,
|
||||
provider: ProviderBuilder::<_, _, Network>::default()
|
||||
.connect_client(self.client.clone()),
|
||||
handle: self.rt.handle(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ChunkInterpreter for RpcClient<'_> {
|
||||
impl<T: Provider<Network>> ChunkInterpreter for RpcClient<'_, T> {
|
||||
fn try_fetch_block_witness(
|
||||
&self,
|
||||
block_hash: sbv_primitives::B256,
|
||||
prev_witness: Option<&sbv_primitives::types::BlockWitness>,
|
||||
) -> Result<sbv_primitives::types::BlockWitness> {
|
||||
prev_witness: Option<&sbv_core::BlockWitness>,
|
||||
) -> Result<sbv_core::BlockWitness> {
|
||||
async fn fetch_witness_async(
|
||||
provider: &RootProvider<Network>,
|
||||
provider: impl Provider<Network>,
|
||||
block_hash: sbv_primitives::B256,
|
||||
prev_witness: Option<&sbv_primitives::types::BlockWitness>,
|
||||
) -> Result<sbv_primitives::types::BlockWitness> {
|
||||
use alloy::network::primitives::BlockTransactionsKind;
|
||||
use sbv_utils::{rpc::ProviderExt, witness::WitnessBuilder};
|
||||
prev_witness: Option<&sbv_core::BlockWitness>,
|
||||
) -> Result<sbv_core::BlockWitness> {
|
||||
use sbv_utils::rpc::ProviderExt;
|
||||
|
||||
let chain_id = provider.get_chain_id().await?;
|
||||
|
||||
let block = provider
|
||||
.get_block_by_hash(block_hash, BlockTransactionsKind::Full)
|
||||
.await?
|
||||
.ok_or_else(|| eyre::eyre!("Block not found"))?;
|
||||
|
||||
let number = block.header.number;
|
||||
if number == 0 {
|
||||
eyre::bail!("no number in header or use block 0");
|
||||
}
|
||||
|
||||
let prev_state_root = if let Some(witness) = prev_witness {
|
||||
if witness.header.number != number - 1 {
|
||||
eyre::bail!(
|
||||
"the ref witness is not the previous block, expected {} get {}",
|
||||
number - 1,
|
||||
witness.header.number,
|
||||
);
|
||||
}
|
||||
witness.header.state_root
|
||||
let (chain_id, block_num, prev_state_root) = if let Some(w) = prev_witness {
|
||||
(w.chain_id, w.header.number + 1, w.header.state_root)
|
||||
} else {
|
||||
provider
|
||||
.scroll_disk_root((number - 1).into())
|
||||
let chain_id = provider.get_chain_id().await?;
|
||||
let block = provider
|
||||
.get_block_by_hash(block_hash)
|
||||
.full()
|
||||
.await?
|
||||
.disk_root
|
||||
.ok_or_else(|| eyre::eyre!("Block {block_hash} not found"))?;
|
||||
|
||||
let parent_block = provider
|
||||
.get_block_by_hash(block.header.parent_hash)
|
||||
.await?
|
||||
.ok_or_else(|| {
|
||||
eyre::eyre!(
|
||||
"parent block for block {} should exist",
|
||||
block.header.number
|
||||
)
|
||||
})?;
|
||||
|
||||
(
|
||||
chain_id,
|
||||
block.header.number,
|
||||
parent_block.header.state_root,
|
||||
)
|
||||
};
|
||||
|
||||
let witness = WitnessBuilder::new()
|
||||
.block(block)
|
||||
.chain_id(chain_id)
|
||||
.execution_witness(provider.debug_execution_witness(number.into()).await?)
|
||||
.state_root(provider.scroll_disk_root(number.into()).await?.disk_root)?
|
||||
.prev_state_root(prev_state_root)
|
||||
.build()?;
|
||||
let req = provider
|
||||
.dump_block_witness(block_num)
|
||||
.with_chain_id(chain_id)
|
||||
.with_prev_state_root(prev_state_root);
|
||||
|
||||
let witness = req
|
||||
.send()
|
||||
.await
|
||||
.transpose()
|
||||
.ok_or_else(|| eyre::eyre!("Block witness {block_num} not available"))??;
|
||||
|
||||
Ok(witness)
|
||||
}
|
||||
|
||||
tracing::debug!("fetch witness for {block_hash}");
|
||||
self.handle
|
||||
.block_on(fetch_witness_async(self.provider, block_hash, prev_witness))
|
||||
self.handle.block_on(fetch_witness_async(
|
||||
&self.provider,
|
||||
block_hash,
|
||||
prev_witness,
|
||||
))
|
||||
}
|
||||
|
||||
fn try_fetch_storage_node(
|
||||
@@ -152,7 +154,7 @@ impl ChunkInterpreter for RpcClient<'_> {
|
||||
node_hash: sbv_primitives::B256,
|
||||
) -> Result<sbv_primitives::Bytes> {
|
||||
async fn fetch_storage_node_async(
|
||||
provider: &RootProvider<Network>,
|
||||
provider: impl Provider<Network>,
|
||||
node_hash: sbv_primitives::B256,
|
||||
) -> Result<sbv_primitives::Bytes> {
|
||||
let ret = provider
|
||||
@@ -164,7 +166,41 @@ impl ChunkInterpreter for RpcClient<'_> {
|
||||
|
||||
tracing::debug!("fetch storage node for {node_hash}");
|
||||
self.handle
|
||||
.block_on(fetch_storage_node_async(self.provider, node_hash))
|
||||
.block_on(fetch_storage_node_async(&self.provider, node_hash))
|
||||
}
|
||||
|
||||
fn try_fetch_l1_msgs(&self, block_number: u64) -> Result<Vec<TxL1Message>> {
|
||||
async fn fetch_l1_msgs(
|
||||
provider: impl Provider<Network>,
|
||||
block_number: u64,
|
||||
) -> Result<Vec<TxL1Message>> {
|
||||
let block_number_hex = format!("0x{:x}", block_number);
|
||||
|
||||
#[derive(Deserialize, Debug)]
|
||||
#[serde(untagged)]
|
||||
enum NullOrVec {
|
||||
Null, // matches JSON `null`
|
||||
Vec(Vec<TxL1Message>), // matches JSON array
|
||||
}
|
||||
|
||||
Ok(
|
||||
match provider
|
||||
.client()
|
||||
.request::<_, NullOrVec>(
|
||||
"scroll_getL1MessagesInBlock",
|
||||
(block_number_hex, "synced"),
|
||||
)
|
||||
.await?
|
||||
{
|
||||
NullOrVec::Null => Vec::new(),
|
||||
NullOrVec::Vec(r) => r,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
tracing::debug!("fetch L1 msgs for {block_number}");
|
||||
self.handle
|
||||
.block_on(fetch_l1_msgs(&self.provider, block_number))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -190,10 +226,10 @@ mod tests {
|
||||
let client_core = RpcClientCore::create(&config).expect("Failed to create RPC client");
|
||||
let client = client_core.get_client();
|
||||
|
||||
// latest - 1 block in 2025.6.15
|
||||
// latest - 1 block in 2025.9.11
|
||||
let block_hash = B256::from(
|
||||
hex::const_decode_to_array(
|
||||
b"0x9535a6970bc4db9031749331a214e35ed8c8a3f585f6f456d590a0bc780a1368",
|
||||
b"0x093fb6bf2e556a659b35428ac447cd9f0635382fc40ffad417b5910824f9e932",
|
||||
)
|
||||
.unwrap(),
|
||||
);
|
||||
@@ -203,10 +239,10 @@ mod tests {
|
||||
.try_fetch_block_witness(block_hash, None)
|
||||
.expect("should success");
|
||||
|
||||
// latest block in 2025.6.15
|
||||
// block selected in 2025.9.11
|
||||
let block_hash = B256::from(
|
||||
hex::const_decode_to_array(
|
||||
b"0xd47088cdb6afc68aa082e633bb7da9340d29c73841668afacfb9c1e66e557af0",
|
||||
b"0x77cc84dd7a4dedf6fe5fb9b443aeb5a4fb0623ad088a365d3232b7b23fc848e5",
|
||||
)
|
||||
.unwrap(),
|
||||
);
|
||||
@@ -219,23 +255,13 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
#[ignore = "Requires L2GETH_ENDPOINT environment variable"]
|
||||
fn test_try_fetch_storage_node() {
|
||||
fn test_try_fetch_l1_messages() {
|
||||
let config = create_config_from_env();
|
||||
let client_core = RpcClientCore::create(&config).expect("Failed to create RPC client");
|
||||
let client = client_core.get_client();
|
||||
|
||||
// the root node (state root) of the block in unittest above
|
||||
let node_hash = B256::from(
|
||||
hex::const_decode_to_array(
|
||||
b"0xb9e67403a2eb35afbb0475fe942918cf9a330a1d7532704c24554506be62b27c",
|
||||
)
|
||||
.unwrap(),
|
||||
);
|
||||
let msgs = client.try_fetch_l1_msgs(32).expect("should success");
|
||||
|
||||
// This is expected to fail since we're using a dummy hash, but it tests the code path
|
||||
let node = client
|
||||
.try_fetch_storage_node(node_hash)
|
||||
.expect("should success");
|
||||
println!("{}", serde_json::to_string_pretty(&node).unwrap());
|
||||
println!("{}", serde_json::to_string_pretty(&msgs).unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,12 +3,17 @@ name = "libzkp"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
|
||||
[lib]
|
||||
crate-type = ["rlib", "cdylib"]
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
[dependencies]
|
||||
scroll-zkvm-types.workspace = true
|
||||
scroll-zkvm-verifier-euclid.workspace = true
|
||||
scroll-zkvm-types = { workspace = true, features = ["scroll"] }
|
||||
scroll-zkvm-verifier.workspace = true
|
||||
|
||||
sbv-primitives.workspace = true
|
||||
alloy-primitives.workspace = true #depress the effect of "native-keccak"
|
||||
sbv-primitives = {workspace = true, features = ["scroll-compress-info", "scroll"]}
|
||||
sbv-core = { workspace = true, features = ["scroll"] }
|
||||
base64.workspace = true
|
||||
serde.workspace = true
|
||||
serde_derive.workspace = true
|
||||
@@ -17,7 +22,8 @@ tracing.workspace = true
|
||||
eyre.workspace = true
|
||||
|
||||
git-version = "0.3.5"
|
||||
bincode = { version = "2", features = ["serde"] }
|
||||
serde_stacker = "0.1"
|
||||
regex = "1.11"
|
||||
c-kzg = { version = "1.0", features = ["serde"] }
|
||||
c-kzg = { version = "2.0", features = ["serde"] }
|
||||
|
||||
|
||||
@@ -1,36 +1,121 @@
|
||||
pub mod proofs;
|
||||
pub mod tasks;
|
||||
pub use tasks::ProvingTaskExt;
|
||||
pub mod verifier;
|
||||
use verifier::HardForkName;
|
||||
pub use verifier::{TaskType, VerifierConfig};
|
||||
mod utils;
|
||||
|
||||
use sbv_primitives::B256;
|
||||
use scroll_zkvm_types::util::vec_as_base64;
|
||||
use scroll_zkvm_types::{utils::vec_as_base64, version::Version};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::value::RawValue;
|
||||
use std::path::Path;
|
||||
use std::{collections::HashMap, path::Path, sync::OnceLock};
|
||||
use tasks::chunk_interpreter::{ChunkInterpreter, TryFromWithInterpreter};
|
||||
|
||||
pub(crate) fn witness_use_legacy_mode(fork_name: &str) -> eyre::Result<bool> {
|
||||
ADDITIONAL_FEATURES
|
||||
.get()
|
||||
.and_then(|features| features.get(fork_name))
|
||||
.map(|cfg| cfg.legacy_witness_encoding)
|
||||
.ok_or_else(|| {
|
||||
eyre::eyre!(
|
||||
"can not find features setting for unrecognized fork {}",
|
||||
fork_name
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone)]
|
||||
struct FeatureOptions {
|
||||
legacy_witness_encoding: bool,
|
||||
for_openvm_13_prover: bool,
|
||||
}
|
||||
|
||||
static ADDITIONAL_FEATURES: OnceLock<HashMap<HardForkName, FeatureOptions>> = OnceLock::new();
|
||||
|
||||
impl FeatureOptions {
|
||||
pub fn new(feats: &str) -> Self {
|
||||
let mut ret: Self = Default::default();
|
||||
|
||||
for feat_s in feats.split(':') {
|
||||
match feat_s.trim().to_lowercase().as_str() {
|
||||
"legacy_witness" => {
|
||||
tracing::info!("set witness encoding for legacy mode");
|
||||
ret.legacy_witness_encoding = true;
|
||||
}
|
||||
"openvm_13" => {
|
||||
tracing::info!("set prover should use openvm 13");
|
||||
ret.for_openvm_13_prover = true;
|
||||
}
|
||||
s => tracing::warn!("unrecognized dynamic feature: {s}"),
|
||||
}
|
||||
}
|
||||
ret
|
||||
}
|
||||
}
|
||||
|
||||
/// Turn the coordinator's chunk task into a json string for formal chunk proving
|
||||
/// task (with full witnesses)
|
||||
pub fn checkout_chunk_task(
|
||||
task_json: &str,
|
||||
decryption_key: Option<&[u8]>,
|
||||
interpreter: impl ChunkInterpreter,
|
||||
) -> eyre::Result<String> {
|
||||
let chunk_task = serde_json::from_str::<tasks::ChunkTask>(task_json)?;
|
||||
let ret = serde_json::to_string(&tasks::ChunkProvingTask::try_from_with_interpret(
|
||||
chunk_task,
|
||||
interpreter,
|
||||
)?)?;
|
||||
Ok(ret)
|
||||
Ok(serde_json::to_string(
|
||||
&tasks::ChunkProvingTask::try_from_with_interpret(chunk_task, decryption_key, interpreter)?,
|
||||
)?)
|
||||
}
|
||||
|
||||
/// Convert the universal task json into compatible form for old prover
|
||||
pub fn univ_task_compatibility_fix(task_json: &str) -> eyre::Result<String> {
|
||||
use scroll_zkvm_types::proof::VmInternalStarkProof;
|
||||
|
||||
let task: tasks::ProvingTask = serde_json::from_str(task_json)?;
|
||||
let aggregated_proofs: Vec<VmInternalStarkProof> = task
|
||||
.aggregated_proofs
|
||||
.into_iter()
|
||||
.map(|proof| VmInternalStarkProof {
|
||||
proofs: proof.proofs,
|
||||
public_values: proof.public_values,
|
||||
})
|
||||
.collect();
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct CompatibleProvingTask {
|
||||
/// serialized witness which should be written into stdin first
|
||||
pub serialized_witness: Vec<Vec<u8>>,
|
||||
/// aggregated proof carried by babybear fields, should be written into stdin
|
||||
/// following `serialized_witness`
|
||||
pub aggregated_proofs: Vec<VmInternalStarkProof>,
|
||||
/// Fork name specify
|
||||
pub fork_name: String,
|
||||
/// The vk of the app which is expected to prove this task
|
||||
pub vk: Vec<u8>,
|
||||
/// An identifier assigned by the coordinator; it should be kept identical for the
|
||||
/// same task (for example, using chunk, batch and bundle hashes)
|
||||
pub identifier: String,
|
||||
}
|
||||
|
||||
let compatible_u_task = CompatibleProvingTask {
|
||||
serialized_witness: task.serialized_witness,
|
||||
aggregated_proofs,
|
||||
fork_name: task.fork_name,
|
||||
vk: task.vk,
|
||||
identifier: task.identifier,
|
||||
};
|
||||
|
||||
Ok(serde_json::to_string(&compatible_u_task)?)
|
||||
}
|
||||
|
||||
/// Generate the required artifacts for proving tasks
|
||||
/// return (pi_hash, metadata, task)
|
||||
pub fn gen_universal_task(
|
||||
task_type: i32,
|
||||
task_json: &str,
|
||||
fork_name: &str,
|
||||
interpreter: Option<impl ChunkInterpreter>,
|
||||
fork_name_str: &str,
|
||||
expected_vk: &[u8],
|
||||
) -> eyre::Result<(B256, String, String)> {
|
||||
use proofs::*;
|
||||
use tasks::*;
|
||||
@@ -44,30 +129,88 @@ pub fn gen_universal_task(
|
||||
Bundle(BundleProofMetadata),
|
||||
}
|
||||
|
||||
let (pi_hash, metadata, u_task) = match task_type {
|
||||
let (pi_hash, metadata, mut u_task) = match task_type {
|
||||
x if x == TaskType::Chunk as i32 => {
|
||||
let task = serde_json::from_str::<ChunkProvingTask>(task_json)?;
|
||||
let mut task = serde_json::from_str::<ChunkProvingTask>(task_json)?;
|
||||
// normalize the fork name field in the task
|
||||
task.fork_name = task.fork_name.to_lowercase();
|
||||
let version = Version::from(task.version);
|
||||
// always respect the fork_name_str (which has been normalized) passed by the caller;
// if the fork_name wrapped in the task does not match, consider it a malformed task
|
||||
if fork_name_str != task.fork_name.as_str() {
|
||||
eyre::bail!("fork name in chunk task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
|
||||
}
|
||||
if fork_name_str != version.fork.as_str() {
|
||||
eyre::bail!(
|
||||
"given task version, expected fork={fork_name_str}, got={version_fork}",
|
||||
version_fork = version.fork.as_str()
|
||||
);
|
||||
}
|
||||
let (pi_hash, metadata, u_task) =
|
||||
gen_universal_chunk_task(task, fork_name.into(), interpreter)?;
|
||||
utils::panic_catch(move || gen_universal_chunk_task(task))
|
||||
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
|
||||
(pi_hash, AnyMetaData::Chunk(metadata), u_task)
|
||||
}
|
||||
x if x == TaskType::Batch as i32 => {
|
||||
let task = serde_json::from_str::<BatchProvingTask>(task_json)?;
|
||||
let (pi_hash, metadata, u_task) = gen_universal_batch_task(task, fork_name.into())?;
|
||||
let mut task = serde_json::from_str::<BatchProvingTask>(task_json)?;
|
||||
task.fork_name = task.fork_name.to_lowercase();
|
||||
let version = Version::from(task.version);
|
||||
if fork_name_str != task.fork_name.as_str() {
|
||||
eyre::bail!("fork name in batch task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
|
||||
}
|
||||
if fork_name_str != version.fork.as_str() {
|
||||
eyre::bail!(
|
||||
"given task version, expected fork={fork_name_str}, got={version_fork}",
|
||||
version_fork = version.fork.as_str()
|
||||
);
|
||||
}
|
||||
let (pi_hash, metadata, u_task) =
|
||||
utils::panic_catch(move || gen_universal_batch_task(task))
|
||||
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
|
||||
(pi_hash, AnyMetaData::Batch(metadata), u_task)
|
||||
}
|
||||
x if x == TaskType::Bundle as i32 => {
|
||||
let task = serde_json::from_str::<BundleProvingTask>(task_json)?;
|
||||
let (pi_hash, metadata, u_task) = gen_universal_bundle_task(task, fork_name.into())?;
|
||||
let mut task = serde_json::from_str::<BundleProvingTask>(task_json)?;
|
||||
task.fork_name = task.fork_name.to_lowercase();
|
||||
let version = Version::from(task.version);
|
||||
if fork_name_str != task.fork_name.as_str() {
|
||||
eyre::bail!("fork name in bundle task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
|
||||
}
|
||||
if fork_name_str != version.fork.as_str() {
|
||||
eyre::bail!(
|
||||
"given task version, expected fork={fork_name_str}, got={version_fork}",
|
||||
version_fork = version.fork.as_str()
|
||||
);
|
||||
}
|
||||
let (pi_hash, metadata, u_task) =
|
||||
utils::panic_catch(move || gen_universal_bundle_task(task))
|
||||
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
|
||||
(pi_hash, AnyMetaData::Bundle(metadata), u_task)
|
||||
}
|
||||
_ => return Err(eyre::eyre!("unrecognized task type {task_type}")),
|
||||
};
|
||||
|
||||
u_task.vk = Vec::from(expected_vk);
|
||||
let fork_name = u_task.fork_name.clone();
|
||||
let mut u_task_ext = ProvingTaskExt::new(u_task);
|
||||
|
||||
// set additional settings from global features
|
||||
if let Some(cfg) = ADDITIONAL_FEATURES
|
||||
.get()
|
||||
.and_then(|features| features.get(&fork_name))
|
||||
{
|
||||
u_task_ext.use_openvm_13 = cfg.for_openvm_13_prover;
|
||||
} else {
|
||||
tracing::warn!(
|
||||
"can not found features setting for unrecognized fork {}",
|
||||
fork_name
|
||||
);
|
||||
}
|
||||
|
||||
Ok((
|
||||
pi_hash,
|
||||
serde_json::to_string(&metadata)?,
|
||||
serde_json::to_string(&u_task)?,
|
||||
serde_json::to_string(&u_task_ext)?,
|
||||
))
|
||||
}
|
||||
|
||||
@@ -98,7 +241,26 @@ pub fn gen_wrapped_proof(proof_json: &str, metadata: &str, vk: &[u8]) -> eyre::R
|
||||
/// init verifier
|
||||
pub fn verifier_init(config: &str) -> eyre::Result<()> {
|
||||
let cfg: VerifierConfig = serde_json::from_str(config)?;
|
||||
ADDITIONAL_FEATURES
|
||||
.set(HashMap::from_iter(cfg.circuits.iter().map(|config| {
|
||||
tracing::info!(
|
||||
"start setting features [{:?}] for fork {}",
|
||||
config.features,
|
||||
config.fork_name
|
||||
);
|
||||
(
|
||||
config.fork_name.to_lowercase(),
|
||||
config
|
||||
.features
|
||||
.as_ref()
|
||||
.map(|features| FeatureOptions::new(features.as_str()))
|
||||
.unwrap_or_default(),
|
||||
)
|
||||
})))
|
||||
.map_err(|c| eyre::eyre!("Fail to init additional features: {c:?}"))?;
|
||||
|
||||
verifier::init(cfg);
|
||||
|
||||
Ok(())
|
||||
}

@@ -106,8 +268,7 @@ pub fn verifier_init(config: &str) -> eyre::Result<()> {
pub fn verify_proof(proof: Vec<u8>, fork_name: &str, task_type: TaskType) -> eyre::Result<bool> {
    let verifier = verifier::get_verifier(fork_name)?;

    let ret = verifier.verify(task_type, proof)?;

    let ret = verifier.lock().unwrap().verify(task_type, &proof)?;
    Ok(ret)
}

@@ -115,7 +276,7 @@ pub fn verify_proof(proof: Vec<u8>, fork_name: &str, task_type: TaskType) -> eyr
pub fn dump_vk(fork_name: &str, file: &str) -> eyre::Result<()> {
    let verifier = verifier::get_verifier(fork_name)?;

    verifier.dump_vk(Path::new(file));
    verifier.lock().unwrap().dump_vk(Path::new(file));

    Ok(())
}

@@ -7,10 +7,11 @@ use scroll_zkvm_types::{
    batch::BatchInfo,
    bundle::BundleInfo,
    chunk::ChunkInfo,
    proof::{EvmProof, OpenVmEvmProof, ProofEnum, RootProof},
    public_inputs::{ForkName, MultiVersionPublicInputs},
    types_agg::{AggregationInput, ProgramCommitment},
    util::vec_as_base64,
    proof::{EvmProof, OpenVmEvmProof, ProofEnum, StarkProof},
    public_inputs::MultiVersionPublicInputs,
    types_agg::AggregationInput,
    utils::{serialize_vk, vec_as_base64},
    version,
};
use serde::{de::DeserializeOwned, Deserialize, Serialize};

@@ -40,7 +41,7 @@ pub struct WrappedProof<Metadata> {
}

pub trait AsRootProof {
    fn as_root_proof(&self) -> &RootProof;
    fn as_root_proof(&self) -> &StarkProof;
}

pub trait AsEvmProof {
@@ -61,17 +62,17 @@ pub type BatchProof = WrappedProof<BatchProofMetadata>;
pub type BundleProof = WrappedProof<BundleProofMetadata>;

impl AsRootProof for ChunkProof {
    fn as_root_proof(&self) -> &RootProof {
    fn as_root_proof(&self) -> &StarkProof {
        self.proof
            .as_root_proof()
            .as_stark_proof()
            .expect("chunk proof uses root proof")
    }
}

impl AsRootProof for BatchProof {
    fn as_root_proof(&self) -> &RootProof {
    fn as_root_proof(&self) -> &StarkProof {
        self.proof
            .as_root_proof()
            .as_stark_proof()
            .expect("batch proof uses root proof")
    }
}
@@ -122,6 +123,8 @@ pub trait PersistableProof: Sized {
pub struct ChunkProofMetadata {
    /// The chunk information describing the list of blocks contained within the chunk.
    pub chunk_info: ChunkInfo,
    /// Additional data for stats
    pub chunk_total_gas: u64,
}

impl ProofMetadata for ChunkProofMetadata {
@@ -137,8 +140,6 @@ impl ProofMetadata for ChunkProofMetadata {
pub struct BatchProofMetadata {
    /// The batch information describing the list of chunks.
    pub batch_info: BatchInfo,
    /// The [`scroll_zkvm_types::batch::BatchHeader`]'s digest.
    pub batch_hash: B256,
}

impl ProofMetadata for BatchProofMetadata {
@@ -170,7 +171,7 @@ impl<Metadata> From<&WrappedProof<Metadata>> for AggregationInput {
    fn from(value: &WrappedProof<Metadata>) -> Self {
        Self {
            public_values: value.proof.public_values(),
            commitment: ProgramCommitment::deserialize(&value.vk),
            commitment: serialize_vk::deserialize(&value.vk),
        }
    }
}
@@ -179,23 +180,24 @@ impl<Metadata: ProofMetadata> WrappedProof<Metadata> {
    /// Sanity checks on the wrapped proof:
    ///
    /// - pi_hash computed in host does in fact match pi_hash computed in guest
    pub fn sanity_check(&self, fork_name: ForkName) {
    pub fn pi_hash_check(&self, ver: version::Version) -> bool {
        let proof_pi = self.proof.public_values();

        let expected_pi = self
            .metadata
            .pi_hash_info()
            .pi_hash_by_fork(fork_name)
            .pi_hash_by_version(ver)
            .0
            .as_ref()
            .iter()
            .map(|&v| v as u32)
            .collect::<Vec<_>>();

        assert_eq!(
            expected_pi, proof_pi,
            "pi mismatch: expected={expected_pi:?}, found={proof_pi:?}"
        );
        let ret = expected_pi == proof_pi;
        if !ret {
            tracing::warn!("pi mismatch: expected={expected_pi:?}, found={proof_pi:?}");
        }
        ret
    }
}
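
Since `pi_hash_check` now returns a `bool` instead of asserting, each call site decides how to react; a hedged sketch of a caller (the `Version` here would come from the task's version byte, as elsewhere in this diff):

fn accept_proof<M: ProofMetadata>(proof: &WrappedProof<M>, version_byte: u8) -> eyre::Result<()> {
    let ver = version::Version::from(version_byte);
    // Reject instead of panicking when host and guest disagree on the pi hash.
    if !proof.pi_hash_check(ver) {
        eyre::bail!("pi hash mismatch between host and guest");
    }
    Ok(())
}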

@@ -213,11 +215,7 @@ impl<Metadata: ProofMetadata> PersistableProof for WrappedProof<Metadata> {
mod tests {
    use base64::{prelude::BASE64_STANDARD, Engine};
    use sbv_primitives::B256;
    use scroll_zkvm_types::{
        bundle::{BundleInfo, BundleInfoV1},
        proof::EvmProof,
        public_inputs::PublicInputs,
    };
    use scroll_zkvm_types::{bundle::BundleInfo, proof::EvmProof};

    use super::*;

@@ -244,7 +242,7 @@ mod tests {
    fn test_dummy_proof() -> eyre::Result<()> {
        // 1. Metadata
        let metadata = {
            let bundle_info: BundleInfoV1 = BundleInfo {
            let bundle_info = BundleInfo {
                chain_id: 12345,
                num_batches: 12,
                prev_state_root: B256::repeat_byte(1),
@@ -253,11 +251,11 @@ mod tests {
                batch_hash: B256::repeat_byte(4),
                withdraw_root: B256::repeat_byte(5),
                msg_queue_hash: B256::repeat_byte(6),
            }
            .into();
            let bundle_pi_hash = bundle_info.pi_hash();
                encryption_key: None,
            };
            let bundle_pi_hash = bundle_info.pi_hash_euclidv1();
            BundleProofMetadata {
                bundle_info: bundle_info.0,
                bundle_info,
                bundle_pi_hash,
            }
        };

@@ -9,30 +9,67 @@ pub use chunk::{ChunkProvingTask, ChunkTask};
pub use chunk_interpreter::ChunkInterpreter;
pub use scroll_zkvm_types::task::ProvingTask;

use crate::proofs::{BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata};
use chunk_interpreter::{DummyInterpreter, TryFromWithInterpreter};
use sbv_primitives::B256;
use scroll_zkvm_types::{
    chunk::ChunkInfo,
    public_inputs::{ForkName, MultiVersionPublicInputs},
use crate::{
    proofs::{BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata},
    utils::panic_catch,
};
use sbv_primitives::B256;
use scroll_zkvm_types::public_inputs::{MultiVersionPublicInputs, Version};

fn encode_task_to_witness<T: serde::Serialize>(task: &T) -> eyre::Result<Vec<u8>> {
    let config = bincode::config::standard();
    Ok(bincode::serde::encode_to_vec(task, config)?)
}
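
On the consuming side, the guest decodes this witness with the matching bincode 2 serde API; a minimal round-trip sketch (assuming the `bincode` 2.x crate with its `serde` integration, as the `bincode::serde::encode_to_vec` call above implies):

fn decode_task_from_witness<T: serde::de::DeserializeOwned>(bytes: &[u8]) -> eyre::Result<T> {
    let config = bincode::config::standard();
    // decode_from_slice returns the decoded value plus the byte count consumed.
    let (task, _bytes_read) = bincode::serde::decode_from_slice(bytes, config)?;
    Ok(task)
}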

fn check_aggregation_proofs<Metadata: MultiVersionPublicInputs>(
    metadata: &[Metadata],
    version: Version,
) -> eyre::Result<()> {
    panic_catch(|| {
        for w in metadata.windows(2) {
            w[1].validate(&w[0], version);
        }
    })
    .map_err(|e| eyre::eyre!("Metadata validation failed: {}", e))?;

    Ok(())
}
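
The `windows(2)` walk validates each metadata entry against its immediate predecessor, so for `[m0, m1, m2]` the pairs `(m0, m1)` and `(m1, m2)` are checked and the whole chain is covered; a toy illustration of the same pattern:

// Toy illustration of the adjacent-pair walk used above.
fn assert_chained(chain: &[u64]) {
    for w in chain.windows(2) {
        // each element must chain onto the previous one
        assert!(w[1] == w[0] + 1, "not chained: {} -> {}", w[0], w[1]);
    }
}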

#[derive(serde::Deserialize, serde::Serialize)]
pub struct ProvingTaskExt {
    #[serde(flatten)]
    task: ProvingTask,
    #[serde(default)]
    pub use_openvm_13: bool,
}

impl From<ProvingTaskExt> for ProvingTask {
    fn from(wrap_t: ProvingTaskExt) -> Self {
        wrap_t.task
    }
}

impl ProvingTaskExt {
    pub fn new(task: ProvingTask) -> Self {
        Self {
            task,
            use_openvm_13: false,
        }
    }
}
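
With `#[serde(flatten)]` the wrapper serializes to the same JSON object as a bare `ProvingTask` plus one extra flag, and `#[serde(default)]` lets payloads without the flag still deserialize, which keeps old and new producers compatible. A sketch of the round trip (field names of `ProvingTask` elided):

fn roundtrip(task: ProvingTask) -> eyre::Result<ProvingTask> {
    // Serialized shape, roughly: { ...ProvingTask fields..., "use_openvm_13": false }
    let json = serde_json::to_string(&ProvingTaskExt::new(task))?;
    // JSON lacking "use_openvm_13" still parses thanks to #[serde(default)].
    let parsed: ProvingTaskExt = serde_json::from_str(&json)?;
    Ok(parsed.into())
}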

/// Generate the required data for chunk proving
pub fn gen_universal_chunk_task(
    mut task: ChunkProvingTask,
    fork_name: ForkName,
    interpreter: Option<impl ChunkInterpreter>,
    task: ChunkProvingTask,
) -> eyre::Result<(B256, ChunkProofMetadata, ProvingTask)> {
    let chunk_info = if let Some(interpreter) = interpreter {
        ChunkInfo::try_from_with_interpret(&mut task, interpreter)
    } else {
        ChunkInfo::try_from_with_interpret(&mut task, DummyInterpreter {})
    }?;
    let proving_task = task.try_into()?;
    let expected_pi_hash = chunk_info.pi_hash_by_fork(fork_name);
    let chunk_total_gas = task.stats().total_gas_used;
    let (proving_task, chunk_info, chunk_pi_hash) = task.into_proving_task_with_precheck()?;
    Ok((
        expected_pi_hash,
        ChunkProofMetadata { chunk_info },
        chunk_pi_hash,
        ChunkProofMetadata {
            chunk_info,
            chunk_total_gas,
        },
        proving_task,
    ))
}
@@ -40,18 +77,11 @@ pub fn gen_universal_chunk_task(
/// Generate the required data for batch proving
pub fn gen_universal_batch_task(
    task: BatchProvingTask,
    fork_name: ForkName,
) -> eyre::Result<(B256, BatchProofMetadata, ProvingTask)> {
    let batch_info = task.precheck_and_build_metadata()?;
    let proving_task = task.try_into()?;
    let expected_pi_hash = batch_info.pi_hash_by_fork(fork_name);

    let (proving_task, batch_info, batch_pi_hash) = task.into_proving_task_with_precheck()?;
    Ok((
        expected_pi_hash,
        BatchProofMetadata {
            batch_info,
            batch_hash: expected_pi_hash,
        },
        batch_pi_hash,
        BatchProofMetadata { batch_info },
        proving_task,
    ))
}
@@ -59,17 +89,13 @@ pub fn gen_universal_batch_task(
/// Generate the required data for bundle proving
pub fn gen_universal_bundle_task(
    task: BundleProvingTask,
    fork_name: ForkName,
) -> eyre::Result<(B256, BundleProofMetadata, ProvingTask)> {
    let bundle_info = task.precheck_and_build_metadata()?;
    let proving_task = task.try_into()?;
    let expected_pi_hash = bundle_info.pi_hash_by_fork(fork_name);

    let (proving_task, bundle_info, bundle_pi_hash) = task.into_proving_task_with_precheck()?;
    Ok((
        expected_pi_hash,
        bundle_pi_hash,
        BundleProofMetadata {
            bundle_info,
            bundle_pi_hash: expected_pi_hash,
            bundle_pi_hash,
        },
        proving_task,
    ))

@@ -1,36 +1,58 @@
use crate::proofs::ChunkProof;
use c_kzg::Bytes48;
use eyre::Result;
use sbv_primitives::{B256, U256};
use scroll_zkvm_types::{
    batch::{
        BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchInfo, BatchWitness, EnvelopeV6, EnvelopeV7,
        PointEvalWitness, ReferenceHeader, N_BLOB_BYTES,
        build_point_eval_witness, BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderValidium,
        BatchInfo, BatchWitness, Envelope, EnvelopeV6, EnvelopeV7, LegacyBatchWitness,
        ReferenceHeader, N_BLOB_BYTES,
    },
    public_inputs::ForkName,
    chunk::ChunkInfo,
    public_inputs::{ForkName, MultiVersionPublicInputs, Version},
    task::ProvingTask,
    utils::{to_rkyv_bytes, RancorError},
    version::{Codec, Domain, STFVersion},
};

use crate::proofs::ChunkProof;

mod utils;
use utils::{base64, point_eval};

/// Variable batch header type: since a BatchHeaderV6 cannot be
/// decoded as V7, deserialization is always unambiguous.
/// Notice: the V6 header MUST be put above V7, since an untagged enum
/// tries to decode each definition in order.
#[derive(Clone, serde::Deserialize, serde::Serialize)]
#[serde(untagged)]
pub enum BatchHeaderV {
    V6(BatchHeaderV6),
    V7(BatchHeaderV7),
pub struct BatchHeaderValidiumWithHash {
    #[serde(flatten)]
    header: BatchHeaderValidium,
    batch_hash: B256,
}

impl From<BatchHeaderV> for ReferenceHeader {
    fn from(value: BatchHeaderV) -> Self {
        match value {
            BatchHeaderV::V6(h) => ReferenceHeader::V6(h),
            BatchHeaderV::V7(h) => ReferenceHeader::V7(h),
/// Parse header types passed from the golang side and adapt them to the
/// definitions in zkvm-prover's types.
/// We distinguish the header type on the golang side according to the codec
/// version, i.e. v7 - v9 (current), and validium,
/// and adapt it to the corresponding header version used in zkvm-prover's witness
/// definitions, i.e. v7 - v8 (current), and validium.
#[derive(Clone, serde::Deserialize, serde::Serialize)]
#[serde(untagged)]
#[allow(non_camel_case_types)]
pub enum BatchHeaderV {
    /// Header for validium mode.
    Validium(BatchHeaderValidiumWithHash),
    /// Header for scroll's STF version v6.
    V6(BatchHeaderV6),
    /// Header for scroll's STF versions v7, v8, v9.
    ///
    /// Since the codec essentially is unchanged for the above STF versions, we do not define new
    /// variants, instead re-using the [`BatchHeaderV7`] variant.
    V7_V8_V9(BatchHeaderV7),
}

impl core::fmt::Display for BatchHeaderV {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            BatchHeaderV::V6(_) => write!(f, "V6"),
            BatchHeaderV::V7_V8_V9(_) => write!(f, "V7_V8_V9"),
            BatchHeaderV::Validium(_) => write!(f, "Validium"),
        }
    }
}
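
Because `#[serde(untagged)]` tries variants in declaration order, a payload whose fields happen to satisfy an earlier variant wins, which is exactly why the ordering notes above matter. A minimal sketch of the pitfall with toy types (not the real headers):

#[derive(serde::Deserialize)]
struct Narrow { a: u8 }

#[derive(serde::Deserialize)]
struct Wide { a: u8, b: u8 }

#[derive(serde::Deserialize)]
#[serde(untagged)]
enum Header {
    // Wide must come first: Narrow also accepts {"a":1,"b":2} because
    // unknown fields are ignored by default, so it would shadow Wide.
    Wide(Wide),
    Narrow(Narrow),
}

fn demo() -> serde_json::Result<()> {
    let h: Header = serde_json::from_str(r#"{"a":1,"b":2}"#)?;
    assert!(matches!(h, Header::Wide(_)));
    Ok(())
}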
@@ -39,21 +61,29 @@ impl BatchHeaderV {
    pub fn batch_hash(&self) -> B256 {
        match self {
            BatchHeaderV::V6(h) => h.batch_hash(),
            BatchHeaderV::V7(h) => h.batch_hash(),
            BatchHeaderV::V7_V8_V9(h) => h.batch_hash(),
            BatchHeaderV::Validium(h) => h.header.batch_hash(),
        }
    }

    pub fn must_v6_header(&self) -> &BatchHeaderV6 {
        match self {
            BatchHeaderV::V6(h) => h,
            BatchHeaderV::V7(_) => panic!("try to pick v7 header"),
            _ => unreachable!("A header of {} is considered to be v6", self),
        }
    }

    pub fn must_v7_header(&self) -> &BatchHeaderV7 {
    pub fn must_v7_v8_v9_header(&self) -> &BatchHeaderV7 {
        match self {
            BatchHeaderV::V7(h) => h,
            BatchHeaderV::V6(_) => panic!("try to pick v6 header"),
            BatchHeaderV::V7_V8_V9(h) => h,
            _ => unreachable!("A header of {} is considered to be in [v7, v8, v9]", self),
        }
    }

    pub fn must_validium_header(&self) -> &BatchHeaderValidium {
        match self {
            BatchHeaderV::Validium(h) => &h.header,
            _ => unreachable!("A header of {} is considered to be validium", self),
        }
    }
}
@@ -62,6 +92,8 @@ impl BatchHeaderV {
/// is compatible with both pre-euclidv2 and euclidv2
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub struct BatchProvingTask {
    /// The version of the chunks in the batch, as per [`Version`].
    pub version: u8,
    /// Chunk proofs for the contiguous list of chunks within the batch.
    pub chunk_proofs: Vec<ChunkProof>,
    /// The [`BatchHeaderV6/V7`], as computed on-chain for this batch.
@@ -80,174 +112,251 @@ pub struct BatchProvingTask {
    pub fork_name: String,
}

impl TryFrom<BatchProvingTask> for ProvingTask {
    type Error = eyre::Error;
impl BatchProvingTask {
    pub fn into_proving_task_with_precheck(self) -> Result<(ProvingTask, BatchInfo, B256)> {
        let (witness, metadata, batch_pi_hash) = self.precheck()?;
        let serialized_witness = if crate::witness_use_legacy_mode(&self.fork_name)? {
            let legacy_witness = LegacyBatchWitness::from(witness);
            to_rkyv_bytes::<RancorError>(&legacy_witness)?.into_vec()
        } else {
            super::encode_task_to_witness(&witness)?
        };

    fn try_from(value: BatchProvingTask) -> Result<Self> {
        let witness = value.build_guest_input();

        Ok(ProvingTask {
            identifier: value.batch_header.batch_hash().to_string(),
            fork_name: value.fork_name,
            aggregated_proofs: value
        let proving_task = ProvingTask {
            identifier: self.batch_header.batch_hash().to_string(),
            fork_name: self.fork_name,
            aggregated_proofs: self
                .chunk_proofs
                .into_iter()
                .map(|w_proof| w_proof.proof.into_root_proof().expect("expect root proof"))
                .map(|w_proof| w_proof.proof.into_stark_proof().expect("expect root proof"))
                .collect(),
            serialized_witness: vec![to_rkyv_bytes::<RancorError>(&witness)?.into_vec()],
            serialized_witness: vec![serialized_witness],
            vk: Vec::new(),
        })
        };

        Ok((proving_task, metadata, batch_pi_hash))
    }
}

impl BatchProvingTask {
    fn build_guest_input(&self) -> BatchWitness {
        let fork_name = self.fork_name.to_lowercase().as_str().into();
    fn build_guest_input(&self, version: Version) -> BatchWitness {
        tracing::info!(
            "Handling batch task for input, version byte {}, Version data: {:?}",
            self.version,
            version
        );
        // sanity check: the parsed header type must match the version
        match &self.batch_header {
            BatchHeaderV::Validium(_) => assert!(
                version.is_validium(),
                "version {:?} does not match parsed header: got a validium header but version is not validium", version,
            ),
            BatchHeaderV::V6(_) => assert_eq!(version.fork, ForkName::EuclidV1,
                "hardfork mismatch for da-codec@v6 header: found={:?}, expected={:?}",
                version.fork,
                ForkName::EuclidV1,
            ),
            BatchHeaderV::V7_V8_V9(_) => assert!(
                matches!(version.fork, ForkName::EuclidV2 | ForkName::Feynman | ForkName::Galileo),
                "hardfork mismatch for da-codec@v7/8/9 header: found={}, expected={:?}",
                version.fork,
                [ForkName::EuclidV2, ForkName::Feynman, ForkName::Galileo],
            ),
        }

        // calculate point eval needed and compare with task input
        let (kzg_commitment, kzg_proof, challenge_digest) = {
            let blob = point_eval::to_blob(&self.blob_bytes);
            let commitment = point_eval::blob_to_kzg_commitment(&blob);
            let versioned_hash = point_eval::get_versioned_hash(&commitment);
            let challenge_digest = match &self.batch_header {
                BatchHeaderV::V6(_) => {
                    assert_eq!(
                        fork_name,
                        ForkName::EuclidV1,
                        "hardfork mismatch for da-codec@v6 header: found={fork_name:?}, expected={:?}",
                        ForkName::EuclidV1,
                    );
                    EnvelopeV6::from(self.blob_bytes.as_slice()).challenge_digest(versioned_hash)
                }
                BatchHeaderV::V7(_) => {
                    assert_eq!(
                        fork_name,
                        ForkName::EuclidV2,
                        "hardfork mismatch for da-codec@v7 header: found={fork_name:?}, expected={:?}",
                        ForkName::EuclidV2,
                    );
                    let padded_blob_bytes = {
                        let mut padded_blob_bytes = self.blob_bytes.to_vec();
                        padded_blob_bytes.resize(N_BLOB_BYTES, 0);
                        padded_blob_bytes
                    };
                    EnvelopeV7::from(padded_blob_bytes.as_slice()).challenge_digest(versioned_hash)
                }
        let point_eval_witness = if !version.is_validium() {
            // sanity check: calculate point eval needed and compare with task input
            let (kzg_commitment, kzg_proof, challenge_digest) = {
                let blob = point_eval::to_blob(&self.blob_bytes);
                let commitment = point_eval::blob_to_kzg_commitment(&blob);
                let versioned_hash = point_eval::get_versioned_hash(&commitment);

                let padded_blob_bytes = {
                    let mut padded_blob_bytes = self.blob_bytes.to_vec();
                    padded_blob_bytes.resize(N_BLOB_BYTES, 0);
                    padded_blob_bytes
                };
                let challenge_digest = match version.codec {
                    Codec::V6 => {
                        // notice: v6 does not use padded blob bytes
                        <EnvelopeV6 as Envelope>::from_slice(self.blob_bytes.as_slice())
                            .challenge_digest(versioned_hash)
                    }
                    Codec::V7 => <EnvelopeV7 as Envelope>::from_slice(padded_blob_bytes.as_slice())
                        .challenge_digest(versioned_hash),
                };
                let (proof, _) = point_eval::get_kzg_proof(&blob, challenge_digest);

                (commitment.to_bytes(), proof.to_bytes(), challenge_digest)
            };

            let (proof, _) = point_eval::get_kzg_proof(&blob, challenge_digest);
            if let Some(k) = self.kzg_commitment {
                assert_eq!(k, kzg_commitment);
            }

            (commitment.to_bytes(), proof.to_bytes(), challenge_digest)
            if let Some(c) = self.challenge_digest {
                assert_eq!(c, U256::from_be_bytes(challenge_digest.0));
            }

            if let Some(p) = self.kzg_proof {
                assert_eq!(p, kzg_proof);
            }

            Some(build_point_eval_witness(
                kzg_commitment.into_inner(),
                kzg_proof.into_inner(),
            ))
        } else {
            assert!(self.kzg_proof.is_none(), "domain=validium has no blob-da");
            assert!(
                self.kzg_commitment.is_none(),
                "domain=validium has no blob-da"
            );
            assert!(
                self.challenge_digest.is_none(),
                "domain=validium has no blob-da"
            );

            match &self.batch_header {
                BatchHeaderV::Validium(h) => assert_eq!(
                    h.header.batch_hash(),
                    h.batch_hash,
                    "calculated batch hash must match the one from coordinator"
                ),
                _ => panic!("unexpected header type"),
            }
            None
        };

        if let Some(k) = self.kzg_commitment {
            assert_eq!(k, kzg_commitment);
        }

        if let Some(c) = self.challenge_digest {
            assert_eq!(c, U256::from_be_bytes(challenge_digest.0));
        }

        if let Some(p) = self.kzg_proof {
            assert_eq!(p, kzg_proof);
        }

        let point_eval_witness = PointEvalWitness {
            kzg_commitment: kzg_commitment.into_inner(),
            kzg_proof: kzg_proof.into_inner(),
        let reference_header = match (version.domain, version.stf_version) {
            (Domain::Scroll, STFVersion::V6) => {
                ReferenceHeader::V6(*self.batch_header.must_v6_header())
            }
            // The da-codec for STF versions v7, v8, v9 is identical. In zkvm-prover we do not
            // create additional variants to indicate the identical behaviour of codec. Instead we
            // add a separate variant for the STF version.
            //
            // We handle the different STF versions here, however we build the same batch header
            // since that type does not change. The batch header's version byte constructed in the
            // coordinator actually defines the STF version (v7, v8 or v9) and we can derive the
            // hard-fork (feynman or galileo) and the codec from the version byte.
            //
            // Refer to [`scroll_zkvm_types::public_inputs::Version`].
            (Domain::Scroll, STFVersion::V7 | STFVersion::V8 | STFVersion::V9) => {
                ReferenceHeader::V7_V8_V9(*self.batch_header.must_v7_v8_v9_header())
            }
            (Domain::Validium, STFVersion::V1) => {
                ReferenceHeader::Validium(*self.batch_header.must_validium_header())
            }
            (domain, stf_version) => {
                unreachable!("unsupported domain={domain:?},stf-version={stf_version:?}")
            }
        };

        let reference_header = self.batch_header.clone().into();
        // patch: ensure block_hash field is ZERO for scroll domain
        let chunk_infos = self
            .chunk_proofs
            .iter()
            .map(|p| {
                if version.domain == Domain::Scroll {
                    ChunkInfo {
                        prev_blockhash: B256::ZERO,
                        post_blockhash: B256::ZERO,
                        ..p.metadata.chunk_info.clone()
                    }
                } else {
                    p.metadata.chunk_info.clone()
                }
            })
            .collect();

        BatchWitness {
            fork_name,
            version: version.as_version_byte(),
            fork_name: version.fork,
            chunk_proofs: self.chunk_proofs.iter().map(|proof| proof.into()).collect(),
            chunk_infos: self
                .chunk_proofs
                .iter()
                .map(|p| p.metadata.chunk_info.clone())
                .collect(),
            chunk_infos,
            blob_bytes: self.blob_bytes.clone(),
            reference_header,
            point_eval_witness,
        }
    }

    pub fn precheck_and_build_metadata(&self) -> Result<BatchInfo> {
        let fork_name = ForkName::from(self.fork_name.as_str());
        let (parent_state_root, state_root, chain_id, withdraw_root) = (
            self.chunk_proofs
                .first()
                .expect("at least one chunk in batch")
                .metadata
                .chunk_info
                .prev_state_root,
            self.chunk_proofs
                .last()
                .expect("at least one chunk in batch")
                .metadata
                .chunk_info
                .post_state_root,
            self.chunk_proofs
                .last()
                .expect("at least one chunk in batch")
                .metadata
                .chunk_info
                .chain_id,
            self.chunk_proofs
                .last()
                .expect("at least one chunk in batch")
                .metadata
                .chunk_info
                .withdraw_root,
        );
        let (parent_batch_hash, prev_msg_queue_hash, post_msg_queue_hash) = match self.batch_header
        {
            BatchHeaderV::V6(h) => {
                assert_eq!(
                    fork_name,
                    ForkName::EuclidV1,
                    "hardfork mismatch for da-codec@v6 header: found={fork_name:?}, expected={:?}",
                    ForkName::EuclidV1,
                );
                (h.parent_batch_hash, Default::default(), Default::default())
            }
            BatchHeaderV::V7(h) => {
                assert_eq!(
                    fork_name,
                    ForkName::EuclidV2,
                    "hardfork mismatch for da-codec@v7 header: found={fork_name:?}, expected={:?}",
                    ForkName::EuclidV2,
                );
                (
                    h.parent_batch_hash,
                    self.chunk_proofs
                        .first()
                        .expect("at least one chunk in batch")
                        .metadata
                        .chunk_info
                        .prev_msg_queue_hash,
                    self.chunk_proofs
                        .last()
                        .expect("at least one chunk in batch")
                        .metadata
                        .chunk_info
                        .post_msg_queue_hash,
                )
            }
        };
    pub fn precheck(&self) -> Result<(BatchWitness, BatchInfo, B256)> {
        // for every aggregation task, there are two steps needed to build the metadata:
        // 1. generate data for metadata from the witness
        // 2. validate every adjacent proof pair
        let version = Version::from(self.version);
        let witness = self.build_guest_input(version);
        let metadata = BatchInfo::from(&witness);
        super::check_aggregation_proofs(
            witness.chunk_infos.as_slice(),
            Version::from(self.version),
        )?;
        let pi_hash = metadata.pi_hash_by_version(version);

        let batch_hash = self.batch_header.batch_hash();

        Ok(BatchInfo {
            parent_state_root,
            parent_batch_hash,
            state_root,
            batch_hash,
            chain_id,
            withdraw_root,
            prev_msg_queue_hash,
            post_msg_queue_hash,
        })
        Ok((witness, metadata, pi_hash))
    }
}

#[test]
fn test_deserde_batch_header_v_validium() {
    use std::str::FromStr;

    // Top-level JSON: flattened enum tag "V1" + batch_hash
    let json = r#"{
        "V1": {
            "version": 1,
            "batch_index": 42,
            "parent_batch_hash": "0x1111111111111111111111111111111111111111111111111111111111111111",
            "post_state_root": "0x2222222222222222222222222222222222222222222222222222222222222222",
            "withdraw_root": "0x3333333333333333333333333333333333333333333333333333333333333333",
            "commitment": "0x4444444444444444444444444444444444444444444444444444444444444444"
        },
        "batch_hash": "0x5555555555555555555555555555555555555555555555555555555555555555"
    }"#;

    let parsed: BatchHeaderV = serde_json::from_str(json).expect("deserialize BatchHeaderV");

    match parsed {
        BatchHeaderV::Validium(v) => {
            // Check the batch_hash field
            let expected_batch_hash = B256::from_str(
                "0x5555555555555555555555555555555555555555555555555555555555555555",
            )
            .unwrap();
            assert_eq!(v.batch_hash, expected_batch_hash);

            // Check the inner header variant and fields
            match v.header {
                BatchHeaderValidium::V1(h) => {
                    assert_eq!(h.version, 1);
                    assert_eq!(h.batch_index, 42);

                    let p = B256::from_str(
                        "0x1111111111111111111111111111111111111111111111111111111111111111",
                    )
                    .unwrap();
                    let s = B256::from_str(
                        "0x2222222222222222222222222222222222222222222222222222222222222222",
                    )
                    .unwrap();
                    let w = B256::from_str(
                        "0x3333333333333333333333333333333333333333333333333333333333333333",
                    )
                    .unwrap();
                    let c = B256::from_str(
                        "0x4444444444444444444444444444444444444444444444444444444444444444",
                    )
                    .unwrap();

                    assert_eq!(h.parent_batch_hash, p);
                    assert_eq!(h.post_state_root, s);
                    assert_eq!(h.withdraw_root, w);
                    assert_eq!(h.commitment, c);

                    // Sanity: computed batch hash equals the provided one (if method available)
                    // assert_eq!(v.header.batch_hash(), expected_batch_hash);
                }
            }
        }
        _ => panic!("expected validium header variant"),
    }
}

@@ -18,7 +18,7 @@ pub mod base64 {
pub mod point_eval {
    use c_kzg;
    use sbv_primitives::{types::eips::eip4844::BLS_MODULUS, B256 as H256, U256};
    use scroll_zkvm_types::util::sha256_rv32;
    use scroll_zkvm_types::utils::sha256_rv32;

    /// Given the blob-envelope, translate it to a fixed size EIP-4844 blob.
    ///
@@ -42,7 +42,8 @@ pub mod point_eval {

    /// Get the KZG commitment from an EIP-4844 blob.
    pub fn blob_to_kzg_commitment(blob: &c_kzg::Blob) -> c_kzg::KzgCommitment {
        c_kzg::KzgCommitment::blob_to_kzg_commitment(blob, c_kzg::ethereum_kzg_settings())
        c_kzg::ethereum_kzg_settings(0)
            .blob_to_kzg_commitment(blob)
            .expect("blob to kzg commitment should succeed")
    }

@@ -65,12 +66,9 @@ pub mod point_eval {
    pub fn get_kzg_proof(blob: &c_kzg::Blob, challenge: H256) -> (c_kzg::KzgProof, U256) {
        let challenge = get_x_from_challenge(challenge);

        let (proof, y) = c_kzg::KzgProof::compute_kzg_proof(
            blob,
            &c_kzg::Bytes32::new(challenge.to_be_bytes()),
            c_kzg::ethereum_kzg_settings(),
        )
        .expect("kzg proof should succeed");
        let (proof, y) = c_kzg::ethereum_kzg_settings(0)
            .compute_kzg_proof(blob, &c_kzg::Bytes32::new(challenge.to_be_bytes()))
            .expect("kzg proof should succeed");

        (proof, U256::from_be_slice(y.as_slice()))
    }
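
Chained together, the helpers above give the host-side point-evaluation flow: envelope bytes become a blob, the blob a commitment, the commitment a versioned hash that feeds the envelope's challenge digest, and finally a proof is opened at that challenge. A hedged sketch using only functions defined in this module (the `to_blob` signature is assumed from its call sites; challenge-digest derivation is elided, see the batch witness code above):

    // Sketch of the host-side KZG flow for a blob envelope.
    pub fn point_eval_flow(
        blob_bytes: &[u8],
        challenge_digest: H256,
    ) -> (c_kzg::Bytes48, c_kzg::Bytes48) {
        let blob = to_blob(blob_bytes);
        let commitment = blob_to_kzg_commitment(&blob);
        // The versioned hash is what the envelope hashes into the challenge digest.
        let _versioned_hash = get_versioned_hash(&commitment);
        let (proof, _y) = get_kzg_proof(&blob, challenge_digest);
        (commitment.to_bytes(), proof.to_bytes())
    }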

@@ -1,16 +1,22 @@
use crate::proofs::BatchProof;
use eyre::Result;
use sbv_primitives::B256;
use scroll_zkvm_types::{
    bundle::{BundleInfo, BundleWitness},
    bundle::{BundleInfo, BundleWitness, LegacyBundleWitness},
    public_inputs::{MultiVersionPublicInputs, Version},
    task::ProvingTask,
    utils::{to_rkyv_bytes, RancorError},
};

use crate::proofs::BatchProof;

/// Message indicating a sanity check failure.
const BUNDLE_SANITY_MSG: &str = "bundle must have at least one batch";

#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub struct BundleProvingTask {
    /// The version of batches in the bundle.
    pub version: u8,
    /// The STARK proofs of each batch in the bundle.
    pub batch_proofs: Vec<BatchProof>,
    /// for sanity check
    pub bundle_info: Option<BundleInfo>,
@@ -19,6 +25,30 @@ pub struct BundleProvingTask {
}

impl BundleProvingTask {
    pub fn into_proving_task_with_precheck(self) -> Result<(ProvingTask, BundleInfo, B256)> {
        let (witness, bundle_info, bundle_pi_hash) = self.precheck()?;
        let serialized_witness = if crate::witness_use_legacy_mode(&self.fork_name)? {
            let legacy = LegacyBundleWitness::from(witness);
            to_rkyv_bytes::<RancorError>(&legacy)?.into_vec()
        } else {
            super::encode_task_to_witness(&witness)?
        };

        let proving_task = ProvingTask {
            identifier: self.identifier(),
            fork_name: self.fork_name,
            aggregated_proofs: self
                .batch_proofs
                .into_iter()
                .map(|w_proof| w_proof.proof.into_stark_proof().expect("expect root proof"))
                .collect(),
            serialized_witness: vec![serialized_witness],
            vk: Vec::new(),
        };

        Ok((proving_task, bundle_info, bundle_pi_hash))
    }

    fn identifier(&self) -> String {
        assert!(!self.batch_proofs.is_empty(), "{BUNDLE_SANITY_MSG}",);

@@ -27,99 +57,45 @@ impl BundleProvingTask {
                .first()
                .expect(BUNDLE_SANITY_MSG)
                .metadata
                .batch_info
                .batch_hash,
            self.batch_proofs
                .last()
                .expect(BUNDLE_SANITY_MSG)
                .metadata
                .batch_info
                .batch_hash,
        );

        format!("{first}-{last}")
    }

    fn build_guest_input(&self) -> BundleWitness {
    fn build_guest_input(&self, version: Version) -> BundleWitness {
        BundleWitness {
            version: version.as_version_byte(),
            batch_proofs: self.batch_proofs.iter().map(|proof| proof.into()).collect(),
            batch_infos: self
                .batch_proofs
                .iter()
                .map(|wrapped_proof| wrapped_proof.metadata.batch_info.clone())
                .collect(),
            fork_name: version.fork,
        }
    }

    pub fn precheck_and_build_metadata(&self) -> Result<BundleInfo> {
        use eyre::eyre;
        let err_prefix = format!("metadata_with_prechecks for task_id={}", self.identifier());
    fn precheck(&self) -> Result<(BundleWitness, BundleInfo, B256)> {
        // for every aggregation task, there are two steps needed to build the metadata:
        // 1. generate data for metadata from the witness
        // 2. validate every adjacent proof pair
        let version = Version::from(self.version);
        let witness = self.build_guest_input(version);
        let metadata = BundleInfo::from(&witness);
        super::check_aggregation_proofs(
            witness.batch_infos.as_slice(),
            Version::from(self.version),
        )?;
        let pi_hash = metadata.pi_hash_by_version(version);

        for w in self.batch_proofs.windows(2) {
            if w[1].metadata.batch_info.chain_id != w[0].metadata.batch_info.chain_id {
                return Err(eyre!("{err_prefix}: chain_id mismatch"));
            }

            if w[1].metadata.batch_info.parent_state_root != w[0].metadata.batch_info.state_root {
                return Err(eyre!("{err_prefix}: state_root not chained"));
            }

            if w[1].metadata.batch_info.parent_batch_hash != w[0].metadata.batch_info.batch_hash {
                return Err(eyre!("{err_prefix}: batch_hash not chained"));
            }
        }

        let (first_batch, last_batch) = (
            &self
                .batch_proofs
                .first()
                .expect("at least one batch in bundle")
                .metadata
                .batch_info,
            &self
                .batch_proofs
                .last()
                .expect("at least one batch in bundle")
                .metadata
                .batch_info,
        );

        let chain_id = first_batch.chain_id;
        let num_batches = u32::try_from(self.batch_proofs.len()).expect("num_batches: u32");
        let prev_state_root = first_batch.parent_state_root;
        let prev_batch_hash = first_batch.parent_batch_hash;
        let post_state_root = last_batch.state_root;
        let batch_hash = last_batch.batch_hash;
        let withdraw_root = last_batch.withdraw_root;
        let msg_queue_hash = last_batch.post_msg_queue_hash;

        Ok(BundleInfo {
            chain_id,
            msg_queue_hash,
            num_batches,
            prev_state_root,
            prev_batch_hash,
            post_state_root,
            batch_hash,
            withdraw_root,
        })
    }
}

impl TryFrom<BundleProvingTask> for ProvingTask {
    type Error = eyre::Error;

    fn try_from(value: BundleProvingTask) -> Result<Self> {
        let witness = value.build_guest_input();

        Ok(ProvingTask {
            identifier: value.identifier(),
            fork_name: value.fork_name,
            aggregated_proofs: value
                .batch_proofs
                .into_iter()
                .map(|w_proof| w_proof.proof.into_root_proof().expect("expect root proof"))
                .collect(),
            serialized_witness: vec![to_rkyv_bytes::<RancorError>(&witness)?.to_vec()],
            vk: Vec::new(),
        })
        Ok((witness, metadata, pi_hash))
    }
}

@@ -1,19 +1,26 @@
use super::chunk_interpreter::*;
use eyre::Result;
use sbv_primitives::{types::BlockWitness, B256};
use sbv_core::BlockWitness;
use sbv_primitives::{types::consensus::BlockHeader, B256};
use scroll_zkvm_types::{
    chunk::{execute, ChunkInfo, ChunkWitness},
    chunk::{execute, ChunkInfo, ChunkWitness, LegacyChunkWitness, ValidiumInputs},
    public_inputs::{MultiVersionPublicInputs, Version},
    task::ProvingTask,
    utils::{to_rkyv_bytes, RancorError},
};

use super::chunk_interpreter::*;

/// The type aligned with the coordinator's definition
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct ChunkTask {
    /// The version for the chunk, as per [`Version`].
    pub version: u8,
    /// Block hashes for a series of blocks
    pub block_hashes: Vec<B256>,
    /// The on-chain L1 msg queue hash before applying L1 msg txs from the chunk.
    pub prev_msg_queue_hash: B256,
    /// The on-chain L1 msg queue hash after applying L1 msg txs from the chunk (for validation)
    pub post_msg_queue_hash: B256,
    /// Fork name specifier
    pub fork_name: String,
}
@@ -21,6 +28,7 @@ pub struct ChunkTask {
impl TryFromWithInterpreter<ChunkTask> for ChunkProvingTask {
    fn try_from_with_interpret(
        value: ChunkTask,
        decryption_key: Option<&[u8]>,
        interpreter: impl ChunkInterpreter,
    ) -> Result<Self> {
        let mut block_witnesses = Vec::new();
@@ -30,10 +38,28 @@ impl TryFromWithInterpreter<ChunkTask> for ChunkProvingTask {
            block_witnesses.push(witness);
        }

        let validium_txs = if Version::from(value.version).is_validium() {
            let mut validium_txs = Vec::new();
            for block_number in block_witnesses.iter().map(|w| w.header.number()) {
                validium_txs.push(interpreter.try_fetch_l1_msgs(block_number)?);
            }
            validium_txs
        } else {
            vec![]
        };

        let validium_inputs = decryption_key.map(|secret_key| ValidiumInputs {
            validium_txs,
            secret_key: secret_key.into(),
        });

        Ok(Self {
            version: value.version,
            block_witnesses,
            prev_msg_queue_hash: value.prev_msg_queue_hash,
            post_msg_queue_hash: value.post_msg_queue_hash,
            fork_name: value.fork_name,
            validium_inputs,
        })
    }
}
@@ -47,12 +73,18 @@ const CHUNK_SANITY_MSG: &str = "chunk must have at least one block";
/// - {first_block_number}-{last_block_number}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct ChunkProvingTask {
    /// The version for the chunk, as per [Version][scroll_zkvm_types::version::Version].
    pub version: u8,
    /// Witnesses for every block in the chunk.
    pub block_witnesses: Vec<BlockWitness>,
    /// The on-chain L1 msg queue hash before applying L1 msg txs from the chunk.
    pub prev_msg_queue_hash: B256,
    /// The on-chain L1 msg queue hash after applying L1 msg txs from the chunk (for validation)
    pub post_msg_queue_hash: B256,
    /// Fork name specifier
    pub fork_name: String,
    /// Optional inputs in case of domain=validium.
    pub validium_inputs: Option<ValidiumInputs>,
}

#[derive(Clone, Debug)]
@@ -62,29 +94,13 @@ pub struct ChunkDetails {
    pub total_gas_used: u64,
}

impl TryFrom<ChunkProvingTask> for ProvingTask {
    type Error = eyre::Error;

    fn try_from(value: ChunkProvingTask) -> Result<Self> {
        let witness = value.build_guest_input();

        Ok(ProvingTask {
            identifier: value.identifier(),
            fork_name: value.fork_name,
            aggregated_proofs: Vec::new(),
            serialized_witness: vec![to_rkyv_bytes::<RancorError>(&witness)?.to_vec()],
            vk: Vec::new(),
        })
    }
}

impl ChunkProvingTask {
    pub fn stats(&self) -> ChunkDetails {
        let num_blocks = self.block_witnesses.len();
        let num_txs = self
            .block_witnesses
            .iter()
            .map(|b| b.transaction.len())
            .map(|b| b.transactions.len())
            .sum::<usize>();
        let total_gas_used = self
            .block_witnesses
@@ -99,6 +115,26 @@ impl ChunkProvingTask {
        }
    }

    pub fn into_proving_task_with_precheck(self) -> Result<(ProvingTask, ChunkInfo, B256)> {
        let (witness, chunk_info, chunk_pi_hash) = self.precheck()?;
        let serialized_witness = if crate::witness_use_legacy_mode(&self.fork_name)? {
            let legacy_witness = LegacyChunkWitness::from(witness);
            to_rkyv_bytes::<RancorError>(&legacy_witness)?.into_vec()
        } else {
            super::encode_task_to_witness(&witness)?
        };

        let proving_task = ProvingTask {
            identifier: self.identifier(),
            fork_name: self.fork_name,
            aggregated_proofs: Vec::new(),
            serialized_witness: vec![serialized_witness],
            vk: Vec::new(),
        };

        Ok((proving_task, chunk_info, chunk_pi_hash))
    }

    fn identifier(&self) -> String {
        assert!(!self.block_witnesses.is_empty(), "{CHUNK_SANITY_MSG}",);

@@ -118,34 +154,54 @@ impl ChunkProvingTask {
        format!("{first}-{last}")
    }

    fn build_guest_input(&self) -> ChunkWitness {
        ChunkWitness {
            blocks: self.block_witnesses.to_vec(),
            prev_msg_queue_hash: self.prev_msg_queue_hash,
            fork_name: self.fork_name.to_lowercase().as_str().into(),
    fn build_guest_input(&self, version: Version) -> ChunkWitness {
        if version.is_validium() {
            assert!(self.validium_inputs.is_some());
            ChunkWitness::new(
                version.as_version_byte(),
                &self.block_witnesses,
                self.prev_msg_queue_hash,
                version.fork,
                self.validium_inputs.clone(),
            )
        } else {
            ChunkWitness::new_scroll(
                version.as_version_byte(),
                &self.block_witnesses,
                self.prev_msg_queue_hash,
                version.fork,
            )
        }
    }

    fn insert_state(&mut self, node: sbv_primitives::Bytes) {
        self.block_witnesses[0].states.push(node);
    }
}

const MAX_FETCH_NODES_ATTEMPTS: usize = 15;
    fn precheck(&self) -> Result<(ChunkWitness, ChunkInfo, B256)> {
        let version = Version::from(self.version);
        let witness = self.build_guest_input(version);
        let chunk_info = ChunkInfo::try_from(witness.clone()).map_err(|e| eyre::eyre!("{e}"))?;
        assert_eq!(chunk_info.post_msg_queue_hash, self.post_msg_queue_hash);
        let chunk_pi_hash = chunk_info.pi_hash_by_version(version);
        Ok((witness, chunk_info, chunk_pi_hash))
    }

impl TryFromWithInterpreter<&mut ChunkProvingTask> for ChunkInfo {
    fn try_from_with_interpret(
        value: &mut ChunkProvingTask,
    /// This method checks the validity of the current task (there may be missing storage nodes)
    /// and tries fixing it until everything is ok.
    #[deprecated]
    pub fn prepare_task_via_interpret(
        &mut self,
        interpreter: impl ChunkInterpreter,
    ) -> eyre::Result<Self> {
    ) -> eyre::Result<()> {
        use eyre::eyre;

        let err_prefix = format!(
            "metadata_with_prechecks for task_id={:?}",
            value.identifier()
            self.identifier()
        );

        if value.block_witnesses.is_empty() {
        if self.block_witnesses.is_empty() {
            return Err(eyre!(
                "{err_prefix}: chunk should contain at least one block",
            ));
@@ -156,8 +212,10 @@ impl TryFromWithInterpreter<&mut ChunkProvingTask> for ChunkInfo {
        let err_parse_re = regex::Regex::new(pattern)?;
        let mut attempts = 0;
        loop {
            match execute(&value.build_guest_input()) {
                Ok(chunk_info) => return Ok(chunk_info),
            let witness = self.build_guest_input(Version::euclid_v2());

            match execute(witness) {
                Ok(_) => return Ok(()),
                Err(e) => {
                    if let Some(caps) = err_parse_re.captures(&e) {
                        let hash = caps[2].to_string();
@@ -174,7 +232,7 @@ impl TryFromWithInterpreter<&mut ChunkProvingTask> for ChunkInfo {
                        hash.parse::<sbv_primitives::B256>().expect("should be hex");
                        let node = interpreter.try_fetch_storage_node(node_hash)?;
                        tracing::warn!("missing node fetched: {node}");
                        value.insert_state(node);
                        self.insert_state(node);
                    } else {
                        return Err(eyre!("{err_prefix}: {e}"));
                    }
@@ -183,3 +241,5 @@ impl TryFromWithInterpreter<&mut ChunkProvingTask> for ChunkInfo {
                }
            }
        }
    }
}

const MAX_FETCH_NODES_ATTEMPTS: usize = 15;

@@ -1,5 +1,6 @@
use eyre::Result;
use sbv_primitives::{types::BlockWitness, Bytes, B256};
use sbv_core::BlockWitness;
use sbv_primitives::{types::consensus::TxL1Message, Bytes, B256};

/// An interpreter which is critical in translating chunk data,
/// since we need to grab block witness and storage node data
@@ -12,13 +13,22 @@ pub trait ChunkInterpreter {
    ) -> Result<BlockWitness> {
        Err(eyre::eyre!("not implemented"))
    }

    fn try_fetch_storage_node(&self, _node_hash: B256) -> Result<Bytes> {
        Err(eyre::eyre!("not implemented"))
    }

    fn try_fetch_l1_msgs(&self, _block_number: u64) -> Result<Vec<TxL1Message>> {
        Err(eyre::eyre!("not implemented"))
    }
}

pub trait TryFromWithInterpreter<T>: Sized {
    fn try_from_with_interpret(value: T, interpreter: impl ChunkInterpreter) -> Result<Self>;
    fn try_from_with_interpret(
        value: T,
        decryption_key: Option<&[u8]>,
        interpreter: impl ChunkInterpreter,
    ) -> Result<Self>;
}

pub struct DummyInterpreter {}
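
A concrete interpreter overrides only the hooks it can serve, leaving the rest to the default "not implemented" bodies; a minimal sketch backed by a hypothetical RPC client (`MyRpcClient` and its two methods are assumptions for illustration, not part of this repository):

pub struct RpcInterpreter {
    rpc: MyRpcClient, // hypothetical client type
}

impl ChunkInterpreter for RpcInterpreter {
    fn try_fetch_storage_node(&self, node_hash: B256) -> Result<Bytes> {
        // Delegate to the hypothetical client; surface misses as eyre errors.
        self.rpc.storage_node(node_hash)
    }

    fn try_fetch_l1_msgs(&self, block_number: u64) -> Result<Vec<TxL1Message>> {
        self.rpc.l1_messages(block_number)
    }
    // The block-witness hook keeps its default "not implemented" body.
}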

1
crates/libzkp/src/tasks/chunk_task.json
Normal file
File diff suppressed because one or more lines are too long
@@ -1,10 +1,14 @@
#![allow(static_mut_refs)]

mod euclidv2;
use euclidv2::EuclidV2Verifier;
mod universal;
use eyre::Result;
use serde::{Deserialize, Serialize};
use std::{cell::OnceCell, path::Path, rc::Rc};
use std::{
    collections::HashMap,
    path::Path,
    sync::{Arc, Mutex, OnceLock},
};
use universal::Verifier;

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum TaskType {
@@ -31,48 +35,68 @@ pub struct VKDump {
}

pub trait ProofVerifier {
    fn verify(&self, task_type: TaskType, proof: Vec<u8>) -> Result<bool>;
    fn verify(&self, task_type: TaskType, proof: &[u8]) -> Result<bool>;
    fn dump_vk(&self, file: &Path);
}

#[derive(Debug, Serialize, Deserialize)]
pub struct CircuitConfig {
    pub version: u8,
    pub fork_name: String,
    pub assets_path: String,
    #[serde(default)]
    pub features: Option<String>,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct VerifierConfig {
    pub high_version_circuit: CircuitConfig,
    pub circuits: Vec<CircuitConfig>,
}

type HardForkName = String;
pub(crate) type HardForkName = String;

struct VerifierPair(HardForkName, Rc<Box<dyn ProofVerifier>>);
static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();
type VerifierType = Arc<Mutex<dyn ProofVerifier + Send>>;
static VERIFIERS: OnceLock<HashMap<HardForkName, VerifierType>> = OnceLock::new();

pub fn init(config: VerifierConfig) {
    let verifier = EuclidV2Verifier::new(&config.high_version_circuit.assets_path);
    unsafe {
        VERIFIER_HIGH
            .set(VerifierPair(
                config.high_version_circuit.fork_name,
                Rc::new(Box::new(verifier)),
            ))
            .unwrap_unchecked();
    let mut verifiers: HashMap<HardForkName, VerifierType> = Default::default();

    for cfg in &config.circuits {
        let canonical_fork_name = cfg.fork_name.to_lowercase();

        let verifier = Verifier::new(&cfg.assets_path, cfg.version);
        let ret = verifiers.insert(canonical_fork_name, Arc::new(Mutex::new(verifier)));
        assert!(
            ret.is_none(),
            "DO NOT init the same fork {} twice",
            cfg.fork_name
        );
        tracing::info!(
            "load verifier config for fork {} (ver {})",
            cfg.fork_name,
            cfg.version
        );
    }

    let ret = VERIFIERS.set(verifiers).is_ok();
    assert!(ret);
}

pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
    unsafe {
        if let Some(verifier) = VERIFIER_HIGH.get() {
            if verifier.0 == fork_name {
                return Ok(verifier.1.clone());
            }
pub fn get_verifier(fork_name: &str) -> Result<Arc<Mutex<dyn ProofVerifier>>> {
    if let Some(verifiers) = VERIFIERS.get() {
        if let Some(verifier) = verifiers.get(fork_name) {
            return Ok(verifier.clone());
        }

        Err(eyre::eyre!(
            "failed to get verifier, key not found: {}, has {:?}",
            fork_name,
            verifiers.keys().collect::<Vec<_>>(),
        ))
    } else {
        Err(eyre::eyre!(
            "failed to get verifier, not initialized: {}",
            fork_name
        ))
    }
    Err(eyre::eyre!(
        "failed to get verifier, key not found, {}",
        fork_name
    ))
}
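
Callers resolve a verifier per fork and hold its mutex only for the duration of one verification; a minimal sketch of the consuming side (mirroring `libzkp::verify_proof` earlier in this diff):

fn verify_for_fork(fork_name: &str, task_type: TaskType, proof: &[u8]) -> Result<bool> {
    // get_verifier hands back Arc<Mutex<dyn ProofVerifier>>; the lock
    // serializes concurrent verifications against the same fork's verifier.
    let verifier = get_verifier(&fork_name.to_lowercase())?;
    let ok = verifier.lock().unwrap().verify(task_type, proof)?;
    Ok(ok)
}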

@@ -1,66 +0,0 @@
use super::{ProofVerifier, TaskType, VKDump};

use eyre::Result;

use crate::{
    proofs::{AsRootProof, BatchProof, BundleProof, ChunkProof, IntoEvmProof},
    utils::panic_catch,
};
use scroll_zkvm_verifier_euclid::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
use std::{fs::File, path::Path};

pub struct EuclidV2Verifier {
    chunk_verifier: ChunkVerifier,
    batch_verifier: BatchVerifier,
    bundle_verifier: BundleVerifierEuclidV2,
}

impl EuclidV2Verifier {
    pub fn new(assets_dir: &str) -> Self {
        let verifier_bin = Path::new(assets_dir).join("verifier.bin");
        let config = Path::new(assets_dir).join("root-verifier-vm-config");
        let exe = Path::new(assets_dir).join("root-verifier-committed-exe");

        Self {
            chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
                .expect("Setting up chunk verifier"),
            batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
                .expect("Setting up batch verifier"),
            bundle_verifier: BundleVerifierEuclidV2::setup(&config, &exe, &verifier_bin)
                .expect("Setting up bundle verifier"),
        }
    }
}

impl ProofVerifier for EuclidV2Verifier {
    fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
        panic_catch(|| match task_type {
            TaskType::Chunk => {
                let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
                self.chunk_verifier.verify_proof(proof.as_root_proof())
            }
            TaskType::Batch => {
                let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
                self.batch_verifier.verify_proof(proof.as_root_proof())
            }
            TaskType::Bundle => {
                let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
                self.bundle_verifier
                    .verify_proof_evm(&proof.into_evm_proof())
            }
        })
        .map_err(|err_str: String| eyre::eyre!("{err_str}"))
    }

    fn dump_vk(&self, file: &Path) {
        use base64::{prelude::BASE64_STANDARD, Engine};
        let f = File::create(file).expect("Failed to open file to dump VK");

        let dump = VKDump {
            chunk_vk: BASE64_STANDARD.encode(self.chunk_verifier.get_app_vk()),
            batch_vk: BASE64_STANDARD.encode(self.batch_verifier.get_app_vk()),
            bundle_vk: BASE64_STANDARD.encode(self.bundle_verifier.get_app_vk()),
        };
        serde_json::to_writer(f, &dump).expect("Failed to dump VK");
    }
}

61
crates/libzkp/src/verifier/universal.rs
Normal file
@@ -0,0 +1,61 @@
use super::{ProofVerifier, TaskType};

use eyre::Result;

use crate::{
    proofs::{AsRootProof, BatchProof, BundleProof, ChunkProof, IntoEvmProof},
    utils::panic_catch,
};
use scroll_zkvm_types::version::Version;
use scroll_zkvm_verifier::verifier::UniversalVerifier;
use std::path::Path;

pub struct Verifier {
    verifier: UniversalVerifier,
    version: Version,
}

impl Verifier {
    pub fn new(assets_dir: &str, ver_n: u8) -> Self {
        let verifier_bin = Path::new(assets_dir);

        Self {
            verifier: UniversalVerifier::setup(verifier_bin)
                .expect("Setting up universal verifier"),
            version: Version::from(ver_n),
        }
    }
}

impl ProofVerifier for Verifier {
    fn verify(&self, task_type: super::TaskType, proof: &[u8]) -> Result<bool> {
        panic_catch(|| match task_type {
            TaskType::Chunk => {
                let proof = serde_json::from_slice::<ChunkProof>(proof).unwrap();
                assert!(proof.pi_hash_check(self.version));
                self.verifier
                    .verify_stark_proof(proof.as_root_proof(), &proof.vk)
                    .unwrap()
            }
            TaskType::Batch => {
                let proof = serde_json::from_slice::<BatchProof>(proof).unwrap();
                assert!(proof.pi_hash_check(self.version));
                self.verifier
                    .verify_stark_proof(proof.as_root_proof(), &proof.vk)
                    .unwrap()
            }
            TaskType::Bundle => {
                let proof = serde_json::from_slice::<BundleProof>(proof).unwrap();
                assert!(proof.pi_hash_check(self.version));
                let vk = proof.vk.clone();
                let evm_proof = proof.into_evm_proof();
                self.verifier.verify_evm_proof(&evm_proof, &vk).unwrap()
            }
        })
        .map(|_| true)
        .map_err(|err_str: String| eyre::eyre!("{err_str}"))
    }

    fn dump_vk(&self, _file: &Path) {
        panic!("dump vk has been deprecated");
    }
}
@@ -11,4 +11,5 @@ crate-type = ["cdylib"]
[dependencies]
libzkp = { path = "../libzkp" }
l2geth = { path = "../l2geth" }
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tracing.workspace = true

@@ -5,6 +5,47 @@ use std::ffi::{c_char, CString};
use libzkp::TaskType;
use utils::{c_char_to_str, c_char_to_vec};

use std::sync::OnceLock;

static LOG_SETTINGS: OnceLock<Result<(), String>> = OnceLock::new();

fn enable_dump() -> bool {
    static ZKVM_DEBUG_DUMP: OnceLock<bool> = OnceLock::new();
    *ZKVM_DEBUG_DUMP.get_or_init(|| {
        std::env::var("ZKVM_DEBUG")
            .or_else(|_| std::env::var("ZKVM_DEBUG_PROOF"))
            .map(|s| s.to_lowercase() == "true")
            .unwrap_or(false)
    })
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_tracing() {
    use tracing_subscriber::filter::{EnvFilter, LevelFilter};

    LOG_SETTINGS
        .get_or_init(|| {
            tracing_subscriber::fmt()
                .with_env_filter(
                    EnvFilter::builder()
                        .with_default_directive(LevelFilter::INFO.into())
                        .from_env_lossy(),
                )
                .with_ansi(false)
                .with_level(true)
                .with_target(true)
                .try_init()
                .map_err(|e| format!("{e}"))?;

            Ok(())
        })
        .clone()
        .expect("Failed to initialize tracing subscriber");

    tracing::info!("Tracing has been initialized normally");
}
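
Usage note: `EnvFilter::builder().from_env_lossy()` reads the standard `RUST_LOG` environment variable, so log verbosity can be tuned at runtime (for example `RUST_LOG=libzkp=debug`), falling back to the `info` default directive set above when the variable is unset or malformed.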
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn init_verifier(config: *const c_char) {
|
||||
@@ -21,6 +62,7 @@ pub unsafe extern "C" fn init_l2geth(config: *const c_char) {
|
||||
|
||||
fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
|
||||
let fork_name_str = c_char_to_str(fork_name);
|
||||
let proof_str = proof;
|
||||
let proof = c_char_to_vec(proof);
|
||||
|
||||
match libzkp::verify_proof(proof, fork_name_str, task_type) {
|
||||
@@ -28,7 +70,24 @@ fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskT
|
||||
tracing::error!("{:?} verify failed, error: {:#}", task_type, e);
|
||||
false as c_char
|
||||
}
|
||||
Ok(result) => result as c_char,
|
||||
Ok(result) => {
|
||||
if !result && enable_dump() {
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
// Dump req.input to a temporary file
|
||||
let timestamp = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs();
|
||||
let filename = format!("/tmp/proof_{}.json", timestamp);
|
||||
let cstr = unsafe { std::ffi::CStr::from_ptr(proof_str) };
|
||||
if let Err(e) = std::fs::write(&filename, cstr.to_bytes()) {
|
||||
eprintln!("Failed to write proof to file {}: {}", filename, e);
|
||||
} else {
|
||||
println!("Dumped failed proof to {}", filename);
|
||||
}
|
||||
}
|
||||
result as c_char
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -91,16 +150,31 @@ pub unsafe extern "C" fn gen_universal_task(
    task_type: i32,
    task: *const c_char,
    fork_name: *const c_char,
    expected_vk: *const u8,
    expected_vk_len: usize,
    decryption_key: *const u8,
    decryption_key_len: usize,
) -> HandlingResult {
    let mut interpreter = None;
    let task_json = if task_type == TaskType::Chunk as i32 {
        let pre_task_str = c_char_to_str(task);
        let cli = l2geth::get_client();
        match libzkp::checkout_chunk_task(pre_task_str, cli) {
            Ok(str) => {
                interpreter.replace(cli);
                str
        let decryption_key = if decryption_key_len > 0 {
            if decryption_key_len != 32 {
                tracing::error!(
                    "gen_universal_task received {}-byte decryption key; expected 32",
                    decryption_key_len
                );
                return failed_handling_result();
            }
            Some(std::slice::from_raw_parts(
                decryption_key,
                decryption_key_len,
            ))
        } else {
            None
        };
        match libzkp::checkout_chunk_task(pre_task_str, decryption_key, cli) {
            Ok(str) => str,
            Err(e) => {
                tracing::error!("gen_universal_task failed at pre interpret step, error: {e}");
                return failed_handling_result();
@@ -109,10 +183,17 @@ pub unsafe extern "C" fn gen_universal_task(
    } else {
        c_char_to_str(task).to_string()
    };
    let ret =
        libzkp::gen_universal_task(task_type, &task_json, c_char_to_str(fork_name), interpreter);

    if let Ok((pi_hash, task_json, meta_json)) = ret {
        let expected_vk = if expected_vk_len > 0 {
            std::slice::from_raw_parts(expected_vk, expected_vk_len)
        } else {
            &[]
        };

        let ret =
            libzkp::gen_universal_task(task_type, &task_json, c_char_to_str(fork_name), expected_vk);

    if let Ok((pi_hash, meta_json, task_json)) = ret {
        let expected_pi_hash = pi_hash.0.map(|byte| byte as c_char);
        HandlingResult {
            ok: true as c_char,
@@ -121,6 +202,22 @@ pub unsafe extern "C" fn gen_universal_task(
            expected_pi_hash,
        }
    } else {
        if enable_dump() {
            use std::time::{SystemTime, UNIX_EPOCH};
            // Dump the failed task to a temporary file
            let timestamp = SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs();
            let c_str = unsafe { std::ffi::CStr::from_ptr(fork_name) };
            let filename = format!("/tmp/task_{}_{}.json", c_str.to_str().unwrap(), timestamp);
            if let Err(e) = std::fs::write(&filename, task_json.as_bytes()) {
                eprintln!("Failed to write task to file {}: {}", filename, e);
            } else {
                println!("Dumped failed task to {}", filename);
            }
        }

        tracing::error!("gen_universal_task failed, error: {:#}", ret.unwrap_err());
        failed_handling_result()
    }
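A note on the new `decryption_key` arguments above: the FFI contract is that the caller passes either a null/zero-length key or exactly 32 bytes; anything else fails the task up front. A standalone sketch of that contract (this helper is hypothetical, not part of the diff):

/// Hypothetical helper mirroring gen_universal_task's key handling:
/// zero length means "no key"; any length other than 32 is rejected.
unsafe fn parse_decryption_key<'a>(ptr: *const u8, len: usize) -> Result<Option<&'a [u8]>, String> {
    match len {
        0 => Ok(None),
        32 => Ok(Some(std::slice::from_raw_parts(ptr, len))),
        n => Err(format!("expected a 32-byte decryption key, got {n} bytes")),
    }
}
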
@@ -158,6 +255,19 @@ pub unsafe extern "C" fn gen_wrapped_proof(
    }
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn univ_task_compatibility_fix(task_json: *const c_char) -> *mut c_char {
    let task_json_str = c_char_to_str(task_json);
    match libzkp::univ_task_compatibility_fix(task_json_str) {
        Ok(result) => CString::new(result).unwrap().into_raw(),
        Err(e) => {
            tracing::error!("univ_task_compatibility_fix failed, error: {:#}", e);
            std::ptr::null_mut()
        }
    }
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn release_string(ptr: *mut c_char) {

@@ -1,14 +1,15 @@
[package]
name = "prover"
version = "0.1.0"
edition = "2021"
version.workspace = true
edition.workspace = true

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
scroll-zkvm-types.workspace = true
scroll-zkvm-prover-euclid.workspace = true
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", branch = "refactor/scroll" }
scroll-zkvm-prover.workspace = true
libzkp = { path = "../libzkp" }
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "05648db" }
serde.workspace = true
serde_json.workspace = true
once_cell.workspace = true
@@ -17,8 +18,9 @@ tiny-keccak = { workspace = true, features = ["sha3", "keccak"] }
eyre.workspace = true

futures = "0.3.30"
futures-util = "0.3"

reqwest = { version = "0.12.4", features = ["gzip"] }
reqwest = { version = "0.12.4", features = ["gzip", "stream"] }
reqwest-middleware = "0.3"
reqwest-retry = "0.5"
hex = "0.4.3"
@@ -30,5 +32,9 @@ sled = "0.34.7"
http = "1.1.0"
clap = { version = "4.5", features = ["derive"] }
ctor = "0.2.8"
url = "2.5.4"
url = { version = "2.5.4", features = ["serde"] }
serde_bytes = "0.11.15"

[features]
default = []
cuda = ["scroll-zkvm-prover/cuda"]
crates/prover-bin/assets_url_preset.json (new file, 10 lines)
@@ -0,0 +1,10 @@
{
    "feynman": {
        "b68fdc3f28a5ce006280980df70cd3447e56913e5bca6054603ba85f0794c23a6618ea25a7991845bbc5fd571670ee47379ba31ace92d345bca59702a0d4112d": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.5.2/chunk/",
        "9a3f66370f11e3303f1a1248921025104e83253efea43a70d221cf4e15fc145bf2be2f4468d1ac4a70e7682babb1c60417e21c7633d4b55b58f44703ec82b05a": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.5.2/batch/",
        "1f8627277e1c1f6e1cc70c03e6fde06929e5ea27ca5b1d56e23b235dfeda282e22c0e5294bcb1b3a9def836f8d0f18612a9860629b9497292976ca11844b7e73": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.5.2/bundle/",
        "7eb91f1885cc7a63cc848928f043fa56bf747161a74cd933d88c0456b90643346618ea25a7991845bbc5fd571670ee47379ba31ace92d345bca59702a0d4112d": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.6.0-rc.1/chunk/",
        "dc653e7416628c612fa4d80b4724002bad4fde3653aef7316b80df0c19740a1bf2be2f4468d1ac4a70e7682babb1c60417e21c7633d4b55b58f44703ec82b05a": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.6.0-rc.1/batch/",
        "14de1c74b663ed3c99acb03e90a5753b5923233c5c590864ad7746570297d16722c0e5294bcb1b3a9def836f8d0f18612a9860629b9497292976ca11844b7e73": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.6.0-rc.1/bundle/"
    }
}
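The preset above maps a fork name to pairs of (verifying-key hex, asset base URL); each base URL ends with a slash so file names can be joined onto it. It deserializes into the same shape as the `GLOBAL_ASSET_URLS` static further down. A minimal sketch, assuming the `serde` feature of `url` enabled in the Cargo.toml change above:

use std::collections::HashMap;

// Sketch: parse the preset into fork -> (vk hex -> base URL).
fn parse_preset(json: &str) -> serde_json::Result<HashMap<String, HashMap<String, url::Url>>> {
    serde_json::from_str(json)
}
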
@@ -2,12 +2,13 @@ mod prover;
mod types;
mod zk_circuits_handler;

use clap::{ArgAction, Parser};
use clap::{ArgAction, Parser, Subcommand};
use prover::{LocalProver, LocalProverConfig};
use scroll_proving_sdk::{
    prover::ProverBuilder,
    prover::{types::ProofType, ProverBuilder},
    utils::{get_version, init_tracing},
};
use std::{fs::File, io::BufReader, path::Path};

#[derive(Parser, Debug)]
#[command(disable_version_flag = true)]
@@ -16,6 +17,9 @@ struct Args {
    #[arg(long = "config", default_value = "conf/config.json")]
    config_file: String,

    #[arg(long = "forkname")]
    fork_name: Option<String>,

    /// Version of this prover
    #[arg(short, long, action = ArgAction::SetTrue)]
    version: bool,
@@ -23,6 +27,24 @@ struct Args {
    /// Path of log file
    #[arg(long = "log.file")]
    log_file: Option<String>,

    #[command(subcommand)]
    command: Option<Commands>,
}

#[derive(Subcommand, Debug)]
enum Commands {
    Handle {
        /// path of the task-set file to handle
        task_path: String,
    },
}

#[derive(Debug, serde::Deserialize)]
struct HandleSet {
    chunks: Vec<String>,
    batches: Vec<String>,
    bundles: Vec<String>,
}

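The `handle` subcommand reads a `HandleSet` from the file at `task_path` (see `main` below); each field lists the task files of one proof type. An illustrative way to produce such a file (paths hypothetical):

// Sketch: write a task-set file the `handle` subcommand can consume.
fn write_example_task_set(path: &str) -> std::io::Result<()> {
    let task_set = serde_json::json!({
        "chunks": ["tasks/chunk-1.json"],
        "batches": ["tasks/batch-1.json"],
        "bundles": ["tasks/bundle-1.json"]
    });
    std::fs::write(path, task_set.to_string())
}
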
#[tokio::main]
@@ -38,13 +60,52 @@ async fn main() -> eyre::Result<()> {

    let cfg = LocalProverConfig::from_file(args.config_file)?;
    let sdk_config = cfg.sdk_config.clone();
    let local_prover = LocalProver::new(cfg);
    let prover = ProverBuilder::new(sdk_config, local_prover)
        .build()
        .await
        .map_err(|e| eyre::eyre!("build prover fail: {e}"))?;
    let local_prover = LocalProver::new(cfg.clone());

    prover.run().await;
    match args.command {
        Some(Commands::Handle { task_path }) => {
            let file = File::open(Path::new(&task_path))?;
            let reader = BufReader::new(file);
            let handle_set: HandleSet = serde_json::from_reader(reader)?;

            let prover = ProverBuilder::new(sdk_config, local_prover)
                .build()
                .await
                .map_err(|e| eyre::eyre!("build prover fail: {e}"))?;

            let prover = std::sync::Arc::new(prover);
            println!("Handling task set 1: chunks ...");
            assert!(
                prover
                    .clone()
                    .one_shot(&handle_set.chunks, ProofType::Chunk)
                    .await
            );
            println!("Done! Handling task set 2: batches ...");
            assert!(
                prover
                    .clone()
                    .one_shot(&handle_set.batches, ProofType::Batch)
                    .await
            );
            println!("Done! Handling task set 3: bundles ...");
            assert!(
                prover
                    .clone()
                    .one_shot(&handle_set.bundles, ProofType::Bundle)
                    .await
            );
            println!("All done!");
        }
        None => {
            let prover = ProverBuilder::new(sdk_config, local_prover)
                .build()
                .await
                .map_err(|e| eyre::eyre!("build prover fail: {e}"))?;

            prover.run().await;
        }
    }

    Ok(())
}

@@ -1,6 +1,5 @@
use crate::zk_circuits_handler::{euclidV2::EuclidV2Handler, CircuitsHandler};
use crate::zk_circuits_handler::{universal::UniversalHandler, CircuitsHandler};
use async_trait::async_trait;
use base64::{prelude::BASE64_STANDARD, Engine};
use eyre::Result;
use scroll_proving_sdk::{
    config::Config as SdkConfig,
@@ -9,18 +8,120 @@ use scroll_proving_sdk::{
            GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
            QueryTaskResponse, TaskStatus,
        },
        types::ProofType,
        ProvingService,
    },
};
use scroll_zkvm_types::ProvingTask;
use serde::{Deserialize, Serialize};
use std::{
    collections::HashMap,
    fs::File,
    sync::Arc,
    path::{Path, PathBuf},
    sync::{Arc, LazyLock},
    time::{SystemTime, UNIX_EPOCH},
};
use tokio::{runtime::Handle, sync::Mutex, task::JoinHandle};

#[derive(Clone, Serialize, Deserialize)]
pub struct AssetsLocationData {
    /// the base url to form a general downloading url for an asset, MUST HAVE A TRAILING SLASH
    pub base_url: url::Url,
    #[serde(default)]
    /// an altered URL for a specified vk
    pub asset_detours: HashMap<String, url::Url>,
}

impl AssetsLocationData {
    pub fn gen_asset_url(&self, vk_as_path: &str, proof_type: ProofType) -> Result<url::Url> {
        Ok(self.base_url.join(
            match proof_type {
                ProofType::Chunk => format!("chunk/{vk_as_path}/"),
                ProofType::Batch => format!("batch/{vk_as_path}/"),
                ProofType::Bundle => format!("bundle/{vk_as_path}/"),
                t => eyre::bail!("unrecognized proof type: {}", t as u8),
            }
            .as_str(),
        )?)
    }

    pub fn validate(&self) -> Result<()> {
        if !self.base_url.path().ends_with('/') {
            eyre::bail!(
                "base_url must have a trailing slash, got: {}",
                self.base_url
            );
        }
        Ok(())
    }

    pub async fn get_asset(
        &self,
        vk: &str,
        url_base: &url::Url,
        base_path: impl AsRef<Path>,
    ) -> Result<PathBuf> {
        let download_files = ["app.vmexe", "openvm.toml"];

        // Step 1: Create a local path for storage
        let storage_path = base_path.as_ref().join(vk);
        std::fs::create_dir_all(&storage_path)?;

        // Step 2 & 3: Download each file if needed
        let client = reqwest::Client::new();

        for filename in download_files.iter() {
            let local_file_path = storage_path.join(filename);
            let download_url = url_base.join(filename)?;

            // Check if file already exists
            if local_file_path.exists() {
                // Get file metadata to check size
                if let Ok(metadata) = std::fs::metadata(&local_file_path) {
                    // Make a HEAD request to get remote file size
                    if let Ok(head_resp) = client.head(download_url.clone()).send().await {
                        if let Some(content_length) = head_resp.headers().get("content-length") {
                            if let Ok(remote_size) =
                                content_length.to_str().unwrap_or("0").parse::<u64>()
                            {
                                // If sizes match, skip download
                                if metadata.len() == remote_size {
                                    println!("File {} already exists with matching size, skipping download", filename);
                                    continue;
                                }
                            }
                        }
                    }
                }
            }

            println!("Downloading {} from {}", filename, download_url);

            let response = client.get(download_url).send().await?;
            if !response.status().is_success() {
                eyre::bail!(
                    "Failed to download {}: HTTP status {}",
                    filename,
                    response.status()
                );
            }

            // Stream the content directly to file instead of loading into memory
            let mut file = std::fs::File::create(&local_file_path)?;
            let mut stream = response.bytes_stream();

            use futures_util::StreamExt;
            while let Some(chunk) = stream.next().await {
                std::io::Write::write_all(&mut file, &chunk?)?;
            }
        }

        // Step 4: Return the storage path
        Ok(storage_path)
    }
}

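The trailing-slash rule that `validate` enforces above exists because of how `url::Url::join` resolves relative paths: without the slash, the last path segment of the base is replaced rather than extended. A small demonstration (URLs illustrative):

fn main() -> Result<(), url::ParseError> {
    let with_slash = url::Url::parse("https://example.com/releases/0.5.2/")?;
    // Joining extends the path as intended.
    assert_eq!(
        with_slash.join("chunk/abc/")?.as_str(),
        "https://example.com/releases/0.5.2/chunk/abc/"
    );

    let without_slash = url::Url::parse("https://example.com/releases/0.5.2")?;
    // "0.5.2" is treated as a file-like segment and silently dropped.
    assert_eq!(
        without_slash.join("chunk/abc/")?.as_str(),
        "https://example.com/releases/chunk/abc/"
    );
    Ok(())
}
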
#[derive(Clone, Serialize, Deserialize)]
pub struct LocalProverConfig {
    pub sdk_config: SdkConfig,
@@ -44,7 +145,14 @@ impl LocalProverConfig {
#[derive(Clone, Serialize, Deserialize)]
pub struct CircuitConfig {
    pub hard_fork_name: String,
    /// The path to save assets for a specified hard fork phase
    pub workspace_path: String,
    #[serde(flatten)]
    /// The location data for dynamic loading
    pub location_data: AssetsLocationData,
    /// cached vk values to save some initial cost; for debugging only
    #[serde(default)]
    pub vks: HashMap<ProofType, String>,
}

pub struct LocalProver {
@@ -52,7 +160,7 @@ pub struct LocalProver {
    next_task_id: u64,
    current_task: Option<JoinHandle<Result<String>>>,

    active_handler: Option<(String, Arc<dyn CircuitsHandler>)>,
    handlers: HashMap<String, Arc<dyn CircuitsHandler>>,
}

#[async_trait]
@@ -60,27 +168,15 @@ impl ProvingService for LocalProver {
    fn is_local(&self) -> bool {
        true
    }
    async fn get_vks(&self, req: GetVkRequest) -> GetVkResponse {
        let mut vks = vec![];
        for hard_fork_name in self.config.circuits.keys() {
            let handler = self.new_handler(hard_fork_name);
            for proof_type in &req.proof_types {
                let vk = handler.get_vk(*proof_type).await;

                if let Some(vk) = vk {
                    vks.push(BASE64_STANDARD.encode(vk));
                }
            }
    async fn get_vks(&self, _: GetVkRequest) -> GetVkResponse {
        // get_vk has been deprecated in the new prover with the dynamic asset loading scheme
        GetVkResponse {
            vks: vec![],
            error: None,
        }

        GetVkResponse { vks, error: None }
    }
    async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
        self.set_active_handler(&req.hard_fork_name);
        match self
            .do_prove(req, self.active_handler.as_ref().unwrap().1.clone())
            .await
        {
        match self.do_prove(req).await {
            Ok(resp) => resp,
            Err(e) => ProveResponse {
                status: TaskStatus::Failed,
@@ -131,29 +227,93 @@ impl ProvingService for LocalProver {
    }
}

static GLOBAL_ASSET_URLS: LazyLock<HashMap<String, HashMap<String, url::Url>>> =
    LazyLock::new(|| {
        const ASSETS_JSON: &str = include_str!("../assets_url_preset.json");
        serde_json::from_str(ASSETS_JSON).expect("Failed to parse assets_url_preset.json")
    });

impl LocalProver {
    pub fn new(config: LocalProverConfig) -> Self {
    pub fn new(mut config: LocalProverConfig) -> Self {
        for (fork_name, circuit_config) in config.circuits.iter_mut() {
            // validate each base url
            circuit_config.location_data.validate().unwrap();
            let mut template_url_mapping = GLOBAL_ASSET_URLS
                .get(&fork_name.to_lowercase())
                .cloned()
                .unwrap_or_default();

            // apply default settings from the template
            for (key, url) in circuit_config.location_data.asset_detours.drain() {
                template_url_mapping.insert(key, url);
            }

            circuit_config.location_data.asset_detours = template_url_mapping;

            // validate each detour url
            for url in circuit_config.location_data.asset_detours.values() {
                assert!(
                    url.path().ends_with('/'),
                    "url {} must end with /",
                    url.as_str()
                );
            }
        }

        Self {
            config,
            next_task_id: 0,
            current_task: None,
            active_handler: None,
            handlers: HashMap::new(),
        }
    }

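Note the precedence in `new` above: the preset template is loaded first, then the operator's configured detours are drained into it, so a config entry overwrites a preset entry with the same vk. The same pattern in isolation (string values stand in for URLs):

use std::collections::HashMap;

// Sketch: later inserts win, so config detours override the preset.
fn merge_detours(
    mut template: HashMap<String, String>,
    config: HashMap<String, String>,
) -> HashMap<String, String> {
    for (vk, url) in config {
        template.insert(vk, url);
    }
    template
}
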
    async fn do_prove(
        &mut self,
        req: ProveRequest,
        handler: Arc<dyn CircuitsHandler>,
    ) -> Result<ProveResponse> {
    async fn do_prove(&mut self, req: ProveRequest) -> Result<ProveResponse> {
        self.next_task_id += 1;
        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let created_at = duration.as_secs() as f64 + duration.subsec_nanos() as f64 * 1e-9;

        let req_clone = req.clone();
        let prover_task = UniversalHandler::get_task_from_input(&req.input)?;
        let is_openvm_13 = prover_task.use_openvm_13;
        let prover_task: ProvingTask = prover_task.into();
        let vk = hex::encode(&prover_task.vk);
        let handler = if let Some(handler) = self.handlers.get(&vk) {
            handler.clone()
        } else {
            let base_config = self
                .config
                .circuits
                .get(&req.hard_fork_name)
                .ok_or_else(|| {
                    eyre::eyre!(
                        "coordinator sent unexpected forkname {}",
                        req.hard_fork_name
                    )
                })?;
            let url_base = if let Some(url) = base_config.location_data.asset_detours.get(&vk) {
                url.clone()
            } else {
                base_config
                    .location_data
                    .gen_asset_url(&vk, req.proof_type)?
            };
            let asset_path = base_config
                .location_data
                .get_asset(&vk, &url_base, &base_config.workspace_path)
                .await?;
            let circuits_handler = Arc::new(Mutex::new(UniversalHandler::new(
                &asset_path,
                is_openvm_13,
            )?));
            self.handlers.insert(vk, circuits_handler.clone());
            circuits_handler
        };

        let handle = Handle::current();
        let task_handle =
            tokio::task::spawn_blocking(move || handle.block_on(handler.get_proof_data(req_clone)));
        let is_evm = req.proof_type == ProofType::Bundle;
        let task_handle = tokio::task::spawn_blocking(move || {
            handle.block_on(handler.get_proof_data(&prover_task, is_evm))
        });
        self.current_task = Some(task_handle);

        Ok(ProveResponse {
@@ -167,26 +327,4 @@ impl LocalProver {
            ..Default::default()
        })
    }

    fn set_active_handler(&mut self, hard_fork_name: &str) {
        if let Some(handler) = &self.active_handler {
            if handler.0 == hard_fork_name {
                return;
            }
        }
        self.active_handler = Some((hard_fork_name.to_string(), self.new_handler(hard_fork_name)));
    }

    fn new_handler(&self, hard_fork_name: &str) -> Arc<dyn CircuitsHandler> {
        // if we got assigned a task for an unknown hard fork, there is something wrong in the
        // coordinator
        let config = self.config.circuits.get(hard_fork_name).unwrap();

        match hard_fork_name {
            "euclidV2" => Arc::new(Arc::new(Mutex::new(EuclidV2Handler::new(
                &config.workspace_path,
            )))) as Arc<dyn CircuitsHandler>,
            _ => unreachable!(),
        }
    }
}

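The proving call in `do_prove` above runs on Tokio's blocking pool via `spawn_blocking`, with a captured runtime `Handle` so the synchronous prover thread can still drive async work through `block_on`. A self-contained sketch of that pattern (the async body is a stand-in for `get_proof_data`):

async fn run_blocking_proof() -> eyre::Result<String> {
    let handle = tokio::runtime::Handle::current();
    let task = tokio::task::spawn_blocking(move || {
        // block_on bridges back into async from the blocking thread.
        handle.block_on(async { Ok::<_, eyre::Report>("proof-json".to_string()) })
    });
    // The outer `?` surfaces a JoinError; the inner Result is returned as-is.
    task.await?
}
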
@@ -1,3 +1,5 @@
#![allow(dead_code)]

use serde::{Deserialize, Deserializer, Serialize, Serializer};

#[derive(Serialize, Deserialize, Default)]

@@ -1,67 +1,13 @@
//pub mod euclid;

#[allow(non_snake_case)]
pub mod euclidV2;
pub mod universal;

use async_trait::async_trait;
use eyre::Result;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, ProofType};
use scroll_zkvm_prover_euclid::ProverConfig;
use std::path::Path;
use scroll_zkvm_types::ProvingTask;

#[async_trait]
pub trait CircuitsHandler: Sync + Send {
    async fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>>;

    async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String>;
}

#[derive(Clone, Copy)]
pub(crate) enum Phase {
    EuclidV2,
}

impl Phase {
    pub fn phase_spec_chunk(&self, workspace_path: &Path) -> ProverConfig {
        let dir_cache = Some(workspace_path.join("cache"));
        let path_app_exe = workspace_path.join("chunk/app.vmexe");
        let path_app_config = workspace_path.join("chunk/openvm.toml");
        let segment_len = Some((1 << 22) - 100);
        ProverConfig {
            dir_cache,
            path_app_config,
            path_app_exe,
            segment_len,
            ..Default::default()
        }
    }

    pub fn phase_spec_batch(&self, workspace_path: &Path) -> ProverConfig {
        let dir_cache = Some(workspace_path.join("cache"));
        let path_app_exe = workspace_path.join("batch/app.vmexe");
        let path_app_config = workspace_path.join("batch/openvm.toml");
        let segment_len = Some((1 << 22) - 100);
        ProverConfig {
            dir_cache,
            path_app_config,
            path_app_exe,
            segment_len,
            ..Default::default()
        }
    }

    pub fn phase_spec_bundle(&self, workspace_path: &Path) -> ProverConfig {
        let dir_cache = Some(workspace_path.join("cache"));
        let path_app_config = workspace_path.join("bundle/openvm.toml");
        let segment_len = Some((1 << 22) - 100);
        match self {
            Phase::EuclidV2 => ProverConfig {
                dir_cache,
                path_app_config,
                segment_len,
                path_app_exe: workspace_path.join("bundle/app.vmexe"),
                ..Default::default()
            },
        }
    }
    async fn get_proof_data(&self, u_task: &ProvingTask, need_snark: bool) -> Result<String>;
}

@@ -1,144 +0,0 @@
use std::{path::Path, sync::Arc};

use super::CircuitsHandler;
use anyhow::{anyhow, Result};
use async_trait::async_trait;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, ProofType};
use scroll_zkvm_prover_euclid::{
    task::{batch::BatchProvingTask, bundle::BundleProvingTask, chunk::ChunkProvingTask},
    BatchProver, BundleProverEuclidV1, ChunkProver, ProverConfig,
};
use tokio::sync::Mutex;
pub struct EuclidHandler {
    chunk_prover: ChunkProver,
    batch_prover: BatchProver,
    bundle_prover: BundleProverEuclidV1,
}

#[derive(Clone, Copy)]
pub(crate) enum Phase {
    EuclidV1,
    EuclidV2,
}

impl Phase {
    pub fn as_str(&self) -> &str {
        match self {
            Phase::EuclidV1 => "euclidv1",
            Phase::EuclidV2 => "euclidv2",
        }
    }

    pub fn phase_spec_chunk(&self, workspace_path: &Path) -> ProverConfig {
        let dir_cache = Some(workspace_path.join("cache"));
        let path_app_exe = workspace_path.join("chunk/app.vmexe");
        let path_app_config = workspace_path.join("chunk/openvm.toml");
        let segment_len = Some((1 << 22) - 100);
        ProverConfig {
            dir_cache,
            path_app_config,
            path_app_exe,
            segment_len,
            ..Default::default()
        }
    }

    pub fn phase_spec_batch(&self, workspace_path: &Path) -> ProverConfig {
        let dir_cache = Some(workspace_path.join("cache"));
        let path_app_exe = workspace_path.join("batch/app.vmexe");
        let path_app_config = workspace_path.join("batch/openvm.toml");
        let segment_len = Some((1 << 22) - 100);
        ProverConfig {
            dir_cache,
            path_app_config,
            path_app_exe,
            segment_len,
            ..Default::default()
        }
    }

    pub fn phase_spec_bundle(&self, workspace_path: &Path) -> ProverConfig {
        let dir_cache = Some(workspace_path.join("cache"));
        let path_app_config = workspace_path.join("bundle/openvm.toml");
        let segment_len = Some((1 << 22) - 100);
        match self {
            Phase::EuclidV1 => ProverConfig {
                dir_cache,
                path_app_config,
                segment_len,
                path_app_exe: workspace_path.join("bundle/app_euclidv1.vmexe"),
                ..Default::default()
            },
            Phase::EuclidV2 => ProverConfig {
                dir_cache,
                path_app_config,
                segment_len,
                path_app_exe: workspace_path.join("bundle/app.vmexe"),
                ..Default::default()
            },
        }
    }
}

unsafe impl Send for EuclidHandler {}

impl EuclidHandler {
    pub fn new(workspace_path: &str) -> Self {
        let p = Phase::EuclidV1;
        let workspace_path = Path::new(workspace_path);
        let chunk_prover = ChunkProver::setup(p.phase_spec_chunk(workspace_path))
            .expect("Failed to setup chunk prover");

        let batch_prover = BatchProver::setup(p.phase_spec_batch(workspace_path))
            .expect("Failed to setup batch prover");

        let bundle_prover = BundleProverEuclidV1::setup(p.phase_spec_bundle(workspace_path))
            .expect("Failed to setup bundle prover");

        Self {
            chunk_prover,
            batch_prover,
            bundle_prover,
        }
    }
}

#[async_trait]
impl CircuitsHandler for Arc<Mutex<EuclidHandler>> {
    async fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>> {
        Some(match task_type {
            ProofType::Chunk => self.try_lock().unwrap().chunk_prover.get_app_vk(),
            ProofType::Batch => self.try_lock().unwrap().batch_prover.get_app_vk(),
            ProofType::Bundle => self.try_lock().unwrap().bundle_prover.get_app_vk(),
            _ => unreachable!("Unsupported proof type"),
        })
    }

    async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
        match prove_request.proof_type {
            ProofType::Chunk => {
                let task: ChunkProvingTask = serde_json::from_str(&prove_request.input)?;
                let proof = self.try_lock().unwrap().chunk_prover.gen_proof(&task)?;

                Ok(serde_json::to_string(&proof)?)
            }
            ProofType::Batch => {
                let task: BatchProvingTask = serde_json::from_str(&prove_request.input)?;
                let proof = self.try_lock().unwrap().batch_prover.gen_proof(&task)?;

                Ok(serde_json::to_string(&proof)?)
            }
            ProofType::Bundle => {
                let batch_proofs: BundleProvingTask = serde_json::from_str(&prove_request.input)?;
                let proof = self
                    .try_lock()
                    .unwrap()
                    .bundle_prover
                    .gen_proof_evm(&batch_proofs)?;

                Ok(serde_json::to_string(&proof)?)
            }
            _ => Err(anyhow!("Unsupported proof type")),
        }
    }
}
@@ -1,73 +0,0 @@
use std::{path::Path, sync::Arc};

use super::{CircuitsHandler, Phase};
use async_trait::async_trait;
use eyre::Result;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, ProofType};
use scroll_zkvm_prover_euclid::{BatchProver, BundleProverEuclidV2, ChunkProver};
use scroll_zkvm_types::ProvingTask;
use tokio::sync::Mutex;
pub struct EuclidV2Handler {
    chunk_prover: ChunkProver,
    batch_prover: BatchProver,
    bundle_prover: BundleProverEuclidV2,
}

unsafe impl Send for EuclidV2Handler {}

impl EuclidV2Handler {
    pub fn new(workspace_path: &str) -> Self {
        let p = Phase::EuclidV2;
        let workspace_path = Path::new(workspace_path);
        let chunk_prover = ChunkProver::setup(p.phase_spec_chunk(workspace_path))
            .expect("Failed to setup chunk prover");

        let batch_prover = BatchProver::setup(p.phase_spec_batch(workspace_path))
            .expect("Failed to setup batch prover");

        let bundle_prover = BundleProverEuclidV2::setup(p.phase_spec_bundle(workspace_path))
            .expect("Failed to setup bundle prover");

        Self {
            chunk_prover,
            batch_prover,
            bundle_prover,
        }
    }
}

#[async_trait]
impl CircuitsHandler for Arc<Mutex<EuclidV2Handler>> {
    async fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>> {
        Some(match task_type {
            ProofType::Chunk => self.try_lock().unwrap().chunk_prover.get_app_vk(),
            ProofType::Batch => self.try_lock().unwrap().batch_prover.get_app_vk(),
            ProofType::Bundle => self.try_lock().unwrap().bundle_prover.get_app_vk(),
            _ => unreachable!("Unsupported proof type"),
        })
    }

    async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
        let u_task: ProvingTask = serde_json::from_str(&prove_request.input)?;

        let proof = match prove_request.proof_type {
            ProofType::Chunk => self
                .try_lock()
                .unwrap()
                .chunk_prover
                .gen_proof_universal(&u_task, false)?,
            ProofType::Batch => self
                .try_lock()
                .unwrap()
                .batch_prover
                .gen_proof_universal(&u_task, false)?,
            ProofType::Bundle => self
                .try_lock()
                .unwrap()
                .bundle_prover
                .gen_proof_universal(&u_task, true)?,
            _ => return Err(eyre::eyre!("Unsupported proof type")),
        };
        Ok(serde_json::to_string(&proof)?)
    }
}
crates/prover-bin/src/zk_circuits_handler/universal.rs (new file, 56 lines)
@@ -0,0 +1,56 @@
use std::path::Path;

use super::CircuitsHandler;
use async_trait::async_trait;
use eyre::Result;
use libzkp::ProvingTaskExt;
use scroll_zkvm_prover::{Prover, ProverConfig};
use scroll_zkvm_types::ProvingTask;
use tokio::sync::Mutex;
pub struct UniversalHandler {
    prover: Prover,
}

/// Safe for current usage as a `CircuitsHandler` trait object (kept inside a Mutex, and the
/// instance is NEVER extracted via `into_inner`)
unsafe impl Send for UniversalHandler {}

impl UniversalHandler {
    pub fn new(workspace_path: impl AsRef<Path>, is_openvm_v13: bool) -> Result<Self> {
        let path_app_exe = workspace_path.as_ref().join("app.vmexe");
        let path_app_config = workspace_path.as_ref().join("openvm.toml");
        let segment_len = Some((1 << 21) - 100);
        let config = ProverConfig {
            path_app_config,
            path_app_exe,
            segment_len,
            is_openvm_v13,
        };

        let prover = Prover::setup(config, None)?;
        Ok(Self { prover })
    }

    /// get_prover returns the inner prover; later we will replace the chunk/batch/bundle provers
    /// with the universal prover, but until then the bundle prover serves as the representative one
    pub fn get_prover(&mut self) -> &mut Prover {
        &mut self.prover
    }

    pub fn get_task_from_input(input: &str) -> Result<ProvingTaskExt> {
        Ok(serde_json::from_str(input)?)
    }
}

#[async_trait]
impl CircuitsHandler for Mutex<UniversalHandler> {
    async fn get_proof_data(&self, u_task: &ProvingTask, need_snark: bool) -> Result<String> {
        let mut handler_self = self.lock().await;

        let proof = handler_self
            .get_prover()
            .gen_proof_universal(u_task, need_snark)?;

        Ok(serde_json::to_string(&proof)?)
    }
}
database/.gitignore (vendored, +1 line)
@@ -1,2 +1,3 @@
/build/bin
.idea
localdbg
Some files were not shown because too many files have changed in this diff.