Compare commits

..

15 Commits

Author SHA1 Message Date
Zhang Zhuo
daa1387208 circuit-0.5.2 (#1705)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2025-07-23 14:16:16 +08:00
Zhang Zhuo
67b05558e2 upgrade circuit to 0.5.2 (#1703)
Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2025-07-23 10:52:08 +08:00
Ho
1e447b0fef [Fix] building failure in gpu image (#1702) 2025-07-21 20:26:39 +08:00
georgehao
f7c6ecadf4 bump to v4.5.31 (#1700) 2025-07-18 16:41:59 +08:00
Ho
9d94f943e5 [Upgrade] feynman 0.5.0rc1 (#1699) 2025-07-18 15:57:31 +08:00
Morty
de17ad43ff fix(blob-uploader): orm function InsertOrUpdateBlobUpload and s3 bucket region configuration (#1679)
Co-authored-by: yiweichi <yiweichi@users.noreply.github.com>
2025-07-16 18:36:27 +08:00
colin
4233ad928c feat(rollup-relayer): support Validium (#1693)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2025-07-09 15:02:54 +08:00
georgehao
3050ccb40f update feynman prover makefile (#1691) 2025-07-05 10:33:08 +08:00
Ho
12e89201a1 feat: upgrading for feynman (#1690)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
2025-07-04 16:59:48 +08:00
colin
a0ee508bbd fix(bridge-history): commit batch txns and blobs fetching (#1689) 2025-07-04 01:50:52 +08:00
georgehao
b8909d3795 update intermediate docker runs on (#1688) 2025-07-03 21:52:24 +08:00
Ho
b7a172a519 feat: upgrade zkvm-prover to feynman fork (#1686)
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
2025-07-03 18:48:33 +08:00
georgehao
80807dbb75 Feat/upgrade intermedidate (#1687) 2025-07-03 10:08:03 +08:00
georgehao
a776ca7c82 refactor: remove unused check (#1685) 2025-07-03 09:21:33 +08:00
Ho
ea38ae7e96 Refactor/zkvm 3 (#1684) 2025-07-01 06:39:27 +08:00
94 changed files with 15109 additions and 2086 deletions

View File

@@ -29,7 +29,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-12-06
toolchain: nightly-2025-02-14
override: true
components: rustfmt, clippy
- name: Install Go

View File

@@ -33,7 +33,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-12-03
toolchain: nightly-2025-02-14
override: true
components: rustfmt, clippy
- name: Install Go

View File

@@ -10,7 +10,8 @@ env:
jobs:
gas_oracle:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -55,7 +56,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
rollup_relayer:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -100,7 +102,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
blob_uploader:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -145,7 +148,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
rollup-db-cli:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -190,7 +194,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-fetcher:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -235,7 +240,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-api:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -280,7 +286,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-db-cli:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -325,7 +332,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
coordinator-api:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -352,48 +360,6 @@ jobs:
REPOSITORY: coordinator-api
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Setup SSH for repositories and clone them
run: |
mkdir -p ~/.ssh
chmod 700 ~/.ssh
# Setup for plonky3-gpu
echo "${{ secrets.PLONKY3_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/plonky3_gpu_key
chmod 600 ~/.ssh/plonky3_gpu_key
eval "$(ssh-agent -s)" > /dev/null
ssh-add ~/.ssh/plonky3_gpu_key 2>/dev/null
ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts 2>/dev/null
echo "Loaded plonky3-gpu key"
# Clone plonky3-gpu repository
./build/dockerfiles/coordinator-api/clone_plonky3_gpu.sh
# Setup for openvm-stark-gpu
echo "${{ secrets.OPENVM_STARK_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/openvm_stark_gpu_key
chmod 600 ~/.ssh/openvm_stark_gpu_key
eval "$(ssh-agent -s)" > /dev/null
ssh-add ~/.ssh/openvm_stark_gpu_key 2>/dev/null
echo "Loaded openvm-stark-gpu key"
# Clone openvm-stark-gpu repository
./build/dockerfiles/coordinator-api/clone_openvm_stark_gpu.sh
# Setup for openvm-gpu
echo "${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/openvm_gpu_key
chmod 600 ~/.ssh/openvm_gpu_key
eval "$(ssh-agent -s)" > /dev/null
ssh-add ~/.ssh/openvm_gpu_key 2>/dev/null
echo "Loaded openvm-gpu key"
# Clone openvm-gpu repository
./build/dockerfiles/coordinator-api/clone_openvm_gpu.sh
# Show number of loaded keys
echo "Number of loaded keys: $(ssh-add -l | wc -l)"
- name: Checkout specific commits
run: |
./build/dockerfiles/coordinator-api/checkout_all.sh
- name: Build and push
uses: docker/build-push-action@v3
env:
@@ -411,7 +377,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
coordinator-cron:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4

View File

@@ -25,6 +25,7 @@ on:
- nightly-2023-12-03
- nightly-2022-12-10
- 1.86.0
- nightly-2025-02-14
default: "nightly-2023-12-03"
PYTHON_VERSION:
description: "Python version"
@@ -69,7 +70,8 @@ defaults:
jobs:
build-and-publish-intermediate:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4

1
.gitignore vendored
View File

@@ -24,3 +24,4 @@ sftp-config.json
*~
target
zkvm-prover/config.json

3202
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -17,24 +17,23 @@ repository = "https://github.com/scroll-tech/scroll"
version = "4.5.8"
[workspace.dependencies]
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "29c99de", package = "scroll-zkvm-prover" }
scroll-zkvm-verifier-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "29c99de", package = "scroll-zkvm-verifier" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "29c99de" }
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1", package = "scroll-zkvm-prover" }
scroll-zkvm-verifier-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1", package = "scroll-zkvm-verifier" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1" }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade", features = ["scroll"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade" }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/openvm-1.3", features = ["scroll"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/openvm-1.3" }
metrics = "0.23.0"
metrics-util = "0.17"
metrics-tracing-context = "0.16.0"
anyhow = "1.0"
alloy = { version = "0.11", default-features = false }
alloy-primitives = { version = "0.8", default-features = false }
alloy = { version = "1", default-features = false }
alloy-primitives = { version = "1.2", default-features = false, features = ["tiny-keccak"] }
# also use this to trigger "serde" feature for primitives
alloy-serde = { version = "0.8", default-features = false }
alloy-serde = { version = "1", default-features = false }
rkyv = "0.8"
serde = { version = "1", default-features = false, features = ["derive"] }
serde_json = { version = "1.0" }
serde_derive = "1.0"
@@ -43,24 +42,27 @@ itertools = "0.14"
tiny-keccak = "2.0"
tracing = "0.1"
eyre = "0.6"
bincode_v1 = { version = "1.3", package = "bincode"}
snark-verifier-sdk = { version = "0.2.0", default-features = false, features = [
"loader_halo2",
"halo2-axiom",
"display",
] }
once_cell = "1.20"
base64 = "0.22"
#TODO: upgrade when Feynman
vm-zstd = { git = "https://github.com/scroll-tech/rust-zstd-decompressor.git", tag = "v0.1.1" }
[patch.crates-io]
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v0.8.18-euclid-upgrade" }
ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-euclid-upgrade" }
revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-handler = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-precompile = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.15.0" }
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v1.2.0" }
[profile.maxperf]
inherits = "release"
lto = "fat"
codegen-units = 1
codegen-units = 1

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"math/big"
"time"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
@@ -252,6 +253,11 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
// Key: commit transaction hash
// Value: parent batch hashes (in order) for each processed CommitBatch event in the transaction
txBlobIndexMap := make(map[common.Hash][]common.Hash)
// Cache for the previous transaction to avoid duplicate fetches
var lastTxHash common.Hash
var lastTx *types.Transaction
var l1BatchEvents []*orm.BatchEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
@@ -261,11 +267,28 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
log.Error("Failed to unpack CommitBatch event", "err", err)
return nil, err
}
commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
if err != nil || isPending {
log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
return nil, err
// Get transaction, reuse if it's the same as previous
var commitTx *types.Transaction
if lastTxHash == vlog.TxHash && lastTx != nil {
commitTx = lastTx
} else {
log.Debug("Fetching commit batch transaction", "txHash", vlog.TxHash.String())
// Create 10-second timeout context for transaction fetch
txCtx, txCancel := context.WithTimeout(ctx, 10*time.Second)
fetchedTx, isPending, err := client.TransactionByHash(txCtx, vlog.TxHash)
txCancel()
if err != nil || isPending {
log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
return nil, err
}
commitTx = fetchedTx
lastTxHash = vlog.TxHash
lastTx = commitTx
}
version, startBlock, endBlock, err := utils.GetBatchVersionAndBlockRangeFromCalldata(commitTx.Data())
if err != nil {
log.Error("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
@@ -305,7 +328,13 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
return nil, fmt.Errorf("batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex, event.BatchHash.String(), calculatedBatch.Hash().String())
}
blocks, err := e.getBatchBlockRangeFromBlob(ctx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
log.Debug("Processing blob data", "blobVersionedHash", blobVersionedHash.String(), "batchIndex", event.BatchIndex.Uint64(), "currentIndex", currentIndex)
// Create 20-second timeout context for blob processing
blobCtx, blobCancel := context.WithTimeout(ctx, 20*time.Second)
blocks, err := e.getBatchBlockRangeFromBlob(blobCtx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
blobCancel()
if err != nil {
return nil, fmt.Errorf("failed to process versioned blob, blobVersionedHash: %s, block number: %d, blob index: %d, err: %w",
blobVersionedHash.String(), vlog.BlockNumber, currentIndex, err)

View File

@@ -1,9 +1,9 @@
# Build libzkp dependency
FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.21-rust-nightly-2023-12-03 as chef
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as chef
WORKDIR app
FROM chef as planner
COPY ./crates ./
COPY ./crates/ ./crates/
COPY ./Cargo.* ./
COPY ./rust-toolchain ./
RUN cargo chef prepare --recipe-path recipe.json
@@ -11,21 +11,15 @@ RUN cargo chef prepare --recipe-path recipe.json
FROM chef as zkp-builder
COPY ./rust-toolchain ./
COPY --from=planner /app/recipe.json recipe.json
# run scripts to get openvm-gpu
COPY ./build/dockerfiles/coordinator-api/plonky3-gpu /plonky3-gpu
COPY ./build/dockerfiles/coordinator-api/openvm-stark-gpu /openvm-stark-gpu
COPY ./build/dockerfiles/coordinator-api/openvm-gpu /openvm-gpu
COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
RUN cargo chef cook --release --recipe-path recipe.json
COPY ./crates ./
COPY ./crates/ ./crates/
COPY ./Cargo.* ./
COPY .git .git
RUN cargo build --release -p libzkp-c
# Download Go dependencies
FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.21-rust-nightly-2023-12-03 as base
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
@@ -45,7 +39,7 @@ RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_a
RUN mv coordinator/internal/logic/libzkp/lib /bin/
# Pull coordinator into a second stage deploy ubuntu container
FROM nvidia/cuda:11.7.1-runtime-ubuntu22.04
FROM ubuntu:20.04
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/lib
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
# ENV CHAIN_ID=534353

View File

@@ -1,17 +0,0 @@
#!/bin/bash
set -uex
PLONKY3_GPU_COMMIT=261b322 # v0.2.0
OPENVM_STARK_GPU_COMMIT=3082234 # PR#48
OPENVM_GPU_COMMIT=8094b4f # branch: patch-v1.2.0
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# checkout plonky3-gpu
cd $DIR/plonky3-gpu && git checkout ${PLONKY3_GPU_COMMIT}
# checkout openvm-stark-gpu
cd $DIR/openvm-stark-gpu && git checkout ${OPENVM_STARK_GPU_COMMIT}
# checkout openvm-gpu
cd $DIR/openvm-gpu && git checkout ${OPENVM_GPU_COMMIT}

View File

@@ -1,10 +0,0 @@
#!/bin/bash
set -uex
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# clone openvm-gpu if not exists
if [ ! -d $DIR/openvm-gpu ]; then
git clone git@github.com:scroll-tech/openvm-gpu.git $DIR/openvm-gpu
fi
cd $DIR/openvm-gpu && git fetch --all --force

View File

@@ -1,10 +0,0 @@
#!/bin/bash
set -uex
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# clone openvm-stark-gpu if not exists
if [ ! -d $DIR/openvm-stark-gpu ]; then
git clone git@github.com:scroll-tech/openvm-stark-gpu.git $DIR/openvm-stark-gpu
fi
cd $DIR/openvm-stark-gpu && git fetch --all --force

View File

@@ -1,10 +0,0 @@
#!/bin/bash
set -uex
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# clone plonky3-gpu if not exists
if [ ! -d $DIR/plonky3-gpu ]; then
git clone git@github.com:scroll-tech/plonky3-gpu.git $DIR/plonky3-gpu
fi
cd $DIR/plonky3-gpu && git fetch --all --force

View File

@@ -1,92 +0,0 @@
# openvm
# same order and features as zkvm-prover/Cargo.toml.gpu
[patch."ssh://git@github.com/scroll-tech/openvm-gpu.git"]
openvm = { path = "/openvm-gpu/crates/toolchain/openvm", default-features = false }
openvm-algebra-complex-macros = { path = "/openvm-gpu/extensions/algebra/complex-macros", default-features = false }
openvm-algebra-guest = { path = "/openvm-gpu/extensions/algebra/guest", default-features = false }
openvm-bigint-guest = { path = "/openvm-gpu/extensions/bigint/guest", default-features = false }
openvm-build = { path = "/openvm-gpu/crates/toolchain/build", default-features = false }
openvm-circuit = { path = "/openvm-gpu/crates/vm", default-features = false }
openvm-custom-insn = { path = "/openvm-gpu/crates/toolchain/custom_insn", default-features = false }
openvm-continuations = { path = "/openvm-gpu/crates/continuations", default-features = false }
openvm-ecc-guest = { path = "/openvm-gpu/extensions/ecc/guest", default-features = false }
openvm-instructions ={ path = "/openvm-gpu/crates/toolchain/instructions", default-features = false }
openvm-keccak256-guest = { path = "/openvm-gpu/extensions/keccak256/guest", default-features = false }
openvm-native-circuit = { path = "/openvm-gpu/extensions/native/circuit", default-features = false }
openvm-native-compiler = { path = "/openvm-gpu/extensions/native/compiler", default-features = false }
openvm-native-recursion = { path = "/openvm-gpu/extensions/native/recursion", default-features = false }
openvm-native-transpiler = { path = "/openvm-gpu/extensions/native/transpiler", default-features = false }
openvm-pairing-guest = { path = "/openvm-gpu/extensions/pairing/guest", default-features = false }
openvm-rv32im-guest = { path = "/openvm-gpu/extensions/rv32im/guest", default-features = false }
openvm-rv32im-transpiler = { path = "/openvm-gpu/extensions/rv32im/transpiler", default-features = false }
openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
openvm-sha256-guest = { path = "/openvm-gpu/extensions/sha256/guest", default-features = false }
openvm-transpiler = { path = "/openvm-gpu/crates/toolchain/transpiler", default-features = false }
# stark-backend
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { path = "/openvm-stark-gpu/crates/stark-backend", features = ["gpu"] }
openvm-stark-sdk = { path = "/openvm-stark-gpu/crates/stark-sdk", features = ["gpu"] }
[patch."ssh://git@github.com/scroll-tech/openvm-stark-gpu.git"]
openvm-stark-backend = { path = "/openvm-stark-gpu/crates/stark-backend", features = ["gpu"] }
openvm-stark-sdk = { path = "/openvm-stark-gpu/crates/stark-sdk", features = ["gpu"] }
# plonky3
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { path = "/plonky3-gpu/air" }
p3-field = { path = "/plonky3-gpu/field" }
p3-commit = { path = "/plonky3-gpu/commit" }
p3-matrix = { path = "/plonky3-gpu/matrix" }
p3-baby-bear = { path = "/plonky3-gpu/baby-bear" }
p3-koala-bear = { path = "/plonky3-gpu/koala-bear" }
p3-util = { path = "/plonky3-gpu/util" }
p3-challenger = { path = "/plonky3-gpu/challenger" }
p3-dft = { path = "/plonky3-gpu/dft" }
p3-fri = { path = "/plonky3-gpu/fri" }
p3-goldilocks = { path = "/plonky3-gpu/goldilocks" }
p3-keccak = { path = "/plonky3-gpu/keccak" }
p3-keccak-air = { path = "/plonky3-gpu/keccak-air" }
p3-blake3 = { path = "/plonky3-gpu/blake3" }
p3-mds = { path = "/plonky3-gpu/mds" }
p3-monty-31 = { path = "/plonky3-gpu/monty-31" }
p3-merkle-tree = { path = "/plonky3-gpu/merkle-tree" }
p3-poseidon = { path = "/plonky3-gpu/poseidon" }
p3-poseidon2 = { path = "/plonky3-gpu/poseidon2" }
p3-poseidon2-air = { path = "/plonky3-gpu/poseidon2-air" }
p3-symmetric = { path = "/plonky3-gpu/symmetric" }
p3-uni-stark = { path = "/plonky3-gpu/uni-stark" }
p3-maybe-rayon = { path = "/plonky3-gpu/maybe-rayon" }
p3-bn254-fr = { path = "/plonky3-gpu/bn254-fr" }
# gpu crates
[patch."ssh://git@github.com/scroll-tech/plonky3-gpu.git"]
p3-gpu-base = { path = "/plonky3-gpu/gpu-base" }
p3-gpu-build = { path = "/plonky3-gpu/gpu-build" }
p3-gpu-field = { path = "/plonky3-gpu/gpu-field" }
p3-gpu-backend = { path = "/plonky3-gpu/gpu-backend" }
p3-gpu-module = { path = "/plonky3-gpu/gpu-module" }
p3-air = { path = "/plonky3-gpu/air" }
p3-field = { path = "/plonky3-gpu/field" }
p3-commit = { path = "/plonky3-gpu/commit" }
p3-matrix = { path = "/plonky3-gpu/matrix" }
p3-baby-bear = { path = "/plonky3-gpu/baby-bear" }
p3-koala-bear = { path = "/plonky3-gpu/koala-bear" }
p3-util = { path = "/plonky3-gpu/util" }
p3-challenger = { path = "/plonky3-gpu/challenger" }
p3-dft = { path = "/plonky3-gpu/dft" }
p3-fri = { path = "/plonky3-gpu/fri" }
p3-goldilocks = { path = "/plonky3-gpu/goldilocks" }
p3-keccak = { path = "/plonky3-gpu/keccak" }
p3-keccak-air = { path = "/plonky3-gpu/keccak-air" }
p3-blake3 = { path = "/plonky3-gpu/blake3" }
p3-mds = { path = "/plonky3-gpu/mds" }
p3-monty-31 = { path = "/plonky3-gpu/monty-31" }
p3-merkle-tree = { path = "/plonky3-gpu/merkle-tree" }
p3-poseidon = { path = "/plonky3-gpu/poseidon" }
p3-poseidon2 = { path = "/plonky3-gpu/poseidon2" }
p3-poseidon2-air = { path = "/plonky3-gpu/poseidon2-air" }
p3-symmetric = { path = "/plonky3-gpu/symmetric" }
p3-uni-stark = { path = "/plonky3-gpu/uni-stark" }
p3-maybe-rayon = { path = "/plonky3-gpu/maybe-rayon" }
p3-bn254-fr = { path = "/plonky3-gpu/bn254-fr" }

View File

@@ -1,2 +0,0 @@
[url "https://github.com/"]
insteadOf = ssh://git@github.com/

3
common/.gitignore vendored
View File

@@ -1,4 +1,3 @@
/build/bin
.idea
libzkp/impl/target
libzkp/interface/*.a
libzkp

View File

@@ -4,5 +4,4 @@ test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
lint: ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
cd libzkp/impl && cargo fmt --all -- --check && cargo clippy --release -- -D warnings
GOBIN=$(PWD)/build/bin go run ../build/lint.go

View File

@@ -10,12 +10,6 @@ import (
"github.com/scroll-tech/go-ethereum/common/hexutil"
)
const (
EuclidV2Fork = "euclidV2"
EuclidV2ForkNameForProver = "euclidv2"
)
// ProofType represents the type of task.
type ProofType uint8

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.5.25"
var tag = "v4.5.33"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -1,4 +1,4 @@
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator libzkp
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
@@ -51,6 +51,7 @@ test-gpu-verifier: $(LIBZKP_PATH)
lint: ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
cd ../ && cargo fmt --all -- --check && cargo clippy --release -- -D warnings
clean: ## Empty out the bin folder
@rm -rf build/bin

View File

@@ -90,12 +90,12 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
cfg.ProverManager = &coordinatorConfig.ProverManager{
ProversPerSession: 1,
Verifier: &coordinatorConfig.VerifierConfig{
HighVersionCircuit: &coordinatorConfig.CircuitConfig{
AssetsPath: "",
ForkName: "euclidV2",
MinProverVersion: "v4.4.89",
MinProverVersion: "v4.4.89",
Verifiers: []coordinatorConfig.AssetConfig{{
AssetsPath: "",
ForkName: "feynman",
},
},
}},
BatchCollectionTimeSec: 60,
ChunkCollectionTimeSec: 60,
SessionAttempts: 10,

View File

@@ -19,6 +19,7 @@ import (
)
var app *cli.App
var cfg *config.Config
func init() {
// Set up coordinator app info.
@@ -29,16 +30,29 @@ func init() {
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
if err := utils.LogSetup(ctx); err != nil {
return err
}
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
var err error
cfg, err = config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
return nil
}
// sub commands
app.Commands = []*cli.Command{
{
Name: "verify",
Usage: "verify an proof, specified by [forkname] <type> <proof path>",
Action: verify,
},
}
}
func action(ctx *cli.Context) error {
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
db, err := database.InitDB(cfg.DB)
if err != nil {
log.Crit("failed to init db connection", "err", err)

View File

@@ -0,0 +1,109 @@
package main
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/common/types/message"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
)
func verify(cCtx *cli.Context) error {
var forkName, proofType, proofPath string
if cCtx.Args().Len() <= 2 {
forkName = cfg.ProverManager.Verifier.Verifiers[0].ForkName
proofType = cCtx.Args().First()
proofPath = cCtx.Args().Get(1)
} else {
forkName = cCtx.Args().First()
proofType = cCtx.Args().Get(1)
proofPath = cCtx.Args().Get(2)
}
log.Info("verify proof", "in", proofPath, "type", proofType, "forkName", forkName)
// Load the content of the proof file
data, err := os.ReadFile(filepath.Clean(proofPath))
if err != nil {
return fmt.Errorf("error reading file: %w", err)
}
vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier)
if err != nil {
return err
}
var ret bool
switch strings.ToLower(proofType) {
case "chunk":
proof := &message.OpenVMChunkProof{}
if err := json.Unmarshal(data, proof); err != nil {
return err
}
vk, ok := vf.ChunkVk[forkName]
if !ok {
return fmt.Errorf("no vk loaded for fork %s", forkName)
}
if len(proof.Vk) != 0 {
if !bytes.Equal(proof.Vk, vk) {
return fmt.Errorf("unmatch vk with expected: expected %s, get %s",
base64.StdEncoding.EncodeToString(vk),
base64.StdEncoding.EncodeToString(proof.Vk),
)
}
} else {
proof.Vk = vk
}
ret, err = vf.VerifyChunkProof(proof, forkName)
case "batch":
proof := &message.OpenVMBatchProof{}
if err := json.Unmarshal(data, proof); err != nil {
return err
}
vk, ok := vf.BatchVk[forkName]
if !ok {
return fmt.Errorf("no vk loaded for fork %s", forkName)
}
if len(proof.Vk) != 0 {
if !bytes.Equal(proof.Vk, vk) {
return fmt.Errorf("unmatch vk with expected: expected %s, get %s",
base64.StdEncoding.EncodeToString(vk),
base64.StdEncoding.EncodeToString(proof.Vk),
)
}
} else {
proof.Vk = vk
}
ret, err = vf.VerifyBatchProof(proof, forkName)
case "bundle":
proof := &message.OpenVMBundleProof{}
if err := json.Unmarshal(data, proof); err != nil {
return err
}
vk, ok := vf.BundleVk[forkName]
if !ok {
return fmt.Errorf("no vk loaded for fork %s", forkName)
}
proof.Vk = vk
ret, err = vf.VerifyBundleProof(proof, forkName)
default:
return fmt.Errorf("unsupport proof type %s", proofType)
}
if err != nil {
return err
}
log.Info("verified:", "ret", ret)
return nil
}

View File

@@ -7,11 +7,17 @@
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"verifier": {
"high_version_circuit": {
"assets_path": "assets",
"fork_name": "euclidV2",
"min_prover_version": "v4.4.45"
}
"min_prover_version": "v4.4.45",
"verifiers": [
{
"assets_path": "assets",
"fork_name": "euclidV2"
},
{
"assets_path": "assets",
"fork_name": "feynman"
}
]
}
},
"db": {
@@ -21,7 +27,10 @@
"maxIdleNum": 20
},
"l2": {
"chain_id": 111
"chain_id": 111,
"l2geth": {
"endpoint": "not need to specified for mocking"
}
},
"auth": {
"secret": "prover secret key",

View File

@@ -46,6 +46,7 @@ require (
)
require (
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.20.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
@@ -55,28 +56,57 @@ require (
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
github.com/fjl/memsize v0.0.2 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/gorilla/websocket v1.4.2 // indirect
github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.3.2 // indirect
github.com/huin/goupnp v1.0.2 // indirect
github.com/iden3/go-iden3-crypto v0.0.17 // indirect
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/tsdb v0.7.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
github.com/supranational/blst v0.3.13 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/crypto v0.32.0 // indirect
golang.org/x/sync v0.11.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)

View File

@@ -1,12 +1,18 @@
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/appleboy/gin-jwt/v2 v2.9.1 h1:l29et8iLW6omcHltsOP6LLk4s3v4g2FbFs0koxGWVZs=
github.com/appleboy/gin-jwt/v2 v2.9.1/go.mod h1:jwcPZJ92uoC9nOUTOKWoN/f6JZOgMSKlFSHw5/FrRUk=
github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4=
github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
@@ -24,6 +30,9 @@ github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1
github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM=
github.com/bytedance/sonic v1.10.1 h1:7a1wuFXL1cMy7a3f7/VFcEtriuXQnUBhtoVfOZiaysc=
github.com/bytedance/sonic v1.10.1/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
@@ -45,16 +54,32 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
@@ -73,19 +98,31 @@ github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7N
github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.0/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -93,13 +130,24 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI=
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
@@ -115,6 +163,7 @@ github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
@@ -129,14 +178,22 @@ github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
@@ -145,36 +202,55 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6 h1:vb2XLvQwCf+F/ifP6P/lfeiQrHY6+Yb/E3R4KHXLqSE=
@@ -187,9 +263,15 @@ github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKl
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -216,6 +298,8 @@ github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPD
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
@@ -227,11 +311,16 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y=
golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
@@ -240,7 +329,10 @@ golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -250,13 +342,25 @@ golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -269,36 +373,56 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -55,16 +55,17 @@ type Config struct {
Auth *Auth `json:"auth"`
}
// CircuitConfig circuit items.
type CircuitConfig struct {
AssetsPath string `json:"assets_path"`
ForkName string `json:"fork_name"`
MinProverVersion string `json:"min_prover_version"`
// AssetConfig contain assets configurated for each fork, the defaul vkfile name is "OpenVmVk.json".
type AssetConfig struct {
AssetsPath string `json:"assets_path"`
ForkName string `json:"fork_name"`
Vkfile string `json:"vk_file,omitempty"`
}
// VerifierConfig load zk verifier config.
type VerifierConfig struct {
HighVersionCircuit *CircuitConfig `json:"high_version_circuit"`
MinProverVersion string `json:"min_prover_version"`
Verifiers []AssetConfig `json:"verifiers"`
}
// NewConfig returns a new instance of Config.

View File

@@ -20,11 +20,11 @@ func TestConfig(t *testing.T) {
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"verifier": {
"high_version_circuit": {
"min_prover_version": "v4.4.45",
"verifiers": [{
"assets_path": "assets",
"fork_name": "euclidV2",
"min_prover_version": "v4.4.45"
}
"fork_name": "feynman"
}]
},
"max_verifier_workers": 4
},

View File

@@ -1,12 +1,15 @@
package api
import (
"encoding/json"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/libzkp"
"scroll-tech/coordinator/internal/logic/verifier"
)
@@ -29,7 +32,7 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
log.Info("verifier created", "openVmVerifier", vf.OpenVMVkMap)
// TODO: enable this when the libzkp has been updated
/*l2cfg := cfg.L2.Endpoint
l2cfg := cfg.L2.Endpoint
if l2cfg == nil {
panic("l2geth is not specified")
}
@@ -37,9 +40,9 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
if err != nil {
panic(err)
}
libzkp.InitL2geth(string(l2cfgBytes))*/
libzkp.InitL2geth(string(l2cfgBytes))
Auth = NewAuthController(db, cfg, vf)
GetTask = NewGetTaskController(cfg, chainCfg, db, reg)
GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg)
SubmitProof = NewSubmitProofController(cfg, chainCfg, db, vf, reg)
}

View File

@@ -17,6 +17,7 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provertask"
"scroll-tech/coordinator/internal/logic/verifier"
coordinatorType "scroll-tech/coordinator/internal/types"
)
@@ -25,13 +26,15 @@ type GetTaskController struct {
proverTasks map[message.ProofType]provertask.ProverTask
getTaskAccessCounter *prometheus.CounterVec
l2syncer *l2Syncer
}
// NewGetTaskController create a get prover task controller
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, reg)
bundleProverTask := provertask.NewBundleProverTask(cfg, chainCfg, db, reg)
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, verifier *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, verifier.ChunkVk, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, verifier.BatchVk, reg)
bundleProverTask := provertask.NewBundleProverTask(cfg, chainCfg, db, verifier.BundleVk, reg)
ptc := &GetTaskController{
proverTasks: make(map[message.ProofType]provertask.ProverTask),
@@ -44,6 +47,13 @@ func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *
ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask
ptc.proverTasks[message.ProofTypeBatch] = batchProverTask
ptc.proverTasks[message.ProofTypeBundle] = bundleProverTask
if syncer, err := createL2Syncer(cfg); err != nil {
log.Crit("can not init l2 syncer", "err", err)
} else {
ptc.l2syncer = syncer
}
return ptc
}
@@ -78,6 +88,17 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
return
}
if getTaskParameter.ProverHeight == 0 {
// help update the prover height with internal l2geth
if blk, err := ptc.l2syncer.getLatestBlockNumber(ctx); err == nil {
getTaskParameter.ProverHeight = blk
} else {
nerr := fmt.Errorf("inner l2geth failure, err:%w", err)
types.RenderFailure(ctx, types.InternalServerError, nerr)
return
}
}
proofType := ptc.proofType(&getTaskParameter)
proverTask, isExist := ptc.proverTasks[proofType]
if !isExist {

View File

@@ -0,0 +1,71 @@
//go:build !mock_verifier
package api
import (
"errors"
"fmt"
"sync"
"time"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/coordinator/internal/config"
)
type l2Syncer struct {
l2gethClient *ethclient.Client
lastBlockNumber struct {
sync.RWMutex
data uint64
t time.Time
}
}
func createL2Syncer(cfg *config.Config) (*l2Syncer, error) {
if cfg.L2 == nil || cfg.L2.Endpoint == nil {
return nil, fmt.Errorf("l2 endpoint is not set in config")
} else {
l2gethClient, err := ethclient.Dial(cfg.L2.Endpoint.Url)
if err != nil {
return nil, fmt.Errorf("dial l2geth endpoint fail, err: %s", err)
}
return &l2Syncer{
l2gethClient: l2gethClient,
}, nil
}
}
// getLatestBlockNumber gets the latest block number, using cache if available and not expired
func (syncer *l2Syncer) getLatestBlockNumber(ctx *gin.Context) (uint64, error) {
// First check if we have a cached value that's still valid
syncer.lastBlockNumber.RLock()
if !syncer.lastBlockNumber.t.IsZero() && time.Since(syncer.lastBlockNumber.t) < time.Second*10 {
blockNumber := syncer.lastBlockNumber.data
syncer.lastBlockNumber.RUnlock()
return blockNumber, nil
}
syncer.lastBlockNumber.RUnlock()
// If not cached or expired, fetch from the client
if syncer.l2gethClient == nil {
return 0, errors.New("L2 geth client not initialized")
}
blockNumber, err := syncer.l2gethClient.BlockNumber(ctx)
if err != nil {
return 0, fmt.Errorf("failed to get latest block number: %w", err)
}
// Update the cache
syncer.lastBlockNumber.Lock()
syncer.lastBlockNumber.data = blockNumber
syncer.lastBlockNumber.t = time.Now()
syncer.lastBlockNumber.Unlock()
log.Debug("updated block height reference", "height", blockNumber)
return blockNumber, nil
}

View File

@@ -0,0 +1,20 @@
//go:build mock_verifier
package api
import (
"scroll-tech/coordinator/internal/config"
"github.com/gin-gonic/gin"
)
type l2Syncer struct{}
func createL2Syncer(_ *config.Config) (*l2Syncer, error) {
return &l2Syncer{}, nil
}
// getLatestBlockNumber gets the latest block number, using cache if available and not expired
func (syncer *l2Syncer) getLatestBlockNumber(_ *gin.Context) (uint64, error) {
return 99999994, nil
}

View File

@@ -31,9 +31,11 @@ type LoginLogic struct {
func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
proverVersionHardForkMap := make(map[string][]string)
var highHardForks []string
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.HighVersionCircuit.ForkName)
proverVersionHardForkMap[cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion] = highHardForks
var hardForks []string
for _, cfg := range cfg.ProverManager.Verifier.Verifiers {
hardForks = append(hardForks, cfg.ForkName)
}
proverVersionHardForkMap[cfg.ProverManager.Verifier.MinProverVersion] = hardForks
return &LoginLogic{
cfg: cfg,
@@ -56,8 +58,8 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
return errors.New("auth message verify failure")
}
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion, login.Message.ProverVersion)
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.MinProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.MinProverVersion, login.Message.ProverVersion)
}
vks := make(map[string]struct{})

View File

@@ -11,11 +11,16 @@ import "C" //nolint:typecheck
import (
"fmt"
"os"
"strings"
"unsafe"
"scroll-tech/common/types/message"
)
func init() {
C.init_tracing()
}
// Helper function to convert Go string to C string and handle cleanup
func goToCString(s string) *C.char {
return C.CString(s)
@@ -34,18 +39,10 @@ func InitVerifier(configJSON string) {
C.init_verifier(cConfig)
}
// Initialize the verifier
func InitL2geth(configJSON string) {
cConfig := goToCString(configJSON)
defer freeCString(cConfig)
C.init_l2geth(cConfig)
}
// Verify a chunk proof
func VerifyChunkProof(proofData, forkName string) bool {
cProof := goToCString(proofData)
cForkName := goToCString(forkName)
cForkName := goToCString(strings.ToLower(forkName))
defer freeCString(cProof)
defer freeCString(cForkName)
@@ -56,7 +53,7 @@ func VerifyChunkProof(proofData, forkName string) bool {
// Verify a batch proof
func VerifyBatchProof(proofData, forkName string) bool {
cProof := goToCString(proofData)
cForkName := goToCString(forkName)
cForkName := goToCString(strings.ToLower(forkName))
defer freeCString(cProof)
defer freeCString(cForkName)
@@ -67,7 +64,7 @@ func VerifyBatchProof(proofData, forkName string) bool {
// Verify a bundle proof
func VerifyBundleProof(proofData, forkName string) bool {
cProof := goToCString(proofData)
cForkName := goToCString(forkName)
cForkName := goToCString(strings.ToLower(forkName))
defer freeCString(cProof)
defer freeCString(cForkName)
@@ -96,8 +93,8 @@ func fromMessageTaskType(taskType int) int {
}
// Generate a universal task
func GenerateUniversalTask(taskType int, taskJSON, forkName string) (bool, string, string, []byte) {
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, forkName)
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte) (bool, string, string, []byte) {
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk)
}
// Generate wrapped proof
@@ -127,7 +124,7 @@ func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {
// Dumps a verification key to a file
func DumpVk(forkName, filePath string) error {
cForkName := goToCString(forkName)
cForkName := goToCString(strings.ToLower(forkName))
cFilePath := goToCString(filePath)
defer freeCString(cForkName)
defer freeCString(cFilePath)

View File

@@ -8,6 +8,9 @@
#include <stddef.h> // For size_t
// Init log tracing
void init_tracing();
// Initialize the verifier with configuration
void init_verifier(char* config);
@@ -32,7 +35,13 @@ typedef struct {
// Generate a universal task based on task type and input JSON
// Returns a struct containing task data, metadata, and expected proof hash
HandlingResult gen_universal_task(int task_type, char* task, char* fork_name);
HandlingResult gen_universal_task(
int task_type,
char* task,
char* fork_name,
const unsigned char* expected_vk,
size_t expected_vk_len
);
// Release memory allocated for a HandlingResult returned by gen_universal_task
void release_task_result(HandlingResult result);

View File

@@ -11,7 +11,10 @@ import (
"github.com/scroll-tech/go-ethereum/common"
)
func generateUniversalTask(taskType int, taskJSON, forkName string) (bool, string, string, []byte) {
func InitL2geth(configJSON string) {
}
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte) (bool, string, string, []byte) {
fmt.Printf("call mocked generate universal task %d, taskJson %s\n", taskType, taskJSON)
var metadata interface{}

View File

@@ -7,14 +7,29 @@ package libzkp
#include "libzkp.h"
*/
import "C" //nolint:typecheck
import "unsafe"
func generateUniversalTask(taskType int, taskJSON, forkName string) (bool, string, string, []byte) {
// Initialize the handler for universal task
func InitL2geth(configJSON string) {
cConfig := goToCString(configJSON)
defer freeCString(cConfig)
C.init_l2geth(cConfig)
}
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte) (bool, string, string, []byte) {
cTask := goToCString(taskJSON)
cForkName := goToCString(forkName)
defer freeCString(cTask)
defer freeCString(cForkName)
result := C.gen_universal_task(C.int(taskType), cTask, cForkName)
// Create a C array from Go slice
var cVk *C.uchar
if len(expectedVk) > 0 {
cVk = (*C.uchar)(unsafe.Pointer(&expectedVk[0]))
}
result := C.gen_universal_task(C.int(taskType), cTask, cForkName, cVk, C.size_t(len(expectedVk)))
defer C.release_task_result(result)
// Check if the operation was successful

View File

@@ -36,12 +36,13 @@ type BatchProverTask struct {
}
// NewBatchProverTask new a batch collector
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProverTask {
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, expectedVk map[string][]byte, reg prometheus.Registerer) *BatchProverTask {
bp := &BatchProverTask{
BaseProverTask: BaseProverTask{
db: db,
cfg: cfg,
chainCfg: chainCfg,
expectedVk: expectedVk,
blockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
@@ -83,10 +84,37 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < 5; i++ {
var getTaskError error
var tmpBatchTask *orm.Batch
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeBatch) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
tmpBatchTask, getTaskError = bp.batchOrm.GetBatchByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
log.Error("failed to get batch has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBatchTask == nil {
// if the assigned batch dropped, there would be too much issue to assign another
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped batch. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpBatchTask, getTaskError = bp.batchOrm.GetBatchByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected batch", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBatchTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}
if tmpBatchTask == nil {
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
}
// Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned`
@@ -114,29 +142,32 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
// Don't dispatch the same failing job to the same prover
proverTasks, getFailedTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBatch, tmpBatchTask.Hash, 2)
if getFailedTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBatch.String(), "task ID", tmpBatchTask.Hash, "error", getFailedTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBatchTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
// we are simply pick the chunk which has been assigned, so don't bother to update attempts or check failed before
if taskCtx.hasAssignedTask == nil {
// Don't dispatch the same failing job to the same prover
proverTasks, getFailedTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBatch, tmpBatchTask.Hash, 2)
if getFailedTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBatch.String(), "task ID", tmpBatchTask.Hash, "error", getFailedTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBatchTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
}
}
}
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}
}
batchTask = tmpBatchTask
@@ -149,19 +180,24 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
log.Info("start batch proof generation session", "task_id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
proverTask := orm.ProverTask{
TaskID: batchTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
var proverTask *orm.ProverTask
if taskCtx.hasAssignedTask == nil {
proverTask = &orm.ProverTask{
TaskID: batchTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
}
} else {
proverTask = taskCtx.hasAssignedTask
}
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, batchTask, hardForkName)
taskMsg, err := bp.formatProverTask(ctx.Copy(), proverTask, batchTask, hardForkName)
if err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("format prover task failure", "task_id", batchTask.Hash, "err", err)
@@ -169,20 +205,23 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
if getTaskParameter.Universal {
var metadata []byte
taskMsg, metadata, err = bp.applyUniversal(taskMsg)
if err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("Generate universal prover task failure", "task_id", batchTask.Hash, "type", "batch")
log.Error("Generate universal prover task failure", "task_id", batchTask.Hash, "type", "batch", "err", err)
return nil, ErrCoordinatorInternalFailure
}
proverTask.Metadata = metadata
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
if taskCtx.hasAssignedTask == nil {
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), proverTask); err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
}
// notice uuid is set as a side effect of InsertProverTask
taskMsg.UUID = proverTask.UUID.String()
@@ -266,13 +305,7 @@ func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*
taskDetail := &message.BatchTaskDetail{
ChunkInfos: chunkInfos,
ChunkProofs: chunkProofs,
}
if hardForkName == message.EuclidV2Fork {
taskDetail.ForkName = message.EuclidV2ForkNameForProver
} else {
log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
ForkName: hardForkName,
}
dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)

View File

@@ -33,12 +33,13 @@ type BundleProverTask struct {
}
// NewBundleProverTask new a bundle collector
func NewBundleProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BundleProverTask {
func NewBundleProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, expectedVk map[string][]byte, reg prometheus.Registerer) *BundleProverTask {
bp := &BundleProverTask{
BaseProverTask: BaseProverTask{
db: db,
chainCfg: chainCfg,
cfg: cfg,
expectedVk: expectedVk,
blockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
@@ -81,10 +82,37 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
for i := 0; i < 5; i++ {
var getTaskError error
var tmpBundleTask *orm.Bundle
tmpBundleTask, getTaskError = bp.bundleOrm.GetAssignedBundle(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned bundle proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeBundle) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
tmpBundleTask, getTaskError = bp.bundleOrm.GetBundleByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
log.Error("failed to get bundle has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBundleTask == nil {
// if the assigned chunk dropped, there would be too much issue to assign another
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped bundle. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpBundleTask, getTaskError = bp.bundleOrm.GetBundleByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected bundle", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBundleTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}
if tmpBundleTask == nil {
tmpBundleTask, getTaskError = bp.bundleOrm.GetAssignedBundle(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned bundle proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
}
// Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned`
@@ -112,31 +140,33 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
return nil, nil
}
// Don't dispatch the same failing job to the same prover
proverTasks, getTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBundle, tmpBundleTask.Hash, 2)
if getTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBundle.String(), "task ID", tmpBundleTask.Hash, "error", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty bundle, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBundleTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
// we are simply pick the chunk which has been assigned, so don't bother to update attempts or check failed before
if taskCtx.hasAssignedTask == nil {
// Don't dispatch the same failing job to the same prover
proverTasks, getTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBundle, tmpBundleTask.Hash, 2)
if getTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBundle.String(), "task ID", tmpBundleTask.Hash, "error", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty bundle, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBundleTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
}
}
rowsAffected, updateAttemptsErr := bp.bundleOrm.UpdateBundleAttempts(ctx.Copy(), tmpBundleTask.Hash, tmpBundleTask.ActiveAttempts, tmpBundleTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update bundle attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}
}
rowsAffected, updateAttemptsErr := bp.bundleOrm.UpdateBundleAttempts(ctx.Copy(), tmpBundleTask.Hash, tmpBundleTask.ActiveAttempts, tmpBundleTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update bundle attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}
bundleTask = tmpBundleTask
break
}
@@ -147,19 +177,24 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
}
log.Info("start bundle proof generation session", "task index", bundleTask.Index, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
proverTask := orm.ProverTask{
TaskID: bundleTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBundle),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
var proverTask *orm.ProverTask
if taskCtx.hasAssignedTask == nil {
proverTask = &orm.ProverTask{
TaskID: bundleTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBundle),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
}
} else {
proverTask = taskCtx.hasAssignedTask
}
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, hardForkName)
taskMsg, err := bp.formatProverTask(ctx.Copy(), proverTask, hardForkName)
if err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("format bundle prover task failure", "task_id", bundleTask.Hash, "err", err)
@@ -170,7 +205,7 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
taskMsg, metadata, err = bp.applyUniversal(taskMsg)
if err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("Generate universal prover task failure", "task_id", bundleTask.Hash, "type", "bundle")
log.Error("Generate universal prover task failure", "task_id", bundleTask.Hash, "type", "bundle", "err", err)
return nil, ErrCoordinatorInternalFailure
}
// bundle proof require snark
@@ -179,10 +214,12 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("insert bundle prover task info fail", "task_id", bundleTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
if taskCtx.hasAssignedTask == nil {
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), proverTask); err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("insert bundle prover task info fail", "task_id", bundleTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
}
// notice uuid is set as a side effect of InsertProverTask
taskMsg.UUID = proverTask.UUID.String()
@@ -209,9 +246,14 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
return nil, fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", task.TaskID)
}
parentBatch, err := bp.batchOrm.GetBatchByHash(ctx, batches[0].ParentBatchHash)
if err != nil {
return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
var prevStateRoot common.Hash
// this would be common in test cases: the first batch has empty parent
if batches[0].Index > 1 {
parentBatch, err := bp.batchOrm.GetBatchByHash(ctx, batches[0].ParentBatchHash)
if err != nil {
return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
}
prevStateRoot = common.HexToHash(parentBatch.StateRoot)
}
var batchProofs []*message.OpenVMBatchProof
@@ -225,18 +267,12 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
taskDetail := message.BundleTaskDetail{
BatchProofs: batchProofs,
}
if hardForkName == message.EuclidV2Fork {
taskDetail.ForkName = message.EuclidV2ForkNameForProver
} else {
log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
ForkName: hardForkName,
}
taskDetail.BundleInfo = &message.OpenVMBundleInfo{
ChainID: bp.cfg.L2.ChainID,
PrevStateRoot: common.HexToHash(parentBatch.StateRoot),
PrevStateRoot: prevStateRoot,
PostStateRoot: common.HexToHash(batches[len(batches)-1].StateRoot),
WithdrawRoot: common.HexToHash(batches[len(batches)-1].WithdrawRoot),
NumBatches: uint32(len(batches)),

View File

@@ -33,12 +33,13 @@ type ChunkProverTask struct {
}
// NewChunkProverTask new a chunk prover task
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProverTask {
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, expectedVk map[string][]byte, reg prometheus.Registerer) *ChunkProverTask {
cp := &ChunkProverTask{
BaseProverTask: BaseProverTask{
db: db,
cfg: cfg,
chainCfg: chainCfg,
expectedVk: expectedVk,
chunkOrm: orm.NewChunk(db),
blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db),
@@ -79,12 +80,39 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < 5; i++ {
var getTaskError error
var tmpChunkTask *orm.Chunk
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight)
if getTaskError != nil {
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeChunk) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
log.Debug("retrieved assigned task chunk", "taskID", taskCtx.hasAssignedTask.TaskID, "prover", taskCtx.ProverName)
tmpChunkTask, getTaskError = cp.chunkOrm.GetChunkByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
log.Error("failed to get chunk has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpChunkTask == nil {
// if the assigned chunk dropped, there would be too much issue to assign another
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped chunk. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpChunkTask, getTaskError = cp.chunkOrm.GetChunkByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected chunk", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpChunkTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}
if tmpChunkTask == nil {
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight)
if getTaskError != nil {
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
}
// Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned`
// chunk to prover. But use `proving_status in (1, 2)` will not use the postgres index. So need split the sql.
if tmpChunkTask == nil {
@@ -110,31 +138,33 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
// Don't dispatch the same failing job to the same prover
proverTasks, getFailedTaskError := cp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeChunk, tmpChunkTask.Hash, 2)
if getFailedTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeChunk.String(), "task ID", tmpChunkTask.Hash, "error", getFailedTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty chunk, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpChunkTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
// we are simply pick the chunk which has been assigned, so don't bother to update attempts or check failed before
if taskCtx.hasAssignedTask == nil {
// Don't dispatch the same failing job to the same prover
proverTasks, getFailedTaskError := cp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeChunk, tmpChunkTask.Hash, 2)
if getFailedTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeChunk.String(), "task ID", tmpChunkTask.Hash, "error", getFailedTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty chunk, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpChunkTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
}
}
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}
}
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}
chunkTask = tmpChunkTask
break
}
@@ -145,19 +175,24 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
log.Info("start chunk generation session", "task_id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
proverTask := orm.ProverTask{
TaskID: chunkTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
var proverTask *orm.ProverTask
if taskCtx.hasAssignedTask == nil {
proverTask = &orm.ProverTask{
TaskID: chunkTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
}
} else {
proverTask = taskCtx.hasAssignedTask
}
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, chunkTask, hardForkName)
taskMsg, err := cp.formatProverTask(ctx.Copy(), proverTask, chunkTask, hardForkName)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("format prover task failure", "task_id", chunkTask.Hash, "err", err)
@@ -169,16 +204,18 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
taskMsg, metadata, err = cp.applyUniversal(taskMsg)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("Generate universal prover task failure", "task_id", chunkTask.Hash, "type", "chunk")
log.Error("Generate universal prover task failure", "task_id", chunkTask.Hash, "type", "chunk", "err", err)
return nil, ErrCoordinatorInternalFailure
}
proverTask.Metadata = metadata
}
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("insert chunk prover task fail", "task_id", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
if taskCtx.hasAssignedTask == nil {
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), proverTask); err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("insert chunk prover task fail", "task_id", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
}
// notice uuid is set as a side effect of InsertProverTask
taskMsg.UUID = proverTask.UUID.String()
@@ -197,20 +234,14 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
// Get block hashes.
blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)
if dbErr != nil || len(blockHashes) == 0 {
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%w", task.TaskID, dbErr)
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%v", task.TaskID, dbErr)
}
var taskDetailBytes []byte
taskDetail := message.ChunkTaskDetail{
BlockHashes: blockHashes,
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
}
if hardForkName == message.EuclidV2Fork {
taskDetail.ForkName = message.EuclidV2ForkNameForProver
} else {
log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
ForkName: hardForkName,
}
var err error

View File

@@ -38,9 +38,10 @@ type ProverTask interface {
// BaseProverTask a base prover task which contain series functions
type BaseProverTask struct {
cfg *config.Config
chainCfg *params.ChainConfig
db *gorm.DB
cfg *config.Config
chainCfg *params.ChainConfig
db *gorm.DB
expectedVk map[string][]byte
batchOrm *orm.Batch
chunkOrm *orm.Chunk
@@ -57,10 +58,11 @@ type proverTaskContext struct {
ProverProviderType uint8
HardForkNames map[string]struct{}
taskType message.ProofType
chunkTask *orm.Chunk
batchTask *orm.Batch
bundleTask *orm.Bundle
taskType message.ProofType
chunkTask *orm.Chunk
batchTask *orm.Batch
bundleTask *orm.Bundle
hasAssignedTask *orm.ProverTask
}
// hardForkName get the chunk/batch/bundle hard fork name
@@ -175,19 +177,22 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, e
return nil, fmt.Errorf("public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", publicKey, proverName, proverVersion)
}
isAssigned, err := b.proverTaskOrm.IsProverAssigned(ctx.Copy(), publicKey.(string))
assigned, err := b.proverTaskOrm.IsProverAssigned(ctx.Copy(), publicKey.(string))
if err != nil {
return nil, fmt.Errorf("failed to check if prover %s is assigned a task, err: %w", publicKey.(string), err)
}
if isAssigned {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", publicKey, proverName, proverVersion)
}
ptc.hasAssignedTask = assigned
return &ptc, nil
}
func (b *BaseProverTask) applyUniversal(schema *coordinatorType.GetTaskSchema) (*coordinatorType.GetTaskSchema, []byte, error) {
ok, uTaskData, metadata, _ := libzkp.GenerateUniversalTask(schema.TaskType, schema.TaskData, schema.HardForkName)
expectedVk, ok := b.expectedVk[schema.HardForkName]
if !ok {
return nil, nil, fmt.Errorf("no expectedVk found from hardfork %s", schema.HardForkName)
}
ok, uTaskData, metadata, _ := libzkp.GenerateUniversalTask(schema.TaskType, schema.TaskData, schema.HardForkName, expectedVk)
if !ok {
return nil, nil, fmt.Errorf("can not generate universal task, see coordinator log for the reason")
}

View File

@@ -178,7 +178,20 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
if len(proverTask.Metadata) == 0 {
return errors.New("can not re-wrapping proof: no metadata has been recorded in advance")
}
proofParameter.Proof = libzkp.GenerateWrappedProof(proofParameter.Proof, string(proverTask.Metadata), []byte{})
var expected_vk []byte
switch message.ProofType(proofParameter.TaskType) {
case message.ProofTypeChunk:
expected_vk = m.verifier.ChunkVk[hardForkName]
case message.ProofTypeBatch:
expected_vk = m.verifier.BatchVk[hardForkName]
case message.ProofTypeBundle:
expected_vk = m.verifier.BundleVk[hardForkName]
}
if len(expected_vk) == 0 {
return errors.New("no vk specified match current hard fork, check your config")
}
proofParameter.Proof = libzkp.GenerateWrappedProof(proofParameter.Proof, string(proverTask.Metadata), expected_vk)
if proofParameter.Proof == "" {
return errors.New("can not re-wrapping proof, see coordinator log for reason")
}

View File

@@ -10,7 +10,13 @@ import (
// NewVerifier Sets up a mock verifier.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
return &Verifier{cfg: cfg, OpenVMVkMap: map[string]struct{}{"mock_vk": {}}}, nil
return &Verifier{
cfg: cfg,
OpenVMVkMap: map[string]struct{}{"mock_vk": {}},
ChunkVk: map[string][]byte{"euclidV2": []byte("mock_vk")},
BatchVk: map[string][]byte{"euclidV2": []byte("mock_vk")},
BundleVk: map[string][]byte{},
}, nil
}
// VerifyChunkProof return a mock verification result for a ChunkProof.

View File

@@ -11,4 +11,7 @@ const InvalidTestProof = "this is a invalid proof"
type Verifier struct {
cfg *config.VerifierConfig
OpenVMVkMap map[string]struct{}
ChunkVk map[string][]byte
BatchVk map[string][]byte
BundleVk map[string][]byte
}

View File

@@ -5,10 +5,12 @@ package verifier
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strings"
"github.com/scroll-tech/go-ethereum/log"
@@ -18,7 +20,7 @@ import (
"scroll-tech/coordinator/internal/logic/libzkp"
)
// This struct maps to `CircuitConfig` in libzkp/impl/src/verifier.rs
// This struct maps to `CircuitConfig` in libzkp/src/verifier.rs
// Define a brand new struct here is to eliminate side effects in case fields
// in `*config.CircuitConfig` being changed
type rustCircuitConfig struct {
@@ -26,24 +28,28 @@ type rustCircuitConfig struct {
AssetsPath string `json:"assets_path"`
}
func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
func newRustCircuitConfig(cfg config.AssetConfig) *rustCircuitConfig {
return &rustCircuitConfig{
ForkName: cfg.ForkName,
AssetsPath: cfg.AssetsPath,
}
}
// This struct maps to `VerifierConfig` in coordinator/internal/logic/libzkp/impl/src/verifier.rs
// This struct maps to `VerifierConfig` in coordinator/internal/logic/libzkp/src/verifier.rs
// Define a brand new struct here is to eliminate side effects in case fields
// in `*config.VerifierConfig` being changed
type rustVerifierConfig struct {
HighVersionCircuit *rustCircuitConfig `json:"high_version_circuit"`
Circuits []*rustCircuitConfig `json:"circuits"`
}
func newRustVerifierConfig(cfg *config.VerifierConfig) *rustVerifierConfig {
return &rustVerifierConfig{
HighVersionCircuit: newRustCircuitConfig(cfg.HighVersionCircuit),
out := &rustVerifierConfig{}
for _, cfg := range cfg.Verifiers {
out.Circuits = append(out.Circuits, newRustCircuitConfig(cfg))
}
return out
}
type rustVkDump struct {
@@ -65,10 +71,15 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
v := &Verifier{
cfg: cfg,
OpenVMVkMap: make(map[string]struct{}),
ChunkVk: make(map[string][]byte),
BatchVk: make(map[string][]byte),
BundleVk: make(map[string][]byte),
}
if err := v.loadOpenVMVks(message.EuclidV2Fork); err != nil {
return nil, err
for _, cfg := range cfg.Verifiers {
if err := v.loadOpenVMVks(cfg); err != nil {
return nil, err
}
}
return v, nil
@@ -108,27 +119,25 @@ func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName
return libzkp.VerifyBundleProof(string(buf), forkName), nil
}
func (v *Verifier) ReadVK(filePat string) (string, error) {
/*
add vk of imcompatilbe circuit app here to avoid we had used them unexpectedly
25/07/15: 0.5.0rc0 is no longer compatible since a breaking change
*/
const blocked_vks = `
rSJNNBpsxBdKlstbIIU/aYc7bHau98Qb2yjZMc5PmDhmGOolp5kYRbvF/VcWcO5HN5ujGs6S00W8pZcCoNQRLQ==,
2Lo7Cebm6SFtcsYXipkcMxIBmVY7UpoMXik/Msm7t2nyvi9EaNGsSnDnaCurscYEF+IcdjPUtVtY9EcD7IKwWg==,
D6YFHwTLZF/U2zpYJPQ3LwJZRm85yA5Vq2iFBqd3Mk4iwOUpS8sbOp3vg2+NDxhhKphgYpuUlykpdsoRhEt+cw==,
`
f, err := os.Open(filepath.Clean(filePat))
if err != nil {
return "", err
}
byt, err := io.ReadAll(f)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(byt), nil
}
func (v *Verifier) loadOpenVMVks(cfg config.AssetConfig) error {
func (v *Verifier) loadOpenVMVks(forkName string) error {
tempFile := path.Join(os.TempDir(), "openVmVk.json")
err := libzkp.DumpVk(forkName, tempFile)
if err != nil {
return err
vkFileName := cfg.Vkfile
if vkFileName == "" {
vkFileName = "openVmVk.json"
}
vkFile := path.Join(cfg.AssetsPath, vkFileName)
f, err := os.Open(filepath.Clean(tempFile))
f, err := os.Open(filepath.Clean(vkFile))
if err != nil {
return err
}
@@ -141,8 +150,36 @@ func (v *Verifier) loadOpenVMVks(forkName string) error {
if err := json.Unmarshal(byt, &dump); err != nil {
return err
}
if strings.Contains(blocked_vks, dump.Chunk) {
return fmt.Errorf("loaded blocked chunk vk %s", dump.Chunk)
}
if strings.Contains(blocked_vks, dump.Batch) {
return fmt.Errorf("loaded blocked batch vk %s", dump.Batch)
}
if strings.Contains(blocked_vks, dump.Bundle) {
return fmt.Errorf("loaded blocked bundle vk %s", dump.Bundle)
}
v.OpenVMVkMap[dump.Chunk] = struct{}{}
v.OpenVMVkMap[dump.Batch] = struct{}{}
v.OpenVMVkMap[dump.Bundle] = struct{}{}
log.Info("Load vks", "from", cfg.AssetsPath, "chunk", dump.Chunk, "batch", dump.Batch, "bundle", dump.Bundle)
decodedBytes, err := base64.StdEncoding.DecodeString(dump.Chunk)
if err != nil {
return err
}
v.ChunkVk[cfg.ForkName] = decodedBytes
decodedBytes, err = base64.StdEncoding.DecodeString(dump.Batch)
if err != nil {
return err
}
v.BatchVk[cfg.ForkName] = decodedBytes
decodedBytes, err = base64.StdEncoding.DecodeString(dump.Bundle)
if err != nil {
return err
}
v.BundleVk[cfg.ForkName] = decodedBytes
return nil
}

View File

@@ -29,11 +29,11 @@ func TestFFI(t *testing.T) {
as := assert.New(t)
cfg := &config.VerifierConfig{
HighVersionCircuit: &config.CircuitConfig{
AssetsPath: *assetsPathHi,
ForkName: "euclidV2",
MinProverVersion: "",
},
MinProverVersion: "",
Verifiers: []config.AssetConfig{{
AssetsPath: *assetsPathHi,
ForkName: "euclidV2",
}},
}
v, err := NewVerifier(cfg)

View File

@@ -57,17 +57,17 @@ func (*ProverTask) TableName() string {
}
// IsProverAssigned checks if a prover with the given public key has been assigned a task.
func (o *ProverTask) IsProverAssigned(ctx context.Context, publicKey string) (bool, error) {
func (o *ProverTask) IsProverAssigned(ctx context.Context, publicKey string) (*ProverTask, error) {
db := o.db.WithContext(ctx)
var task ProverTask
err := db.Where("prover_public_key = ? AND proving_status = ?", publicKey, types.ProverAssigned).First(&task).Error
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return false, nil
return nil, nil
}
return false, err
return nil, err
}
return true, nil
return &task, nil
}
// GetProverTasks get prover tasks
@@ -269,6 +269,24 @@ func (o *ProverTask) UpdateProverTaskProvingStatusAndFailureType(ctx context.Con
return nil
}
// UpdateProverTaskAssignedTime updates the assigned_at time of a specific ProverTask record.
func (o *ProverTask) UpdateProverTaskAssignedTime(ctx context.Context, uuid uuid.UUID, t time.Time, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("uuid = ?", uuid)
updates := make(map[string]interface{})
updates["assigned_at"] = t
if err := db.Updates(updates).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskAssignedTime error: %w, uuid:%s, status: %v", err, uuid, t)
}
return nil
}
// UpdateProverTaskFailureType update the prover task failure type
func (o *ProverTask) UpdateProverTaskFailureType(ctx context.Context, uuid uuid.UUID, failureType types.ProverTaskFailureType, dbTX ...*gorm.DB) error {
db := o.db

View File

@@ -79,16 +79,17 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
tokenTimeout = 60
conf = &config.Config{
L2: &config.L2{
ChainID: 111,
ChainID: 111,
Endpoint: &config.L2Endpoint{},
},
ProverManager: &config.ProverManager{
ProversPerSession: proversPerSession,
Verifier: &config.VerifierConfig{
HighVersionCircuit: &config.CircuitConfig{
AssetsPath: "",
ForkName: "euclidV2",
MinProverVersion: "v4.4.89",
},
MinProverVersion: "v4.4.89",
Verifiers: []config.AssetConfig{{
AssetsPath: "",
ForkName: "euclidV2",
}},
},
BatchCollectionTimeSec: 10,
ChunkCollectionTimeSec: 10,

View File

@@ -0,0 +1,45 @@
[patch."https://github.com/openvm-org/openvm.git"]
openvm-build = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-circuit = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-continuations = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-instructions ={ git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-circuit = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-compiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-recursion = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-rv32im-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
openvm-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
"nightly-features",
], tag = "v0.2.1" }
p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }

11002
crates/gpu_override/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,21 @@
.PHONY: build update clean
ZKVM_COMMIT ?= freebuild
PLONKY3_GPU_VERSION=$(shell ./print_plonky3gpu_version.sh | sed -n '2p')
$(info PLONKY3_GPU_VERSION is ${PLONKY3_GPU_VERSION})
GIT_REV ?= $(shell git rev-parse --short HEAD)
GO_TAG ?= $(shell grep "var tag = " ../../common/version/version.go | cut -d "\"" -f2)
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION}
$(info ZK_GPU_VERSION is ${ZK_VERSION})
clean:
cargo clean -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock
# build gpu prover, never touch lock file
build:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock
# update Cargo.lock while override config has been updated
#update:
# GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock

View File

@@ -1,6 +1,6 @@
#!/bin/bash
config_file=~/.cargo/config.toml
config_file=.cargo/config.toml
plonky3_gpu_path=$(grep 'path.*plonky3-gpu' "$config_file" | cut -d'"' -f2 | head -n 1)
plonky3_gpu_path=$(dirname "$plonky3_gpu_path")

View File

@@ -76,7 +76,7 @@ impl RpcClientCore {
let client = ClientBuilder::default().layer(retry_layer).http(rpc);
Ok(Self {
provider: ProviderBuilder::<_, _, Network>::default().on_client(client),
provider: ProviderBuilder::<_, _, Network>::default().connect_client(client),
rt,
})
}
@@ -100,46 +100,50 @@ impl ChunkInterpreter for RpcClient<'_> {
block_hash: sbv_primitives::B256,
prev_witness: Option<&sbv_primitives::types::BlockWitness>,
) -> Result<sbv_primitives::types::BlockWitness> {
use alloy::network::primitives::BlockTransactionsKind;
use sbv_utils::{rpc::ProviderExt, witness::WitnessBuilder};
let chain_id = provider.get_chain_id().await?;
let block = provider
.get_block_by_hash(block_hash, BlockTransactionsKind::Full)
.get_block_by_hash(block_hash)
.full()
.await?
.ok_or_else(|| eyre::eyre!("Block not found"))?;
.ok_or_else(|| eyre::eyre!("Block {block_hash} not found"))?;
let number = block.header.number;
let parent_hash = block.header.parent_hash;
if number == 0 {
eyre::bail!("no number in header or use block 0");
}
let prev_state_root = if let Some(witness) = prev_witness {
if witness.header.number != number - 1 {
eyre::bail!(
"the ref witness is not the previous block, expected {} get {}",
number - 1,
witness.header.number,
);
}
witness.header.state_root
} else {
provider
.scroll_disk_root((number - 1).into())
.await?
.disk_root
};
let witness = WitnessBuilder::new()
let mut witness_builder = WitnessBuilder::new()
.block(block)
.chain_id(chain_id)
.execution_witness(provider.debug_execution_witness(number.into()).await?)
.state_root(provider.scroll_disk_root(number.into()).await?.disk_root)?
.prev_state_root(prev_state_root)
.build()?;
.execution_witness(provider.debug_execution_witness(number.into()).await?);
Ok(witness)
let prev_state_root = match prev_witness {
Some(witness) => {
if witness.header.number != number - 1 {
eyre::bail!(
"the ref witness is not the previous block, expected {} get {}",
number - 1,
witness.header.number,
);
}
witness.header.state_root
}
None => {
let parent_block = provider
.get_block_by_hash(parent_hash)
.await?
.expect("parent block should exist");
parent_block.header.state_root
}
};
witness_builder = witness_builder.prev_state_root(prev_state_root);
Ok(witness_builder.build()?)
}
tracing::debug!("fetch witness for {block_hash}");

View File

@@ -8,7 +8,8 @@ edition.workspace = true
scroll-zkvm-types.workspace = true
scroll-zkvm-verifier-euclid.workspace = true
sbv-primitives.workspace = true
alloy-primitives.workspace = true #depress the effect of "native-keccak"
sbv-primitives = {workspace = true, features = ["scroll-compress-ratio", "scroll"]}
base64.workspace = true
serde.workspace = true
serde_derive.workspace = true
@@ -19,5 +20,5 @@ eyre.workspace = true
git-version = "0.3.5"
serde_stacker = "0.1"
regex = "1.11"
c-kzg = { version = "1.0", features = ["serde"] }
c-kzg = { version = "2.0", features = ["serde"] }

View File

@@ -26,10 +26,12 @@ pub fn checkout_chunk_task(
}
/// Generate required staff for proving tasks
/// return (pi_hash, metadata, task)
pub fn gen_universal_task(
task_type: i32,
task_json: &str,
fork_name: &str,
fork_name_str: &str,
expected_vk: &[u8],
interpreter: Option<impl ChunkInterpreter>,
) -> eyre::Result<(B256, String, String)> {
use proofs::*;
@@ -44,26 +46,49 @@ pub fn gen_universal_task(
Bundle(BundleProofMetadata),
}
let (pi_hash, metadata, u_task) = match task_type {
let (pi_hash, metadata, mut u_task) = match task_type {
x if x == TaskType::Chunk as i32 => {
let task = serde_json::from_str::<ChunkProvingTask>(task_json)?;
let (pi_hash, metadata, u_task) =
gen_universal_chunk_task(task, fork_name.into(), interpreter)?;
let mut task = serde_json::from_str::<ChunkProvingTask>(task_json)?;
// normailze fork name field in task
task.fork_name = task.fork_name.to_lowercase();
// always respect the fork_name_str (which has been normalized) being passed
// if the fork_name wrapped in task is not match, consider it a malformed task
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in chunk task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
let (pi_hash, metadata, u_task) = utils::panic_catch(move || {
gen_universal_chunk_task(task, fork_name_str.into(), interpreter)
})
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
(pi_hash, AnyMetaData::Chunk(metadata), u_task)
}
x if x == TaskType::Batch as i32 => {
let task = serde_json::from_str::<BatchProvingTask>(task_json)?;
let (pi_hash, metadata, u_task) = gen_universal_batch_task(task, fork_name.into())?;
let mut task = serde_json::from_str::<BatchProvingTask>(task_json)?;
task.fork_name = task.fork_name.to_lowercase();
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in batch task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
let (pi_hash, metadata, u_task) =
utils::panic_catch(move || gen_universal_batch_task(task, fork_name_str.into()))
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
(pi_hash, AnyMetaData::Batch(metadata), u_task)
}
x if x == TaskType::Bundle as i32 => {
let task = serde_json::from_str::<BundleProvingTask>(task_json)?;
let (pi_hash, metadata, u_task) = gen_universal_bundle_task(task, fork_name.into())?;
let mut task = serde_json::from_str::<BundleProvingTask>(task_json)?;
task.fork_name = task.fork_name.to_lowercase();
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in bundle task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
let (pi_hash, metadata, u_task) =
utils::panic_catch(move || gen_universal_bundle_task(task, fork_name_str.into()))
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
(pi_hash, AnyMetaData::Bundle(metadata), u_task)
}
_ => return Err(eyre::eyre!("unrecognized task type {task_type}")),
};
u_task.vk = Vec::from(expected_vk);
Ok((
pi_hash,
serde_json::to_string(&metadata)?,
@@ -106,8 +131,7 @@ pub fn verifier_init(config: &str) -> eyre::Result<()> {
pub fn verify_proof(proof: Vec<u8>, fork_name: &str, task_type: TaskType) -> eyre::Result<bool> {
let verifier = verifier::get_verifier(fork_name)?;
let ret = verifier.verify(task_type, proof)?;
let ret = verifier.lock().unwrap().verify(task_type, &proof)?;
Ok(ret)
}
@@ -115,7 +139,7 @@ pub fn verify_proof(proof: Vec<u8>, fork_name: &str, task_type: TaskType) -> eyr
pub fn dump_vk(fork_name: &str, file: &str) -> eyre::Result<()> {
let verifier = verifier::get_verifier(fork_name)?;
verifier.dump_vk(Path::new(file));
verifier.lock().unwrap().dump_vk(Path::new(file));
Ok(())
}

View File

@@ -179,7 +179,7 @@ impl<Metadata: ProofMetadata> WrappedProof<Metadata> {
/// Sanity checks on the wrapped proof:
///
/// - pi_hash computed in host does in fact match pi_hash computed in guest
pub fn sanity_check(&self, fork_name: ForkName) {
pub fn pi_hash_check(&self, fork_name: ForkName) -> bool {
let proof_pi = self.proof.public_values();
let expected_pi = self
@@ -192,10 +192,11 @@ impl<Metadata: ProofMetadata> WrappedProof<Metadata> {
.map(|&v| v as u32)
.collect::<Vec<_>>();
assert_eq!(
expected_pi, proof_pi,
"pi mismatch: expected={expected_pi:?}, found={proof_pi:?}"
);
let ret = expected_pi == proof_pi;
if !ret {
tracing::warn!("pi mismatch: expected={expected_pi:?}, found={proof_pi:?}");
}
ret
}
}
@@ -213,11 +214,7 @@ impl<Metadata: ProofMetadata> PersistableProof for WrappedProof<Metadata> {
mod tests {
use base64::{prelude::BASE64_STANDARD, Engine};
use sbv_primitives::B256;
use scroll_zkvm_types::{
bundle::{BundleInfo, BundleInfoV1},
proof::EvmProof,
public_inputs::PublicInputs,
};
use scroll_zkvm_types::{bundle::BundleInfo, proof::EvmProof, public_inputs::ForkName};
use super::*;
@@ -244,7 +241,7 @@ mod tests {
fn test_dummy_proof() -> eyre::Result<()> {
// 1. Metadata
let metadata = {
let bundle_info: BundleInfoV1 = BundleInfo {
let bundle_info = BundleInfo {
chain_id: 12345,
num_batches: 12,
prev_state_root: B256::repeat_byte(1),
@@ -253,11 +250,10 @@ mod tests {
batch_hash: B256::repeat_byte(4),
withdraw_root: B256::repeat_byte(5),
msg_queue_hash: B256::repeat_byte(6),
}
.into();
let bundle_pi_hash = bundle_info.pi_hash();
};
let bundle_pi_hash = bundle_info.pi_hash(ForkName::EuclidV1);
BundleProofMetadata {
bundle_info: bundle_info.0,
bundle_info,
bundle_pi_hash,
}
};

View File

@@ -9,13 +9,31 @@ pub use chunk::{ChunkProvingTask, ChunkTask};
pub use chunk_interpreter::ChunkInterpreter;
pub use scroll_zkvm_types::task::ProvingTask;
use crate::proofs::{BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata};
use chunk_interpreter::{DummyInterpreter, TryFromWithInterpreter};
use sbv_primitives::B256;
use scroll_zkvm_types::{
chunk::ChunkInfo,
public_inputs::{ForkName, MultiVersionPublicInputs},
use crate::{
proofs::{self, BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata},
utils::panic_catch,
};
use sbv_primitives::B256;
use scroll_zkvm_types::public_inputs::{ForkName, MultiVersionPublicInputs};
fn check_aggregation_proofs<Metadata>(
proofs: &[proofs::WrappedProof<Metadata>],
fork_name: ForkName,
) -> eyre::Result<()>
where
Metadata: proofs::ProofMetadata,
{
panic_catch(|| {
for w in proofs.windows(2) {
w[1].metadata
.pi_hash_info()
.validate(w[0].metadata.pi_hash_info(), fork_name);
}
})
.map_err(|e| eyre::eyre!("Chunk data validation failed: {}", e))?;
Ok(())
}
/// Generate required staff for chunk proving
pub fn gen_universal_chunk_task(
@@ -23,11 +41,10 @@ pub fn gen_universal_chunk_task(
fork_name: ForkName,
interpreter: Option<impl ChunkInterpreter>,
) -> eyre::Result<(B256, ChunkProofMetadata, ProvingTask)> {
let chunk_info = if let Some(interpreter) = interpreter {
ChunkInfo::try_from_with_interpret(&mut task, interpreter)
} else {
ChunkInfo::try_from_with_interpret(&mut task, DummyInterpreter {})
}?;
if let Some(interpreter) = interpreter {
task.prepare_task_via_interpret(interpreter)?;
}
let chunk_info = task.precheck_and_build_metadata()?;
let proving_task = task.try_into()?;
let expected_pi_hash = chunk_info.pi_hash_by_fork(fork_name);
Ok((

View File

@@ -4,8 +4,9 @@ use eyre::Result;
use sbv_primitives::{B256, U256};
use scroll_zkvm_types::{
batch::{
BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchInfo, BatchWitness, EnvelopeV6, EnvelopeV7,
PointEvalWitness, ReferenceHeader, N_BLOB_BYTES,
BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderV8, BatchInfo, BatchWitness,
Envelope, EnvelopeV6, EnvelopeV7, EnvelopeV8, PointEvalWitness, ReferenceHeader,
ToArchievedWitness, N_BLOB_BYTES,
},
public_inputs::ForkName,
task::ProvingTask,
@@ -23,37 +24,35 @@ use utils::{base64, point_eval};
#[serde(untagged)]
pub enum BatchHeaderV {
V6(BatchHeaderV6),
V7(BatchHeaderV7),
}
impl From<BatchHeaderV> for ReferenceHeader {
fn from(value: BatchHeaderV) -> Self {
match value {
BatchHeaderV::V6(h) => ReferenceHeader::V6(h),
BatchHeaderV::V7(h) => ReferenceHeader::V7(h),
}
}
V7_8(BatchHeaderV7),
}
impl BatchHeaderV {
pub fn batch_hash(&self) -> B256 {
match self {
BatchHeaderV::V6(h) => h.batch_hash(),
BatchHeaderV::V7(h) => h.batch_hash(),
BatchHeaderV::V7_8(h) => h.batch_hash(),
}
}
pub fn must_v6_header(&self) -> &BatchHeaderV6 {
match self {
BatchHeaderV::V6(h) => h,
BatchHeaderV::V7(_) => panic!("try to pick v7 header"),
_ => panic!("try to pick other header type"),
}
}
pub fn must_v7_header(&self) -> &BatchHeaderV7 {
match self {
BatchHeaderV::V7(h) => h,
BatchHeaderV::V6(_) => panic!("try to pick v6 header"),
BatchHeaderV::V7_8(h) => h,
_ => panic!("try to pick other header type"),
}
}
pub fn must_v8_header(&self) -> &BatchHeaderV8 {
match self {
BatchHeaderV::V7_8(h) => h,
_ => panic!("try to pick other header type"),
}
}
}
@@ -104,7 +103,7 @@ impl BatchProvingTask {
fn build_guest_input(&self) -> BatchWitness {
let fork_name = self.fork_name.to_lowercase().as_str().into();
// calculate point eval needed and compare with task input
// sanity check: calculate point eval needed and compare with task input
let (kzg_commitment, kzg_proof, challenge_digest) = {
let blob = point_eval::to_blob(&self.blob_bytes);
let commitment = point_eval::blob_to_kzg_commitment(&blob);
@@ -117,21 +116,31 @@ impl BatchProvingTask {
"hardfork mismatch for da-codec@v6 header: found={fork_name:?}, expected={:?}",
ForkName::EuclidV1,
);
EnvelopeV6::from(self.blob_bytes.as_slice()).challenge_digest(versioned_hash)
EnvelopeV6::from_slice(self.blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
BatchHeaderV::V7(_) => {
assert_eq!(
fork_name,
ForkName::EuclidV2,
"hardfork mismatch for da-codec@v7 header: found={fork_name:?}, expected={:?}",
ForkName::EuclidV2,
);
BatchHeaderV::V7_8(_) => {
let padded_blob_bytes = {
let mut padded_blob_bytes = self.blob_bytes.to_vec();
padded_blob_bytes.resize(N_BLOB_BYTES, 0);
padded_blob_bytes
};
EnvelopeV7::from(padded_blob_bytes.as_slice()).challenge_digest(versioned_hash)
match fork_name {
ForkName::EuclidV2 => {
<EnvelopeV7 as Envelope>::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
ForkName::Feynman => {
<EnvelopeV8 as Envelope>::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
f => unreachable!(
"hardfork mismatch for da-codec@v7 header: found={}, expected={:?}",
f,
[ForkName::EuclidV2, ForkName::Feynman],
),
}
}
};
@@ -157,7 +166,11 @@ impl BatchProvingTask {
kzg_proof: kzg_proof.into_inner(),
};
let reference_header = self.batch_header.clone().into();
let reference_header = match fork_name {
ForkName::EuclidV1 => ReferenceHeader::V6(*self.batch_header.must_v6_header()),
ForkName::EuclidV2 => ReferenceHeader::V7(*self.batch_header.must_v7_header()),
ForkName::Feynman => ReferenceHeader::V8(*self.batch_header.must_v8_header()),
};
BatchWitness {
fork_name,
@@ -175,79 +188,19 @@ impl BatchProvingTask {
pub fn precheck_and_build_metadata(&self) -> Result<BatchInfo> {
let fork_name = ForkName::from(self.fork_name.as_str());
let (parent_state_root, state_root, chain_id, withdraw_root) = (
self.chunk_proofs
.first()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.prev_state_root,
self.chunk_proofs
.last()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.post_state_root,
self.chunk_proofs
.last()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.chain_id,
self.chunk_proofs
.last()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.withdraw_root,
);
let (parent_batch_hash, prev_msg_queue_hash, post_msg_queue_hash) = match self.batch_header
{
BatchHeaderV::V6(h) => {
assert_eq!(
fork_name,
ForkName::EuclidV1,
"hardfork mismatch for da-codec@v6 header: found={fork_name:?}, expected={:?}",
ForkName::EuclidV1,
);
(h.parent_batch_hash, Default::default(), Default::default())
}
BatchHeaderV::V7(h) => {
assert_eq!(
fork_name,
ForkName::EuclidV2,
"hardfork mismatch for da-codec@v7 header: found={fork_name:?}, expected={:?}",
ForkName::EuclidV2,
);
(
h.parent_batch_hash,
self.chunk_proofs
.first()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.prev_msg_queue_hash,
self.chunk_proofs
.last()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.post_msg_queue_hash,
)
}
};
// for every aggregation task, there are two steps needed to build the metadata:
// 1. generate data for metadata from the witness
// 2. validate every adjacent proof pair
let witness = self.build_guest_input();
let archieved = ToArchievedWitness::create(&witness)
.map_err(|e| eyre::eyre!("archieve batch witness fail: {e}"))?;
let archieved_witness = archieved
.access()
.map_err(|e| eyre::eyre!("access archieved batch witness fail: {e}"))?;
let metadata: BatchInfo = archieved_witness.into();
let batch_hash = self.batch_header.batch_hash();
super::check_aggregation_proofs(self.chunk_proofs.as_slice(), fork_name)?;
Ok(BatchInfo {
parent_state_root,
parent_batch_hash,
state_root,
batch_hash,
chain_id,
withdraw_root,
prev_msg_queue_hash,
post_msg_queue_hash,
})
Ok(metadata)
}
}

View File

@@ -42,7 +42,8 @@ pub mod point_eval {
/// Get the KZG commitment from an EIP-4844 blob.
pub fn blob_to_kzg_commitment(blob: &c_kzg::Blob) -> c_kzg::KzgCommitment {
c_kzg::KzgCommitment::blob_to_kzg_commitment(blob, c_kzg::ethereum_kzg_settings())
c_kzg::ethereum_kzg_settings(0)
.blob_to_kzg_commitment(blob)
.expect("blob to kzg commitment should succeed")
}
@@ -65,12 +66,9 @@ pub mod point_eval {
pub fn get_kzg_proof(blob: &c_kzg::Blob, challenge: H256) -> (c_kzg::KzgProof, U256) {
let challenge = get_x_from_challenge(challenge);
let (proof, y) = c_kzg::KzgProof::compute_kzg_proof(
blob,
&c_kzg::Bytes32::new(challenge.to_be_bytes()),
c_kzg::ethereum_kzg_settings(),
)
.expect("kzg proof should succeed");
let (proof, y) = c_kzg::ethereum_kzg_settings(0)
.compute_kzg_proof(blob, &c_kzg::Bytes32::new(challenge.to_be_bytes()))
.expect("kzg proof should succeed");
(proof, U256::from_be_slice(y.as_slice()))
}

View File

@@ -1,9 +1,9 @@
use crate::proofs::BatchProof;
use eyre::Result;
use scroll_zkvm_types::{
bundle::{BundleInfo, BundleWitness},
bundle::{BundleInfo, BundleWitness, ToArchievedWitness},
public_inputs::ForkName,
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};
/// Message indicating a sanity check failure.
@@ -46,61 +46,26 @@ impl BundleProvingTask {
.iter()
.map(|wrapped_proof| wrapped_proof.metadata.batch_info.clone())
.collect(),
fork_name: self.fork_name.to_lowercase().as_str().into(),
}
}
pub fn precheck_and_build_metadata(&self) -> Result<BundleInfo> {
use eyre::eyre;
let err_prefix = format!("metadata_with_prechecks for task_id={}", self.identifier());
let fork_name = ForkName::from(self.fork_name.as_str());
// for every aggregation task, there are two steps needed to build the metadata:
// 1. generate data for metadata from the witness
// 2. validate every adjacent proof pair
let witness = self.build_guest_input();
let archieved = ToArchievedWitness::create(&witness)
.map_err(|e| eyre::eyre!("archieve bundle witness fail: {e}"))?;
let archieved_witness = archieved
.access()
.map_err(|e| eyre::eyre!("access archieved bundle witness fail: {e}"))?;
let metadata: BundleInfo = archieved_witness.into();
for w in self.batch_proofs.windows(2) {
if w[1].metadata.batch_info.chain_id != w[0].metadata.batch_info.chain_id {
return Err(eyre!("{err_prefix}: chain_id mismatch"));
}
super::check_aggregation_proofs(self.batch_proofs.as_slice(), fork_name)?;
if w[1].metadata.batch_info.parent_state_root != w[0].metadata.batch_info.state_root {
return Err(eyre!("{err_prefix}: state_root not chained"));
}
if w[1].metadata.batch_info.parent_batch_hash != w[0].metadata.batch_info.batch_hash {
return Err(eyre!("{err_prefix}: batch_hash not chained"));
}
}
let (first_batch, last_batch) = (
&self
.batch_proofs
.first()
.expect("at least one batch in bundle")
.metadata
.batch_info,
&self
.batch_proofs
.last()
.expect("at least one batch in bundle")
.metadata
.batch_info,
);
let chain_id = first_batch.chain_id;
let num_batches = u32::try_from(self.batch_proofs.len()).expect("num_batches: u32");
let prev_state_root = first_batch.parent_state_root;
let prev_batch_hash = first_batch.parent_batch_hash;
let post_state_root = last_batch.state_root;
let batch_hash = last_batch.batch_hash;
let withdraw_root = last_batch.withdraw_root;
let msg_queue_hash = last_batch.post_msg_queue_hash;
Ok(BundleInfo {
chain_id,
msg_queue_hash,
num_batches,
prev_state_root,
prev_batch_hash,
post_state_root,
batch_hash,
withdraw_root,
})
Ok(metadata)
}
}
@@ -118,7 +83,7 @@ impl TryFrom<BundleProvingTask> for ProvingTask {
.into_iter()
.map(|w_proof| w_proof.proof.into_root_proof().expect("expect root proof"))
.collect(),
serialized_witness: vec![to_rkyv_bytes::<RancorError>(&witness)?.to_vec()],
serialized_witness: vec![witness.rkyv_serialize(None)?.to_vec()],
vk: Vec::new(),
})
}

View File

@@ -2,9 +2,8 @@ use super::chunk_interpreter::*;
use eyre::Result;
use sbv_primitives::{types::BlockWitness, B256};
use scroll_zkvm_types::{
chunk::{execute, ChunkInfo, ChunkWitness},
chunk::{execute, ChunkInfo, ChunkWitness, ToArchievedWitness},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};
/// The type aligned with coordinator's defination
@@ -72,7 +71,7 @@ impl TryFrom<ChunkProvingTask> for ProvingTask {
identifier: value.identifier(),
fork_name: value.fork_name,
aggregated_proofs: Vec::new(),
serialized_witness: vec![to_rkyv_bytes::<RancorError>(&witness)?.to_vec()],
serialized_witness: vec![witness.rkyv_serialize(None)?.to_vec()],
vk: Vec::new(),
})
}
@@ -119,33 +118,43 @@ impl ChunkProvingTask {
}
fn build_guest_input(&self) -> ChunkWitness {
ChunkWitness {
blocks: self.block_witnesses.to_vec(),
prev_msg_queue_hash: self.prev_msg_queue_hash,
fork_name: self.fork_name.to_lowercase().as_str().into(),
}
ChunkWitness::new(
&self.block_witnesses,
self.prev_msg_queue_hash,
self.fork_name.to_lowercase().as_str().into(),
)
}
fn insert_state(&mut self, node: sbv_primitives::Bytes) {
self.block_witnesses[0].states.push(node);
}
}
const MAX_FETCH_NODES_ATTEMPTS: usize = 15;
pub fn precheck_and_build_metadata(&self) -> Result<ChunkInfo> {
let witness = self.build_guest_input();
let archieved = ToArchievedWitness::create(&witness)
.map_err(|e| eyre::eyre!("archieve chunk witness fail: {e}"))?;
let archieved_witness = archieved
.access()
.map_err(|e| eyre::eyre!("access archieved chunk witness fail: {e}"))?;
impl TryFromWithInterpreter<&mut ChunkProvingTask> for ChunkInfo {
fn try_from_with_interpret(
value: &mut ChunkProvingTask,
let ret = ChunkInfo::try_from(archieved_witness).map_err(|e| eyre::eyre!("{e}"))?;
Ok(ret)
}
/// this method check the validate of current task (there may be missing storage node)
/// and try fixing it until everything is ok
pub fn prepare_task_via_interpret(
&mut self,
interpreter: impl ChunkInterpreter,
) -> eyre::Result<Self> {
) -> eyre::Result<()> {
use eyre::eyre;
let err_prefix = format!(
"metadata_with_prechecks for task_id={:?}",
value.identifier()
self.identifier()
);
if value.block_witnesses.is_empty() {
if self.block_witnesses.is_empty() {
return Err(eyre!(
"{err_prefix}: chunk should contain at least one block",
));
@@ -156,8 +165,15 @@ impl TryFromWithInterpreter<&mut ChunkProvingTask> for ChunkInfo {
let err_parse_re = regex::Regex::new(pattern)?;
let mut attempts = 0;
loop {
match execute(&value.build_guest_input()) {
Ok(chunk_info) => return Ok(chunk_info),
let witness = self.build_guest_input();
let archieved = ToArchievedWitness::create(&witness)
.map_err(|e| eyre::eyre!("archieve chunk witness fail: {e}"))?;
let archieved_witness = archieved
.access()
.map_err(|e| eyre::eyre!("access archieved chunk witness fail: {e}"))?;
match execute(archieved_witness) {
Ok(_) => return Ok(()),
Err(e) => {
if let Some(caps) = err_parse_re.captures(&e) {
let hash = caps[2].to_string();
@@ -174,7 +190,7 @@ impl TryFromWithInterpreter<&mut ChunkProvingTask> for ChunkInfo {
hash.parse::<sbv_primitives::B256>().expect("should be hex");
let node = interpreter.try_fetch_storage_node(node_hash)?;
tracing::warn!("missing node fetched: {node}");
value.insert_state(node);
self.insert_state(node);
} else {
return Err(eyre!("{err_prefix}: {e}"));
}
@@ -183,3 +199,5 @@ impl TryFromWithInterpreter<&mut ChunkProvingTask> for ChunkInfo {
}
}
}
const MAX_FETCH_NODES_ATTEMPTS: usize = 15;

File diff suppressed because one or more lines are too long

View File

@@ -4,7 +4,11 @@ mod euclidv2;
use euclidv2::EuclidV2Verifier;
use eyre::Result;
use serde::{Deserialize, Serialize};
use std::{cell::OnceCell, path::Path, rc::Rc};
use std::{
collections::HashMap,
path::Path,
sync::{Arc, Mutex, OnceLock},
};
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum TaskType {
@@ -31,7 +35,7 @@ pub struct VKDump {
}
pub trait ProofVerifier {
fn verify(&self, task_type: TaskType, proof: Vec<u8>) -> Result<bool>;
fn verify(&self, task_type: TaskType, proof: &[u8]) -> Result<bool>;
fn dump_vk(&self, file: &Path);
}
@@ -43,36 +47,49 @@ pub struct CircuitConfig {
#[derive(Debug, Serialize, Deserialize)]
pub struct VerifierConfig {
pub high_version_circuit: CircuitConfig,
pub circuits: Vec<CircuitConfig>,
}
type HardForkName = String;
struct VerifierPair(HardForkName, Rc<Box<dyn ProofVerifier>>);
static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();
type VerifierType = Arc<Mutex<dyn ProofVerifier + Send>>;
static VERIFIERS: OnceLock<HashMap<HardForkName, VerifierType>> = OnceLock::new();
pub fn init(config: VerifierConfig) {
let verifier = EuclidV2Verifier::new(&config.high_version_circuit.assets_path);
unsafe {
VERIFIER_HIGH
.set(VerifierPair(
config.high_version_circuit.fork_name,
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();
let mut verifiers: HashMap<HardForkName, VerifierType> = Default::default();
for cfg in &config.circuits {
let canonical_fork_name = cfg.fork_name.to_lowercase();
let verifier = EuclidV2Verifier::new(&cfg.assets_path, canonical_fork_name.as_str().into());
let ret = verifiers.insert(canonical_fork_name, Arc::new(Mutex::new(verifier)));
assert!(
ret.is_none(),
"DO NOT init the same fork {} twice",
cfg.fork_name
);
tracing::info!("load verifier config for fork {}", cfg.fork_name);
}
let ret = VERIFIERS.set(verifiers).is_ok();
assert!(ret);
}
pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
unsafe {
if let Some(verifier) = VERIFIER_HIGH.get() {
if verifier.0 == fork_name {
return Ok(verifier.1.clone());
}
pub fn get_verifier(fork_name: &str) -> Result<Arc<Mutex<dyn ProofVerifier>>> {
if let Some(verifiers) = VERIFIERS.get() {
if let Some(verifier) = verifiers.get(fork_name) {
return Ok(verifier.clone());
}
Err(eyre::eyre!(
"failed to get verifier, key not found: {}, has {:?}",
fork_name,
verifiers.keys().collect::<Vec<_>>(),
))
} else {
Err(eyre::eyre!(
"failed to get verifier, not inited {}",
fork_name
))
}
Err(eyre::eyre!(
"failed to get verifier, key not found, {}",
fork_name
))
}

View File

@@ -1,4 +1,4 @@
use super::{ProofVerifier, TaskType, VKDump};
use super::{ProofVerifier, TaskType};
use eyre::Result;
@@ -6,61 +6,64 @@ use crate::{
proofs::{AsRootProof, BatchProof, BundleProof, ChunkProof, IntoEvmProof},
utils::panic_catch,
};
use scroll_zkvm_verifier_euclid::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
use std::{fs::File, path::Path};
use scroll_zkvm_types::public_inputs::ForkName;
use scroll_zkvm_verifier_euclid::verifier::UniversalVerifier;
use std::path::Path;
pub struct EuclidV2Verifier {
chunk_verifier: ChunkVerifier,
batch_verifier: BatchVerifier,
bundle_verifier: BundleVerifierEuclidV2,
verifier: UniversalVerifier,
fork: ForkName,
}
impl EuclidV2Verifier {
pub fn new(assets_dir: &str) -> Self {
pub fn new(assets_dir: &str, fork: ForkName) -> Self {
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
let config = Path::new(assets_dir).join("root-verifier-vm-config");
let exe = Path::new(assets_dir).join("root-verifier-committed-exe");
Self {
chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
verifier: UniversalVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up chunk verifier"),
batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up batch verifier"),
bundle_verifier: BundleVerifierEuclidV2::setup(&config, &exe, &verifier_bin)
.expect("Setting up bundle verifier"),
fork,
}
}
}
impl ProofVerifier for EuclidV2Verifier {
fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
fn verify(&self, task_type: super::TaskType, proof: &[u8]) -> Result<bool> {
panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
self.chunk_verifier.verify_proof(proof.as_root_proof())
let proof = serde_json::from_slice::<ChunkProof>(proof).unwrap();
if !proof.pi_hash_check(self.fork) {
return false;
}
self.verifier
.verify_proof(proof.as_root_proof(), &proof.vk)
.unwrap()
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
self.batch_verifier.verify_proof(proof.as_root_proof())
let proof = serde_json::from_slice::<BatchProof>(proof).unwrap();
if !proof.pi_hash_check(self.fork) {
return false;
}
self.verifier
.verify_proof(proof.as_root_proof(), &proof.vk)
.unwrap()
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
self.bundle_verifier
.verify_proof_evm(&proof.into_evm_proof())
let proof = serde_json::from_slice::<BundleProof>(proof).unwrap();
if !proof.pi_hash_check(self.fork) {
return false;
}
let vk = proof.vk.clone();
let evm_proof = proof.into_evm_proof();
self.verifier.verify_proof_evm(&evm_proof, &vk).unwrap()
}
})
.map_err(|err_str: String| eyre::eyre!("{err_str}"))
}
fn dump_vk(&self, file: &Path) {
use base64::{prelude::BASE64_STANDARD, Engine};
let f = File::create(file).expect("Failed to open file to dump VK");
let dump = VKDump {
chunk_vk: BASE64_STANDARD.encode(self.chunk_verifier.get_app_vk()),
batch_vk: BASE64_STANDARD.encode(self.batch_verifier.get_app_vk()),
bundle_vk: BASE64_STANDARD.encode(self.bundle_verifier.get_app_vk()),
};
serde_json::to_writer(f, &dump).expect("Failed to dump VK");
fn dump_vk(&self, _file: &Path) {
panic!("dump vk has been deprecated");
}
}

View File

@@ -11,4 +11,5 @@ crate-type = ["cdylib"]
[dependencies]
libzkp = { path = "../libzkp" }
l2geth = { path = "../l2geth"}
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tracing.workspace = true

View File

@@ -5,6 +5,47 @@ use std::ffi::{c_char, CString};
use libzkp::TaskType;
use utils::{c_char_to_str, c_char_to_vec};
use std::sync::OnceLock;
static LOG_SETTINGS: OnceLock<Result<(), String>> = OnceLock::new();
fn enable_dump() -> bool {
static ZKVM_DEBUG_DUMP: OnceLock<bool> = OnceLock::new();
*ZKVM_DEBUG_DUMP.get_or_init(|| {
std::env::var("ZKVM_DEBUG")
.or_else(|_| std::env::var("ZKVM_DEBUG_PROOF"))
.map(|s| s.to_lowercase() == "true")
.unwrap_or(false)
})
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_tracing() {
use tracing_subscriber::filter::{EnvFilter, LevelFilter};
LOG_SETTINGS
.get_or_init(|| {
tracing_subscriber::fmt()
.with_env_filter(
EnvFilter::builder()
.with_default_directive(LevelFilter::INFO.into())
.from_env_lossy(),
)
.with_ansi(false)
.with_level(true)
.with_target(true)
.try_init()
.map_err(|e| format!("{e}"))?;
Ok(())
})
.clone()
.expect("Failed to initialize tracing subscriber");
tracing::info!("Tracing has been initialized normally");
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_verifier(config: *const c_char) {
@@ -21,6 +62,7 @@ pub unsafe extern "C" fn init_l2geth(config: *const c_char) {
fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
let fork_name_str = c_char_to_str(fork_name);
let proof_str = proof;
let proof = c_char_to_vec(proof);
match libzkp::verify_proof(proof, fork_name_str, task_type) {
@@ -28,7 +70,24 @@ fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskT
tracing::error!("{:?} verify failed, error: {:#}", task_type, e);
false as c_char
}
Ok(result) => result as c_char,
Ok(result) => {
if !result && enable_dump() {
use std::time::{SystemTime, UNIX_EPOCH};
// Dump req.input to a temporary file
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
let filename = format!("/tmp/proof_{}.json", timestamp);
let cstr = unsafe { std::ffi::CStr::from_ptr(proof_str) };
if let Err(e) = std::fs::write(&filename, cstr.to_bytes()) {
eprintln!("Failed to write proof to file {}: {}", filename, e);
} else {
println!("Dumped failed proof to {}", filename);
}
}
result as c_char
}
}
}
@@ -91,6 +150,8 @@ pub unsafe extern "C" fn gen_universal_task(
task_type: i32,
task: *const c_char,
fork_name: *const c_char,
expected_vk: *const u8,
expected_vk_len: usize,
) -> HandlingResult {
let mut interpreter = None;
let task_json = if task_type == TaskType::Chunk as i32 {
@@ -102,6 +163,7 @@ pub unsafe extern "C" fn gen_universal_task(
str
}
Err(e) => {
println!("gen_universal_task failed at pre interpret step, error: {e}");
tracing::error!("gen_universal_task failed at pre interpret step, error: {e}");
return failed_handling_result();
}
@@ -109,10 +171,22 @@ pub unsafe extern "C" fn gen_universal_task(
} else {
c_char_to_str(task).to_string()
};
let ret =
libzkp::gen_universal_task(task_type, &task_json, c_char_to_str(fork_name), interpreter);
if let Ok((pi_hash, task_json, meta_json)) = ret {
let expected_vk = if expected_vk_len > 0 {
std::slice::from_raw_parts(expected_vk, expected_vk_len)
} else {
&[]
};
let ret = libzkp::gen_universal_task(
task_type,
&task_json,
c_char_to_str(fork_name),
expected_vk,
interpreter,
);
if let Ok((pi_hash, meta_json, task_json)) = ret {
let expected_pi_hash = pi_hash.0.map(|byte| byte as c_char);
HandlingResult {
ok: true as c_char,
@@ -121,6 +195,22 @@ pub unsafe extern "C" fn gen_universal_task(
expected_pi_hash,
}
} else {
if enable_dump() {
use std::time::{SystemTime, UNIX_EPOCH};
// Dump req.input to a temporary file
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
let c_str = unsafe { std::ffi::CStr::from_ptr(fork_name) };
let filename = format!("/tmp/task_{}_{}.json", c_str.to_str().unwrap(), timestamp);
if let Err(e) = std::fs::write(&filename, task_json.as_bytes()) {
eprintln!("Failed to write task to file {}: {}", filename, e);
} else {
println!("Dumped failed task to {}", filename);
}
}
tracing::error!("gen_universal_task failed, error: {:#}", ret.unwrap_err());
failed_handling_result()
}

View File

@@ -1,7 +1,7 @@
[package]
name = "prover"
version = "0.1.0"
edition = "2021"
version.workspace = true
edition.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

View File

@@ -2,12 +2,13 @@ mod prover;
mod types;
mod zk_circuits_handler;
use clap::{ArgAction, Parser};
use clap::{ArgAction, Parser, Subcommand};
use prover::{LocalProver, LocalProverConfig};
use scroll_proving_sdk::{
prover::ProverBuilder,
prover::{types::ProofType, ProverBuilder},
utils::{get_version, init_tracing},
};
use std::{fs::File, io::BufReader, path::Path};
#[derive(Parser, Debug)]
#[command(disable_version_flag = true)]
@@ -16,6 +17,9 @@ struct Args {
#[arg(long = "config", default_value = "conf/config.json")]
config_file: String,
#[arg(long = "forkname")]
fork_name: Option<String>,
/// Version of this prover
#[arg(short, long, action = ArgAction::SetTrue)]
version: bool,
@@ -23,6 +27,29 @@ struct Args {
/// Path of log file
#[arg(long = "log.file")]
log_file: Option<String>,
#[command(subcommand)]
command: Option<Commands>,
}
#[derive(Subcommand, Debug)]
enum Commands {
/// Dump vk of this prover
Dump {
/// path to save the verifier's asset
asset_path: String,
},
Handle {
/// path to save the verifier's asset
task_path: String,
},
}
#[derive(Debug, serde::Deserialize)]
struct HandleSet {
chunks: Vec<String>,
batches: Vec<String>,
bundles: Vec<String>,
}
#[tokio::main]
@@ -37,14 +64,59 @@ async fn main() -> eyre::Result<()> {
}
let cfg = LocalProverConfig::from_file(args.config_file)?;
let default_fork_name = cfg.circuits.keys().next().unwrap().clone();
let sdk_config = cfg.sdk_config.clone();
let local_prover = LocalProver::new(cfg);
let prover = ProverBuilder::new(sdk_config, local_prover)
.build()
.await
.map_err(|e| eyre::eyre!("build prover fail: {e}"))?;
let local_prover = LocalProver::new(cfg.clone());
prover.run().await;
match args.command {
Some(Commands::Dump { asset_path }) => {
let fork_name = args.fork_name.unwrap_or(default_fork_name);
println!("dump assets for {fork_name} into {asset_path}");
local_prover.dump_verifier_assets(&fork_name, asset_path.as_ref())?;
}
Some(Commands::Handle { task_path }) => {
let file = File::open(Path::new(&task_path))?;
let reader = BufReader::new(file);
let handle_set: HandleSet = serde_json::from_reader(reader)?;
let prover = ProverBuilder::new(sdk_config, local_prover)
.build()
.await
.map_err(|e| eyre::eyre!("build prover fail: {e}"))?;
let prover = std::sync::Arc::new(prover);
println!("Handling task set 1: chunks ...");
assert!(
prover
.clone()
.one_shot(&handle_set.chunks, ProofType::Chunk)
.await
);
println!("Done! Handling task set 2: batches ...");
assert!(
prover
.clone()
.one_shot(&handle_set.batches, ProofType::Batch)
.await
);
println!("Done! Handling task set 3: bundles ...");
assert!(
prover
.clone()
.one_shot(&handle_set.bundles, ProofType::Bundle)
.await
);
println!("All done!");
}
None => {
let prover = ProverBuilder::new(sdk_config, local_prover)
.build()
.await
.map_err(|e| eyre::eyre!("build prover fail: {e}"))?;
prover.run().await;
}
}
Ok(())
}

View File

@@ -1,6 +1,5 @@
use crate::zk_circuits_handler::{euclidV2::EuclidV2Handler, CircuitsHandler};
use async_trait::async_trait;
use base64::{prelude::BASE64_STANDARD, Engine};
use eyre::Result;
use scroll_proving_sdk::{
config::Config as SdkConfig,
@@ -9,6 +8,7 @@ use scroll_proving_sdk::{
GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
QueryTaskResponse, TaskStatus,
},
types::ProofType,
ProvingService,
},
};
@@ -16,7 +16,8 @@ use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
fs::File,
sync::Arc,
path::Path,
sync::{Arc, OnceLock},
time::{SystemTime, UNIX_EPOCH},
};
use tokio::{runtime::Handle, sync::Mutex, task::JoinHandle};
@@ -45,6 +46,9 @@ impl LocalProverConfig {
pub struct CircuitConfig {
pub hard_fork_name: String,
pub workspace_path: String,
/// cached vk value to save some initial cost, for debugging only
#[serde(default)]
pub vks: HashMap<ProofType, String>,
}
pub struct LocalProver {
@@ -52,7 +56,7 @@ pub struct LocalProver {
next_task_id: u64,
current_task: Option<JoinHandle<Result<String>>>,
active_handler: Option<(String, Arc<dyn CircuitsHandler>)>,
handlers: HashMap<String, OnceLock<Arc<dyn CircuitsHandler>>>,
}
#[async_trait]
@@ -62,13 +66,13 @@ impl ProvingService for LocalProver {
}
async fn get_vks(&self, req: GetVkRequest) -> GetVkResponse {
let mut vks = vec![];
for hard_fork_name in self.config.circuits.keys() {
let handler = self.new_handler(hard_fork_name);
for (hard_fork_name, cfg) in self.config.circuits.iter() {
for proof_type in &req.proof_types {
let vk = handler.get_vk(*proof_type).await;
if let Some(vk) = vk {
vks.push(BASE64_STANDARD.encode(vk));
if let Some(vk) = cfg.vks.get(proof_type) {
vks.push(vk.clone())
} else {
let handler = self.get_or_init_handler(hard_fork_name);
vks.push(handler.get_vk(*proof_type).await);
}
}
}
@@ -76,11 +80,8 @@ impl ProvingService for LocalProver {
GetVkResponse { vks, error: None }
}
async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
self.set_active_handler(&req.hard_fork_name);
match self
.do_prove(req, self.active_handler.as_ref().unwrap().1.clone())
.await
{
let handler = self.get_or_init_handler(&req.hard_fork_name);
match self.do_prove(req, handler).await {
Ok(resp) => resp,
Err(e) => ProveResponse {
status: TaskStatus::Failed,
@@ -133,11 +134,16 @@ impl ProvingService for LocalProver {
impl LocalProver {
pub fn new(config: LocalProverConfig) -> Self {
let handlers = config
.circuits
.keys()
.map(|k| (k.clone(), OnceLock::new()))
.collect();
Self {
config,
next_task_id: 0,
current_task: None,
active_handler: None,
handlers,
}
}
@@ -168,25 +174,76 @@ impl LocalProver {
})
}
fn set_active_handler(&mut self, hard_fork_name: &str) {
if let Some(handler) = &self.active_handler {
if handler.0 == hard_fork_name {
return;
}
}
self.active_handler = Some((hard_fork_name.to_string(), self.new_handler(hard_fork_name)));
fn get_or_init_handler(&self, hard_fork_name: &str) -> Arc<dyn CircuitsHandler> {
let lk = self
.handlers
.get(hard_fork_name)
.expect("coordinator should never sent unexpected forkname");
lk.get_or_init(|| self.new_handler(hard_fork_name)).clone()
}
fn new_handler(&self, hard_fork_name: &str) -> Arc<dyn CircuitsHandler> {
pub fn new_handler(&self, hard_fork_name: &str) -> Arc<dyn CircuitsHandler> {
// if we got assigned a task for an unknown hard fork, there is something wrong in the
// coordinator
let config = self.config.circuits.get(hard_fork_name).unwrap();
match hard_fork_name {
"euclidV2" => Arc::new(Arc::new(Mutex::new(EuclidV2Handler::new(
&config.workspace_path,
)))) as Arc<dyn CircuitsHandler>,
_ => unreachable!(),
// The new EuclidV2Handler is a universal handler
// We can add other handler implements if needed
"some future forkname" => unreachable!(),
_ => Arc::new(Arc::new(Mutex::new(EuclidV2Handler::new(config))))
as Arc<dyn CircuitsHandler>,
}
}
pub fn dump_verifier_assets(&self, hard_fork_name: &str, out_path: &Path) -> Result<()> {
let config = self
.config
.circuits
.get(hard_fork_name)
.ok_or_else(|| eyre::eyre!("no corresponding config for fork {hard_fork_name}"))?;
if !config.vks.is_empty() {
eyre::bail!("clean vks cache first or we will have wrong dumped vk");
}
let workspace_path = &config.workspace_path;
let universal_prover = EuclidV2Handler::new(config);
let _ = universal_prover
.get_prover()
.dump_universal_verifier(Some(out_path))?;
#[derive(Debug, serde::Serialize)]
struct VKDump {
pub chunk_vk: String,
pub batch_vk: String,
pub bundle_vk: String,
}
let dump = VKDump {
chunk_vk: universal_prover.get_vk_and_cache(ProofType::Chunk),
batch_vk: universal_prover.get_vk_and_cache(ProofType::Batch),
bundle_vk: universal_prover.get_vk_and_cache(ProofType::Bundle),
};
let f = File::create(out_path.join("openVmVk.json"))?;
serde_json::to_writer(f, &dump)?;
// Copy verifier.bin from workspace bundle directory to output path
let bundle_verifier_path = Path::new(workspace_path)
.join("bundle")
.join("verifier.bin");
if bundle_verifier_path.exists() {
let dest_path = out_path.join("verifier.bin");
std::fs::copy(&bundle_verifier_path, &dest_path)
.map_err(|e| eyre::eyre!("Failed to copy verifier.bin: {}", e))?;
} else {
eprintln!(
"Warning: verifier.bin not found at {:?}",
bundle_verifier_path
);
}
Ok(())
}
}

View File

@@ -11,7 +11,7 @@ use std::path::Path;
#[async_trait]
pub trait CircuitsHandler: Sync + Send {
async fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>>;
async fn get_vk(&self, task_type: ProofType) -> String;
async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String>;
}
@@ -54,14 +54,12 @@ impl Phase {
let dir_cache = Some(workspace_path.join("cache"));
let path_app_config = workspace_path.join("bundle/openvm.toml");
let segment_len = Some((1 << 22) - 100);
match self {
Phase::EuclidV2 => ProverConfig {
dir_cache,
path_app_config,
segment_len,
path_app_exe: workspace_path.join("bundle/app.vmexe"),
..Default::default()
},
ProverConfig {
dir_cache,
path_app_config,
segment_len,
path_app_exe: workspace_path.join("bundle/app.vmexe"),
..Default::default()
}
}
}

View File

@@ -1,7 +1,13 @@
use std::{path::Path, sync::Arc};
use std::{
collections::HashMap,
path::Path,
sync::{Arc, OnceLock},
};
use super::{CircuitsHandler, Phase};
use crate::prover::CircuitConfig;
use async_trait::async_trait;
use base64::{prelude::BASE64_STANDARD, Engine};
use eyre::Result;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, ProofType};
use scroll_zkvm_prover_euclid::{BatchProver, BundleProverEuclidV2, ChunkProver};
@@ -11,12 +17,14 @@ pub struct EuclidV2Handler {
chunk_prover: ChunkProver,
batch_prover: BatchProver,
bundle_prover: BundleProverEuclidV2,
cached_vks: HashMap<ProofType, OnceLock<String>>,
}
unsafe impl Send for EuclidV2Handler {}
impl EuclidV2Handler {
pub fn new(workspace_path: &str) -> Self {
pub fn new(cfg: &CircuitConfig) -> Self {
let workspace_path = &cfg.workspace_path;
let p = Phase::EuclidV2;
let workspace_path = Path::new(workspace_path);
let chunk_prover = ChunkProver::setup(p.phase_spec_chunk(workspace_path))
@@ -28,46 +36,84 @@ impl EuclidV2Handler {
let bundle_prover = BundleProverEuclidV2::setup(p.phase_spec_bundle(workspace_path))
.expect("Failed to setup bundle prover");
let build_vk_cache = |proof_type: ProofType| {
let vk = if let Some(vk) = cfg.vks.get(&proof_type) {
OnceLock::from(vk.clone())
} else {
OnceLock::new()
};
(proof_type, vk)
};
Self {
chunk_prover,
batch_prover,
bundle_prover,
cached_vks: HashMap::from([
build_vk_cache(ProofType::Chunk),
build_vk_cache(ProofType::Batch),
build_vk_cache(ProofType::Bundle),
]),
}
}
/// get_prover get the inner prover, later we would replace chunk/batch/bundle_prover with
/// universal prover, before that, use bundle_prover as the represent one
pub fn get_prover(&self) -> &BundleProverEuclidV2 {
&self.bundle_prover
}
pub fn get_vk_and_cache(&self, task_type: ProofType) -> String {
match task_type {
ProofType::Chunk => self.cached_vks[&ProofType::Chunk]
.get_or_init(|| BASE64_STANDARD.encode(self.chunk_prover.get_app_vk())),
ProofType::Batch => self.cached_vks[&ProofType::Batch]
.get_or_init(|| BASE64_STANDARD.encode(self.batch_prover.get_app_vk())),
ProofType::Bundle => self.cached_vks[&ProofType::Bundle]
.get_or_init(|| BASE64_STANDARD.encode(self.bundle_prover.get_app_vk())),
_ => unreachable!("Unsupported proof type {:?}", task_type),
}
.clone()
}
}
#[async_trait]
impl CircuitsHandler for Arc<Mutex<EuclidV2Handler>> {
async fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>> {
Some(match task_type {
ProofType::Chunk => self.try_lock().unwrap().chunk_prover.get_app_vk(),
ProofType::Batch => self.try_lock().unwrap().batch_prover.get_app_vk(),
ProofType::Bundle => self.try_lock().unwrap().bundle_prover.get_app_vk(),
_ => unreachable!("Unsupported proof type"),
})
async fn get_vk(&self, task_type: ProofType) -> String {
self.lock().await.get_vk_and_cache(task_type)
}
async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
let handler_self = self.lock().await;
let u_task: ProvingTask = serde_json::from_str(&prove_request.input)?;
let expected_vk = handler_self.get_vk_and_cache(prove_request.proof_type);
if BASE64_STANDARD.encode(&u_task.vk) != expected_vk {
eyre::bail!(
"vk is not match!, prove type {:?}, expected {}, get {}",
prove_request.proof_type,
expected_vk,
BASE64_STANDARD.encode(&u_task.vk),
);
}
let proof = match prove_request.proof_type {
ProofType::Chunk => self
.try_lock()
.unwrap()
ProofType::Chunk => handler_self
.chunk_prover
.gen_proof_universal(&u_task, false)?,
ProofType::Batch => self
.try_lock()
.unwrap()
ProofType::Batch => handler_self
.batch_prover
.gen_proof_universal(&u_task, false)?,
ProofType::Bundle => self
.try_lock()
.unwrap()
ProofType::Bundle => handler_self
.bundle_prover
.gen_proof_universal(&u_task, true)?,
_ => return Err(eyre::eyre!("Unsupported proof type")),
_ => {
return Err(eyre::eyre!(
"Unsupported proof type {:?}",
prove_request.proof_type
))
}
};
//TODO: check expected PI
Ok(serde_json::to_string(&proof)?)
}
}

1
database/.gitignore vendored
View File

@@ -1,2 +1,3 @@
/build/bin
.idea
localdbg

File diff suppressed because one or more lines are too long

View File

@@ -70,7 +70,7 @@ func action(ctx *cli.Context) error {
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
}
go utils.Loop(subCtx, 2*time.Second, blobUploader.UploadBlobToS3)
go utils.Loop(subCtx, 1*time.Second, blobUploader.UploadBlobToS3)
// Finish start all blob-uploader functions.
log.Info("Start blob-uploader successfully", "version", version.Version)

View File

@@ -107,7 +107,7 @@ func action(ctx *cli.Context) error {
}
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, genesis.Config, db, registry)
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, registry)
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, cfg.L2Config.RelayerConfig.ValidiumMode, registry)
bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, minCodecVersion, genesis.Config, db, registry)
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, registry)

View File

@@ -36,6 +36,7 @@
"endpoint": "https://rpc.scroll.io",
"l2_message_queue_address": "0x0000000000000000000000000000000000000000",
"relayer_config": {
"validium_mode": false,
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
@@ -123,4 +124,4 @@
"maxOpenNum": 200,
"maxIdleNum": 20
}
}
}

View File

@@ -53,6 +53,8 @@ type ChainMonitor struct {
// RelayerConfig loads relayer configuration items.
// What we need to pay attention to is that
type RelayerConfig struct {
// ValidiumMode indicates if the relayer is in validium mode.
ValidiumMode bool `json:"validium_mode"`
// RollupContractAddress store the rollup contract address.
RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
// GasPriceOracleContractAddress store the scroll messenger contract address.
@@ -73,8 +75,6 @@ type RelayerConfig struct {
// Indicates if bypass features specific to testing environments are enabled.
EnableTestEnvBypassFeatures bool `json:"enable_test_env_bypass_features"`
// The timeout in seconds for finalizing a batch without proof, only used when EnableTestEnvBypassFeatures is true.
FinalizeBatchWithoutProofTimeoutSec uint64 `json:"finalize_batch_without_proof_timeout_sec"`
// The timeout in seconds for finalizing a bundle without proof, only used when EnableTestEnvBypassFeatures is true.
FinalizeBundleWithoutProofTimeoutSec uint64 `json:"finalize_bundle_without_proof_timeout_sec"`
}

View File

@@ -25,8 +25,6 @@ type S3Uploader struct {
func NewS3Uploader(cfg *config.AWSS3Config) (*S3Uploader, error) {
// load AWS config
var opts []func(*awsconfig.LoadOptions) error
opts = append(opts, awsconfig.WithRegion(cfg.Region))
// if AccessKey && SecretKey provided, use it
if cfg.AccessKey != "" && cfg.SecretKey != "" {
opts = append(opts, awsconfig.WithCredentialsProvider(
@@ -38,6 +36,10 @@ func NewS3Uploader(cfg *config.AWSS3Config) (*S3Uploader, error) {
)
}
if cfg.Region != "" {
opts = append(opts, awsconfig.WithRegion(cfg.Region))
}
awsCfg, err := awsconfig.LoadDefaultConfig(context.Background(), opts...)
if err != nil {
return nil, fmt.Errorf("failed to load default config: %w", err)

View File

@@ -79,6 +79,7 @@ type Layer2Relayer struct {
commitSender *sender.Sender
finalizeSender *sender.Sender
l1RollupABI *abi.ABI
validiumABI *abi.ABI
l2GasOracleABI *abi.ABI
@@ -172,6 +173,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
commitSender: commitSender,
finalizeSender: finalizeSender,
l1RollupABI: bridgeAbi.ScrollChainABI,
validiumABI: bridgeAbi.ValidiumABI,
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
batchStrategy: strategy,
@@ -239,10 +241,11 @@ func (r *Layer2Relayer) initializeGenesis() error {
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
Blocks: chunk.Blocks,
}
var dbBatch *orm.Batch
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, encoding.CodecV0, rutils.BatchMetrics{}, dbTX)
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, encoding.CodecV0, rutils.BatchMetrics{ValidiumMode: r.cfg.ValidiumMode}, dbTX)
if err != nil {
return fmt.Errorf("failed to insert batch: %v", err)
}
@@ -274,10 +277,23 @@ func (r *Layer2Relayer) initializeGenesis() error {
}
func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte, stateRoot common.Hash) error {
// encode "importGenesisBatch" transaction calldata
calldata, packErr := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
if packErr != nil {
return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, packErr)
var calldata []byte
var packErr error
if r.cfg.ValidiumMode {
// validium mode: only pass batchHeader
calldata, packErr = r.validiumABI.Pack("importGenesisBatch", batchHeader)
if packErr != nil {
return fmt.Errorf("failed to pack validium importGenesisBatch with batch header: %v. error: %v", common.Bytes2Hex(batchHeader), packErr)
}
log.Info("Validium importGenesis", "calldata", common.Bytes2Hex(calldata))
} else {
// rollup mode: pass batchHeader and stateRoot
calldata, packErr = r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
if packErr != nil {
return fmt.Errorf("failed to pack rollup importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, packErr)
}
log.Info("Rollup importGenesis", "calldata", common.Bytes2Hex(calldata), "stateRoot", stateRoot)
}
// submit genesis batch to L1 rollup contract
@@ -285,7 +301,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
if err != nil {
return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
}
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchHash", batchHash)
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchHash", batchHash, "validium", r.cfg.ValidiumMode)
// wait for confirmation
// we assume that no other transactions are sent before initializeGenesis completes
@@ -310,20 +326,23 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
if !confirmation.IsSuccessful {
return errors.New("import genesis batch tx failed")
}
log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash.String())
log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash.String(), "validium", r.cfg.ValidiumMode)
return nil
}
}
}
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
// Pending batchess are submitted if one of the following conditions is met:
// Pending batches are submitted if one of the following conditions is met:
// - the first batch is too old -> forceSubmit
// - backlogCount > r.cfg.BatchSubmission.BacklogMax -> forceSubmit
// - we have at least minBatches AND price hits a desired target price
func (r *Layer2Relayer) ProcessPendingBatches() {
// Get effective batch limits based on whether validium mode is enabled.
minBatches, maxBatches := r.getEffectiveBatchLimits()
// get pending batches from database in ascending order by their index.
dbBatches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, r.cfg.BatchSubmission.MaxBatches)
dbBatches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, maxBatches)
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
@@ -432,21 +451,21 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
break
}
if batchesToSubmitLen < r.cfg.BatchSubmission.MaxBatches {
if batchesToSubmitLen < maxBatches {
batchesToSubmit = append(batchesToSubmit, &dbBatchWithChunks{
Batch: dbBatch,
Chunks: dbChunks,
})
}
if len(batchesToSubmit) >= r.cfg.BatchSubmission.MaxBatches {
if len(batchesToSubmit) >= maxBatches {
break
}
}
// we only submit batches if we have a timeout or if we have enough batches to submit
if !forceSubmit && len(batchesToSubmit) < r.cfg.BatchSubmission.MinBatches {
log.Debug("Not enough batches to submit", "count", len(batchesToSubmit), "minBatches", r.cfg.BatchSubmission.MinBatches, "maxBatches", r.cfg.BatchSubmission.MaxBatches)
if !forceSubmit && len(batchesToSubmit) < minBatches {
log.Debug("Not enough batches to submit", "count", len(batchesToSubmit), "minBatches", minBatches, "maxBatches", maxBatches)
return
}
@@ -466,10 +485,22 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
codecVersion := encoding.CodecVersion(firstBatch.CodecVersion)
switch codecVersion {
case encoding.CodecV7, encoding.CodecV8:
calldata, blobs, maxBlockHeight, totalGasUsed, err = r.constructCommitBatchPayloadCodecV7(batchesToSubmit, firstBatch, lastBatch)
if err != nil {
log.Error("failed to construct constructCommitBatchPayloadCodecV7 payload for V7", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "err", err)
return
if r.cfg.ValidiumMode {
if len(batchesToSubmit) != 1 {
log.Error("validium mode only supports committing one batch at a time", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "batches count", len(batchesToSubmit))
return
}
calldata, maxBlockHeight, totalGasUsed, err = r.constructCommitBatchPayloadValidium(batchesToSubmit[0])
if err != nil {
log.Error("failed to construct validium payload", "codecVersion", codecVersion, "index", batchesToSubmit[0].Batch.Index, "err", err)
return
}
} else {
calldata, blobs, maxBlockHeight, totalGasUsed, err = r.constructCommitBatchPayloadCodecV7(batchesToSubmit, firstBatch, lastBatch)
if err != nil {
log.Error("failed to construct normal payload", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "err", err)
return
}
}
default:
log.Error("unsupported codec version in ProcessPendingBatches", "codecVersion", codecVersion, "start index", firstBatch, "end index", lastBatch.Index)
@@ -522,6 +553,14 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
log.Info("Sent the commitBatches tx to layer1", "batches count", len(batchesToSubmit), "start index", firstBatch.Index, "start hash", firstBatch.Hash, "end index", lastBatch.Index, "end hash", lastBatch.Hash, "tx hash", txHash.String())
}
// getEffectiveBatchLimits returns the effective min and max batch limits based on whether validium mode is enabled.
func (r *Layer2Relayer) getEffectiveBatchLimits() (int, int) {
if r.cfg.ValidiumMode {
return 1, 1 // minBatches=1, maxBatches=1
}
return r.cfg.BatchSubmission.MinBatches, r.cfg.BatchSubmission.MaxBatches
}
func (r *Layer2Relayer) contextIDFromBatches(codecVersion encoding.CodecVersion, batches []*dbBatchWithChunks) string {
contextIDs := []string{fmt.Sprintf("v%d", codecVersion)}
for _, batch := range batches {
@@ -690,9 +729,16 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
var calldata []byte
switch encoding.CodecVersion(bundle.CodecVersion) {
case encoding.CodecV7, encoding.CodecV8:
calldata, err = r.constructFinalizeBundlePayloadCodecV7(dbBatch, endChunk, aggProof)
if err != nil {
return fmt.Errorf("failed to construct finalizeBundle payload codecv7, bundle index: %v, last batch index: %v, err: %w", bundle.Index, dbBatch.Index, err)
if r.cfg.ValidiumMode {
calldata, err = r.constructFinalizeBundlePayloadValidium(dbBatch, endChunk, aggProof)
if err != nil {
return fmt.Errorf("failed to construct validium finalizeBundle payload, codec version: %v, bundle index: %v, last batch index: %v, err: %w", dbBatch.CodecVersion, bundle.Index, dbBatch.Index, err)
}
} else {
calldata, err = r.constructFinalizeBundlePayloadCodecV7(dbBatch, endChunk, aggProof)
if err != nil {
return fmt.Errorf("failed to construct normal finalizeBundle payload, codec version: %v, bundle index: %v, last batch index: %v, err: %w", dbBatch.CodecVersion, bundle.Index, dbBatch.Index, err)
}
}
default:
return fmt.Errorf("unsupported codec version in finalizeBundle, bundle index: %v, version: %d", bundle.Index, bundle.CodecVersion)
@@ -951,6 +997,35 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*db
return calldata, blobs, maxBlockHeight, totalGasUsed, nil
}
func (r *Layer2Relayer) constructCommitBatchPayloadValidium(batch *dbBatchWithChunks) ([]byte, uint64, uint64, error) {
// Calculate metrics
var maxBlockHeight uint64
var totalGasUsed uint64
for _, c := range batch.Chunks {
if c.EndBlockNumber > maxBlockHeight {
maxBlockHeight = c.EndBlockNumber
}
totalGasUsed += c.TotalL2TxGas
}
// Get the commitment from the batch data: for validium mode, we use the last L2 block hash as the commitment to the off-chain data
// Get the last chunk from the batch to find the end block hash
// TODO: This is a temporary solution, we might use a larger commitment in the future
if len(batch.Chunks) == 0 {
return nil, 0, 0, fmt.Errorf("last batch has no chunks")
}
lastChunk := batch.Chunks[len(batch.Chunks)-1]
commitment := common.HexToHash(lastChunk.EndBlockHash)
version := encoding.CodecVersion(batch.Batch.CodecVersion)
calldata, err := r.validiumABI.Pack("commitBatch", version, common.HexToHash(batch.Batch.ParentBatchHash), common.HexToHash(batch.Batch.StateRoot), common.HexToHash(batch.Batch.WithdrawRoot), commitment[:])
if err != nil {
return nil, 0, 0, fmt.Errorf("failed to pack commitBatch: %w", err)
}
log.Info("Validium commitBatch", "maxBlockHeight", maxBlockHeight, "commitment", commitment.Hex())
return calldata, maxBlockHeight, totalGasUsed, nil
}
func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, error) {
if aggProof != nil { // finalizeBundle with proof.
calldata, packErr := r.l1RollupABI.Pack(
@@ -967,7 +1042,8 @@ func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch
return calldata, nil
}
fmt.Println("packing finalizeBundlePostEuclidV2NoProof", len(dbBatch.BatchHeader), dbBatch.CodecVersion, dbBatch.BatchHeader, new(big.Int).SetUint64(endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk), common.HexToHash(dbBatch.StateRoot), common.HexToHash(dbBatch.WithdrawRoot))
log.Info("Packing finalizeBundlePostEuclidV2NoProof", "batchHeaderLength", len(dbBatch.BatchHeader), "codecVersion", dbBatch.CodecVersion, "totalL1Messages", endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk, "stateRoot", dbBatch.StateRoot, "withdrawRoot", dbBatch.WithdrawRoot)
// finalizeBundle without proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBundlePostEuclidV2NoProof",
@@ -982,6 +1058,26 @@ func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch
return calldata, nil
}
func (r *Layer2Relayer) constructFinalizeBundlePayloadValidium(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, error) {
log.Info("Packing validium finalizeBundle", "batchHeaderLength", len(dbBatch.BatchHeader), "codecVersion", dbBatch.CodecVersion, "totalL1Messages", endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk, "stateRoot", dbBatch.StateRoot, "withdrawRoot", dbBatch.WithdrawRoot, "withProof", aggProof != nil)
var proof []byte
if aggProof != nil {
proof = aggProof.Proof()
}
calldata, packErr := r.validiumABI.Pack(
"finalizeBundle",
dbBatch.BatchHeader,
new(big.Int).SetUint64(endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk),
proof,
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack validium finalizeBundle: %w", packErr)
}
return calldata, nil
}
// StopSenders stops the senders of the rollup-relayer to prevent querying the removed pending_transaction table in unit tests.
// for unit test
func (r *Layer2Relayer) StopSenders() {

View File

@@ -32,6 +32,7 @@ type BatchProposer struct {
cfg *config.BatchProposerConfig
replayMode bool
validiumMode bool
minCodecVersion encoding.CodecVersion
chainCfg *params.ChainConfig
@@ -53,7 +54,7 @@ type BatchProposer struct {
}
// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, validiumMode bool, reg prometheus.Registerer) *BatchProposer {
log.Info("new batch proposer", "batchTimeoutSec", cfg.BatchTimeoutSec, "maxBlobSize", maxBlobSize, "maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize)
p := &BatchProposer{
@@ -63,7 +64,8 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minC
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
cfg: cfg,
replayMode: false,
replayMode: false, // default is false, set to true when using proposer tool
validiumMode: validiumMode,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,
@@ -171,7 +173,7 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en
// recalculate batch metrics after truncation
var calcErr error
metrics, calcErr = utils.CalculateBatchMetrics(batch, codecVersion)
metrics, calcErr = utils.CalculateBatchMetrics(batch, codecVersion, p.validiumMode)
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics, batch index: %v, error: %w", batch.Index, calcErr)
}
@@ -287,7 +289,7 @@ func (p *BatchProposer) proposeBatch() error {
batch.Blocks = append(batch.Blocks, chunk.Blocks...)
batch.PostL1MessageQueueHash = common.HexToHash(dbChunks[i].PostL1MessageQueueHash)
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version())
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version(), p.validiumMode)
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
}
@@ -312,7 +314,7 @@ func (p *BatchProposer) proposeBatch() error {
batch.PostL1MessageQueueHash = common.HexToHash(dbChunks[i-1].PostL1MessageQueueHash)
batch.Blocks = batch.Blocks[:len(batch.Blocks)-len(lastChunk.Blocks)]
metrics, err = utils.CalculateBatchMetrics(&batch, codec.Version())
metrics, err = utils.CalculateBatchMetrics(&batch, codec.Version(), p.validiumMode)
if err != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", err)
}
@@ -322,7 +324,7 @@ func (p *BatchProposer) proposeBatch() error {
}
}
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version())
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version(), p.validiumMode)
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
}

View File

@@ -100,7 +100,7 @@ func testBatchProposerLimitsCodecV7(t *testing.T) {
DarwinV2Time: new(uint64),
EuclidTime: new(uint64),
EuclidV2Time: new(uint64),
}, db, nil)
}, db, false /* rollup mode */, nil)
bp.TryProposeBatch()
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
@@ -178,7 +178,7 @@ func testBatchProposerBlobSizeLimitCodecV7(t *testing.T) {
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: math.MaxUint32,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
for i := 0; i < 2; i++ {
bp.TryProposeBatch()
@@ -246,7 +246,7 @@ func testBatchProposerMaxChunkNumPerBatchLimitCodecV7(t *testing.T) {
MaxChunksPerBatch: 45,
BatchTimeoutSec: math.MaxUint32,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
bp.TryProposeBatch()
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
@@ -335,7 +335,7 @@ func testBatchProposerUncompressedBatchBytesLimitCodecV8(t *testing.T) {
MaxChunksPerBatch: math.MaxInt32, // No chunk count limit
BatchTimeoutSec: math.MaxUint32, // No timeout limit
MaxUncompressedBatchBytesSize: 4 * 1024, // 4KiB limit
}, encoding.CodecV8, chainConfig, db, nil)
}, encoding.CodecV8, chainConfig, db, false /* rollup mode */, nil)
bp.TryProposeBatch()

View File

@@ -103,7 +103,7 @@ func testBundleProposerLimitsCodecV7(t *testing.T) {
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: 0,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
cp.TryProposeChunk() // chunk1 contains block1
bap.TryProposeBatch() // batch1 contains chunk1

View File

@@ -125,7 +125,7 @@ func NewProposerTool(ctx context.Context, cancel context.CancelFunc, cfg *config
chunkProposer := NewChunkProposer(ctx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, chainCfg, db, nil)
chunkProposer.SetReplayDB(dbForReplay)
batchProposer := NewBatchProposer(ctx, cfg.L2Config.BatchProposerConfig, minCodecVersion, chainCfg, db, nil)
batchProposer := NewBatchProposer(ctx, cfg.L2Config.BatchProposerConfig, minCodecVersion, chainCfg, db, false /* rollup mode */, nil)
batchProposer.SetReplayDB(dbForReplay)
bundleProposer := NewBundleProposer(ctx, cfg.L2Config.BundleProposerConfig, minCodecVersion, chainCfg, db, nil)

View File

@@ -285,7 +285,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
startChunkIndex = parentBatch.EndChunkIndex + 1
}
batchMeta, err := rutils.GetBatchMetadata(batch, codecVersion)
batchMeta, err := rutils.GetBatchMetadata(batch, codecVersion, metrics.ValidiumMode)
if err != nil {
log.Error("failed to get batch metadata", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)

View File

@@ -115,7 +115,8 @@ func (o *BlobUpload) InsertOrUpdateBlobUpload(ctx context.Context, batchIndex ui
return fmt.Errorf("BlobUpload.InsertOrUpdateBlobUpload query error: %w, batch index: %v, batch_hash: %v, platform: %v", err, batchIndex, batchHash, platform)
}
if err := db.Model(&existing).Update("status", int16(status)).Error; err != nil {
if err := db.Model(&existing).Where("batch_index = ? AND batch_hash = ? AND platform = ? AND deleted_at IS NULL",
batchIndex, batchHash, int16(platform)).Update("status", int16(status)).Error; err != nil {
return fmt.Errorf("BlobUpload.InsertOrUpdateBlobUpload update error: %w, batch index: %v, batch_hash: %v, platform: %v", err, batchIndex, batchHash, platform)
}

View File

@@ -1,11 +1,13 @@
package utils
import (
"encoding/binary"
"fmt"
"time"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
)
// ChunkMetrics indicates the metrics for proposing a chunk.
@@ -60,15 +62,18 @@ type BatchMetrics struct {
L1CommitBlobSize uint64
L1CommitUncompressedBatchBytesSize uint64
ValidiumMode bool // default false: rollup mode
// timing metrics
EstimateBlobSizeTime time.Duration
}
// CalculateBatchMetrics calculates batch metrics.
func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetrics, error) {
func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVersion, validiumMode bool) (*BatchMetrics, error) {
metrics := &BatchMetrics{
NumChunks: uint64(len(batch.Chunks)),
FirstBlockTimestamp: batch.Chunks[0].Blocks[0].Header.Time,
ValidiumMode: validiumMode,
}
codec, err := encoding.CodecFromVersion(codecVersion)
@@ -119,8 +124,59 @@ type BatchMetadata struct {
ChallengeDigest common.Hash
}
// encodeBatchHeaderValidium encodes the batch header for validium mode and
// returns both the encoded bytes and their Keccak256 hash.
//
// Header layout (fixed width, offsets in order):
//   version (1B) || index (8B, big-endian) || parentBatchHash (32B) ||
//   postStateRoot (32B) || postWithdrawRoot (32B) || dataCommitment (32B)
//
// Returns an error if the batch is nil or contains no blocks.
func encodeBatchHeaderValidium(b *encoding.Batch, codecVersion encoding.CodecVersion) ([]byte, common.Hash, error) {
	if b == nil {
		// Must not touch b on this path: dereferencing a nil batch (e.g. b.Index)
		// would panic inside the very guard meant to prevent it.
		return nil, common.Hash{}, fmt.Errorf("batch is nil, version: %v", codecVersion)
	}
	if len(b.Blocks) == 0 {
		return nil, common.Hash{}, fmt.Errorf("batch contains no blocks, version: %v, index: %v", codecVersion, b.Index)
	}

	// For validium mode, use the last block hash as commitment to the off-chain data
	// TODO: This is a temporary solution, we might use a larger commitment in the future
	lastBlock := b.Blocks[len(b.Blocks)-1]
	commitment := lastBlock.Header.Hash()

	// Batch header field sizes
	const (
		versionSize      = 1
		indexSize        = 8
		parentHashSize   = 32
		stateRootSize    = 32
		withdrawRootSize = 32
		commitmentSize   = 32 // TODO: 32 bytes for now, might use larger commitment in the future

		// Total size of validium batch header
		validiumBatchHeaderSize = versionSize + indexSize + parentHashSize + stateRootSize + withdrawRootSize + commitmentSize
	)

	batchBytes := make([]byte, validiumBatchHeaderSize)

	// Define offsets for each field
	var (
		versionOffset      = 0
		indexOffset        = versionOffset + versionSize
		parentHashOffset   = indexOffset + indexSize
		stateRootOffset    = parentHashOffset + parentHashSize
		withdrawRootOffset = stateRootOffset + stateRootSize
		commitmentOffset   = withdrawRootOffset + withdrawRootSize
	)

	batchBytes[versionOffset] = uint8(codecVersion)                                                                       // version
	binary.BigEndian.PutUint64(batchBytes[indexOffset:indexOffset+indexSize], b.Index)                                    // batch index
	copy(batchBytes[parentHashOffset:parentHashOffset+parentHashSize], b.ParentBatchHash[0:parentHashSize])               // parentBatchHash
	copy(batchBytes[stateRootOffset:stateRootOffset+stateRootSize], b.StateRoot().Bytes()[0:stateRootSize])               // postStateRoot
	copy(batchBytes[withdrawRootOffset:withdrawRootOffset+withdrawRootSize], b.WithdrawRoot().Bytes()[0:withdrawRootSize]) // postWithdrawRoot
	copy(batchBytes[commitmentOffset:commitmentOffset+commitmentSize], commitment[0:commitmentSize])                      // data commitment

	hash := crypto.Keccak256Hash(batchBytes)
	return batchBytes, hash, nil
}
// GetBatchMetadata retrieves the metadata of a batch.
func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetadata, error) {
func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion, validiumMode bool) (*BatchMetadata, error) {
codec, err := encoding.CodecFromVersion(codecVersion)
if err != nil {
return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err)
@@ -139,9 +195,17 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
ChallengeDigest: daBatch.ChallengeDigest(),
}
// If this function is used in Validium, we encode the batch header differently.
if validiumMode {
batchMeta.BatchBytes, batchMeta.BatchHash, err = encodeBatchHeaderValidium(batch, codecVersion)
if err != nil {
return nil, fmt.Errorf("failed to encode batch header for validium, version: %v, index: %v, err: %w", codecVersion, batch.Index, err)
}
}
batchMeta.BatchBlobDataProof, err = daBatch.BlobDataProofForPointEvaluation()
if err != nil {
return nil, fmt.Errorf("failed to get blob data proof, version: %v, err: %w", codecVersion, err)
return nil, fmt.Errorf("failed to get blob data proof, version: %v, index: %v, err: %w", codecVersion, batch.Index, err)
}
numChunks := len(batch.Chunks)

View File

@@ -128,7 +128,7 @@ func testCommitBatchAndFinalizeBundleCodecV7(t *testing.T) {
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: 300,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{
MaxBatchNumPerBundle: 2,

View File

@@ -1,32 +0,0 @@
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
"nightly-features",
], tag = "v0.2.0" }
p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }

9
zkvm-prover/.work/.gitignore vendored Normal file
View File

@@ -0,0 +1,9 @@
*.vmexe
openvm.toml
*.bin
*.sol
cache
db
*.json
?
root-verifier*

View File

@@ -1,5 +1,10 @@
.PHONY: prover lint tests_binary
RUST_MIN_STACK ?= 16777216
export RUST_MIN_STACK
CIRCUIT_STUFF = .work/euclid/chunk/app.vmexe .work/feynman/chunk/app.vmexe
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
PLONKY3_VERSION=$(shell grep -m 1 "Plonky3.git" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
@@ -16,7 +21,7 @@ endif
ZKVM_COMMIT=$(shell echo ${ZKVM_VERSION} | cut -d " " -f2)
$(info ZKVM_COMMIT is ${ZKVM_COMMIT})
PLONKY3_GPU_VERSION=$(shell ./print_plonky3gpu_version.sh | sed -n '2p')
$(info PLONKY3_VERSION is ${PLONKY3_VERSION})
GIT_REV=$(shell git rev-parse --short HEAD)
GO_TAG=$(shell grep "var tag = " ../common/version/version.go | cut -d "\"" -f2)
@@ -27,16 +32,17 @@ else
$(info GO_TAG is ${GO_TAG})
endif
ifeq (${PLONKY3_GPU_VERSION},)
# use plonky3 with CPU
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_VERSION}
else
# use halo2_gpu
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION}
endif
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_VERSION}
E2E_HANDLE_SET = ../tests/prover-e2e/testset.json
DUMP_DIR = .work
prover:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --release
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZKVM_COMMIT=${ZKVM_COMMIT} $(MAKE) -C ../crates/gpu_override build
prover_cpu:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --locked --release -p prover
tests_binary:
cargo clean && cargo test --release --no-run
@@ -46,3 +52,17 @@ lint:
cargo check --all-features
cargo clippy --all-features --all-targets -- -D warnings
cargo fmt --all
$(CIRCUIT_STUFF):
@echo "Download stuff with download-release.sh, and put them into correct directory";
@exit 1;
test_run: $(CIRCUIT_STUFF)
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json
test_e2e_run: $(CIRCUIT_STUFF) ${E2E_HANDLE_SET}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json handle ${E2E_HANDLE_SET}
gen_verifier_stuff:
mkdir -p ${DUMP_DIR}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json --forkname feynman dump ${DUMP_DIR}

View File

@@ -1,31 +0,0 @@
{
"sdk_config": {
"prover_name_prefix": "prover-1",
"keys_dir": "keys",
"coordinator": {
"base_url": "http://localhost:8555",
"retry_count": 10,
"retry_wait_time_sec": 10,
"connection_timeout_sec": 30
},
"l2geth": {
"endpoint": "http://localhost:9999"
},
"prover": {
"circuit_type": 2,
"supported_proof_types": [
1,
2,
3
],
"circuit_version": "v0.13.1"
},
"db_path": "unique-db-path-for-prover-1"
},
"circuits": {
"euclidV2": {
"hard_fork_name": "euclidV2",
"workspace_path": "/home/ubuntu/prover-workdir"
}
}
}

View File

@@ -20,12 +20,17 @@
],
"circuit_version": "v0.13.1"
},
"health_listener_addr": "127.0.0.1:10080",
"db_path": ".work/db"
},
"circuits": {
"euclidV2": {
"hard_fork_name": "euclidV2",
"workspace_path": ".work"
}
"workspace_path": ".work/euclid"
},
"feynman": {
"hard_fork_name": "feynman",
"workspace_path": ".work/feynman1"
}
}
}

View File

@@ -0,0 +1,36 @@
#!/bin/bash
# Download released zkvm circuit artifacts (app.vmexe + openvm.toml) for the
# chunk, batch and bundle circuits into .work/<circuit>/.
#
# Usage: download-release.sh [fork-name]
#   fork-name: a known fork ("euclid", "feynman") selecting a pinned release.
# SCROLL_ZKVM_VERSION, if set in the environment, overrides any mapping.
#
# Fail fast: a partially downloaded artifact must not be silently used.
set -euo pipefail

# Define version mapping
declare -A VERSION_MAP
VERSION_MAP["euclid"]="0.4.3"
VERSION_MAP["feynman"]="0.5.0rc1"

# Resolve the release version: explicit env var > known fork argument > default.
if [ -z "${SCROLL_ZKVM_VERSION:-}" ]; then
    # Check if first argument is provided and matches a known version name
    if [ -n "${1:-}" ] && [ -n "${VERSION_MAP[$1]:-}" ]; then
        SCROLL_ZKVM_VERSION=${VERSION_MAP[$1]}
        echo "Setting SCROLL_ZKVM_VERSION to ${SCROLL_ZKVM_VERSION} based on '$1' argument"
    else
        # Default version if no argument or not recognized
        SCROLL_ZKVM_VERSION=0.5.0rc0
    fi
fi

echo "$SCROLL_ZKVM_VERSION"

BASE_URL="https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/${SCROLL_ZKVM_VERSION}"

# Fetch the circuit executable and its openvm config for each circuit layer.
for circuit in chunk batch bundle; do
    mkdir -p ".work/${circuit}"
    wget "${BASE_URL}/${circuit}/app.vmexe" -O ".work/${circuit}/app.vmexe"
    wget "${BASE_URL}/${circuit}/openvm.toml" -O ".work/${circuit}/openvm.toml"
done

View File

@@ -0,0 +1,14 @@
#!/bin/bash
# Upload locally generated verifier artifacts to the circuit-release S3 bucket.
#
# Environment overrides:
#   SCROLL_ZKVM_STUFFDIR - directory holding the artifacts (default: ./.work)
#   SCROLL_ZKVM_VERSION  - release version used in the S3 key (default: 0.5.0rc1)
set -euo pipefail

# NOTE: `VAR ?= value` is Makefile syntax, not bash — bash would try to execute
# `SCROLL_ZKVM_STUFFDIR` as a command and the defaults would never be applied.
# Use shell parameter expansion with a default instead.
SCROLL_ZKVM_STUFFDIR="${SCROLL_ZKVM_STUFFDIR:-$(realpath .work)}"
SCROLL_ZKVM_VERSION="${SCROLL_ZKVM_VERSION:-0.5.0rc1}"

DIR_OUTPUT="releases/${SCROLL_ZKVM_VERSION}/verifier"
STUFF_FILES=('root-verifier-committed-exe' 'root-verifier-vm-config' 'verifier.bin' 'openVmVk.json')

for stuff_file in "${STUFF_FILES[@]}"; do
  SRC="${SCROLL_ZKVM_STUFFDIR}/${stuff_file}"
  TARGET="${DIR_OUTPUT}/${stuff_file}"
  # Quote both paths so a space in the stuff dir cannot split the arguments.
  aws --profile default s3 cp "${SRC}" "s3://circuit-release/scroll-zkvm/${TARGET}"
done