Compare commits


15 Commits

Author SHA1 Message Date
georgehao
d21fa36803 change l2watcher from w.GetBlockByNumberOrHash to BlockByNumber (#1715) 2025-07-31 18:29:50 +08:00
colin
fc75299eb3 fix(gas-oracle): nonce too low when resubmission (#1712) 2025-07-30 14:50:41 +08:00
Morty
4bfcd35d0c fix(bridge-history): update dependency go-ethereum version (#1713) 2025-07-30 14:32:12 +08:00
colin
6d62f8e5fa fix(gas-oracle): typos in config file example (#1711) 2025-07-28 18:12:50 +08:00
Morty
392ae07736 feat(blob-uploader): support codec v8 (#1707) 2025-07-24 01:34:46 +08:00
colin
db80b47820 fix(rollup-relayer): upgrade boundary message queue hash initialization (#1706) 2025-07-23 18:51:56 +08:00
Zhang Zhuo
daa1387208 circuit-0.5.2 (#1705)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2025-07-23 14:16:16 +08:00
Zhang Zhuo
67b05558e2 upgrade circuit to 0.5.2 (#1703)
Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2025-07-23 10:52:08 +08:00
Ho
1e447b0fef [Fix] building failure in gpu image (#1702) 2025-07-21 20:26:39 +08:00
georgehao
f7c6ecadf4 bump to v4.5.31 (#1700) 2025-07-18 16:41:59 +08:00
Ho
9d94f943e5 [Upgrade] feynman 0.5.0rc1 (#1699) 2025-07-18 15:57:31 +08:00
Morty
de17ad43ff fix(blob-uploader): orm function InsertOrUpdateBlobUpload and s3 bucket region configuration (#1679)
Co-authored-by: yiweichi <yiweichi@users.noreply.github.com>
2025-07-16 18:36:27 +08:00
colin
4233ad928c feat(rollup-relayer): support Validium (#1693)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2025-07-09 15:02:54 +08:00
georgehao
3050ccb40f update feynman prover makefile (#1691) 2025-07-05 10:33:08 +08:00
Ho
12e89201a1 feat: upgrading for feynman (#1690)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
2025-07-04 16:59:48 +08:00
60 changed files with 12360 additions and 908 deletions

View File

@@ -10,7 +10,8 @@ env:
jobs:
gas_oracle:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -55,7 +56,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
rollup_relayer:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -100,7 +102,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
blob_uploader:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -145,7 +148,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
rollup-db-cli:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -190,7 +194,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-fetcher:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -235,7 +240,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-api:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -280,7 +286,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-db-cli:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -325,7 +332,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
coordinator-api:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -352,48 +360,6 @@ jobs:
REPOSITORY: coordinator-api
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Setup SSH for repositories and clone them
run: |
mkdir -p ~/.ssh
chmod 700 ~/.ssh
# Setup for plonky3-gpu
echo "${{ secrets.PLONKY3_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/plonky3_gpu_key
chmod 600 ~/.ssh/plonky3_gpu_key
eval "$(ssh-agent -s)" > /dev/null
ssh-add ~/.ssh/plonky3_gpu_key 2>/dev/null
ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts 2>/dev/null
echo "Loaded plonky3-gpu key"
# Clone plonky3-gpu repository
./build/dockerfiles/coordinator-api/clone_plonky3_gpu.sh
# Setup for openvm-stark-gpu
echo "${{ secrets.OPENVM_STARK_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/openvm_stark_gpu_key
chmod 600 ~/.ssh/openvm_stark_gpu_key
eval "$(ssh-agent -s)" > /dev/null
ssh-add ~/.ssh/openvm_stark_gpu_key 2>/dev/null
echo "Loaded openvm-stark-gpu key"
# Clone openvm-stark-gpu repository
./build/dockerfiles/coordinator-api/clone_openvm_stark_gpu.sh
# Setup for openvm-gpu
echo "${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/openvm_gpu_key
chmod 600 ~/.ssh/openvm_gpu_key
eval "$(ssh-agent -s)" > /dev/null
ssh-add ~/.ssh/openvm_gpu_key 2>/dev/null
echo "Loaded openvm-gpu key"
# Clone openvm-gpu repository
./build/dockerfiles/coordinator-api/clone_openvm_gpu.sh
# Show number of loaded keys
echo "Number of loaded keys: $(ssh-add -l | wc -l)"
- name: Checkout specific commits
run: |
./build/dockerfiles/coordinator-api/checkout_all.sh
- name: Build and push
uses: docker/build-push-action@v3
env:
@@ -411,7 +377,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
coordinator-cron:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
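Every job in the workflow diff above moves from the hosted `ubuntu-latest` runner to a self-hosted runner group. Because the extracted hunks drop the removed/added markers and indentation, here is a minimal before/after sketch of the `runs-on` change (the group name comes from the diff; the job shown is just one of the affected jobs):

```yaml
jobs:
  gas_oracle:
    # before:
    # runs-on: ubuntu-latest
    # after: run on a self-hosted runner group
    runs-on:
      group: scroll-reth-runner-group
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
```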

Cargo.lock (generated, 857 changes): file diff suppressed because it is too large.

View File

@@ -17,12 +17,12 @@ repository = "https://github.com/scroll-tech/scroll"
version = "4.5.8"
[workspace.dependencies]
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "2962428", package = "scroll-zkvm-prover" }
scroll-zkvm-verifier-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "2962428", package = "scroll-zkvm-verifier" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "2962428" }
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1", package = "scroll-zkvm-prover" }
scroll-zkvm-verifier-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1", package = "scroll-zkvm-verifier" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1" }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/upgrade", features = ["scroll"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/upgrade" }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/openvm-1.3", features = ["scroll"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/openvm-1.3" }
metrics = "0.23.0"
metrics-util = "0.17"
@@ -46,18 +46,18 @@ once_cell = "1.20"
base64 = "0.22"
[patch.crates-io]
revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-handler = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-precompile = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-handler = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-precompile = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.15.0" }
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v1.2.0" }
@@ -65,4 +65,4 @@ alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch =
[profile.maxperf]
inherits = "release"
lto = "fat"
codegen-units = 1
codegen-units = 1

View File

@@ -11,14 +11,14 @@ require (
github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.11.0
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c // It's a hotfix for the header hash incompatibility issue, pls change this with caution
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9 // It's a hotfix for the header hash incompatibility issue, pls change this with caution
require (
dario.cat/mergo v1.0.0 // indirect

View File

@@ -311,8 +311,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6 h1:vb2XLvQwCf+F/ifP6P/lfeiQrHY6+Yb/E3R4KHXLqSE=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c h1:IpEBKM6O+xOK2qZVZztGxcobFXkKMb5hAkBEVzfXjVg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9 h1:u371VK8eOU2Z/0SVf5KDI3eJc8msHSpJbav4do/8n38=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=

View File

@@ -1,5 +1,5 @@
# Build libzkp dependency
FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.22.12-rust-nightly-2025-02-14 as chef
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as chef
WORKDIR app
FROM chef as planner
@@ -11,21 +11,15 @@ RUN cargo chef prepare --recipe-path recipe.json
FROM chef as zkp-builder
COPY ./rust-toolchain ./
COPY --from=planner /app/recipe.json recipe.json
# run scripts to get openvm-gpu
COPY ./build/dockerfiles/coordinator-api/plonky3-gpu /plonky3-gpu
COPY ./build/dockerfiles/coordinator-api/openvm-stark-gpu /openvm-stark-gpu
COPY ./build/dockerfiles/coordinator-api/openvm-gpu /openvm-gpu
COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
RUN cargo chef cook --release --recipe-path recipe.json
COPY ./crates/ ./crates/
COPY ./Cargo.* ./
COPY .git .git
RUN cargo build --release -p libzkp-c
# Download Go dependencies
FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.22.12-rust-nightly-2025-02-14 as base
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
@@ -45,7 +39,7 @@ RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_a
RUN mv coordinator/internal/logic/libzkp/lib /bin/
# Pull coordinator into a second stage deploy ubuntu container
FROM nvidia/cuda:11.7.1-runtime-ubuntu22.04
FROM ubuntu:20.04
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/lib
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
# ENV CHAIN_ID=534353

View File

@@ -1,17 +0,0 @@
#!/bin/bash
set -uex
PLONKY3_GPU_COMMIT=450ec18 # feynman
OPENVM_STARK_GPU_COMMIT=e3b2d6 # branch: sync/upstream-250702
OPENVM_GPU_COMMIT=8094b4f # branch: patch-v1.2.0
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# checkout plonky3-gpu
cd $DIR/plonky3-gpu && git checkout ${PLONKY3_GPU_COMMIT}
# checkout openvm-stark-gpu
cd $DIR/openvm-stark-gpu && git checkout ${OPENVM_STARK_GPU_COMMIT}
# checkout openvm-gpu
cd $DIR/openvm-gpu && git checkout ${OPENVM_GPU_COMMIT}

View File

@@ -1,10 +0,0 @@
#!/bin/bash
set -uex
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# clone openvm-gpu if not exists
if [ ! -d $DIR/openvm-gpu ]; then
git clone git@github.com:scroll-tech/openvm-gpu.git $DIR/openvm-gpu
fi
cd $DIR/openvm-gpu && git fetch --all --force

View File

@@ -1,10 +0,0 @@
#!/bin/bash
set -uex
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# clone openvm-stark-gpu if not exists
if [ ! -d $DIR/openvm-stark-gpu ]; then
git clone git@github.com:scroll-tech/openvm-stark-gpu.git $DIR/openvm-stark-gpu
fi
cd $DIR/openvm-stark-gpu && git fetch --all --force

View File

@@ -1,10 +0,0 @@
#!/bin/bash
set -uex
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# clone plonky3-gpu if not exists
if [ ! -d $DIR/plonky3-gpu ]; then
git clone git@github.com:scroll-tech/plonky3-gpu.git $DIR/plonky3-gpu
fi
cd $DIR/plonky3-gpu && git fetch --all --force

View File

@@ -1,92 +0,0 @@
# openvm
# same order and features as zkvm-prover/Cargo.toml.gpu
[patch."ssh://git@github.com/scroll-tech/openvm-gpu.git"]
openvm = { path = "/openvm-gpu/crates/toolchain/openvm", default-features = false }
openvm-algebra-complex-macros = { path = "/openvm-gpu/extensions/algebra/complex-macros", default-features = false }
openvm-algebra-guest = { path = "/openvm-gpu/extensions/algebra/guest", default-features = false }
openvm-bigint-guest = { path = "/openvm-gpu/extensions/bigint/guest", default-features = false }
openvm-build = { path = "/openvm-gpu/crates/toolchain/build", default-features = false }
openvm-circuit = { path = "/openvm-gpu/crates/vm", default-features = false }
openvm-custom-insn = { path = "/openvm-gpu/crates/toolchain/custom_insn", default-features = false }
openvm-continuations = { path = "/openvm-gpu/crates/continuations", default-features = false }
openvm-ecc-guest = { path = "/openvm-gpu/extensions/ecc/guest", default-features = false }
openvm-instructions ={ path = "/openvm-gpu/crates/toolchain/instructions", default-features = false }
openvm-keccak256-guest = { path = "/openvm-gpu/extensions/keccak256/guest", default-features = false }
openvm-native-circuit = { path = "/openvm-gpu/extensions/native/circuit", default-features = false }
openvm-native-compiler = { path = "/openvm-gpu/extensions/native/compiler", default-features = false }
openvm-native-recursion = { path = "/openvm-gpu/extensions/native/recursion", default-features = false }
openvm-native-transpiler = { path = "/openvm-gpu/extensions/native/transpiler", default-features = false }
openvm-pairing-guest = { path = "/openvm-gpu/extensions/pairing/guest", default-features = false }
openvm-rv32im-guest = { path = "/openvm-gpu/extensions/rv32im/guest", default-features = false }
openvm-rv32im-transpiler = { path = "/openvm-gpu/extensions/rv32im/transpiler", default-features = false }
openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
openvm-sha256-guest = { path = "/openvm-gpu/extensions/sha256/guest", default-features = false }
openvm-transpiler = { path = "/openvm-gpu/crates/toolchain/transpiler", default-features = false }
# stark-backend
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { path = "/openvm-stark-gpu/crates/stark-backend", features = ["gpu"] }
openvm-stark-sdk = { path = "/openvm-stark-gpu/crates/stark-sdk", features = ["gpu"] }
[patch."ssh://git@github.com/scroll-tech/openvm-stark-gpu.git"]
openvm-stark-backend = { path = "/openvm-stark-gpu/crates/stark-backend", features = ["gpu"] }
openvm-stark-sdk = { path = "/openvm-stark-gpu/crates/stark-sdk", features = ["gpu"] }
# plonky3
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { path = "/plonky3-gpu/air" }
p3-field = { path = "/plonky3-gpu/field" }
p3-commit = { path = "/plonky3-gpu/commit" }
p3-matrix = { path = "/plonky3-gpu/matrix" }
p3-baby-bear = { path = "/plonky3-gpu/baby-bear" }
p3-koala-bear = { path = "/plonky3-gpu/koala-bear" }
p3-util = { path = "/plonky3-gpu/util" }
p3-challenger = { path = "/plonky3-gpu/challenger" }
p3-dft = { path = "/plonky3-gpu/dft" }
p3-fri = { path = "/plonky3-gpu/fri" }
p3-goldilocks = { path = "/plonky3-gpu/goldilocks" }
p3-keccak = { path = "/plonky3-gpu/keccak" }
p3-keccak-air = { path = "/plonky3-gpu/keccak-air" }
p3-blake3 = { path = "/plonky3-gpu/blake3" }
p3-mds = { path = "/plonky3-gpu/mds" }
p3-monty-31 = { path = "/plonky3-gpu/monty-31" }
p3-merkle-tree = { path = "/plonky3-gpu/merkle-tree" }
p3-poseidon = { path = "/plonky3-gpu/poseidon" }
p3-poseidon2 = { path = "/plonky3-gpu/poseidon2" }
p3-poseidon2-air = { path = "/plonky3-gpu/poseidon2-air" }
p3-symmetric = { path = "/plonky3-gpu/symmetric" }
p3-uni-stark = { path = "/plonky3-gpu/uni-stark" }
p3-maybe-rayon = { path = "/plonky3-gpu/maybe-rayon" }
p3-bn254-fr = { path = "/plonky3-gpu/bn254-fr" }
# gpu crates
[patch."ssh://git@github.com/scroll-tech/plonky3-gpu.git"]
p3-gpu-base = { path = "/plonky3-gpu/gpu-base" }
p3-gpu-build = { path = "/plonky3-gpu/gpu-build" }
p3-gpu-field = { path = "/plonky3-gpu/gpu-field" }
p3-gpu-backend = { path = "/plonky3-gpu/gpu-backend" }
p3-gpu-module = { path = "/plonky3-gpu/gpu-module" }
p3-air = { path = "/plonky3-gpu/air" }
p3-field = { path = "/plonky3-gpu/field" }
p3-commit = { path = "/plonky3-gpu/commit" }
p3-matrix = { path = "/plonky3-gpu/matrix" }
p3-baby-bear = { path = "/plonky3-gpu/baby-bear" }
p3-koala-bear = { path = "/plonky3-gpu/koala-bear" }
p3-util = { path = "/plonky3-gpu/util" }
p3-challenger = { path = "/plonky3-gpu/challenger" }
p3-dft = { path = "/plonky3-gpu/dft" }
p3-fri = { path = "/plonky3-gpu/fri" }
p3-goldilocks = { path = "/plonky3-gpu/goldilocks" }
p3-keccak = { path = "/plonky3-gpu/keccak" }
p3-keccak-air = { path = "/plonky3-gpu/keccak-air" }
p3-blake3 = { path = "/plonky3-gpu/blake3" }
p3-mds = { path = "/plonky3-gpu/mds" }
p3-monty-31 = { path = "/plonky3-gpu/monty-31" }
p3-merkle-tree = { path = "/plonky3-gpu/merkle-tree" }
p3-poseidon = { path = "/plonky3-gpu/poseidon" }
p3-poseidon2 = { path = "/plonky3-gpu/poseidon2" }
p3-poseidon2-air = { path = "/plonky3-gpu/poseidon2-air" }
p3-symmetric = { path = "/plonky3-gpu/symmetric" }
p3-uni-stark = { path = "/plonky3-gpu/uni-stark" }
p3-maybe-rayon = { path = "/plonky3-gpu/maybe-rayon" }
p3-bn254-fr = { path = "/plonky3-gpu/bn254-fr" }

View File

@@ -1,2 +0,0 @@
[url "https://github.com/"]
insteadOf = ssh://git@github.com/

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.5.26"
var tag = "v4.5.37"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -1,4 +1,4 @@
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator libzkp
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..

View File

@@ -86,6 +86,10 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
var tmpBatchTask *orm.Batch
if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeBatch) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
tmpBatchTask, getTaskError = bp.batchOrm.GetBatchByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
log.Error("failed to get batch has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
@@ -95,6 +99,14 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped batch. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpBatchTask, getTaskError = bp.batchOrm.GetBatchByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected batch", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBatchTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}
if tmpBatchTask == nil {

View File

@@ -84,6 +84,10 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
var tmpBundleTask *orm.Bundle
if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeBundle) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
tmpBundleTask, getTaskError = bp.bundleOrm.GetBundleByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
log.Error("failed to get bundle has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
@@ -93,6 +97,14 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped bundle. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpBundleTask, getTaskError = bp.bundleOrm.GetBundleByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected bundle", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBundleTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}
if tmpBundleTask == nil {
@@ -234,9 +246,14 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
return nil, fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", task.TaskID)
}
parentBatch, err := bp.batchOrm.GetBatchByHash(ctx, batches[0].ParentBatchHash)
if err != nil {
return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
var prevStateRoot common.Hash
// this would be common in test cases: the first batch has empty parent
if batches[0].Index > 1 {
parentBatch, err := bp.batchOrm.GetBatchByHash(ctx, batches[0].ParentBatchHash)
if err != nil {
return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
}
prevStateRoot = common.HexToHash(parentBatch.StateRoot)
}
var batchProofs []*message.OpenVMBatchProof
@@ -255,7 +272,7 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
taskDetail.BundleInfo = &message.OpenVMBundleInfo{
ChainID: bp.cfg.L2.ChainID,
PrevStateRoot: common.HexToHash(parentBatch.StateRoot),
PrevStateRoot: prevStateRoot,
PostStateRoot: common.HexToHash(batches[len(batches)-1].StateRoot),
WithdrawRoot: common.HexToHash(batches[len(batches)-1].WithdrawRoot),
NumBatches: uint32(len(batches)),

View File

@@ -80,7 +80,12 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < 5; i++ {
var getTaskError error
var tmpChunkTask *orm.Chunk
if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeChunk) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
log.Debug("retrieved assigned task chunk", "taskID", taskCtx.hasAssignedTask.TaskID, "prover", taskCtx.ProverName)
tmpChunkTask, getTaskError = cp.chunkOrm.GetChunkByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
@@ -91,6 +96,14 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped chunk. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpChunkTask, getTaskError = cp.chunkOrm.GetChunkByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected chunk", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpChunkTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}
if tmpChunkTask == nil {
@@ -221,7 +234,7 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
// Get block hashes.
blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)
if dbErr != nil || len(blockHashes) == 0 {
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%w", task.TaskID, dbErr)
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%v", task.TaskID, dbErr)
}
var taskDetailBytes []byte
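The batch, bundle, and chunk prover task diffs above all add the same assignment branching: if the prover already holds an assigned task, it must be of the matching proof type and is re-fetched by hash; otherwise, if the request carries a TaskID, that specific record is looked up and a missing record is reported as dropped; only then does the coordinator fall back to picking a new task. A condensed sketch of that shared logic, with simplified placeholder types rather than the coordinator's actual ORM structs:

```go
package coordinator

import "fmt"

// Simplified placeholders for the coordinator's ORM records (hypothetical).
type ProverTask struct {
	TaskID   string
	TaskType int16
}

type Task struct{ Hash string }

// selectTask sketches the branching added to the Assign methods above: reuse an
// already assigned task of the right type, else honor an explicitly requested
// TaskID, else fall back to picking a new task.
func selectTask(assigned *ProverTask, requestedID string, want int16,
	getByHash func(string) (*Task, error), pickNew func() (*Task, error)) (*Task, error) {
	if assigned != nil {
		if assigned.TaskType != want {
			return nil, fmt.Errorf("prover is already assigned a task of another type")
		}
		t, err := getByHash(assigned.TaskID)
		if err != nil {
			return nil, err
		}
		if t == nil {
			return nil, fmt.Errorf("prover is already assigned a dropped task")
		}
		return t, nil
	}
	if requestedID != "" {
		t, err := getByHash(requestedID)
		if err != nil {
			return nil, err
		}
		if t == nil {
			return nil, fmt.Errorf("expected task (%s) is already dropped", requestedID)
		}
		return t, nil
	}
	return pickNew()
}
```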

View File

@@ -5,10 +5,12 @@ package verifier
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strings"
"github.com/scroll-tech/go-ethereum/log"
@@ -117,6 +119,16 @@ func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName
return libzkp.VerifyBundleProof(string(buf), forkName), nil
}
/*
add vk of imcompatilbe circuit app here to avoid we had used them unexpectedly
25/07/15: 0.5.0rc0 is no longer compatible since a breaking change
*/
const blocked_vks = `
rSJNNBpsxBdKlstbIIU/aYc7bHau98Qb2yjZMc5PmDhmGOolp5kYRbvF/VcWcO5HN5ujGs6S00W8pZcCoNQRLQ==,
2Lo7Cebm6SFtcsYXipkcMxIBmVY7UpoMXik/Msm7t2nyvi9EaNGsSnDnaCurscYEF+IcdjPUtVtY9EcD7IKwWg==,
D6YFHwTLZF/U2zpYJPQ3LwJZRm85yA5Vq2iFBqd3Mk4iwOUpS8sbOp3vg2+NDxhhKphgYpuUlykpdsoRhEt+cw==,
`
func (v *Verifier) loadOpenVMVks(cfg config.AssetConfig) error {
vkFileName := cfg.Vkfile
@@ -138,6 +150,16 @@ func (v *Verifier) loadOpenVMVks(cfg config.AssetConfig) error {
if err := json.Unmarshal(byt, &dump); err != nil {
return err
}
if strings.Contains(blocked_vks, dump.Chunk) {
return fmt.Errorf("loaded blocked chunk vk %s", dump.Chunk)
}
if strings.Contains(blocked_vks, dump.Batch) {
return fmt.Errorf("loaded blocked batch vk %s", dump.Batch)
}
if strings.Contains(blocked_vks, dump.Bundle) {
return fmt.Errorf("loaded blocked bundle vk %s", dump.Bundle)
}
v.OpenVMVkMap[dump.Chunk] = struct{}{}
v.OpenVMVkMap[dump.Batch] = struct{}{}
v.OpenVMVkMap[dump.Bundle] = struct{}{}

View File

@@ -0,0 +1,45 @@
[patch."https://github.com/openvm-org/openvm.git"]
openvm-build = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-circuit = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-continuations = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-instructions ={ git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-circuit = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-compiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-recursion = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-rv32im-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
openvm-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
"nightly-features",
], tag = "v0.2.1" }
p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }

crates/gpu_override/Cargo.lock (generated, new file, 11002 changes): file diff suppressed because it is too large.

View File

@@ -0,0 +1,21 @@
.PHONY: build update clean
ZKVM_COMMIT ?= freebuild
PLONKY3_GPU_VERSION=$(shell ./print_plonky3gpu_version.sh | sed -n '2p')
$(info PLONKY3_GPU_VERSION is ${PLONKY3_GPU_VERSION})
GIT_REV ?= $(shell git rev-parse --short HEAD)
GO_TAG ?= $(shell grep "var tag = " ../../common/version/version.go | cut -d "\"" -f2)
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION}
$(info ZK_GPU_VERSION is ${ZK_VERSION})
clean:
cargo clean -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock
# build gpu prover, never touch lock file
build:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock
# update Cargo.lock while override config has been updated
#update:
# GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock

View File

@@ -1,6 +1,6 @@
#!/bin/bash
config_file=~/.cargo/config.toml
config_file=.cargo/config.toml
plonky3_gpu_path=$(grep 'path.*plonky3-gpu' "$config_file" | cut -d'"' -f2 | head -n 1)
plonky3_gpu_path=$(dirname "$plonky3_gpu_path")

View File

@@ -108,38 +108,42 @@ impl ChunkInterpreter for RpcClient<'_> {
.get_block_by_hash(block_hash)
.full()
.await?
.ok_or_else(|| eyre::eyre!("Block not found"))?;
.ok_or_else(|| eyre::eyre!("Block {block_hash} not found"))?;
let number = block.header.number;
let parent_hash = block.header.parent_hash;
if number == 0 {
eyre::bail!("no number in header or use block 0");
}
let prev_state_root = if let Some(witness) = prev_witness {
if witness.header.number != number - 1 {
eyre::bail!(
"the ref witness is not the previous block, expected {} get {}",
number - 1,
witness.header.number,
);
}
witness.header.state_root
} else {
provider
.scroll_disk_root((number - 1).into())
.await?
.disk_root
};
let witness = WitnessBuilder::new()
let mut witness_builder = WitnessBuilder::new()
.block(block)
.chain_id(chain_id)
.execution_witness(provider.debug_execution_witness(number.into()).await?)
.state_root(provider.scroll_disk_root(number.into()).await?.disk_root)?
.prev_state_root(prev_state_root)
.build()?;
.execution_witness(provider.debug_execution_witness(number.into()).await?);
Ok(witness)
let prev_state_root = match prev_witness {
Some(witness) => {
if witness.header.number != number - 1 {
eyre::bail!(
"the ref witness is not the previous block, expected {} get {}",
number - 1,
witness.header.number,
);
}
witness.header.state_root
}
None => {
let parent_block = provider
.get_block_by_hash(parent_hash)
.await?
.expect("parent block should exist");
parent_block.header.state_root
}
};
witness_builder = witness_builder.prev_state_root(prev_state_root);
Ok(witness_builder.build()?)
}
tracing::debug!("fetch witness for {block_hash}");
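The rewritten witness construction above stops calling `scroll_disk_root` for the previous block: when a reference witness is supplied, its header must belong to block `number - 1`; otherwise the parent block header's state root is used. A minimal sketch of that selection, with a hypothetical simplified type standing in for the real header and provider types:

```rust
// Hypothetical, simplified stand-in for the real block header type.
struct Header {
    number: u64,
    state_root: [u8; 32],
}

/// Sketch of the prev_state_root selection added in the diff: prefer the
/// reference witness header (after checking it really is the previous block),
/// otherwise fall back to the parent block's header state root.
fn prev_state_root(
    number: u64,
    prev_witness_header: Option<&Header>,
    parent_header: &Header,
) -> Result<[u8; 32], String> {
    match prev_witness_header {
        Some(h) => {
            if h.number != number - 1 {
                return Err(format!(
                    "the ref witness is not the previous block, expected {} get {}",
                    number - 1,
                    h.number
                ));
            }
            Ok(h.state_root)
        }
        None => Ok(parent_header.state_root),
    }
}

fn main() {
    let parent = Header { number: 99, state_root: [0u8; 32] };
    // With no reference witness, the parent header's state root is used.
    assert!(prev_state_root(100, None, &parent).is_ok());
}
```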

View File

@@ -9,7 +9,7 @@ scroll-zkvm-types.workspace = true
scroll-zkvm-verifier-euclid.workspace = true
alloy-primitives.workspace = true #depress the effect of "native-keccak"
sbv-primitives.workspace = true
sbv-primitives = {workspace = true, features = ["scroll-compress-ratio", "scroll"]}
base64.workspace = true
serde.workspace = true
serde_derive.workspace = true

View File

@@ -30,7 +30,7 @@ pub fn checkout_chunk_task(
pub fn gen_universal_task(
task_type: i32,
task_json: &str,
fork_name: &str,
fork_name_str: &str,
expected_vk: &[u8],
interpreter: Option<impl ChunkInterpreter>,
) -> eyre::Result<(B256, String, String)> {
@@ -48,19 +48,40 @@ pub fn gen_universal_task(
let (pi_hash, metadata, mut u_task) = match task_type {
x if x == TaskType::Chunk as i32 => {
let task = serde_json::from_str::<ChunkProvingTask>(task_json)?;
let (pi_hash, metadata, u_task) =
gen_universal_chunk_task(task, fork_name.into(), interpreter)?;
let mut task = serde_json::from_str::<ChunkProvingTask>(task_json)?;
// normailze fork name field in task
task.fork_name = task.fork_name.to_lowercase();
// always respect the fork_name_str (which has been normalized) being passed
// if the fork_name wrapped in task is not match, consider it a malformed task
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in chunk task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
let (pi_hash, metadata, u_task) = utils::panic_catch(move || {
gen_universal_chunk_task(task, fork_name_str.into(), interpreter)
})
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
(pi_hash, AnyMetaData::Chunk(metadata), u_task)
}
x if x == TaskType::Batch as i32 => {
let task = serde_json::from_str::<BatchProvingTask>(task_json)?;
let (pi_hash, metadata, u_task) = gen_universal_batch_task(task, fork_name.into())?;
let mut task = serde_json::from_str::<BatchProvingTask>(task_json)?;
task.fork_name = task.fork_name.to_lowercase();
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in batch task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
let (pi_hash, metadata, u_task) =
utils::panic_catch(move || gen_universal_batch_task(task, fork_name_str.into()))
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
(pi_hash, AnyMetaData::Batch(metadata), u_task)
}
x if x == TaskType::Bundle as i32 => {
let task = serde_json::from_str::<BundleProvingTask>(task_json)?;
let (pi_hash, metadata, u_task) = gen_universal_bundle_task(task, fork_name.into())?;
let mut task = serde_json::from_str::<BundleProvingTask>(task_json)?;
task.fork_name = task.fork_name.to_lowercase();
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in bundle task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
let (pi_hash, metadata, u_task) =
utils::panic_catch(move || gen_universal_bundle_task(task, fork_name_str.into()))
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
(pi_hash, AnyMetaData::Bundle(metadata), u_task)
}
_ => return Err(eyre::eyre!("unrecognized task type {task_type}")),
@@ -111,24 +132,6 @@ pub fn verify_proof(proof: Vec<u8>, fork_name: &str, task_type: TaskType) -> eyr
let verifier = verifier::get_verifier(fork_name)?;
let ret = verifier.lock().unwrap().verify(task_type, &proof)?;
if let Ok(debug_value) = std::env::var("ZKVM_DEBUG_PROOF") {
use std::time::{SystemTime, UNIX_EPOCH};
if !ret && debug_value.to_lowercase() == "true" {
// Dump req.input to a temporary file
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
let filename = format!("/tmp/proof_{}.json", timestamp);
if let Err(e) = std::fs::write(&filename, &proof) {
eprintln!("Failed to write proof to file {}: {}", filename, e);
} else {
println!("Dumped failed proof to {}", filename);
}
}
}
Ok(ret)
}

View File

@@ -9,7 +9,10 @@ pub use chunk::{ChunkProvingTask, ChunkTask};
pub use chunk_interpreter::ChunkInterpreter;
pub use scroll_zkvm_types::task::ProvingTask;
use crate::proofs::{self, BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata};
use crate::{
proofs::{self, BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata},
utils::panic_catch,
};
use sbv_primitives::B256;
use scroll_zkvm_types::public_inputs::{ForkName, MultiVersionPublicInputs};
@@ -20,25 +23,14 @@ fn check_aggregation_proofs<Metadata>(
where
Metadata: proofs::ProofMetadata,
{
use std::panic::{self, AssertUnwindSafe};
panic::catch_unwind(AssertUnwindSafe(|| {
panic_catch(|| {
for w in proofs.windows(2) {
w[1].metadata
.pi_hash_info()
.validate(w[0].metadata.pi_hash_info(), fork_name);
}
}))
.map_err(|e| {
let error_msg = if let Some(string) = e.downcast_ref::<String>() {
string.clone()
} else if let Some(str) = e.downcast_ref::<&str>() {
str.to_string()
} else {
"Unknown validation error occurred".to_string()
};
eyre::eyre!("Chunk data validation failed: {}", error_msg)
})?;
})
.map_err(|e| eyre::eyre!("Chunk data validation failed: {}", e))?;
Ok(())
}

View File

@@ -4,8 +4,9 @@ use eyre::Result;
use sbv_primitives::{B256, U256};
use scroll_zkvm_types::{
batch::{
BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchInfo, BatchWitness, Envelope, EnvelopeV6,
EnvelopeV7, PointEvalWitness, ReferenceHeader, ToArchievedWitness, N_BLOB_BYTES,
BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderV8, BatchInfo, BatchWitness,
Envelope, EnvelopeV6, EnvelopeV7, EnvelopeV8, PointEvalWitness, ReferenceHeader,
ToArchievedWitness, N_BLOB_BYTES,
},
public_inputs::ForkName,
task::ProvingTask,
@@ -23,37 +24,35 @@ use utils::{base64, point_eval};
#[serde(untagged)]
pub enum BatchHeaderV {
V6(BatchHeaderV6),
V7(BatchHeaderV7),
}
impl From<BatchHeaderV> for ReferenceHeader {
fn from(value: BatchHeaderV) -> Self {
match value {
BatchHeaderV::V6(h) => ReferenceHeader::V6(h),
BatchHeaderV::V7(h) => ReferenceHeader::V7(h),
}
}
V7_8(BatchHeaderV7),
}
impl BatchHeaderV {
pub fn batch_hash(&self) -> B256 {
match self {
BatchHeaderV::V6(h) => h.batch_hash(),
BatchHeaderV::V7(h) => h.batch_hash(),
BatchHeaderV::V7_8(h) => h.batch_hash(),
}
}
pub fn must_v6_header(&self) -> &BatchHeaderV6 {
match self {
BatchHeaderV::V6(h) => h,
BatchHeaderV::V7(_) => panic!("try to pick v7 header"),
_ => panic!("try to pick other header type"),
}
}
pub fn must_v7_header(&self) -> &BatchHeaderV7 {
match self {
BatchHeaderV::V7(h) => h,
BatchHeaderV::V6(_) => panic!("try to pick v6 header"),
BatchHeaderV::V7_8(h) => h,
_ => panic!("try to pick other header type"),
}
}
pub fn must_v8_header(&self) -> &BatchHeaderV8 {
match self {
BatchHeaderV::V7_8(h) => h,
_ => panic!("try to pick other header type"),
}
}
}
@@ -120,20 +119,28 @@ impl BatchProvingTask {
EnvelopeV6::from_slice(self.blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
BatchHeaderV::V7(_) => {
match fork_name {
ForkName::EuclidV2 => (),
_ => unreachable!("hardfork mismatch for da-codec@v6 header: found={fork_name:?}, expected={:?}",
[ForkName::EuclidV2],
),
}
BatchHeaderV::V7_8(_) => {
let padded_blob_bytes = {
let mut padded_blob_bytes = self.blob_bytes.to_vec();
padded_blob_bytes.resize(N_BLOB_BYTES, 0);
padded_blob_bytes
};
EnvelopeV7::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
match fork_name {
ForkName::EuclidV2 => {
<EnvelopeV7 as Envelope>::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
ForkName::Feynman => {
<EnvelopeV8 as Envelope>::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
f => unreachable!(
"hardfork mismatch for da-codec@v7 header: found={}, expected={:?}",
f,
[ForkName::EuclidV2, ForkName::Feynman],
),
}
}
};
@@ -159,7 +166,11 @@ impl BatchProvingTask {
kzg_proof: kzg_proof.into_inner(),
};
let reference_header = self.batch_header.clone().into();
let reference_header = match fork_name {
ForkName::EuclidV1 => ReferenceHeader::V6(*self.batch_header.must_v6_header()),
ForkName::EuclidV2 => ReferenceHeader::V7(*self.batch_header.must_v7_header()),
ForkName::Feynman => ReferenceHeader::V8(*self.batch_header.must_v8_header()),
};
BatchWitness {
fork_name,

View File

@@ -9,6 +9,16 @@ use std::sync::OnceLock;
static LOG_SETTINGS: OnceLock<Result<(), String>> = OnceLock::new();
fn enable_dump() -> bool {
static ZKVM_DEBUG_DUMP: OnceLock<bool> = OnceLock::new();
*ZKVM_DEBUG_DUMP.get_or_init(|| {
std::env::var("ZKVM_DEBUG")
.or_else(|_| std::env::var("ZKVM_DEBUG_PROOF"))
.map(|s| s.to_lowercase() == "true")
.unwrap_or(false)
})
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_tracing() {
@@ -52,6 +62,7 @@ pub unsafe extern "C" fn init_l2geth(config: *const c_char) {
fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
let fork_name_str = c_char_to_str(fork_name);
let proof_str = proof;
let proof = c_char_to_vec(proof);
match libzkp::verify_proof(proof, fork_name_str, task_type) {
@@ -59,7 +70,24 @@ fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskT
tracing::error!("{:?} verify failed, error: {:#}", task_type, e);
false as c_char
}
Ok(result) => result as c_char,
Ok(result) => {
if !result && enable_dump() {
use std::time::{SystemTime, UNIX_EPOCH};
// Dump req.input to a temporary file
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
let filename = format!("/tmp/proof_{}.json", timestamp);
let cstr = unsafe { std::ffi::CStr::from_ptr(proof_str) };
if let Err(e) = std::fs::write(&filename, cstr.to_bytes()) {
eprintln!("Failed to write proof to file {}: {}", filename, e);
} else {
println!("Dumped failed proof to {}", filename);
}
}
result as c_char
}
}
}
@@ -167,6 +195,22 @@ pub unsafe extern "C" fn gen_universal_task(
expected_pi_hash,
}
} else {
if enable_dump() {
use std::time::{SystemTime, UNIX_EPOCH};
// Dump req.input to a temporary file
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
let c_str = unsafe { std::ffi::CStr::from_ptr(fork_name) };
let filename = format!("/tmp/task_{}_{}.json", c_str.to_str().unwrap(), timestamp);
if let Err(e) = std::fs::write(&filename, task_json.as_bytes()) {
eprintln!("Failed to write task to file {}: {}", filename, e);
} else {
println!("Dumped failed task to {}", filename);
}
}
tracing::error!("gen_universal_task failed, error: {:#}", ret.unwrap_err());
failed_handling_result()
}

View File

@@ -1,32 +0,0 @@
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
"nightly-features",
], tag = "v0.2.0" }
p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }

View File

@@ -5,9 +5,10 @@ mod zk_circuits_handler;
use clap::{ArgAction, Parser, Subcommand};
use prover::{LocalProver, LocalProverConfig};
use scroll_proving_sdk::{
prover::ProverBuilder,
prover::{types::ProofType, ProverBuilder},
utils::{get_version, init_tracing},
};
use std::{fs::File, io::BufReader, path::Path};
#[derive(Parser, Debug)]
#[command(disable_version_flag = true)]
@@ -38,6 +39,17 @@ enum Commands {
/// path to save the verifier's asset
asset_path: String,
},
Handle {
/// path to save the verifier's asset
task_path: String,
},
}
#[derive(Debug, serde::Deserialize)]
struct HandleSet {
chunks: Vec<String>,
batches: Vec<String>,
bundles: Vec<String>,
}
#[tokio::main]
@@ -62,6 +74,40 @@ async fn main() -> eyre::Result<()> {
println!("dump assets for {fork_name} into {asset_path}");
local_prover.dump_verifier_assets(&fork_name, asset_path.as_ref())?;
}
Some(Commands::Handle { task_path }) => {
let file = File::open(Path::new(&task_path))?;
let reader = BufReader::new(file);
let handle_set: HandleSet = serde_json::from_reader(reader)?;
let prover = ProverBuilder::new(sdk_config, local_prover)
.build()
.await
.map_err(|e| eyre::eyre!("build prover fail: {e}"))?;
let prover = std::sync::Arc::new(prover);
println!("Handling task set 1: chunks ...");
assert!(
prover
.clone()
.one_shot(&handle_set.chunks, ProofType::Chunk)
.await
);
println!("Done! Handling task set 2: batches ...");
assert!(
prover
.clone()
.one_shot(&handle_set.batches, ProofType::Batch)
.await
);
println!("Done! Handling task set 3: bundles ...");
assert!(
prover
.clone()
.one_shot(&handle_set.bundles, ProofType::Bundle)
.await
);
println!("All done!");
}
None => {
let prover = ProverBuilder::new(sdk_config, local_prover)
.build()
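The new `Handle` subcommand above deserializes a JSON file into `HandleSet` and feeds each group to `one_shot` in order (chunks, then batches, then bundles). A minimal sketch of a file shape that `serde_json` would accept for that struct; the diff does not show what each string should contain (a path to a task JSON, a task identifier, etc.), so the entries below are placeholders:

```rust
use serde::Deserialize;

// Mirrors the HandleSet struct introduced in the diff.
#[derive(Debug, Deserialize)]
struct HandleSet {
    chunks: Vec<String>,
    batches: Vec<String>,
    bundles: Vec<String>,
}

fn main() -> Result<(), serde_json::Error> {
    // Placeholder entries; see the caveat above about what each string holds.
    let example = r#"{
        "chunks":  ["chunk-task-1.json", "chunk-task-2.json"],
        "batches": ["batch-task-1.json"],
        "bundles": ["bundle-task-1.json"]
    }"#;
    let set: HandleSet = serde_json::from_str(example)?;
    println!("{} chunks, {} batches, {} bundles",
        set.chunks.len(), set.batches.len(), set.bundles.len());
    Ok(())
}
```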

View File

@@ -203,6 +203,10 @@ impl LocalProver {
.get(hard_fork_name)
.ok_or_else(|| eyre::eyre!("no corresponding config for fork {hard_fork_name}"))?;
if !config.vks.is_empty() {
eyre::bail!("clean vks cache first or we will have wrong dumped vk");
}
let workspace_path = &config.workspace_path;
let universal_prover = EuclidV2Handler::new(config);
let _ = universal_prover

View File

@@ -1083,9 +1083,7 @@ github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/holiman/uint256 v1.3.0/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150 h1:vlNjIqmUZ9CMAWsbURYl3a6wZbw7q5RHVvlXTNS/Bs8=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/hydrogen18/memlistener v1.0.0/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI=
github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
@@ -1122,7 +1120,6 @@ github.com/intel/goresctrl v0.3.0 h1:K2D3GOzihV7xSBedGxONSlaw/un1LZgWsc9IfqipN4c
github.com/intel/goresctrl v0.3.0/go.mod h1:fdz3mD85cmP9sHD8JUlrNWAxvwM86CrbmVXltEKd7zk=
github.com/iris-contrib/jade v1.1.4/go.mod h1:EDqR+ur9piDl6DUgs6qRrlfzmlx/D5UybogqrXvJTBE=
github.com/iris-contrib/schema v0.0.6/go.mod h1:iYszG0IOsuIsfzjymw1kMzTL8YQcCWlm65f3wX8J5iA=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E=
@@ -1228,7 +1225,6 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
@@ -1240,7 +1236,6 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
@@ -1418,6 +1413,8 @@ github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250206083728-ea43834c198f/go.mod h1:Ik3OBLl7cJxPC+CFyCBYNXBPek4wpdzkWehn/y5qLM8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250225152658-bcfdb48dd939/go.mod h1:AgU8JJxC7+nfs7R7ma35AU7dMAGW7wCw3dRZRefIKyQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9 h1:u371VK8eOU2Z/0SVf5KDI3eJc8msHSpJbav4do/8n38=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
@@ -1454,7 +1451,6 @@ github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/spiffe/go-spiffe/v2 v2.1.1 h1:RT9kM8MZLZIsPTH+HKQEP5yaAk3yd/VBzlINaRjXs8k=
github.com/spiffe/go-spiffe/v2 v2.1.1/go.mod h1:5qg6rpqlwIub0JAiF1UK9IMD6BpPTmvG6yfSgDBs5lg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
@@ -1481,7 +1477,6 @@ github.com/tonistiigi/go-archvariant v1.0.0 h1:5LC1eDWiBNflnTF1prCiX09yfNHIxDC/a
github.com/tonistiigi/go-archvariant v1.0.0/go.mod h1:TxFmO5VS6vMq2kvs3ht04iPXtu2rUT/erOnGFYfk5Ho=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/ugorji/go v1.2.7 h1:qYhyWUUd6WbiM+C6JZAUkIJt/1WrjzNHY9+KCIjVqTo=
github.com/urfave/cli v1.22.12 h1:igJgVw1JdKH+trcLWLeLwZjU9fEfPesQ+9/e4MQ44S8=
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
@@ -1716,7 +1711,6 @@ golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/perf v0.0.0-20230113213139-801c7ef9e5c5/go.mod h1:UBKtEnL8aqnd+0JHqZ+2qoMDwtuy6cYhhKNoHLBiTQc=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1741,11 +1735,9 @@ golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1811,7 +1803,6 @@ golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.2.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

File diff suppressed because one or more lines are too long

View File

@@ -70,7 +70,7 @@ func action(ctx *cli.Context) error {
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
}
go utils.Loop(subCtx, 2*time.Second, blobUploader.UploadBlobToS3)
go utils.Loop(subCtx, 1*time.Second, blobUploader.UploadBlobToS3)
// Finish start all blob-uploader functions.
log.Info("Start blob-uploader successfully", "version", version.Version)

View File

@@ -107,7 +107,7 @@ func action(ctx *cli.Context) error {
}
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, genesis.Config, db, registry)
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, registry)
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, cfg.L2Config.RelayerConfig.ValidiumMode, registry)
bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, minCodecVersion, genesis.Config, db, registry)
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, registry)

View File

@@ -3,7 +3,7 @@
"endpoint": "https://rpc.ankr.com/eth",
"start_height": 0,
"relayer_config": {
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"gas_price_oracle_contract_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "https://rpc.scroll.io",
"escalate_blocks": 1,
@@ -36,8 +36,8 @@
"endpoint": "https://rpc.scroll.io",
"l2_message_queue_address": "0x0000000000000000000000000000000000000000",
"relayer_config": {
"validium_mode": false,
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
"endpoint": "https://rpc.ankr.com/eth",
"escalate_blocks": 1,
@@ -123,4 +123,4 @@
"maxOpenNum": 200,
"maxIdleNum": 20
}
}
}

View File

@@ -53,6 +53,8 @@ type ChainMonitor struct {
// RelayerConfig loads relayer configuration items.
// What we need to pay attention to is that
type RelayerConfig struct {
// ValidiumMode indicates if the relayer is in validium mode.
ValidiumMode bool `json:"validium_mode"`
// RollupContractAddress store the rollup contract address.
RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
// GasPriceOracleContractAddress store the scroll messenger contract address.
@@ -73,8 +75,6 @@ type RelayerConfig struct {
// Indicates if bypass features specific to testing environments are enabled.
EnableTestEnvBypassFeatures bool `json:"enable_test_env_bypass_features"`
// The timeout in seconds for finalizing a batch without proof, only used when EnableTestEnvBypassFeatures is true.
FinalizeBatchWithoutProofTimeoutSec uint64 `json:"finalize_batch_without_proof_timeout_sec"`
// The timeout in seconds for finalizing a bundle without proof, only used when EnableTestEnvBypassFeatures is true.
FinalizeBundleWithoutProofTimeoutSec uint64 `json:"finalize_bundle_without_proof_timeout_sec"`
}

View File

@@ -167,7 +167,7 @@ func (b *BlobUploader) constructBlobCodec(dbBatch *orm.Batch) (*kzg4844.Blob, er
Chunks: chunks,
}
case encoding.CodecV7:
case encoding.CodecV7, encoding.CodecV8:
encodingBatch = &encoding.Batch{
Index: dbBatch.Index,
ParentBatchHash: common.HexToHash(dbBatch.ParentBatchHash),

View File

@@ -25,8 +25,6 @@ type S3Uploader struct {
func NewS3Uploader(cfg *config.AWSS3Config) (*S3Uploader, error) {
// load AWS config
var opts []func(*awsconfig.LoadOptions) error
opts = append(opts, awsconfig.WithRegion(cfg.Region))
// if AccessKey && SecretKey provided, use it
if cfg.AccessKey != "" && cfg.SecretKey != "" {
opts = append(opts, awsconfig.WithCredentialsProvider(
@@ -38,6 +36,10 @@ func NewS3Uploader(cfg *config.AWSS3Config) (*S3Uploader, error) {
)
}
if cfg.Region != "" {
opts = append(opts, awsconfig.WithRegion(cfg.Region))
}
awsCfg, err := awsconfig.LoadDefaultConfig(context.Background(), opts...)
if err != nil {
return nil, fmt.Errorf("failed to load default config: %w", err)

View File

@@ -79,6 +79,7 @@ type Layer2Relayer struct {
commitSender *sender.Sender
finalizeSender *sender.Sender
l1RollupABI *abi.ABI
validiumABI *abi.ABI
l2GasOracleABI *abi.ABI
@@ -172,6 +173,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
commitSender: commitSender,
finalizeSender: finalizeSender,
l1RollupABI: bridgeAbi.ScrollChainABI,
validiumABI: bridgeAbi.ValidiumABI,
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
batchStrategy: strategy,
@@ -239,10 +241,11 @@ func (r *Layer2Relayer) initializeGenesis() error {
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
Blocks: chunk.Blocks,
}
var dbBatch *orm.Batch
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, encoding.CodecV0, rutils.BatchMetrics{}, dbTX)
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, encoding.CodecV0, rutils.BatchMetrics{ValidiumMode: r.cfg.ValidiumMode}, dbTX)
if err != nil {
return fmt.Errorf("failed to insert batch: %v", err)
}
@@ -274,10 +277,23 @@ func (r *Layer2Relayer) initializeGenesis() error {
}
func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte, stateRoot common.Hash) error {
// encode "importGenesisBatch" transaction calldata
calldata, packErr := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
if packErr != nil {
return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, packErr)
var calldata []byte
var packErr error
if r.cfg.ValidiumMode {
// validium mode: only pass batchHeader
calldata, packErr = r.validiumABI.Pack("importGenesisBatch", batchHeader)
if packErr != nil {
return fmt.Errorf("failed to pack validium importGenesisBatch with batch header: %v. error: %v", common.Bytes2Hex(batchHeader), packErr)
}
log.Info("Validium importGenesis", "calldata", common.Bytes2Hex(calldata))
} else {
// rollup mode: pass batchHeader and stateRoot
calldata, packErr = r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
if packErr != nil {
return fmt.Errorf("failed to pack rollup importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, packErr)
}
log.Info("Rollup importGenesis", "calldata", common.Bytes2Hex(calldata), "stateRoot", stateRoot)
}
// submit genesis batch to L1 rollup contract
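
As a side note on the ABI packing above (one argument in validium mode, two in rollup mode), here is a minimal standalone sketch using the upstream go-ethereum abi package; the one-argument importGenesisBatch(bytes) fragment is assumed for illustration and is not the actual Scroll validium contract ABI.

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

// Hypothetical one-function ABI fragment matching the single-argument call shape.
const validiumFragment = `[{"type":"function","name":"importGenesisBatch","inputs":[{"name":"batchHeader","type":"bytes"}],"outputs":[]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(validiumFragment))
	if err != nil {
		log.Fatal(err)
	}
	// Pack prepends the 4-byte selector and ABI-encodes the header bytes.
	calldata, err := parsed.Pack("importGenesisBatch", []byte{0xde, 0xad})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes of calldata\n", len(calldata))
}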
@@ -285,7 +301,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
if err != nil {
return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
}
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchHash", batchHash)
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchHash", batchHash, "validium", r.cfg.ValidiumMode)
// wait for confirmation
// we assume that no other transactions are sent before initializeGenesis completes
@@ -310,20 +326,23 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
if !confirmation.IsSuccessful {
return errors.New("import genesis batch tx failed")
}
log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash.String())
log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash.String(), "validium", r.cfg.ValidiumMode)
return nil
}
}
}
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
// Pending batchess are submitted if one of the following conditions is met:
// Pending batches are submitted if one of the following conditions is met:
// - the first batch is too old -> forceSubmit
// - backlogCount > r.cfg.BatchSubmission.BacklogMax -> forceSubmit
// - we have at least minBatches AND price hits a desired target price
func (r *Layer2Relayer) ProcessPendingBatches() {
// Get effective batch limits based on whether validium mode is enabled.
minBatches, maxBatches := r.getEffectiveBatchLimits()
// get pending batches from database in ascending order by their index.
dbBatches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, r.cfg.BatchSubmission.MaxBatches)
dbBatches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, maxBatches)
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
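
The three submission rules listed in the comment above (age-based force submit, backlog-based force submit, or enough batches plus an acceptable price) condense to one predicate. The following standalone sketch is illustrative only; shouldSubmit and its parameters are hypothetical names, not the relayer's actual API.

package main

import (
	"fmt"
	"time"
)

// shouldSubmit condenses the rules above: force-submit when the oldest pending
// batch is too old or the backlog is too large, otherwise submit only once at
// least minBatches are ready and the gas price target is met.
func shouldSubmit(oldestAge, timeout time.Duration, backlog, backlogMax, ready, minBatches int, priceOK bool) bool {
	forceSubmit := oldestAge > timeout || backlog > backlogMax
	if forceSubmit {
		return true
	}
	return ready >= minBatches && priceOK
}

func main() {
	// Example: 5 batches ready, minimum 4, price target met -> submit.
	fmt.Println(shouldSubmit(10*time.Minute, time.Hour, 3, 20, 5, 4, true))
}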
@@ -432,21 +451,21 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
break
}
if batchesToSubmitLen < r.cfg.BatchSubmission.MaxBatches {
if batchesToSubmitLen < maxBatches {
batchesToSubmit = append(batchesToSubmit, &dbBatchWithChunks{
Batch: dbBatch,
Chunks: dbChunks,
})
}
if len(batchesToSubmit) >= r.cfg.BatchSubmission.MaxBatches {
if len(batchesToSubmit) >= maxBatches {
break
}
}
// we only submit batches if we have a timeout or if we have enough batches to submit
if !forceSubmit && len(batchesToSubmit) < r.cfg.BatchSubmission.MinBatches {
log.Debug("Not enough batches to submit", "count", len(batchesToSubmit), "minBatches", r.cfg.BatchSubmission.MinBatches, "maxBatches", r.cfg.BatchSubmission.MaxBatches)
if !forceSubmit && len(batchesToSubmit) < minBatches {
log.Debug("Not enough batches to submit", "count", len(batchesToSubmit), "minBatches", minBatches, "maxBatches", maxBatches)
return
}
@@ -466,10 +485,22 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
codecVersion := encoding.CodecVersion(firstBatch.CodecVersion)
switch codecVersion {
case encoding.CodecV7, encoding.CodecV8:
calldata, blobs, maxBlockHeight, totalGasUsed, err = r.constructCommitBatchPayloadCodecV7(batchesToSubmit, firstBatch, lastBatch)
if err != nil {
log.Error("failed to construct constructCommitBatchPayloadCodecV7 payload for V7", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "err", err)
return
if r.cfg.ValidiumMode {
if len(batchesToSubmit) != 1 {
log.Error("validium mode only supports committing one batch at a time", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "batches count", len(batchesToSubmit))
return
}
calldata, maxBlockHeight, totalGasUsed, err = r.constructCommitBatchPayloadValidium(batchesToSubmit[0])
if err != nil {
log.Error("failed to construct validium payload", "codecVersion", codecVersion, "index", batchesToSubmit[0].Batch.Index, "err", err)
return
}
} else {
calldata, blobs, maxBlockHeight, totalGasUsed, err = r.constructCommitBatchPayloadCodecV7(batchesToSubmit, firstBatch, lastBatch)
if err != nil {
log.Error("failed to construct normal payload", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "err", err)
return
}
}
default:
log.Error("unsupported codec version in ProcessPendingBatches", "codecVersion", codecVersion, "start index", firstBatch, "end index", lastBatch.Index)
@@ -522,6 +553,14 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
log.Info("Sent the commitBatches tx to layer1", "batches count", len(batchesToSubmit), "start index", firstBatch.Index, "start hash", firstBatch.Hash, "end index", lastBatch.Index, "end hash", lastBatch.Hash, "tx hash", txHash.String())
}
// getEffectiveBatchLimits returns the effective min and max batch limits based on whether validium mode is enabled.
func (r *Layer2Relayer) getEffectiveBatchLimits() (int, int) {
if r.cfg.ValidiumMode {
return 1, 1 // minBatches=1, maxBatches=1
}
return r.cfg.BatchSubmission.MinBatches, r.cfg.BatchSubmission.MaxBatches
}
func (r *Layer2Relayer) contextIDFromBatches(codecVersion encoding.CodecVersion, batches []*dbBatchWithChunks) string {
contextIDs := []string{fmt.Sprintf("v%d", codecVersion)}
for _, batch := range batches {
@@ -690,9 +729,16 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
var calldata []byte
switch encoding.CodecVersion(bundle.CodecVersion) {
case encoding.CodecV7, encoding.CodecV8:
calldata, err = r.constructFinalizeBundlePayloadCodecV7(dbBatch, endChunk, aggProof)
if err != nil {
return fmt.Errorf("failed to construct finalizeBundle payload codecv7, bundle index: %v, last batch index: %v, err: %w", bundle.Index, dbBatch.Index, err)
if r.cfg.ValidiumMode {
calldata, err = r.constructFinalizeBundlePayloadValidium(dbBatch, endChunk, aggProof)
if err != nil {
return fmt.Errorf("failed to construct validium finalizeBundle payload, codec version: %v, bundle index: %v, last batch index: %v, err: %w", dbBatch.CodecVersion, bundle.Index, dbBatch.Index, err)
}
} else {
calldata, err = r.constructFinalizeBundlePayloadCodecV7(dbBatch, endChunk, aggProof)
if err != nil {
return fmt.Errorf("failed to construct normal finalizeBundle payload, codec version: %v, bundle index: %v, last batch index: %v, err: %w", dbBatch.CodecVersion, bundle.Index, dbBatch.Index, err)
}
}
default:
return fmt.Errorf("unsupported codec version in finalizeBundle, bundle index: %v, version: %d", bundle.Index, bundle.CodecVersion)
@@ -951,6 +997,35 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*db
return calldata, blobs, maxBlockHeight, totalGasUsed, nil
}
func (r *Layer2Relayer) constructCommitBatchPayloadValidium(batch *dbBatchWithChunks) ([]byte, uint64, uint64, error) {
// Calculate metrics
var maxBlockHeight uint64
var totalGasUsed uint64
for _, c := range batch.Chunks {
if c.EndBlockNumber > maxBlockHeight {
maxBlockHeight = c.EndBlockNumber
}
totalGasUsed += c.TotalL2TxGas
}
// Get the commitment from the batch data: for validium mode, we use the last L2 block hash as the commitment to the off-chain data
// Get the last chunk from the batch to find the end block hash
// TODO: This is a temporary solution, we might use a larger commitment in the future
if len(batch.Chunks) == 0 {
return nil, 0, 0, fmt.Errorf("last batch has no chunks")
}
lastChunk := batch.Chunks[len(batch.Chunks)-1]
commitment := common.HexToHash(lastChunk.EndBlockHash)
version := encoding.CodecVersion(batch.Batch.CodecVersion)
calldata, err := r.validiumABI.Pack("commitBatch", version, common.HexToHash(batch.Batch.ParentBatchHash), common.HexToHash(batch.Batch.StateRoot), common.HexToHash(batch.Batch.WithdrawRoot), commitment[:])
if err != nil {
return nil, 0, 0, fmt.Errorf("failed to pack commitBatch: %w", err)
}
log.Info("Validium commitBatch", "maxBlockHeight", maxBlockHeight, "commitment", commitment.Hex())
return calldata, maxBlockHeight, totalGasUsed, nil
}
func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, error) {
if aggProof != nil { // finalizeBundle with proof.
calldata, packErr := r.l1RollupABI.Pack(
@@ -967,7 +1042,8 @@ func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch
return calldata, nil
}
fmt.Println("packing finalizeBundlePostEuclidV2NoProof", len(dbBatch.BatchHeader), dbBatch.CodecVersion, dbBatch.BatchHeader, new(big.Int).SetUint64(endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk), common.HexToHash(dbBatch.StateRoot), common.HexToHash(dbBatch.WithdrawRoot))
log.Info("Packing finalizeBundlePostEuclidV2NoProof", "batchHeaderLength", len(dbBatch.BatchHeader), "codecVersion", dbBatch.CodecVersion, "totalL1Messages", endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk, "stateRoot", dbBatch.StateRoot, "withdrawRoot", dbBatch.WithdrawRoot)
// finalizeBundle without proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBundlePostEuclidV2NoProof",
@@ -982,6 +1058,26 @@ func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch
return calldata, nil
}
func (r *Layer2Relayer) constructFinalizeBundlePayloadValidium(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, error) {
log.Info("Packing validium finalizeBundle", "batchHeaderLength", len(dbBatch.BatchHeader), "codecVersion", dbBatch.CodecVersion, "totalL1Messages", endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk, "stateRoot", dbBatch.StateRoot, "withdrawRoot", dbBatch.WithdrawRoot, "withProof", aggProof != nil)
var proof []byte
if aggProof != nil {
proof = aggProof.Proof()
}
calldata, packErr := r.validiumABI.Pack(
"finalizeBundle",
dbBatch.BatchHeader,
new(big.Int).SetUint64(endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk),
proof,
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack validium finalizeBundle: %w", packErr)
}
return calldata, nil
}
// StopSenders stops the senders of the rollup-relayer to prevent querying the removed pending_transaction table in unit tests.
// for unit test
func (r *Layer2Relayer) StopSenders() {

View File

@@ -63,7 +63,7 @@ type FeeData struct {
gasLimit uint64
}
// Sender Transaction sender to send transaction to l1/l2 geth
// Sender Transaction sender to send transaction to l1/l2
type Sender struct {
config *config.SenderConfig
gethClient *gethclient.Client
@@ -105,13 +105,7 @@ func NewSender(ctx context.Context, config *config.SenderConfig, signerConfig *c
return nil, fmt.Errorf("failed to create transaction signer, err: %w", err)
}
// Set pending nonce
nonce, err := client.PendingNonceAt(ctx, transactionSigner.GetAddr())
if err != nil {
return nil, fmt.Errorf("failed to get pending nonce for address %s, err: %w", transactionSigner.GetAddr(), err)
}
transactionSigner.SetNonce(nonce)
// Create sender instance first and then initialize nonce
sender := &Sender{
ctx: ctx,
config: config,
@@ -127,8 +121,13 @@ func NewSender(ctx context.Context, config *config.SenderConfig, signerConfig *c
service: service,
senderType: senderType,
}
sender.metrics = initSenderMetrics(reg)
// Initialize nonce using the new method
if err := sender.resetNonce(); err != nil {
return nil, fmt.Errorf("failed to reset nonce: %w", err)
}
sender.metrics = initSenderMetrics(reg)
go sender.loop(ctx)
return sender, nil
@@ -242,7 +241,10 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
// Check if contain nonce, and reset nonce
// only reset nonce when it is not from resubmit
if strings.Contains(err.Error(), "nonce too low") {
s.resetNonce(context.Background())
if err := s.resetNonce(); err != nil {
log.Warn("failed to reset nonce after failed send transaction", "address", s.transactionSigner.GetAddr().String(), "err", err)
return common.Hash{}, 0, fmt.Errorf("failed to reset nonce after failed send transaction, err: %w", err)
}
}
return common.Hash{}, 0, fmt.Errorf("failed to send transaction, err: %w", err)
}
@@ -327,14 +329,46 @@ func (s *Sender) createTx(feeData *FeeData, target *common.Address, data []byte,
return signedTx, nil
}
// resetNonce reset nonce if send signed tx failed.
func (s *Sender) resetNonce(ctx context.Context) {
nonce, err := s.client.PendingNonceAt(ctx, s.transactionSigner.GetAddr())
// initializeNonce initializes the nonce by taking the maximum of database nonce and pending nonce.
func (s *Sender) initializeNonce() (uint64, error) {
// Get maximum nonce from database
dbNonce, err := s.pendingTransactionOrm.GetMaxNonceBySenderAddress(s.ctx, s.transactionSigner.GetAddr().Hex())
if err != nil {
log.Warn("failed to reset nonce", "address", s.transactionSigner.GetAddr().String(), "err", err)
return
return 0, fmt.Errorf("failed to get max nonce from database for address %s, err: %w", s.transactionSigner.GetAddr().Hex(), err)
}
// Get pending nonce from the client
pendingNonce, err := s.client.PendingNonceAt(s.ctx, s.transactionSigner.GetAddr())
if err != nil {
return 0, fmt.Errorf("failed to get pending nonce for address %s, err: %w", s.transactionSigner.GetAddr().Hex(), err)
}
// Take the maximum of pending nonce and (db nonce + 1)
// Database stores the used nonce, so the next available nonce should be dbNonce + 1
// When dbNonce is -1 (no records), dbNonce + 1 = 0, which is correct
nextDbNonce := uint64(dbNonce + 1)
var finalNonce uint64
if pendingNonce > nextDbNonce {
finalNonce = pendingNonce
} else {
finalNonce = nextDbNonce
}
log.Info("nonce initialization", "address", s.transactionSigner.GetAddr().Hex(), "maxDbNonce", dbNonce, "nextDbNonce", nextDbNonce, "pendingNonce", pendingNonce, "finalNonce", finalNonce)
return finalNonce, nil
}
// resetNonce reset nonce if send signed tx failed.
func (s *Sender) resetNonce() error {
nonce, err := s.initializeNonce()
if err != nil {
log.Error("failed to reset nonce", "address", s.transactionSigner.GetAddr().String(), "err", err)
return fmt.Errorf("failed to reset nonce, err: %w", err)
}
log.Info("reset nonce", "address", s.transactionSigner.GetAddr().String(), "nonce", nonce)
s.transactionSigner.SetNonce(nonce)
return nil
}
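
The nonce-selection rule described in the comments above (take the maximum of the node's pending nonce and dbNonce+1, where -1 means no stored transactions) can be shown in a few lines. A self-contained sketch, with the illustrative helper name nextNonce rather than the sender's actual API:

package main

import "fmt"

// nextNonce mirrors the rule above: the database stores the last used nonce
// (-1 when there are no rows), so the next candidate is dbNonce+1, and the
// final value is whichever of that and the node's pending nonce is larger.
func nextNonce(dbNonce int64, pendingNonce uint64) uint64 {
	nextDB := uint64(dbNonce + 1) // -1 + 1 = 0 when the table is empty
	if pendingNonce > nextDB {
		return pendingNonce
	}
	return nextDB
}

func main() {
	fmt.Println(nextNonce(-1, 0)) // 0: fresh sender, no history anywhere
	fmt.Println(nextNonce(7, 5))  // 8: database is ahead of the node
	fmt.Println(nextNonce(7, 12)) // 12: node is ahead (e.g. txs sent elsewhere)
}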
func (s *Sender) createReplacingTransaction(tx *gethTypes.Transaction, baseFee, blobBaseFee uint64) (*gethTypes.Transaction, error) {
@@ -612,6 +646,16 @@ func (s *Sender) checkPendingTransaction() {
}
if err := s.client.SendTransaction(s.ctx, newSignedTx); err != nil {
if strings.Contains(err.Error(), "nonce too low") {
// When we receive a 'nonce too low' error but cannot find the transaction receipt, it indicates another transaction with this nonce has already been processed, so this transaction will never be mined and should be marked as failed.
log.Warn("nonce too low detected, marking all non-confirmed transactions with same nonce as failed", "nonce", originalTx.Nonce(), "address", s.transactionSigner.GetAddr().Hex(), "txHash", originalTx.Hash().Hex(), "newTxHash", newSignedTx.Hash().Hex(), "err", err)
txHashes := []string{originalTx.Hash().Hex(), newSignedTx.Hash().Hex()}
if updateErr := s.pendingTransactionOrm.UpdateTransactionStatusByTxHashes(s.ctx, txHashes, types.TxStatusConfirmedFailed); updateErr != nil {
log.Error("failed to update transaction status", "hashes", txHashes, "err", updateErr)
return
}
return
}
// SendTransaction failed, need to rollback the previous database changes
if rollbackErr := s.db.Transaction(func(tx *gorm.DB) error {
// Restore original transaction status back to pending

View File

@@ -32,6 +32,7 @@ type BatchProposer struct {
cfg *config.BatchProposerConfig
replayMode bool
validiumMode bool
minCodecVersion encoding.CodecVersion
chainCfg *params.ChainConfig
@@ -53,7 +54,7 @@ type BatchProposer struct {
}
// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, validiumMode bool, reg prometheus.Registerer) *BatchProposer {
log.Info("new batch proposer", "batchTimeoutSec", cfg.BatchTimeoutSec, "maxBlobSize", maxBlobSize, "maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize)
p := &BatchProposer{
@@ -63,7 +64,8 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minC
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
cfg: cfg,
replayMode: false,
replayMode: false, // default is false, set to true when using proposer tool
validiumMode: validiumMode,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,
@@ -171,7 +173,7 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en
// recalculate batch metrics after truncation
var calcErr error
metrics, calcErr = utils.CalculateBatchMetrics(batch, codecVersion)
metrics, calcErr = utils.CalculateBatchMetrics(batch, codecVersion, p.validiumMode)
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics, batch index: %v, error: %w", batch.Index, calcErr)
}
@@ -287,7 +289,7 @@ func (p *BatchProposer) proposeBatch() error {
batch.Blocks = append(batch.Blocks, chunk.Blocks...)
batch.PostL1MessageQueueHash = common.HexToHash(dbChunks[i].PostL1MessageQueueHash)
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version())
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version(), p.validiumMode)
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
}
@@ -312,7 +314,7 @@ func (p *BatchProposer) proposeBatch() error {
batch.PostL1MessageQueueHash = common.HexToHash(dbChunks[i-1].PostL1MessageQueueHash)
batch.Blocks = batch.Blocks[:len(batch.Blocks)-len(lastChunk.Blocks)]
metrics, err = utils.CalculateBatchMetrics(&batch, codec.Version())
metrics, err = utils.CalculateBatchMetrics(&batch, codec.Version(), p.validiumMode)
if err != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", err)
}
@@ -322,7 +324,7 @@ func (p *BatchProposer) proposeBatch() error {
}
}
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version())
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version(), p.validiumMode)
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
}

View File

@@ -100,7 +100,7 @@ func testBatchProposerLimitsCodecV7(t *testing.T) {
DarwinV2Time: new(uint64),
EuclidTime: new(uint64),
EuclidV2Time: new(uint64),
}, db, nil)
}, db, false /* rollup mode */, nil)
bp.TryProposeBatch()
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
@@ -178,7 +178,7 @@ func testBatchProposerBlobSizeLimitCodecV7(t *testing.T) {
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: math.MaxUint32,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
for i := 0; i < 2; i++ {
bp.TryProposeBatch()
@@ -246,7 +246,7 @@ func testBatchProposerMaxChunkNumPerBatchLimitCodecV7(t *testing.T) {
MaxChunksPerBatch: 45,
BatchTimeoutSec: math.MaxUint32,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
bp.TryProposeBatch()
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
@@ -335,7 +335,7 @@ func testBatchProposerUncompressedBatchBytesLimitCodecV8(t *testing.T) {
MaxChunksPerBatch: math.MaxInt32, // No chunk count limit
BatchTimeoutSec: math.MaxUint32, // No timeout limit
MaxUncompressedBatchBytesSize: 4 * 1024, // 4KiB limit
}, encoding.CodecV8, chainConfig, db, nil)
}, encoding.CodecV8, chainConfig, db, false /* rollup mode */, nil)
bp.TryProposeBatch()

View File

@@ -103,7 +103,7 @@ func testBundleProposerLimitsCodecV7(t *testing.T) {
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: 0,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
cp.TryProposeChunk() // chunk1 contains block1
bap.TryProposeBatch() // batch1 contains chunk1

View File

@@ -268,13 +268,9 @@ func (p *ChunkProposer) proposeChunk() error {
return fmt.Errorf("failed to get parent chunk: %w", err)
}
// Currently rollup-relayer only supports >= v7 codec version, it checks the minimum codec version after start.
// In EuclidV2 transition, empty PostL1MessageQueueHash will be naturally initialized to the first chunk's PrevL1MessageQueueHash.
chunk.PrevL1MessageQueueHash = common.HexToHash(parentChunk.PostL1MessageQueueHash)
// previous chunk is not CodecV7, this means this is the first chunk of the fork.
if encoding.CodecVersion(parentChunk.CodecVersion) < codecVersion {
chunk.PrevL1MessageQueueHash = common.Hash{}
}
chunk.PostL1MessageQueueHash = chunk.PrevL1MessageQueueHash
var previousPostL1MessageQueueHash common.Hash

View File

@@ -88,9 +88,9 @@ func (w *L2WatcherClient) getAndStoreBlocks(ctx context.Context, from, to uint64
var blocks []*encoding.Block
for number := from; number <= to; number++ {
log.Debug("retrieving block", "height", number)
block, err := w.GetBlockByNumberOrHash(ctx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(number)))
block, err := w.BlockByNumber(ctx, new(big.Int).SetUint64(number))
if err != nil {
return fmt.Errorf("failed to GetBlockByNumberOrHash: %v. number: %v", err, number)
return fmt.Errorf("failed to BlockByNumber: %v. number: %v", err, number)
}
var count int
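
The watcher change above swaps the number-or-hash wrapper for a plain BlockByNumber call. A minimal standalone sketch using the upstream go-ethereum client (the repository uses the scroll-tech fork, assumed here to expose the same method signature); the endpoint is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Placeholder endpoint; the watcher uses its own configured L2 RPC.
	client, err := ethclient.Dial("https://rpc.scroll.io")
	if err != nil {
		log.Fatal(err)
	}
	// BlockByNumber returns a fully populated block (header and transactions),
	// which is what getAndStoreBlocks needs.
	block, err := client.BlockByNumber(context.Background(), new(big.Int).SetUint64(100))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(block.Hash().Hex(), len(block.Transactions()))
}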

View File

@@ -125,7 +125,7 @@ func NewProposerTool(ctx context.Context, cancel context.CancelFunc, cfg *config
chunkProposer := NewChunkProposer(ctx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, chainCfg, db, nil)
chunkProposer.SetReplayDB(dbForReplay)
batchProposer := NewBatchProposer(ctx, cfg.L2Config.BatchProposerConfig, minCodecVersion, chainCfg, db, nil)
batchProposer := NewBatchProposer(ctx, cfg.L2Config.BatchProposerConfig, minCodecVersion, chainCfg, db, false /* rollup mode */, nil)
batchProposer.SetReplayDB(dbForReplay)
bundleProposer := NewBundleProposer(ctx, cfg.L2Config.BundleProposerConfig, minCodecVersion, chainCfg, db, nil)

View File

@@ -285,7 +285,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
startChunkIndex = parentBatch.EndChunkIndex + 1
}
batchMeta, err := rutils.GetBatchMetadata(batch, codecVersion)
batchMeta, err := rutils.GetBatchMetadata(batch, codecVersion, metrics.ValidiumMode)
if err != nil {
log.Error("failed to get batch metadata", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)

View File

@@ -115,7 +115,8 @@ func (o *BlobUpload) InsertOrUpdateBlobUpload(ctx context.Context, batchIndex ui
return fmt.Errorf("BlobUpload.InsertOrUpdateBlobUpload query error: %w, batch index: %v, batch_hash: %v, platform: %v", err, batchIndex, batchHash, platform)
}
if err := db.Model(&existing).Update("status", int16(status)).Error; err != nil {
if err := db.Model(&existing).Where("batch_index = ? AND batch_hash = ? AND platform = ? AND deleted_at IS NULL",
batchIndex, batchHash, int16(platform)).Update("status", int16(status)).Error; err != nil {
return fmt.Errorf("BlobUpload.InsertOrUpdateBlobUpload update error: %w, batch index: %v, batch_hash: %v, platform: %v", err, batchIndex, batchHash, platform)
}

View File

@@ -597,3 +597,61 @@ func TestPendingTransactionOrm(t *testing.T) {
err = pendingTransactionOrm.DeleteTransactionByTxHash(context.Background(), common.HexToHash("0x123"))
assert.Error(t, err) // Should return error for non-existent transaction
}
func TestPendingTransaction_GetMaxNonceBySenderAddress(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
// When there are no transactions for this sender address, should return -1
maxNonce, err := pendingTransactionOrm.GetMaxNonceBySenderAddress(context.Background(), "0xdeadbeef")
assert.NoError(t, err)
assert.Equal(t, int64(-1), maxNonce)
// Insert two transactions with different nonces for the same sender address
senderMeta := &SenderMeta{
Name: "testName",
Service: "testService",
Address: common.HexToAddress("0xdeadbeef"),
Type: types.SenderTypeCommitBatch,
}
tx0 := gethTypes.NewTx(&gethTypes.DynamicFeeTx{
Nonce: 1,
To: &common.Address{},
Data: []byte{},
Gas: 21000,
AccessList: gethTypes.AccessList{},
Value: big.NewInt(0),
ChainID: big.NewInt(1),
GasTipCap: big.NewInt(0),
GasFeeCap: big.NewInt(1),
V: big.NewInt(0),
R: big.NewInt(0),
S: big.NewInt(0),
})
tx1 := gethTypes.NewTx(&gethTypes.DynamicFeeTx{
Nonce: 3,
To: &common.Address{},
Data: []byte{},
Gas: 22000,
AccessList: gethTypes.AccessList{},
Value: big.NewInt(0),
ChainID: big.NewInt(1),
GasTipCap: big.NewInt(1),
GasFeeCap: big.NewInt(2),
V: big.NewInt(0),
R: big.NewInt(0),
S: big.NewInt(0),
})
err = pendingTransactionOrm.InsertPendingTransaction(context.Background(), "test", senderMeta, tx0, 0)
assert.NoError(t, err)
err = pendingTransactionOrm.InsertPendingTransaction(context.Background(), "test", senderMeta, tx1, 0)
assert.NoError(t, err)
// Now the max nonce for this sender should be 3
maxNonce, err = pendingTransactionOrm.GetMaxNonceBySenderAddress(context.Background(), senderMeta.Address.String())
assert.NoError(t, err)
assert.Equal(t, int64(3), maxNonce)
}

View File

@@ -3,6 +3,7 @@ package orm
import (
"bytes"
"context"
"errors"
"fmt"
"time"
@@ -191,6 +192,25 @@ func (o *PendingTransaction) UpdateTransactionStatusByTxHash(ctx context.Context
return nil
}
// UpdateTransactionStatusByTxHashes updates the status of multiple transactions by their hashes in one SQL statement
func (o *PendingTransaction) UpdateTransactionStatusByTxHashes(ctx context.Context, txHashes []string, status types.TxStatus, dbTX ...*gorm.DB) error {
if len(txHashes) == 0 {
return nil
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&PendingTransaction{})
db = db.Where("hash IN ?", txHashes)
if err := db.Update("status", status).Error; err != nil {
return fmt.Errorf("failed to update transaction status for hashes %v to status %d: %w", txHashes, status, err)
}
return nil
}
// UpdateOtherTransactionsAsFailedByNonce updates the status of all transactions to TxStatusConfirmedFailed for a specific nonce and sender address, excluding a specified transaction hash.
func (o *PendingTransaction) UpdateOtherTransactionsAsFailedByNonce(ctx context.Context, senderAddress string, nonce uint64, hash common.Hash, dbTX ...*gorm.DB) error {
db := o.db
@@ -207,3 +227,27 @@ func (o *PendingTransaction) UpdateOtherTransactionsAsFailedByNonce(ctx context.
}
return nil
}
// GetMaxNonceBySenderAddress retrieves the maximum nonce for a specific sender address.
// Returns -1 if no transactions are found for the given address.
func (o *PendingTransaction) GetMaxNonceBySenderAddress(ctx context.Context, senderAddress string) (int64, error) {
var result struct {
Nonce int64 `gorm:"column:nonce"`
}
err := o.db.WithContext(ctx).
Model(&PendingTransaction{}).
Select("nonce").
Where("sender_address = ?", senderAddress).
Order("nonce DESC").
First(&result).Error
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return -1, nil
}
return -1, fmt.Errorf("failed to get max nonce by sender address, address: %s, err: %w", senderAddress, err)
}
return result.Nonce, nil
}

View File

@@ -1,11 +1,13 @@
package utils
import (
"encoding/binary"
"fmt"
"time"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
)
// ChunkMetrics indicates the metrics for proposing a chunk.
@@ -60,15 +62,18 @@ type BatchMetrics struct {
L1CommitBlobSize uint64
L1CommitUncompressedBatchBytesSize uint64
ValidiumMode bool // default false: rollup mode
// timing metrics
EstimateBlobSizeTime time.Duration
}
// CalculateBatchMetrics calculates batch metrics.
func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetrics, error) {
func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVersion, validiumMode bool) (*BatchMetrics, error) {
metrics := &BatchMetrics{
NumChunks: uint64(len(batch.Chunks)),
FirstBlockTimestamp: batch.Chunks[0].Blocks[0].Header.Time,
ValidiumMode: validiumMode,
}
codec, err := encoding.CodecFromVersion(codecVersion)
@@ -119,8 +124,59 @@ type BatchMetadata struct {
ChallengeDigest common.Hash
}
// encodeBatchHeaderValidium encodes batch header for validium mode and returns both encoded bytes and hash
func encodeBatchHeaderValidium(b *encoding.Batch, codecVersion encoding.CodecVersion) ([]byte, common.Hash, error) {
if b == nil {
return nil, common.Hash{}, fmt.Errorf("batch is nil, version: %v, index: %v", codecVersion, b.Index)
}
if len(b.Blocks) == 0 {
return nil, common.Hash{}, fmt.Errorf("batch contains no blocks, version: %v, index: %v", codecVersion, b.Index)
}
// For validium mode, use the last block hash as commitment to the off-chain data
// TODO: This is a temporary solution, we might use a larger commitment in the future
lastBlock := b.Blocks[len(b.Blocks)-1]
commitment := lastBlock.Header.Hash()
// Batch header field sizes
const (
versionSize = 1
indexSize = 8
parentHashSize = 32
stateRootSize = 32
withdrawRootSize = 32
commitmentSize = 32 // TODO: 32 bytes for now, might use larger commitment in the future
// Total size of validium batch header
validiumBatchHeaderSize = versionSize + indexSize + parentHashSize + stateRootSize + withdrawRootSize + commitmentSize
)
batchBytes := make([]byte, validiumBatchHeaderSize)
// Define offsets for each field
var (
versionOffset = 0
indexOffset = versionOffset + versionSize
parentHashOffset = indexOffset + indexSize
stateRootOffset = parentHashOffset + parentHashSize
withdrawRootOffset = stateRootOffset + stateRootSize
commitmentOffset = withdrawRootOffset + withdrawRootSize
)
batchBytes[versionOffset] = uint8(codecVersion) // version
binary.BigEndian.PutUint64(batchBytes[indexOffset:indexOffset+indexSize], b.Index) // batch index
copy(batchBytes[parentHashOffset:parentHashOffset+parentHashSize], b.ParentBatchHash[0:parentHashSize]) // parentBatchHash
copy(batchBytes[stateRootOffset:stateRootOffset+stateRootSize], b.StateRoot().Bytes()[0:stateRootSize]) // postStateRoot
copy(batchBytes[withdrawRootOffset:withdrawRootOffset+withdrawRootSize], b.WithdrawRoot().Bytes()[0:withdrawRootSize]) // postWithdrawRoot
copy(batchBytes[commitmentOffset:commitmentOffset+commitmentSize], commitment[0:commitmentSize]) // data commitment
hash := crypto.Keccak256Hash(batchBytes)
return batchBytes, hash, nil
}
// GetBatchMetadata retrieves the metadata of a batch.
func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetadata, error) {
func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion, validiumMode bool) (*BatchMetadata, error) {
codec, err := encoding.CodecFromVersion(codecVersion)
if err != nil {
return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err)
@@ -139,9 +195,17 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
ChallengeDigest: daBatch.ChallengeDigest(),
}
// If this function is used in Validium, we encode the batch header differently.
if validiumMode {
batchMeta.BatchBytes, batchMeta.BatchHash, err = encodeBatchHeaderValidium(batch, codecVersion)
if err != nil {
return nil, fmt.Errorf("failed to encode batch header for validium, version: %v, index: %v, err: %w", codecVersion, batch.Index, err)
}
}
batchMeta.BatchBlobDataProof, err = daBatch.BlobDataProofForPointEvaluation()
if err != nil {
return nil, fmt.Errorf("failed to get blob data proof, version: %v, err: %w", codecVersion, err)
return nil, fmt.Errorf("failed to get blob data proof, version: %v, index: %v, err: %w", codecVersion, batch.Index, err)
}
numChunks := len(batch.Chunks)
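
The validium batch header laid out above is a fixed-size buffer: 1 + 8 + 32 + 32 + 32 + 32 = 137 bytes (version, index, parent batch hash, state root, withdraw root, data commitment). A small self-contained sketch of that arithmetic with a hypothetical decoder, for illustration only and not part of the change above:

package main

import (
	"encoding/binary"
	"fmt"
)

// Field sizes as laid out above: 1 + 8 + 32 + 32 + 32 + 32 = 137 bytes in total.
const (
	versionSize      = 1
	indexSize        = 8
	parentHashSize   = 32
	stateRootSize    = 32
	withdrawRootSize = 32
	commitmentSize   = 32
	headerSize       = versionSize + indexSize + parentHashSize + stateRootSize + withdrawRootSize + commitmentSize
)

// decodeIndex reads the batch index back out of an encoded validium header,
// a hypothetical inverse of encodeBatchHeaderValidium for illustration only.
func decodeIndex(header []byte) (uint64, error) {
	if len(header) != headerSize {
		return 0, fmt.Errorf("unexpected header length %d, want %d", len(header), headerSize)
	}
	return binary.BigEndian.Uint64(header[versionSize : versionSize+indexSize]), nil
}

func main() {
	header := make([]byte, headerSize)
	header[0] = 8 // codec version
	binary.BigEndian.PutUint64(header[versionSize:versionSize+indexSize], 42)
	idx, _ := decodeIndex(header)
	fmt.Println(headerSize, idx) // 137 42
}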

View File

@@ -128,7 +128,7 @@ func testCommitBatchAndFinalizeBundleCodecV7(t *testing.T) {
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: 300,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{
MaxBatchNumPerBundle: 2,

View File

@@ -1,7 +1,9 @@
*.vmexe
openvm.toml
*.bin
*.sol
cache
db
*.json
?
?
root-verifier*

View File

@@ -1,35 +0,0 @@
[app_fri_params.fri_params]
log_blowup = 1
log_final_poly_len = 0
num_queries = 100
proof_of_work_bits = 16
[app_vm_config.rv32i]
[app_vm_config.rv32m]
[app_vm_config.io]
[app_vm_config.keccak]
[app_vm_config.castf]
[app_vm_config.modular]
supported_moduli = [
"4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787",
"52435875175126190479447740508185965837690552500527637822603658699938581184513",
]
[app_vm_config.native]
[app_vm_config.pairing]
supported_curves = ["Bls12_381"]
[app_vm_config.sha256]
[app_vm_config.fp2]
supported_moduli = [
["Bls12_381Fp2","4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787"]
]
[[app_vm_config.ecc.supported_curves]]
struct_name = "Bls12_381G1Affine"
modulus = "4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787"
scalar = "52435875175126190479447740508185965837690552500527637822603658699938581184513"
a = "0"
b = "4"

View File

@@ -1,17 +0,0 @@
[app_fri_params.fri_params]
log_blowup = 1
log_final_poly_len = 0
num_queries = 100
proof_of_work_bits = 16
[app_vm_config.rv32i]
[app_vm_config.rv32m]
[app_vm_config.io]
[app_vm_config.keccak]
[app_vm_config.castf]
[app_vm_config.native]

View File

@@ -1,58 +0,0 @@
[app_fri_params.fri_params]
log_blowup = 1
log_final_poly_len = 0
num_queries = 100
proof_of_work_bits = 16
[app_vm_config.rv32i]
[app_vm_config.io]
[app_vm_config.keccak]
[app_vm_config.rv32m]
range_tuple_checker_sizes = [256, 8192]
[app_vm_config.bigint]
range_tuple_checker_sizes = [256, 8192]
[app_vm_config.modular]
supported_moduli = [
"21888242871839275222246405745257275088696311157297823662689037894645226208583",
"21888242871839275222246405745257275088548364400416034343698204186575808495617",
"115792089237316195423570985008687907853269984665640564039457584007908834671663",
"115792089237316195423570985008687907852837564279074904382605163141518161494337",
"115792089210356248762697446949407573530086143415290314195533631308867097853951",
"115792089210356248762697446949407573529996955224135760342422259061068512044369"
]
[app_vm_config.fp2]
supported_moduli = [
["Bn254Fp2","21888242871839275222246405745257275088696311157297823662689037894645226208583"]
]
[app_vm_config.pairing]
supported_curves = ["Bn254"]
[app_vm_config.sha256]
[[app_vm_config.ecc.supported_curves]]
struct_name = "Secp256k1Point"
modulus = "115792089237316195423570985008687907853269984665640564039457584007908834671663"
scalar = "115792089237316195423570985008687907852837564279074904382605163141518161494337"
a = "0"
b = "7"
[[app_vm_config.ecc.supported_curves]]
struct_name = "P256Point"
modulus = "115792089210356248762697446949407573530086143415290314195533631308867097853951"
scalar = "115792089210356248762697446949407573529996955224135760342422259061068512044369"
a = "115792089210356248762697446949407573530086143415290314195533631308867097853948"
b = "41058363725152142129326129780047268409114441015993725554835256314039467401291"
[[app_vm_config.ecc.supported_curves]]
struct_name = "Bn254G1Affine"
modulus = "21888242871839275222246405745257275088696311157297823662689037894645226208583"
scalar = "21888242871839275222246405745257275088548364400416034343698204186575808495617"
a = "0"
b = "3"

View File

@@ -3,7 +3,7 @@
RUST_MIN_STACK ?= 16777216
export RUST_MIN_STACK
CIRCUIT_STUFF = .work/chunk/app.vmexe .work/batch/app.vmexe .work/bundle/app.vmexe
CIRCUIT_STUFF = .work/euclid/chunk/app.vmexe .work/feynman/chunk/app.vmexe
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
PLONKY3_VERSION=$(shell grep -m 1 "Plonky3.git" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
@@ -21,7 +21,7 @@ endif
ZKVM_COMMIT=$(shell echo ${ZKVM_VERSION} | cut -d " " -f2)
$(info ZKVM_COMMIT is ${ZKVM_COMMIT})
PLONKY3_GPU_VERSION=$(shell ./print_plonky3gpu_version.sh | sed -n '2p')
$(info PLONKY3_VERSION is ${PLONKY3_VERSION})
GIT_REV=$(shell git rev-parse --short HEAD)
GO_TAG=$(shell grep "var tag = " ../common/version/version.go | cut -d "\"" -f2)
@@ -32,16 +32,17 @@ else
$(info GO_TAG is ${GO_TAG})
endif
ifeq (${PLONKY3_GPU_VERSION},)
# use plonky3 with CPU
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_VERSION}
else
# use halo2_gpu
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION}
endif
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_VERSION}
E2E_HANDLE_SET = ../tests/prover-e2e/testset.json
DUMP_DIR = .work
prover:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cd ../crates/prover-bin && cargo build --release
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZKVM_COMMIT=${ZKVM_COMMIT} $(MAKE) -C ../crates/gpu_override build
prover_cpu:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --locked --release -p prover
tests_binary:
cargo clean && cargo test --release --no-run
@@ -53,7 +54,15 @@ lint:
cargo fmt --all
$(CIRCUIT_STUFF):
bash .work/download-release.sh
@echo "Download stuff with download-release.sh, and put them into correct directory";
@exit 1;
test_run: $(CIRCUIT_STUFF)
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json
test_e2e_run: $(CIRCUIT_STUFF) ${E2E_HANDLE_SET}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json handle ${E2E_HANDLE_SET}
gen_verifier_stuff:
mkdir -p ${DUMP_DIR}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json --forkname feynman dump ${DUMP_DIR}

View File

@@ -1,20 +1,36 @@
#!/bin/bash
# Define version mapping
declare -A VERSION_MAP
VERSION_MAP["euclid"]="0.4.3"
VERSION_MAP["feynman"]="0.5.0rc1"
# release version
if [ -z "${SCROLL_ZKVM_VERSION}" ]; then
SCROLL_ZKVM_VERSION=$($SHELL ./print_high_zkvm_version.sh | cut -d' ' -f1|cut -c2-)
# Check if first argument is provided and matches a known version name
if [ -n "$1" ] && [ -n "${VERSION_MAP[$1]}" ]; then
SCROLL_ZKVM_VERSION=${VERSION_MAP[$1]}
echo "Setting SCROLL_ZKVM_VERSION to ${SCROLL_ZKVM_VERSION} based on '$1' argument"
else
# Default version if no argument or not recognized
SCROLL_ZKVM_VERSION=0.5.0rc0
fi
fi
echo $SCROLL_ZKVM_VERSION
mkdir -p .work/chunk
mkdir -p .work/batch
mkdir -p .work/bundle
# chunk-circuit exe
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/chunk/app.vmexe -O .work/chunk/app.vmexe
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/chunk/openvm.toml -O .work/chunk/openvm.toml
# batch-circuit exe
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/batch/app.vmexe -O .work/batch/app.vmexe
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/batch/openvm.toml -O .work/batch/openvm.toml
# bundle-circuit exe
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/app.vmexe -O .work/bundle/app.vmexe
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/openvm.toml -O .work/bundle/openvm.toml
# bundle-circuit exe, legacy version, may not exist
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/app_euclidv1.vmexe -O .work/bundle/app_euclidv1.vmexe || echo "legacy app not exist for $SCROLL_ZKVM_VERSION"

View File

@@ -0,0 +1,14 @@
#!/bin/bash
# release version
SCROLL_ZKVM_STUFFDIR="${SCROLL_ZKVM_STUFFDIR:-$(realpath .work)}"
SCROLL_ZKVM_VERSION="${SCROLL_ZKVM_VERSION:-0.5.0rc1}"
DIR_OUTPUT="releases/${SCROLL_ZKVM_VERSION}/verifier"
STUFF_FILES=('root-verifier-committed-exe' 'root-verifier-vm-config' 'verifier.bin' 'openVmVk.json')
for stuff_file in "${STUFF_FILES[@]}"; do
SRC="${SCROLL_ZKVM_STUFFDIR}/${stuff_file}"
TARGET="${DIR_OUTPUT}/${stuff_file}"
aws --profile default s3 cp $SRC s3://circuit-release/scroll-zkvm/$TARGET
done