Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 23:18:07 -05:00)

Compare commits (17 commits): fix/coordi... ... libzkp/deb...
| SHA1 |
|---|
| 0a4b0c054c |
| 6897cc54bd |
| d21fa36803 |
| fc75299eb3 |
| 4bfcd35d0c |
| 6d62f8e5fa |
| 392ae07736 |
| db80b47820 |
| daa1387208 |
| 67b05558e2 |
| 1e447b0fef |
| f7c6ecadf4 |
| 9d94f943e5 |
| de17ad43ff |
| 4233ad928c |
| 3050ccb40f |
| 12e89201a1 |
69  .github/workflows/docker.yml (vendored)
@@ -10,7 +10,8 @@ env:

jobs:
gas_oracle:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -55,7 +56,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

rollup_relayer:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -100,7 +102,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

blob_uploader:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -145,7 +148,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

rollup-db-cli:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -190,7 +194,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

bridgehistoryapi-fetcher:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -235,7 +240,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

bridgehistoryapi-api:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -280,7 +286,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

bridgehistoryapi-db-cli:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -325,7 +332,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

coordinator-api:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -352,48 +360,6 @@ jobs:
REPOSITORY: coordinator-api
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Setup SSH for repositories and clone them
run: |
mkdir -p ~/.ssh
chmod 700 ~/.ssh

# Setup for plonky3-gpu
echo "${{ secrets.PLONKY3_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/plonky3_gpu_key
chmod 600 ~/.ssh/plonky3_gpu_key
eval "$(ssh-agent -s)" > /dev/null
ssh-add ~/.ssh/plonky3_gpu_key 2>/dev/null
ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts 2>/dev/null
echo "Loaded plonky3-gpu key"

# Clone plonky3-gpu repository
./build/dockerfiles/coordinator-api/clone_plonky3_gpu.sh

# Setup for openvm-stark-gpu
echo "${{ secrets.OPENVM_STARK_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/openvm_stark_gpu_key
chmod 600 ~/.ssh/openvm_stark_gpu_key
eval "$(ssh-agent -s)" > /dev/null
ssh-add ~/.ssh/openvm_stark_gpu_key 2>/dev/null
echo "Loaded openvm-stark-gpu key"

# Clone openvm-stark-gpu repository
./build/dockerfiles/coordinator-api/clone_openvm_stark_gpu.sh

# Setup for openvm-gpu
echo "${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/openvm_gpu_key
chmod 600 ~/.ssh/openvm_gpu_key
eval "$(ssh-agent -s)" > /dev/null
ssh-add ~/.ssh/openvm_gpu_key 2>/dev/null
echo "Loaded openvm-gpu key"

# Clone openvm-gpu repository
./build/dockerfiles/coordinator-api/clone_openvm_gpu.sh

# Show number of loaded keys
echo "Number of loaded keys: $(ssh-add -l | wc -l)"

- name: Checkout specific commits
run: |
./build/dockerfiles/coordinator-api/checkout_all.sh
- name: Build and push
uses: docker/build-push-action@v3
env:
@@ -411,7 +377,8 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

coordinator-cron:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
857  Cargo.lock (generated)
File diff suppressed because it is too large
36  Cargo.toml
@@ -17,12 +17,12 @@ repository = "https://github.com/scroll-tech/scroll"
version = "4.5.8"

[workspace.dependencies]
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "2962428", package = "scroll-zkvm-prover" }
scroll-zkvm-verifier-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "2962428", package = "scroll-zkvm-verifier" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "2962428" }
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1", package = "scroll-zkvm-prover" }
scroll-zkvm-verifier-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1", package = "scroll-zkvm-verifier" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1" }

sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/upgrade", features = ["scroll"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/upgrade" }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/openvm-1.3", features = ["scroll"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/openvm-1.3" }

metrics = "0.23.0"
metrics-util = "0.17"
@@ -46,18 +46,18 @@ once_cell = "1.20"
base64 = "0.22"

[patch.crates-io]
revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-handler = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-precompile = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-handler = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-precompile = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }

ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.15.0" }
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v1.2.0" }
@@ -65,4 +65,4 @@ alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch =
[profile.maxperf]
inherits = "release"
lto = "fat"
codegen-units = 1
codegen-units = 1
@@ -11,14 +11,14 @@ require (
github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.11.0
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)

replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c // It's a hotfix for the header hash incompatibility issue, pls change this with caution
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9 // It's a hotfix for the header hash incompatibility issue, pls change this with caution

require (
dario.cat/mergo v1.0.0 // indirect

@@ -311,8 +311,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6 h1:vb2XLvQwCf+F/ifP6P/lfeiQrHY6+Yb/E3R4KHXLqSE=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c h1:IpEBKM6O+xOK2qZVZztGxcobFXkKMb5hAkBEVzfXjVg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9 h1:u371VK8eOU2Z/0SVf5KDI3eJc8msHSpJbav4do/8n38=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
@@ -1,5 +1,5 @@
# Build libzkp dependency
FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.22.12-rust-nightly-2025-02-14 as chef
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as chef
WORKDIR app

FROM chef as planner
@@ -11,21 +11,15 @@ RUN cargo chef prepare --recipe-path recipe.json
FROM chef as zkp-builder
COPY ./rust-toolchain ./
COPY --from=planner /app/recipe.json recipe.json
# run scripts to get openvm-gpu
COPY ./build/dockerfiles/coordinator-api/plonky3-gpu /plonky3-gpu
COPY ./build/dockerfiles/coordinator-api/openvm-stark-gpu /openvm-stark-gpu
COPY ./build/dockerfiles/coordinator-api/openvm-gpu /openvm-gpu
COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
RUN cargo chef cook --release --recipe-path recipe.json

COPY ./crates/ ./crates/
COPY ./Cargo.* ./
COPY .git .git
RUN cargo build --release -p libzkp-c

# Download Go dependencies
FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.22.12-rust-nightly-2025-02-14 as base
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
@@ -45,7 +39,7 @@ RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_a
RUN mv coordinator/internal/logic/libzkp/lib /bin/

# Pull coordinator into a second stage deploy ubuntu container
FROM nvidia/cuda:11.7.1-runtime-ubuntu22.04
FROM ubuntu:20.04
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/lib
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
# ENV CHAIN_ID=534353
@@ -4,3 +4,5 @@ docs/
l2geth/
rpc-gateway/
*target/*

permissionless-batches/conf/
@@ -1,17 +0,0 @@
#!/bin/bash
set -uex

PLONKY3_GPU_COMMIT=450ec18 # feynman
OPENVM_STARK_GPU_COMMIT=e3b2d6 # branch: sync/upstream-250702
OPENVM_GPU_COMMIT=8094b4f # branch: patch-v1.2.0

DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)

# checkout plonky3-gpu
cd $DIR/plonky3-gpu && git checkout ${PLONKY3_GPU_COMMIT}

# checkout openvm-stark-gpu
cd $DIR/openvm-stark-gpu && git checkout ${OPENVM_STARK_GPU_COMMIT}

# checkout openvm-gpu
cd $DIR/openvm-gpu && git checkout ${OPENVM_GPU_COMMIT}
@@ -1,10 +0,0 @@
#!/bin/bash
set -uex

DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)

# clone openvm-gpu if not exists
if [ ! -d $DIR/openvm-gpu ]; then
git clone git@github.com:scroll-tech/openvm-gpu.git $DIR/openvm-gpu
fi
cd $DIR/openvm-gpu && git fetch --all --force
@@ -1,10 +0,0 @@
#!/bin/bash
set -uex

DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)

# clone openvm-stark-gpu if not exists
if [ ! -d $DIR/openvm-stark-gpu ]; then
git clone git@github.com:scroll-tech/openvm-stark-gpu.git $DIR/openvm-stark-gpu
fi
cd $DIR/openvm-stark-gpu && git fetch --all --force
@@ -1,10 +0,0 @@
#!/bin/bash
set -uex

DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)

# clone plonky3-gpu if not exists
if [ ! -d $DIR/plonky3-gpu ]; then
git clone git@github.com:scroll-tech/plonky3-gpu.git $DIR/plonky3-gpu
fi
cd $DIR/plonky3-gpu && git fetch --all --force
@@ -1,92 +0,0 @@
# openvm
# same order and features as zkvm-prover/Cargo.toml.gpu
[patch."ssh://git@github.com/scroll-tech/openvm-gpu.git"]
openvm = { path = "/openvm-gpu/crates/toolchain/openvm", default-features = false }
openvm-algebra-complex-macros = { path = "/openvm-gpu/extensions/algebra/complex-macros", default-features = false }
openvm-algebra-guest = { path = "/openvm-gpu/extensions/algebra/guest", default-features = false }
openvm-bigint-guest = { path = "/openvm-gpu/extensions/bigint/guest", default-features = false }
openvm-build = { path = "/openvm-gpu/crates/toolchain/build", default-features = false }
openvm-circuit = { path = "/openvm-gpu/crates/vm", default-features = false }
openvm-custom-insn = { path = "/openvm-gpu/crates/toolchain/custom_insn", default-features = false }
openvm-continuations = { path = "/openvm-gpu/crates/continuations", default-features = false }
openvm-ecc-guest = { path = "/openvm-gpu/extensions/ecc/guest", default-features = false }
openvm-instructions ={ path = "/openvm-gpu/crates/toolchain/instructions", default-features = false }
openvm-keccak256-guest = { path = "/openvm-gpu/extensions/keccak256/guest", default-features = false }
openvm-native-circuit = { path = "/openvm-gpu/extensions/native/circuit", default-features = false }
openvm-native-compiler = { path = "/openvm-gpu/extensions/native/compiler", default-features = false }
openvm-native-recursion = { path = "/openvm-gpu/extensions/native/recursion", default-features = false }
openvm-native-transpiler = { path = "/openvm-gpu/extensions/native/transpiler", default-features = false }
openvm-pairing-guest = { path = "/openvm-gpu/extensions/pairing/guest", default-features = false }
openvm-rv32im-guest = { path = "/openvm-gpu/extensions/rv32im/guest", default-features = false }
openvm-rv32im-transpiler = { path = "/openvm-gpu/extensions/rv32im/transpiler", default-features = false }
openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
openvm-sha256-guest = { path = "/openvm-gpu/extensions/sha256/guest", default-features = false }
openvm-transpiler = { path = "/openvm-gpu/crates/toolchain/transpiler", default-features = false }

# stark-backend
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { path = "/openvm-stark-gpu/crates/stark-backend", features = ["gpu"] }
openvm-stark-sdk = { path = "/openvm-stark-gpu/crates/stark-sdk", features = ["gpu"] }

[patch."ssh://git@github.com/scroll-tech/openvm-stark-gpu.git"]
openvm-stark-backend = { path = "/openvm-stark-gpu/crates/stark-backend", features = ["gpu"] }
openvm-stark-sdk = { path = "/openvm-stark-gpu/crates/stark-sdk", features = ["gpu"] }

# plonky3
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { path = "/plonky3-gpu/air" }
p3-field = { path = "/plonky3-gpu/field" }
p3-commit = { path = "/plonky3-gpu/commit" }
p3-matrix = { path = "/plonky3-gpu/matrix" }
p3-baby-bear = { path = "/plonky3-gpu/baby-bear" }
p3-koala-bear = { path = "/plonky3-gpu/koala-bear" }
p3-util = { path = "/plonky3-gpu/util" }
p3-challenger = { path = "/plonky3-gpu/challenger" }
p3-dft = { path = "/plonky3-gpu/dft" }
p3-fri = { path = "/plonky3-gpu/fri" }
p3-goldilocks = { path = "/plonky3-gpu/goldilocks" }
p3-keccak = { path = "/plonky3-gpu/keccak" }
p3-keccak-air = { path = "/plonky3-gpu/keccak-air" }
p3-blake3 = { path = "/plonky3-gpu/blake3" }
p3-mds = { path = "/plonky3-gpu/mds" }
p3-monty-31 = { path = "/plonky3-gpu/monty-31" }
p3-merkle-tree = { path = "/plonky3-gpu/merkle-tree" }
p3-poseidon = { path = "/plonky3-gpu/poseidon" }
p3-poseidon2 = { path = "/plonky3-gpu/poseidon2" }
p3-poseidon2-air = { path = "/plonky3-gpu/poseidon2-air" }
p3-symmetric = { path = "/plonky3-gpu/symmetric" }
p3-uni-stark = { path = "/plonky3-gpu/uni-stark" }
p3-maybe-rayon = { path = "/plonky3-gpu/maybe-rayon" }
p3-bn254-fr = { path = "/plonky3-gpu/bn254-fr" }

# gpu crates
[patch."ssh://git@github.com/scroll-tech/plonky3-gpu.git"]
p3-gpu-base = { path = "/plonky3-gpu/gpu-base" }
p3-gpu-build = { path = "/plonky3-gpu/gpu-build" }
p3-gpu-field = { path = "/plonky3-gpu/gpu-field" }
p3-gpu-backend = { path = "/plonky3-gpu/gpu-backend" }
p3-gpu-module = { path = "/plonky3-gpu/gpu-module" }
p3-air = { path = "/plonky3-gpu/air" }
p3-field = { path = "/plonky3-gpu/field" }
p3-commit = { path = "/plonky3-gpu/commit" }
p3-matrix = { path = "/plonky3-gpu/matrix" }
p3-baby-bear = { path = "/plonky3-gpu/baby-bear" }
p3-koala-bear = { path = "/plonky3-gpu/koala-bear" }
p3-util = { path = "/plonky3-gpu/util" }
p3-challenger = { path = "/plonky3-gpu/challenger" }
p3-dft = { path = "/plonky3-gpu/dft" }
p3-fri = { path = "/plonky3-gpu/fri" }
p3-goldilocks = { path = "/plonky3-gpu/goldilocks" }
p3-keccak = { path = "/plonky3-gpu/keccak" }
p3-keccak-air = { path = "/plonky3-gpu/keccak-air" }
p3-blake3 = { path = "/plonky3-gpu/blake3" }
p3-mds = { path = "/plonky3-gpu/mds" }
p3-monty-31 = { path = "/plonky3-gpu/monty-31" }
p3-merkle-tree = { path = "/plonky3-gpu/merkle-tree" }
p3-poseidon = { path = "/plonky3-gpu/poseidon" }
p3-poseidon2 = { path = "/plonky3-gpu/poseidon2" }
p3-poseidon2-air = { path = "/plonky3-gpu/poseidon2-air" }
p3-symmetric = { path = "/plonky3-gpu/symmetric" }
p3-uni-stark = { path = "/plonky3-gpu/uni-stark" }
p3-maybe-rayon = { path = "/plonky3-gpu/maybe-rayon" }
p3-bn254-fr = { path = "/plonky3-gpu/bn254-fr" }
@@ -1,2 +0,0 @@
[url "https://github.com/"]
insteadOf = ssh://git@github.com/
@@ -4,3 +4,5 @@ docs/
l2geth/
rpc-gateway/
*target/*

permissionless-batches/conf/
@@ -4,3 +4,5 @@ docs/
l2geth/
rpc-gateway/
*target/*

permissionless-batches/conf/
@@ -1,5 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
*target/*

permissionless-batches/conf/
30  build/dockerfiles/recovery_permissionless_batches.Dockerfile (Normal file)
@@ -0,0 +1,30 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base

WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

# Build rollup_relayer
FROM base as builder

RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/permissionless_batches/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer

# Pull rollup_relayer into a second stage deploy ubuntu container
FROM ubuntu:20.04

RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y

ENV CGO_LDFLAGS="-ldl"

COPY --from=builder /bin/rollup_relayer /bin/
WORKDIR /app
ENTRYPOINT ["rollup_relayer"]
@@ -0,0 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*

permissionless-batches/conf/
@@ -1,5 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
*target/*

permissionless-batches/conf/
@@ -5,7 +5,7 @@ import (
"runtime/debug"
)

var tag = "v4.5.26"
var tag = "v4.5.38"

var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -1,4 +1,4 @@
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator libzkp

IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
@@ -86,6 +86,10 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
var tmpBatchTask *orm.Batch

if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeBatch) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}

tmpBatchTask, getTaskError = bp.batchOrm.GetBatchByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
log.Error("failed to get batch has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
@@ -95,6 +99,14 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped batch. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpBatchTask, getTaskError = bp.batchOrm.GetBatchByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected batch", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBatchTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}

if tmpBatchTask == nil {

@@ -84,6 +84,10 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
var tmpBundleTask *orm.Bundle

if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeBundle) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}

tmpBundleTask, getTaskError = bp.bundleOrm.GetBundleByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
log.Error("failed to get bundle has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
@@ -93,6 +97,14 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped bundle. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpBundleTask, getTaskError = bp.bundleOrm.GetBundleByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected bundle", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBundleTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}

if tmpBundleTask == nil {
@@ -234,9 +246,14 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
return nil, fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", task.TaskID)
}

parentBatch, err := bp.batchOrm.GetBatchByHash(ctx, batches[0].ParentBatchHash)
if err != nil {
return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
var prevStateRoot common.Hash
// this would be common in test cases: the first batch has empty parent
if batches[0].Index > 1 {
parentBatch, err := bp.batchOrm.GetBatchByHash(ctx, batches[0].ParentBatchHash)
if err != nil {
return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
}
prevStateRoot = common.HexToHash(parentBatch.StateRoot)
}

var batchProofs []*message.OpenVMBatchProof
@@ -255,7 +272,7 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov

taskDetail.BundleInfo = &message.OpenVMBundleInfo{
ChainID: bp.cfg.L2.ChainID,
PrevStateRoot: common.HexToHash(parentBatch.StateRoot),
PrevStateRoot: prevStateRoot,
PostStateRoot: common.HexToHash(batches[len(batches)-1].StateRoot),
WithdrawRoot: common.HexToHash(batches[len(batches)-1].WithdrawRoot),
NumBatches: uint32(len(batches)),

@@ -80,7 +80,12 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < 5; i++ {
var getTaskError error
var tmpChunkTask *orm.Chunk

if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeChunk) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}

log.Debug("retrieved assigned task chunk", "taskID", taskCtx.hasAssignedTask.TaskID, "prover", taskCtx.ProverName)
tmpChunkTask, getTaskError = cp.chunkOrm.GetChunkByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
@@ -91,6 +96,14 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped chunk. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpChunkTask, getTaskError = cp.chunkOrm.GetChunkByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected chunk", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpChunkTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}

if tmpChunkTask == nil {
@@ -221,7 +234,7 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
// Get block hashes.
blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)
if dbErr != nil || len(blockHashes) == 0 {
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%w", task.TaskID, dbErr)
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%v", task.TaskID, dbErr)
}

var taskDetailBytes []byte
@@ -5,10 +5,12 @@ package verifier
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strings"

"github.com/scroll-tech/go-ethereum/log"

@@ -117,6 +119,16 @@ func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName
return libzkp.VerifyBundleProof(string(buf), forkName), nil
}

/*
add vk of imcompatilbe circuit app here to avoid we had used them unexpectedly
25/07/15: 0.5.0rc0 is no longer compatible since a breaking change
*/
const blocked_vks = `
rSJNNBpsxBdKlstbIIU/aYc7bHau98Qb2yjZMc5PmDhmGOolp5kYRbvF/VcWcO5HN5ujGs6S00W8pZcCoNQRLQ==,
2Lo7Cebm6SFtcsYXipkcMxIBmVY7UpoMXik/Msm7t2nyvi9EaNGsSnDnaCurscYEF+IcdjPUtVtY9EcD7IKwWg==,
D6YFHwTLZF/U2zpYJPQ3LwJZRm85yA5Vq2iFBqd3Mk4iwOUpS8sbOp3vg2+NDxhhKphgYpuUlykpdsoRhEt+cw==,
`

func (v *Verifier) loadOpenVMVks(cfg config.AssetConfig) error {

vkFileName := cfg.Vkfile
@@ -138,6 +150,16 @@ func (v *Verifier) loadOpenVMVks(cfg config.AssetConfig) error {
if err := json.Unmarshal(byt, &dump); err != nil {
return err
}
if strings.Contains(blocked_vks, dump.Chunk) {
return fmt.Errorf("loaded blocked chunk vk %s", dump.Chunk)
}
if strings.Contains(blocked_vks, dump.Batch) {
return fmt.Errorf("loaded blocked batch vk %s", dump.Batch)
}
if strings.Contains(blocked_vks, dump.Bundle) {
return fmt.Errorf("loaded blocked bundle vk %s", dump.Bundle)
}

v.OpenVMVkMap[dump.Chunk] = struct{}{}
v.OpenVMVkMap[dump.Batch] = struct{}{}
v.OpenVMVkMap[dump.Bundle] = struct{}{}
45  crates/gpu_override/.cargo/config.toml (Normal file)
@@ -0,0 +1,45 @@

[patch."https://github.com/openvm-org/openvm.git"]
openvm-build = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-circuit = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-continuations = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-instructions ={ git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-circuit = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-compiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-recursion = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-rv32im-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
openvm-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }

[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }

[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
"nightly-features",
], tag = "v0.2.1" }
p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
11002  crates/gpu_override/Cargo.lock (generated, Normal file)
File diff suppressed because it is too large

21  crates/gpu_override/Makefile (Normal file)
@@ -0,0 +1,21 @@
.PHONY: build update clean

ZKVM_COMMIT ?= freebuild
PLONKY3_GPU_VERSION=$(shell ./print_plonky3gpu_version.sh | sed -n '2p')
$(info PLONKY3_GPU_VERSION is ${PLONKY3_GPU_VERSION})

GIT_REV ?= $(shell git rev-parse --short HEAD)
GO_TAG ?= $(shell grep "var tag = " ../../common/version/version.go | cut -d "\"" -f2)
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION}
$(info ZK_GPU_VERSION is ${ZK_VERSION})

clean:
cargo clean -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock

# build gpu prover, never touch lock file
build:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock

# update Cargo.lock while override config has been updated
#update:
# GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock
@@ -1,6 +1,6 @@
#!/bin/bash

config_file=~/.cargo/config.toml
config_file=.cargo/config.toml
plonky3_gpu_path=$(grep 'path.*plonky3-gpu' "$config_file" | cut -d'"' -f2 | head -n 1)
plonky3_gpu_path=$(dirname "$plonky3_gpu_path")
@@ -108,38 +108,42 @@ impl ChunkInterpreter for RpcClient<'_> {
.get_block_by_hash(block_hash)
.full()
.await?
.ok_or_else(|| eyre::eyre!("Block not found"))?;
.ok_or_else(|| eyre::eyre!("Block {block_hash} not found"))?;

let number = block.header.number;
let parent_hash = block.header.parent_hash;
if number == 0 {
eyre::bail!("no number in header or use block 0");
}

let prev_state_root = if let Some(witness) = prev_witness {
if witness.header.number != number - 1 {
eyre::bail!(
"the ref witness is not the previous block, expected {} get {}",
number - 1,
witness.header.number,
);
}
witness.header.state_root
} else {
provider
.scroll_disk_root((number - 1).into())
.await?
.disk_root
};

let witness = WitnessBuilder::new()
let mut witness_builder = WitnessBuilder::new()
.block(block)
.chain_id(chain_id)
.execution_witness(provider.debug_execution_witness(number.into()).await?)
.state_root(provider.scroll_disk_root(number.into()).await?.disk_root)?
.prev_state_root(prev_state_root)
.build()?;
.execution_witness(provider.debug_execution_witness(number.into()).await?);

Ok(witness)
let prev_state_root = match prev_witness {
Some(witness) => {
if witness.header.number != number - 1 {
eyre::bail!(
"the ref witness is not the previous block, expected {} get {}",
number - 1,
witness.header.number,
);
}
witness.header.state_root
}
None => {
let parent_block = provider
.get_block_by_hash(parent_hash)
.await?
.expect("parent block should exist");

parent_block.header.state_root
}
};
witness_builder = witness_builder.prev_state_root(prev_state_root);

Ok(witness_builder.build()?)
}

tracing::debug!("fetch witness for {block_hash}");
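The hunk above replaces the unconditional `scroll_disk_root(number - 1)` lookup with a fallback that reads the parent block's header when no previous witness is supplied. A minimal, hypothetical sketch of that selection logic (the types and the fetch closure are placeholders, not the repository's actual provider/builder API):

```rust
// Placeholder types standing in for the real block header and witness.
#[derive(Clone)]
struct Header {
    number: u64,
    state_root: [u8; 32],
}

struct Witness {
    header: Header,
}

/// Pick the previous state root for block `number`: prefer the caller-supplied
/// previous witness (after checking it really is block `number - 1`), otherwise
/// fall back to the parent block header fetched by the caller.
fn prev_state_root(
    number: u64,
    prev_witness: Option<&Witness>,
    fetch_parent_header: impl Fn() -> Option<Header>,
) -> Result<[u8; 32], String> {
    match prev_witness {
        Some(w) if w.header.number != number - 1 => Err(format!(
            "the ref witness is not the previous block, expected {} got {}",
            number - 1,
            w.header.number
        )),
        Some(w) => Ok(w.header.state_root),
        None => fetch_parent_header()
            .map(|h| h.state_root)
            .ok_or_else(|| "parent block should exist".to_string()),
    }
}
```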
1  crates/libzkp/.gitignore (vendored, Normal file)
@@ -0,0 +1 @@
testdata/*.json
@@ -9,7 +9,7 @@ scroll-zkvm-types.workspace = true
scroll-zkvm-verifier-euclid.workspace = true

alloy-primitives.workspace = true #depress the effect of "native-keccak"
sbv-primitives.workspace = true
sbv-primitives = {workspace = true, features = ["scroll-compress-ratio", "scroll"]}
base64.workspace = true
serde.workspace = true
serde_derive.workspace = true
@@ -30,7 +30,7 @@ pub fn checkout_chunk_task(
pub fn gen_universal_task(
task_type: i32,
task_json: &str,
fork_name: &str,
fork_name_str: &str,
expected_vk: &[u8],
interpreter: Option<impl ChunkInterpreter>,
) -> eyre::Result<(B256, String, String)> {
@@ -48,19 +48,40 @@ pub fn gen_universal_task(

let (pi_hash, metadata, mut u_task) = match task_type {
x if x == TaskType::Chunk as i32 => {
let task = serde_json::from_str::<ChunkProvingTask>(task_json)?;
let (pi_hash, metadata, u_task) =
gen_universal_chunk_task(task, fork_name.into(), interpreter)?;
let mut task = serde_json::from_str::<ChunkProvingTask>(task_json)?;
// normailze fork name field in task
task.fork_name = task.fork_name.to_lowercase();
// always respect the fork_name_str (which has been normalized) being passed
// if the fork_name wrapped in task is not match, consider it a malformed task
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in chunk task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
let (pi_hash, metadata, u_task) = utils::panic_catch(move || {
gen_universal_chunk_task(task, fork_name_str.into(), interpreter)
})
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
(pi_hash, AnyMetaData::Chunk(metadata), u_task)
}
x if x == TaskType::Batch as i32 => {
let task = serde_json::from_str::<BatchProvingTask>(task_json)?;
let (pi_hash, metadata, u_task) = gen_universal_batch_task(task, fork_name.into())?;
let mut task = serde_json::from_str::<BatchProvingTask>(task_json)?;
task.fork_name = task.fork_name.to_lowercase();
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in batch task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
let (pi_hash, metadata, u_task) =
utils::panic_catch(move || gen_universal_batch_task(task, fork_name_str.into()))
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
(pi_hash, AnyMetaData::Batch(metadata), u_task)
}
x if x == TaskType::Bundle as i32 => {
let task = serde_json::from_str::<BundleProvingTask>(task_json)?;
let (pi_hash, metadata, u_task) = gen_universal_bundle_task(task, fork_name.into())?;
let mut task = serde_json::from_str::<BundleProvingTask>(task_json)?;
task.fork_name = task.fork_name.to_lowercase();
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in bundle task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
let (pi_hash, metadata, u_task) =
utils::panic_catch(move || gen_universal_bundle_task(task, fork_name_str.into()))
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
(pi_hash, AnyMetaData::Bundle(metadata), u_task)
}
_ => return Err(eyre::eyre!("unrecognized task type {task_type}")),
@@ -111,24 +132,6 @@ pub fn verify_proof(proof: Vec<u8>, fork_name: &str, task_type: TaskType) -> eyr
let verifier = verifier::get_verifier(fork_name)?;

let ret = verifier.lock().unwrap().verify(task_type, &proof)?;

if let Ok(debug_value) = std::env::var("ZKVM_DEBUG_PROOF") {
use std::time::{SystemTime, UNIX_EPOCH};
if !ret && debug_value.to_lowercase() == "true" {
// Dump req.input to a temporary file
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
let filename = format!("/tmp/proof_{}.json", timestamp);
if let Err(e) = std::fs::write(&filename, &proof) {
eprintln!("Failed to write proof to file {}: {}", filename, e);
} else {
println!("Dumped failed proof to {}", filename);
}
}
}

Ok(ret)
}
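The three task branches above apply the same rule: lower-case the fork name embedded in the task and reject the task if it disagrees with the normalized fork name passed by the caller. A small illustrative helper, assuming nothing beyond what the hunk shows (the real code performs this check inline rather than through a named function):

```rust
/// Sketch of the normalization rule applied above: lower-case the fork name
/// carried in the task and require it to match the caller-supplied fork name,
/// treating any mismatch as a malformed task.
fn check_fork_name(task_fork_name: &str, expected: &str) -> Result<String, String> {
    let normalized = task_fork_name.to_lowercase();
    if normalized != expected {
        return Err(format!(
            "fork name in task does not match the calling arg, expected {expected}, got {normalized}"
        ));
    }
    Ok(normalized)
}
```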
@@ -9,7 +9,10 @@ pub use chunk::{ChunkProvingTask, ChunkTask};
pub use chunk_interpreter::ChunkInterpreter;
pub use scroll_zkvm_types::task::ProvingTask;

use crate::proofs::{self, BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata};
use crate::{
proofs::{self, BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata},
utils::panic_catch,
};
use sbv_primitives::B256;
use scroll_zkvm_types::public_inputs::{ForkName, MultiVersionPublicInputs};

@@ -20,25 +23,14 @@ fn check_aggregation_proofs<Metadata>(
where
Metadata: proofs::ProofMetadata,
{
use std::panic::{self, AssertUnwindSafe};

panic::catch_unwind(AssertUnwindSafe(|| {
panic_catch(|| {
for w in proofs.windows(2) {
w[1].metadata
.pi_hash_info()
.validate(w[0].metadata.pi_hash_info(), fork_name);
}
}))
.map_err(|e| {
let error_msg = if let Some(string) = e.downcast_ref::<String>() {
string.clone()
} else if let Some(str) = e.downcast_ref::<&str>() {
str.to_string()
} else {
"Unknown validation error occurred".to_string()
};
eyre::eyre!("Chunk data validation failed: {}", error_msg)
})?;
})
.map_err(|e| eyre::eyre!("Chunk data validation failed: {}", e))?;

Ok(())
}
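This hunk replaces the open-coded `catch_unwind` plus panic-payload downcasting with a shared `panic_catch` utility from the crate's `utils` module. A sketch of what such a helper presumably looks like (an assumption about its signature; the real implementation may differ):

```rust
use std::panic::{self, AssertUnwindSafe};

/// Run a closure and turn any panic into an Err(String) so callers can surface
/// it as a normal error instead of unwinding across the FFI boundary.
fn panic_catch<T>(f: impl FnOnce() -> T) -> Result<T, String> {
    panic::catch_unwind(AssertUnwindSafe(f)).map_err(|e| {
        if let Some(s) = e.downcast_ref::<String>() {
            s.clone()
        } else if let Some(s) = e.downcast_ref::<&str>() {
            (*s).to_string()
        } else {
            "unknown panic".to_string()
        }
    })
}
```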
@@ -4,8 +4,9 @@ use eyre::Result;
use sbv_primitives::{B256, U256};
use scroll_zkvm_types::{
batch::{
BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchInfo, BatchWitness, Envelope, EnvelopeV6,
EnvelopeV7, PointEvalWitness, ReferenceHeader, ToArchievedWitness, N_BLOB_BYTES,
BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderV8, BatchInfo, BatchWitness,
Envelope, EnvelopeV6, EnvelopeV7, EnvelopeV8, PointEvalWitness, ReferenceHeader,
ToArchievedWitness, N_BLOB_BYTES,
},
public_inputs::ForkName,
task::ProvingTask,
@@ -23,37 +24,35 @@ use utils::{base64, point_eval};
#[serde(untagged)]
pub enum BatchHeaderV {
V6(BatchHeaderV6),
V7(BatchHeaderV7),
}

impl From<BatchHeaderV> for ReferenceHeader {
fn from(value: BatchHeaderV) -> Self {
match value {
BatchHeaderV::V6(h) => ReferenceHeader::V6(h),
BatchHeaderV::V7(h) => ReferenceHeader::V7(h),
}
}
V7_8(BatchHeaderV7),
}

impl BatchHeaderV {
pub fn batch_hash(&self) -> B256 {
match self {
BatchHeaderV::V6(h) => h.batch_hash(),
BatchHeaderV::V7(h) => h.batch_hash(),
BatchHeaderV::V7_8(h) => h.batch_hash(),
}
}

pub fn must_v6_header(&self) -> &BatchHeaderV6 {
match self {
BatchHeaderV::V6(h) => h,
BatchHeaderV::V7(_) => panic!("try to pick v7 header"),
_ => panic!("try to pick other header type"),
}
}

pub fn must_v7_header(&self) -> &BatchHeaderV7 {
match self {
BatchHeaderV::V7(h) => h,
BatchHeaderV::V6(_) => panic!("try to pick v6 header"),
BatchHeaderV::V7_8(h) => h,
_ => panic!("try to pick other header type"),
}
}

pub fn must_v8_header(&self) -> &BatchHeaderV8 {
match self {
BatchHeaderV::V7_8(h) => h,
_ => panic!("try to pick other header type"),
}
}
}
@@ -120,20 +119,28 @@ impl BatchProvingTask {
EnvelopeV6::from_slice(self.blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
BatchHeaderV::V7(_) => {
match fork_name {
ForkName::EuclidV2 => (),
_ => unreachable!("hardfork mismatch for da-codec@v6 header: found={fork_name:?}, expected={:?}",
[ForkName::EuclidV2],
),
}
BatchHeaderV::V7_8(_) => {
let padded_blob_bytes = {
let mut padded_blob_bytes = self.blob_bytes.to_vec();
padded_blob_bytes.resize(N_BLOB_BYTES, 0);
padded_blob_bytes
};
EnvelopeV7::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)

match fork_name {
ForkName::EuclidV2 => {
<EnvelopeV7 as Envelope>::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
ForkName::Feynman => {
<EnvelopeV8 as Envelope>::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
f => unreachable!(
"hardfork mismatch for da-codec@v7 header: found={}, expected={:?}",
f,
[ForkName::EuclidV2, ForkName::Feynman],
),
}
}
};

@@ -159,7 +166,11 @@ impl BatchProvingTask {
kzg_proof: kzg_proof.into_inner(),
};

let reference_header = self.batch_header.clone().into();
let reference_header = match fork_name {
ForkName::EuclidV1 => ReferenceHeader::V6(*self.batch_header.must_v6_header()),
ForkName::EuclidV2 => ReferenceHeader::V7(*self.batch_header.must_v7_header()),
ForkName::Feynman => ReferenceHeader::V8(*self.batch_header.must_v8_header()),
};

BatchWitness {
fork_name,
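The `V7` variant is renamed to `V7_8` because one stored header representation now serves both da-codec v7 and v8, and the fork name decides how it is turned into a `ReferenceHeader` (see the `@@ -159,7 +166,11 @@` hunk above). A toy sketch of that fork-driven selection, using placeholder payloads instead of the real header types:

```rust
#[derive(Clone, Copy, Debug)]
enum Fork {
    EuclidV1,
    EuclidV2,
    Feynman,
}

// One stored representation (`V7_8`) backs both the v7 and v8 reference headers.
#[derive(Clone, Copy, Debug)]
enum HeaderV {
    V6(u64),
    V7_8(u64), // placeholder payloads, not the real BatchHeaderV6/V7 types
}

#[derive(Debug)]
enum ReferenceHeader {
    V6(u64),
    V7(u64),
    V8(u64),
}

/// The fork name, not the stored variant alone, picks the concrete header version.
fn reference_header(fork: Fork, header: HeaderV) -> ReferenceHeader {
    match (fork, header) {
        (Fork::EuclidV1, HeaderV::V6(h)) => ReferenceHeader::V6(h),
        (Fork::EuclidV2, HeaderV::V7_8(h)) => ReferenceHeader::V7(h),
        (Fork::Feynman, HeaderV::V7_8(h)) => ReferenceHeader::V8(h),
        (f, h) => panic!("hardfork/header mismatch: {f:?} vs {h:?}"),
    }
}
```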
69  crates/libzkp/tests/tasks.rs (Normal file)
@@ -0,0 +1,69 @@
use libzkp::{gen_universal_task, tasks::chunk_interpreter::ChunkInterpreter, TaskType};
use scroll_zkvm_types::ProvingTask;
use std::{fs, path::Path};

// Global constant for testdata directory
const TESTDATA_DIR: &str = "./testdata";

// Mock interpreter for testing chunk tasks
#[derive(Clone)]
struct MockChunkInterpreter;

impl ChunkInterpreter for MockChunkInterpreter {}

#[cfg(test)]
mod tests {
use super::*;

fn test_gen_universal_task(task_file: &str, task_type: TaskType) -> eyre::Result<ProvingTask> {
// Load chunk task JSON from testdata file
let testdata_path = Path::new(TESTDATA_DIR).join(task_file);
let task_json = fs::read_to_string(&testdata_path)?;

// Parse task_json as raw JSON object and extract fork_name field
let raw_json: serde_json::Value = serde_json::from_str(&task_json)?;
let fork_name = raw_json["fork_name"]
.as_str()
.ok_or_else(|| eyre::eyre!("fork_name field not found or not a string"))?
.to_lowercase();

let mocked_vk = vec![0u8; 32]; // Mock verification key
let interpreter = Some(MockChunkInterpreter);

let (pi_hash, metadata, task) = gen_universal_task(
task_type as i32,
&task_json,
&fork_name,
&mocked_vk,
interpreter,
)?;

assert!(!pi_hash.is_zero(), "PI hash should not be zero");
assert!(!metadata.is_empty(), "Metadata should not be empty");
assert!(!task.is_empty(), "Task should not be empty");

// Dump the task content to testdata directory
let dump_path = Path::new(TESTDATA_DIR).join("dump_univ_task.json");
std::fs::write(&dump_path, &task).ok(); // Dump task content

Ok(serde_json::from_str(&task)?)
}

#[ignore = "need testing stuff"]
#[test]
fn test_gen_universal_task_chunk() {
let _ = test_gen_universal_task("chunk_proving_task.json", TaskType::Chunk).unwrap();
}

#[ignore = "need testing stuff"]
#[test]
fn test_gen_universal_task_batch() {
let _ = test_gen_universal_task("batch_proving_task.json", TaskType::Batch).unwrap();
}

#[ignore = "need testing stuff"]
#[test]
fn test_gen_universal_task_bundle() {
let _ = test_gen_universal_task("bundle_proving_task.json", TaskType::Bundle).unwrap();
}
}
@@ -9,6 +9,16 @@ use std::sync::OnceLock;

static LOG_SETTINGS: OnceLock<Result<(), String>> = OnceLock::new();

fn enable_dump() -> bool {
static ZKVM_DEBUG_DUMP: OnceLock<bool> = OnceLock::new();
*ZKVM_DEBUG_DUMP.get_or_init(|| {
std::env::var("ZKVM_DEBUG")
.or_else(|_| std::env::var("ZKVM_DEBUG_PROOF"))
.map(|s| s.to_lowercase() == "true")
.unwrap_or(false)
})
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_tracing() {
@@ -52,6 +62,7 @@ pub unsafe extern "C" fn init_l2geth(config: *const c_char) {

fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
let fork_name_str = c_char_to_str(fork_name);
let proof_str = proof;
let proof = c_char_to_vec(proof);

match libzkp::verify_proof(proof, fork_name_str, task_type) {
@@ -59,7 +70,24 @@ fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskT
tracing::error!("{:?} verify failed, error: {:#}", task_type, e);
false as c_char
}
Ok(result) => result as c_char,
Ok(result) => {
if !result && enable_dump() {
use std::time::{SystemTime, UNIX_EPOCH};
// Dump req.input to a temporary file
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
let filename = format!("/tmp/proof_{}.json", timestamp);
let cstr = unsafe { std::ffi::CStr::from_ptr(proof_str) };
if let Err(e) = std::fs::write(&filename, cstr.to_bytes()) {
eprintln!("Failed to write proof to file {}: {}", filename, e);
} else {
println!("Dumped failed proof to {}", filename);
}
}
result as c_char
}
}
}

@@ -167,6 +195,22 @@ pub unsafe extern "C" fn gen_universal_task(
expected_pi_hash,
}
} else {
if enable_dump() {
use std::time::{SystemTime, UNIX_EPOCH};
// Dump req.input to a temporary file
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
let c_str = unsafe { std::ffi::CStr::from_ptr(fork_name) };
let filename = format!("/tmp/task_{}_{}.json", c_str.to_str().unwrap(), timestamp);
if let Err(e) = std::fs::write(&filename, task_json.as_bytes()) {
eprintln!("Failed to write task to file {}: {}", filename, e);
} else {
println!("Dumped failed task to {}", filename);
}
}

tracing::error!("gen_universal_task failed, error: {:#}", ret.unwrap_err());
failed_handling_result()
}
@@ -1,32 +0,0 @@
|
||||
|
||||
[patch."https://github.com/openvm-org/stark-backend.git"]
|
||||
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
|
||||
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
|
||||
|
||||
[patch."https://github.com/Plonky3/Plonky3.git"]
|
||||
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
|
||||
"nightly-features",
|
||||
], tag = "v0.2.0" }
|
||||
p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
|
||||
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
@@ -5,9 +5,10 @@ mod zk_circuits_handler;
|
||||
use clap::{ArgAction, Parser, Subcommand};
|
||||
use prover::{LocalProver, LocalProverConfig};
|
||||
use scroll_proving_sdk::{
|
||||
prover::ProverBuilder,
|
||||
prover::{types::ProofType, ProverBuilder},
|
||||
utils::{get_version, init_tracing},
|
||||
};
|
||||
use std::{fs::File, io::BufReader, path::Path};
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(disable_version_flag = true)]
|
||||
@@ -38,6 +39,17 @@ enum Commands {
|
||||
/// path to save the verifier's asset
|
||||
asset_path: String,
|
||||
},
|
||||
Handle {
|
||||
/// path to the JSON file describing the task set to handle
|
||||
task_path: String,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, serde::Deserialize)]
|
||||
struct HandleSet {
|
||||
chunks: Vec<String>,
|
||||
batches: Vec<String>,
|
||||
bundles: Vec<String>,
|
||||
}
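As a rough illustration, a task-set file consumed by the new `Handle` subcommand could look like the sketch below. The field names follow the `HandleSet` struct above; whether each entry is a file path or inline task data depends on `one_shot`, which is not shown in this diff, and the invocation assumes clap's default lower-cased subcommand name plus the binary path used in `prover_run.sh` later in this diff:

```bash
# Hypothetical task-set file and invocation for the Handle subcommand.
cat > /tmp/handle_set.json <<'EOF'
{
  "chunks": ["<chunk task entry>"],
  "batches": ["<batch task entry>"],
  "bundles": ["<bundle task entry>"]
}
EOF
/prover/prover --config /prover/conf/config.json handle /tmp/handle_set.json
```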
|
||||
|
||||
#[tokio::main]
|
||||
@@ -62,6 +74,40 @@ async fn main() -> eyre::Result<()> {
|
||||
println!("dump assets for {fork_name} into {asset_path}");
|
||||
local_prover.dump_verifier_assets(&fork_name, asset_path.as_ref())?;
|
||||
}
|
||||
Some(Commands::Handle { task_path }) => {
|
||||
let file = File::open(Path::new(&task_path))?;
|
||||
let reader = BufReader::new(file);
|
||||
let handle_set: HandleSet = serde_json::from_reader(reader)?;
|
||||
|
||||
let prover = ProverBuilder::new(sdk_config, local_prover)
|
||||
.build()
|
||||
.await
|
||||
.map_err(|e| eyre::eyre!("build prover fail: {e}"))?;
|
||||
|
||||
let prover = std::sync::Arc::new(prover);
|
||||
println!("Handling task set 1: chunks ...");
|
||||
assert!(
|
||||
prover
|
||||
.clone()
|
||||
.one_shot(&handle_set.chunks, ProofType::Chunk)
|
||||
.await
|
||||
);
|
||||
println!("Done! Handling task set 2: batches ...");
|
||||
assert!(
|
||||
prover
|
||||
.clone()
|
||||
.one_shot(&handle_set.batches, ProofType::Batch)
|
||||
.await
|
||||
);
|
||||
println!("Done! Handling task set 3: bundles ...");
|
||||
assert!(
|
||||
prover
|
||||
.clone()
|
||||
.one_shot(&handle_set.bundles, ProofType::Bundle)
|
||||
.await
|
||||
);
|
||||
println!("All done!");
|
||||
}
|
||||
None => {
|
||||
let prover = ProverBuilder::new(sdk_config, local_prover)
|
||||
.build()
|
||||
|
||||
@@ -203,6 +203,10 @@ impl LocalProver {
|
||||
.get(hard_fork_name)
|
||||
.ok_or_else(|| eyre::eyre!("no corresponding config for fork {hard_fork_name}"))?;
|
||||
|
||||
if !config.vks.is_empty() {
|
||||
eyre::bail!("clean vks cache first or we will have wrong dumped vk");
|
||||
}
|
||||
|
||||
let workspace_path = &config.workspace_path;
|
||||
let universal_prover = EuclidV2Handler::new(config);
|
||||
let _ = universal_prover
|
||||
|
||||
go.work.sum (13 lines changed)
@@ -1083,9 +1083,7 @@ github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25
|
||||
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/holiman/uint256 v1.3.0/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
|
||||
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150 h1:vlNjIqmUZ9CMAWsbURYl3a6wZbw7q5RHVvlXTNS/Bs8=
|
||||
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
|
||||
github.com/hydrogen18/memlistener v1.0.0/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
|
||||
github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI=
|
||||
github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
|
||||
@@ -1122,7 +1120,6 @@ github.com/intel/goresctrl v0.3.0 h1:K2D3GOzihV7xSBedGxONSlaw/un1LZgWsc9IfqipN4c
|
||||
github.com/intel/goresctrl v0.3.0/go.mod h1:fdz3mD85cmP9sHD8JUlrNWAxvwM86CrbmVXltEKd7zk=
|
||||
github.com/iris-contrib/jade v1.1.4/go.mod h1:EDqR+ur9piDl6DUgs6qRrlfzmlx/D5UybogqrXvJTBE=
|
||||
github.com/iris-contrib/schema v0.0.6/go.mod h1:iYszG0IOsuIsfzjymw1kMzTL8YQcCWlm65f3wX8J5iA=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U=
|
||||
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
|
||||
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E=
|
||||
@@ -1228,7 +1225,6 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||
@@ -1240,7 +1236,6 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
@@ -1418,6 +1413,8 @@ github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250206083728-ea43834c198f/go.mod h1:Ik3OBLl7cJxPC+CFyCBYNXBPek4wpdzkWehn/y5qLM8=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250225152658-bcfdb48dd939/go.mod h1:AgU8JJxC7+nfs7R7ma35AU7dMAGW7wCw3dRZRefIKyQ=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9 h1:u371VK8eOU2Z/0SVf5KDI3eJc8msHSpJbav4do/8n38=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250729113104-bd8f141bb3e9/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
@@ -1454,7 +1451,6 @@ github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0
|
||||
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
|
||||
github.com/spiffe/go-spiffe/v2 v2.1.1 h1:RT9kM8MZLZIsPTH+HKQEP5yaAk3yd/VBzlINaRjXs8k=
|
||||
github.com/spiffe/go-spiffe/v2 v2.1.1/go.mod h1:5qg6rpqlwIub0JAiF1UK9IMD6BpPTmvG6yfSgDBs5lg=
|
||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I=
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
|
||||
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
|
||||
@@ -1481,7 +1477,6 @@ github.com/tonistiigi/go-archvariant v1.0.0 h1:5LC1eDWiBNflnTF1prCiX09yfNHIxDC/a
|
||||
github.com/tonistiigi/go-archvariant v1.0.0/go.mod h1:TxFmO5VS6vMq2kvs3ht04iPXtu2rUT/erOnGFYfk5Ho=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
|
||||
github.com/ugorji/go v1.2.7 h1:qYhyWUUd6WbiM+C6JZAUkIJt/1WrjzNHY9+KCIjVqTo=
|
||||
github.com/urfave/cli v1.22.12 h1:igJgVw1JdKH+trcLWLeLwZjU9fEfPesQ+9/e4MQ44S8=
|
||||
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
|
||||
@@ -1716,7 +1711,6 @@ golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
|
||||
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
|
||||
golang.org/x/perf v0.0.0-20230113213139-801c7ef9e5c5/go.mod h1:UBKtEnL8aqnd+0JHqZ+2qoMDwtuy6cYhhKNoHLBiTQc=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -1741,11 +1735,9 @@ golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -1811,7 +1803,6 @@ golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.2.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
||||
permissionless-batches/Makefile (new file, 22 lines)
@@ -0,0 +1,22 @@
|
||||
.PHONY: batch_production_submission launch_prover psql check_proving_status
|
||||
|
||||
export SCROLL_ZKVM_VERSION=0.4.2
|
||||
PG_URL=postgres://postgres@localhost:5432/scroll
|
||||
|
||||
batch_production_submission:
|
||||
docker compose --profile batch-production-submission up
|
||||
|
||||
launch_prover:
|
||||
docker compose up -d
|
||||
|
||||
psql:
|
||||
psql 'postgres://postgres@localhost:5432/scroll'
|
||||
|
||||
check_proving_status:
|
||||
@echo "Checking proving status..."
|
||||
@result=$$(psql "${PG_URL}" -t -c "SELECT proving_status = 4 AS is_status_success FROM batch ORDER BY index LIMIT 1;" | tr -d '[:space:]'); \
|
||||
if [ "$$result" = "t" ]; then \
|
||||
echo "✅ Prove succeeded! You're ready to submit permissionless batch and proof!"; \
|
||||
else \
|
||||
echo "Proof is not ready..."; \
|
||||
fi
|
||||
permissionless-batches/README.md (new file, 172 lines)
@@ -0,0 +1,172 @@
|
||||
# Permissionless Batches
|
||||
Permissionless batches (aka enforced batches) is a feature that guarantees users can exit Scroll even if the operator is down or censoring.
|
||||
It allows anyone to take over and submit a batch (permissionless batch submission) together with a proof after a certain time period has passed without a batch being finalized on L1.
|
||||
|
||||
Once permissionless batch mode is activated, the operator can no longer submit batches in a permissioned way. Only the security council can deactivate permissionless batch mode and reinstate the operator as the only batch submitter.
|
||||
There are two types of situations to consider:
|
||||
- `Permissionless batch mode is activated:` This means that finalization has been halted for some time. Now anyone can submit batches using the [batch production toolkit](#batch-production-toolkit).
|
||||
- `Permissionless batch mode is deactivated:` This means that the security council has decided to reinstate the operator as the only batch submitter. The operator needs to [recover](#operator-recovery) the sequencer and relayer to resume batch submission and restore the valid L2 chain.
|
||||
|
||||
|
||||
## Batch production toolkit
|
||||
The batch production toolkit is a set of tools that allow anyone to submit a batch in permissionless mode. It consists of three main components:
|
||||
1. l2geth state recovery from L1
|
||||
2. l2geth block production
|
||||
3. production, proving and submission of batch with `docker-compose.yml`
|
||||
|
||||
### Prerequisites
|
||||
- Unix-like OS, 32GB RAM
|
||||
- Docker
|
||||
- [l2geth](https://github.com/scroll-tech/go-ethereum/) or [Docker image](https://hub.docker.com/r/scrolltech/l2geth) of corresponding [version](https://docs.scroll.io/en/technology/overview/scroll-upgrades/).
|
||||
- access to an Ethereum L1 RPC node (beacon node and execution client)
|
||||
- ability to run a prover
|
||||
- L1 account with funds to pay for the batch submission
|
||||
|
||||
### 1. l2geth state recovery from L1
|
||||
Once permissionless mode is activated, no blocks are being produced and propagated on L2. The first step is to recover the latest state of the L2 chain from L1. This is done by running l2geth in recovery mode.
|
||||
|
||||
Running l2geth in recovery mode requires the following configuration:
|
||||
- `--scroll` or `--scroll-sepolia` - enables Scroll Mainnet or Sepolia mode
|
||||
- `--da.blob.beaconnode` - L1 RPC beacon node
|
||||
- `--l1.endpoint` - L1 RPC execution client
|
||||
- `--da.sync=true` - enables syncing with L1
|
||||
- `--da.recovery` - enables recovery mode
|
||||
- `--da.recovery.initiall1block` - initial L1 block (commit tx of initial batch)
|
||||
- `--da.recovery.initialbatch` - batch where to start recovery from. Can be found on [Scrollscan Explorer](https://scrollscan.com/batches).
|
||||
- `--da.recovery.l2endblock` - until which L2 block recovery should run (optional)
|
||||
|
||||
```bash
|
||||
./build/bin/geth --scroll<-sepolia> \
|
||||
--datadir "tmp/datadir" \
|
||||
--gcmode archive \
|
||||
--http --http.addr "0.0.0.0" --http.port 8545 --http.api "eth,net,web3,debug,scroll" --http.vhosts "*" \
|
||||
--da.blob.beaconnode "<L1 RPC beacon node>" \
|
||||
--l1.endpoint "<L1 RPC execution client>" \
|
||||
--da.sync=true --da.recovery --da.recovery.initiall1block "<initial L1 block (commit tx of initial batch)>" --da.recovery.initialbatch "<batch where to start recovery from>" --da.recovery.l2endblock "<until which L2 block recovery should run (optional)>" \
|
||||
--verbosity 3
|
||||
```
|
||||
|
||||
### 2. l2geth block production
|
||||
After the state is recovered, the next step is to produce blocks on L2. This is done by running l2geth in block production mode.
|
||||
As a prerequisite, the state recovery must be completed and the latest state of the L2 chain must be available.
|
||||
|
||||
You also need to generate a keystore e.g. with [Clef](https://geth.ethereum.org/docs/fundamentals/account-management) to be able to sign blocks.
|
||||
This key does not hold any funds, but it is required for block production to work. Once you have generated the blocks, you can safely discard it (see the sketch below).
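A minimal sketch of creating such a throwaway signing key with geth's built-in account manager (an alternative to Clef; the datadir matches the examples in this README):

```bash
# Create a new key in the datadir's keystore; it is only used to sign blocks and holds no funds.
./build/bin/geth account new --datadir "tmp/datadir"
```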
|
||||
|
||||
Running l2geth in block production mode requires the following configuration:
|
||||
- `--scroll` or `--scroll-sepolia` - enables Scroll Mainnet or Sepolia mode
|
||||
- `--da.blob.beaconnode` - L1 RPC beacon node
|
||||
- `--l1.endpoint` - L1 RPC execution client
|
||||
- `--da.sync=true` - enables syncing with L1
|
||||
- `--da.recovery` - enables recovery mode
|
||||
- `--da.recovery.produceblocks` - enables block production
|
||||
- `--miner.etherbase '0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee' --mine` - enables mining. The address is not used, but is required for mining to work
|
||||
- `--miner.gaslimit 1 --miner.gasprice 1 --miner.maxaccountsnum 100 --rpc.gascap 0 --gpo.ignoreprice 1` - gas settings for block production
|
||||
|
||||
```bash
|
||||
./build/bin/geth --scroll<-sepolia> \
|
||||
--datadir "tmp/datadir" \
|
||||
--gcmode archive \
|
||||
--http --http.addr "0.0.0.0" --http.port 8545 --http.api "eth,net,web3,debug,scroll" --http.vhosts "*" \
|
||||
--da.blob.beaconnode "<L1 RPC beacon node>" \
|
||||
--l1.endpoint "<L1 RPC execution client>" \
|
||||
--da.sync=true --da.recovery --da.recovery.produceblocks \
|
||||
--miner.gaslimit 1 --miner.gasprice 1 --miner.maxaccountsnum 100 --rpc.gascap 0 --gpo.ignoreprice 1 \
|
||||
--miner.etherbase '0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee' --mine \
|
||||
--verbosity 3
|
||||
```
|
||||
|
||||
### 3. production, proving and submission of batch with `docker-compose.yml`
|
||||
After the blocks are produced, the next step is to produce a batch, prove it and submit it to L1. This is done by running the `docker-compose.yml` in the `permissionless-batches` folder.
|
||||
|
||||
|
||||
#### Producing a batch
|
||||
To produce a batch you need to run the `batch-production-submission` profile in `docker-compose.yml`.
|
||||
|
||||
1. Fill `conf/genesis.json` with the latest genesis state from the L2 chain. The genesis for the current fork can be found [here](https://docs.scroll.io/en/technology/overview/scroll-upgrades/).
|
||||
2. Make sure that `l2geth` with your locally produced blocks is running and reachable from the Docker network (e.g. `http://host.docker.internal:8545`)
|
||||
3. Fill in required fields in `conf/relayer/config.json`
|
||||
|
||||
|
||||
Run with `make batch_production_submission`.
|
||||
This will produce chunks, a batch, and a bundle, which will be proven in the next step.
|
||||
`Success! You're ready to generate proofs!` indicates that everything is working correctly and the batch is ready to be proven.
|
||||
|
||||
#### Proving a batch
|
||||
To prove the chunk, batch, and bundle you just generated, you need to run the `prover` profile in `docker-compose.yml`.
|
||||
|
||||
Local Proving:
|
||||
|
||||
1. Hardware spec for the local prover: CPU: 36+ cores, 128 GB memory; GPU: 24 GB memory (e.g. RTX 3090/3090Ti/4090/A10/L4)
|
||||
2. Make sure `verifier` and `high_version_circuit` in `conf/coordinator/config.json` are correct for the [latest fork](https://docs.scroll.io/en/technology/overview/scroll-upgrades/)
|
||||
3. Set the `SCROLL_ZKVM_VERSION` environment variable in the `Makefile` to the correct [version](https://docs.scroll.io/en/technology/overview/scroll-upgrades/).
|
||||
4. Fill in the required fields in `conf/proving-service/config.json`
|
||||
|
||||
Run with `make launch_prover`.
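Once the prover is running, a minimal way to poll for completion is the `check_proving_status` target from the Makefile shown earlier (a sketch; it simply checks the first batch's `proving_status` in the local database):

```bash
# Launch the prover stack, then poll the database once a minute until the proof is ready.
make launch_prover
watch -n 60 make check_proving_status
```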
|
||||
|
||||
|
||||
#### Batch submission
|
||||
To submit the batch you need to run the `batch-production-submission` profile in `docker-compose.yml`.
|
||||
|
||||
1. Fill in required fields in `conf/relayer/config.json` for the sender config.
|
||||
|
||||
Run with `make batch_production_submission`.
|
||||
This will submit the batch to L1 and finalize it. The transaction will be retried in case of failure.
|
||||
|
||||
**Troubleshooting**
|
||||
- In case the submission fails, it will print the calldata for the transaction in an error message. You can use this with `cast call --trace --rpc-url "$SCROLL_L1_DEPLOYMENT_RPC" "$L1_SCROLL_CHAIN_PROXY_ADDR" <calldata>` to see what went wrong (see the sketch after this list).
|
||||
- `0x4df567b9: ErrorNotInEnforcedBatchMode`: permissionless batch mode is not activated, you can't submit a batch
|
||||
- `0xa5d305cc: ErrorBatchIsEmpty`: no blob was provided. This is usually returned if you run the `cast call` while permissionless mode is activated but did not provide a blob in the transaction.
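A filled-in sketch of the troubleshooting command above; the RPC URL and ScrollChain proxy address are placeholders you need to supply, and the calldata comes verbatim from the error message:

```bash
export SCROLL_L1_DEPLOYMENT_RPC="<L1 RPC execution client>"
export L1_SCROLL_CHAIN_PROXY_ADDR="<ScrollChain proxy address on L1>"
# Replays the failing submission locally and prints a call trace,
# including the revert selector (e.g. 0x4df567b9 or 0xa5d305cc).
cast call --trace --rpc-url "$SCROLL_L1_DEPLOYMENT_RPC" "$L1_SCROLL_CHAIN_PROXY_ADDR" "<calldata from the error message>"
```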
|
||||
|
||||
## Operator recovery
|
||||
Operator recovery needs to be run by the rollup operator to resume normal rollup operation after permissionless batch mode is deactivated. It consists of two main components:
|
||||
1. l2geth recovery
|
||||
2. Relayer recovery
|
||||
|
||||
These steps are required to resume permissioned batch submission and restore the valid L2 chain. They will restore the entire history of the batches submitted during permissionless mode.
|
||||
|
||||
### Prerequisites
|
||||
- l2geth with the latest state of the L2 chain (before permissionless mode was activated)
|
||||
- signer key for the sequencer according to Clique consensus
|
||||
- relayer and coordinator are set up, running and up-to-date with the latest state of the L2 chain (before permissionless mode was activated)
|
||||
|
||||
### l2geth recovery
|
||||
Running l2geth in recovery mode requires the following configuration:
|
||||
- `--scroll` or `--scroll-sepolia` - enables Scroll Mainnet or Sepolia mode
|
||||
- `--da.blob.beaconnode` - L1 RPC beacon node
|
||||
- `--l1.endpoint` - L1 RPC execution client
|
||||
- `--da.sync=true` - enables syncing with L1
|
||||
- `--da.recovery` - enables recovery mode
|
||||
- `--da.recovery.signblocks` - enables signing blocks with the sequencer and configured key
|
||||
- `--da.recovery.initiall1block` - initial L1 block (commit tx of initial batch)
|
||||
- `--da.recovery.initialbatch` - batch where to start recovery from. Can be found on [Scrollscan Explorer](https://scrollscan.com/batches).
|
||||
- `--da.recovery.l2endblock` - until which L2 block recovery should run (optional)
|
||||
|
||||
```bash
|
||||
./build/bin/geth --scroll<-sepolia> \
|
||||
--datadir "tmp/datadir" \
|
||||
--gcmode archive \
|
||||
--http --http.addr "0.0.0.0" --http.port 8545 --http.api "eth,net,web3,debug,scroll" --http.vhosts "*" \
|
||||
--da.blob.beaconnode "<L1 RPC beacon node>" \
|
||||
--l1.endpoint "<L1 RPC execution client>" \
|
||||
--da.sync=true --da.recovery --da.recovery.signblocks --da.recovery.initiall1block "<initial L1 block (commit tx of initial batch)>" --da.recovery.initialbatch "<batch where to start recovery from>" --da.recovery.l2endblock "<until which L2 block recovery should run (optional)>" \
|
||||
--verbosity 3
|
||||
```
|
||||
|
||||
After the recovery is finished, start the sequencer in normal operation and continue issuing L2 blocks as normal. This will resume the L2 chain, allow the relayer (after running recovery) to create new batches and allow other L2 follower nodes to sync up the valid and signed L2 chain.
|
||||
|
||||
### Relayer recovery
|
||||
Start the relayer with the following additional top-level configuration:
|
||||
```
|
||||
"recovery_config": {
|
||||
"enable": true
|
||||
}
|
||||
```
|
||||
|
||||
This will make the relayer recover all the chunks, batches and bundles that were submitted during permissionless mode. These batches are marked automatically as proven and finalized.
|
||||
Once this process is finished, start the relayer normally without the recovery config to resume normal operation.
|
||||
```
|
||||
"recovery_config": {
|
||||
"enable": false
|
||||
}
|
||||
```
|
||||
permissionless-batches/conf/coordinator/config.json (new file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"prover_manager": {
|
||||
"provers_per_session": 1,
|
||||
"session_attempts": 100,
|
||||
"chunk_collection_time_sec": 36000,
|
||||
"batch_collection_time_sec": 2700,
|
||||
"bundle_collection_time_sec": 2700,
|
||||
"verifier": {
|
||||
"high_version_circuit" : {
|
||||
"fork_name": "euclid",
|
||||
"assets_path": "/verifier/openvm/verifier",
|
||||
"min_prover_version": "v4.5.7"
|
||||
}
|
||||
}
|
||||
},
|
||||
"db": {
|
||||
"driver_name": "postgres",
|
||||
"dsn": "postgres://db/scroll?sslmode=disable&user=postgres",
|
||||
"maxOpenNum": 200,
|
||||
"maxIdleNum": 20
|
||||
},
|
||||
"l2": {
|
||||
"chain_id": 333333
|
||||
},
|
||||
"auth": {
|
||||
"secret": "e788b62d39254928a821ac1c76b274a8c835aa1e20ecfb6f50eb10e87847de44",
|
||||
"challenge_expire_duration_sec": 10,
|
||||
"login_expire_duration_sec": 3600
|
||||
}
|
||||
}
|
||||
permissionless-batches/conf/coordinator/coordinator_run.sh (new executable file, 76 lines)
@@ -0,0 +1,76 @@
|
||||
#!/usr/bin/bash
|
||||
|
||||
apt update
|
||||
apt install -y wget libdigest-sha-perl
|
||||
|
||||
# release version
|
||||
if [ -z "${SCROLL_ZKVM_VERSION}" ]; then
|
||||
echo "SCROLL_ZKVM_VERSION not set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "${HTTP_PORT}" ]; then
|
||||
echo "HTTP_PORT not set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "${METRICS_PORT}" ]; then
|
||||
echo "METRICS_PORT not set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
case $CHAIN_ID in
|
||||
"5343532222") # staging network
|
||||
echo "staging network not supported"
|
||||
exit 1
|
||||
;;
|
||||
"534353") # alpha network
|
||||
echo "alpha network not supported"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
BASE_DOWNLOAD_DIR="/verifier"
|
||||
# Ensure the base directory exists
|
||||
mkdir -p "$BASE_DOWNLOAD_DIR"
|
||||
|
||||
# Set subdirectories
|
||||
OPENVM_DIR="$BASE_DOWNLOAD_DIR/openvm"
|
||||
|
||||
# Create necessary directories
|
||||
mkdir -p "$OPENVM_DIR/verifier"
|
||||
|
||||
# Define URLs for OpenVM files (No checksum verification)
|
||||
OPENVM_URLS=(
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/verifier.bin"
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/root-verifier-vm-config"
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/root-verifier-committed-exe"
|
||||
)
|
||||
|
||||
# Download OpenVM files (No checksum verification, but skips if file exists)
|
||||
for url in "${OPENVM_URLS[@]}"; do
|
||||
dest_subdir="$OPENVM_DIR/$(basename $(dirname "$url"))"
|
||||
mkdir -p "$dest_subdir"
|
||||
|
||||
filepath="$dest_subdir/$(basename "$url")"
|
||||
echo "Downloading $filepath..."
|
||||
curl -o "$filepath" -L "$url"
|
||||
done
|
||||
|
||||
mkdir -p "$HOME/.openvm"
|
||||
ln -s "$OPENVM_DIR/params" "$HOME/.openvm/params"
|
||||
|
||||
echo "All files downloaded successfully! 🎉"
|
||||
|
||||
mkdir -p /usr/local/bin
|
||||
wget https://github.com/ethereum/solidity/releases/download/v0.8.19/solc-static-linux -O /usr/local/bin/solc
|
||||
chmod +x /usr/local/bin/solc
|
||||
|
||||
# Start coordinator
|
||||
echo "Starting coordinator api"
|
||||
|
||||
RUST_BACKTRACE=1 exec coordinator_api --config /coordinator/config.json \
|
||||
--genesis /coordinator/genesis.json \
|
||||
--http --http.addr "0.0.0.0" --http.port ${HTTP_PORT} \
|
||||
--metrics --metrics.addr "0.0.0.0" --metrics.port ${METRICS_PORT} \
|
||||
--log.debug
|
||||
permissionless-batches/conf/genesis.json (new file, 1 line)
@@ -0,0 +1 @@
|
||||
<fill with correct genesis.json>
|
||||
permissionless-batches/conf/proving-service/config.json (new file, 28 lines)
@@ -0,0 +1,28 @@
|
||||
{
|
||||
"sdk_config": {
|
||||
"prover_name_prefix": "local_prover",
|
||||
"keys_dir": "/keys",
|
||||
"db_path": "/db",
|
||||
"coordinator": {
|
||||
"base_url": "http://172.17.0.1:8556",
|
||||
"retry_count": 10,
|
||||
"retry_wait_time_sec": 10,
|
||||
"connection_timeout_sec": 30
|
||||
},
|
||||
"l2geth": {
|
||||
"endpoint": "<L2 RPC with generated blocks reachable from Docker network>"
|
||||
},
|
||||
"prover": {
|
||||
"circuit_type": 2,
|
||||
"supported_proof_types": [1,2,3],
|
||||
"circuit_version": "v0.13.1"
|
||||
},
|
||||
"health_listener_addr": "0.0.0.0:89"
|
||||
},
|
||||
"circuits": {
|
||||
"euclidV2": {
|
||||
"hard_fork_name": "euclidV2",
|
||||
"workspace_path": "/openvm"
|
||||
}
|
||||
}
|
||||
}
|
||||
permissionless-batches/conf/proving-service/prover_run.sh (new file, 54 lines)
@@ -0,0 +1,54 @@
|
||||
#!/usr/bin/bash
|
||||
|
||||
apt update
|
||||
apt install -y wget curl
|
||||
|
||||
# release version
|
||||
if [ -z "${SCROLL_ZKVM_VERSION}" ]; then
|
||||
echo "SCROLL_ZKVM_VERSION not set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
BASE_DOWNLOAD_DIR="/openvm"
|
||||
# Ensure the base directory exists
|
||||
mkdir -p "$BASE_DOWNLOAD_DIR"
|
||||
|
||||
# Define URLs for OpenVM files (No checksum verification)
|
||||
OPENVM_URLS=(
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/chunk/app.vmexe"
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/chunk/openvm.toml"
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/batch/app.vmexe"
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/batch/openvm.toml"
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/app.vmexe"
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/app_euclidv1.vmexe"
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/openvm.toml"
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/verifier.bin"
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/verifier.sol"
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/digest_1.hex"
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/digest_2.hex"
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/digest_1_euclidv1.hex"
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/digest_2_euclidv1.hex"
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/params/kzg_bn254_22.srs"
|
||||
"https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/params/kzg_bn254_24.srs"
|
||||
)
|
||||
|
||||
# Download OpenVM files (No checksum verification, but skips if file exists)
|
||||
for url in "${OPENVM_URLS[@]}"; do
|
||||
dest_subdir="$BASE_DOWNLOAD_DIR/$(basename $(dirname "$url"))"
|
||||
mkdir -p "$dest_subdir"
|
||||
|
||||
filepath="$dest_subdir/$(basename "$url")"
|
||||
echo "Downloading $filepath..."
|
||||
curl -o "$filepath" -L "$url"
|
||||
done
|
||||
|
||||
mkdir -p "$HOME/.openvm"
|
||||
ln -s "/openvm/params" "$HOME/.openvm/params"
|
||||
|
||||
mkdir -p /usr/local/bin
|
||||
wget https://github.com/ethereum/solidity/releases/download/v0.8.19/solc-static-linux -O /usr/local/bin/solc
|
||||
chmod +x /usr/local/bin/solc
|
||||
|
||||
mkdir -p /openvm/cache
|
||||
|
||||
RUST_MIN_STACK=16777216 RUST_BACKTRACE=1 exec /prover/prover --config /prover/conf/config.json
|
||||
permissionless-batches/conf/relayer/config.json (new file, 49 lines)
@@ -0,0 +1,49 @@
|
||||
{
|
||||
"l1_config": {
|
||||
"endpoint": "<L1 RPC execution node>"
|
||||
},
|
||||
"l2_config": {
|
||||
"confirmations": "0x0",
|
||||
"endpoint": "<L2 RPC with generated blocks reachable from Docker network>",
|
||||
"relayer_config": {
|
||||
"commit_sender_signer_config": {
|
||||
"signer_type": "PrivateKey",
|
||||
"private_key_signer_config": {
|
||||
"private_key": "<the private key of L1 address to submit permissionless batch, please fund it in advance>"
|
||||
}
|
||||
}
|
||||
},
|
||||
"chunk_proposer_config": {
|
||||
"propose_interval_milliseconds": 100,
|
||||
"max_block_num_per_chunk": 100,
|
||||
"max_l2_gas_per_chunk": 20000000,
|
||||
"chunk_timeout_sec": 300,
|
||||
"max_uncompressed_batch_bytes_size": 4194304
|
||||
},
|
||||
"batch_proposer_config": {
|
||||
"propose_interval_milliseconds": 1000,
|
||||
"batch_timeout_sec": 300,
|
||||
"max_chunks_per_batch": 45,
|
||||
"max_uncompressed_batch_bytes_size": 4194304
|
||||
},
|
||||
"bundle_proposer_config": {
|
||||
"max_batch_num_per_bundle": 20,
|
||||
"bundle_timeout_sec": 36000
|
||||
}
|
||||
},
|
||||
"db_config": {
|
||||
"driver_name": "postgres",
|
||||
"dsn": "postgres://172.17.0.1:5432/scroll?sslmode=disable&user=postgres",
|
||||
"maxOpenNum": 200,
|
||||
"maxIdleNum": 20
|
||||
},
|
||||
"recovery_config": {
|
||||
"enable": true,
|
||||
"l1_block_height": "<commit tx of last finalized batch on L1>",
|
||||
"latest_finalized_batch": "<last finalized batch on L1>",
|
||||
"l2_block_height_limit": "<L2 block up to which to produce batch>",
|
||||
"force_latest_finalized_batch": false,
|
||||
"force_l1_message_count": 0,
|
||||
"submit_without_proof": false
|
||||
}
|
||||
}
|
||||
permissionless-batches/docker-compose.yml (new file, 98 lines)
@@ -0,0 +1,98 @@
|
||||
name: permissionless-batches
|
||||
|
||||
services:
|
||||
relayer-batch-production:
|
||||
build:
|
||||
context: ../
|
||||
dockerfile: build/dockerfiles/recovery_permissionless_batches.Dockerfile
|
||||
network_mode: host
|
||||
container_name: permissionless-batches-relayer
|
||||
volumes:
|
||||
- ./conf/relayer/config.json:/app/conf/config.json
|
||||
- ./conf/genesis.json:/app/conf/genesis.json
|
||||
command: "--config /app/conf/config.json --min-codec-version 0"
|
||||
profiles:
|
||||
- batch-production-submission
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
|
||||
db:
|
||||
image: postgres:17.0
|
||||
environment:
|
||||
POSTGRES_HOST_AUTH_METHOD: trust
|
||||
POSTGRES_USER: postgres
|
||||
POSTGRES_DB: scroll
|
||||
healthcheck:
|
||||
test: [ "CMD-SHELL", "pg_isready -U postgres" ]
|
||||
interval: 1s
|
||||
timeout: 1s
|
||||
retries: 10
|
||||
volumes:
|
||||
- db_data:/var/lib/postgresql/data
|
||||
ports:
|
||||
- "5432:5432"
|
||||
|
||||
coordinator-api:
|
||||
image: scrolltech/coordinator-api:v4.5.19
|
||||
volumes:
|
||||
- ./conf/coordinator/config.json:/coordinator/config.json:ro
|
||||
- ./conf/genesis.json:/coordinator/genesis.json:ro
|
||||
- ./conf/coordinator/coordinator_run.sh:/bin/coordinator_run.sh
|
||||
entrypoint: /bin/coordinator_run.sh
|
||||
profiles:
|
||||
- local-prover
|
||||
- cloud-prover
|
||||
ports: [8556:8555]
|
||||
environment:
|
||||
- SCROLL_ZKVM_VERSION=${SCROLL_ZKVM_VERSION}
|
||||
- SCROLL_PROVER_ASSETS_DIR=/verifier/assets/
|
||||
- HTTP_PORT=8555
|
||||
- METRICS_PORT=8390
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8555/coordinator/v1/challenge"]
|
||||
interval: 1s
|
||||
timeout: 1s
|
||||
retries: 10
|
||||
start_period: 5m
|
||||
|
||||
coordinator-cron:
|
||||
build:
|
||||
context: ../
|
||||
dockerfile: build/dockerfiles/coordinator-cron.Dockerfile
|
||||
volumes:
|
||||
- ./conf/coordinator/config.json:/app/conf/config.json
|
||||
command: "--config /app/conf/config.json --verbosity 3"
|
||||
profiles:
|
||||
- local-prover
|
||||
- cloud-prover
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
|
||||
local-prover:
|
||||
image: scrolltech/cuda-prover:v4.5.12-97de9882-6ad5d8c-261b322
|
||||
network_mode: host
|
||||
platform: linux/amd64
|
||||
runtime: nvidia
|
||||
entrypoint: /bin/prover_run.sh
|
||||
environment:
|
||||
- SCROLL_ZKVM_VERSION=${SCROLL_ZKVM_VERSION}
|
||||
- LD_LIBRARY_PATH=/prover:/usr/local/cuda/lib64
|
||||
- RUST_MIN_STACK=16777216
|
||||
- RUST_BACKTRACE=1
|
||||
- RUST_LOG=info
|
||||
volumes:
|
||||
- ./conf/proving-service/config.json:/prover/conf/config.json:ro
|
||||
- ./conf/proving-service/prover_run.sh:/bin/prover_run.sh
|
||||
- ./conf/proving-service/db:/db
|
||||
- ./conf/proving-service/keys:/keys
|
||||
depends_on:
|
||||
coordinator-api:
|
||||
condition: service_healthy
|
||||
|
||||
volumes:
|
||||
db_data:
|
||||
@@ -1,13 +1,12 @@
|
||||
package bridgeabi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
)
|
||||
|
||||
func TestPackCommitBatches(t *testing.T) {
|
||||
@@ -92,20 +91,3 @@ func TestPackSetL2BaseFee(t *testing.T) {
|
||||
_, err = l2GasOracleABI.Pack("setL2BaseFee", baseFee)
|
||||
assert.NoError(err)
|
||||
}
|
||||
|
||||
func TestPrintABISignatures(t *testing.T) {
|
||||
// print all error signatures of ABI
|
||||
abi, err := ScrollChainMetaData.GetAbi()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, method := range abi.Methods {
|
||||
fmt.Println(hexutil.Encode(method.ID[:4]), method.Sig, method.Name)
|
||||
}
|
||||
|
||||
fmt.Println("------------------------------")
|
||||
for _, errors := range abi.Errors {
|
||||
fmt.Println(hexutil.Encode(errors.ID[:4]), errors.Sig, errors.Name)
|
||||
}
|
||||
}
|
||||
|
||||
rollup/abi/validium_abi.go (new file, 22 lines; diff suppressed because one or more lines are too long)
@@ -70,7 +70,7 @@ func action(ctx *cli.Context) error {
|
||||
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
go utils.Loop(subCtx, 2*time.Second, blobUploader.UploadBlobToS3)
|
||||
go utils.Loop(subCtx, 1*time.Second, blobUploader.UploadBlobToS3)
|
||||
|
||||
// Finish start all blob-uploader functions.
|
||||
log.Info("Start blob-uploader successfully", "version", version.Version)
|
||||
|
||||
rollup/cmd/permissionless_batches/app/app.go (new file, 144 lines)
@@ -0,0 +1,144 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/observability"
|
||||
"scroll-tech/common/utils"
|
||||
"scroll-tech/common/version"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/controller/permissionless_batches"
|
||||
"scroll-tech/rollup/internal/controller/watcher"
|
||||
)
|
||||
|
||||
var app *cli.App
|
||||
|
||||
func init() {
|
||||
// Set up rollup-relayer app info.
|
||||
app = cli.NewApp()
|
||||
app.Action = action
|
||||
app.Name = "permissionless-batches"
|
||||
app.Usage = "The Scroll Rollup Relayer for permissionless batch production"
|
||||
app.Version = version.Version
|
||||
app.Flags = append(app.Flags, utils.CommonFlags...)
|
||||
app.Flags = append(app.Flags, utils.RollupRelayerFlags...)
|
||||
app.Commands = []*cli.Command{}
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
return utils.LogSetup(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func action(ctx *cli.Context) error {
|
||||
// Load config file.
|
||||
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
|
||||
cfg, err := config.NewConfig(cfgFile)
|
||||
if err != nil {
|
||||
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
subCtx, cancel := context.WithCancel(ctx.Context)
|
||||
defer cancel()
|
||||
|
||||
// Sanity check config. Make sure the required fields are set.
|
||||
if cfg.RecoveryConfig == nil {
|
||||
return fmt.Errorf("recovery config must be specified")
|
||||
}
|
||||
if cfg.RecoveryConfig.L1BeaconNodeEndpoint == "" {
|
||||
return fmt.Errorf("L1 beacon node endpoint must be specified")
|
||||
}
|
||||
if cfg.RecoveryConfig.L1BlockHeight == 0 {
|
||||
return fmt.Errorf("L1 block height must be specified")
|
||||
}
|
||||
if cfg.RecoveryConfig.LatestFinalizedBatch == 0 {
|
||||
return fmt.Errorf("latest finalized batch must be specified")
|
||||
}
|
||||
|
||||
// init db connection
|
||||
db, err := database.InitDB(cfg.DBConfig)
|
||||
if err != nil {
|
||||
log.Crit("failed to init db connection", "err", err)
|
||||
}
|
||||
defer func() {
|
||||
if err = database.CloseDB(db); err != nil {
|
||||
log.Crit("failed to close db connection", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
registry := prometheus.DefaultRegisterer
|
||||
observability.Server(ctx, db)
|
||||
|
||||
genesisPath := ctx.String(utils.Genesis.Name)
|
||||
genesis, err := utils.ReadGenesis(genesisPath)
|
||||
if err != nil {
|
||||
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
|
||||
}
|
||||
|
||||
minCodecVersion := encoding.CodecVersion(ctx.Uint(utils.MinCodecVersionFlag.Name))
|
||||
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, genesis.Config, db, registry)
|
||||
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, false, registry)
|
||||
bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, minCodecVersion, genesis.Config, db, registry)
|
||||
|
||||
// Init l2geth connection
|
||||
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to connect to L2geth at RPC=%s: %w", cfg.L2Config.Endpoint, err)
|
||||
}
|
||||
|
||||
l2Watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, registry)
|
||||
|
||||
recovery := permissionless_batches.NewRecovery(subCtx, cfg, genesis, db, chunkProposer, batchProposer, bundleProposer, l2Watcher)
|
||||
|
||||
if recovery.RecoveryNeeded() {
|
||||
if err = recovery.Run(); err != nil {
|
||||
return fmt.Errorf("failed to run recovery: %w", err)
|
||||
}
|
||||
log.Info("Success! You're ready to generate proofs!")
|
||||
} else {
|
||||
log.Info("No recovery needed, submitting batch and proof to L1...")
|
||||
submitter, err := permissionless_batches.NewSubmitter(subCtx, db, cfg.L2Config.RelayerConfig, genesis.Config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create submitter: %w", err)
|
||||
}
|
||||
if err = submitter.Submit(!cfg.RecoveryConfig.SubmitWithoutProof); err != nil {
|
||||
return fmt.Errorf("failed to submit batch: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Transaction submitted to L1, waiting for confirmation...")
|
||||
|
||||
// Catch CTRL-C to ensure a graceful shutdown.
|
||||
interrupt := make(chan os.Signal, 1)
|
||||
signal.Notify(interrupt, os.Interrupt)
|
||||
|
||||
select {
|
||||
case <-subCtx.Done():
|
||||
case confirmation := <-submitter.Sender().ConfirmChan():
|
||||
if confirmation.IsSuccessful {
|
||||
log.Info("Transaction confirmed on L1, your permissionless batch is part of the ledger!", "tx hash", confirmation.TxHash)
|
||||
}
|
||||
case <-interrupt:
|
||||
log.Info("CTRL-C received, shutting down...")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run rollup relayer cmd instance.
|
||||
func Run() {
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
_, _ = fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
rollup/cmd/permissionless_batches/main.go (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
package main
|
||||
|
||||
import "scroll-tech/rollup/cmd/permissionless_batches/app"
|
||||
|
||||
func main() {
|
||||
app.Run()
|
||||
}
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/rollup/l1"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
@@ -107,11 +108,37 @@ func action(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, genesis.Config, db, registry)
|
||||
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, registry)
|
||||
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, cfg.L2Config.RelayerConfig.ValidiumMode, registry)
|
||||
bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, minCodecVersion, genesis.Config, db, registry)
|
||||
|
||||
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, registry)
|
||||
|
||||
if cfg.RecoveryConfig != nil && cfg.RecoveryConfig.Enable {
|
||||
log.Info("Starting rollup-relayer in recovery mode", "version", version.Version)
|
||||
|
||||
l1Client, err := ethclient.Dial(cfg.L1Config.Endpoint)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to connect to L1 client: %w", err)
|
||||
}
|
||||
reader, err := l1.NewReader(context.Background(), l1.Config{
|
||||
ScrollChainAddress: genesis.Config.Scroll.L1Config.ScrollChainAddress,
|
||||
L1MessageQueueAddress: genesis.Config.Scroll.L1Config.L1MessageQueueAddress,
|
||||
}, l1Client)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create L1 reader: %w", err)
|
||||
}
|
||||
|
||||
fullRecovery, err := relayer.NewFullRecovery(subCtx, cfg, genesis, db, chunkProposer, batchProposer, bundleProposer, l2watcher, l1Client, reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create full recovery: %w", err)
|
||||
}
|
||||
if err = fullRecovery.RestoreFullPreviousState(); err != nil {
|
||||
log.Crit("failed to restore full previous state", "error", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Watcher loop to fetch missing blocks
|
||||
go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
|
||||
number, loopErr := rutils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
|
||||
@@ -119,7 +146,8 @@ func action(ctx *cli.Context) error {
|
||||
log.Error("failed to get block number", "err", loopErr)
|
||||
return
|
||||
}
|
||||
l2watcher.TryFetchRunningMissingBlocks(number)
|
||||
// errors are logged in the try method as well
|
||||
_ = l2watcher.TryFetchRunningMissingBlocks(number)
|
||||
})
|
||||
|
||||
go utils.Loop(subCtx, time.Duration(cfg.L2Config.ChunkProposerConfig.ProposeIntervalMilliseconds)*time.Millisecond, chunkProposer.TryProposeChunk)
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
"endpoint": "https://rpc.ankr.com/eth",
|
||||
"start_height": 0,
|
||||
"relayer_config": {
|
||||
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
|
||||
"gas_price_oracle_contract_address": "0x0000000000000000000000000000000000000000",
|
||||
"sender_config": {
|
||||
"endpoint": "https://rpc.scroll.io",
|
||||
"escalate_blocks": 1,
|
||||
@@ -36,8 +36,8 @@
|
||||
"endpoint": "https://rpc.scroll.io",
|
||||
"l2_message_queue_address": "0x0000000000000000000000000000000000000000",
|
||||
"relayer_config": {
|
||||
"validium_mode": false,
|
||||
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
|
||||
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
|
||||
"sender_config": {
|
||||
"endpoint": "https://rpc.ankr.com/eth",
|
||||
"escalate_blocks": 1,
|
||||
@@ -123,4 +123,4 @@
|
||||
"maxOpenNum": 200,
|
||||
"maxIdleNum": 20
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,9 +18,10 @@ import (
|
||||
|
||||
// Config load configuration items.
|
||||
type Config struct {
|
||||
L1Config *L1Config `json:"l1_config"`
|
||||
L2Config *L2Config `json:"l2_config"`
|
||||
DBConfig *database.Config `json:"db_config"`
|
||||
L1Config *L1Config `json:"l1_config"`
|
||||
L2Config *L2Config `json:"l2_config"`
|
||||
DBConfig *database.Config `json:"db_config"`
|
||||
RecoveryConfig *RecoveryConfig `json:"recovery_config"`
|
||||
}
|
||||
|
||||
type ConfigForReplay struct {
|
||||
|
||||
@@ -8,4 +8,7 @@ type L1Config struct {
|
||||
StartHeight uint64 `json:"start_height"`
|
||||
// The relayer config
|
||||
RelayerConfig *RelayerConfig `json:"relayer_config"`
|
||||
|
||||
// beacon node url
|
||||
BeaconNodeEndpoint string `json:"beacon_node_endpoint"`
|
||||
}
|
||||
|
||||
rollup/internal/config/recovery.go (new file, 14 lines)
@@ -0,0 +1,14 @@
|
||||
package config
|
||||
|
||||
type RecoveryConfig struct {
|
||||
Enable bool `json:"enable"`
|
||||
|
||||
L1BeaconNodeEndpoint string `json:"l1_beacon_node_endpoint"` // the L1 beacon node endpoint to connect to
|
||||
L1BlockHeight uint64 `json:"l1_block_height"` // the L1 block height to start recovery from
|
||||
LatestFinalizedBatch uint64 `json:"latest_finalized_batch"` // the latest finalized batch number
|
||||
L2BlockHeightLimit uint64 `json:"l2_block_height_limit"` // L2 block up to which to produce batch
|
||||
|
||||
ForceLatestFinalizedBatch bool `json:"force_latest_finalized_batch"` // whether to force usage of the latest finalized batch - mainly used for testing
|
||||
ForceL1MessageCount uint64 `json:"force_l1_message_count"` // force the number of L1 messages, useful for testing
|
||||
SubmitWithoutProof bool `json:"submit_without_proof"` // whether to submit batches without proof, useful for testing
|
||||
}
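For reference, a sketch of the matching `recovery_config` JSON block; the keys are taken from the struct tags above, the values are placeholders, and `l1_beacon_node_endpoint` is the field the permissionless-batches app checks for but which does not appear in the sample relayer config earlier in this diff:

```bash
# Hypothetical recovery_config block, written out here only for illustration.
cat > recovery_config.json <<'EOF'
{
  "recovery_config": {
    "enable": true,
    "l1_beacon_node_endpoint": "<L1 beacon node RPC>",
    "l1_block_height": 0,
    "latest_finalized_batch": 0,
    "l2_block_height_limit": 0,
    "force_latest_finalized_batch": false,
    "force_l1_message_count": 0,
    "submit_without_proof": false
  }
}
EOF
```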
|
||||
@@ -53,6 +53,8 @@ type ChainMonitor struct {
|
||||
// RelayerConfig loads relayer configuration items.
|
||||
// What we need to pay attention to is that
|
||||
type RelayerConfig struct {
|
||||
// ValidiumMode indicates if the relayer is in validium mode.
|
||||
ValidiumMode bool `json:"validium_mode"`
|
||||
// RollupContractAddress store the rollup contract address.
|
||||
RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
|
||||
// GasPriceOracleContractAddress store the scroll messenger contract address.
|
||||
@@ -73,8 +75,6 @@ type RelayerConfig struct {
|
||||
|
||||
// Indicates if bypass features specific to testing environments are enabled.
|
||||
EnableTestEnvBypassFeatures bool `json:"enable_test_env_bypass_features"`
|
||||
// The timeout in seconds for finalizing a batch without proof, only used when EnableTestEnvBypassFeatures is true.
|
||||
FinalizeBatchWithoutProofTimeoutSec uint64 `json:"finalize_batch_without_proof_timeout_sec"`
|
||||
// The timeout in seconds for finalizing a bundle without proof, only used when EnableTestEnvBypassFeatures is true.
|
||||
FinalizeBundleWithoutProofTimeoutSec uint64 `json:"finalize_bundle_without_proof_timeout_sec"`
|
||||
}
|
||||
|
||||
@@ -167,7 +167,7 @@ func (b *BlobUploader) constructBlobCodec(dbBatch *orm.Batch) (*kzg4844.Blob, er
|
||||
Chunks: chunks,
|
||||
}
|
||||
|
||||
case encoding.CodecV7:
|
||||
case encoding.CodecV7, encoding.CodecV8:
|
||||
encodingBatch = &encoding.Batch{
|
||||
Index: dbBatch.Index,
|
||||
ParentBatchHash: common.HexToHash(dbBatch.ParentBatchHash),
|
||||
|
||||
@@ -25,8 +25,6 @@ type S3Uploader struct {
|
||||
func NewS3Uploader(cfg *config.AWSS3Config) (*S3Uploader, error) {
|
||||
// load AWS config
|
||||
var opts []func(*awsconfig.LoadOptions) error
|
||||
opts = append(opts, awsconfig.WithRegion(cfg.Region))
|
||||
|
||||
// if AccessKey && SecretKey provided, use it
|
||||
if cfg.AccessKey != "" && cfg.SecretKey != "" {
|
||||
opts = append(opts, awsconfig.WithCredentialsProvider(
|
||||
@@ -38,6 +36,10 @@ func NewS3Uploader(cfg *config.AWSS3Config) (*S3Uploader, error) {
|
||||
)
|
||||
}
|
||||
|
||||
if cfg.Region != "" {
|
||||
opts = append(opts, awsconfig.WithRegion(cfg.Region))
|
||||
}
|
||||
|
||||
awsCfg, err := awsconfig.LoadDefaultConfig(context.Background(), opts...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load default config: %w", err)
|
||||
|
||||
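As a usage sketch of the uploader constructor above (hedged: the bucket wiring, the environment-variable fallback and the error handling are illustrative assumptions; only NewS3Uploader and the Region/AccessKey/SecretKey fields come from this diff):

// Hypothetical construction of the uploader; static credentials are optional and the
// default AWS credential chain is used when they are left empty.
uploader, err := NewS3Uploader(&config.AWSS3Config{
	Region:    "us-east-1",
	AccessKey: os.Getenv("AWS_ACCESS_KEY_ID"),
	SecretKey: os.Getenv("AWS_SECRET_ACCESS_KEY"),
})
if err != nil {
	log.Crit("failed to create S3 uploader", "err", err)
}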
@@ -0,0 +1,458 @@
|
||||
package permissionless_batches
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
|
||||
"github.com/scroll-tech/go-ethereum/rollup/l1"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/controller/watcher"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultFakeRestoredChunkIndex is the default index of the last restored fake chunk. It allows new chunks to be generated as if earlier chunks had already been processed.
|
||||
defaultFakeRestoredChunkIndex uint64 = 1337
|
||||
// defaultFakeRestoredBundleIndex is the default index of the last restored fake bundle. It allows new bundles to be generated as if earlier bundles had already been processed.
|
||||
defaultFakeRestoredBundleIndex uint64 = 1
|
||||
)
|
||||
|
||||
type MinimalRecovery struct {
|
||||
ctx context.Context
|
||||
cfg *config.Config
|
||||
genesis *core.Genesis
|
||||
db *gorm.DB
|
||||
chunkORM *orm.Chunk
|
||||
batchORM *orm.Batch
|
||||
bundleORM *orm.Bundle
|
||||
|
||||
chunkProposer *watcher.ChunkProposer
|
||||
batchProposer *watcher.BatchProposer
|
||||
bundleProposer *watcher.BundleProposer
|
||||
l2Watcher *watcher.L2WatcherClient
|
||||
}
|
||||
|
||||
func NewRecovery(ctx context.Context, cfg *config.Config, genesis *core.Genesis, db *gorm.DB, chunkProposer *watcher.ChunkProposer, batchProposer *watcher.BatchProposer, bundleProposer *watcher.BundleProposer, l2Watcher *watcher.L2WatcherClient) *MinimalRecovery {
|
||||
return &MinimalRecovery{
|
||||
ctx: ctx,
|
||||
cfg: cfg,
|
||||
genesis: genesis,
|
||||
db: db,
|
||||
chunkORM: orm.NewChunk(db),
|
||||
batchORM: orm.NewBatch(db),
|
||||
bundleORM: orm.NewBundle(db),
|
||||
chunkProposer: chunkProposer,
|
||||
batchProposer: batchProposer,
|
||||
bundleProposer: bundleProposer,
|
||||
l2Watcher: l2Watcher,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *MinimalRecovery) RecoveryNeeded() bool {
|
||||
chunk, err := r.chunkORM.GetLatestChunk(r.ctx)
|
||||
if err != nil || chunk == nil {
|
||||
return true
|
||||
}
|
||||
if chunk.Index <= defaultFakeRestoredChunkIndex {
|
||||
return true
|
||||
}
|
||||
|
||||
batch, err := r.batchORM.GetLatestBatch(r.ctx)
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
if batch.Index <= r.cfg.RecoveryConfig.LatestFinalizedBatch {
|
||||
return true
|
||||
}
|
||||
|
||||
bundle, err := r.bundleORM.GetLatestBundle(r.ctx)
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
if bundle.Index <= defaultFakeRestoredBundleIndex {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
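// Hypothetical wiring (names outside this diff are assumed): a recovery command could gate Run()
// on RecoveryNeeded() so that a database which already holds the restored state is not reset again:
//
//	recovery := NewRecovery(ctx, cfg, genesis, db, chunkProposer, batchProposer, bundleProposer, l2Watcher)
//	if recovery.RecoveryNeeded() {
//		if err := recovery.Run(); err != nil {
//			log.Crit("could not run recovery", "err", err)
//		}
//	}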
|
||||
|
||||
func (r *MinimalRecovery) Run() error {
|
||||
// Make sure we start from a clean state.
|
||||
if err := r.resetDB(); err != nil {
|
||||
return fmt.Errorf("failed to reset DB: %w", err)
|
||||
}
|
||||
|
||||
// Restore minimal previous state required to be able to create new chunks, batches and bundles.
|
||||
restoredFinalizedChunk, restoredFinalizedBatch, restoredFinalizedBundle, err := r.restoreMinimalPreviousState()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to restore minimal previous state: %w", err)
|
||||
}
|
||||
|
||||
// Fetch and insert the missing blocks from the last block in the latestFinalizedBatch to the latest L2 block.
|
||||
fromBlock := restoredFinalizedChunk.EndBlockNumber
|
||||
toBlock, err := r.fetchL2Blocks(fromBlock, r.cfg.RecoveryConfig.L2BlockHeightLimit)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch L2 blocks: %w", err)
|
||||
}
|
||||
|
||||
// Create chunks for L2 blocks.
|
||||
log.Info("Creating chunks for L2 blocks", "from", fromBlock, "to", toBlock)
|
||||
|
||||
var latestChunk *orm.Chunk
|
||||
var count int
|
||||
for {
|
||||
if err = r.chunkProposer.ProposeChunk(); err != nil {
|
||||
return fmt.Errorf("failed to propose chunk: %w", err)
|
||||
}
|
||||
count++
|
||||
|
||||
latestChunk, err = r.chunkORM.GetLatestChunk(r.ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get latest latestFinalizedChunk: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Chunk created", "index", latestChunk.Index, "hash", latestChunk.Hash, "StartBlockNumber", latestChunk.StartBlockNumber, "EndBlockNumber", latestChunk.EndBlockNumber, "TotalL1MessagesPoppedBefore", latestChunk.TotalL1MessagesPoppedBefore)
|
||||
|
||||
// We have created chunks for all available L2 blocks.
|
||||
if latestChunk.EndBlockNumber >= toBlock {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("Chunks created", "count", count, "latest Chunk", latestChunk.Index, "hash", latestChunk.Hash, "StartBlockNumber", latestChunk.StartBlockNumber, "EndBlockNumber", latestChunk.EndBlockNumber, "TotalL1MessagesPoppedBefore", latestChunk.TotalL1MessagesPoppedBefore, "PrevL1MessageQueueHash", latestChunk.PrevL1MessageQueueHash, "PostL1MessageQueueHash", latestChunk.PostL1MessageQueueHash)
|
||||
|
||||
// Create a batch for the created chunks. We only allow 1 batch since it needs to be submitted (and finalized) with a proof in a single step.
|
||||
log.Info("Creating batch for chunks", "from", restoredFinalizedChunk.Index+1, "to", latestChunk.Index)
|
||||
|
||||
r.batchProposer.TryProposeBatch()
|
||||
latestBatch, err := r.batchORM.GetLatestBatch(r.ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get latest latestFinalizedBatch: %w", err)
|
||||
}
|
||||
|
||||
// Sanity check that the batch was created correctly:
|
||||
// 1. should be a new batch
|
||||
// 2. should contain all chunks created
|
||||
if restoredFinalizedBatch.Index+1 != latestBatch.Index {
|
||||
return fmt.Errorf("batch was not created correctly, expected %d but got %d", restoredFinalizedBatch.Index+1, latestBatch.Index)
|
||||
}
|
||||
|
||||
firstChunkInBatch, err := r.chunkORM.GetChunkByIndex(r.ctx, latestBatch.StartChunkIndex)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get first chunk in batch: %w", err)
|
||||
}
|
||||
lastChunkInBatch, err := r.chunkORM.GetChunkByIndex(r.ctx, latestBatch.EndChunkIndex)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get last chunk in batch: %w", err)
|
||||
}
|
||||
|
||||
// Make sure that the batch contains all previously created chunks and thus all blocks. If not, the user will need to
// produce another batch (by running the application again) starting from the end block of the last chunk in the batch + 1.
|
||||
if latestBatch.EndChunkIndex != latestChunk.Index {
|
||||
log.Warn("Produced batch does not contain all chunks and blocks. You'll need to produce another batch starting from end block+1.", "starting block", firstChunkInBatch.StartBlockNumber, "end block", lastChunkInBatch.EndBlockNumber, "latest block", latestChunk.EndBlockNumber)
|
||||
}
|
||||
|
||||
log.Info("Batch created", "index", latestBatch.Index, "hash", latestBatch.Hash, "StartChunkIndex", latestBatch.StartChunkIndex, "EndChunkIndex", latestBatch.EndChunkIndex, "starting block", firstChunkInBatch.StartBlockNumber, "ending block", lastChunkInBatch.EndBlockNumber, "PrevL1MessageQueueHash", latestBatch.PrevL1MessageQueueHash, "PostL1MessageQueueHash", latestBatch.PostL1MessageQueueHash)
|
||||
|
||||
if err = r.bundleProposer.UpdateDBBundleInfo([]*orm.Batch{latestBatch}, encoding.CodecVersion(latestBatch.CodecVersion)); err != nil {
|
||||
return fmt.Errorf("failed to create bundle: %w", err)
|
||||
}
|
||||
|
||||
latestBundle, err := r.bundleORM.GetLatestBundle(r.ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get latest bundle: %w", err)
|
||||
}
|
||||
|
||||
// Sanity check that the bundle was created correctly:
|
||||
// 1. should be a new bundle
|
||||
// 2. should only contain 1 batch, the one we created
|
||||
if restoredFinalizedBundle.Index == latestBundle.Index {
|
||||
return fmt.Errorf("bundle was not created correctly")
|
||||
}
|
||||
if latestBundle.StartBatchIndex != latestBatch.Index || latestBundle.EndBatchIndex != latestBatch.Index {
|
||||
return fmt.Errorf("bundle does not contain the correct batch: %d != %d", latestBundle.StartBatchIndex, latestBatch.Index)
|
||||
}
|
||||
|
||||
log.Info("Bundle created", "index", latestBundle.Index, "hash", latestBundle.Hash, "StartBatchIndex", latestBundle.StartBatchIndex, "EndBatchIndex", latestBundle.EndBatchIndex, "starting block", firstChunkInBatch.StartBlockNumber, "ending block", lastChunkInBatch.EndBlockNumber)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// restoreMinimalPreviousState restores the minimal previous state required to be able to create new chunks, batches and bundles.
|
||||
func (r *MinimalRecovery) restoreMinimalPreviousState() (*orm.Chunk, *orm.Batch, *orm.Bundle, error) {
|
||||
log.Info("Restoring previous state with", "L1 block height", r.cfg.RecoveryConfig.L1BlockHeight, "latest finalized batch", r.cfg.RecoveryConfig.LatestFinalizedBatch)
|
||||
|
||||
l1Client, err := ethclient.Dial(r.cfg.L1Config.Endpoint)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("failed to connect to L1 client: %w", err)
|
||||
}
|
||||
reader, err := l1.NewReader(r.ctx, l1.Config{
|
||||
ScrollChainAddress: r.genesis.Config.Scroll.L1Config.ScrollChainAddress,
|
||||
L1MessageQueueAddress: r.genesis.Config.Scroll.L1Config.L1MessageQueueV2Address,
|
||||
}, l1Client)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("failed to create L1 reader: %w", err)
|
||||
}
|
||||
|
||||
// 1. Sanity check user input: Make sure that the user's L1 block height is not higher than the latest finalized block number.
|
||||
latestFinalizedL1Block, err := reader.GetLatestFinalizedBlockNumber()
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("failed to get latest finalized L1 block number: %w", err)
|
||||
}
|
||||
if r.cfg.RecoveryConfig.L1BlockHeight > latestFinalizedL1Block {
|
||||
return nil, nil, nil, fmt.Errorf("specified L1 block height is higher than the latest finalized block number: %d > %d", r.cfg.RecoveryConfig.L1BlockHeight, latestFinalizedL1Block)
|
||||
}
|
||||
|
||||
log.Info("Latest finalized L1 block number", "latest finalized L1 block", latestFinalizedL1Block)
|
||||
|
||||
// 2. Make sure that the specified batch is indeed finalized on the L1 rollup contract and is the latest finalized batch.
|
||||
var latestFinalizedBatchIndex uint64
|
||||
if r.cfg.RecoveryConfig.ForceLatestFinalizedBatch {
|
||||
latestFinalizedBatchIndex = r.cfg.RecoveryConfig.LatestFinalizedBatch
|
||||
} else {
|
||||
latestFinalizedBatchIndex, err = reader.LatestFinalizedBatchIndex(latestFinalizedL1Block)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("failed to get latest finalized batch: %w", err)
|
||||
}
|
||||
if r.cfg.RecoveryConfig.LatestFinalizedBatch != latestFinalizedBatchIndex {
|
||||
return nil, nil, nil, fmt.Errorf("batch %d is not the latest finalized batch: %d", r.cfg.RecoveryConfig.LatestFinalizedBatch, latestFinalizedBatchIndex)
|
||||
}
|
||||
}
|
||||
|
||||
// Find the commit event for the latest finalized batch.
|
||||
var batchCommitEvent *l1.CommitBatchEvent
|
||||
err = reader.FetchRollupEventsInRangeWithCallback(r.cfg.RecoveryConfig.L1BlockHeight, latestFinalizedL1Block, func(event l1.RollupEvent) bool {
|
||||
if event.Type() == l1.CommitEventType && event.BatchIndex().Uint64() == latestFinalizedBatchIndex {
|
||||
batchCommitEvent = event.(*l1.CommitBatchEvent)
|
||||
// We found the commit event for the batch, stop searching.
|
||||
return false
|
||||
}
|
||||
|
||||
// Continue until we find the commit event for the batch.
|
||||
return true
|
||||
})
|
||||
if batchCommitEvent == nil {
|
||||
return nil, nil, nil, fmt.Errorf("commit event not found for batch %d", latestFinalizedBatchIndex)
|
||||
}
|
||||
|
||||
log.Info("Found commit event for batch", "batch", batchCommitEvent.BatchIndex(), "hash", batchCommitEvent.BatchHash(), "L1 block height", batchCommitEvent.BlockNumber(), "L1 tx hash", batchCommitEvent.TxHash())
|
||||
|
||||
// 3. Fetch commit tx data for latest finalized batch and decode it.
|
||||
daBatch, daBlobPayload, err := r.decodeLatestFinalizedBatch(reader, batchCommitEvent)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("failed to decode latest finalized batch: %w", err)
|
||||
}
|
||||
fmt.Println(daBatch, daBlobPayload)
|
||||
|
||||
blocksInBatch := daBlobPayload.Blocks()
|
||||
|
||||
if len(blocksInBatch) == 0 {
|
||||
return nil, nil, nil, fmt.Errorf("no blocks in batch %d", batchCommitEvent.BatchIndex())
|
||||
}
|
||||
lastBlockInBatch := blocksInBatch[len(blocksInBatch)-1]
|
||||
|
||||
log.Info("Last L2 block in batch", "batch", batchCommitEvent.BatchIndex(), "L2 block", lastBlockInBatch, "PostL1MessageQueueHash", daBlobPayload.PostL1MessageQueueHash())
|
||||
|
||||
// 4. Get the L1 messages count and state root after the latest finalized batch.
|
||||
var l1MessagesCount uint64
|
||||
if r.cfg.RecoveryConfig.ForceL1MessageCount == 0 {
|
||||
l1MessagesCount, err = reader.NextUnfinalizedL1MessageQueueIndex(latestFinalizedL1Block)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("failed to get L1 messages count: %w", err)
|
||||
}
|
||||
} else {
|
||||
l1MessagesCount = r.cfg.RecoveryConfig.ForceL1MessageCount
|
||||
}
|
||||
|
||||
log.Info("L1 messages count after latest finalized batch", "batch", batchCommitEvent.BatchIndex(), "count", l1MessagesCount)
|
||||
|
||||
stateRoot, err := reader.GetFinalizedStateRootByBatchIndex(latestFinalizedL1Block, latestFinalizedBatchIndex)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("failed to get state root: %w", err)
|
||||
}
|
||||
|
||||
log.Info("State root after latest finalized batch", "batch", batchCommitEvent.BatchIndex(), "stateRoot", stateRoot.Hex())
|
||||
|
||||
// 5. Insert minimal state to DB.
|
||||
chunk, err := r.chunkORM.InsertPermissionlessChunk(r.ctx, defaultFakeRestoredChunkIndex, daBatch.Version(), daBlobPayload, l1MessagesCount, stateRoot)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("failed to insert chunk raw: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Inserted last finalized chunk to DB", "chunk", chunk.Index, "hash", chunk.Hash, "StartBlockNumber", chunk.StartBlockNumber, "EndBlockNumber", chunk.EndBlockNumber, "TotalL1MessagesPoppedBefore", chunk.TotalL1MessagesPoppedBefore)
|
||||
|
||||
batch, err := r.batchORM.InsertPermissionlessBatch(r.ctx, batchCommitEvent.BatchIndex(), batchCommitEvent.BatchHash(), daBatch.Version(), chunk)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("failed to insert batch raw: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Inserted last finalized batch to DB", "batch", batch.Index, "hash", batch.Hash)
|
||||
|
||||
var bundle *orm.Bundle
|
||||
err = r.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
bundle, err = r.bundleORM.InsertBundle(r.ctx, []*orm.Batch{batch}, encoding.CodecVersion(batch.CodecVersion), dbTX)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert bundle: %w", err)
|
||||
}
|
||||
if err = r.bundleORM.UpdateProvingStatus(r.ctx, bundle.Hash, types.ProvingTaskVerified, dbTX); err != nil {
|
||||
return fmt.Errorf("failed to update proving status: %w", err)
|
||||
}
|
||||
if err = r.bundleORM.UpdateRollupStatus(r.ctx, bundle.Hash, types.RollupFinalized); err != nil {
|
||||
return fmt.Errorf("failed to update rollup status: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Inserted last finalized bundle to DB", "bundle", bundle.Index, "hash", bundle.Hash, "StartBatchIndex", bundle.StartBatchIndex, "EndBatchIndex", bundle.EndBatchIndex)
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("failed to insert bundle: %w", err)
|
||||
}
|
||||
return chunk, batch, bundle, nil
|
||||
}
|
||||
|
||||
func (r *MinimalRecovery) decodeLatestFinalizedBatch(reader *l1.Reader, event *l1.CommitBatchEvent) (encoding.DABatch, encoding.DABlobPayload, error) {
|
||||
blockHeader, err := reader.FetchBlockHeaderByNumber(event.BlockNumber())
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get header by number, err: %w", err)
|
||||
}
|
||||
|
||||
args, err := reader.FetchCommitTxData(event)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to fetch commit tx data: %w", err)
|
||||
}
|
||||
|
||||
codecVersion := encoding.CodecVersion(args.Version)
|
||||
if codecVersion < encoding.CodecV7 {
|
||||
return nil, nil, fmt.Errorf("codec version %d is not supported", codecVersion)
|
||||
}
|
||||
|
||||
codec, err := encoding.CodecFromVersion(codecVersion)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get codec: %w", err)
|
||||
}
|
||||
|
||||
// Since we only store the last batch hash committed in a single tx in the contracts we can also only ever
|
||||
// finalize a last batch of a tx. This means we can assume here that the batch given in the event is the last batch
|
||||
// that was committed in the tx.
|
||||
|
||||
if event.BatchIndex().Uint64()+1 < uint64(len(args.BlobHashes)) {
|
||||
return nil, nil, fmt.Errorf("batch index %d+1 is lower than the number of blobs %d", event.BatchIndex().Uint64(), len(args.BlobHashes))
|
||||
}
|
||||
firstBatchIndex := event.BatchIndex().Uint64() + 1 - uint64(len(args.BlobHashes))
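// Worked example (hypothetical numbers): if the commit tx carries 3 blobs and the event's batch
// index is 10, then firstBatchIndex = 10 + 1 - 3 = 8, and the loop below recomputes the batch
// hashes for indices 8, 9 and 10, chaining each computed hash as the parent of the next batch.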
|
||||
|
||||
var targetBatch encoding.DABatch
|
||||
var targetBlobVersionedHash common.Hash
|
||||
parentBatchHash := args.ParentBatchHash
|
||||
for i, blobVersionedHash := range args.BlobHashes {
|
||||
batchIndex := firstBatchIndex + uint64(i)
|
||||
|
||||
calculatedBatch, err := codec.NewDABatchFromParams(batchIndex, blobVersionedHash, parentBatchHash)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create new DA batch from params, batch index: %d, err: %w", event.BatchIndex().Uint64(), err)
|
||||
}
|
||||
parentBatchHash = calculatedBatch.Hash()
|
||||
|
||||
if batchIndex == event.BatchIndex().Uint64() {
|
||||
if calculatedBatch.Hash() != event.BatchHash() {
|
||||
return nil, nil, fmt.Errorf("batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex(), event.BatchHash().String(), calculatedBatch.Hash().String())
|
||||
}
|
||||
// We found the batch we are looking for, break out of the loop.
|
||||
targetBatch = calculatedBatch
|
||||
targetBlobVersionedHash = blobVersionedHash
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if targetBatch == nil {
|
||||
return nil, nil, fmt.Errorf("target batch with index %d could not be found and decoded", event.BatchIndex())
|
||||
}
|
||||
|
||||
// sanity check that this is indeed the last batch in the tx
|
||||
if targetBatch.Hash() != args.LastBatchHash {
|
||||
return nil, nil, fmt.Errorf("last batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex(), args.LastBatchHash.String(), targetBatch.Hash().String())
|
||||
}
|
||||
|
||||
// TODO: add support for multiple blob clients
|
||||
blobClient := blob_client.NewBlobClients()
|
||||
if r.cfg.RecoveryConfig.L1BeaconNodeEndpoint != "" {
|
||||
client, err := blob_client.NewBeaconNodeClient(r.cfg.RecoveryConfig.L1BeaconNodeEndpoint)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create beacon node client: %w", err)
|
||||
}
|
||||
blobClient.AddBlobClient(client)
|
||||
}
|
||||
|
||||
log.Info("Fetching blob by versioned hash and block time", "TargetBlobVersionedHash", targetBlobVersionedHash, "BlockTime", blockHeader.Time, "BlockNumber", blockHeader.Number)
|
||||
blob, err := blobClient.GetBlobByVersionedHashAndBlockTime(r.ctx, targetBlobVersionedHash, blockHeader.Time)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get blob by versioned hash and block time for batch %d: %w", event.BatchIndex(), err)
|
||||
}
|
||||
|
||||
daBlobPayload, err := codec.DecodeBlob(blob)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to decode blob for batch %d: %w", event.BatchIndex(), err)
|
||||
}
|
||||
|
||||
return targetBatch, daBlobPayload, nil
|
||||
}
|
||||
|
||||
func (r *MinimalRecovery) fetchL2Blocks(fromBlock uint64, l2BlockHeightLimit uint64) (uint64, error) {
|
||||
if l2BlockHeightLimit > 0 && fromBlock > l2BlockHeightLimit {
|
||||
return 0, fmt.Errorf("fromBlock (latest finalized L2 block) is higher than specified L2BlockHeightLimit: %d > %d", fromBlock, l2BlockHeightLimit)
|
||||
}
|
||||
|
||||
log.Info("Fetching L2 blocks with", "fromBlock", fromBlock, "l2BlockHeightLimit", l2BlockHeightLimit)
|
||||
|
||||
// Fetch and insert the missing blocks from the last block in the batch to the latest L2 block.
|
||||
latestL2Block, err := r.l2Watcher.Client.BlockNumber(r.ctx)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to get latest L2 block number: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Latest L2 block number", "latest L2 block", latestL2Block)
|
||||
|
||||
if l2BlockHeightLimit > latestL2Block {
|
||||
return 0, fmt.Errorf("l2BlockHeightLimit is higher than the latest L2 block number, not all blocks are available in L2geth: %d > %d", l2BlockHeightLimit, latestL2Block)
|
||||
}
|
||||
|
||||
toBlock := latestL2Block
|
||||
if l2BlockHeightLimit > 0 {
|
||||
toBlock = l2BlockHeightLimit
|
||||
}
|
||||
|
||||
err = r.l2Watcher.GetAndStoreBlocks(r.ctx, fromBlock, toBlock)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to get and store blocks: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Fetched L2 blocks from", "fromBlock", fromBlock, "toBlock", toBlock)
|
||||
|
||||
return toBlock, nil
|
||||
}
|
||||
|
||||
func (r *MinimalRecovery) resetDB() error {
|
||||
sqlDB, err := r.db.DB()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get db connection: %w", err)
|
||||
}
|
||||
|
||||
if err = migrate.ResetDB(sqlDB); err != nil {
|
||||
return fmt.Errorf("failed to reset db: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
rollup/internal/controller/permissionless_batches/submitter.go (new file, 265 lines)
@@ -0,0 +1,265 @@
|
||||
package permissionless_batches
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
bridgeAbi "scroll-tech/rollup/abi"
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/controller/sender"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
)
|
||||
|
||||
type Submitter struct {
|
||||
ctx context.Context
|
||||
|
||||
db *gorm.DB
|
||||
l2BlockOrm *orm.L2Block
|
||||
chunkOrm *orm.Chunk
|
||||
batchOrm *orm.Batch
|
||||
bundleOrm *orm.Bundle
|
||||
|
||||
cfg *config.RelayerConfig
|
||||
|
||||
finalizeSender *sender.Sender
|
||||
l1RollupABI *abi.ABI
|
||||
|
||||
chainCfg *params.ChainConfig
|
||||
}
|
||||
|
||||
func NewSubmitter(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig) (*Submitter, error) {
|
||||
registry := prometheus.DefaultRegisterer
|
||||
finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderSignerConfig, "permissionless_batches_submitter", "finalize_sender", types.SenderTypeFinalizeBatch, db, registry)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("new finalize sender failed, err: %w", err)
|
||||
}
|
||||
|
||||
return &Submitter{
|
||||
ctx: ctx,
|
||||
db: db,
|
||||
l2BlockOrm: orm.NewL2Block(db),
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
batchOrm: orm.NewBatch(db),
|
||||
bundleOrm: orm.NewBundle(db),
|
||||
cfg: cfg,
|
||||
finalizeSender: finalizeSender,
|
||||
l1RollupABI: bridgeAbi.ScrollChainABI,
|
||||
chainCfg: chainCfg,
|
||||
}, nil
|
||||
|
||||
}
|
||||
|
||||
func (s *Submitter) Sender() *sender.Sender {
|
||||
return s.finalizeSender
|
||||
}
|
||||
|
||||
func (s *Submitter) Submit(withProof bool) error {
|
||||
// Check if the bundle is already finalized
|
||||
bundle, err := s.bundleOrm.GetLatestBundle(s.ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error loading latest bundle: %w", err)
|
||||
}
|
||||
|
||||
if bundle.Index != defaultFakeRestoredBundleIndex+1 {
|
||||
return fmt.Errorf("unexpected bundle index %d with hash %s, expected %d", bundle.Index, bundle.Hash, defaultFakeRestoredBundleIndex+1)
|
||||
}
|
||||
|
||||
if types.RollupStatus(bundle.RollupStatus) == types.RollupFinalized {
|
||||
return fmt.Errorf("bundle %d %s is already finalized. nothing to do", bundle.Index, bundle.Hash)
|
||||
}
|
||||
|
||||
if bundle.StartBatchIndex != bundle.EndBatchIndex {
|
||||
return fmt.Errorf("bundle %d %s has unexpected batch indices (should only contain a single batch): start %d, end %d", bundle.Index, bundle.Hash, bundle.StartBatchIndex, bundle.EndBatchIndex)
|
||||
}
|
||||
if bundle.StartBatchHash != bundle.EndBatchHash {
|
||||
return fmt.Errorf("bundle %d %s has unexpected batch hashes (should only contain a single batch): start %s, end %s", bundle.Index, bundle.Hash, bundle.StartBatchHash, bundle.EndBatchHash)
|
||||
}
|
||||
|
||||
batch, err := s.batchOrm.GetBatchByIndex(s.ctx, bundle.StartBatchIndex)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load batch %d: %w", bundle.StartBatchIndex, err)
|
||||
}
|
||||
if batch == nil {
|
||||
return fmt.Errorf("batch %d not found", bundle.StartBatchIndex)
|
||||
}
|
||||
if batch.Hash != bundle.StartBatchHash {
|
||||
return fmt.Errorf("bundle %d %s has unexpected batch hash: %s", bundle.Index, bundle.Hash, batch.Hash)
|
||||
}
|
||||
|
||||
log.Info("submitting batch", "index", batch.Index, "hash", batch.Hash)
|
||||
|
||||
endChunk, err := s.chunkOrm.GetChunkByIndex(s.ctx, batch.EndChunkIndex)
|
||||
if err != nil || endChunk == nil {
|
||||
return fmt.Errorf("failed to get end chunk with index %d of batch: %w", batch.EndChunkIndex, err)
|
||||
}
|
||||
|
||||
var aggProof *message.OpenVMBundleProof
|
||||
if withProof {
|
||||
firstChunk, err := s.chunkOrm.GetChunkByIndex(s.ctx, batch.StartChunkIndex)
|
||||
if err != nil || firstChunk == nil {
|
||||
return fmt.Errorf("failed to get first chunk %d of batch: %w", batch.StartChunkIndex, err)
|
||||
}
|
||||
|
||||
aggProof, err = s.bundleOrm.GetVerifiedProofByHash(s.ctx, bundle.Hash)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get verified proof by bundle index: %d, err: %w", bundle.Index, err)
|
||||
}
|
||||
|
||||
if err = aggProof.SanityCheck(); err != nil {
|
||||
return fmt.Errorf("failed to check agg_proof sanity, index: %d, err: %w", bundle.Index, err)
|
||||
}
|
||||
}
|
||||
|
||||
var calldata []byte
|
||||
var blob *kzg4844.Blob
|
||||
switch encoding.CodecVersion(bundle.CodecVersion) {
|
||||
case encoding.CodecV7:
|
||||
calldata, blob, err = s.constructCommitAndFinalizeCalldataAndBlob(batch, endChunk, aggProof)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to construct CommitAndFinalize calldata and blob, bundle index: %v, batch index: %v, err: %w", bundle.Index, batch.Index, err)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unsupported codec version in finalizeBundle, bundle index: %v, version: %d", bundle.Index, bundle.CodecVersion)
|
||||
}
|
||||
|
||||
txHash, _, err := s.finalizeSender.SendTransaction("commitAndFinalize-"+bundle.Hash, &s.cfg.RollupContractAddress, calldata, []*kzg4844.Blob{blob})
|
||||
if err != nil {
|
||||
log.Error("commitAndFinalize in layer1 failed", "with proof", withProof, "index", bundle.Index,
|
||||
"batch index", bundle.StartBatchIndex,
|
||||
"RollupContractAddress", s.cfg.RollupContractAddress, "err", err, "calldata", common.Bytes2Hex(calldata))
|
||||
|
||||
var rpcError rpc.DataError
|
||||
if errors.As(err, &rpcError) {
|
||||
log.Error("rpc.DataError ", "error", rpcError.Error(), "message", rpcError.ErrorData())
|
||||
}
|
||||
|
||||
return fmt.Errorf("commitAndFinalize failed, bundle index: %d, err: %w", bundle.Index, err)
|
||||
}
|
||||
|
||||
log.Info("commitAndFinalize in layer1", "with proof", withProof, "batch index", bundle.StartBatchIndex, "tx hash", txHash.String())
|
||||
|
||||
// Updating rollup status in database.
|
||||
err = s.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
if err = s.batchOrm.UpdateFinalizeTxHashAndRollupStatusByBundleHash(s.ctx, bundle.Hash, txHash.String(), types.RollupFinalizing, dbTX); err != nil {
|
||||
log.Warn("UpdateFinalizeTxHashAndRollupStatusByBundleHash failed", "index", bundle.Index, "bundle hash", bundle.Hash, "tx hash", txHash.String(), "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err = s.bundleOrm.UpdateFinalizeTxHashAndRollupStatus(s.ctx, bundle.Hash, txHash.String(), types.RollupFinalizing, dbTX); err != nil {
|
||||
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "index", bundle.Index, "bundle hash", bundle.Hash, "tx hash", txHash.String(), "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Warn("failed to update rollup status of bundle and batches", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Update the proving status when finalizing without proof, so that the coordinator can omit this task.
// It isn't a necessary step, so it is not put in a transaction with UpdateFinalizeTxHashAndRollupStatus.
|
||||
if !withProof {
|
||||
txErr := s.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
if updateErr := s.bundleOrm.UpdateProvingStatus(s.ctx, bundle.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil {
|
||||
return updateErr
|
||||
}
|
||||
if updateErr := s.batchOrm.UpdateProvingStatusByBundleHash(s.ctx, bundle.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil {
|
||||
return updateErr
|
||||
}
|
||||
for batchIndex := bundle.StartBatchIndex; batchIndex <= bundle.EndBatchIndex; batchIndex++ {
|
||||
tmpBatch, getErr := s.batchOrm.GetBatchByIndex(s.ctx, batchIndex)
|
||||
if getErr != nil {
|
||||
return getErr
|
||||
}
|
||||
if updateErr := s.chunkOrm.UpdateProvingStatusByBatchHash(s.ctx, tmpBatch.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil {
|
||||
return updateErr
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if txErr != nil {
|
||||
log.Error("Updating chunk and batch proving status when finalizing without proof failure", "bundleHash", bundle.Hash, "err", txErr)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Submitter) constructCommitAndFinalizeCalldataAndBlob(batch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, *kzg4844.Blob, error) {
|
||||
// Create the FinalizeStruct tuple as an abi-compatible struct
|
||||
finalizeStruct := struct {
|
||||
BatchHeader []byte
|
||||
TotalL1MessagesPoppedOverall *big.Int
|
||||
PostStateRoot common.Hash
|
||||
WithdrawRoot common.Hash
|
||||
ZkProof []byte
|
||||
}{
|
||||
BatchHeader: batch.BatchHeader,
|
||||
TotalL1MessagesPoppedOverall: new(big.Int).SetUint64(endChunk.TotalL1MessagesPoppedBefore + endChunk.TotalL1MessagesPoppedInChunk),
|
||||
PostStateRoot: common.HexToHash(batch.StateRoot),
|
||||
WithdrawRoot: common.HexToHash(batch.WithdrawRoot),
|
||||
}
|
||||
if aggProof != nil {
|
||||
finalizeStruct.ZkProof = aggProof.Proof()
|
||||
}
|
||||
|
||||
calldata, err := s.l1RollupABI.Pack("commitAndFinalizeBatch", uint8(batch.CodecVersion), common.HexToHash(batch.ParentBatchHash), finalizeStruct)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to pack commitAndFinalizeBatch: %w", err)
|
||||
}
|
||||
|
||||
chunks, err := s.chunkOrm.GetChunksInRange(s.ctx, batch.StartChunkIndex, batch.EndChunkIndex)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get chunks in range for batch %d: %w", batch.Index, err)
|
||||
}
|
||||
if chunks[len(chunks)-1].Index != batch.EndChunkIndex {
|
||||
return nil, nil, fmt.Errorf("unexpected last chunk index %d, expected %d", chunks[len(chunks)-1].Index, batch.EndChunkIndex)
|
||||
}
|
||||
|
||||
var batchBlocks []*encoding.Block
|
||||
for _, c := range chunks {
|
||||
blocks, err := s.l2BlockOrm.GetL2BlocksInRange(s.ctx, c.StartBlockNumber, c.EndBlockNumber)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get blocks in range for batch %d: %w", batch.Index, err)
|
||||
}
|
||||
|
||||
batchBlocks = append(batchBlocks, blocks...)
|
||||
}
|
||||
|
||||
encodingBatch := &encoding.Batch{
|
||||
Index: batch.Index,
|
||||
ParentBatchHash: common.HexToHash(batch.ParentBatchHash),
|
||||
PrevL1MessageQueueHash: common.HexToHash(batch.PrevL1MessageQueueHash),
|
||||
PostL1MessageQueueHash: common.HexToHash(batch.PostL1MessageQueueHash),
|
||||
Blocks: batchBlocks,
|
||||
}
|
||||
|
||||
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(batch.CodecVersion))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get codec from version %d, err: %w", batch.CodecVersion, err)
|
||||
}
|
||||
|
||||
daBatch, err := codec.NewDABatch(encodingBatch)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create DA batch: %w", err)
|
||||
}
|
||||
|
||||
return calldata, daBatch.Blob(), nil
|
||||
}
|
||||
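Taken together, a hedged sketch of how the submitter might be driven from a recovery CLI. The config field paths (cfg.L2Config.RelayerConfig, cfg.RecoveryConfig.SubmitWithoutProof) and the surrounding wiring are assumptions; only NewSubmitter and Submit come from the file above:

submitter, err := permissionless_batches.NewSubmitter(ctx, db, cfg.L2Config.RelayerConfig, chainCfg)
if err != nil {
	log.Crit("failed to create submitter", "err", err)
}
// Submit (and finalize) the single recovered bundle, with a proof unless explicitly disabled.
if err := submitter.Submit(!cfg.RecoveryConfig.SubmitWithoutProof); err != nil {
	log.Crit("failed to commit and finalize batch", "err", err)
}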
rollup/internal/controller/relayer/full_recovery.go (new file, 476 lines)
@@ -0,0 +1,476 @@
|
||||
package relayer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
|
||||
"github.com/scroll-tech/go-ethereum/rollup/l1"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/controller/watcher"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
butils "scroll-tech/rollup/internal/utils"
|
||||
)
|
||||
|
||||
type FullRecovery struct {
|
||||
ctx context.Context
|
||||
cfg *config.Config
|
||||
genesis *core.Genesis
|
||||
db *gorm.DB
|
||||
blockORM *orm.L2Block
|
||||
chunkORM *orm.Chunk
|
||||
batchORM *orm.Batch
|
||||
bundleORM *orm.Bundle
|
||||
|
||||
chunkProposer *watcher.ChunkProposer
|
||||
batchProposer *watcher.BatchProposer
|
||||
bundleProposer *watcher.BundleProposer
|
||||
l2Watcher *watcher.L2WatcherClient
|
||||
l1Client *ethclient.Client
|
||||
l1Reader *l1.Reader
|
||||
beaconNodeClient *blob_client.BeaconNodeClient
|
||||
}
|
||||
|
||||
func NewFullRecovery(ctx context.Context, cfg *config.Config, genesis *core.Genesis, db *gorm.DB, chunkProposer *watcher.ChunkProposer, batchProposer *watcher.BatchProposer, bundleProposer *watcher.BundleProposer, l2Watcher *watcher.L2WatcherClient, l1Client *ethclient.Client, l1Reader *l1.Reader) (*FullRecovery, error) {
|
||||
beaconNodeClient, err := blob_client.NewBeaconNodeClient(cfg.L1Config.BeaconNodeEndpoint)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create blob client failed: %v", err)
|
||||
}
|
||||
|
||||
return &FullRecovery{
|
||||
ctx: ctx,
|
||||
cfg: cfg,
|
||||
genesis: genesis,
|
||||
db: db,
|
||||
blockORM: orm.NewL2Block(db),
|
||||
chunkORM: orm.NewChunk(db),
|
||||
batchORM: orm.NewBatch(db),
|
||||
bundleORM: orm.NewBundle(db),
|
||||
|
||||
chunkProposer: chunkProposer,
|
||||
batchProposer: batchProposer,
|
||||
bundleProposer: bundleProposer,
|
||||
l2Watcher: l2Watcher,
|
||||
l1Client: l1Client,
|
||||
l1Reader: l1Reader,
|
||||
beaconNodeClient: beaconNodeClient,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// RestoreFullPreviousState restores the full state from L1.
|
||||
// The DB state should be clean: the latest batch in the DB should be finalized on L1. This function will
|
||||
// restore all batches between the latest finalized batch in the DB and the latest finalized batch on L1.
|
||||
func (f *FullRecovery) RestoreFullPreviousState() error {
|
||||
log.Info("Restoring full previous state")
|
||||
|
||||
// 1. Get latest finalized batch stored in DB
|
||||
latestDBBatch, err := f.batchORM.GetLatestBatch(f.ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get latest batch from DB: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Latest finalized batch in DB", "batch", latestDBBatch.Index, "hash", latestDBBatch.Hash)
|
||||
|
||||
// 2. Get latest finalized L1 block
|
||||
latestFinalizedL1Block, err := f.l1Reader.GetLatestFinalizedBlockNumber()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get latest finalized L1 block number: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Latest finalized L1 block number", "latest finalized L1 block", latestFinalizedL1Block)
|
||||
|
||||
// 3. Get latest finalized batch from contract (at latest finalized L1 block)
|
||||
latestFinalizedBatchContract, err := f.l1Reader.LatestFinalizedBatchIndex(latestFinalizedL1Block)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get latest finalized batch: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Latest finalized batch from L1 contract", "latest finalized batch", latestFinalizedBatchContract, "at latest finalized L1 block", latestFinalizedL1Block)
|
||||
|
||||
// 4. Fetch batches one by one, from the latest batch stored in the DB up to the latest finalized batch on L1.
|
||||
var fromBlock uint64
|
||||
if latestDBBatch.Index > 0 {
|
||||
receipt, err := f.l1Client.TransactionReceipt(f.ctx, common.HexToHash(latestDBBatch.CommitTxHash))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get transaction receipt of latest DB batch finalization transaction: %w", err)
|
||||
}
|
||||
fromBlock = receipt.BlockNumber.Uint64()
|
||||
} else {
|
||||
fromBlock = f.cfg.L1Config.StartHeight
|
||||
}
|
||||
|
||||
log.Info("Fetching rollup events from L1", "from block", fromBlock, "to block", latestFinalizedL1Block, "from batch", latestDBBatch.Index, "to batch", latestFinalizedBatchContract)
|
||||
|
||||
commitsHeapMap := common.NewHeapMap[uint64, *l1.CommitBatchEvent](func(event *l1.CommitBatchEvent) uint64 {
|
||||
return event.BatchIndex().Uint64()
|
||||
})
|
||||
batchEventsHeap := common.NewHeap[*batchEvents]()
|
||||
var bundles [][]*batchEvents
|
||||
|
||||
err = f.l1Reader.FetchRollupEventsInRangeWithCallback(fromBlock, latestFinalizedL1Block, func(event l1.RollupEvent) bool {
|
||||
// We're only interested in batches that are newer than the latest finalized batch in the DB.
|
||||
if event.BatchIndex().Uint64() <= latestDBBatch.Index {
|
||||
return true
|
||||
}
|
||||
|
||||
switch event.Type() {
|
||||
case l1.CommitEventType:
|
||||
commitEvent := event.(*l1.CommitBatchEvent)
|
||||
commitsHeapMap.Push(commitEvent)
|
||||
|
||||
case l1.FinalizeEventType:
|
||||
finalizeEvent := event.(*l1.FinalizeBatchEvent)
|
||||
|
||||
var bundle []*batchEvents
|
||||
|
||||
// With bundles, all committed batches up to and including this finalized batch are finalized in the same bundle.
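// Worked example (hypothetical indices): if commits 5, 6 and 7 are pending in the heap and a
// FinalizeBatch event for batch 7 arrives, the loop below pops 5, 6 and 7 and groups them into
// a single bundle entry.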
|
||||
for commitsHeapMap.Len() > 0 {
|
||||
commitEvent := commitsHeapMap.Peek()
|
||||
if commitEvent.BatchIndex().Uint64() > finalizeEvent.BatchIndex().Uint64() {
|
||||
break
|
||||
}
|
||||
|
||||
bEvents := newBatchEvents(commitEvent, finalizeEvent)
|
||||
commitsHeapMap.Pop()
|
||||
batchEventsHeap.Push(bEvents)
|
||||
bundle = append(bundle, bEvents)
|
||||
}
|
||||
|
||||
bundles = append(bundles, bundle)
|
||||
|
||||
// Stop fetching rollup events if we reached the latest finalized batch.
|
||||
if finalizeEvent.BatchIndex().Uint64() >= latestFinalizedBatchContract {
|
||||
return false
|
||||
}
|
||||
case l1.RevertEventV0Type:
|
||||
// We ignore reverted batches.
|
||||
commitsHeapMap.RemoveByKey(event.BatchIndex().Uint64())
|
||||
case l1.RevertEventV7Type:
|
||||
// We ignore reverted batches.
|
||||
|
||||
revertBatch, ok := event.(*l1.RevertBatchEventV7)
|
||||
if !ok {
|
||||
log.Error(fmt.Sprintf("unexpected type of revert event: %T, expected RevertEventV7Type", event))
|
||||
return false
|
||||
}
|
||||
|
||||
// delete all batches from revertBatch.StartBatchIndex (inclusive) to revertBatch.FinishBatchIndex (inclusive)
|
||||
for i := revertBatch.StartBatchIndex().Uint64(); i <= revertBatch.FinishBatchIndex().Uint64(); i++ {
|
||||
commitsHeapMap.RemoveByKey(i)
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch rollup events: %w", err)
|
||||
}
|
||||
|
||||
// 5. Process all finalized batches: fetch L2 blocks and reproduce chunks and batches.
|
||||
var batches []*batchEvents
|
||||
for batchEventsHeap.Len() > 0 {
|
||||
nextBatch := batchEventsHeap.Pop().Value()
|
||||
batches = append(batches, nextBatch)
|
||||
}
|
||||
|
||||
if err = f.processFinalizedBatches(batches); err != nil {
|
||||
return fmt.Errorf("failed to process finalized batches: %w", err)
|
||||
}
|
||||
|
||||
// 6. Create bundles if needed.
|
||||
for _, bundle := range bundles {
|
||||
var dbBatches []*orm.Batch
|
||||
var lastBatchInBundle *orm.Batch
|
||||
|
||||
for _, batch := range bundle {
|
||||
dbBatch, err := f.batchORM.GetBatchByIndex(f.ctx, batch.commit.BatchIndex().Uint64())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get batch by index for bundle generation: %w", err)
|
||||
}
|
||||
// Bundles are only supported for codec version 3 and above.
|
||||
if encoding.CodecVersion(dbBatch.CodecVersion) < encoding.CodecV3 {
|
||||
break
|
||||
}
|
||||
|
||||
dbBatches = append(dbBatches, dbBatch)
|
||||
lastBatchInBundle = dbBatch
|
||||
}
|
||||
|
||||
if len(dbBatches) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
err = f.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
newBundle, err := f.bundleORM.InsertBundle(f.ctx, dbBatches, encoding.CodecVersion(lastBatchInBundle.CodecVersion), dbTX)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert bundle to DB: %w", err)
|
||||
}
|
||||
if err = f.batchORM.UpdateBundleHashInRange(f.ctx, newBundle.StartBatchIndex, newBundle.EndBatchIndex, newBundle.Hash, dbTX); err != nil {
|
||||
return fmt.Errorf("failed to update bundle_hash %s for batches (%d to %d): %w", newBundle.Hash, newBundle.StartBatchIndex, newBundle.EndBatchIndex, err)
|
||||
}
|
||||
|
||||
if err = f.bundleORM.UpdateFinalizeTxHashAndRollupStatus(f.ctx, newBundle.Hash, lastBatchInBundle.FinalizeTxHash, types.RollupFinalized, dbTX); err != nil {
|
||||
return fmt.Errorf("failed to update finalize tx hash and rollup status for bundle %s: %w", newBundle.Hash, err)
|
||||
}
|
||||
|
||||
if err = f.bundleORM.UpdateProvingStatus(f.ctx, newBundle.Hash, types.ProvingTaskVerified, dbTX); err != nil {
|
||||
return fmt.Errorf("failed to update proving status for bundle %s: %w", newBundle.Hash, err)
|
||||
}
|
||||
|
||||
log.Info("Inserted bundle", "hash", newBundle.Hash, "start batch index", newBundle.StartBatchIndex, "end batch index", newBundle.EndBatchIndex)
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert bundle in DB transaction: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FullRecovery) processFinalizedBatches(batches []*batchEvents) error {
|
||||
if len(batches) == 0 {
|
||||
return fmt.Errorf("no finalized batches to process")
|
||||
}
|
||||
|
||||
firstBatch := batches[0]
|
||||
lastBatch := batches[len(batches)-1]
|
||||
|
||||
log.Info("Processing finalized batches", "first batch", firstBatch.commit.BatchIndex(), "hash", firstBatch.commit.BatchHash(), "last batch", lastBatch.commit.BatchIndex(), "hash", lastBatch.commit.BatchHash())
|
||||
|
||||
// Since CodecV7, multiple CommitBatch events can be emitted per transaction,
// with one transaction carrying multiple blobs,
// and each CommitBatch event corresponds to a blob containing block range data.
// To correctly process these events, we need to:
// 1. Parse the associated blob data to extract the block range for each event.
// 2. Track the parent batch hash for each processed CommitBatch event, to:
//    - Validate the batch hash, since the parent batch hash is needed to calculate the batch hash.
//    - Derive the index of the current batch within the transaction from the number of batch hashes tracked so far.
// In commitBatches and commitAndFinalizeBatch, the parent batch hash is passed in calldata,
// so that we can use it to get the first batch's parent batch hash and derive the rest.
// The index map serves this purpose with:
// Key: commit transaction hash
// Value: batch hashes (in order) of the CommitBatch events already processed in the transaction; each one is the parent batch hash of the next event
txBlobIndexMap := make(map[common.Hash][]common.Hash)
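// Worked example (hypothetical hashes): a commit tx 0xabc carries blobs for batches 8 and 9.
// Processing batch 8: currentIndex = len(txBlobIndexMap[0xabc]) = 0, so the parent is args.ParentBatchHash;
// afterwards txBlobIndexMap[0xabc] = [hash(batch 8)].
// Processing batch 9: currentIndex = 1, so the parent is txBlobIndexMap[0xabc][0] = hash(batch 8).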
|
||||
for _, b := range batches {
|
||||
args, err := f.l1Reader.FetchCommitTxData(b.commit)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch commit tx data of batch %d, tx hash: %v, err: %w", b.commit.BatchIndex().Uint64(), b.commit.TxHash().Hex(), err)
|
||||
}
|
||||
|
||||
// all batches we process here will be > CodecV7 since that is the minimum codec version for permissionless batches
|
||||
if args.Version < 7 {
|
||||
return fmt.Errorf("unsupported codec version: %v, batch index: %v, tx hash: %s", args.Version, b.commit.BatchIndex().Uint64(), b.commit.TxHash().Hex())
|
||||
}
|
||||
|
||||
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(args.Version))
|
||||
if err != nil {
|
||||
return fmt.Errorf("unsupported codec version: %v, err: %w", args.Version, err)
|
||||
}
|
||||
|
||||
// We append the batch hash to the slice for the current commit transaction after processing the batch,
// so the index of the current batch within the transaction is len(txBlobIndexMap[b.commit.TxHash()]).
currentIndex := len(txBlobIndexMap[b.commit.TxHash()])
|
||||
if currentIndex >= len(args.BlobHashes) {
|
||||
return fmt.Errorf("commit transaction %s has %d blobs, but trying to access index %d (batch index %d)",
|
||||
b.commit.TxHash(), len(args.BlobHashes), currentIndex, b.commit.BatchIndex().Uint64())
|
||||
}
|
||||
blobVersionedHash := args.BlobHashes[currentIndex]
|
||||
|
||||
// validate the batch hash
|
||||
var parentBatchHash common.Hash
|
||||
if currentIndex == 0 {
|
||||
parentBatchHash = args.ParentBatchHash
|
||||
} else {
|
||||
// here we need to subtract 1 from the current index to get the parent batch hash.
|
||||
parentBatchHash = txBlobIndexMap[b.commit.TxHash()][currentIndex-1]
|
||||
}
|
||||
calculatedBatch, err := codec.NewDABatchFromParams(b.commit.BatchIndex().Uint64(), blobVersionedHash, parentBatchHash)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create new DA batch from params, batch index: %d, err: %w", b.commit.BatchIndex().Uint64(), err)
|
||||
}
|
||||
if calculatedBatch.Hash() != b.commit.BatchHash() {
|
||||
return fmt.Errorf("batch hash mismatch for batch %d, expected: %s, got: %s", b.commit.BatchIndex(), b.commit.BatchHash().String(), calculatedBatch.Hash().String())
|
||||
}
|
||||
|
||||
txBlobIndexMap[b.commit.TxHash()] = append(txBlobIndexMap[b.commit.TxHash()], b.commit.BatchHash())
|
||||
|
||||
if err = f.insertBatchIntoDB(b, codec, blobVersionedHash); err != nil {
|
||||
return fmt.Errorf("failed to insert batch into DB, batch index: %d, err: %w", b.commit.BatchIndex().Uint64(), err)
|
||||
}
|
||||
|
||||
log.Info("Processed batch", "index", b.commit.BatchIndex(), "hash", b.commit.BatchHash(), "commit tx hash", b.commit.TxHash().Hex(), "finalize tx hash", b.finalize.TxHash().Hex(), "blob versioned hash", blobVersionedHash.String(), "parent batch hash", parentBatchHash.String())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FullRecovery) insertBatchIntoDB(batch *batchEvents, codec encoding.Codec, blobVersionedHash common.Hash) error {
|
||||
// 5.1 Fetch block time.
|
||||
blockHeader, err := f.l1Reader.FetchBlockHeaderByNumber(batch.commit.BlockNumber())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch block header by number %d: %w", batch.commit.BlockNumber(), err)
|
||||
}
|
||||
|
||||
// 5.2 Fetch blob data for batch.
|
||||
daBlocks, err := f.getBatchBlockRangeFromBlob(codec, blobVersionedHash, blockHeader.Time)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get batch block range from blob %s: %w", blobVersionedHash.Hex(), err)
|
||||
}
|
||||
lastBlock := daBlocks[len(daBlocks)-1]
|
||||
|
||||
// 5.3. Fetch L2 blocks for the entire batch.
|
||||
if err = f.l2Watcher.TryFetchRunningMissingBlocks(lastBlock.Number()); err != nil {
|
||||
return fmt.Errorf("failed to fetch L2 blocks: %w", err)
|
||||
}
|
||||
|
||||
// 5.4. Reproduce chunk. Since we don't know the internals of a batch, we just create 1 chunk per batch.
|
||||
start := daBlocks[0].Number()
|
||||
end := lastBlock.Number()
|
||||
|
||||
// get last chunk from DB
|
||||
lastChunk, err := f.chunkORM.GetLatestChunk(f.ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get latest chunk from DB: %w", err)
|
||||
}
|
||||
|
||||
blocks, err := f.blockORM.GetL2BlocksInRange(f.ctx, start, end)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get L2 blocks in range: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Reproducing chunk", "start block", start, "end block", end)
|
||||
|
||||
var chunk encoding.Chunk
|
||||
chunk.Blocks = blocks
|
||||
chunk.PrevL1MessageQueueHash = common.HexToHash(lastChunk.PostL1MessageQueueHash)
|
||||
chunk.PostL1MessageQueueHash, err = encoding.MessageQueueV2ApplyL1MessagesFromBlocks(chunk.PrevL1MessageQueueHash, blocks)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to apply L1 messages from blocks: %w", err)
|
||||
}
|
||||
|
||||
metrics, err := butils.CalculateChunkMetrics(&chunk, codec.Version())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to calculate chunk metrics: %w", err)
|
||||
}
|
||||
|
||||
var dbChunk *orm.Chunk
|
||||
err = f.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
dbChunk, err = f.chunkORM.InsertChunk(f.ctx, &chunk, codec.Version(), *metrics, dbTX)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert chunk to DB: %w", err)
|
||||
}
|
||||
if err := f.blockORM.UpdateChunkHashInRange(f.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
|
||||
return fmt.Errorf("failed to update chunk_hash for l2_blocks (chunk hash: %s, start block: %d, end block: %d): %w", dbChunk.Hash, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, err)
|
||||
}
|
||||
|
||||
if err = f.chunkORM.UpdateProvingStatus(f.ctx, dbChunk.Hash, types.ProvingTaskVerified, dbTX); err != nil {
|
||||
return fmt.Errorf("failed to update proving status for chunk %s: %w", dbChunk.Hash, err)
|
||||
}
|
||||
|
||||
log.Info("Inserted chunk", "index", dbChunk.Index, "hash", dbChunk.Hash, "start block", dbChunk.StartBlockNumber, "end block", dbChunk.EndBlockNumber)
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert chunk in DB transaction: %w", err)
|
||||
}
|
||||
|
||||
// 5.5. Reproduce batch.
|
||||
dbParentBatch, err := f.batchORM.GetLatestBatch(f.ctx)
|
||||
if err != nil || dbParentBatch == nil {
|
||||
return fmt.Errorf("failed to get latest batch from DB: %w", err)
|
||||
}
|
||||
|
||||
var encBatch encoding.Batch
|
||||
encBatch.Index = dbParentBatch.Index + 1
|
||||
encBatch.ParentBatchHash = common.HexToHash(dbParentBatch.Hash)
|
||||
encBatch.TotalL1MessagePoppedBefore = dbChunk.TotalL1MessagesPoppedBefore
|
||||
encBatch.PrevL1MessageQueueHash = chunk.PrevL1MessageQueueHash
|
||||
encBatch.PostL1MessageQueueHash = chunk.PostL1MessageQueueHash
|
||||
encBatch.Chunks = []*encoding.Chunk{&chunk}
|
||||
encBatch.Blocks = blocks
|
||||
|
||||
batchMetrics, err := butils.CalculateBatchMetrics(&encBatch, codec.Version(), false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to calculate batch metrics: %w", err)
|
||||
}
|
||||
|
||||
err = f.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
dbBatch, err := f.batchORM.InsertBatch(f.ctx, &encBatch, codec.Version(), *batchMetrics, dbTX)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert batch to DB: %w", err)
|
||||
}
|
||||
if err = f.chunkORM.UpdateBatchHashInRange(f.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex, dbBatch.Hash, dbTX); err != nil {
|
||||
return fmt.Errorf("failed to update batch_hash for chunks (batch hash: %s, start chunk: %d, end chunk: %d): %w", dbBatch.Hash, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex, err)
|
||||
}
|
||||
|
||||
if err = f.batchORM.UpdateProvingStatus(f.ctx, dbBatch.Hash, types.ProvingTaskVerified, dbTX); err != nil {
|
||||
return fmt.Errorf("failed to update proving status for batch %s: %w", dbBatch.Hash, err)
|
||||
}
|
||||
if err = f.batchORM.UpdateRollupStatusCommitAndFinalizeTxHash(f.ctx, dbBatch.Hash, types.RollupFinalized, batch.commit.TxHash().Hex(), batch.finalize.TxHash().Hex(), dbTX); err != nil {
|
||||
return fmt.Errorf("failed to update rollup status for batch %s: %w", dbBatch.Hash, err)
|
||||
}
|
||||
|
||||
log.Info("Inserted batch", "index", dbBatch.Index, "hash", dbBatch.Hash, "start chunk", dbBatch.StartChunkIndex, "end chunk", dbBatch.EndChunkIndex)
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert batch in DB transaction: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FullRecovery) getBatchBlockRangeFromBlob(codec encoding.Codec, blobVersionedHash common.Hash, l1BlockTime uint64) ([]encoding.DABlock, error) {
|
||||
blob, err := f.beaconNodeClient.GetBlobByVersionedHashAndBlockTime(f.ctx, blobVersionedHash, l1BlockTime)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get blob %s: %w", blobVersionedHash.Hex(), err)
|
||||
}
|
||||
if blob == nil {
|
||||
return nil, fmt.Errorf("blob %s not found", blobVersionedHash.Hex())
|
||||
}
|
||||
|
||||
blobPayload, err := codec.DecodeBlob(blob)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("blob %s decode error: %w", blobVersionedHash.Hex(), err)
|
||||
}
|
||||
|
||||
blocks := blobPayload.Blocks()
|
||||
if len(blocks) == 0 {
|
||||
return nil, fmt.Errorf("empty blocks in blob %s", blobVersionedHash.Hex())
|
||||
}
|
||||
|
||||
return blocks, nil
|
||||
}
|
||||
|
||||
type batchEvents struct {
|
||||
commit *l1.CommitBatchEvent
|
||||
finalize *l1.FinalizeBatchEvent
|
||||
}
|
||||
|
||||
func newBatchEvents(commit *l1.CommitBatchEvent, finalize *l1.FinalizeBatchEvent) *batchEvents {
|
||||
if commit.BatchIndex().Uint64() > finalize.BatchIndex().Uint64() {
|
||||
panic(fmt.Sprintf("commit and finalize batch index mismatch: %d != %d", commit.BatchIndex().Uint64(), finalize.BatchIndex().Uint64()))
|
||||
}
|
||||
|
||||
return &batchEvents{
|
||||
commit: commit,
|
||||
finalize: finalize,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *batchEvents) CompareTo(other *batchEvents) int {
|
||||
return e.commit.BatchIndex().Cmp(other.commit.BatchIndex())
|
||||
}
|
||||
@@ -13,6 +13,8 @@ import (
|
||||
"github.com/go-resty/resty/v2"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
@@ -20,7 +22,6 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
@@ -79,6 +80,7 @@ type Layer2Relayer struct {
|
||||
commitSender *sender.Sender
|
||||
finalizeSender *sender.Sender
|
||||
l1RollupABI *abi.ABI
|
||||
validiumABI *abi.ABI
|
||||
|
||||
l2GasOracleABI *abi.ABI
|
||||
|
||||
@@ -172,6 +174,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
commitSender: commitSender,
|
||||
finalizeSender: finalizeSender,
|
||||
l1RollupABI: bridgeAbi.ScrollChainABI,
|
||||
validiumABI: bridgeAbi.ValidiumABI,
|
||||
|
||||
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
|
||||
batchStrategy: strategy,
|
||||
@@ -239,10 +242,11 @@ func (r *Layer2Relayer) initializeGenesis() error {
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
Blocks: chunk.Blocks,
|
||||
}
|
||||
|
||||
var dbBatch *orm.Batch
|
||||
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, encoding.CodecV0, rutils.BatchMetrics{}, dbTX)
|
||||
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, encoding.CodecV0, rutils.BatchMetrics{ValidiumMode: r.cfg.ValidiumMode}, dbTX)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert batch: %v", err)
|
||||
}
|
||||
@@ -274,10 +278,23 @@ func (r *Layer2Relayer) initializeGenesis() error {
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte, stateRoot common.Hash) error {
|
||||
// encode "importGenesisBatch" transaction calldata
|
||||
calldata, packErr := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
|
||||
if packErr != nil {
|
||||
return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, packErr)
|
||||
var calldata []byte
|
||||
var packErr error
|
||||
|
||||
if r.cfg.ValidiumMode {
|
||||
// validium mode: only pass batchHeader
|
||||
calldata, packErr = r.validiumABI.Pack("importGenesisBatch", batchHeader)
|
||||
if packErr != nil {
|
||||
return fmt.Errorf("failed to pack validium importGenesisBatch with batch header: %v. error: %v", common.Bytes2Hex(batchHeader), packErr)
|
||||
}
|
||||
log.Info("Validium importGenesis", "calldata", common.Bytes2Hex(calldata))
|
||||
} else {
|
||||
// rollup mode: pass batchHeader and stateRoot
|
||||
calldata, packErr = r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
|
||||
if packErr != nil {
|
||||
return fmt.Errorf("failed to pack rollup importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, packErr)
|
||||
}
|
||||
log.Info("Rollup importGenesis", "calldata", common.Bytes2Hex(calldata), "stateRoot", stateRoot)
|
||||
}
|
||||
|
||||
// submit genesis batch to L1 rollup contract
|
||||
@@ -285,7 +302,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
|
||||
}
|
||||
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchHash", batchHash)
|
||||
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchHash", batchHash, "validium", r.cfg.ValidiumMode)
|
||||
|
||||
// wait for confirmation
|
||||
// we assume that no other transactions are sent before initializeGenesis completes
|
||||
@@ -310,20 +327,23 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
|
||||
if !confirmation.IsSuccessful {
|
||||
return errors.New("import genesis batch tx failed")
|
||||
}
|
||||
log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash.String())
|
||||
log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash.String(), "validium", r.cfg.ValidiumMode)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
// Pending batchess are submitted if one of the following conditions is met:
// Pending batches are submitted if one of the following conditions is met:
// - the first batch is too old -> forceSubmit
// - backlogCount > r.cfg.BatchSubmission.BacklogMax -> forceSubmit
// - we have at least minBatches AND price hits a desired target price
func (r *Layer2Relayer) ProcessPendingBatches() {
	// Get effective batch limits based on whether validium mode is enabled.
	minBatches, maxBatches := r.getEffectiveBatchLimits()

	// get pending batches from database in ascending order by their index.
	dbBatches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, r.cfg.BatchSubmission.MaxBatches)
	dbBatches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, maxBatches)
	if err != nil {
		log.Error("Failed to fetch pending L2 batches", "err", err)
		return
@@ -432,21 +452,21 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
break
|
||||
}
|
||||
|
||||
if batchesToSubmitLen < r.cfg.BatchSubmission.MaxBatches {
|
||||
if batchesToSubmitLen < maxBatches {
|
||||
batchesToSubmit = append(batchesToSubmit, &dbBatchWithChunks{
|
||||
Batch: dbBatch,
|
||||
Chunks: dbChunks,
|
||||
})
|
||||
}
|
||||
|
||||
if len(batchesToSubmit) >= r.cfg.BatchSubmission.MaxBatches {
|
||||
if len(batchesToSubmit) >= maxBatches {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// we only submit batches if we have a timeout or if we have enough batches to submit
|
||||
if !forceSubmit && len(batchesToSubmit) < r.cfg.BatchSubmission.MinBatches {
|
||||
log.Debug("Not enough batches to submit", "count", len(batchesToSubmit), "minBatches", r.cfg.BatchSubmission.MinBatches, "maxBatches", r.cfg.BatchSubmission.MaxBatches)
|
||||
if !forceSubmit && len(batchesToSubmit) < minBatches {
|
||||
log.Debug("Not enough batches to submit", "count", len(batchesToSubmit), "minBatches", minBatches, "maxBatches", maxBatches)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -466,10 +486,22 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
codecVersion := encoding.CodecVersion(firstBatch.CodecVersion)
|
||||
switch codecVersion {
|
||||
case encoding.CodecV7, encoding.CodecV8:
|
||||
calldata, blobs, maxBlockHeight, totalGasUsed, err = r.constructCommitBatchPayloadCodecV7(batchesToSubmit, firstBatch, lastBatch)
|
||||
if err != nil {
|
||||
log.Error("failed to construct constructCommitBatchPayloadCodecV7 payload for V7", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "err", err)
|
||||
return
|
||||
if r.cfg.ValidiumMode {
|
||||
if len(batchesToSubmit) != 1 {
|
||||
log.Error("validium mode only supports committing one batch at a time", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "batches count", len(batchesToSubmit))
|
||||
return
|
||||
}
|
||||
calldata, maxBlockHeight, totalGasUsed, err = r.constructCommitBatchPayloadValidium(batchesToSubmit[0])
|
||||
if err != nil {
|
||||
log.Error("failed to construct validium payload", "codecVersion", codecVersion, "index", batchesToSubmit[0].Batch.Index, "err", err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
calldata, blobs, maxBlockHeight, totalGasUsed, err = r.constructCommitBatchPayloadCodecV7(batchesToSubmit, firstBatch, lastBatch)
|
||||
if err != nil {
|
||||
log.Error("failed to construct normal payload", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "err", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
default:
|
||||
log.Error("unsupported codec version in ProcessPendingBatches", "codecVersion", codecVersion, "start index", firstBatch, "end index", lastBatch.Index)
|
||||
@@ -522,6 +554,14 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
	log.Info("Sent the commitBatches tx to layer1", "batches count", len(batchesToSubmit), "start index", firstBatch.Index, "start hash", firstBatch.Hash, "end index", lastBatch.Index, "end hash", lastBatch.Hash, "tx hash", txHash.String())
}

// getEffectiveBatchLimits returns the effective min and max batch limits based on whether validium mode is enabled.
func (r *Layer2Relayer) getEffectiveBatchLimits() (int, int) {
	if r.cfg.ValidiumMode {
		return 1, 1 // minBatches=1, maxBatches=1
	}
	return r.cfg.BatchSubmission.MinBatches, r.cfg.BatchSubmission.MaxBatches
}
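A hedged sketch (not from the commit; forceSubmit, firstBatchTooOld and backlogCount are illustrative names) of how these limits combine with the submission conditions listed above ProcessPendingBatches: submit when forced, or once at least minBatches are ready, and never more than maxBatches at once (both collapse to 1 in validium mode).

	minBatches, maxBatches := r.getEffectiveBatchLimits()
	forceSubmit := firstBatchTooOld || backlogCount > r.cfg.BatchSubmission.BacklogMax
	if !forceSubmit && len(batchesToSubmit) < minBatches {
		return // wait for more batches or a better gas price
	}
	if len(batchesToSubmit) > maxBatches {
		batchesToSubmit = batchesToSubmit[:maxBatches]
	}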
func (r *Layer2Relayer) contextIDFromBatches(codecVersion encoding.CodecVersion, batches []*dbBatchWithChunks) string {
|
||||
contextIDs := []string{fmt.Sprintf("v%d", codecVersion)}
|
||||
for _, batch := range batches {
|
||||
@@ -690,9 +730,16 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
|
||||
var calldata []byte
|
||||
switch encoding.CodecVersion(bundle.CodecVersion) {
|
||||
case encoding.CodecV7, encoding.CodecV8:
|
||||
calldata, err = r.constructFinalizeBundlePayloadCodecV7(dbBatch, endChunk, aggProof)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to construct finalizeBundle payload codecv7, bundle index: %v, last batch index: %v, err: %w", bundle.Index, dbBatch.Index, err)
|
||||
if r.cfg.ValidiumMode {
|
||||
calldata, err = r.constructFinalizeBundlePayloadValidium(dbBatch, endChunk, aggProof)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to construct validium finalizeBundle payload, codec version: %v, bundle index: %v, last batch index: %v, err: %w", dbBatch.CodecVersion, bundle.Index, dbBatch.Index, err)
|
||||
}
|
||||
} else {
|
||||
calldata, err = r.constructFinalizeBundlePayloadCodecV7(dbBatch, endChunk, aggProof)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to construct normal finalizeBundle payload, codec version: %v, bundle index: %v, last batch index: %v, err: %w", dbBatch.CodecVersion, bundle.Index, dbBatch.Index, err)
|
||||
}
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unsupported codec version in finalizeBundle, bundle index: %v, version: %d", bundle.Index, bundle.CodecVersion)
|
||||
@@ -951,6 +998,35 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*db
	return calldata, blobs, maxBlockHeight, totalGasUsed, nil
}

func (r *Layer2Relayer) constructCommitBatchPayloadValidium(batch *dbBatchWithChunks) ([]byte, uint64, uint64, error) {
	// Calculate metrics
	var maxBlockHeight uint64
	var totalGasUsed uint64
	for _, c := range batch.Chunks {
		if c.EndBlockNumber > maxBlockHeight {
			maxBlockHeight = c.EndBlockNumber
		}
		totalGasUsed += c.TotalL2TxGas
	}

	// Get the commitment from the batch data: for validium mode, we use the last L2 block hash as the commitment to the off-chain data
	// Get the last chunk from the batch to find the end block hash
	// TODO: This is a temporary solution, we might use a larger commitment in the future
	if len(batch.Chunks) == 0 {
		return nil, 0, 0, fmt.Errorf("last batch has no chunks")
	}

	lastChunk := batch.Chunks[len(batch.Chunks)-1]
	commitment := common.HexToHash(lastChunk.EndBlockHash)
	version := encoding.CodecVersion(batch.Batch.CodecVersion)
	calldata, err := r.validiumABI.Pack("commitBatch", version, common.HexToHash(batch.Batch.ParentBatchHash), common.HexToHash(batch.Batch.StateRoot), common.HexToHash(batch.Batch.WithdrawRoot), commitment[:])
	if err != nil {
		return nil, 0, 0, fmt.Errorf("failed to pack commitBatch: %w", err)
	}
	log.Info("Validium commitBatch", "maxBlockHeight", maxBlockHeight, "commitment", commitment.Hex())
	return calldata, maxBlockHeight, totalGasUsed, nil
}
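A hedged sketch of the off-chain side of this scheme (illustrative only; chunks and onChainCommitment are assumed names): because the on-chain commitment is simply the last chunk's end block hash, a data-availability checker can recompute it from its own copy of the chunk data and compare.

	// chunks is the locally reconstructed chunk list for the committed batch (assumed)
	lastChunk := chunks[len(chunks)-1]
	recomputed := common.HexToHash(lastChunk.EndBlockHash)
	if recomputed != onChainCommitment {
		log.Error("validium commitment mismatch", "local", recomputed.Hex(), "onchain", onChainCommitment.Hex())
	}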
func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, error) {
	if aggProof != nil { // finalizeBundle with proof.
		calldata, packErr := r.l1RollupABI.Pack(
@@ -967,7 +1043,8 @@ func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch
		return calldata, nil
	}

	fmt.Println("packing finalizeBundlePostEuclidV2NoProof", len(dbBatch.BatchHeader), dbBatch.CodecVersion, dbBatch.BatchHeader, new(big.Int).SetUint64(endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk), common.HexToHash(dbBatch.StateRoot), common.HexToHash(dbBatch.WithdrawRoot))
	log.Info("Packing finalizeBundlePostEuclidV2NoProof", "batchHeaderLength", len(dbBatch.BatchHeader), "codecVersion", dbBatch.CodecVersion, "totalL1Messages", endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk, "stateRoot", dbBatch.StateRoot, "withdrawRoot", dbBatch.WithdrawRoot)

	// finalizeBundle without proof.
	calldata, packErr := r.l1RollupABI.Pack(
		"finalizeBundlePostEuclidV2NoProof",
@@ -982,6 +1059,26 @@ func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch
	return calldata, nil
}

func (r *Layer2Relayer) constructFinalizeBundlePayloadValidium(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, error) {
	log.Info("Packing validium finalizeBundle", "batchHeaderLength", len(dbBatch.BatchHeader), "codecVersion", dbBatch.CodecVersion, "totalL1Messages", endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk, "stateRoot", dbBatch.StateRoot, "withdrawRoot", dbBatch.WithdrawRoot, "withProof", aggProof != nil)

	var proof []byte
	if aggProof != nil {
		proof = aggProof.Proof()
	}

	calldata, packErr := r.validiumABI.Pack(
		"finalizeBundle",
		dbBatch.BatchHeader,
		new(big.Int).SetUint64(endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk),
		proof,
	)
	if packErr != nil {
		return nil, fmt.Errorf("failed to pack validium finalizeBundle: %w", packErr)
	}
	return calldata, nil
}

// StopSenders stops the senders of the rollup-relayer to prevent querying the removed pending_transaction table in unit tests.
// for unit test
func (r *Layer2Relayer) StopSenders() {
@@ -63,7 +63,7 @@ type FeeData struct {
	gasLimit uint64
}

// Sender Transaction sender to send transaction to l1/l2 geth
// Sender Transaction sender to send transaction to l1/l2
type Sender struct {
	config     *config.SenderConfig
	gethClient *gethclient.Client
@@ -105,13 +105,7 @@ func NewSender(ctx context.Context, config *config.SenderConfig, signerConfig *c
		return nil, fmt.Errorf("failed to create transaction signer, err: %w", err)
	}

	// Set pending nonce
	nonce, err := client.PendingNonceAt(ctx, transactionSigner.GetAddr())
	if err != nil {
		return nil, fmt.Errorf("failed to get pending nonce for address %s, err: %w", transactionSigner.GetAddr(), err)
	}
	transactionSigner.SetNonce(nonce)

	// Create sender instance first and then initialize nonce
	sender := &Sender{
		ctx:    ctx,
		config: config,
@@ -127,8 +121,13 @@ func NewSender(ctx context.Context, config *config.SenderConfig, signerConfig *c
		service:    service,
		senderType: senderType,
	}
	sender.metrics = initSenderMetrics(reg)

	// Initialize nonce using the new method
	if err := sender.resetNonce(); err != nil {
		return nil, fmt.Errorf("failed to reset nonce: %w", err)
	}

	sender.metrics = initSenderMetrics(reg)
	go sender.loop(ctx)

	return sender, nil
@@ -242,7 +241,10 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
		// Check if contain nonce, and reset nonce
		// only reset nonce when it is not from resubmit
		if strings.Contains(err.Error(), "nonce too low") {
			s.resetNonce(context.Background())
			if err := s.resetNonce(); err != nil {
				log.Warn("failed to reset nonce after failed send transaction", "address", s.transactionSigner.GetAddr().String(), "err", err)
				return common.Hash{}, 0, fmt.Errorf("failed to reset nonce after failed send transaction, err: %w", err)
			}
		}
		return common.Hash{}, 0, fmt.Errorf("failed to send transaction, err: %w", err)
	}
@@ -327,14 +329,46 @@ func (s *Sender) createTx(feeData *FeeData, target *common.Address, data []byte,
	return signedTx, nil
}

// resetNonce reset nonce if send signed tx failed.
func (s *Sender) resetNonce(ctx context.Context) {
	nonce, err := s.client.PendingNonceAt(ctx, s.transactionSigner.GetAddr())
// initializeNonce initializes the nonce by taking the maximum of database nonce and pending nonce.
func (s *Sender) initializeNonce() (uint64, error) {
	// Get maximum nonce from database
	dbNonce, err := s.pendingTransactionOrm.GetMaxNonceBySenderAddress(s.ctx, s.transactionSigner.GetAddr().Hex())
	if err != nil {
		log.Warn("failed to reset nonce", "address", s.transactionSigner.GetAddr().String(), "err", err)
		return
		return 0, fmt.Errorf("failed to get max nonce from database for address %s, err: %w", s.transactionSigner.GetAddr().Hex(), err)
	}

	// Get pending nonce from the client
	pendingNonce, err := s.client.PendingNonceAt(s.ctx, s.transactionSigner.GetAddr())
	if err != nil {
		return 0, fmt.Errorf("failed to get pending nonce for address %s, err: %w", s.transactionSigner.GetAddr().Hex(), err)
	}

	// Take the maximum of pending nonce and (db nonce + 1)
	// Database stores the used nonce, so the next available nonce should be dbNonce + 1
	// When dbNonce is -1 (no records), dbNonce + 1 = 0, which is correct
	nextDbNonce := uint64(dbNonce + 1)
	var finalNonce uint64
	if pendingNonce > nextDbNonce {
		finalNonce = pendingNonce
	} else {
		finalNonce = nextDbNonce
	}

	log.Info("nonce initialization", "address", s.transactionSigner.GetAddr().Hex(), "maxDbNonce", dbNonce, "nextDbNonce", nextDbNonce, "pendingNonce", pendingNonce, "finalNonce", finalNonce)

	return finalNonce, nil
}

// resetNonce reset nonce if send signed tx failed.
func (s *Sender) resetNonce() error {
	nonce, err := s.initializeNonce()
	if err != nil {
		log.Error("failed to reset nonce", "address", s.transactionSigner.GetAddr().String(), "err", err)
		return fmt.Errorf("failed to reset nonce, err: %w", err)
	}
	log.Info("reset nonce", "address", s.transactionSigner.GetAddr().String(), "nonce", nonce)
	s.transactionSigner.SetNonce(nonce)
	return nil
}
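A worked example of the rule above (illustrative, not part of the commit): the next nonce is max(pendingNonce, dbNonce + 1), where dbNonce is -1 when the database holds no record for the sender, so a fresh sender starts at 0.

	func nextNonce(dbNonce int64, pendingNonce uint64) uint64 {
		next := uint64(dbNonce + 1) // dbNonce == -1 -> next == 0
		if pendingNonce > next {
			return pendingNonce
		}
		return next
	}

	// nextNonce(-1, 0) == 0   fresh sender, nothing in DB or mempool
	// nextNonce(7, 5)  == 8   DB is ahead of the node (txs not yet visible as pending)
	// nextNonce(7, 12) == 12  node is ahead of the DB (txs sent outside the relayer)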
func (s *Sender) createReplacingTransaction(tx *gethTypes.Transaction, baseFee, blobBaseFee uint64) (*gethTypes.Transaction, error) {
|
||||
@@ -612,6 +646,16 @@ func (s *Sender) checkPendingTransaction() {
			}

			if err := s.client.SendTransaction(s.ctx, newSignedTx); err != nil {
				if strings.Contains(err.Error(), "nonce too low") {
					// When we receive a 'nonce too low' error but cannot find the transaction receipt, it indicates another transaction with this nonce has already been processed, so this transaction will never be mined and should be marked as failed.
					log.Warn("nonce too low detected, marking all non-confirmed transactions with same nonce as failed", "nonce", originalTx.Nonce(), "address", s.transactionSigner.GetAddr().Hex(), "txHash", originalTx.Hash().Hex(), "newTxHash", newSignedTx.Hash().Hex(), "err", err)
					txHashes := []string{originalTx.Hash().Hex(), newSignedTx.Hash().Hex()}
					if updateErr := s.pendingTransactionOrm.UpdateTransactionStatusByTxHashes(s.ctx, txHashes, types.TxStatusConfirmedFailed); updateErr != nil {
						log.Error("failed to update transaction status", "hashes", txHashes, "err", updateErr)
						return
					}
					return
				}
				// SendTransaction failed, need to rollback the previous database changes
				if rollbackErr := s.db.Transaction(func(tx *gorm.DB) error {
					// Restore original transaction status back to pending
@@ -32,6 +32,7 @@ type BatchProposer struct {
|
||||
cfg *config.BatchProposerConfig
|
||||
|
||||
replayMode bool
|
||||
validiumMode bool
|
||||
minCodecVersion encoding.CodecVersion
|
||||
chainCfg *params.ChainConfig
|
||||
|
||||
@@ -53,7 +54,7 @@ type BatchProposer struct {
|
||||
}
|
||||
|
||||
// NewBatchProposer creates a new BatchProposer instance.
|
||||
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
|
||||
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, validiumMode bool, reg prometheus.Registerer) *BatchProposer {
|
||||
log.Info("new batch proposer", "batchTimeoutSec", cfg.BatchTimeoutSec, "maxBlobSize", maxBlobSize, "maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize)
|
||||
|
||||
p := &BatchProposer{
|
||||
@@ -63,7 +64,8 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minC
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
l2BlockOrm: orm.NewL2Block(db),
|
||||
cfg: cfg,
|
||||
replayMode: false,
|
||||
replayMode: false, // default is false, set to true when using proposer tool
|
||||
validiumMode: validiumMode,
|
||||
minCodecVersion: minCodecVersion,
|
||||
chainCfg: chainCfg,
|
||||
|
||||
@@ -171,7 +173,7 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en
|
||||
|
||||
// recalculate batch metrics after truncation
|
||||
var calcErr error
|
||||
metrics, calcErr = utils.CalculateBatchMetrics(batch, codecVersion)
|
||||
metrics, calcErr = utils.CalculateBatchMetrics(batch, codecVersion, p.validiumMode)
|
||||
if calcErr != nil {
|
||||
return fmt.Errorf("failed to calculate batch metrics, batch index: %v, error: %w", batch.Index, calcErr)
|
||||
}
|
||||
@@ -287,7 +289,7 @@ func (p *BatchProposer) proposeBatch() error {
|
||||
batch.Blocks = append(batch.Blocks, chunk.Blocks...)
|
||||
batch.PostL1MessageQueueHash = common.HexToHash(dbChunks[i].PostL1MessageQueueHash)
|
||||
|
||||
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version())
|
||||
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version(), p.validiumMode)
|
||||
if calcErr != nil {
|
||||
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
|
||||
}
|
||||
@@ -312,7 +314,7 @@ func (p *BatchProposer) proposeBatch() error {
|
||||
batch.PostL1MessageQueueHash = common.HexToHash(dbChunks[i-1].PostL1MessageQueueHash)
|
||||
batch.Blocks = batch.Blocks[:len(batch.Blocks)-len(lastChunk.Blocks)]
|
||||
|
||||
metrics, err = utils.CalculateBatchMetrics(&batch, codec.Version())
|
||||
metrics, err = utils.CalculateBatchMetrics(&batch, codec.Version(), p.validiumMode)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to calculate batch metrics: %w", err)
|
||||
}
|
||||
@@ -322,7 +324,7 @@ func (p *BatchProposer) proposeBatch() error {
|
||||
}
|
||||
}
|
||||
|
||||
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version())
|
||||
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version(), p.validiumMode)
|
||||
if calcErr != nil {
|
||||
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
|
||||
}
|
||||
|
||||
@@ -100,7 +100,7 @@ func testBatchProposerLimitsCodecV7(t *testing.T) {
|
||||
DarwinV2Time: new(uint64),
|
||||
EuclidTime: new(uint64),
|
||||
EuclidV2Time: new(uint64),
|
||||
}, db, nil)
|
||||
}, db, false /* rollup mode */, nil)
|
||||
bp.TryProposeBatch()
|
||||
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
@@ -178,7 +178,7 @@ func testBatchProposerBlobSizeLimitCodecV7(t *testing.T) {
|
||||
MaxChunksPerBatch: math.MaxInt32,
|
||||
BatchTimeoutSec: math.MaxUint32,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, encoding.CodecV7, chainConfig, db, nil)
|
||||
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
bp.TryProposeBatch()
|
||||
@@ -246,7 +246,7 @@ func testBatchProposerMaxChunkNumPerBatchLimitCodecV7(t *testing.T) {
|
||||
MaxChunksPerBatch: 45,
|
||||
BatchTimeoutSec: math.MaxUint32,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, encoding.CodecV7, chainConfig, db, nil)
|
||||
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
|
||||
bp.TryProposeBatch()
|
||||
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
@@ -335,7 +335,7 @@ func testBatchProposerUncompressedBatchBytesLimitCodecV8(t *testing.T) {
|
||||
MaxChunksPerBatch: math.MaxInt32, // No chunk count limit
|
||||
BatchTimeoutSec: math.MaxUint32, // No timeout limit
|
||||
MaxUncompressedBatchBytesSize: 4 * 1024, // 4KiB limit
|
||||
}, encoding.CodecV8, chainConfig, db, nil)
|
||||
}, encoding.CodecV8, chainConfig, db, false /* rollup mode */, nil)
|
||||
|
||||
bp.TryProposeBatch()
|
||||
|
||||
|
||||
@@ -9,9 +9,10 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
@@ -97,7 +98,7 @@ func (p *BundleProposer) TryProposeBundle() {
|
||||
}
|
||||
}
|
||||
|
||||
func (p *BundleProposer) updateDBBundleInfo(batches []*orm.Batch, codecVersion encoding.CodecVersion) error {
|
||||
func (p *BundleProposer) UpdateDBBundleInfo(batches []*orm.Batch, codecVersion encoding.CodecVersion) error {
|
||||
if len(batches) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -187,7 +188,7 @@ func (p *BundleProposer) proposeBundle() error {
|
||||
|
||||
p.bundleFirstBlockTimeoutReached.Inc()
|
||||
p.bundleBatchesNum.Set(float64(len(batches)))
|
||||
return p.updateDBBundleInfo(batches, codecVersion)
|
||||
return p.UpdateDBBundleInfo(batches, codecVersion)
|
||||
}
|
||||
|
||||
currentTimeSec := uint64(time.Now().Unix())
|
||||
@@ -201,7 +202,7 @@ func (p *BundleProposer) proposeBundle() error {
|
||||
|
||||
p.bundleFirstBlockTimeoutReached.Inc()
|
||||
p.bundleBatchesNum.Set(float64(len(batches)))
|
||||
return p.updateDBBundleInfo(batches, codecVersion)
|
||||
return p.UpdateDBBundleInfo(batches, codecVersion)
|
||||
}
|
||||
|
||||
log.Debug("pending batches are not enough and do not contain a timeout batch")
|
||||
|
||||
@@ -103,7 +103,7 @@ func testBundleProposerLimitsCodecV7(t *testing.T) {
|
||||
MaxChunksPerBatch: math.MaxInt32,
|
||||
BatchTimeoutSec: 0,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, encoding.CodecV7, chainConfig, db, nil)
|
||||
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
|
||||
|
||||
cp.TryProposeChunk() // chunk1 contains block1
|
||||
bap.TryProposeBatch() // batch1 contains chunk1
|
||||
|
||||
@@ -142,7 +142,7 @@ func (p *ChunkProposer) SetReplayDB(replayDB *gorm.DB) {
|
||||
// TryProposeChunk tries to propose a new chunk.
|
||||
func (p *ChunkProposer) TryProposeChunk() {
|
||||
p.chunkProposerCircleTotal.Inc()
|
||||
if err := p.proposeChunk(); err != nil {
|
||||
if err := p.ProposeChunk(); err != nil {
|
||||
p.proposeChunkFailureTotal.Inc()
|
||||
log.Error("propose new chunk failed", "err", err)
|
||||
return
|
||||
@@ -225,7 +225,7 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *ChunkProposer) proposeChunk() error {
|
||||
func (p *ChunkProposer) ProposeChunk() error {
|
||||
// unchunkedBlockHeight >= 1, assuming genesis batch with chunk 0, block 0 is committed.
|
||||
unchunkedBlockHeight, err := p.chunkOrm.GetUnchunkedBlockHeight(p.ctx)
|
||||
if err != nil {
|
||||
@@ -268,13 +268,9 @@ func (p *ChunkProposer) proposeChunk() error {
|
||||
return fmt.Errorf("failed to get parent chunk: %w", err)
|
||||
}
|
||||
|
||||
// Currently rollup-relayer only supports >= v7 codec version, it checks the minimum codec version after start.
|
||||
// In EuclidV2 transition, empty PostL1MessageQueueHash will be naturally initialized to the first chunk's PrevL1MessageQueueHash.
|
||||
chunk.PrevL1MessageQueueHash = common.HexToHash(parentChunk.PostL1MessageQueueHash)
|
||||
|
||||
// previous chunk is not CodecV7, this means this is the first chunk of the fork.
|
||||
if encoding.CodecVersion(parentChunk.CodecVersion) < codecVersion {
|
||||
chunk.PrevL1MessageQueueHash = common.Hash{}
|
||||
}
|
||||
|
||||
chunk.PostL1MessageQueueHash = chunk.PrevL1MessageQueueHash
|
||||
|
||||
var previousPostL1MessageQueueHash common.Hash
|
||||
|
||||
@@ -59,12 +59,12 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
|
||||
const blocksFetchLimit = uint64(10)
|
||||
|
||||
// TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks.
|
||||
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
|
||||
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) error {
|
||||
w.metrics.fetchRunningMissingBlocksTotal.Inc()
|
||||
heightInDB, err := w.l2BlockOrm.GetL2BlocksLatestHeight(w.ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to GetL2BlocksLatestHeight", "err", err)
|
||||
return
|
||||
return fmt.Errorf("failed to GetL2BlocksLatestHeight: %w", err)
|
||||
}
|
||||
|
||||
// Fetch and store block traces for missing blocks
|
||||
@@ -75,22 +75,24 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
|
||||
to = blockHeight
|
||||
}
|
||||
|
||||
if err = w.getAndStoreBlocks(w.ctx, from, to); err != nil {
|
||||
if err = w.GetAndStoreBlocks(w.ctx, from, to); err != nil {
|
||||
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
|
||||
return
|
||||
return fmt.Errorf("fail to getAndStoreBlockTraces: %w", err)
|
||||
}
|
||||
w.metrics.fetchRunningMissingBlocksHeight.Set(float64(to))
|
||||
w.metrics.rollupL2BlocksFetchedGap.Set(float64(blockHeight - to))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *L2WatcherClient) getAndStoreBlocks(ctx context.Context, from, to uint64) error {
|
||||
func (w *L2WatcherClient) GetAndStoreBlocks(ctx context.Context, from, to uint64) error {
|
||||
var blocks []*encoding.Block
|
||||
for number := from; number <= to; number++ {
|
||||
log.Debug("retrieving block", "height", number)
|
||||
block, err := w.GetBlockByNumberOrHash(ctx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(number)))
|
||||
block, err := w.BlockByNumber(ctx, new(big.Int).SetUint64(number))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to GetBlockByNumberOrHash: %v. number: %v", err, number)
|
||||
return fmt.Errorf("failed to BlockByNumber: %v. number: %v", err, number)
|
||||
}
|
||||
|
||||
var count int
|
||||
|
||||
@@ -125,7 +125,7 @@ func NewProposerTool(ctx context.Context, cancel context.CancelFunc, cfg *config
|
||||
|
||||
chunkProposer := NewChunkProposer(ctx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, chainCfg, db, nil)
|
||||
chunkProposer.SetReplayDB(dbForReplay)
|
||||
batchProposer := NewBatchProposer(ctx, cfg.L2Config.BatchProposerConfig, minCodecVersion, chainCfg, db, nil)
|
||||
batchProposer := NewBatchProposer(ctx, cfg.L2Config.BatchProposerConfig, minCodecVersion, chainCfg, db, false /* rollup mode */, nil)
|
||||
batchProposer.SetReplayDB(dbForReplay)
|
||||
bundleProposer := NewBundleProposer(ctx, cfg.L2Config.BundleProposerConfig, minCodecVersion, chainCfg, db, nil)
|
||||
|
||||
|
||||
@@ -5,12 +5,15 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
@@ -285,7 +288,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
|
||||
startChunkIndex = parentBatch.EndChunkIndex + 1
|
||||
}
|
||||
|
||||
batchMeta, err := rutils.GetBatchMetadata(batch, codecVersion)
|
||||
batchMeta, err := rutils.GetBatchMetadata(batch, codecVersion, metrics.ValidiumMode)
|
||||
if err != nil {
|
||||
log.Error("failed to get batch metadata", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
|
||||
@@ -338,6 +341,37 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
|
||||
return &newBatch, nil
|
||||
}
|
||||
|
||||
func (o *Batch) InsertPermissionlessBatch(ctx context.Context, batchIndex *big.Int, batchHash common.Hash, codecVersion encoding.CodecVersion, chunk *Chunk) (*Batch, error) {
|
||||
now := time.Now()
|
||||
newBatch := &Batch{
|
||||
Index: batchIndex.Uint64(),
|
||||
Hash: batchHash.Hex(),
|
||||
StartChunkIndex: chunk.Index,
|
||||
StartChunkHash: chunk.Hash,
|
||||
EndChunkIndex: chunk.Index,
|
||||
EndChunkHash: chunk.Hash,
|
||||
StateRoot: chunk.StateRoot,
|
||||
PrevL1MessageQueueHash: chunk.PrevL1MessageQueueHash,
|
||||
PostL1MessageQueueHash: chunk.PostL1MessageQueueHash,
|
||||
BatchHeader: []byte{1, 2, 3},
|
||||
CodecVersion: int16(codecVersion),
|
||||
EnableCompress: false,
|
||||
ProvingStatus: int16(types.ProvingTaskVerified),
|
||||
ProvedAt: &now,
|
||||
RollupStatus: int16(types.RollupFinalized),
|
||||
FinalizedAt: &now,
|
||||
}
|
||||
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Batch{})
|
||||
|
||||
if err := db.Create(newBatch).Error; err != nil {
|
||||
return nil, fmt.Errorf("Batch.InsertPermissionlessBatch error: %w", err)
|
||||
}
|
||||
|
||||
return newBatch, nil
|
||||
}
|
||||
|
||||
// UpdateProvingStatus updates the proving status of a batch.
|
||||
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
|
||||
updateFields := make(map[string]interface{})
|
||||
@@ -366,6 +400,29 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Batch) UpdateRollupStatusCommitAndFinalizeTxHash(ctx context.Context, hash string, status types.RollupStatus, commitTxHash string, finalizeTxHash string, dbTX ...*gorm.DB) error {
|
||||
updateFields := make(map[string]interface{})
|
||||
updateFields["commit_tx_hash"] = commitTxHash
|
||||
updateFields["committed_at"] = utils.NowUTC()
|
||||
updateFields["finalize_tx_hash"] = finalizeTxHash
|
||||
updateFields["finalized_at"] = utils.NowUTC()
|
||||
|
||||
updateFields["rollup_status"] = int(status)
|
||||
|
||||
db := o.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&Batch{})
|
||||
db = db.Where("hash", hash)
|
||||
|
||||
if err := db.Updates(updateFields).Error; err != nil {
|
||||
return fmt.Errorf("Batch.UpdateRollupStatusCommitAndFinalizeTxHash error: %w, batch hash: %v, status: %v, commitTxHash: %v, finalizeTxHash: %v", err, hash, status.String(), commitTxHash, finalizeTxHash)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateRollupStatus updates the rollup status of a batch.
|
||||
func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus, dbTX ...*gorm.DB) error {
|
||||
updateFields := make(map[string]interface{})
|
||||
|
||||
@@ -115,7 +115,8 @@ func (o *BlobUpload) InsertOrUpdateBlobUpload(ctx context.Context, batchIndex ui
|
||||
return fmt.Errorf("BlobUpload.InsertOrUpdateBlobUpload query error: %w, batch index: %v, batch_hash: %v, platform: %v", err, batchIndex, batchHash, platform)
|
||||
}
|
||||
|
||||
if err := db.Model(&existing).Update("status", int16(status)).Error; err != nil {
|
||||
if err := db.Model(&existing).Where("batch_index = ? AND batch_hash = ? AND platform = ? AND deleted_at IS NULL",
|
||||
batchIndex, batchHash, int16(platform)).Update("status", int16(status)).Error; err != nil {
|
||||
return fmt.Errorf("BlobUpload.InsertOrUpdateBlobUpload update error: %w, batch index: %v, batch_hash: %v, platform: %v", err, batchIndex, batchHash, platform)
|
||||
}
|
||||
|
||||
|
||||
@@ -59,8 +59,8 @@ func (*Bundle) TableName() string {
|
||||
return "bundle"
|
||||
}
|
||||
|
||||
// getLatestBundle retrieves the latest bundle from the database.
|
||||
func (o *Bundle) getLatestBundle(ctx context.Context) (*Bundle, error) {
|
||||
// GetLatestBundle retrieves the latest bundle from the database.
|
||||
func (o *Bundle) GetLatestBundle(ctx context.Context) (*Bundle, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Bundle{})
|
||||
db = db.Order("index desc")
|
||||
@@ -70,7 +70,7 @@ func (o *Bundle) getLatestBundle(ctx context.Context) (*Bundle, error) {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("getLatestBundle error: %w", err)
|
||||
return nil, fmt.Errorf("GetLatestBundle error: %w", err)
|
||||
}
|
||||
return &latestBundle, nil
|
||||
}
|
||||
@@ -106,7 +106,7 @@ func (o *Bundle) GetBundles(ctx context.Context, fields map[string]interface{},
|
||||
// GetFirstUnbundledBatchIndex retrieves the first unbundled batch index.
|
||||
func (o *Bundle) GetFirstUnbundledBatchIndex(ctx context.Context) (uint64, error) {
|
||||
// Get the latest bundle
|
||||
latestBundle, err := o.getLatestBundle(ctx)
|
||||
latestBundle, err := o.GetLatestBundle(ctx)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("Bundle.GetFirstUnbundledBatchIndex error: %w", err)
|
||||
}
|
||||
@@ -237,14 +237,18 @@ func (o *Bundle) UpdateProvingStatus(ctx context.Context, hash string, status ty
|
||||
|
||||
// UpdateRollupStatus updates the rollup status for a bundle.
|
||||
// only used in unit tests.
|
||||
func (o *Bundle) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus) error {
|
||||
func (o *Bundle) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus, dbTX ...*gorm.DB) error {
|
||||
updateFields := make(map[string]interface{})
|
||||
updateFields["rollup_status"] = int(status)
|
||||
if status == types.RollupFinalized {
|
||||
updateFields["finalized_at"] = utils.NowUTC()
|
||||
}
|
||||
|
||||
db := o.db.WithContext(ctx)
|
||||
db := o.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&Bundle{})
|
||||
db = db.Where("hash", hash)
|
||||
|
||||
|
||||
@@ -7,9 +7,12 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
@@ -275,6 +278,48 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
|
||||
return &newChunk, nil
|
||||
}
|
||||
|
||||
func (o *Chunk) InsertPermissionlessChunk(ctx context.Context, index uint64, codecVersion encoding.CodecVersion, daBlobPayload encoding.DABlobPayload, totalL1MessagePoppedBefore uint64, stateRoot common.Hash) (*Chunk, error) {
|
||||
// Create some unique identifier. It is not really used for anything except in DB.
|
||||
var chunkBytes []byte
|
||||
for _, block := range daBlobPayload.Blocks() {
|
||||
blockBytes := block.Encode()
|
||||
chunkBytes = append(chunkBytes, blockBytes...)
|
||||
}
|
||||
hash := crypto.Keccak256Hash(chunkBytes)
|
||||
|
||||
numBlocks := len(daBlobPayload.Blocks())
|
||||
emptyHash := common.Hash{}.Hex()
|
||||
newChunk := &Chunk{
|
||||
Index: index,
|
||||
Hash: hash.Hex(),
|
||||
StartBlockNumber: daBlobPayload.Blocks()[0].Number(),
|
||||
StartBlockHash: emptyHash,
|
||||
EndBlockNumber: daBlobPayload.Blocks()[numBlocks-1].Number(),
|
||||
EndBlockHash: emptyHash,
|
||||
StartBlockTime: daBlobPayload.Blocks()[0].Timestamp(),
|
||||
TotalL1MessagesPoppedInChunk: 0, // this needs to be 0 so that the calculation of the total L1 messages popped before for the next chunk is correct
|
||||
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
|
||||
PrevL1MessageQueueHash: daBlobPayload.PrevL1MessageQueueHash().Hex(),
|
||||
PostL1MessageQueueHash: daBlobPayload.PostL1MessageQueueHash().Hex(),
|
||||
ParentChunkHash: emptyHash,
|
||||
StateRoot: stateRoot.Hex(),
|
||||
ParentChunkStateRoot: emptyHash,
|
||||
WithdrawRoot: emptyHash,
|
||||
CodecVersion: int16(codecVersion),
|
||||
EnableCompress: false,
|
||||
ProvingStatus: int16(types.ProvingTaskVerified),
|
||||
}
|
||||
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Chunk{})
|
||||
|
||||
if err := db.Create(newChunk).Error; err != nil {
|
||||
return nil, fmt.Errorf("Chunk. InsertPermissionlessChunk error: %w, chunk hash: %v", err, newChunk.Hash)
|
||||
}
|
||||
|
||||
return newChunk, nil
|
||||
}
|
||||
|
||||
// InsertTestChunkForProposerTool inserts a new chunk into the database only for analysis usage by proposer tool.
|
||||
func (o *Chunk) InsertTestChunkForProposerTool(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, totalL1MessagePoppedBefore uint64, dbTX ...*gorm.DB) (*Chunk, error) {
|
||||
if chunk == nil || len(chunk.Blocks) == 0 {
|
||||
|
||||
@@ -597,3 +597,61 @@ func TestPendingTransactionOrm(t *testing.T) {
|
||||
err = pendingTransactionOrm.DeleteTransactionByTxHash(context.Background(), common.HexToHash("0x123"))
|
||||
assert.Error(t, err) // Should return error for non-existent transaction
|
||||
}
|
||||
|
||||
func TestPendingTransaction_GetMaxNonceBySenderAddress(t *testing.T) {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
// When there are no transactions for this sender address, should return -1
|
||||
maxNonce, err := pendingTransactionOrm.GetMaxNonceBySenderAddress(context.Background(), "0xdeadbeef")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(-1), maxNonce)
|
||||
|
||||
// Insert two transactions with different nonces for the same sender address
|
||||
senderMeta := &SenderMeta{
|
||||
Name: "testName",
|
||||
Service: "testService",
|
||||
Address: common.HexToAddress("0xdeadbeef"),
|
||||
Type: types.SenderTypeCommitBatch,
|
||||
}
|
||||
|
||||
tx0 := gethTypes.NewTx(&gethTypes.DynamicFeeTx{
|
||||
Nonce: 1,
|
||||
To: &common.Address{},
|
||||
Data: []byte{},
|
||||
Gas: 21000,
|
||||
AccessList: gethTypes.AccessList{},
|
||||
Value: big.NewInt(0),
|
||||
ChainID: big.NewInt(1),
|
||||
GasTipCap: big.NewInt(0),
|
||||
GasFeeCap: big.NewInt(1),
|
||||
V: big.NewInt(0),
|
||||
R: big.NewInt(0),
|
||||
S: big.NewInt(0),
|
||||
})
|
||||
tx1 := gethTypes.NewTx(&gethTypes.DynamicFeeTx{
|
||||
Nonce: 3,
|
||||
To: &common.Address{},
|
||||
Data: []byte{},
|
||||
Gas: 22000,
|
||||
AccessList: gethTypes.AccessList{},
|
||||
Value: big.NewInt(0),
|
||||
ChainID: big.NewInt(1),
|
||||
GasTipCap: big.NewInt(1),
|
||||
GasFeeCap: big.NewInt(2),
|
||||
V: big.NewInt(0),
|
||||
R: big.NewInt(0),
|
||||
S: big.NewInt(0),
|
||||
})
|
||||
|
||||
err = pendingTransactionOrm.InsertPendingTransaction(context.Background(), "test", senderMeta, tx0, 0)
|
||||
assert.NoError(t, err)
|
||||
err = pendingTransactionOrm.InsertPendingTransaction(context.Background(), "test", senderMeta, tx1, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Now the max nonce for this sender should be 3
|
||||
maxNonce, err = pendingTransactionOrm.GetMaxNonceBySenderAddress(context.Background(), senderMeta.Address.String())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(3), maxNonce)
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package orm
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
@@ -191,6 +192,25 @@ func (o *PendingTransaction) UpdateTransactionStatusByTxHash(ctx context.Context
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateTransactionStatusByTxHashes updates the status of multiple transactions by their hashes in one SQL statement
|
||||
func (o *PendingTransaction) UpdateTransactionStatusByTxHashes(ctx context.Context, txHashes []string, status types.TxStatus, dbTX ...*gorm.DB) error {
|
||||
if len(txHashes) == 0 {
|
||||
return nil
|
||||
}
|
||||
db := o.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&PendingTransaction{})
|
||||
db = db.Where("hash IN ?", txHashes)
|
||||
if err := db.Update("status", status).Error; err != nil {
|
||||
return fmt.Errorf("failed to update transaction status for hashes %v to status %d: %w", txHashes, status, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateOtherTransactionsAsFailedByNonce updates the status of all transactions to TxStatusConfirmedFailed for a specific nonce and sender address, excluding a specified transaction hash.
|
||||
func (o *PendingTransaction) UpdateOtherTransactionsAsFailedByNonce(ctx context.Context, senderAddress string, nonce uint64, hash common.Hash, dbTX ...*gorm.DB) error {
|
||||
db := o.db
|
||||
@@ -207,3 +227,27 @@ func (o *PendingTransaction) UpdateOtherTransactionsAsFailedByNonce(ctx context.
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetMaxNonceBySenderAddress retrieves the maximum nonce for a specific sender address.
// Returns -1 if no transactions are found for the given address.
func (o *PendingTransaction) GetMaxNonceBySenderAddress(ctx context.Context, senderAddress string) (int64, error) {
	var result struct {
		Nonce int64 `gorm:"column:nonce"`
	}

	err := o.db.WithContext(ctx).
		Model(&PendingTransaction{}).
		Select("nonce").
		Where("sender_address = ?", senderAddress).
		Order("nonce DESC").
		First(&result).Error

	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return -1, nil
		}
		return -1, fmt.Errorf("failed to get max nonce by sender address, address: %s, err: %w", senderAddress, err)
	}

	return result.Nonce, nil
}
@@ -1,11 +1,13 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
)
|
||||
|
||||
// ChunkMetrics indicates the metrics for proposing a chunk.
|
||||
@@ -60,15 +62,18 @@ type BatchMetrics struct {
|
||||
L1CommitBlobSize uint64
|
||||
L1CommitUncompressedBatchBytesSize uint64
|
||||
|
||||
ValidiumMode bool // default false: rollup mode
|
||||
|
||||
// timing metrics
|
||||
EstimateBlobSizeTime time.Duration
|
||||
}
|
||||
|
||||
// CalculateBatchMetrics calculates batch metrics.
|
||||
func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetrics, error) {
|
||||
func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVersion, validiumMode bool) (*BatchMetrics, error) {
|
||||
metrics := &BatchMetrics{
|
||||
NumChunks: uint64(len(batch.Chunks)),
|
||||
FirstBlockTimestamp: batch.Chunks[0].Blocks[0].Header.Time,
|
||||
ValidiumMode: validiumMode,
|
||||
}
|
||||
|
||||
codec, err := encoding.CodecFromVersion(codecVersion)
|
||||
@@ -119,8 +124,59 @@ type BatchMetadata struct {
|
||||
ChallengeDigest common.Hash
|
||||
}
|
||||
|
||||
// encodeBatchHeaderValidium encodes batch header for validium mode and returns both encoded bytes and hash
func encodeBatchHeaderValidium(b *encoding.Batch, codecVersion encoding.CodecVersion) ([]byte, common.Hash, error) {
	if b == nil {
		return nil, common.Hash{}, fmt.Errorf("batch is nil, version: %v, index: %v", codecVersion, b.Index)
	}

	if len(b.Blocks) == 0 {
		return nil, common.Hash{}, fmt.Errorf("batch contains no blocks, version: %v, index: %v", codecVersion, b.Index)
	}

	// For validium mode, use the last block hash as commitment to the off-chain data
	// TODO: This is a temporary solution, we might use a larger commitment in the future
	lastBlock := b.Blocks[len(b.Blocks)-1]
	commitment := lastBlock.Header.Hash()

	// Batch header field sizes
	const (
		versionSize      = 1
		indexSize        = 8
		parentHashSize   = 32
		stateRootSize    = 32
		withdrawRootSize = 32
		commitmentSize   = 32 // TODO: 32 bytes for now, might use larger commitment in the future

		// Total size of validium batch header
		validiumBatchHeaderSize = versionSize + indexSize + parentHashSize + stateRootSize + withdrawRootSize + commitmentSize
	)

	batchBytes := make([]byte, validiumBatchHeaderSize)

	// Define offsets for each field
	var (
		versionOffset      = 0
		indexOffset        = versionOffset + versionSize
		parentHashOffset   = indexOffset + indexSize
		stateRootOffset    = parentHashOffset + parentHashSize
		withdrawRootOffset = stateRootOffset + stateRootSize
		commitmentOffset   = withdrawRootOffset + withdrawRootSize
	)

	batchBytes[versionOffset] = uint8(codecVersion)                                                                        // version
	binary.BigEndian.PutUint64(batchBytes[indexOffset:indexOffset+indexSize], b.Index)                                     // batch index
	copy(batchBytes[parentHashOffset:parentHashOffset+parentHashSize], b.ParentBatchHash[0:parentHashSize])                // parentBatchHash
	copy(batchBytes[stateRootOffset:stateRootOffset+stateRootSize], b.StateRoot().Bytes()[0:stateRootSize])                // postStateRoot
	copy(batchBytes[withdrawRootOffset:withdrawRootOffset+withdrawRootSize], b.WithdrawRoot().Bytes()[0:withdrawRootSize]) // postWithdrawRoot
	copy(batchBytes[commitmentOffset:commitmentOffset+commitmentSize], commitment[0:commitmentSize])                       // data commitment

	hash := crypto.Keccak256Hash(batchBytes)
	return batchBytes, hash, nil
}
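The resulting header is a fixed 137-byte layout (1 + 8 + 32 + 32 + 32 + 32). A hedged decoder sketch (illustrative, not part of the commit) that inverts the encoding above:

	func decodeBatchHeaderValidium(b []byte) (version uint8, index uint64, parentHash, stateRoot, withdrawRoot, commitment common.Hash, err error) {
		const headerSize = 1 + 8 + 32 + 32 + 32 + 32 // 137 bytes, same field order as the encoder
		if len(b) != headerSize {
			err = fmt.Errorf("unexpected validium batch header size: %d", len(b))
			return
		}
		version = b[0]
		index = binary.BigEndian.Uint64(b[1:9])
		parentHash = common.BytesToHash(b[9:41])
		stateRoot = common.BytesToHash(b[41:73])
		withdrawRoot = common.BytesToHash(b[73:105])
		commitment = common.BytesToHash(b[105:137])
		return
	}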
// GetBatchMetadata retrieves the metadata of a batch.
func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetadata, error) {
func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion, validiumMode bool) (*BatchMetadata, error) {
	codec, err := encoding.CodecFromVersion(codecVersion)
	if err != nil {
		return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err)
@@ -139,9 +195,17 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
		ChallengeDigest: daBatch.ChallengeDigest(),
	}

	// If this function is used in Validium, we encode the batch header differently.
	if validiumMode {
		batchMeta.BatchBytes, batchMeta.BatchHash, err = encodeBatchHeaderValidium(batch, codecVersion)
		if err != nil {
			return nil, fmt.Errorf("failed to encode batch header for validium, version: %v, index: %v, err: %w", codecVersion, batch.Index, err)
		}
	}

	batchMeta.BatchBlobDataProof, err = daBatch.BlobDataProofForPointEvaluation()
	if err != nil {
		return nil, fmt.Errorf("failed to get blob data proof, version: %v, err: %w", codecVersion, err)
		return nil, fmt.Errorf("failed to get blob data proof, version: %v, index: %v, err: %w", codecVersion, batch.Index, err)
	}

	numChunks := len(batch.Chunks)
@@ -128,7 +128,7 @@ func testCommitBatchAndFinalizeBundleCodecV7(t *testing.T) {
|
||||
MaxChunksPerBatch: math.MaxInt32,
|
||||
BatchTimeoutSec: 300,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, encoding.CodecV7, chainConfig, db, nil)
|
||||
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
|
||||
|
||||
bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{
|
||||
MaxBatchNumPerBundle: 2,
|
||||
|
||||
4
zkvm-prover/.work/.gitignore
vendored
@@ -1,7 +1,9 @@
|
||||
*.vmexe
|
||||
openvm.toml
|
||||
*.bin
|
||||
*.sol
|
||||
cache
|
||||
db
|
||||
*.json
|
||||
?
|
||||
?
|
||||
root-verifier*
|
||||
@@ -1,35 +0,0 @@
|
||||
[app_fri_params.fri_params]
|
||||
log_blowup = 1
|
||||
log_final_poly_len = 0
|
||||
num_queries = 100
|
||||
proof_of_work_bits = 16
|
||||
|
||||
[app_vm_config.rv32i]
|
||||
|
||||
[app_vm_config.rv32m]
|
||||
|
||||
[app_vm_config.io]
|
||||
|
||||
[app_vm_config.keccak]
|
||||
|
||||
[app_vm_config.castf]
|
||||
|
||||
[app_vm_config.modular]
|
||||
supported_moduli = [
|
||||
"4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787",
|
||||
"52435875175126190479447740508185965837690552500527637822603658699938581184513",
|
||||
]
|
||||
[app_vm_config.native]
|
||||
[app_vm_config.pairing]
|
||||
supported_curves = ["Bls12_381"]
|
||||
[app_vm_config.sha256]
|
||||
[app_vm_config.fp2]
|
||||
supported_moduli = [
|
||||
["Bls12_381Fp2","4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787"]
|
||||
]
|
||||
[[app_vm_config.ecc.supported_curves]]
|
||||
struct_name = "Bls12_381G1Affine"
|
||||
modulus = "4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787"
|
||||
scalar = "52435875175126190479447740508185965837690552500527637822603658699938581184513"
|
||||
a = "0"
|
||||
b = "4"
|
||||
@@ -1,17 +0,0 @@
|
||||
[app_fri_params.fri_params]
|
||||
log_blowup = 1
|
||||
log_final_poly_len = 0
|
||||
num_queries = 100
|
||||
proof_of_work_bits = 16
|
||||
|
||||
[app_vm_config.rv32i]
|
||||
|
||||
[app_vm_config.rv32m]
|
||||
|
||||
[app_vm_config.io]
|
||||
|
||||
[app_vm_config.keccak]
|
||||
|
||||
[app_vm_config.castf]
|
||||
|
||||
[app_vm_config.native]
|
||||
@@ -1,58 +0,0 @@
[app_fri_params.fri_params]
log_blowup = 1
log_final_poly_len = 0
num_queries = 100
proof_of_work_bits = 16

[app_vm_config.rv32i]

[app_vm_config.io]

[app_vm_config.keccak]

[app_vm_config.rv32m]
range_tuple_checker_sizes = [256, 8192]

[app_vm_config.bigint]
range_tuple_checker_sizes = [256, 8192]

[app_vm_config.modular]
supported_moduli = [
"21888242871839275222246405745257275088696311157297823662689037894645226208583",
"21888242871839275222246405745257275088548364400416034343698204186575808495617",
"115792089237316195423570985008687907853269984665640564039457584007908834671663",
"115792089237316195423570985008687907852837564279074904382605163141518161494337",
"115792089210356248762697446949407573530086143415290314195533631308867097853951",
"115792089210356248762697446949407573529996955224135760342422259061068512044369"
]

[app_vm_config.fp2]
supported_moduli = [
["Bn254Fp2","21888242871839275222246405745257275088696311157297823662689037894645226208583"]
]

[app_vm_config.pairing]
supported_curves = ["Bn254"]

[app_vm_config.sha256]

[[app_vm_config.ecc.supported_curves]]
struct_name = "Secp256k1Point"
modulus = "115792089237316195423570985008687907853269984665640564039457584007908834671663"
scalar = "115792089237316195423570985008687907852837564279074904382605163141518161494337"
a = "0"
b = "7"

[[app_vm_config.ecc.supported_curves]]
struct_name = "P256Point"
modulus = "115792089210356248762697446949407573530086143415290314195533631308867097853951"
scalar = "115792089210356248762697446949407573529996955224135760342422259061068512044369"
a = "115792089210356248762697446949407573530086143415290314195533631308867097853948"
b = "41058363725152142129326129780047268409114441015993725554835256314039467401291"

[[app_vm_config.ecc.supported_curves]]
struct_name = "Bn254G1Affine"
modulus = "21888242871839275222246405745257275088696311157297823662689037894645226208583"
scalar = "21888242871839275222246405745257275088548364400416034343698204186575808495617"
a = "0"
b = "3"
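For readers unfamiliar with these long constants: the two 21888242... values are the BN254 base-field modulus and scalar-field order, the 115792089237... pair is secp256k1's p and n, and the 115792089210... pair is P-256's p and n. As a quick, purely illustrative sanity check (not part of the repository), one can confirm with bc that the listed P-256 a coefficient equals p - 3, as the NIST curve definition requires:

# hypothetical check, not in the repo: P-256 uses a = p - 3
p=115792089210356248762697446949407573530086143415290314195533631308867097853951
a=115792089210356248762697446949407573530086143415290314195533631308867097853948
test "$(echo "$p - 3" | bc | tr -d '\\\n')" = "$a" && echo "ok: a == p - 3"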
@@ -3,7 +3,7 @@
RUST_MIN_STACK ?= 16777216
export RUST_MIN_STACK

CIRCUIT_STUFF = .work/chunk/app.vmexe .work/batch/app.vmexe .work/bundle/app.vmexe
CIRCUIT_STUFF = .work/euclid/chunk/app.vmexe .work/feynman/chunk/app.vmexe

ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
PLONKY3_VERSION=$(shell grep -m 1 "Plonky3.git" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
@@ -21,7 +21,7 @@ endif
ZKVM_COMMIT=$(shell echo ${ZKVM_VERSION} | cut -d " " -f2)
$(info ZKVM_COMMIT is ${ZKVM_COMMIT})

PLONKY3_GPU_VERSION=$(shell ./print_plonky3gpu_version.sh | sed -n '2p')
$(info PLONKY3_VERSION is ${PLONKY3_VERSION})

GIT_REV=$(shell git rev-parse --short HEAD)
GO_TAG=$(shell grep "var tag = " ../common/version/version.go | cut -d "\"" -f2)
@@ -32,16 +32,17 @@ else
$(info GO_TAG is ${GO_TAG})
endif

ifeq (${PLONKY3_GPU_VERSION},)
# use plonky3 with CPU
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_VERSION}
else
# use halo2_gpu
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION}
endif
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_VERSION}

E2E_HANDLE_SET = ../tests/prover-e2e/testset.json
DUMP_DIR = .work

prover:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cd ../crates/prover-bin && cargo build --release
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZKVM_COMMIT=${ZKVM_COMMIT} $(MAKE) -C ../crates/gpu_override build

prover_cpu:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --locked --release -p prover


tests_binary:
cargo clean && cargo test --release --no-run
@@ -53,7 +54,15 @@ lint:
cargo fmt --all

$(CIRCUIT_STUFF):
bash .work/download-release.sh
@echo "Download stuff with download-release.sh, and put them into correct directory";
@exit 1;

test_run: $(CIRCUIT_STUFF)
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json

test_e2e_run: $(CIRCUIT_STUFF) ${E2E_HANDLE_SET}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json handle ${E2E_HANDLE_SET}

gen_verifier_stuff:
mkdir -p ${DUMP_DIR}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json --forkname feynman dump ${DUMP_DIR}
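A hedged usage sketch of the targets touched above; the working directory (zkvm-prover/, inferred from the release script path) is an assumption, and no flags beyond what the Makefile itself shows are used:

# assumed to be run from the zkvm-prover/ directory
make prover              # builds ../crates/prover-bin and the gpu_override crate
make test_e2e_run        # runs the prover against ../tests/prover-e2e/testset.json
make gen_verifier_stuff  # dumps feynman verifier artifacts into .work/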
@@ -1,20 +1,36 @@
#!/bin/bash

# Define version mapping
declare -A VERSION_MAP
VERSION_MAP["euclid"]="0.4.3"
VERSION_MAP["feynman"]="0.5.0rc1"

# release version
if [ -z "${SCROLL_ZKVM_VERSION}" ]; then
SCROLL_ZKVM_VERSION=$($SHELL ./print_high_zkvm_version.sh | cut -d' ' -f1|cut -c2-)

# Check if first argument is provided and matches a known version name
if [ -n "$1" ] && [ -n "${VERSION_MAP[$1]}" ]; then
SCROLL_ZKVM_VERSION=${VERSION_MAP[$1]}
echo "Setting SCROLL_ZKVM_VERSION to ${SCROLL_ZKVM_VERSION} based on '$1' argument"
else
# Default version if no argument or not recognized
SCROLL_ZKVM_VERSION=0.5.0rc0
fi
fi

echo $SCROLL_ZKVM_VERSION

mkdir -p .work/chunk
mkdir -p .work/batch
mkdir -p .work/bundle

# chunk-circuit exe
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/chunk/app.vmexe -O .work/chunk/app.vmexe

wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/chunk/openvm.toml -O .work/chunk/openvm.toml
# batch-circuit exe
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/batch/app.vmexe -O .work/batch/app.vmexe

wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/batch/openvm.toml -O .work/batch/openvm.toml
# bundle-circuit exe
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/app.vmexe -O .work/bundle/app.vmexe
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/openvm.toml -O .work/bundle/openvm.toml

# bundle-circuit exe, legacy version, may not exist
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/app_euclidv1.vmexe -O .work/bundle/app_euclidv1.vmexe || echo "legacy app not exist for $SCROLL_ZKVM_VERSION"
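A hedged usage sketch of the updated download script, based only on the logic shown above:

bash download-release.sh euclid    # VERSION_MAP pins SCROLL_ZKVM_VERSION=0.4.3
bash download-release.sh feynman   # VERSION_MAP pins SCROLL_ZKVM_VERSION=0.5.0rc1
bash download-release.sh           # absent/unrecognized argument falls back to 0.5.0rc0
SCROLL_ZKVM_VERSION=0.5.0rc1 bash download-release.sh   # a preset env var skips the mapping entirely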
14 zkvm-prover/release-verifier-stuff.sh Normal file
@@ -0,0 +1,14 @@
#!/bin/bash

# release version (defaults, overridable via environment variables)
SCROLL_ZKVM_STUFFDIR="${SCROLL_ZKVM_STUFFDIR:-$(realpath .work)}"
SCROLL_ZKVM_VERSION="${SCROLL_ZKVM_VERSION:-0.5.0rc1}"
DIR_OUTPUT="releases/${SCROLL_ZKVM_VERSION}/verifier"

STUFF_FILES=('root-verifier-committed-exe' 'root-verifier-vm-config' 'verifier.bin' 'openVmVk.json')

for stuff_file in "${STUFF_FILES[@]}"; do
    SRC="${SCROLL_ZKVM_STUFFDIR}/${stuff_file}"
    TARGET="${DIR_OUTPUT}/${stuff_file}"
    aws --profile default s3 cp "$SRC" "s3://circuit-release/scroll-zkvm/${TARGET}"
done
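A hedged usage sketch of the new upload script; it assumes AWS credentials with write access to the circuit-release bucket are configured under the default profile, and that the verifier artifacts have already been produced into .work (for example via make gen_verifier_stuff above):

SCROLL_ZKVM_VERSION=0.5.0rc1 SCROLL_ZKVM_STUFFDIR=$(realpath .work) bash release-verifier-stuff.sh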