Compare commits


2 Commits

Author     SHA1        Message                   Date
georgehao  dd857cf80f  update plonky3 commit id  2025-07-04 14:08:03 +08:00
georgehao  b3f58d7b96  fix coordinator bug       2025-07-04 10:23:51 +08:00
34 changed files with 417 additions and 357 deletions

View File

@@ -10,8 +10,7 @@ env:
jobs:
gas_oracle:
runs-on:
group: scroll-reth-runner-group
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -56,8 +55,7 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
rollup_relayer:
runs-on:
group: scroll-reth-runner-group
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -102,8 +100,7 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
blob_uploader:
runs-on:
group: scroll-reth-runner-group
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -148,8 +145,7 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
rollup-db-cli:
runs-on:
group: scroll-reth-runner-group
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -194,8 +190,7 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-fetcher:
runs-on:
group: scroll-reth-runner-group
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -240,8 +235,7 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-api:
runs-on:
group: scroll-reth-runner-group
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -286,8 +280,7 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-db-cli:
runs-on:
group: scroll-reth-runner-group
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -332,8 +325,7 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
coordinator-api:
runs-on:
group: scroll-reth-runner-group
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -360,6 +352,48 @@ jobs:
REPOSITORY: coordinator-api
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Setup SSH for repositories and clone them
run: |
mkdir -p ~/.ssh
chmod 700 ~/.ssh
# Setup for plonky3-gpu
echo "${{ secrets.PLONKY3_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/plonky3_gpu_key
chmod 600 ~/.ssh/plonky3_gpu_key
eval "$(ssh-agent -s)" > /dev/null
ssh-add ~/.ssh/plonky3_gpu_key 2>/dev/null
ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts 2>/dev/null
echo "Loaded plonky3-gpu key"
# Clone plonky3-gpu repository
./build/dockerfiles/coordinator-api/clone_plonky3_gpu.sh
# Setup for openvm-stark-gpu
echo "${{ secrets.OPENVM_STARK_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/openvm_stark_gpu_key
chmod 600 ~/.ssh/openvm_stark_gpu_key
eval "$(ssh-agent -s)" > /dev/null
ssh-add ~/.ssh/openvm_stark_gpu_key 2>/dev/null
echo "Loaded openvm-stark-gpu key"
# Clone openvm-stark-gpu repository
./build/dockerfiles/coordinator-api/clone_openvm_stark_gpu.sh
# Setup for openvm-gpu
echo "${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/openvm_gpu_key
chmod 600 ~/.ssh/openvm_gpu_key
eval "$(ssh-agent -s)" > /dev/null
ssh-add ~/.ssh/openvm_gpu_key 2>/dev/null
echo "Loaded openvm-gpu key"
# Clone openvm-gpu repository
./build/dockerfiles/coordinator-api/clone_openvm_gpu.sh
# Show number of loaded keys
echo "Number of loaded keys: $(ssh-add -l | wc -l)"
- name: Checkout specific commits
run: |
./build/dockerfiles/coordinator-api/checkout_all.sh
- name: Build and push
uses: docker/build-push-action@v3
env:
@@ -377,8 +411,7 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
coordinator-cron:
runs-on:
group: scroll-reth-runner-group
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
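A note on the coordinator-api job above: the "describe-repositories && : || create-repository" one-liner is an ensure-exists idiom, equivalent to "describe || create": if the repository lookup fails, the step falls through to creating the repository instead of failing the job. A minimal standalone sketch of the same idiom (AWS_REGION and REPOSITORY are placeholder values):

#!/bin/bash
set -e
AWS_REGION=us-west-2        # placeholder
REPOSITORY=coordinator-api  # placeholder
# Create the ECR repository only if it does not already exist.
aws --region "$AWS_REGION" ecr describe-repositories --repository-names "$REPOSITORY" \
  || aws --region "$AWS_REGION" ecr create-repository --repository-name "$REPOSITORY"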

Cargo.lock generated
View File

@@ -8174,7 +8174,7 @@ dependencies = [
[[package]]
name = "scroll-zkvm-prover"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/zkvm-prover?rev=0dd7b19#0dd7b19590b1b17c5ec5a97fc1b4f638dfd0ce8f"
source = "git+https://github.com/scroll-tech/zkvm-prover?rev=2962428#29624289a052cbcf0e1c7f17e277034f6f5ad432"
dependencies = [
"alloy-primitives",
"base64 0.22.1",
@@ -8212,7 +8212,7 @@ dependencies = [
[[package]]
name = "scroll-zkvm-types"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/zkvm-prover?rev=0dd7b19#0dd7b19590b1b17c5ec5a97fc1b4f638dfd0ce8f"
source = "git+https://github.com/scroll-tech/zkvm-prover?rev=2962428#29624289a052cbcf0e1c7f17e277034f6f5ad432"
dependencies = [
"base64 0.22.1",
"bincode",
@@ -8232,7 +8232,7 @@ dependencies = [
[[package]]
name = "scroll-zkvm-types-base"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/zkvm-prover?rev=0dd7b19#0dd7b19590b1b17c5ec5a97fc1b4f638dfd0ce8f"
source = "git+https://github.com/scroll-tech/zkvm-prover?rev=2962428#29624289a052cbcf0e1c7f17e277034f6f5ad432"
dependencies = [
"alloy-primitives",
"alloy-serde 1.0.16",
@@ -8247,7 +8247,7 @@ dependencies = [
[[package]]
name = "scroll-zkvm-types-batch"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/zkvm-prover?rev=0dd7b19#0dd7b19590b1b17c5ec5a97fc1b4f638dfd0ce8f"
source = "git+https://github.com/scroll-tech/zkvm-prover?rev=2962428#29624289a052cbcf0e1c7f17e277034f6f5ad432"
dependencies = [
"alloy-primitives",
"halo2curves-axiom",
@@ -8267,7 +8267,7 @@ dependencies = [
[[package]]
name = "scroll-zkvm-types-bundle"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/zkvm-prover?rev=0dd7b19#0dd7b19590b1b17c5ec5a97fc1b4f638dfd0ce8f"
source = "git+https://github.com/scroll-tech/zkvm-prover?rev=2962428#29624289a052cbcf0e1c7f17e277034f6f5ad432"
dependencies = [
"alloy-primitives",
"itertools 0.14.0",
@@ -8280,7 +8280,7 @@ dependencies = [
[[package]]
name = "scroll-zkvm-types-chunk"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/zkvm-prover?rev=0dd7b19#0dd7b19590b1b17c5ec5a97fc1b4f638dfd0ce8f"
source = "git+https://github.com/scroll-tech/zkvm-prover?rev=2962428#29624289a052cbcf0e1c7f17e277034f6f5ad432"
dependencies = [
"alloy-primitives",
"itertools 0.14.0",
@@ -8300,7 +8300,7 @@ dependencies = [
[[package]]
name = "scroll-zkvm-verifier"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/zkvm-prover?rev=0dd7b19#0dd7b19590b1b17c5ec5a97fc1b4f638dfd0ce8f"
source = "git+https://github.com/scroll-tech/zkvm-prover?rev=2962428#29624289a052cbcf0e1c7f17e277034f6f5ad432"
dependencies = [
"bincode",
"eyre",

View File

@@ -17,9 +17,9 @@ repository = "https://github.com/scroll-tech/scroll"
version = "4.5.8"
[workspace.dependencies]
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "0dd7b19", package = "scroll-zkvm-prover" }
scroll-zkvm-verifier-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "0dd7b19", package = "scroll-zkvm-verifier" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "0dd7b19" }
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "2962428", package = "scroll-zkvm-prover" }
scroll-zkvm-verifier-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "2962428", package = "scroll-zkvm-verifier" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "2962428" }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/upgrade", features = ["scroll"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/upgrade" }

View File

@@ -1,5 +1,5 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as chef
FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.22.12-rust-nightly-2025-02-14 as chef
WORKDIR app
FROM chef as planner
@@ -11,15 +11,21 @@ RUN cargo chef prepare --recipe-path recipe.json
FROM chef as zkp-builder
COPY ./rust-toolchain ./
COPY --from=planner /app/recipe.json recipe.json
# run scripts to get openvm-gpu
COPY ./build/dockerfiles/coordinator-api/plonky3-gpu /plonky3-gpu
COPY ./build/dockerfiles/coordinator-api/openvm-stark-gpu /openvm-stark-gpu
COPY ./build/dockerfiles/coordinator-api/openvm-gpu /openvm-gpu
COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
RUN cargo chef cook --release --recipe-path recipe.json
COPY ./crates/ ./crates/
COPY ./Cargo.* ./
COPY .git .git
RUN cargo build --release -p libzkp-c
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as base
FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.22.12-rust-nightly-2025-02-14 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
@@ -39,7 +45,7 @@ RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_a
RUN mv coordinator/internal/logic/libzkp/lib /bin/
# Pull coordinator into a second stage deploy ubuntu container
FROM ubuntu:20.04
FROM nvidia/cuda:11.7.1-runtime-ubuntu22.04
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/lib
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
# ENV CHAIN_ID=534353
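The GPU source trees are copied in before cargo chef cook deliberately: the [patch] tables in the config.toml installed at /root/.cargo/config.toml (shown below) point at /plonky3-gpu, /openvm-stark-gpu and /openvm-gpu, so those paths must already exist when the cached dependency build resolves the graph. A hedged sketch of the host-side sequence this image now assumes (the clone/checkout script paths are from this change; the Dockerfile path is an assumption):

./build/dockerfiles/coordinator-api/clone_plonky3_gpu.sh
./build/dockerfiles/coordinator-api/clone_openvm_stark_gpu.sh
./build/dockerfiles/coordinator-api/clone_openvm_gpu.sh
./build/dockerfiles/coordinator-api/checkout_all.sh           # pin the commits listed below
docker build -f build/dockerfiles/coordinator-api/Dockerfile .  # Dockerfile path assumed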

View File

@@ -0,0 +1,17 @@
#!/bin/bash
set -uex
PLONKY3_GPU_COMMIT=450ec18 # feynman
OPENVM_STARK_GPU_COMMIT=e3b2d6 # branch: sync/upstream-250702
OPENVM_GPU_COMMIT=8094b4f # branch: patch-v1.2.0
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# checkout plonky3-gpu
cd $DIR/plonky3-gpu && git checkout ${PLONKY3_GPU_COMMIT}
# checkout openvm-stark-gpu
cd $DIR/openvm-stark-gpu && git checkout ${OPENVM_STARK_GPU_COMMIT}
# checkout openvm-gpu
cd $DIR/openvm-gpu && git checkout ${OPENVM_GPU_COMMIT}

View File

@@ -0,0 +1,10 @@
#!/bin/bash
set -uex
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# clone openvm-gpu if not exists
if [ ! -d $DIR/openvm-gpu ]; then
git clone git@github.com:scroll-tech/openvm-gpu.git $DIR/openvm-gpu
fi
cd $DIR/openvm-gpu && git fetch --all --force

View File

@@ -0,0 +1,10 @@
#!/bin/bash
set -uex
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# clone openvm-stark-gpu if not exists
if [ ! -d $DIR/openvm-stark-gpu ]; then
git clone git@github.com:scroll-tech/openvm-stark-gpu.git $DIR/openvm-stark-gpu
fi
cd $DIR/openvm-stark-gpu && git fetch --all --force

View File

@@ -0,0 +1,10 @@
#!/bin/bash
set -uex
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# clone plonky3-gpu if not exists
if [ ! -d $DIR/plonky3-gpu ]; then
git clone git@github.com:scroll-tech/plonky3-gpu.git $DIR/plonky3-gpu
fi
cd $DIR/plonky3-gpu && git fetch --all --force

View File

@@ -0,0 +1,92 @@
# openvm
# same order and features as zkvm-prover/Cargo.toml.gpu
[patch."ssh://git@github.com/scroll-tech/openvm-gpu.git"]
openvm = { path = "/openvm-gpu/crates/toolchain/openvm", default-features = false }
openvm-algebra-complex-macros = { path = "/openvm-gpu/extensions/algebra/complex-macros", default-features = false }
openvm-algebra-guest = { path = "/openvm-gpu/extensions/algebra/guest", default-features = false }
openvm-bigint-guest = { path = "/openvm-gpu/extensions/bigint/guest", default-features = false }
openvm-build = { path = "/openvm-gpu/crates/toolchain/build", default-features = false }
openvm-circuit = { path = "/openvm-gpu/crates/vm", default-features = false }
openvm-custom-insn = { path = "/openvm-gpu/crates/toolchain/custom_insn", default-features = false }
openvm-continuations = { path = "/openvm-gpu/crates/continuations", default-features = false }
openvm-ecc-guest = { path = "/openvm-gpu/extensions/ecc/guest", default-features = false }
openvm-instructions ={ path = "/openvm-gpu/crates/toolchain/instructions", default-features = false }
openvm-keccak256-guest = { path = "/openvm-gpu/extensions/keccak256/guest", default-features = false }
openvm-native-circuit = { path = "/openvm-gpu/extensions/native/circuit", default-features = false }
openvm-native-compiler = { path = "/openvm-gpu/extensions/native/compiler", default-features = false }
openvm-native-recursion = { path = "/openvm-gpu/extensions/native/recursion", default-features = false }
openvm-native-transpiler = { path = "/openvm-gpu/extensions/native/transpiler", default-features = false }
openvm-pairing-guest = { path = "/openvm-gpu/extensions/pairing/guest", default-features = false }
openvm-rv32im-guest = { path = "/openvm-gpu/extensions/rv32im/guest", default-features = false }
openvm-rv32im-transpiler = { path = "/openvm-gpu/extensions/rv32im/transpiler", default-features = false }
openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
openvm-sha256-guest = { path = "/openvm-gpu/extensions/sha256/guest", default-features = false }
openvm-transpiler = { path = "/openvm-gpu/crates/toolchain/transpiler", default-features = false }
# stark-backend
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { path = "/openvm-stark-gpu/crates/stark-backend", features = ["gpu"] }
openvm-stark-sdk = { path = "/openvm-stark-gpu/crates/stark-sdk", features = ["gpu"] }
[patch."ssh://git@github.com/scroll-tech/openvm-stark-gpu.git"]
openvm-stark-backend = { path = "/openvm-stark-gpu/crates/stark-backend", features = ["gpu"] }
openvm-stark-sdk = { path = "/openvm-stark-gpu/crates/stark-sdk", features = ["gpu"] }
# plonky3
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { path = "/plonky3-gpu/air" }
p3-field = { path = "/plonky3-gpu/field" }
p3-commit = { path = "/plonky3-gpu/commit" }
p3-matrix = { path = "/plonky3-gpu/matrix" }
p3-baby-bear = { path = "/plonky3-gpu/baby-bear" }
p3-koala-bear = { path = "/plonky3-gpu/koala-bear" }
p3-util = { path = "/plonky3-gpu/util" }
p3-challenger = { path = "/plonky3-gpu/challenger" }
p3-dft = { path = "/plonky3-gpu/dft" }
p3-fri = { path = "/plonky3-gpu/fri" }
p3-goldilocks = { path = "/plonky3-gpu/goldilocks" }
p3-keccak = { path = "/plonky3-gpu/keccak" }
p3-keccak-air = { path = "/plonky3-gpu/keccak-air" }
p3-blake3 = { path = "/plonky3-gpu/blake3" }
p3-mds = { path = "/plonky3-gpu/mds" }
p3-monty-31 = { path = "/plonky3-gpu/monty-31" }
p3-merkle-tree = { path = "/plonky3-gpu/merkle-tree" }
p3-poseidon = { path = "/plonky3-gpu/poseidon" }
p3-poseidon2 = { path = "/plonky3-gpu/poseidon2" }
p3-poseidon2-air = { path = "/plonky3-gpu/poseidon2-air" }
p3-symmetric = { path = "/plonky3-gpu/symmetric" }
p3-uni-stark = { path = "/plonky3-gpu/uni-stark" }
p3-maybe-rayon = { path = "/plonky3-gpu/maybe-rayon" }
p3-bn254-fr = { path = "/plonky3-gpu/bn254-fr" }
# gpu crates
[patch."ssh://git@github.com/scroll-tech/plonky3-gpu.git"]
p3-gpu-base = { path = "/plonky3-gpu/gpu-base" }
p3-gpu-build = { path = "/plonky3-gpu/gpu-build" }
p3-gpu-field = { path = "/plonky3-gpu/gpu-field" }
p3-gpu-backend = { path = "/plonky3-gpu/gpu-backend" }
p3-gpu-module = { path = "/plonky3-gpu/gpu-module" }
p3-air = { path = "/plonky3-gpu/air" }
p3-field = { path = "/plonky3-gpu/field" }
p3-commit = { path = "/plonky3-gpu/commit" }
p3-matrix = { path = "/plonky3-gpu/matrix" }
p3-baby-bear = { path = "/plonky3-gpu/baby-bear" }
p3-koala-bear = { path = "/plonky3-gpu/koala-bear" }
p3-util = { path = "/plonky3-gpu/util" }
p3-challenger = { path = "/plonky3-gpu/challenger" }
p3-dft = { path = "/plonky3-gpu/dft" }
p3-fri = { path = "/plonky3-gpu/fri" }
p3-goldilocks = { path = "/plonky3-gpu/goldilocks" }
p3-keccak = { path = "/plonky3-gpu/keccak" }
p3-keccak-air = { path = "/plonky3-gpu/keccak-air" }
p3-blake3 = { path = "/plonky3-gpu/blake3" }
p3-mds = { path = "/plonky3-gpu/mds" }
p3-monty-31 = { path = "/plonky3-gpu/monty-31" }
p3-merkle-tree = { path = "/plonky3-gpu/merkle-tree" }
p3-poseidon = { path = "/plonky3-gpu/poseidon" }
p3-poseidon2 = { path = "/plonky3-gpu/poseidon2" }
p3-poseidon2-air = { path = "/plonky3-gpu/poseidon2-air" }
p3-symmetric = { path = "/plonky3-gpu/symmetric" }
p3-uni-stark = { path = "/plonky3-gpu/uni-stark" }
p3-maybe-rayon = { path = "/plonky3-gpu/maybe-rayon" }
p3-bn254-fr = { path = "/plonky3-gpu/bn254-fr" }
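Cargo only applies these [patch] tables when it re-resolves the dependency graph, and a resolution failure (for example a missing /openvm-gpu checkout) surfaces at that point. A cheap way to force and sanity-check resolution without compiling anything, the same trick the Makefile change further down uses, is:

# Verify the path patches above resolve (the exit status is the signal):
cargo tree > /dev/null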

View File

@@ -0,0 +1,2 @@
[url "https://github.com/"]
insteadOf = ssh://git@github.com/
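This gitconfig is installed into the build stage so that every ssh://git@github.com/ URL is rewritten to https://github.com/, letting the ssh-flavored [patch] sources above resolve without an SSH key inside the container. For reference, the equivalent one-off command form:

git config --global url."https://github.com/".insteadOf "ssh://git@github.com/"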

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.5.30"
var tag = "v4.5.26"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -1,45 +0,0 @@
[patch."https://github.com/openvm-org/openvm.git"]
openvm-build = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-circuit = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-continuations = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-instructions ={ git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-circuit = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-compiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-recursion = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-native-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-rv32im-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
openvm-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
openvm-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "sync/upstream-250702", features = ["gpu"] }
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "sync/upstream-250702", features = ["gpu"] }
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
"nightly-features",
], rev = "450ec18" }
p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }

View File

@@ -0,0 +1,32 @@
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
"nightly-features",
], tag = "v0.2.0" }
p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }

File diff suppressed because one or more lines are too long

View File

@@ -70,7 +70,7 @@ func action(ctx *cli.Context) error {
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
}
go utils.Loop(subCtx, 1*time.Second, blobUploader.UploadBlobToS3)
go utils.Loop(subCtx, 2*time.Second, blobUploader.UploadBlobToS3)
// Finish start all blob-uploader functions.
log.Info("Start blob-uploader successfully", "version", version.Version)

View File

@@ -107,7 +107,7 @@ func action(ctx *cli.Context) error {
}
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, genesis.Config, db, registry)
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, cfg.L2Config.RelayerConfig.ValidiumMode, registry)
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, registry)
bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, minCodecVersion, genesis.Config, db, registry)
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, registry)

View File

@@ -36,7 +36,6 @@
"endpoint": "https://rpc.scroll.io",
"l2_message_queue_address": "0x0000000000000000000000000000000000000000",
"relayer_config": {
"validium_mode": false,
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
@@ -124,4 +123,4 @@
"maxOpenNum": 200,
"maxIdleNum": 20
}
}
}

View File

@@ -53,8 +53,6 @@ type ChainMonitor struct {
// RelayerConfig loads relayer configuration items.
// What we need to pay attention to is that
type RelayerConfig struct {
// ValidiumMode indicates if the relayer is in validium mode.
ValidiumMode bool `json:"validium_mode"`
// RollupContractAddress store the rollup contract address.
RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
// GasPriceOracleContractAddress store the scroll messenger contract address.
@@ -75,6 +73,8 @@ type RelayerConfig struct {
// Indicates if bypass features specific to testing environments are enabled.
EnableTestEnvBypassFeatures bool `json:"enable_test_env_bypass_features"`
// The timeout in seconds for finalizing a batch without proof, only used when EnableTestEnvBypassFeatures is true.
FinalizeBatchWithoutProofTimeoutSec uint64 `json:"finalize_batch_without_proof_timeout_sec"`
// The timeout in seconds for finalizing a bundle without proof, only used when EnableTestEnvBypassFeatures is true.
FinalizeBundleWithoutProofTimeoutSec uint64 `json:"finalize_bundle_without_proof_timeout_sec"`
}

View File

@@ -25,6 +25,8 @@ type S3Uploader struct {
func NewS3Uploader(cfg *config.AWSS3Config) (*S3Uploader, error) {
// load AWS config
var opts []func(*awsconfig.LoadOptions) error
opts = append(opts, awsconfig.WithRegion(cfg.Region))
// if AccessKey && SecretKey provided, use it
if cfg.AccessKey != "" && cfg.SecretKey != "" {
opts = append(opts, awsconfig.WithCredentialsProvider(
@@ -36,10 +38,6 @@ func NewS3Uploader(cfg *config.AWSS3Config) (*S3Uploader, error) {
)
}
if cfg.Region != "" {
opts = append(opts, awsconfig.WithRegion(cfg.Region))
}
awsCfg, err := awsconfig.LoadDefaultConfig(context.Background(), opts...)
if err != nil {
return nil, fmt.Errorf("failed to load default config: %w", err)

View File

@@ -79,7 +79,6 @@ type Layer2Relayer struct {
commitSender *sender.Sender
finalizeSender *sender.Sender
l1RollupABI *abi.ABI
validiumABI *abi.ABI
l2GasOracleABI *abi.ABI
@@ -173,7 +172,6 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
commitSender: commitSender,
finalizeSender: finalizeSender,
l1RollupABI: bridgeAbi.ScrollChainABI,
validiumABI: bridgeAbi.ValidiumABI,
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
batchStrategy: strategy,
@@ -241,11 +239,10 @@ func (r *Layer2Relayer) initializeGenesis() error {
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
Blocks: chunk.Blocks,
}
var dbBatch *orm.Batch
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, encoding.CodecV0, rutils.BatchMetrics{ValidiumMode: r.cfg.ValidiumMode}, dbTX)
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, encoding.CodecV0, rutils.BatchMetrics{}, dbTX)
if err != nil {
return fmt.Errorf("failed to insert batch: %v", err)
}
@@ -277,23 +274,10 @@ func (r *Layer2Relayer) initializeGenesis() error {
}
func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte, stateRoot common.Hash) error {
var calldata []byte
var packErr error
if r.cfg.ValidiumMode {
// validium mode: only pass batchHeader
calldata, packErr = r.validiumABI.Pack("importGenesisBatch", batchHeader)
if packErr != nil {
return fmt.Errorf("failed to pack validium importGenesisBatch with batch header: %v. error: %v", common.Bytes2Hex(batchHeader), packErr)
}
log.Info("Validium importGenesis", "calldata", common.Bytes2Hex(calldata))
} else {
// rollup mode: pass batchHeader and stateRoot
calldata, packErr = r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
if packErr != nil {
return fmt.Errorf("failed to pack rollup importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, packErr)
}
log.Info("Rollup importGenesis", "calldata", common.Bytes2Hex(calldata), "stateRoot", stateRoot)
// encode "importGenesisBatch" transaction calldata
calldata, packErr := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
if packErr != nil {
return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, packErr)
}
// submit genesis batch to L1 rollup contract
@@ -301,7 +285,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
if err != nil {
return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
}
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchHash", batchHash, "validium", r.cfg.ValidiumMode)
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchHash", batchHash)
// wait for confirmation
// we assume that no other transactions are sent before initializeGenesis completes
@@ -326,23 +310,20 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
if !confirmation.IsSuccessful {
return errors.New("import genesis batch tx failed")
}
log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash.String(), "validium", r.cfg.ValidiumMode)
log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash.String())
return nil
}
}
}
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
// Pending batches are submitted if one of the following conditions is met:
// - the first batch is too old -> forceSubmit
// - backlogCount > r.cfg.BatchSubmission.BacklogMax -> forceSubmit
// - we have at least minBatches AND price hits a desired target price
func (r *Layer2Relayer) ProcessPendingBatches() {
// Get effective batch limits based on whether validium mode is enabled.
minBatches, maxBatches := r.getEffectiveBatchLimits()
// get pending batches from database in ascending order by their index.
dbBatches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, maxBatches)
dbBatches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, r.cfg.BatchSubmission.MaxBatches)
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
@@ -451,21 +432,21 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
break
}
if batchesToSubmitLen < maxBatches {
if batchesToSubmitLen < r.cfg.BatchSubmission.MaxBatches {
batchesToSubmit = append(batchesToSubmit, &dbBatchWithChunks{
Batch: dbBatch,
Chunks: dbChunks,
})
}
if len(batchesToSubmit) >= maxBatches {
if len(batchesToSubmit) >= r.cfg.BatchSubmission.MaxBatches {
break
}
}
// we only submit batches if we have a timeout or if we have enough batches to submit
if !forceSubmit && len(batchesToSubmit) < minBatches {
log.Debug("Not enough batches to submit", "count", len(batchesToSubmit), "minBatches", minBatches, "maxBatches", maxBatches)
if !forceSubmit && len(batchesToSubmit) < r.cfg.BatchSubmission.MinBatches {
log.Debug("Not enough batches to submit", "count", len(batchesToSubmit), "minBatches", r.cfg.BatchSubmission.MinBatches, "maxBatches", r.cfg.BatchSubmission.MaxBatches)
return
}
@@ -485,22 +466,10 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
codecVersion := encoding.CodecVersion(firstBatch.CodecVersion)
switch codecVersion {
case encoding.CodecV7, encoding.CodecV8:
if r.cfg.ValidiumMode {
if len(batchesToSubmit) != 1 {
log.Error("validium mode only supports committing one batch at a time", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "batches count", len(batchesToSubmit))
return
}
calldata, maxBlockHeight, totalGasUsed, err = r.constructCommitBatchPayloadValidium(batchesToSubmit[0])
if err != nil {
log.Error("failed to construct validium payload", "codecVersion", codecVersion, "index", batchesToSubmit[0].Batch.Index, "err", err)
return
}
} else {
calldata, blobs, maxBlockHeight, totalGasUsed, err = r.constructCommitBatchPayloadCodecV7(batchesToSubmit, firstBatch, lastBatch)
if err != nil {
log.Error("failed to construct normal payload", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "err", err)
return
}
calldata, blobs, maxBlockHeight, totalGasUsed, err = r.constructCommitBatchPayloadCodecV7(batchesToSubmit, firstBatch, lastBatch)
if err != nil {
log.Error("failed to construct constructCommitBatchPayloadCodecV7 payload for V7", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "err", err)
return
}
default:
log.Error("unsupported codec version in ProcessPendingBatches", "codecVersion", codecVersion, "start index", firstBatch, "end index", lastBatch.Index)
@@ -553,14 +522,6 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
log.Info("Sent the commitBatches tx to layer1", "batches count", len(batchesToSubmit), "start index", firstBatch.Index, "start hash", firstBatch.Hash, "end index", lastBatch.Index, "end hash", lastBatch.Hash, "tx hash", txHash.String())
}
// getEffectiveBatchLimits returns the effective min and max batch limits based on whether validium mode is enabled.
func (r *Layer2Relayer) getEffectiveBatchLimits() (int, int) {
if r.cfg.ValidiumMode {
return 1, 1 // minBatches=1, maxBatches=1
}
return r.cfg.BatchSubmission.MinBatches, r.cfg.BatchSubmission.MaxBatches
}
func (r *Layer2Relayer) contextIDFromBatches(codecVersion encoding.CodecVersion, batches []*dbBatchWithChunks) string {
contextIDs := []string{fmt.Sprintf("v%d", codecVersion)}
for _, batch := range batches {
@@ -729,16 +690,9 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
var calldata []byte
switch encoding.CodecVersion(bundle.CodecVersion) {
case encoding.CodecV7, encoding.CodecV8:
if r.cfg.ValidiumMode {
calldata, err = r.constructFinalizeBundlePayloadValidium(dbBatch, endChunk, aggProof)
if err != nil {
return fmt.Errorf("failed to construct validium finalizeBundle payload, codec version: %v, bundle index: %v, last batch index: %v, err: %w", dbBatch.CodecVersion, bundle.Index, dbBatch.Index, err)
}
} else {
calldata, err = r.constructFinalizeBundlePayloadCodecV7(dbBatch, endChunk, aggProof)
if err != nil {
return fmt.Errorf("failed to construct normal finalizeBundle payload, codec version: %v, bundle index: %v, last batch index: %v, err: %w", dbBatch.CodecVersion, bundle.Index, dbBatch.Index, err)
}
calldata, err = r.constructFinalizeBundlePayloadCodecV7(dbBatch, endChunk, aggProof)
if err != nil {
return fmt.Errorf("failed to construct finalizeBundle payload codecv7, bundle index: %v, last batch index: %v, err: %w", bundle.Index, dbBatch.Index, err)
}
default:
return fmt.Errorf("unsupported codec version in finalizeBundle, bundle index: %v, version: %d", bundle.Index, bundle.CodecVersion)
@@ -997,35 +951,6 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*db
return calldata, blobs, maxBlockHeight, totalGasUsed, nil
}
func (r *Layer2Relayer) constructCommitBatchPayloadValidium(batch *dbBatchWithChunks) ([]byte, uint64, uint64, error) {
// Calculate metrics
var maxBlockHeight uint64
var totalGasUsed uint64
for _, c := range batch.Chunks {
if c.EndBlockNumber > maxBlockHeight {
maxBlockHeight = c.EndBlockNumber
}
totalGasUsed += c.TotalL2TxGas
}
// Get the commitment from the batch data: for validium mode, we use the last L2 block hash as the commitment to the off-chain data
// Get the last chunk from the batch to find the end block hash
// TODO: This is a temporary solution, we might use a larger commitment in the future
if len(batch.Chunks) == 0 {
return nil, 0, 0, fmt.Errorf("last batch has no chunks")
}
lastChunk := batch.Chunks[len(batch.Chunks)-1]
commitment := common.HexToHash(lastChunk.EndBlockHash)
version := encoding.CodecVersion(batch.Batch.CodecVersion)
calldata, err := r.validiumABI.Pack("commitBatch", version, common.HexToHash(batch.Batch.ParentBatchHash), common.HexToHash(batch.Batch.StateRoot), common.HexToHash(batch.Batch.WithdrawRoot), commitment[:])
if err != nil {
return nil, 0, 0, fmt.Errorf("failed to pack commitBatch: %w", err)
}
log.Info("Validium commitBatch", "maxBlockHeight", maxBlockHeight, "commitment", commitment.Hex())
return calldata, maxBlockHeight, totalGasUsed, nil
}
func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, error) {
if aggProof != nil { // finalizeBundle with proof.
calldata, packErr := r.l1RollupABI.Pack(
@@ -1042,8 +967,7 @@ func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch
return calldata, nil
}
log.Info("Packing finalizeBundlePostEuclidV2NoProof", "batchHeaderLength", len(dbBatch.BatchHeader), "codecVersion", dbBatch.CodecVersion, "totalL1Messages", endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk, "stateRoot", dbBatch.StateRoot, "withdrawRoot", dbBatch.WithdrawRoot)
fmt.Println("packing finalizeBundlePostEuclidV2NoProof", len(dbBatch.BatchHeader), dbBatch.CodecVersion, dbBatch.BatchHeader, new(big.Int).SetUint64(endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk), common.HexToHash(dbBatch.StateRoot), common.HexToHash(dbBatch.WithdrawRoot))
// finalizeBundle without proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBundlePostEuclidV2NoProof",
@@ -1058,26 +982,6 @@ func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch
return calldata, nil
}
func (r *Layer2Relayer) constructFinalizeBundlePayloadValidium(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, error) {
log.Info("Packing validium finalizeBundle", "batchHeaderLength", len(dbBatch.BatchHeader), "codecVersion", dbBatch.CodecVersion, "totalL1Messages", endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk, "stateRoot", dbBatch.StateRoot, "withdrawRoot", dbBatch.WithdrawRoot, "withProof", aggProof != nil)
var proof []byte
if aggProof != nil {
proof = aggProof.Proof()
}
calldata, packErr := r.validiumABI.Pack(
"finalizeBundle",
dbBatch.BatchHeader,
new(big.Int).SetUint64(endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk),
proof,
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack validium finalizeBundle: %w", packErr)
}
return calldata, nil
}
// StopSenders stops the senders of the rollup-relayer to prevent querying the removed pending_transaction table in unit tests.
// for unit test
func (r *Layer2Relayer) StopSenders() {

View File

@@ -32,7 +32,6 @@ type BatchProposer struct {
cfg *config.BatchProposerConfig
replayMode bool
validiumMode bool
minCodecVersion encoding.CodecVersion
chainCfg *params.ChainConfig
@@ -54,7 +53,7 @@ type BatchProposer struct {
}
// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, validiumMode bool, reg prometheus.Registerer) *BatchProposer {
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
log.Info("new batch proposer", "batchTimeoutSec", cfg.BatchTimeoutSec, "maxBlobSize", maxBlobSize, "maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize)
p := &BatchProposer{
@@ -64,8 +63,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minC
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
cfg: cfg,
replayMode: false, // default is false, set to true when using proposer tool
validiumMode: validiumMode,
replayMode: false,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,
@@ -173,7 +171,7 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en
// recalculate batch metrics after truncation
var calcErr error
metrics, calcErr = utils.CalculateBatchMetrics(batch, codecVersion, p.validiumMode)
metrics, calcErr = utils.CalculateBatchMetrics(batch, codecVersion)
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics, batch index: %v, error: %w", batch.Index, calcErr)
}
@@ -289,7 +287,7 @@ func (p *BatchProposer) proposeBatch() error {
batch.Blocks = append(batch.Blocks, chunk.Blocks...)
batch.PostL1MessageQueueHash = common.HexToHash(dbChunks[i].PostL1MessageQueueHash)
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version(), p.validiumMode)
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version())
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
}
@@ -314,7 +312,7 @@ func (p *BatchProposer) proposeBatch() error {
batch.PostL1MessageQueueHash = common.HexToHash(dbChunks[i-1].PostL1MessageQueueHash)
batch.Blocks = batch.Blocks[:len(batch.Blocks)-len(lastChunk.Blocks)]
metrics, err = utils.CalculateBatchMetrics(&batch, codec.Version(), p.validiumMode)
metrics, err = utils.CalculateBatchMetrics(&batch, codec.Version())
if err != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", err)
}
@@ -324,7 +322,7 @@ func (p *BatchProposer) proposeBatch() error {
}
}
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version(), p.validiumMode)
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version())
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
}

View File

@@ -100,7 +100,7 @@ func testBatchProposerLimitsCodecV7(t *testing.T) {
DarwinV2Time: new(uint64),
EuclidTime: new(uint64),
EuclidV2Time: new(uint64),
}, db, false /* rollup mode */, nil)
}, db, nil)
bp.TryProposeBatch()
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
@@ -178,7 +178,7 @@ func testBatchProposerBlobSizeLimitCodecV7(t *testing.T) {
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: math.MaxUint32,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
}, encoding.CodecV7, chainConfig, db, nil)
for i := 0; i < 2; i++ {
bp.TryProposeBatch()
@@ -246,7 +246,7 @@ func testBatchProposerMaxChunkNumPerBatchLimitCodecV7(t *testing.T) {
MaxChunksPerBatch: 45,
BatchTimeoutSec: math.MaxUint32,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
}, encoding.CodecV7, chainConfig, db, nil)
bp.TryProposeBatch()
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
@@ -335,7 +335,7 @@ func testBatchProposerUncompressedBatchBytesLimitCodecV8(t *testing.T) {
MaxChunksPerBatch: math.MaxInt32, // No chunk count limit
BatchTimeoutSec: math.MaxUint32, // No timeout limit
MaxUncompressedBatchBytesSize: 4 * 1024, // 4KiB limit
}, encoding.CodecV8, chainConfig, db, false /* rollup mode */, nil)
}, encoding.CodecV8, chainConfig, db, nil)
bp.TryProposeBatch()

View File

@@ -103,7 +103,7 @@ func testBundleProposerLimitsCodecV7(t *testing.T) {
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: 0,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
}, encoding.CodecV7, chainConfig, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
bap.TryProposeBatch() // batch1 contains chunk1

View File

@@ -125,7 +125,7 @@ func NewProposerTool(ctx context.Context, cancel context.CancelFunc, cfg *config
chunkProposer := NewChunkProposer(ctx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, chainCfg, db, nil)
chunkProposer.SetReplayDB(dbForReplay)
batchProposer := NewBatchProposer(ctx, cfg.L2Config.BatchProposerConfig, minCodecVersion, chainCfg, db, false /* rollup mode */, nil)
batchProposer := NewBatchProposer(ctx, cfg.L2Config.BatchProposerConfig, minCodecVersion, chainCfg, db, nil)
batchProposer.SetReplayDB(dbForReplay)
bundleProposer := NewBundleProposer(ctx, cfg.L2Config.BundleProposerConfig, minCodecVersion, chainCfg, db, nil)

View File

@@ -285,7 +285,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
startChunkIndex = parentBatch.EndChunkIndex + 1
}
batchMeta, err := rutils.GetBatchMetadata(batch, codecVersion, metrics.ValidiumMode)
batchMeta, err := rutils.GetBatchMetadata(batch, codecVersion)
if err != nil {
log.Error("failed to get batch metadata", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)

View File

@@ -115,8 +115,7 @@ func (o *BlobUpload) InsertOrUpdateBlobUpload(ctx context.Context, batchIndex ui
return fmt.Errorf("BlobUpload.InsertOrUpdateBlobUpload query error: %w, batch index: %v, batch_hash: %v, platform: %v", err, batchIndex, batchHash, platform)
}
if err := db.Model(&existing).Where("batch_index = ? AND batch_hash = ? AND platform = ? AND deleted_at IS NULL",
batchIndex, batchHash, int16(platform)).Update("status", int16(status)).Error; err != nil {
if err := db.Model(&existing).Update("status", int16(status)).Error; err != nil {
return fmt.Errorf("BlobUpload.InsertOrUpdateBlobUpload update error: %w, batch index: %v, batch_hash: %v, platform: %v", err, batchIndex, batchHash, platform)
}

View File

@@ -1,13 +1,11 @@
package utils
import (
"encoding/binary"
"fmt"
"time"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
)
// ChunkMetrics indicates the metrics for proposing a chunk.
@@ -62,18 +60,15 @@ type BatchMetrics struct {
L1CommitBlobSize uint64
L1CommitUncompressedBatchBytesSize uint64
ValidiumMode bool // default false: rollup mode
// timing metrics
EstimateBlobSizeTime time.Duration
}
// CalculateBatchMetrics calculates batch metrics.
func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVersion, validiumMode bool) (*BatchMetrics, error) {
func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetrics, error) {
metrics := &BatchMetrics{
NumChunks: uint64(len(batch.Chunks)),
FirstBlockTimestamp: batch.Chunks[0].Blocks[0].Header.Time,
ValidiumMode: validiumMode,
}
codec, err := encoding.CodecFromVersion(codecVersion)
@@ -124,59 +119,8 @@ type BatchMetadata struct {
ChallengeDigest common.Hash
}
// encodeBatchHeaderValidium encodes batch header for validium mode and returns both encoded bytes and hash
func encodeBatchHeaderValidium(b *encoding.Batch, codecVersion encoding.CodecVersion) ([]byte, common.Hash, error) {
if b == nil {
return nil, common.Hash{}, fmt.Errorf("batch is nil, version: %v, index: %v", codecVersion, b.Index)
}
if len(b.Blocks) == 0 {
return nil, common.Hash{}, fmt.Errorf("batch contains no blocks, version: %v, index: %v", codecVersion, b.Index)
}
// For validium mode, use the last block hash as commitment to the off-chain data
// TODO: This is a temporary solution, we might use a larger commitment in the future
lastBlock := b.Blocks[len(b.Blocks)-1]
commitment := lastBlock.Header.Hash()
// Batch header field sizes
const (
versionSize = 1
indexSize = 8
parentHashSize = 32
stateRootSize = 32
withdrawRootSize = 32
commitmentSize = 32 // TODO: 32 bytes for now, might use larger commitment in the future
// Total size of validium batch header
validiumBatchHeaderSize = versionSize + indexSize + parentHashSize + stateRootSize + withdrawRootSize + commitmentSize
)
batchBytes := make([]byte, validiumBatchHeaderSize)
// Define offsets for each field
var (
versionOffset = 0
indexOffset = versionOffset + versionSize
parentHashOffset = indexOffset + indexSize
stateRootOffset = parentHashOffset + parentHashSize
withdrawRootOffset = stateRootOffset + stateRootSize
commitmentOffset = withdrawRootOffset + withdrawRootSize
)
batchBytes[versionOffset] = uint8(codecVersion) // version
binary.BigEndian.PutUint64(batchBytes[indexOffset:indexOffset+indexSize], b.Index) // batch index
copy(batchBytes[parentHashOffset:parentHashOffset+parentHashSize], b.ParentBatchHash[0:parentHashSize]) // parentBatchHash
copy(batchBytes[stateRootOffset:stateRootOffset+stateRootSize], b.StateRoot().Bytes()[0:stateRootSize]) // postStateRoot
copy(batchBytes[withdrawRootOffset:withdrawRootOffset+withdrawRootSize], b.WithdrawRoot().Bytes()[0:withdrawRootSize]) // postWithdrawRoot
copy(batchBytes[commitmentOffset:commitmentOffset+commitmentSize], commitment[0:commitmentSize]) // data commitment
hash := crypto.Keccak256Hash(batchBytes)
return batchBytes, hash, nil
}
// GetBatchMetadata retrieves the metadata of a batch.
func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion, validiumMode bool) (*BatchMetadata, error) {
func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetadata, error) {
codec, err := encoding.CodecFromVersion(codecVersion)
if err != nil {
return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err)
@@ -195,17 +139,9 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion,
ChallengeDigest: daBatch.ChallengeDigest(),
}
// If this function is used in Validium, we encode the batch header differently.
if validiumMode {
batchMeta.BatchBytes, batchMeta.BatchHash, err = encodeBatchHeaderValidium(batch, codecVersion)
if err != nil {
return nil, fmt.Errorf("failed to encode batch header for validium, version: %v, index: %v, err: %w", codecVersion, batch.Index, err)
}
}
batchMeta.BatchBlobDataProof, err = daBatch.BlobDataProofForPointEvaluation()
if err != nil {
return nil, fmt.Errorf("failed to get blob data proof, version: %v, index: %v, err: %w", codecVersion, batch.Index, err)
return nil, fmt.Errorf("failed to get blob data proof, version: %v, err: %w", codecVersion, err)
}
numChunks := len(batch.Chunks)

View File

@@ -128,7 +128,7 @@ func testCommitBatchAndFinalizeBundleCodecV7(t *testing.T) {
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: 300,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
}, encoding.CodecV7, chainConfig, db, nil)
bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{
MaxBatchNumPerBundle: 2,

View File

@@ -1,9 +1,7 @@
*.vmexe
openvm.toml
*.bin
*.sol
cache
db
*.json
root-verifier*

View File

@@ -0,0 +1,35 @@
[app_fri_params.fri_params]
log_blowup = 1
log_final_poly_len = 0
num_queries = 100
proof_of_work_bits = 16
[app_vm_config.rv32i]
[app_vm_config.rv32m]
[app_vm_config.io]
[app_vm_config.keccak]
[app_vm_config.castf]
[app_vm_config.modular]
supported_moduli = [
"4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787",
"52435875175126190479447740508185965837690552500527637822603658699938581184513",
]
[app_vm_config.native]
[app_vm_config.pairing]
supported_curves = ["Bls12_381"]
[app_vm_config.sha256]
[app_vm_config.fp2]
supported_moduli = [
["Bls12_381Fp2","4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787"]
]
[[app_vm_config.ecc.supported_curves]]
struct_name = "Bls12_381G1Affine"
modulus = "4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787"
scalar = "52435875175126190479447740508185965837690552500527637822603658699938581184513"
a = "0"
b = "4"

View File

@@ -0,0 +1,17 @@
[app_fri_params.fri_params]
log_blowup = 1
log_final_poly_len = 0
num_queries = 100
proof_of_work_bits = 16
[app_vm_config.rv32i]
[app_vm_config.rv32m]
[app_vm_config.io]
[app_vm_config.keccak]
[app_vm_config.castf]
[app_vm_config.native]

View File

@@ -0,0 +1,58 @@
[app_fri_params.fri_params]
log_blowup = 1
log_final_poly_len = 0
num_queries = 100
proof_of_work_bits = 16
[app_vm_config.rv32i]
[app_vm_config.io]
[app_vm_config.keccak]
[app_vm_config.rv32m]
range_tuple_checker_sizes = [256, 8192]
[app_vm_config.bigint]
range_tuple_checker_sizes = [256, 8192]
[app_vm_config.modular]
supported_moduli = [
"21888242871839275222246405745257275088696311157297823662689037894645226208583",
"21888242871839275222246405745257275088548364400416034343698204186575808495617",
"115792089237316195423570985008687907853269984665640564039457584007908834671663",
"115792089237316195423570985008687907852837564279074904382605163141518161494337",
"115792089210356248762697446949407573530086143415290314195533631308867097853951",
"115792089210356248762697446949407573529996955224135760342422259061068512044369"
]
[app_vm_config.fp2]
supported_moduli = [
["Bn254Fp2","21888242871839275222246405745257275088696311157297823662689037894645226208583"]
]
[app_vm_config.pairing]
supported_curves = ["Bn254"]
[app_vm_config.sha256]
[[app_vm_config.ecc.supported_curves]]
struct_name = "Secp256k1Point"
modulus = "115792089237316195423570985008687907853269984665640564039457584007908834671663"
scalar = "115792089237316195423570985008687907852837564279074904382605163141518161494337"
a = "0"
b = "7"
[[app_vm_config.ecc.supported_curves]]
struct_name = "P256Point"
modulus = "115792089210356248762697446949407573530086143415290314195533631308867097853951"
scalar = "115792089210356248762697446949407573529996955224135760342422259061068512044369"
a = "115792089210356248762697446949407573530086143415290314195533631308867097853948"
b = "41058363725152142129326129780047268409114441015993725554835256314039467401291"
[[app_vm_config.ecc.supported_curves]]
struct_name = "Bn254G1Affine"
modulus = "21888242871839275222246405745257275088696311157297823662689037894645226208583"
scalar = "21888242871839275222246405745257275088548364400416034343698204186575808495617"
a = "0"
b = "3"

View File

@@ -21,7 +21,7 @@ endif
ZKVM_COMMIT=$(shell echo ${ZKVM_VERSION} | cut -d " " -f2)
$(info ZKVM_COMMIT is ${ZKVM_COMMIT})
#PLONKY3_GPU_VERSION=$(shell ./print_plonky3gpu_version.sh | sed -n '2p')
PLONKY3_GPU_VERSION=$(shell ./print_plonky3gpu_version.sh | sed -n '2p')
GIT_REV=$(shell git rev-parse --short HEAD)
GO_TAG=$(shell grep "var tag = " ../common/version/version.go | cut -d "\"" -f2)
@@ -32,27 +32,16 @@ else
$(info GO_TAG is ${GO_TAG})
endif
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_VERSION}
#ifeq (${PLONKY3_GPU_VERSION},)
# # use plonky3 with CPU
# ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_VERSION}
#else
# # use gpu
# ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION}
#endif
prover_gpu:
cd ../crates/gpu_override && cargo tree >/dev/null
$(eval PLONKY3_GPU_VERSION:=$(shell ./print_plonky3gpu_version.sh | sed -n '2p'))
$(eval ZK_VERSION:=${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION})
@echo "Updated ZK_VERSION to ${ZK_VERSION} after prover_gpu"
prover: prover_gpu
cd ../crates/gpu_override && GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --release -p prover
prover_cpu:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --release -p prover
ifeq (${PLONKY3_GPU_VERSION},)
# use plonky3 with CPU
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_VERSION}
else
# use halo2_gpu
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION}
endif
prover:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cd ../crates/prover-bin && cargo build --release
tests_binary:
cargo clean && cargo test --release --no-run
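With the conditional CPU/GPU block removed, ZK_VERSION now always starts from the plonky3 CPU version, and the prover_gpu target re-derives it from the GPU crate graph before building. The derivation reduces to two shell steps (a sketch of what the Makefile runs; ZKVM_VERSION comes from the lines above):

ZKVM_COMMIT=$(echo "$ZKVM_VERSION" | cut -d ' ' -f2)
PLONKY3_GPU_VERSION=$(./print_plonky3gpu_version.sh | sed -n '2p')  # second output line
ZK_VERSION="${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION}"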

View File

@@ -1,36 +1,20 @@
#!/bin/bash
# Define version mapping
declare -A VERSION_MAP
VERSION_MAP["euclid"]="0.4.3"
VERSION_MAP["feynman"]="0.5.0rc0"
# release version
if [ -z "${SCROLL_ZKVM_VERSION}" ]; then
# Check if first argument is provided and matches a known version name
if [ -n "$1" ] && [ -n "${VERSION_MAP[$1]}" ]; then
SCROLL_ZKVM_VERSION=${VERSION_MAP[$1]}
echo "Setting SCROLL_ZKVM_VERSION to ${SCROLL_ZKVM_VERSION} based on '$1' argument"
else
# Default version if no argument or not recognized
SCROLL_ZKVM_VERSION=0.5.0rc0
fi
SCROLL_ZKVM_VERSION=$($SHELL ./print_high_zkvm_version.sh | cut -d' ' -f1|cut -c2-)
fi
echo $SCROLL_ZKVM_VERSION
mkdir -p .work/chunk
mkdir -p .work/batch
mkdir -p .work/bundle
# chunk-circuit exe
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/chunk/app.vmexe -O .work/chunk/app.vmexe
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/chunk/openvm.toml -O .work/chunk/openvm.toml
# batch-circuit exe
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/batch/app.vmexe -O .work/batch/app.vmexe
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/batch/openvm.toml -O .work/batch/openvm.toml
# bundle-circuit exe
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/app.vmexe -O .work/bundle/app.vmexe
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/openvm.toml -O .work/bundle/openvm.toml
# bundle-circuit exe, legacy version, may not exist
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/bundle/app_euclidv1.vmexe -O .work/bundle/app_euclidv1.vmexe || echo "legacy app not exist for $SCROLL_ZKVM_VERSION"