Compare commits

...

4 Commits

Author | SHA1 | Message | Date
colin | 9dc57c6126 | feat(rollup-relayer): support codecv8 (#1681) (Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>) | 2025-06-26 21:17:56 +08:00
Ho | 9367565a31 | [Refactor] Universal task (#1680) (Co-authored-by: georgehao <haohongfan@gmail.com>) | 2025-06-25 14:42:51 +08:00
Ho | d2f7663d26 | [Refactor] For universal prover task (the update on coordinator) (#1682) (Co-authored-by: georgehao <georgehao@users.noreply.github.com>) | 2025-06-19 14:54:08 +09:00
Morty | b0943b1035 | refactor(rollup-relayer): simplify construct commit batch payload (#1673) | 2025-06-18 21:37:29 +08:00
96 changed files with 3588 additions and 10113 deletions


@@ -41,7 +41,7 @@ jobs:
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "common/libzkp/impl -> target"
workspaces: ". -> target"
# - name: Lint
# working-directory: 'common'
# run: |


@@ -112,7 +112,7 @@ jobs:
- name: Test coordinator packages
working-directory: 'coordinator'
run: |
# go test -exec "env LD_LIBRARY_PATH=${PWD}/verifier/lib" -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="scroll-tech/coordinator" -coverprofile=coverage.txt -covermode=atomic ./...
make libzkp
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic -tags mock_verifier ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3


@@ -38,6 +38,7 @@ jobs:
make dev_docker
make -C rollup mock_abi
make -C common/bytecode all
make -C coordinator/internal/logic/libzkp build
- name: Run integration tests
run: |
go test -v -tags="mock_prover mock_verifier" -p 1 -coverprofile=coverage.txt scroll-tech/integration-test/...

File diff suppressed because it is too large

Cargo.toml Normal file

@@ -0,0 +1,66 @@
[workspace]
members = [
"crates/libzkp",
"crates/l2geth",
"crates/libzkp_c",
"crates/prover-bin",
]
resolver = "2"
[workspace.package]
authors = ["Scroll developers"]
edition = "2021"
homepage = "https://scroll.io"
readme = "README.md"
repository = "https://github.com/scroll-tech/scroll"
version = "4.5.8"
[workspace.dependencies]
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "29c99de", package = "scroll-zkvm-prover" }
scroll-zkvm-verifier-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "29c99de", package = "scroll-zkvm-verifier" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "29c99de" }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade", features = ["scroll"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade" }
metrics = "0.23.0"
metrics-util = "0.17"
metrics-tracing-context = "0.16.0"
anyhow = "1.0"
alloy = { version = "0.11", default-features = false }
alloy-primitives = { version = "0.8", default-features = false }
# also use this to trigger "serde" feature for primitives
alloy-serde = { version = "0.8", default-features = false }
rkyv = "0.8"
serde = { version = "1", default-features = false, features = ["derive"] }
serde_json = { version = "1.0" }
serde_derive = "1.0"
serde_with = "3.11.0"
itertools = "0.14"
tiny-keccak = "2.0"
tracing = "0.1"
eyre = "0.6"
bincode_v1 = { version = "1.3", package = "bincode"}
snark-verifier-sdk = { version = "0.2.0", default-features = false, features = [
"loader_halo2",
"halo2-axiom",
"display",
] }
once_cell = "1.20"
base64 = "0.22"
#TODO: upgrade when Feynman
vm-zstd = { git = "https://github.com/scroll-tech/rust-zstd-decompressor.git", tag = "v0.1.1" }
[patch.crates-io]
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v0.8.18-euclid-upgrade" }
ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-euclid-upgrade" }
[profile.maxperf]
inherits = "release"
lto = "fat"
codegen-units = 1


@@ -28,7 +28,7 @@ We welcome community contributions to this repository. Before you submit any iss
## Prerequisites
+ Go 1.21
+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
+ Rust (for version, see [rust-toolchain](./rust-toolchain))
+ Hardhat / Foundry
+ Docker


@@ -10,15 +10,15 @@ require (
github.com/go-redis/redis/v8 v8.11.5
github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.11.0
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 // It's a hotfix for the header hash incompatibility issue, pls change this with caution
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c // It's a hotfix for the header hash incompatibility issue, pls change this with caution
require (
dario.cat/mergo v1.0.0 // indirect


@@ -309,10 +309,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54 h1:qVpsVu1J91opTn6HYeuzWcBRVhQmPR8g05i+PlOjlI4=
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 h1:qfOaRflvH1vtnFWloB7BveKlP/VqYgMqLJ6e9TlBJ/8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6 h1:vb2XLvQwCf+F/ifP6P/lfeiQrHY6+Yb/E3R4KHXLqSE=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c h1:IpEBKM6O+xOK2qZVZztGxcobFXkKMb5hAkBEVzfXjVg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=


@@ -3,11 +3,13 @@ FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.21-rust-nightly-2023-12-03
WORKDIR app
FROM chef as planner
COPY ./common/libzkp/impl/ .
COPY ./crates ./
COPY ./Cargo.* ./
COPY ./rust-toolchain ./
RUN cargo chef prepare --recipe-path recipe.json
FROM chef as zkp-builder
COPY ./common/libzkp/impl/rust-toolchain ./
COPY ./rust-toolchain ./
COPY --from=planner /app/recipe.json recipe.json
# run scripts to get openvm-gpu
COPY ./build/dockerfiles/coordinator-api/plonky3-gpu /plonky3-gpu
@@ -17,8 +19,9 @@ COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
RUN cargo chef cook --release --recipe-path recipe.json
COPY ./common/libzkp/impl .
RUN cargo build --release
COPY ./crates ./
COPY ./Cargo.* ./
RUN cargo build --release -p libzkp-c
# Download Go dependencies
@@ -37,9 +40,9 @@ RUN go mod download -x
# Build coordinator
FROM base as builder
COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/libzkp/lib/
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api && mv ./build/bin/coordinator_api /bin/coordinator_api
RUN mv coordinator/internal/logic/libzkp/lib /bin/
# Pull coordinator into a second stage deploy ubuntu container
FROM nvidia/cuda:11.7.1-runtime-ubuntu22.04


@@ -18,6 +18,6 @@ RUN cd /src/zkvm-prover && make prover
FROM ubuntu:24.04 AS runtime
COPY --from=builder /src/zkvm-prover/target/release/prover /usr/local/bin/
COPY --from=builder /src/target/release/prover /usr/local/bin/
ENTRYPOINT ["prover"]

File diff suppressed because it is too large

@@ -1,11 +0,0 @@
.PHONY: help fmt clippy test test-ci test-all
build:
@cargo build --release
fmt:
@cargo fmt --all -- --check
clippy:
@cargo check --all-features
@cargo clippy --release -- -D warnings


@@ -1 +0,0 @@
nightly-2024-12-06


@@ -1,76 +0,0 @@
mod utils;
mod verifier;
use std::path::Path;
use crate::utils::{c_char_to_str, c_char_to_vec};
use libc::c_char;
use verifier::{TaskType, VerifierConfig};
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init(config: *const c_char) {
let config_str = c_char_to_str(config);
let verifier_config = serde_json::from_str::<VerifierConfig>(config_str).unwrap();
verifier::init(verifier_config);
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_chunk_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Chunk)
}
fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
let fork_name_str = c_char_to_str(fork_name);
let proof = c_char_to_vec(proof);
let verifier = verifier::get_verifier(fork_name_str);
if let Err(e) = verifier {
log::warn!("failed to get verifier, error: {:#}", e);
return 0 as c_char;
}
match verifier.unwrap().verify(task_type, proof) {
Err(e) => {
log::error!("{:?} verify failed, error: {:#}", task_type, e);
false as c_char
}
Ok(result) => result as c_char,
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Batch)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_bundle_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Bundle)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn dump_vk(fork_name: *const c_char, file: *const c_char) {
_dump_vk(fork_name, file);
}
fn _dump_vk(fork_name: *const c_char, file: *const c_char) {
let fork_name_str = c_char_to_str(fork_name);
let verifier = verifier::get_verifier(fork_name_str);
if let Ok(verifier) = verifier {
verifier.as_ref().dump_vk(Path::new(c_char_to_str(file)));
}
}


@@ -1,27 +0,0 @@
use std::{
ffi::CStr,
os::raw::c_char,
panic::{catch_unwind, AssertUnwindSafe},
};
pub(crate) fn c_char_to_str(c: *const c_char) -> &'static str {
let cstr = unsafe { CStr::from_ptr(c) };
cstr.to_str().unwrap()
}
pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec<u8> {
let cstr = unsafe { CStr::from_ptr(c) };
cstr.to_bytes().to_vec()
}
pub(crate) fn panic_catch<F: FnOnce() -> R, R>(f: F) -> Result<R, String> {
catch_unwind(AssertUnwindSafe(f)).map_err(|err| {
if let Some(s) = err.downcast_ref::<String>() {
s.to_string()
} else if let Some(s) = err.downcast_ref::<&str>() {
s.to_string()
} else {
format!("unable to get panic info {err:?}")
}
})
}


@@ -1,12 +0,0 @@
// BatchVerifier is used to:
// - Verify a batch proof
// - Verify a bundle proof
void init(char* config);
char verify_batch_proof(char* proof, char* fork_name);
char verify_bundle_proof(char* proof, char* fork_name);
char verify_chunk_proof(char* proof, char* fork_name);
void dump_vk(char* fork_name, char* file);


@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.5.23"
var tag = "v4.5.25"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -23,7 +23,7 @@ var commit = func() string {
return "000000"
}()
// ZkVersion is commit-id of common/libzkp/impl/cargo.lock/scroll-prover and halo2, contacted by a "-"
// ZkVersion is the commit-id of Cargo.lock's zkvm-prover and openvm, concatenated by a "-"
// The default `000000-000000` is set for integration test, and will be overwritten by coordinator's & prover's actual compilations (see their Makefiles).
var ZkVersion = "000000-000000"


@@ -1,3 +1,4 @@
/build/bin
.idea
internal/logic/verifier/lib
internal/libzkp/lib/libzkp.so


@@ -2,25 +2,30 @@
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
LIBZKP_PATH=./internal/logic/libzkp/lib/libzkp.so
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
ZKVM_VERSION=$(shell grep -m 1 "zkvm-prover?" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
OPENVM_VERSION=$(shell grep -m 1 "openvm.git" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
ZKVM_VERSION=$(shell grep -m 1 "zkvm-prover?" ../Cargo.lock | cut -d "\#" -f2 | cut -c-7)
OPENVM_VERSION=$(shell grep -m 1 "openvm.git" ../Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
ZK_VERSION=${ZKVM_VERSION}-${OPENVM_VERSION}
test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
libzkp:
cd ../common/libzkp/impl && cargo clean && cargo build --release && cp ./target/release/libzkp.so ../interface/
rm -rf ./internal/logic/verifier/lib && cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
$(LIBZKP_PATH):
$(MAKE) -C ./internal/logic/libzkp build
coordinator_api: libzkp ## Builds the Coordinator api instance.
clean_libzkp:
$(MAKE) -C ./internal/logic/libzkp clean
libzkp: clean_libzkp $(LIBZKP_PATH)
coordinator_api: $(LIBZKP_PATH) ## Builds the Coordinator api instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
coordinator_cron:
@@ -29,8 +34,8 @@ coordinator_cron:
coordinator_tool:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool
coordinator_api_skip_libzkp:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
#coordinator_api_skip_libzkp:
# go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
mock_coordinator_api: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_api ./cmd/api
@@ -38,14 +43,13 @@ mock_coordinator_api: ## Builds the mocked Coordinator instance.
mock_coordinator_cron: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron
test-verifier: libzkp
test-verifier: $(LIBZKP_PATH)
go test -tags ffi -timeout 0 -v ./internal/logic/verifier
test-gpu-verifier: libzkp
test-gpu-verifier: $(LIBZKP_PATH)
go test -tags="gpu ffi" -timeout 0 -v ./internal/logic/verifier
lint: ## Lint the files - used for CI
cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
GOBIN=$(PWD)/build/bin go run ../build/lint.go
clean: ## Empty out the bin folder


@@ -9,8 +9,8 @@ require (
github.com/google/uuid v1.6.0
github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7
github.com/shopspring/decimal v1.3.1
github.com/stretchr/testify v1.10.0
github.com/urfave/cli/v2 v2.25.7


@@ -177,10 +177,10 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6 h1:vb2XLvQwCf+F/ifP6P/lfeiQrHY6+Yb/E3R4KHXLqSE=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7 h1:1rN1qocsQlOyk1VCpIEF1J5pfQbLAi1pnMZSLQS37jQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=


@@ -28,10 +28,16 @@ type ProverManager struct {
BundleCollectionTimeSec int `json:"bundle_collection_time_sec"`
}
// l2geth client configuration items
type L2Endpoint struct {
Url string `json:"endpoint"`
}
// L2 loads l2geth configuration items.
type L2 struct {
// l2geth chain_id.
ChainID uint64 `json:"chain_id"`
ChainID uint64 `json:"chain_id"`
Endpoint *L2Endpoint `json:"l2geth"`
}
// Auth provides the auth coordinator


@@ -28,6 +28,17 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
log.Info("verifier created", "openVmVerifier", vf.OpenVMVkMap)
// TODO: enable this when the libzkp has been updated
/*l2cfg := cfg.L2.Endpoint
if l2cfg == nil {
panic("l2geth is not specified")
}
l2cfgBytes, err := json.Marshal(l2cfg)
if err != nil {
panic(err)
}
libzkp.InitL2geth(string(l2cfgBytes))*/
Auth = NewAuthController(db, cfg, vf)
GetTask = NewGetTaskController(cfg, chainCfg, db, reg)
SubmitProof = NewSubmitProofController(cfg, chainCfg, db, vf, reg)
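
For orientation, here is a minimal, self-contained Go sketch (not part of this diff) of the JSON shape the disabled wiring above would hand to libzkp.InitL2geth: `L2Endpoint.Url` serializes under the "endpoint" key, which is exactly the alias that `RpcConfig.rpc_url` accepts on the Rust side (see crates/l2geth/src/rpc_client.rs below). The endpoint value is hypothetical.

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the L2Endpoint struct added to coordinator/internal/config above.
type L2Endpoint struct {
	Url string `json:"endpoint"`
}

func main() {
	cfg := L2Endpoint{Url: "http://localhost:8545"} // hypothetical l2geth endpoint
	b, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	// Prints {"endpoint":"http://localhost:8545"}. The "endpoint" key matches
	// the #[serde(alias = "endpoint")] on RpcConfig.rpc_url in rpc_client.rs;
	// workers, max_retry, backoff and cups fall back to their serde defaults.
	fmt.Println(string(b))
}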


@@ -0,0 +1,17 @@
.PHONY: help fmt clippy test test-ci test-all
build:
@cargo build --release -p libzkp-c
@mkdir -p lib
@cp -f ../../../../target/release/libzkp.so lib/
fmt:
@cargo fmt --all -- --check
clean:
@cargo clean --release -p libzkp -p libzkp-c -p l2geth
@rm -f lib/libzkp.so
clippy:
@cargo check --release --all-features
@cargo clippy --release -- -D warnings


@@ -20,7 +20,7 @@ function build_test_bins() {
cd $REPO/coordinator
make libzkp
go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier
cd $REPO/common/libzkp
cd $REPO/coordinator/internal/logic/libzkp
}
build_test_bins


@@ -0,0 +1,145 @@
package libzkp
/*
#cgo LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib -Wl,-rpath=${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#include <stdlib.h>
#include "libzkp.h"
*/
import "C" //nolint:typecheck
import (
"fmt"
"os"
"unsafe"
"scroll-tech/common/types/message"
)
// Helper function to convert a Go string to a C string (caller frees via freeCString)
func goToCString(s string) *C.char {
return C.CString(s)
}
// Helper function to free C string
func freeCString(s *C.char) {
C.free(unsafe.Pointer(s))
}
// Initialize the verifier
func InitVerifier(configJSON string) {
cConfig := goToCString(configJSON)
defer freeCString(cConfig)
C.init_verifier(cConfig)
}
// Initialize the l2geth client
func InitL2geth(configJSON string) {
cConfig := goToCString(configJSON)
defer freeCString(cConfig)
C.init_l2geth(cConfig)
}
// Verify a chunk proof
func VerifyChunkProof(proofData, forkName string) bool {
cProof := goToCString(proofData)
cForkName := goToCString(forkName)
defer freeCString(cProof)
defer freeCString(cForkName)
result := C.verify_chunk_proof(cProof, cForkName)
return result != 0
}
// Verify a batch proof
func VerifyBatchProof(proofData, forkName string) bool {
cProof := goToCString(proofData)
cForkName := goToCString(forkName)
defer freeCString(cProof)
defer freeCString(cForkName)
result := C.verify_batch_proof(cProof, cForkName)
return result != 0
}
// Verify a bundle proof
func VerifyBundleProof(proofData, forkName string) bool {
cProof := goToCString(proofData)
cForkName := goToCString(forkName)
defer freeCString(cProof)
defer freeCString(cForkName)
result := C.verify_bundle_proof(cProof, cForkName)
return result != 0
}
// TaskType enum values matching the Rust enum
const (
TaskTypeChunk = 0
TaskTypeBatch = 1
TaskTypeBundle = 2
)
func fromMessageTaskType(taskType int) int {
switch message.ProofType(taskType) {
case message.ProofTypeChunk:
return TaskTypeChunk
case message.ProofTypeBatch:
return TaskTypeBatch
case message.ProofTypeBundle:
return TaskTypeBundle
default:
panic(fmt.Sprintf("unsupported proof type: %d", taskType))
}
}
// Generate a universal task
func GenerateUniversalTask(taskType int, taskJSON, forkName string) (bool, string, string, []byte) {
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, forkName)
}
// Generate wrapped proof
func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {
cProofJSON := goToCString(proofJSON)
cMetadata := goToCString(metadata)
defer freeCString(cProofJSON)
defer freeCString(cMetadata)
// Create a C array from Go slice
var cVkData *C.char
if len(vkData) > 0 {
cVkData = (*C.char)(unsafe.Pointer(&vkData[0]))
}
resultPtr := C.gen_wrapped_proof(cProofJSON, cMetadata, cVkData, C.size_t(len(vkData)))
if resultPtr == nil {
return ""
}
// Convert result to Go string and free C memory
result := C.GoString(resultPtr)
C.release_string(resultPtr)
return result
}
// Dumps a verification key to a file
func DumpVk(forkName, filePath string) error {
cForkName := goToCString(forkName)
cFilePath := goToCString(filePath)
defer freeCString(cForkName)
defer freeCString(cFilePath)
// Call the C function to dump the verification key
C.dump_vk(cForkName, cFilePath)
// Check if the file was created successfully
// Note: The C function doesn't return an error code, so we check if the file exists
if _, err := os.Stat(filePath); os.IsNotExist(err) {
return fmt.Errorf("failed to dump verification key: file %s was not created", filePath)
}
return nil
}
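
Taken together, these bindings give the coordinator a two-step universal flow: generate a universal task and record its metadata, then re-wrap the prover's raw proof with that metadata on submission. Below is a hedged sketch under stated assumptions, not part of this diff: it would live in this libzkp package (which already imports "fmt"), proveSomehow is a hypothetical stand-in for the external universal prover, and the empty vk mirrors the HandleZkProof call in submit_proof.go further down.

// proveSomehow is a hypothetical stand-in for the external universal prover.
func proveSomehow(uTask string) string { return "{}" }

// universalRoundTrip sketches how GenerateUniversalTask and
// GenerateWrappedProof compose in the coordinator's universal flow.
func universalRoundTrip(taskType int, taskJSON, forkName string) (string, error) {
	// Step 1: derive the universal task; keep the metadata for re-wrapping.
	ok, uTask, metadata, piHash := GenerateUniversalTask(taskType, taskJSON, forkName)
	if !ok {
		return "", fmt.Errorf("cannot generate universal task")
	}
	_ = piHash // 32-byte expected public-input hash, available for later checks
	// The universal prover consumes uTask and returns a raw proof JSON.
	rawProofJSON := proveSomehow(uTask)
	// Step 2: re-wrap the raw proof with the recorded metadata.
	wrapped := GenerateWrappedProof(rawProofJSON, metadata, []byte{})
	if wrapped == "" {
		return "", fmt.Errorf("cannot wrap proof")
	}
	return wrapped, nil
}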


@@ -0,0 +1,48 @@
// Verifier is used to:
// - Verify a batch proof
// - Verify a bundle proof
// - Verify a chunk proof
#ifndef LIBZKP_H
#define LIBZKP_H
#include <stddef.h> // For size_t
// Initialize the verifier with configuration
void init_verifier(char* config);
// Initialize the l2geth with configuration
void init_l2geth(char* config);
// Verify proofs - returns non-zero for success, zero for failure
char verify_batch_proof(char* proof, char* fork_name);
char verify_bundle_proof(char* proof, char* fork_name);
char verify_chunk_proof(char* proof, char* fork_name);
// Dump verification key to file
void dump_vk(char* fork_name, char* file);
// The result struct to hold data from handling a proving task
typedef struct {
char ok;
char* universal_task;
char* metadata;
char expected_pi_hash[32];
} HandlingResult;
// Generate a universal task based on task type and input JSON
// Returns a struct containing task data, metadata, and expected proof hash
HandlingResult gen_universal_task(int task_type, char* task, char* fork_name);
// Release memory allocated for a HandlingResult returned by gen_universal_task
void release_task_result(HandlingResult result);
// Generate a wrapped proof from the universal prover output and metadata
// Returns a JSON string containing the wrapped proof, or NULL on error
// The caller must call release_string on the returned pointer when done
char* gen_wrapped_proof(char* proof_json, char* metadata, char* vk, size_t vk_len);
// Release memory allocated for a string returned by gen_wrapped_proof
void release_string(char* string_ptr);
#endif /* LIBZKP_H */


@@ -0,0 +1,42 @@
//go:build mock_verifier
package libzkp
import (
"encoding/json"
"fmt"
"scroll-tech/common/types/message"
"github.com/scroll-tech/go-ethereum/common"
)
func generateUniversalTask(taskType int, taskJSON, forkName string) (bool, string, string, []byte) {
fmt.Printf("call mocked generate universal task %d, taskJson %s\n", taskType, taskJSON)
var metadata interface{}
switch taskType {
case TaskTypeChunk:
metadata = struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}
case TaskTypeBatch:
metadata = struct {
BatchInfo *message.OpenVMBatchInfo `json:"batch_info"`
BatchHash common.Hash `json:"batch_hash"`
}{BatchInfo: &message.OpenVMBatchInfo{}}
case TaskTypeBundle:
metadata = struct {
BundleInfo *message.OpenVMBundleInfo `json:"bundle_info"`
BundlePIHash common.Hash `json:"bundle_pi_hash"`
}{BundleInfo: &message.OpenVMBundleInfo{}}
}
encodeData, err := json.Marshal(metadata)
if err != nil {
fmt.Println("mock encoding json fail:", err)
return false, "", "", nil
}
return true, "UniversalTask data is not parsed", string(encodeData), []byte{0}
}


@@ -0,0 +1,36 @@
//go:build !mock_verifier
package libzkp
/*
#include <stdlib.h>
#include "libzkp.h"
*/
import "C" //nolint:typecheck
func generateUniversalTask(taskType int, taskJSON, forkName string) (bool, string, string, []byte) {
cTask := goToCString(taskJSON)
cForkName := goToCString(forkName)
defer freeCString(cTask)
defer freeCString(cForkName)
result := C.gen_universal_task(C.int(taskType), cTask, cForkName)
defer C.release_task_result(result)
// Check if the operation was successful
if result.ok == 0 {
return false, "", "", nil
}
// Convert C strings to Go strings
universalTask := C.GoString(result.universal_task)
metadata := C.GoString(result.metadata)
// Convert C array to Go slice
piHash := make([]byte, 32)
for i := 0; i < 32; i++ {
piHash[i] = byte(result.expected_pi_hash[i])
}
return true, universalTask, metadata, piHash
}


@@ -161,19 +161,31 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
AssignedAt: utils.NowUTC(),
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, batchTask, hardForkName)
if err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("format prover task failure", "task_id", batchTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
if getTaskParameter.Universal {
var metadata []byte
taskMsg, metadata, err = bp.applyUniversal(taskMsg)
if err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("Generate universal prover task failure", "task_id", batchTask.Hash, "type", "batch")
return nil, ErrCoordinatorInternalFailure
}
proverTask.Metadata = metadata
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
// notice uuid is set as a side effect of InsertProverTask
taskMsg.UUID = proverTask.UUID.String()
bp.batchTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
bp.batchTaskGetTaskProver.With(prometheus.Labels{
@@ -233,7 +245,6 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
}
taskMsg := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBatch),
TaskData: string(chunkProofsBytes),
@@ -266,7 +277,7 @@ func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*
dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
switch dbBatchCodecVersion {
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7:
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7, encoding.CodecV8:
default:
return taskDetail, nil
}


@@ -159,19 +159,33 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
AssignedAt: utils.NowUTC(),
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("insert bundle prover task info fail", "task_id", bundleTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, hardForkName)
if err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("format bundle prover task failure", "task_id", bundleTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
if getTaskParameter.Universal {
var metadata []byte
taskMsg, metadata, err = bp.applyUniversal(taskMsg)
if err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("Generate universal prover task failure", "task_id", bundleTask.Hash, "type", "bundle")
return nil, ErrCoordinatorInternalFailure
}
// bundle proofs require a snark
taskMsg.UseSnark = true
proverTask.Metadata = metadata
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("insert bundle prover task info fail", "task_id", bundleTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
// notice uuid is set as a side effect of InsertProverTask
taskMsg.UUID = proverTask.UUID.String()
bp.bundleTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
bp.bundleTaskGetTaskProver.With(prometheus.Labels{
@@ -237,7 +251,6 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
}
taskMsg := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBundle),
TaskData: string(batchProofsBytes),


@@ -157,12 +157,6 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
AssignedAt: utils.NowUTC(),
}
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("insert chunk prover task fail", "task_id", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, chunkTask, hardForkName)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
@@ -170,6 +164,25 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, ErrCoordinatorInternalFailure
}
if getTaskParameter.Universal {
var metadata []byte
taskMsg, metadata, err = cp.applyUniversal(taskMsg)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("Generate universal prover task failure", "task_id", chunkTask.Hash, "type", "chunk")
return nil, ErrCoordinatorInternalFailure
}
proverTask.Metadata = metadata
}
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("insert chunk prover task fail", "task_id", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
// notice uuid is set as a side effect of InsertProverTask
taskMsg.UUID = proverTask.UUID.String()
cp.chunkTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
cp.chunkTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
@@ -207,7 +220,6 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
}
proverTaskSchema := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeChunk),
TaskData: string(taskDetailBytes),


@@ -16,6 +16,7 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/libzkp"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
@@ -185,6 +186,16 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, e
return &ptc, nil
}
func (b *BaseProverTask) applyUniversal(schema *coordinatorType.GetTaskSchema) (*coordinatorType.GetTaskSchema, []byte, error) {
ok, uTaskData, metadata, _ := libzkp.GenerateUniversalTask(schema.TaskType, schema.TaskData, schema.HardForkName)
if !ok {
return nil, nil, fmt.Errorf("cannot generate universal task; see coordinator log for the reason")
}
schema.TaskData = uTaskData
return schema, []byte(metadata), nil
}
func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
getTaskCounterInitOnce.Do(func() {
getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{


@@ -19,6 +19,8 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/libzkp"
"scroll-tech/coordinator/internal/logic/provertask"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
@@ -69,6 +71,10 @@ type ProofReceiverLogic struct {
validateFailureProverTaskStatusNotOk prometheus.Counter
validateFailureProverTaskTimeout prometheus.Counter
validateFailureProverTaskHaveVerifier prometheus.Counter
ChunkTask provertask.ProverTask
BundleTask provertask.ProverTask
BatchTask provertask.ProverTask
}
// NewSubmitProofReceiverLogic create a proof receiver logic
@@ -168,6 +174,15 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
if getHardForkErr != nil {
return ErrGetHardForkNameFailed
}
if proofParameter.Universal {
if len(proverTask.Metadata) == 0 {
return errors.New("cannot re-wrap proof: no metadata has been recorded in advance")
}
proofParameter.Proof = libzkp.GenerateWrappedProof(proofParameter.Proof, string(proverTask.Metadata), []byte{})
if proofParameter.Proof == "" {
return errors.New("cannot re-wrap proof; see coordinator log for the reason")
}
}
switch message.ProofType(proofParameter.TaskType) {
case message.ProofTypeChunk:


@@ -2,30 +2,23 @@
package verifier
/*
#cgo LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#include <stdlib.h>
#include "./lib/libzkp.h"
*/
import "C" //nolint:typecheck
import (
"encoding/base64"
"encoding/json"
"io"
"os"
"path"
"unsafe"
"path/filepath"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/libzkp"
)
// This struct maps to `CircuitConfig` in common/libzkp/impl/src/verifier.rs
// This struct maps to `CircuitConfig` in crates/libzkp/src/verifier.rs
// Defining a brand new struct here eliminates side effects in case fields
// in `*config.CircuitConfig` are changed
type rustCircuitConfig struct {
@@ -40,7 +33,7 @@ func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
}
}
// This struct maps to `VerifierConfig` in common/libzkp/impl/src/verifier.rs
// This struct maps to `VerifierConfig` in crates/libzkp/src/verifier.rs
// Defining a brand new struct here eliminates side effects in case fields
// in `*config.VerifierConfig` are changed
type rustVerifierConfig struct {
@@ -67,12 +60,7 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
return nil, err
}
configStr := C.CString(string(configBytes))
defer func() {
C.free(unsafe.Pointer(configStr))
}()
C.init(configStr)
libzkp.InitVerifier(string(configBytes))
v := &Verifier{
cfg: cfg,
@@ -94,15 +82,7 @@ func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName st
}
log.Info("Start to verify batch proof", "forkName", forkName)
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
verified := C.verify_batch_proof(proofStr, forkNameStr)
return verified != 0, nil
return libzkp.VerifyBatchProof(string(buf), forkName), nil
}
// VerifyChunkProof Verify a ZkProof by marshaling it and sending it to the Verifier.
@@ -113,15 +93,8 @@ func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName st
}
log.Info("Start to verify chunk proof", "forkName", forkName)
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
verified := C.verify_chunk_proof(proofStr, forkNameStr)
return verified != 0, nil
return libzkp.VerifyChunkProof(string(buf), forkName), nil
}
// VerifyBundleProof Verify a ZkProof for a bundle of batches, by marshaling it and verifying it via the EVM verifier.
@@ -131,20 +104,13 @@ func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName
return false, err
}
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
log.Info("Start to verify bundle proof ...")
verified := C.verify_bundle_proof(proofStr, forkNameStr)
return verified != 0, nil
return libzkp.VerifyBundleProof(string(buf), forkName), nil
}
func (v *Verifier) readVK(filePat string) (string, error) {
f, err := os.Open(filePat)
func (v *Verifier) ReadVK(filePat string) (string, error) {
f, err := os.Open(filepath.Clean(filePat))
if err != nil {
return "", err
}
@@ -157,20 +123,12 @@ func (v *Verifier) readVK(filePat string) (string, error) {
func (v *Verifier) loadOpenVMVks(forkName string) error {
tempFile := path.Join(os.TempDir(), "openVmVk.json")
defer func() {
if err := os.Remove(tempFile); err != nil {
log.Error("failed to remove temp file", "err", err)
}
}()
err := libzkp.DumpVk(forkName, tempFile)
if err != nil {
return err
}
forkNameCStr := C.CString(forkName)
defer C.free(unsafe.Pointer(forkNameCStr))
tempFileCStr := C.CString(tempFile)
defer C.free(unsafe.Pointer(tempFileCStr))
C.dump_vk(forkNameCStr, tempFileCStr)
f, err := os.Open(tempFile)
f, err := os.Open(filepath.Clean(tempFile))
if err != nil {
return err
}


@@ -11,7 +11,7 @@ import (
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
)
@@ -58,25 +58,25 @@ func TestFFI(t *testing.T) {
t.Log("Verified batch proof")
}
func readBatchProof(filePat string, as *assert.Assertions) *types.OpenVMBatchProof {
func readBatchProof(filePat string, as *assert.Assertions) *message.OpenVMBatchProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)
proof := &types.OpenVMBatchProof{}
proof := &message.OpenVMBatchProof{}
as.NoError(json.Unmarshal(byt, proof))
return proof
}
func readChunkProof(filePat string, as *assert.Assertions) *types.OpenVMChunkProof {
func readChunkProof(filePat string, as *assert.Assertions) *message.OpenVMChunkProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)
proof := &types.OpenVMChunkProof{}
proof := &message.OpenVMChunkProof{}
as.NoError(json.Unmarshal(byt, proof))
return proof


@@ -37,6 +37,7 @@ type ProverTask struct {
FailureType int16 `json:"failure_type" gorm:"column:failure_type;default:0"`
Reward decimal.Decimal `json:"reward" gorm:"column:reward;default:0;type:decimal(78)"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
Metadata []byte `json:"metadata" gorm:"column:metadata;default:NULL"`
AssignedAt time.Time `json:"assigned_at" gorm:"assigned_at"`
// metadata


@@ -4,6 +4,8 @@ package types
type GetTaskParameter struct {
ProverHeight uint64 `form:"prover_height" json:"prover_height"`
TaskTypes []int `form:"task_types" json:"task_types"`
TaskID string `form:"task_id,omitempty" json:"task_id,omitempty"`
Universal bool `form:"universal,omitempty" json:"universal,omitempty"`
}
// GetTaskSchema the schema data return to prover for get prover task
@@ -11,6 +13,7 @@ type GetTaskSchema struct {
UUID string `json:"uuid"`
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
UseSnark bool `json:"use_snark,omitempty"`
TaskData string `json:"task_data"`
HardForkName string `json:"hard_fork_name"`
}
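
For reference, here is a self-contained Go sketch (not part of this diff) of what a universal get_task exchange serializes to, mirroring the two structs above; all field values, including the task-type number, are hypothetical.

package main

import (
	"encoding/json"
	"fmt"
)

// Mirror of GetTaskParameter above (json tags only; form tags omitted).
type GetTaskParameter struct {
	ProverHeight uint64 `json:"prover_height"`
	TaskTypes    []int  `json:"task_types"`
	TaskID       string `json:"task_id,omitempty"`
	Universal    bool   `json:"universal,omitempty"`
}

// Mirror of GetTaskSchema above.
type GetTaskSchema struct {
	UUID         string `json:"uuid"`
	TaskID       string `json:"task_id"`
	TaskType     int    `json:"task_type"`
	UseSnark     bool   `json:"use_snark,omitempty"`
	TaskData     string `json:"task_data"`
	HardForkName string `json:"hard_fork_name"`
}

func main() {
	req, _ := json.Marshal(GetTaskParameter{ProverHeight: 100, TaskTypes: []int{2}, Universal: true})
	fmt.Println(string(req)) // {"prover_height":100,"task_types":[2],"universal":true}

	// A bundle task would additionally set use_snark, since bundle proofs
	// require a snark (see the bundle Assign flow above).
	resp, _ := json.Marshal(GetTaskSchema{
		UUID: "uuid-1", TaskID: "task-1", TaskType: 2, // hypothetical values
		UseSnark: true, TaskData: "{}", HardForkName: "euclid",
	})
	fmt.Println(string(resp))
}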


@@ -7,6 +7,7 @@ type SubmitProofParameter struct {
TaskType int `form:"task_type" json:"task_type" binding:"required"`
Status int `form:"status" json:"status"`
Proof string `form:"proof" json:"proof"`
Universal bool `form:"universal,omitempty" json:"universal,omitempty"`
FailureType int `form:"failure_type" json:"failure_type"`
FailureMsg string `form:"failure_msg" json:"failure_msg"`
}


@@ -161,7 +161,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_types": []int{int(proofType)}}).
SetBody(map[string]interface{}{"universal": true, "prover_height": 100, "task_types": []int{int(proofType)}}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err)
@@ -191,7 +191,7 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType)}).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "universal": true}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err)
@@ -207,32 +207,33 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
}
var proof []byte
switch message.ProofType(proverTaskSchema.TaskType) {
case message.ProofTypeChunk:
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{}, MetaData: struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case message.ProofTypeBatch:
encodeData, err := json.Marshal(message.OpenVMBatchProof{VmProof: &message.OpenVMProof{}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
}
if proofStatus == verifiedFailed {
switch proverTaskSchema.TaskType {
case int(message.ProofTypeChunk):
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)}, MetaData: struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}})
if proofStatus != verifiedFailed {
switch message.ProofType(proverTaskSchema.TaskType) {
case message.ProofTypeChunk:
fallthrough
case message.ProofTypeBatch:
encodeData, err := json.Marshal(&message.OpenVMProof{})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case int(message.ProofTypeBatch):
encodeData, err := json.Marshal(&message.OpenVMBatchProof{VmProof: &message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)}})
case message.ProofTypeBundle:
encodeData, err := json.Marshal(&message.OpenVMEvmProof{})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
}
} else {
// in "verifiedFailed" status, we have the mock prover purposely submit an invalid proof
switch message.ProofType(proverTaskSchema.TaskType) {
case message.ProofTypeChunk:
fallthrough
case message.ProofTypeBatch:
encodeData, err := json.Marshal(&message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case message.ProofTypeBundle:
encodeData, err := json.Marshal(&message.OpenVMEvmProof{Proof: []byte(verifier.InvalidTestProof)})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
@@ -240,11 +241,12 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
}
submitProof := types.SubmitProofParameter{
UUID: proverTaskSchema.UUID,
TaskID: proverTaskSchema.TaskID,
TaskType: proverTaskSchema.TaskType,
Status: int(proofMsgStatus),
Proof: string(proof),
UUID: proverTaskSchema.UUID,
TaskID: proverTaskSchema.TaskID,
TaskType: proverTaskSchema.TaskType,
Status: int(proofMsgStatus),
Proof: string(proof),
Universal: true,
}
token, authErrCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(message.ProofType(proverTaskSchema.TaskType))})

crates/l2geth/Cargo.toml Normal file

@@ -0,0 +1,26 @@
[package]
name = "l2geth"
version.workspace = true
edition.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
tokio = {version = "1", features = ["rt-multi-thread"]}
async-trait = "0.1"
url = ">=2.5.3"
libzkp = { path = "../libzkp" }
alloy = { workspace = true, features = ["provider-http", "transport-http", "reqwest", "reqwest-rustls-tls", "json-rpc"] }
sbv-primitives = { workspace = true, features = ["scroll"] }
sbv-utils = { workspace = true, features = ["scroll"] }
eyre.workspace = true
base64.workspace = true
serde.workspace = true
serde_derive.workspace = true
serde_json = { workspace = true, features = ["raw_value"]}
tracing.workspace = true

crates/l2geth/src/lib.rs Normal file

@@ -0,0 +1,19 @@
pub mod rpc_client;
pub use rpc_client::RpcConfig;
use std::sync::{Arc, OnceLock};
static GLOBAL_L2GETH_CLI: OnceLock<Arc<rpc_client::RpcClientCore>> = OnceLock::new();
pub fn init(config: &str) -> eyre::Result<()> {
let cfg: RpcConfig = serde_json::from_str(config)?;
GLOBAL_L2GETH_CLI.get_or_init(|| Arc::new(rpc_client::RpcClientCore::create(&cfg).unwrap()));
Ok(())
}
pub fn get_client() -> rpc_client::RpcClient<'static> {
GLOBAL_L2GETH_CLI
.get()
.expect("must have been initialized")
.get_client()
}


@@ -0,0 +1,241 @@
use alloy::{
providers::{Provider, ProviderBuilder, RootProvider},
rpc::client::ClientBuilder,
transports::layers::RetryBackoffLayer,
};
use eyre::Result;
use libzkp::tasks::ChunkInterpreter;
use sbv_primitives::types::Network;
use serde::{Deserialize, Serialize};
fn default_max_retry() -> u32 {
10
}
fn default_backoff() -> u64 {
100
}
fn default_cups() -> u64 {
100
}
fn default_workers() -> usize {
4
}
fn default_max_concurrency() -> usize {
10
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct RpcConfig {
#[serde(alias = "endpoint")]
pub rpc_url: String,
// The worker threads used in the runtime, default 4
#[serde(default = "default_workers")]
pub workers: usize,
// The blocking threads to handle rpc tasks, default 10
#[serde(default = "default_max_concurrency")]
pub max_concurrency: usize,
// Retry parameters
#[serde(default = "default_max_retry")]
pub max_retry: u32,
// backoff duration in milliseconds, default 100ms
#[serde(default = "default_backoff")]
pub backoff: u64,
// compute units per second: default 100
#[serde(default = "default_cups")]
pub cups: u64,
}
/// An RPC client core that carries its own async runtime,
/// so it can run in blocking mode (i.e. inside a dynamic library without a global entry point)
pub struct RpcClientCore {
/// rpc prover
provider: RootProvider<Network>,
rt: tokio::runtime::Runtime,
}
#[derive(Clone, Copy)]
pub struct RpcClient<'a> {
provider: &'a RootProvider<Network>,
handle: &'a tokio::runtime::Handle,
}
impl RpcClientCore {
pub fn create(config: &RpcConfig) -> Result<Self> {
let rpc = url::Url::parse(&config.rpc_url)?;
tracing::info!("Using RPC: {}", rpc);
// note we MUST use a multi-threaded runtime since there is no main thread driving it;
// each method call acquires a handle to the runtime to resolve one or more
// async tasks
let rt = tokio::runtime::Builder::new_multi_thread()
.worker_threads(config.workers)
.max_blocking_threads(config.max_concurrency)
.enable_all()
.build()?;
let retry_layer = RetryBackoffLayer::new(config.max_retry, config.backoff, config.cups);
let client = ClientBuilder::default().layer(retry_layer).http(rpc);
Ok(Self {
provider: ProviderBuilder::<_, _, Network>::default().on_client(client),
rt,
})
}
pub fn get_client(&self) -> RpcClient {
RpcClient {
provider: &self.provider,
handle: self.rt.handle(),
}
}
}
impl ChunkInterpreter for RpcClient<'_> {
fn try_fetch_block_witness(
&self,
block_hash: sbv_primitives::B256,
prev_witness: Option<&sbv_primitives::types::BlockWitness>,
) -> Result<sbv_primitives::types::BlockWitness> {
async fn fetch_witness_async(
provider: &RootProvider<Network>,
block_hash: sbv_primitives::B256,
prev_witness: Option<&sbv_primitives::types::BlockWitness>,
) -> Result<sbv_primitives::types::BlockWitness> {
use alloy::network::primitives::BlockTransactionsKind;
use sbv_utils::{rpc::ProviderExt, witness::WitnessBuilder};
let chain_id = provider.get_chain_id().await?;
let block = provider
.get_block_by_hash(block_hash, BlockTransactionsKind::Full)
.await?
.ok_or_else(|| eyre::eyre!("Block not found"))?;
let number = block.header.number;
if number == 0 {
eyre::bail!("no block number in header, or block 0 was used");
}
let prev_state_root = if let Some(witness) = prev_witness {
if witness.header.number != number - 1 {
eyre::bail!(
"the ref witness is not the previous block, expected {} got {}",
number - 1,
witness.header.number,
);
}
witness.header.state_root
} else {
provider
.scroll_disk_root((number - 1).into())
.await?
.disk_root
};
let witness = WitnessBuilder::new()
.block(block)
.chain_id(chain_id)
.execution_witness(provider.debug_execution_witness(number.into()).await?)
.state_root(provider.scroll_disk_root(number.into()).await?.disk_root)?
.prev_state_root(prev_state_root)
.build()?;
Ok(witness)
}
tracing::debug!("fetch witness for {block_hash}");
self.handle
.block_on(fetch_witness_async(self.provider, block_hash, prev_witness))
}
fn try_fetch_storage_node(
&self,
node_hash: sbv_primitives::B256,
) -> Result<sbv_primitives::Bytes> {
async fn fetch_storage_node_async(
provider: &RootProvider<Network>,
node_hash: sbv_primitives::B256,
) -> Result<sbv_primitives::Bytes> {
let ret = provider
.client()
.request::<_, sbv_primitives::Bytes>("debug_dbGet", (node_hash,))
.await?;
Ok(ret)
}
tracing::debug!("fetch storage node for {node_hash}");
self.handle
.block_on(fetch_storage_node_async(self.provider, node_hash))
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloy::primitives::hex;
use sbv_primitives::B256;
use std::env;
fn create_config_from_env() -> RpcConfig {
let endpoint =
env::var("L2GETH_ENDPOINT").expect("L2GETH_ENDPOINT environment variable must be set");
let config_json = format!(r#"{{"endpoint": "{}"}}"#, endpoint);
serde_json::from_str(&config_json).expect("Failed to parse RPC config")
}
#[test]
#[ignore = "Requires L2GETH_ENDPOINT environment variable"]
fn test_try_fetch_block_witness() {
let config = create_config_from_env();
let client_core = RpcClientCore::create(&config).expect("Failed to create RPC client");
let client = client_core.get_client();
// latest - 1 block in 2025.6.15
let block_hash = B256::from(
hex::const_decode_to_array(
b"0x9535a6970bc4db9031749331a214e35ed8c8a3f585f6f456d590a0bc780a1368",
)
.unwrap(),
);
// fetch the witness for a real block hash; expected to succeed against a live node
let wit1 = client
.try_fetch_block_witness(block_hash, None)
.expect("should succeed");
// latest block in 2025.6.15
let block_hash = B256::from(
hex::const_decode_to_array(
b"0xd47088cdb6afc68aa082e633bb7da9340d29c73841668afacfb9c1e66e557af0",
)
.unwrap(),
);
let wit2 = client
.try_fetch_block_witness(block_hash, Some(&wit1))
.expect("should succeed");
println!("{}", serde_json::to_string_pretty(&wit2).unwrap());
}
#[test]
#[ignore = "Requires L2GETH_ENDPOINT environment variable"]
fn test_try_fetch_storage_node() {
let config = create_config_from_env();
let client_core = RpcClientCore::create(&config).expect("Failed to create RPC client");
let client = client_core.get_client();
// the root node (state root) of the block in unittest above
let node_hash = B256::from(
hex::const_decode_to_array(
b"0xb9e67403a2eb35afbb0475fe942918cf9a330a1d7532704c24554506be62b27c",
)
.unwrap(),
);
// fetch a real trie node by hash; expected to succeed against a live node
let node = client
.try_fetch_storage_node(node_hash)
.expect("should succeed");
println!("{}", serde_json::to_string_pretty(&node).unwrap());
}
}

crates/libzkp/Cargo.toml Normal file

@@ -0,0 +1,23 @@
[package]
name = "libzkp"
version.workspace = true
edition.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
scroll-zkvm-types.workspace = true
scroll-zkvm-verifier-euclid.workspace = true
sbv-primitives.workspace = true
base64.workspace = true
serde.workspace = true
serde_derive.workspace = true
serde_json = { workspace = true, features = ["raw_value"]}
tracing.workspace = true
eyre.workspace = true
git-version = "0.3.5"
serde_stacker = "0.1"
regex = "1.11"
c-kzg = { version = "1.0", features = ["serde"] }

crates/libzkp/src/lib.rs Normal file

@@ -0,0 +1,121 @@
pub mod proofs;
pub mod tasks;
pub mod verifier;
pub use verifier::{TaskType, VerifierConfig};
mod utils;
use sbv_primitives::B256;
use scroll_zkvm_types::util::vec_as_base64;
use serde::{Deserialize, Serialize};
use serde_json::value::RawValue;
use std::path::Path;
use tasks::chunk_interpreter::{ChunkInterpreter, TryFromWithInterpreter};
/// Turn the coordinator's chunk task into a JSON string for the formal chunk proving
/// task (with full witnesses)
pub fn checkout_chunk_task(
task_json: &str,
interpreter: impl ChunkInterpreter,
) -> eyre::Result<String> {
let chunk_task = serde_json::from_str::<tasks::ChunkTask>(task_json)?;
let ret = serde_json::to_string(&tasks::ChunkProvingTask::try_from_with_interpret(
chunk_task,
interpreter,
)?)?;
Ok(ret)
}
/// Generate the required artifacts for proving tasks
pub fn gen_universal_task(
task_type: i32,
task_json: &str,
fork_name: &str,
interpreter: Option<impl ChunkInterpreter>,
) -> eyre::Result<(B256, String, String)> {
use proofs::*;
use tasks::*;
/// Wrapper for metadata
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(untagged)]
enum AnyMetaData {
Chunk(ChunkProofMetadata),
Batch(BatchProofMetadata),
Bundle(BundleProofMetadata),
}
let (pi_hash, metadata, u_task) = match task_type {
x if x == TaskType::Chunk as i32 => {
let task = serde_json::from_str::<ChunkProvingTask>(task_json)?;
let (pi_hash, metadata, u_task) =
gen_universal_chunk_task(task, fork_name.into(), interpreter)?;
(pi_hash, AnyMetaData::Chunk(metadata), u_task)
}
x if x == TaskType::Batch as i32 => {
let task = serde_json::from_str::<BatchProvingTask>(task_json)?;
let (pi_hash, metadata, u_task) = gen_universal_batch_task(task, fork_name.into())?;
(pi_hash, AnyMetaData::Batch(metadata), u_task)
}
x if x == TaskType::Bundle as i32 => {
let task = serde_json::from_str::<BundleProvingTask>(task_json)?;
let (pi_hash, metadata, u_task) = gen_universal_bundle_task(task, fork_name.into())?;
(pi_hash, AnyMetaData::Bundle(metadata), u_task)
}
_ => return Err(eyre::eyre!("unrecognized task type {task_type}")),
};
Ok((
pi_hash,
serde_json::to_string(&metadata)?,
serde_json::to_string(&u_task)?,
))
}
/// Helper to rearrange the proof returned by the universal prover into the corresponding wrapped proof
pub fn gen_wrapped_proof(proof_json: &str, metadata: &str, vk: &[u8]) -> eyre::Result<String> {
#[derive(Serialize)]
struct RearrangeWrappedProofJson<'a> {
#[serde(borrow)]
pub metadata: &'a RawValue,
#[serde(borrow)]
pub proof: &'a RawValue,
#[serde(with = "vec_as_base64", default)]
pub vk: Vec<u8>,
pub git_version: String,
}
let re_arrange = RearrangeWrappedProofJson {
metadata: serde_json::from_str(metadata)?,
proof: serde_json::from_str(proof_json)?,
vk: vk.to_vec(),
git_version: utils::short_git_version(),
};
let ret = serde_json::to_string(&re_arrange)?;
Ok(ret)
}
/// init verifier
pub fn verifier_init(config: &str) -> eyre::Result<()> {
let cfg: VerifierConfig = serde_json::from_str(config)?;
verifier::init(cfg);
Ok(())
}
/// verify proof
pub fn verify_proof(proof: Vec<u8>, fork_name: &str, task_type: TaskType) -> eyre::Result<bool> {
let verifier = verifier::get_verifier(fork_name)?;
let ret = verifier.verify(task_type, proof)?;
Ok(ret)
}
/// dump vk
pub fn dump_vk(fork_name: &str, file: &str) -> eyre::Result<()> {
let verifier = verifier::get_verifier(fork_name)?;
verifier.dump_vk(Path::new(file));
Ok(())
}
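// A hedged end-to-end sketch of the host-side flow; the JSON inputs, the vk bytes
// and `run_external_prover` are hypothetical placeholders, not part of this crate.
#[allow(dead_code)]
fn demo_proving_flow(verifier_cfg: &str, chunk_task_json: &str, vk: &[u8]) -> eyre::Result<()> {
// one-time verifier setup (config shape defined by `VerifierConfig`)
verifier_init(verifier_cfg)?;
// expand the coordinator task into a universal task plus proof metadata
let (pi_hash, metadata, u_task) = gen_universal_task(
TaskType::Chunk as i32,
chunk_task_json,
"euclidv2",
Some(tasks::chunk_interpreter::DummyInterpreter {}),
)?;
tracing::info!("expected pi hash: {pi_hash}");
// hand `u_task` to a prover, then wrap and verify the returned proof
let proof_json = run_external_prover(&u_task); // hypothetical helper
let wrapped = gen_wrapped_proof(&proof_json, &metadata, vk)?;
assert!(verify_proof(wrapped.into_bytes(), "euclidv2", TaskType::Chunk)?);
Ok(())
}
// Hypothetical stand-in for an out-of-process prover invocation.
#[allow(dead_code)]
fn run_external_prover(_universal_task: &str) -> String {
unimplemented!("submit the task to a zkvm prover and return its proof JSON")
}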

344
crates/libzkp/src/proofs.rs Normal file
View File

@@ -0,0 +1,344 @@
use std::path::Path;
use crate::utils::short_git_version;
use eyre::Result;
use sbv_primitives::B256;
use scroll_zkvm_types::{
batch::BatchInfo,
bundle::BundleInfo,
chunk::ChunkInfo,
proof::{EvmProof, OpenVmEvmProof, ProofEnum, RootProof},
public_inputs::{ForkName, MultiVersionPublicInputs},
types_agg::{AggregationInput, ProgramCommitment},
util::vec_as_base64,
};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
/// A wrapper around the actual inner proof.
#[derive(Clone, Serialize, Deserialize)]
pub struct WrappedProof<Metadata> {
/// Generic metadata carried by a proof.
pub metadata: Metadata,
/// The inner proof, either a [`RootProof`] or an [`EvmProof`], depending on the
/// type of prover that produced it.
pub proof: ProofEnum,
/// Represents the verifying key in serialized form. The purpose of including the verifying key
/// along with the proof is to allow a verifier-only mode to identify the source of proof
/// generation.
///
/// For [`RootProof`] the verifying key is denoted by the digest of the VM's program.
///
/// For [`EvmProof`] it's the raw bytes of the halo2 circuit's `VerifyingKey`.
///
/// We encode the vk in base64 format during JSON serialization.
#[serde(with = "vec_as_base64", default)]
pub vk: Vec<u8>,
/// Represents the git ref for `zkvm-prover` that was used to construct the proof.
///
/// This is useful for debugging.
pub git_version: String,
}
pub trait AsRootProof {
fn as_root_proof(&self) -> &RootProof;
}
pub trait AsEvmProof {
fn as_evm_proof(&self) -> &EvmProof;
}
pub trait IntoEvmProof {
fn into_evm_proof(self) -> OpenVmEvmProof;
}
/// Alias for convenience.
pub type ChunkProof = WrappedProof<ChunkProofMetadata>;
/// Alias for convenience.
pub type BatchProof = WrappedProof<BatchProofMetadata>;
/// Alias for convenience.
pub type BundleProof = WrappedProof<BundleProofMetadata>;
impl AsRootProof for ChunkProof {
fn as_root_proof(&self) -> &RootProof {
self.proof
.as_root_proof()
.expect("batch proof use root proof")
}
}
impl AsRootProof for BatchProof {
fn as_root_proof(&self) -> &RootProof {
self.proof
.as_root_proof()
.expect("batch proof use root proof")
}
}
impl AsEvmProof for BundleProof {
fn as_evm_proof(&self) -> &EvmProof {
self.proof
.as_evm_proof()
.expect("bundle proof use evm proof")
}
}
impl IntoEvmProof for BundleProof {
fn into_evm_proof(self) -> OpenVmEvmProof {
self.proof
.as_evm_proof()
.expect("bundle proof use evm proof")
.clone()
.into()
}
}
/// Trait to enable operations in metadata
pub trait ProofMetadata: Serialize + DeserializeOwned + std::fmt::Debug {
type PublicInputs: MultiVersionPublicInputs;
fn pi_hash_info(&self) -> &Self::PublicInputs;
fn new_proof<P: Into<ProofEnum>>(self, proof: P, vk: Option<&[u8]>) -> WrappedProof<Self> {
WrappedProof {
metadata: self,
proof: proof.into(),
vk: vk.map(Vec::from).unwrap_or_default(),
git_version: short_git_version(),
}
}
}
pub trait PersistableProof: Sized {
/// Read and deserialize the proof.
fn from_json<P: AsRef<Path>>(path_proof: P) -> Result<Self>;
/// Serialize the proof and dumping at the given path.
fn dump<P: AsRef<Path>>(&self, path_proof: P) -> Result<()>;
}
/// Metadata attached to [`ChunkProof`].
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ChunkProofMetadata {
/// The chunk information describing the list of blocks contained within the chunk.
pub chunk_info: ChunkInfo,
}
impl ProofMetadata for ChunkProofMetadata {
type PublicInputs = ChunkInfo;
fn pi_hash_info(&self) -> &Self::PublicInputs {
&self.chunk_info
}
}
/// Metadata attached to [`BatchProof`].
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BatchProofMetadata {
/// The batch information describing the list of chunks.
pub batch_info: BatchInfo,
/// The [`scroll_zkvm_types::batch::BatchHeader`]'s digest.
pub batch_hash: B256,
}
impl ProofMetadata for BatchProofMetadata {
type PublicInputs = BatchInfo;
fn pi_hash_info(&self) -> &Self::PublicInputs {
&self.batch_info
}
}
/// Metadata attached to [`BundleProof`].
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BundleProofMetadata {
/// The bundle information describing the list of batches to be finalised on-chain.
pub bundle_info: BundleInfo,
/// The public-input digest for the bundle.
pub bundle_pi_hash: B256,
}
impl ProofMetadata for BundleProofMetadata {
type PublicInputs = BundleInfo;
fn pi_hash_info(&self) -> &Self::PublicInputs {
&self.bundle_info
}
}
impl<Metadata> From<&WrappedProof<Metadata>> for AggregationInput {
fn from(value: &WrappedProof<Metadata>) -> Self {
Self {
public_values: value.proof.public_values(),
commitment: ProgramCommitment::deserialize(&value.vk),
}
}
}
impl<Metadata: ProofMetadata> WrappedProof<Metadata> {
/// Sanity checks on the wrapped proof:
///
/// - pi_hash computed in host does in fact match pi_hash computed in guest
pub fn sanity_check(&self, fork_name: ForkName) {
let proof_pi = self.proof.public_values();
let expected_pi = self
.metadata
.pi_hash_info()
.pi_hash_by_fork(fork_name)
.0
.as_ref()
.iter()
.map(|&v| v as u32)
.collect::<Vec<_>>();
assert_eq!(
expected_pi, proof_pi,
"pi mismatch: expected={expected_pi:?}, found={proof_pi:?}"
);
}
}
impl<Metadata: ProofMetadata> PersistableProof for WrappedProof<Metadata> {
fn from_json<P: AsRef<Path>>(path_proof: P) -> Result<Self> {
crate::utils::read_json_deep(path_proof)
}
fn dump<P: AsRef<Path>>(&self, path_proof: P) -> Result<()> {
crate::utils::write_json(path_proof, &self)
}
}
#[cfg(test)]
mod tests {
use base64::{prelude::BASE64_STANDARD, Engine};
use sbv_primitives::B256;
use scroll_zkvm_types::{
bundle::{BundleInfo, BundleInfoV1},
proof::EvmProof,
public_inputs::PublicInputs,
};
use super::*;
#[test]
fn test_roundtrip() -> eyre::Result<()> {
macro_rules! assert_roundtrip {
($fd:expr, $proof:ident) => {
let proof_str_expected =
std::fs::read_to_string(std::path::Path::new("./testdata").join($fd))?;
let proof = serde_json::from_str::<$proof>(&proof_str_expected)?;
let proof_str_got = serde_json::to_string(&proof)?;
assert_eq!(proof_str_got, proof_str_expected);
};
}
assert_roundtrip!("chunk-proof.json", ChunkProof);
assert_roundtrip!("batch-proof.json", BatchProof);
assert_roundtrip!("bundle-proof.json", BundleProof);
Ok(())
}
#[test]
fn test_dummy_proof() -> eyre::Result<()> {
// 1. Metadata
let metadata = {
let bundle_info: BundleInfoV1 = BundleInfo {
chain_id: 12345,
num_batches: 12,
prev_state_root: B256::repeat_byte(1),
prev_batch_hash: B256::repeat_byte(2),
post_state_root: B256::repeat_byte(3),
batch_hash: B256::repeat_byte(4),
withdraw_root: B256::repeat_byte(5),
msg_queue_hash: B256::repeat_byte(6),
}
.into();
let bundle_pi_hash = bundle_info.pi_hash();
BundleProofMetadata {
bundle_info: bundle_info.0,
bundle_pi_hash,
}
};
// 2. Proof
let (proof, proof_base64) = {
let proof = std::iter::empty()
.chain(std::iter::repeat_n(1, 1))
.chain(std::iter::repeat_n(2, 2))
.chain(std::iter::repeat_n(3, 3))
.chain(std::iter::repeat_n(4, 4))
.chain(std::iter::repeat_n(5, 5))
.chain(std::iter::repeat_n(6, 6))
.chain(std::iter::repeat_n(7, 7))
.chain(std::iter::repeat_n(8, 8))
.chain(std::iter::repeat_n(9, 9))
.collect::<Vec<u8>>();
let proof_base64 = BASE64_STANDARD.encode(&proof);
(proof, proof_base64)
};
// 3. Instances
let (instances, instances_base64) = {
// LE: [0x56, 0x34, 0x12, 0x00, 0x00, ..., 0x00]
// LE: [0x32, 0x54, 0x76, 0x98, 0x00, ..., 0x00]
let instances = std::iter::empty()
.chain(std::iter::repeat_n(0x00, 29))
.chain(std::iter::once(0x12))
.chain(std::iter::once(0x34))
.chain(std::iter::once(0x56))
.chain(std::iter::repeat_n(0x00, 28))
.chain(std::iter::once(0x98))
.chain(std::iter::once(0x76))
.chain(std::iter::once(0x54))
.chain(std::iter::once(0x32))
.collect::<Vec<u8>>();
let instances_base64 = BASE64_STANDARD.encode(&instances);
(instances, instances_base64)
};
// 4. VK
let (vk, vk_base64) = {
let vk = std::iter::empty()
.chain(std::iter::repeat_n(1, 9))
.chain(std::iter::repeat_n(2, 8))
.chain(std::iter::repeat_n(3, 7))
.chain(std::iter::repeat_n(4, 6))
.chain(std::iter::repeat_n(5, 5))
.chain(std::iter::repeat_n(6, 4))
.chain(std::iter::repeat_n(7, 3))
.chain(std::iter::repeat_n(8, 2))
.chain(std::iter::repeat_n(9, 1))
.collect::<Vec<u8>>();
let vk_base64 = BASE64_STANDARD.encode(&vk);
(vk, vk_base64)
};
let evm_proof = EvmProof { instances, proof };
let bundle_proof = metadata.new_proof(evm_proof, Some(vk.as_slice()));
let bundle_proof_json = serde_json::to_value(&bundle_proof)?;
assert_eq!(
bundle_proof_json.get("proof").unwrap(),
&serde_json::json!({
"proof": proof_base64,
"instances": instances_base64,
}),
);
assert_eq!(
bundle_proof_json.get("vk").unwrap(),
&serde_json::Value::String(vk_base64),
);
let bundle_proof_de = serde_json::from_value::<BundleProof>(bundle_proof_json)?;
assert_eq!(
bundle_proof_de.proof.as_evm_proof(),
bundle_proof.proof.as_evm_proof()
);
assert_eq!(bundle_proof_de.vk, bundle_proof.vk);
Ok(())
}
}
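// A hedged sketch of the persistence round trip: dump a proof, reload it, and
// re-check its public inputs (the path and fork below are illustrative).
#[allow(dead_code)]
fn demo_persist_roundtrip(proof: &ChunkProof) -> Result<()> {
proof.dump("/tmp/chunk-proof.json")?;
let reloaded = ChunkProof::from_json("/tmp/chunk-proof.json")?;
// panics if guest-computed public inputs disagree with the metadata
reloaded.sanity_check(ForkName::EuclidV2);
Ok(())
}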

View File

@@ -0,0 +1,76 @@
pub mod batch;
pub mod bundle;
pub mod chunk;
pub mod chunk_interpreter;
pub use batch::BatchProvingTask;
pub use bundle::BundleProvingTask;
pub use chunk::{ChunkProvingTask, ChunkTask};
pub use chunk_interpreter::ChunkInterpreter;
pub use scroll_zkvm_types::task::ProvingTask;
use crate::proofs::{BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata};
use chunk_interpreter::{DummyInterpreter, TryFromWithInterpreter};
use sbv_primitives::B256;
use scroll_zkvm_types::{
chunk::ChunkInfo,
public_inputs::{ForkName, MultiVersionPublicInputs},
};
/// Generate the required artifacts for chunk proving
pub fn gen_universal_chunk_task(
mut task: ChunkProvingTask,
fork_name: ForkName,
interpreter: Option<impl ChunkInterpreter>,
) -> eyre::Result<(B256, ChunkProofMetadata, ProvingTask)> {
let chunk_info = if let Some(interpreter) = interpreter {
ChunkInfo::try_from_with_interpret(&mut task, interpreter)
} else {
ChunkInfo::try_from_with_interpret(&mut task, DummyInterpreter {})
}?;
let proving_task = task.try_into()?;
let expected_pi_hash = chunk_info.pi_hash_by_fork(fork_name);
Ok((
expected_pi_hash,
ChunkProofMetadata { chunk_info },
proving_task,
))
}
/// Generate the required artifacts for batch proving
pub fn gen_universal_batch_task(
task: BatchProvingTask,
fork_name: ForkName,
) -> eyre::Result<(B256, BatchProofMetadata, ProvingTask)> {
let batch_info = task.precheck_and_build_metadata()?;
let proving_task = task.try_into()?;
let expected_pi_hash = batch_info.pi_hash_by_fork(fork_name);
Ok((
expected_pi_hash,
BatchProofMetadata {
batch_info,
batch_hash: expected_pi_hash,
},
proving_task,
))
}
/// Generate the required artifacts for bundle proving
pub fn gen_universal_bundle_task(
task: BundleProvingTask,
fork_name: ForkName,
) -> eyre::Result<(B256, BundleProofMetadata, ProvingTask)> {
let bundle_info = task.precheck_and_build_metadata()?;
let proving_task = task.try_into()?;
let expected_pi_hash = bundle_info.pi_hash_by_fork(fork_name);
Ok((
expected_pi_hash,
BundleProofMetadata {
bundle_info,
bundle_pi_hash: expected_pi_hash,
},
proving_task,
))
}

View File

@@ -0,0 +1,253 @@
use crate::proofs::ChunkProof;
use c_kzg::Bytes48;
use eyre::Result;
use sbv_primitives::{B256, U256};
use scroll_zkvm_types::{
batch::{
BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchInfo, BatchWitness, EnvelopeV6, EnvelopeV7,
PointEvalWitness, ReferenceHeader, N_BLOB_BYTES,
},
public_inputs::ForkName,
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};
mod utils;
use utils::{base64, point_eval};
/// A variant batch header type. Since a `BatchHeaderV6` cannot be decoded as V7,
/// deserialization is always unambiguous.
/// Note: the V6 header MUST be placed above V7, since an untagged enum
/// tries to decode each variant definition in order
#[derive(Clone, serde::Deserialize, serde::Serialize)]
#[serde(untagged)]
pub enum BatchHeaderV {
V6(BatchHeaderV6),
V7(BatchHeaderV7),
}
impl From<BatchHeaderV> for ReferenceHeader {
fn from(value: BatchHeaderV) -> Self {
match value {
BatchHeaderV::V6(h) => ReferenceHeader::V6(h),
BatchHeaderV::V7(h) => ReferenceHeader::V7(h),
}
}
}
impl BatchHeaderV {
pub fn batch_hash(&self) -> B256 {
match self {
BatchHeaderV::V6(h) => h.batch_hash(),
BatchHeaderV::V7(h) => h.batch_hash(),
}
}
pub fn must_v6_header(&self) -> &BatchHeaderV6 {
match self {
BatchHeaderV::V6(h) => h,
BatchHeaderV::V7(_) => panic!("try to pick v7 header"),
}
}
pub fn must_v7_header(&self) -> &BatchHeaderV7 {
match self {
BatchHeaderV::V7(h) => h,
BatchHeaderV::V6(_) => panic!("try to pick v6 header"),
}
}
}
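// Generic illustration (with hypothetical stand-in types) of the ordering caveat
// above: under #[serde(untagged)] the FIRST variant that deserializes successfully
// wins, so variant order is load-bearing.
#[cfg(test)]
mod untagged_order_sketch {
#[derive(Debug, PartialEq, serde::Deserialize)]
#[serde(untagged)]
enum Num {
Int(u64),
Float(f64),
}
#[test]
fn first_matching_variant_wins() {
// "1" parses as both u64 and f64; listing Int first keeps it an Int
assert_eq!(serde_json::from_str::<Num>("1").unwrap(), Num::Int(1));
}
}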
/// Defines a proving task for batch proof generation; the format is compatible
/// with both pre-EuclidV2 and EuclidV2
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub struct BatchProvingTask {
/// Chunk proofs for the contiguous list of chunks within the batch.
pub chunk_proofs: Vec<ChunkProof>,
/// The batch header ([`BatchHeaderV6`] or [`BatchHeaderV7`]), as computed on-chain for this batch.
pub batch_header: BatchHeaderV,
/// The bytes encoding the batch data that will finally be published on-chain in the form of an
/// EIP-4844 blob.
#[serde(with = "base64")]
pub blob_bytes: Vec<u8>,
/// Challenge digest computed using the blob's bytes and versioned hash.
pub challenge_digest: Option<U256>,
/// KZG commitment for the blob.
pub kzg_commitment: Option<Bytes48>,
/// KZG proof.
pub kzg_proof: Option<Bytes48>,
/// Fork name, used for sanity checks against the batch_header and chunk proofs
pub fork_name: String,
}
impl TryFrom<BatchProvingTask> for ProvingTask {
type Error = eyre::Error;
fn try_from(value: BatchProvingTask) -> Result<Self> {
let witness = value.build_guest_input();
Ok(ProvingTask {
identifier: value.batch_header.batch_hash().to_string(),
fork_name: value.fork_name,
aggregated_proofs: value
.chunk_proofs
.into_iter()
.map(|w_proof| w_proof.proof.into_root_proof().expect("expect root proof"))
.collect(),
serialized_witness: vec![to_rkyv_bytes::<RancorError>(&witness)?.into_vec()],
vk: Vec::new(),
})
}
}
impl BatchProvingTask {
fn build_guest_input(&self) -> BatchWitness {
let fork_name = self.fork_name.to_lowercase().as_str().into();
// calculate the required point-evaluation values and compare them with the task input
let (kzg_commitment, kzg_proof, challenge_digest) = {
let blob = point_eval::to_blob(&self.blob_bytes);
let commitment = point_eval::blob_to_kzg_commitment(&blob);
let versioned_hash = point_eval::get_versioned_hash(&commitment);
let challenge_digest = match &self.batch_header {
BatchHeaderV::V6(_) => {
assert_eq!(
fork_name,
ForkName::EuclidV1,
"hardfork mismatch for da-codec@v6 header: found={fork_name:?}, expected={:?}",
ForkName::EuclidV1,
);
EnvelopeV6::from(self.blob_bytes.as_slice()).challenge_digest(versioned_hash)
}
BatchHeaderV::V7(_) => {
assert_eq!(
fork_name,
ForkName::EuclidV2,
"hardfork mismatch for da-codec@v7 header: found={fork_name:?}, expected={:?}",
ForkName::EuclidV2,
);
let padded_blob_bytes = {
let mut padded_blob_bytes = self.blob_bytes.to_vec();
padded_blob_bytes.resize(N_BLOB_BYTES, 0);
padded_blob_bytes
};
EnvelopeV7::from(padded_blob_bytes.as_slice()).challenge_digest(versioned_hash)
}
};
let (proof, _) = point_eval::get_kzg_proof(&blob, challenge_digest);
(commitment.to_bytes(), proof.to_bytes(), challenge_digest)
};
if let Some(k) = self.kzg_commitment {
assert_eq!(k, kzg_commitment);
}
if let Some(c) = self.challenge_digest {
assert_eq!(c, U256::from_be_bytes(challenge_digest.0));
}
if let Some(p) = self.kzg_proof {
assert_eq!(p, kzg_proof);
}
let point_eval_witness = PointEvalWitness {
kzg_commitment: kzg_commitment.into_inner(),
kzg_proof: kzg_proof.into_inner(),
};
let reference_header = self.batch_header.clone().into();
BatchWitness {
fork_name,
chunk_proofs: self.chunk_proofs.iter().map(|proof| proof.into()).collect(),
chunk_infos: self
.chunk_proofs
.iter()
.map(|p| p.metadata.chunk_info.clone())
.collect(),
blob_bytes: self.blob_bytes.clone(),
reference_header,
point_eval_witness,
}
}
pub fn precheck_and_build_metadata(&self) -> Result<BatchInfo> {
let fork_name = ForkName::from(self.fork_name.as_str());
let (parent_state_root, state_root, chain_id, withdraw_root) = (
self.chunk_proofs
.first()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.prev_state_root,
self.chunk_proofs
.last()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.post_state_root,
self.chunk_proofs
.last()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.chain_id,
self.chunk_proofs
.last()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.withdraw_root,
);
let (parent_batch_hash, prev_msg_queue_hash, post_msg_queue_hash) = match self.batch_header
{
BatchHeaderV::V6(h) => {
assert_eq!(
fork_name,
ForkName::EuclidV1,
"hardfork mismatch for da-codec@v6 header: found={fork_name:?}, expected={:?}",
ForkName::EuclidV1,
);
(h.parent_batch_hash, Default::default(), Default::default())
}
BatchHeaderV::V7(h) => {
assert_eq!(
fork_name,
ForkName::EuclidV2,
"hardfork mismatch for da-codec@v7 header: found={fork_name:?}, expected={:?}",
ForkName::EuclidV2,
);
(
h.parent_batch_hash,
self.chunk_proofs
.first()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.prev_msg_queue_hash,
self.chunk_proofs
.last()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.post_msg_queue_hash,
)
}
};
let batch_hash = self.batch_header.batch_hash();
Ok(BatchInfo {
parent_state_root,
parent_batch_hash,
state_root,
batch_hash,
chain_id,
withdraw_root,
prev_msg_queue_hash,
post_msg_queue_hash,
})
}
}

View File

@@ -0,0 +1,77 @@
pub mod base64 {
use base64::prelude::*;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
pub fn serialize<S: Serializer>(v: &Vec<u8>, s: S) -> Result<S::Ok, S::Error> {
let base64 = BASE64_STANDARD.encode(v);
String::serialize(&base64, s)
}
pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<u8>, D::Error> {
let base64 = String::deserialize(d)?;
BASE64_STANDARD
.decode(base64.as_bytes())
.map_err(serde::de::Error::custom)
}
}
pub mod point_eval {
use c_kzg;
use sbv_primitives::{types::eips::eip4844::BLS_MODULUS, B256 as H256, U256};
use scroll_zkvm_types::util::sha256_rv32;
/// Given the blob-envelope, translate it to a fixed size EIP-4844 blob.
///
/// For every 32-bytes chunk in the blob, the most-significant byte is set to 0 while the other
/// 31 bytes are copied from the provided blob-envelope.
pub fn to_blob(envelope_bytes: &[u8]) -> c_kzg::Blob {
let mut blob_bytes = [0u8; c_kzg::BYTES_PER_BLOB];
assert!(
envelope_bytes.len()
<= c_kzg::FIELD_ELEMENTS_PER_BLOB * (c_kzg::BYTES_PER_FIELD_ELEMENT - 1),
"too many bytes in blob envelope",
);
for (i, &byte) in envelope_bytes.iter().enumerate() {
blob_bytes[(i / 31) * 32 + 1 + (i % 31)] = byte;
}
c_kzg::Blob::new(blob_bytes)
}
/// Get the KZG commitment from an EIP-4844 blob.
pub fn blob_to_kzg_commitment(blob: &c_kzg::Blob) -> c_kzg::KzgCommitment {
c_kzg::KzgCommitment::blob_to_kzg_commitment(blob, c_kzg::ethereum_kzg_settings())
.expect("blob to kzg commitment should succeed")
}
/// The version for KZG as per EIP-4844.
const VERSIONED_HASH_VERSION_KZG: u8 = 1;
/// Get the EIP-4844 versioned hash from the KZG commitment.
pub fn get_versioned_hash(commitment: &c_kzg::KzgCommitment) -> H256 {
let mut hash: [u8; 32] = sha256_rv32(commitment.to_bytes().as_slice()).into();
hash[0] = VERSIONED_HASH_VERSION_KZG;
H256::new(hash)
}
/// Get x for kzg proof from challenge hash
pub fn get_x_from_challenge(challenge: H256) -> U256 {
U256::from_be_bytes(challenge.0) % BLS_MODULUS
}
/// Generate KZG proof and evaluation given the blob (polynomial) and a random challenge.
pub fn get_kzg_proof(blob: &c_kzg::Blob, challenge: H256) -> (c_kzg::KzgProof, U256) {
let challenge = get_x_from_challenge(challenge);
let (proof, y) = c_kzg::KzgProof::compute_kzg_proof(
blob,
&c_kzg::Bytes32::new(challenge.to_be_bytes()),
c_kzg::ethereum_kzg_settings(),
)
.expect("kzg proof should succeed");
(proof, U256::from_be_slice(y.as_slice()))
}
}
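// A small sketch making the packing rule concrete: envelope byte `i` lands at
// blob offset (i / 31) * 32 + 1 + (i % 31), so every 32-byte field element keeps
// its most-significant byte zero and carries 31 envelope bytes.
#[cfg(test)]
mod packing_sketch {
// mirrors the index arithmetic used in `to_blob` above
fn blob_offset(i: usize) -> usize {
(i / 31) * 32 + 1 + (i % 31)
}
#[test]
fn envelope_byte_placement() {
assert_eq!(blob_offset(0), 1); // byte 0 -> second byte of element 0
assert_eq!(blob_offset(30), 31); // byte 30 -> last byte of element 0
assert_eq!(blob_offset(31), 33); // byte 31 skips offset 32, which stays 0x00
}
}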

View File

@@ -0,0 +1,125 @@
use crate::proofs::BatchProof;
use eyre::Result;
use scroll_zkvm_types::{
bundle::{BundleInfo, BundleWitness},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};
/// Message indicating a sanity check failure.
const BUNDLE_SANITY_MSG: &str = "bundle must have at least one batch";
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub struct BundleProvingTask {
pub batch_proofs: Vec<BatchProof>,
/// for sanity check
pub bundle_info: Option<BundleInfo>,
/// Fork name, for sanity checks
pub fork_name: String,
}
impl BundleProvingTask {
fn identifier(&self) -> String {
assert!(!self.batch_proofs.is_empty(), "{BUNDLE_SANITY_MSG}",);
let (first, last) = (
self.batch_proofs
.first()
.expect(BUNDLE_SANITY_MSG)
.metadata
.batch_hash,
self.batch_proofs
.last()
.expect(BUNDLE_SANITY_MSG)
.metadata
.batch_hash,
);
format!("{first}-{last}")
}
fn build_guest_input(&self) -> BundleWitness {
BundleWitness {
batch_proofs: self.batch_proofs.iter().map(|proof| proof.into()).collect(),
batch_infos: self
.batch_proofs
.iter()
.map(|wrapped_proof| wrapped_proof.metadata.batch_info.clone())
.collect(),
}
}
pub fn precheck_and_build_metadata(&self) -> Result<BundleInfo> {
use eyre::eyre;
let err_prefix = format!("precheck_and_build_metadata for task_id={}", self.identifier());
for w in self.batch_proofs.windows(2) {
if w[1].metadata.batch_info.chain_id != w[0].metadata.batch_info.chain_id {
return Err(eyre!("{err_prefix}: chain_id mismatch"));
}
if w[1].metadata.batch_info.parent_state_root != w[0].metadata.batch_info.state_root {
return Err(eyre!("{err_prefix}: state_root not chained"));
}
if w[1].metadata.batch_info.parent_batch_hash != w[0].metadata.batch_info.batch_hash {
return Err(eyre!("{err_prefix}: batch_hash not chained"));
}
}
let (first_batch, last_batch) = (
&self
.batch_proofs
.first()
.expect("at least one batch in bundle")
.metadata
.batch_info,
&self
.batch_proofs
.last()
.expect("at least one batch in bundle")
.metadata
.batch_info,
);
let chain_id = first_batch.chain_id;
let num_batches = u32::try_from(self.batch_proofs.len()).expect("num_batches: u32");
let prev_state_root = first_batch.parent_state_root;
let prev_batch_hash = first_batch.parent_batch_hash;
let post_state_root = last_batch.state_root;
let batch_hash = last_batch.batch_hash;
let withdraw_root = last_batch.withdraw_root;
let msg_queue_hash = last_batch.post_msg_queue_hash;
Ok(BundleInfo {
chain_id,
msg_queue_hash,
num_batches,
prev_state_root,
prev_batch_hash,
post_state_root,
batch_hash,
withdraw_root,
})
}
}
impl TryFrom<BundleProvingTask> for ProvingTask {
type Error = eyre::Error;
fn try_from(value: BundleProvingTask) -> Result<Self> {
let witness = value.build_guest_input();
Ok(ProvingTask {
identifier: value.identifier(),
fork_name: value.fork_name,
aggregated_proofs: value
.batch_proofs
.into_iter()
.map(|w_proof| w_proof.proof.into_root_proof().expect("expect root proof"))
.collect(),
serialized_witness: vec![to_rkyv_bytes::<RancorError>(&witness)?.to_vec()],
vk: Vec::new(),
})
}
}

View File

@@ -0,0 +1,185 @@
use super::chunk_interpreter::*;
use eyre::Result;
use sbv_primitives::{types::BlockWitness, B256};
use scroll_zkvm_types::{
chunk::{execute, ChunkInfo, ChunkWitness},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};
/// The type aligned with the coordinator's definition
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct ChunkTask {
/// Block hashes for a series of blocks
pub block_hashes: Vec<B256>,
/// The on-chain L1 msg queue hash before applying L1 msg txs from the chunk.
pub prev_msg_queue_hash: B256,
/// Fork name
pub fork_name: String,
}
impl TryFromWithInterpreter<ChunkTask> for ChunkProvingTask {
fn try_from_with_interpret(
value: ChunkTask,
interpreter: impl ChunkInterpreter,
) -> Result<Self> {
let mut block_witnesses = Vec::new();
for block_hash in value.block_hashes {
let witness =
interpreter.try_fetch_block_witness(block_hash, block_witnesses.last())?;
block_witnesses.push(witness);
}
Ok(Self {
block_witnesses,
prev_msg_queue_hash: value.prev_msg_queue_hash,
fork_name: value.fork_name,
})
}
}
/// Message indicating a sanity check failure.
const CHUNK_SANITY_MSG: &str = "chunk must have at least one block";
/// Proving task for the [`ChunkCircuit`][scroll_zkvm_chunk_circuit].
///
/// The identifier for a chunk proving task is:
/// - {first_block_number}-{last_block_number}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct ChunkProvingTask {
/// Witnesses for every block in the chunk.
pub block_witnesses: Vec<BlockWitness>,
/// The on-chain L1 msg queue hash before applying L1 msg txs from the chunk.
pub prev_msg_queue_hash: B256,
/// Fork name
pub fork_name: String,
}
#[derive(Clone, Debug)]
pub struct ChunkDetails {
pub num_blocks: usize,
pub num_txs: usize,
pub total_gas_used: u64,
}
impl TryFrom<ChunkProvingTask> for ProvingTask {
type Error = eyre::Error;
fn try_from(value: ChunkProvingTask) -> Result<Self> {
let witness = value.build_guest_input();
Ok(ProvingTask {
identifier: value.identifier(),
fork_name: value.fork_name,
aggregated_proofs: Vec::new(),
serialized_witness: vec![to_rkyv_bytes::<RancorError>(&witness)?.to_vec()],
vk: Vec::new(),
})
}
}
impl ChunkProvingTask {
pub fn stats(&self) -> ChunkDetails {
let num_blocks = self.block_witnesses.len();
let num_txs = self
.block_witnesses
.iter()
.map(|b| b.transaction.len())
.sum::<usize>();
let total_gas_used = self
.block_witnesses
.iter()
.map(|b| b.header.gas_used)
.sum::<u64>();
ChunkDetails {
num_blocks,
num_txs,
total_gas_used,
}
}
fn identifier(&self) -> String {
assert!(!self.block_witnesses.is_empty(), "{CHUNK_SANITY_MSG}",);
let (first, last) = (
self.block_witnesses
.first()
.expect(CHUNK_SANITY_MSG)
.header
.number,
self.block_witnesses
.last()
.expect(CHUNK_SANITY_MSG)
.header
.number,
);
format!("{first}-{last}")
}
fn build_guest_input(&self) -> ChunkWitness {
ChunkWitness {
blocks: self.block_witnesses.to_vec(),
prev_msg_queue_hash: self.prev_msg_queue_hash,
fork_name: self.fork_name.to_lowercase().as_str().into(),
}
}
fn insert_state(&mut self, node: sbv_primitives::Bytes) {
self.block_witnesses[0].states.push(node);
}
}
const MAX_FETCH_NODES_ATTEMPTS: usize = 15;
impl TryFromWithInterpreter<&mut ChunkProvingTask> for ChunkInfo {
fn try_from_with_interpret(
value: &mut ChunkProvingTask,
interpreter: impl ChunkInterpreter,
) -> eyre::Result<Self> {
use eyre::eyre;
let err_prefix = format!(
"metadata_with_prechecks for task_id={:?}",
value.identifier()
);
if value.block_witnesses.is_empty() {
return Err(eyre!(
"{err_prefix}: chunk should contain at least one block",
));
}
// recover from missing-node errors and keep the execution going
let pattern = r"SparseTrieError\(BlindedNode \{ path: Nibbles\((0x[0-9a-fA-F]+)\), hash: (0x[0-9a-fA-F]+) \}\)";
let err_parse_re = regex::Regex::new(pattern)?;
let mut attempts = 0;
loop {
match execute(&value.build_guest_input()) {
Ok(chunk_info) => return Ok(chunk_info),
Err(e) => {
if let Some(caps) = err_parse_re.captures(&e) {
let hash = caps[2].to_string();
tracing::debug!("missing trie hash {hash}");
attempts += 1;
if attempts >= MAX_FETCH_NODES_ATTEMPTS {
return Err(eyre!(
"failed to fetch nodes after {MAX_FETCH_NODES_ATTEMPTS} attempts: {e}"
));
}
let node_hash =
hash.parse::<sbv_primitives::B256>().expect("should be hex");
let node = interpreter.try_fetch_storage_node(node_hash)?;
tracing::warn!("missing node fetched: {node}");
value.insert_state(node);
} else {
return Err(eyre!("{err_prefix}: {e}"));
}
}
}
}
}
}
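// A hedged sketch of the recovery mechanics above: extract the missing node hash
// from a sample executor error string (the error text itself is illustrative).
#[cfg(test)]
mod missing_node_sketch {
#[test]
fn capture_node_hash() {
let pattern = r"SparseTrieError\(BlindedNode \{ path: Nibbles\((0x[0-9a-fA-F]+)\), hash: (0x[0-9a-fA-F]+) \}\)";
let re = regex::Regex::new(pattern).unwrap();
let err = "SparseTrieError(BlindedNode { path: Nibbles(0x0102), hash: 0xabcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789 })";
let caps = re.captures(err).expect("error shape matches");
// capture group 2 is the hash handed to `try_fetch_storage_node`
let node_hash = caps[2].parse::<sbv_primitives::B256>().unwrap();
assert_eq!(node_hash.len(), 32);
}
}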

View File

@@ -0,0 +1,26 @@
use eyre::Result;
use sbv_primitives::{types::BlockWitness, Bytes, B256};
/// An interpreter that is critical for translating chunk data,
/// since we need to fetch block witnesses and, in rare cases,
/// storage node data from an external source
pub trait ChunkInterpreter {
fn try_fetch_block_witness(
&self,
_block_hash: B256,
_prev_witness: Option<&BlockWitness>,
) -> Result<BlockWitness> {
Err(eyre::eyre!("no implement"))
}
fn try_fetch_storage_node(&self, _node_hash: B256) -> Result<Bytes> {
Err(eyre::eyre!("no implement"))
}
}
pub trait TryFromWithInterpreter<T>: Sized {
fn try_from_with_interpret(value: T, interpreter: impl ChunkInterpreter) -> Result<Self>;
}
pub struct DummyInterpreter {}
impl ChunkInterpreter for DummyInterpreter {}
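// A hedged sketch of a non-trivial interpreter: a fixture-backed implementation
// that serves pre-recorded witnesses and nodes (type name and fields are
// hypothetical, not part of the crate API).
pub struct FixtureInterpreter {
pub witnesses: std::collections::HashMap<B256, BlockWitness>,
pub nodes: std::collections::HashMap<B256, Bytes>,
}
impl ChunkInterpreter for FixtureInterpreter {
fn try_fetch_block_witness(
&self,
block_hash: B256,
_prev_witness: Option<&BlockWitness>,
) -> Result<BlockWitness> {
self.witnesses
.get(&block_hash)
.cloned()
.ok_or_else(|| eyre::eyre!("no fixture witness for {block_hash}"))
}
fn try_fetch_storage_node(&self, node_hash: B256) -> Result<Bytes> {
self.nodes
.get(&node_hash)
.cloned()
.ok_or_else(|| eyre::eyre!("no fixture node for {node_hash}"))
}
}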

View File

@@ -0,0 +1,53 @@
use std::{
panic::{catch_unwind, AssertUnwindSafe},
path::Path,
};
use git_version::git_version;
use serde::{
de::{Deserialize, DeserializeOwned},
Serialize,
};
use eyre::Result;
const GIT_VERSION: &str = git_version!(args = ["--abbrev=7", "--always"]);
/// Shortened git commit ref of the current build.
pub(crate) fn short_git_version() -> String {
let commit_version = GIT_VERSION.split('-').next_back().unwrap();
// Check whether the commit object was used as a fallback.
if commit_version.len() < 8 {
commit_version.to_string()
} else {
commit_version[1..8].to_string()
}
}
/// Wrapper to read JSON that might be deeply nested.
pub(crate) fn read_json_deep<P: AsRef<Path>, T: DeserializeOwned>(path: P) -> Result<T> {
let fd = std::fs::File::open(path)?;
let mut deserializer = serde_json::Deserializer::from_reader(fd);
deserializer.disable_recursion_limit();
let deserializer = serde_stacker::Deserializer::new(&mut deserializer);
Ok(Deserialize::deserialize(deserializer)?)
}
/// Serialize the provided type to JSON format and write to the given path.
pub(crate) fn write_json<P: AsRef<Path>, T: Serialize>(path: P, value: &T) -> Result<()> {
let mut writer = std::fs::File::create(path)?;
Ok(serde_json::to_writer(&mut writer, value)?)
}
pub(crate) fn panic_catch<F: FnOnce() -> R, R>(f: F) -> Result<R, String> {
catch_unwind(AssertUnwindSafe(f)).map_err(|err| {
if let Some(s) = err.downcast_ref::<String>() {
s.to_string()
} else if let Some(s) = err.downcast_ref::<&str>() {
s.to_string()
} else {
format!("unable to get panic info {err:?}")
}
})
}
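// Sketch: `panic_catch` converts panics (e.g. failed asserts inside verifiers)
// into plain `Err(String)` values that can safely cross the FFI boundary.
#[cfg(test)]
mod panic_catch_sketch {
use super::panic_catch;
#[test]
fn panics_become_strings() {
assert_eq!(panic_catch(|| 21 * 2), Ok(42));
assert_eq!(panic_catch(|| -> u32 { panic!("boom") }), Err("boom".to_string()));
}
}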

View File

@@ -1,8 +1,8 @@
#![allow(static_mut_refs)]
mod euclidv2;
use anyhow::{bail, Result};
use euclidv2::EuclidV2Verifier;
use eyre::Result;
use serde::{Deserialize, Serialize};
use std::{cell::OnceCell, path::Path, rc::Rc};
@@ -13,6 +13,16 @@ pub enum TaskType {
Bundle,
}
impl std::fmt::Display for TaskType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Chunk => write!(f, "chunk"),
Self::Batch => write!(f, "batch"),
Self::Bundle => write!(f, "bundle"),
}
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct VKDump {
pub chunk_vk: String,
@@ -61,5 +71,8 @@ pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
}
}
}
bail!("failed to get verifier, key not found, {}", fork_name)
Err(eyre::eyre!(
"failed to get verifier, key not found, {}",
fork_name
))
}

View File

@@ -1,10 +1,12 @@
use super::{ProofVerifier, TaskType, VKDump};
use anyhow::Result;
use eyre::Result;
use crate::utils::panic_catch;
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
use crate::{
proofs::{AsRootProof, BatchProof, BundleProof, ChunkProof, IntoEvmProof},
utils::panic_catch,
};
use scroll_zkvm_verifier_euclid::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
use std::{fs::File, path::Path};
pub struct EuclidV2Verifier {
@@ -35,30 +37,29 @@ impl ProofVerifier for EuclidV2Verifier {
panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
self.chunk_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
self.chunk_verifier.verify_proof(proof.as_root_proof())
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
self.batch_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
self.batch_verifier.verify_proof(proof.as_root_proof())
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
self.bundle_verifier
.verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
.verify_proof_evm(&proof.into_evm_proof())
}
})
.map_err(|err_str: String| anyhow::anyhow!(err_str))
.map_err(|err_str: String| eyre::eyre!("{err_str}"))
}
fn dump_vk(&self, file: &Path) {
use base64::{prelude::BASE64_STANDARD, Engine};
let f = File::create(file).expect("Failed to open file to dump VK");
let dump = VKDump {
chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
chunk_vk: BASE64_STANDARD.encode(self.chunk_verifier.get_app_vk()),
batch_vk: BASE64_STANDARD.encode(self.batch_verifier.get_app_vk()),
bundle_vk: BASE64_STANDARD.encode(self.bundle_verifier.get_app_vk()),
};
serde_json::to_writer(f, &dump).expect("Failed to dump VK");
}

View File

@@ -0,0 +1,14 @@
[package]
name = "libzkp-c"
version.workspace = true
edition.workspace = true
[lib]
name = "zkp"
crate-type = ["cdylib"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
libzkp = { path = "../libzkp" }
l2geth = { path = "../l2geth"}
tracing.workspace = true

167
crates/libzkp_c/src/lib.rs Normal file
View File

@@ -0,0 +1,167 @@
mod utils;
use std::ffi::{c_char, CString};
use libzkp::TaskType;
use utils::{c_char_to_str, c_char_to_vec};
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_verifier(config: *const c_char) {
let config_str = c_char_to_str(config);
libzkp::verifier_init(config_str).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_l2geth(config: *const c_char) {
let config_str = c_char_to_str(config);
l2geth::init(config_str).unwrap();
}
fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
let fork_name_str = c_char_to_str(fork_name);
let proof = c_char_to_vec(proof);
match libzkp::verify_proof(proof, fork_name_str, task_type) {
Err(e) => {
tracing::error!("{:?} verify failed, error: {:#}", task_type, e);
false as c_char
}
Ok(result) => result as c_char,
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_chunk_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Chunk)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Batch)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_bundle_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Bundle)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn dump_vk(fork_name: *const c_char, file: *const c_char) {
let fork_name_str = c_char_to_str(fork_name);
let file_str = c_char_to_str(file);
libzkp::dump_vk(fork_name_str, file_str).unwrap();
}
// Define a struct to hold handling results
#[repr(C)]
pub struct HandlingResult {
ok: c_char,
universal_task: *mut c_char,
metadata: *mut c_char,
expected_pi_hash: [c_char; 32],
}
fn failed_handling_result() -> HandlingResult {
HandlingResult {
ok: false as c_char,
universal_task: std::ptr::null_mut(),
metadata: std::ptr::null_mut(),
expected_pi_hash: Default::default(),
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_universal_task(
task_type: i32,
task: *const c_char,
fork_name: *const c_char,
) -> HandlingResult {
let mut interpreter = None;
let task_json = if task_type == TaskType::Chunk as i32 {
let pre_task_str = c_char_to_str(task);
let cli = l2geth::get_client();
match libzkp::checkout_chunk_task(pre_task_str, cli) {
Ok(str) => {
interpreter.replace(cli);
str
}
Err(e) => {
tracing::error!("gen_universal_task failed at pre interpret step, error: {e}");
return failed_handling_result();
}
}
} else {
c_char_to_str(task).to_string()
};
let ret =
libzkp::gen_universal_task(task_type, &task_json, c_char_to_str(fork_name), interpreter);
if let Ok((pi_hash, task_json, meta_json)) = ret {
let expected_pi_hash = pi_hash.0.map(|byte| byte as c_char);
HandlingResult {
ok: true as c_char,
universal_task: CString::new(task_json).unwrap().into_raw(),
metadata: CString::new(meta_json).unwrap().into_raw(),
expected_pi_hash,
}
} else {
tracing::error!("gen_universal_task failed, error: {:#}", ret.unwrap_err());
failed_handling_result()
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn release_task_result(result: HandlingResult) {
if !result.universal_task.is_null() {
let _ = CString::from_raw(result.universal_task);
}
if !result.metadata.is_null() {
let _ = CString::from_raw(result.metadata);
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_wrapped_proof(
proof: *const c_char,
metadata: *const c_char,
vk: *const c_char,
vk_len: usize,
) -> *mut c_char {
let proof_str = c_char_to_str(proof);
let metadata_str = c_char_to_str(metadata);
let vk_data = std::slice::from_raw_parts(vk as *const u8, vk_len);
match libzkp::gen_wrapped_proof(proof_str, metadata_str, vk_data) {
Ok(result) => CString::new(result).unwrap().into_raw(),
Err(e) => {
tracing::error!("gen_wrapped_proof failed, error: {:#}", e);
std::ptr::null_mut()
}
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn release_string(ptr: *mut c_char) {
if !ptr.is_null() {
let _ = CString::from_raw(ptr);
}
}
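// A hedged sketch of the ownership contract: every pointer produced above via
// `CString::into_raw` (universal_task, metadata, wrapped proof) must be handed
// back exactly once through `release_task_result` / `release_string`.
#[cfg(test)]
mod ownership_sketch {
use super::release_string;
use std::ffi::CString;
#[test]
fn release_string_reclaims_rust_allocation() {
let ptr = CString::new("proof-json").unwrap().into_raw();
// ownership returns to Rust here and the buffer is freed on drop
unsafe { release_string(ptr) };
}
}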

View File

@@ -0,0 +1,11 @@
use std::{ffi::CStr, os::raw::c_char};
pub(crate) fn c_char_to_str(c: *const c_char) -> &'static str {
let cstr = unsafe { CStr::from_ptr(c) };
cstr.to_str().unwrap()
}
pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec<u8> {
let cstr = unsafe { CStr::from_ptr(c) };
cstr.to_bytes().to_vec()
}

View File

@@ -0,0 +1,34 @@
[package]
name = "prover"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
scroll-zkvm-types.workspace = true
scroll-zkvm-prover-euclid.workspace = true
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", branch = "refactor/scroll" }
serde.workspace = true
serde_json.workspace = true
once_cell.workspace = true
base64.workspace = true
tiny-keccak = { workspace = true, features = ["sha3", "keccak"] }
eyre.workspace = true
futures = "0.3.30"
reqwest = { version = "0.12.4", features = ["gzip"] }
reqwest-middleware = "0.3"
reqwest-retry = "0.5"
hex = "0.4.3"
rand = "0.8.5"
tokio = "1.37.0"
async-trait = "0.1"
sled = "0.34.7"
http = "1.1.0"
clap = { version = "4.5", features = ["derive"] }
ctor = "0.2.8"
url = "2.5.4"
serde_bytes = "0.11.15"

View File

@@ -0,0 +1,11 @@
## Prover
A runnable zkVM prover that communicates with the coordinator, receiving proving tasks and generating proofs.
## Testing
+ Get the URL of the coordinator endpoint and an RPC endpoint for the corresponding chain
+ Build a `config.json` file from the template in the current directory, filling in the endpoints above
+ Call `make test_run`

View File

@@ -26,7 +26,7 @@ struct Args {
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
async fn main() -> eyre::Result<()> {
init_tracing();
let args = Args::parse();
@@ -39,7 +39,10 @@ async fn main() -> anyhow::Result<()> {
let cfg = LocalProverConfig::from_file(args.config_file)?;
let sdk_config = cfg.sdk_config.clone();
let local_prover = LocalProver::new(cfg);
let prover = ProverBuilder::new(sdk_config, local_prover).build().await?;
let prover = ProverBuilder::new(sdk_config, local_prover)
.build()
.await
.map_err(|e| eyre::eyre!("build prover fail: {e}"))?;
prover.run().await;

View File

@@ -1,8 +1,7 @@
use crate::zk_circuits_handler::{
euclid::EuclidHandler, euclidV2::EuclidV2Handler, CircuitsHandler,
};
use anyhow::{anyhow, Result};
use crate::zk_circuits_handler::{euclidV2::EuclidV2Handler, CircuitsHandler};
use async_trait::async_trait;
use base64::{prelude::BASE64_STANDARD, Engine};
use eyre::Result;
use scroll_proving_sdk::{
config::Config as SdkConfig,
prover::{
@@ -33,7 +32,7 @@ impl LocalProverConfig {
where
R: std::io::Read,
{
serde_json::from_reader(reader).map_err(|e| anyhow!(e))
serde_json::from_reader(reader).map_err(|e| eyre::eyre!(e))
}
pub fn from_file(file_name: String) -> Result<Self> {
@@ -69,7 +68,7 @@ impl ProvingService for LocalProver {
let vk = handler.get_vk(*proof_type).await;
if let Some(vk) = vk {
vks.push(base64::encode(vk));
vks.push(BASE64_STANDARD.encode(vk));
}
}
}
@@ -184,9 +183,6 @@ impl LocalProver {
let config = self.config.circuits.get(hard_fork_name).unwrap();
match hard_fork_name {
"euclid" => Arc::new(Arc::new(Mutex::new(EuclidHandler::new(
&config.workspace_path,
)))) as Arc<dyn CircuitsHandler>,
"euclidV2" => Arc::new(Arc::new(Mutex::new(EuclidV2Handler::new(
&config.workspace_path,
)))) as Arc<dyn CircuitsHandler>,

View File

@@ -1,11 +1,8 @@
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use scroll_proving_sdk::prover::types::CircuitType;
#[derive(Serialize, Deserialize, Default)]
pub struct Task {
#[serde(rename = "type", default)]
pub task_type: CircuitType,
pub task_data: String,
#[serde(default)]
pub hard_fork_name: String,
@@ -15,7 +12,6 @@ pub struct Task {
pub struct ProofDetail {
pub id: String,
#[serde(rename = "type", default)]
pub proof_type: CircuitType,
pub proof_data: String,
pub error: String,
}

View File

@@ -0,0 +1,67 @@
//pub mod euclid;
#[allow(non_snake_case)]
pub mod euclidV2;
use async_trait::async_trait;
use eyre::Result;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, ProofType};
use scroll_zkvm_prover_euclid::ProverConfig;
use std::path::Path;
#[async_trait]
pub trait CircuitsHandler: Sync + Send {
async fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>>;
async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String>;
}
#[derive(Clone, Copy)]
pub(crate) enum Phase {
EuclidV2,
}
impl Phase {
pub fn phase_spec_chunk(&self, workspace_path: &Path) -> ProverConfig {
let dir_cache = Some(workspace_path.join("cache"));
let path_app_exe = workspace_path.join("chunk/app.vmexe");
let path_app_config = workspace_path.join("chunk/openvm.toml");
let segment_len = Some((1 << 22) - 100);
ProverConfig {
dir_cache,
path_app_config,
path_app_exe,
segment_len,
..Default::default()
}
}
pub fn phase_spec_batch(&self, workspace_path: &Path) -> ProverConfig {
let dir_cache = Some(workspace_path.join("cache"));
let path_app_exe = workspace_path.join("batch/app.vmexe");
let path_app_config = workspace_path.join("batch/openvm.toml");
let segment_len = Some((1 << 22) - 100);
ProverConfig {
dir_cache,
path_app_config,
path_app_exe,
segment_len,
..Default::default()
}
}
pub fn phase_spec_bundle(&self, workspace_path: &Path) -> ProverConfig {
let dir_cache = Some(workspace_path.join("cache"));
let path_app_config = workspace_path.join("bundle/openvm.toml");
let segment_len = Some((1 << 22) - 100);
match self {
Phase::EuclidV2 => ProverConfig {
dir_cache,
path_app_config,
segment_len,
path_app_exe: workspace_path.join("bundle/app.vmexe"),
..Default::default()
},
}
}
}
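// A hedged sketch of resolving the per-circuit configs for a workspace laid out
// as `<workspace>/{chunk,batch,bundle}/{app.vmexe,openvm.toml}` plus a cache dir.
#[allow(dead_code)]
fn demo_phase_specs(workspace: &Path) {
let phase = Phase::EuclidV2;
let chunk_cfg = phase.phase_spec_chunk(workspace);
let batch_cfg = phase.phase_spec_batch(workspace);
let bundle_cfg = phase.phase_spec_bundle(workspace);
// all three share the segment length (2^22 - 100) and the cache directory
assert_eq!(chunk_cfg.segment_len, batch_cfg.segment_len);
assert_eq!(batch_cfg.segment_len, bundle_cfg.segment_len);
}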

View File

@@ -1,13 +1,11 @@
use std::{path::Path, sync::Arc};
use super::{euclid::Phase, CircuitsHandler};
use anyhow::{anyhow, Result};
use super::{CircuitsHandler, Phase};
use async_trait::async_trait;
use eyre::Result;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, ProofType};
use scroll_zkvm_prover_euclid::{
task::{batch::BatchProvingTask, bundle::BundleProvingTask, chunk::ChunkProvingTask},
BatchProver, BundleProverEuclidV2, ChunkProver,
};
use scroll_zkvm_prover_euclid::{BatchProver, BundleProverEuclidV2, ChunkProver};
use scroll_zkvm_types::ProvingTask;
use tokio::sync::Mutex;
pub struct EuclidV2Handler {
chunk_prover: ChunkProver,
@@ -50,30 +48,26 @@ impl CircuitsHandler for Arc<Mutex<EuclidV2Handler>> {
}
async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
match prove_request.proof_type {
ProofType::Chunk => {
let task: ChunkProvingTask = serde_json::from_str(&prove_request.input)?;
let proof = self.try_lock().unwrap().chunk_prover.gen_proof(&task)?;
let u_task: ProvingTask = serde_json::from_str(&prove_request.input)?;
Ok(serde_json::to_string(&proof)?)
}
ProofType::Batch => {
let task: BatchProvingTask = serde_json::from_str(&prove_request.input)?;
let proof = self.try_lock().unwrap().batch_prover.gen_proof(&task)?;
Ok(serde_json::to_string(&proof)?)
}
ProofType::Bundle => {
let batch_proofs: BundleProvingTask = serde_json::from_str(&prove_request.input)?;
let proof = self
.try_lock()
.unwrap()
.bundle_prover
.gen_proof_evm(&batch_proofs)?;
Ok(serde_json::to_string(&proof)?)
}
_ => Err(anyhow!("Unsupported proof type")),
}
let proof = match prove_request.proof_type {
ProofType::Chunk => self
.try_lock()
.unwrap()
.chunk_prover
.gen_proof_universal(&u_task, false)?,
ProofType::Batch => self
.try_lock()
.unwrap()
.batch_prover
.gen_proof_universal(&u_task, false)?,
ProofType::Bundle => self
.try_lock()
.unwrap()
.bundle_prover
.gen_proof_universal(&u_task, true)?,
_ => return Err(eyre::eyre!("Unsupported proof type")),
};
Ok(serde_json::to_string(&proof)?)
}
}

View File

@@ -59,20 +59,20 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, int64(27), cur)
assert.Equal(t, int64(28), cur)
}
func testMigrate(t *testing.T) {
assert.NoError(t, Migrate(pgDB))
cur, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(27), cur)
assert.Equal(t, int64(28), cur)
}
func testRollback(t *testing.T) {
version, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(27), version)
assert.Equal(t, int64(28), version)
assert.NoError(t, Rollback(pgDB, nil))

View File

@@ -0,0 +1,15 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE prover_task
ADD COLUMN metadata BYTEA;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE IF EXISTS prover_task
DROP COLUMN IF EXISTS metadata;
-- +goose StatementEnd

View File

@@ -671,6 +671,7 @@ github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 h1:y4B3+GPxKlrigF1ha
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
@@ -791,6 +792,7 @@ github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS3
github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/deepmap/oapi-codegen v1.6.0 h1:w/d1ntwh91XI0b/8ja7+u5SvA4IFfM0UNNLmiDR1gg0=
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
@@ -1403,8 +1405,14 @@ github.com/scroll-tech/da-codec v0.1.1-0.20241005172014-aca0bef21638 h1:2KIfClLB
github.com/scroll-tech/da-codec v0.1.1-0.20241005172014-aca0bef21638/go.mod h1:6jxEQvNc7GQKMSUi25PthAUY3WnZL8CN0yWivBgAXi0=
github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b h1:5H6V6ybacXFJ2ti+eFwtf+12Otufod6goxK6/u7Nu1k=
github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b/go.mod h1:48uxaqVgpD8ulH8p+nrBtfeLHZ9tX82bVVdPNkW3rPE=
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
github.com/scroll-tech/da-codec v0.1.3-0.20250227072756-a1482833595f h1:YYbhuUwjowqI4oyXtECRofck7Fyj18e1tcRjuQlZpJE=
github.com/scroll-tech/da-codec v0.1.3-0.20250227072756-a1482833595f/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
github.com/scroll-tech/da-codec v0.1.3-0.20250313120912-344f2d5e33e1/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/da-codec v0.1.3-0.20250609113414-f33adf0904bd h1:NUol+dPtZ8LzLYrP7CPq9tRI0jAhxFxrYNmKYrTQgKE=
github.com/scroll-tech/da-codec v0.1.3-0.20250609113414-f33adf0904bd/go.mod h1:gz5x3CsLy5htNTbv4PWRPBU9nSAujfx1U2XtFcXoFuk=
github.com/scroll-tech/da-codec v0.1.3-0.20250609154559-8935de62c148 h1:cyK1ifU2fRoMl8YWR9LOsZK4RvJnlG3RODgakj5I8VY=
github.com/scroll-tech/da-codec v0.1.3-0.20250609154559-8935de62c148/go.mod h1:gz5x3CsLy5htNTbv4PWRPBU9nSAujfx1U2XtFcXoFuk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod h1:swB5NSp8pKNDuYsTxfR08bHS6L56i119PBx8fxvV8Cs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=

View File

@@ -94,12 +94,14 @@
"propose_interval_milliseconds": 100,
"max_block_num_per_chunk": 100,
"max_l2_gas_per_chunk": 20000000,
"chunk_timeout_sec": 300
"chunk_timeout_sec": 300,
"max_uncompressed_batch_bytes_size": 4194304
},
"batch_proposer_config": {
"propose_interval_milliseconds": 1000,
"batch_timeout_sec": 300,
"max_chunks_per_batch": 45
"max_chunks_per_batch": 45,
"max_uncompressed_batch_bytes_size": 4194304
},
"bundle_proposer_config": {
"max_batch_num_per_bundle": 20,

View File

@@ -15,8 +15,8 @@ require (
github.com/holiman/uint256 v1.3.2
github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/da-codec v0.1.3-0.20250519114140-bfa7133d4ad1
github.com/scroll-tech/go-ethereum v1.10.14-0.20250610090337-00c5c2bd3e65
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7
github.com/smartystreets/goconvey v1.8.0
github.com/spf13/viper v1.19.0
github.com/stretchr/testify v1.10.0

View File

@@ -285,10 +285,10 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/scroll-tech/da-codec v0.1.3-0.20250519114140-bfa7133d4ad1 h1:6aKqJSal+QVdB5HMWMs0JTbAIZ6/iAHJx9qizz0w9dU=
github.com/scroll-tech/da-codec v0.1.3-0.20250519114140-bfa7133d4ad1/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250610090337-00c5c2bd3e65 h1:idsnkl5rwVr7eNUB0HxdkKI0L3zbTm8XSGwMTB5ndws=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250610090337-00c5c2bd3e65/go.mod h1:756YMENiSfx/5pCwKq3+uSTWjXuHTbiCB+TirJjsQT8=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6 h1:vb2XLvQwCf+F/ifP6P/lfeiQrHY6+Yb/E3R4KHXLqSE=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7 h1:1rN1qocsQlOyk1VCpIEF1J5pfQbLAi1pnMZSLQS37jQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=

View File

@@ -30,17 +30,19 @@ type L2Config struct {
// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
MaxBlockNumPerChunk uint64 `json:"max_block_num_per_chunk"`
MaxL2GasPerChunk uint64 `json:"max_l2_gas_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
MaxBlockNumPerChunk uint64 `json:"max_block_num_per_chunk"`
MaxL2GasPerChunk uint64 `json:"max_l2_gas_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"`
}
// BatchProposerConfig loads batch_proposer configuration items.
type BatchProposerConfig struct {
ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
MaxChunksPerBatch int `json:"max_chunks_per_batch"`
ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
MaxChunksPerBatch int `json:"max_chunks_per_batch"`
MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"`
}
// BundleProposerConfig loads bundle_proposer configuration items.

View File

@@ -377,7 +377,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
}
}
var batchesToSubmit []*dbBatchWithChunksAndParent
var batchesToSubmit []*dbBatchWithChunks
for i, dbBatch := range dbBatches {
var dbChunks []*orm.Chunk
var dbParentBatch *orm.Batch
@@ -433,10 +433,9 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
}
if batchesToSubmitLen < r.cfg.BatchSubmission.MaxBatches {
batchesToSubmit = append(batchesToSubmit, &dbBatchWithChunksAndParent{
Batch: dbBatch,
Chunks: dbChunks,
ParentBatch: dbParentBatch,
batchesToSubmit = append(batchesToSubmit, &dbBatchWithChunks{
Batch: dbBatch,
Chunks: dbChunks,
})
}
@@ -466,7 +465,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
codecVersion := encoding.CodecVersion(firstBatch.CodecVersion)
switch codecVersion {
case encoding.CodecV7:
case encoding.CodecV7, encoding.CodecV8:
calldata, blobs, maxBlockHeight, totalGasUsed, err = r.constructCommitBatchPayloadCodecV7(batchesToSubmit, firstBatch, lastBatch)
if err != nil {
log.Error("failed to construct constructCommitBatchPayloadCodecV7 payload for V7", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "err", err)
@@ -477,7 +476,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}
txHash, blobBaseFee, err := r.commitSender.SendTransaction(r.contextIDFromBatches(batchesToSubmit), &r.cfg.RollupContractAddress, calldata, blobs)
txHash, blobBaseFee, err := r.commitSender.SendTransaction(r.contextIDFromBatches(codecVersion, batchesToSubmit), &r.cfg.RollupContractAddress, calldata, blobs)
if err != nil {
if errors.Is(err, sender.ErrTooManyPendingBlobTxs) {
r.metrics.rollupL2RelayerProcessPendingBatchErrTooManyPendingBlobTxsTotal.Inc()
@@ -523,28 +522,25 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
log.Info("Sent the commitBatches tx to layer1", "batches count", len(batchesToSubmit), "start index", firstBatch.Index, "start hash", firstBatch.Hash, "end index", lastBatch.Index, "end hash", lastBatch.Hash, "tx hash", txHash.String())
}
func (r *Layer2Relayer) contextIDFromBatches(batches []*dbBatchWithChunksAndParent) string {
contextIDs := []string{"v7"}
func (r *Layer2Relayer) contextIDFromBatches(codecVersion encoding.CodecVersion, batches []*dbBatchWithChunks) string {
contextIDs := []string{fmt.Sprintf("v%d", codecVersion)}
for _, batch := range batches {
contextIDs = append(contextIDs, batch.Batch.Hash)
}
return strings.Join(contextIDs, "-")
}
func (r *Layer2Relayer) batchHashesFromContextID(contextID string) []string {
if strings.HasPrefix(contextID, "v7-") {
return strings.Split(contextID, "-")[1:]
parts := strings.SplitN(contextID, "-", 2)
if len(parts) == 2 && strings.HasPrefix(parts[0], "v") {
return strings.Split(parts[1], "-")
}
return []string{contextID}
}
type dbBatchWithChunksAndParent struct {
Batch *orm.Batch
Chunks []*orm.Chunk
ParentBatch *orm.Batch
type dbBatchWithChunks struct {
Batch *orm.Batch
Chunks []*orm.Chunk
}
// ProcessPendingBundles submits proof to layer 1 rollup contract
@@ -693,7 +689,7 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
var calldata []byte
switch encoding.CodecVersion(bundle.CodecVersion) {
case encoding.CodecV7:
case encoding.CodecV7, encoding.CodecV8:
calldata, err = r.constructFinalizeBundlePayloadCodecV7(dbBatch, endChunk, aggProof)
if err != nil {
return fmt.Errorf("failed to construct finalizeBundle payload codecv7, bundle index: %v, last batch index: %v, err: %w", bundle.Index, dbBatch.Index, err)
@@ -898,7 +894,7 @@ func (r *Layer2Relayer) handleL2RollupRelayerConfirmLoop(ctx context.Context) {
}
}
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*dbBatchWithChunksAndParent, firstBatch, lastBatch *orm.Batch) ([]byte, []*kzg4844.Blob, uint64, uint64, error) {
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*dbBatchWithChunks, firstBatch, lastBatch *orm.Batch) ([]byte, []*kzg4844.Blob, uint64, uint64, error) {
var maxBlockHeight uint64
var totalGasUsed uint64
blobs := make([]*kzg4844.Blob, 0, len(batchesToSubmit))
@@ -929,7 +925,7 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*db
encodingBatch := &encoding.Batch{
Index: b.Batch.Index,
ParentBatchHash: common.HexToHash(b.ParentBatch.Hash),
ParentBatchHash: common.HexToHash(b.Batch.ParentBatchHash),
PrevL1MessageQueueHash: common.HexToHash(b.Batch.PrevL1MessageQueueHash),
PostL1MessageQueueHash: common.HexToHash(b.Batch.PostL1MessageQueueHash),
Blocks: batchBlocks,
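
Note the simplification here: the parent batch hash is now read from the batch row itself (Batch.ParentBatchHash), so the extra parent-batch lookup and the ParentBatch field on the helper struct above are no longer needed.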

View File

@@ -54,7 +54,7 @@ type BatchProposer struct {
// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
log.Info("new batch proposer", "batchTimeoutSec", cfg.BatchTimeoutSec, "maxBlobSize", maxBlobSize)
log.Info("new batch proposer", "batchTimeoutSec", cfg.BatchTimeoutSec, "maxBlobSize", maxBlobSize, "maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize)
p := &BatchProposer{
ctx: ctx,
@@ -294,16 +294,18 @@ func (p *BatchProposer) proposeBatch() error {
p.recordTimerBatchMetrics(metrics)
if metrics.L1CommitBlobSize > maxBlobSize {
if metrics.L1CommitBlobSize > maxBlobSize || metrics.L1CommitUncompressedBatchBytesSize > p.cfg.MaxUncompressedBatchBytesSize {
if i == 0 {
// The first chunk exceeds hard limits, which indicates a bug in the chunk-proposer; a manual fix is needed.
return fmt.Errorf("the first chunk exceeds limits; start block number: %v, end block number: %v, limits: %+v, maxChunkNum: %v, maxBlobSize: %v",
dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, maxChunksThisBatch, maxBlobSize)
return fmt.Errorf("the first chunk exceeds limits; start block number: %v, end block number: %v, limits: %+v, maxChunkNum: %v, maxBlobSize: %v, maxUncompressedBatchBytesSize: %v",
dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, maxChunksThisBatch, maxBlobSize, p.cfg.MaxUncompressedBatchBytesSize)
}
log.Debug("breaking limit condition in batching",
"l1CommitBlobSize", metrics.L1CommitBlobSize,
"maxBlobSize", maxBlobSize)
"maxBlobSize", maxBlobSize,
"L1CommitUncompressedBatchBytesSize", metrics.L1CommitUncompressedBatchBytesSize,
"maxUncompressedBatchBytesSize", p.cfg.MaxUncompressedBatchBytesSize)
lastChunk := batch.Chunks[len(batch.Chunks)-1]
batch.Chunks = batch.Chunks[:len(batch.Chunks)-1]
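
Both the batch and chunk proposers use this same growth-and-back-off pattern. In sketch form, with toy numbers chosen to mirror the 4KiB tests added below (the real metrics are re-estimated over the whole candidate each iteration; a running sum is a simplification):

package main

import "fmt"

func main() {
	// Hypothetical per-item uncompressed sizes (~3KiB each), as in the new tests.
	chunkSizes := []uint64{3 << 10, 3 << 10}
	const maxUncompressed = 4 << 10 // the 4KiB test limit
	var total uint64
	kept := 0
	for i, size := range chunkSizes {
		total += size
		if total > maxUncompressed {
			if i == 0 {
				panic("first item alone exceeds limits") // hard failure in the real code
			}
			break // drop the item that tipped over; propose what came before
		}
		kept++
	}
	fmt.Println("items kept:", kept) // 1
}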

View File

@@ -72,9 +72,10 @@ func testBatchProposerLimitsCodecV7(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxL2GasPerChunk: 20000000,
ChunkTimeoutSec: 300,
MaxBlockNumPerChunk: 1,
MaxL2GasPerChunk: 20000000,
ChunkTimeoutSec: 300,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, &params.ChainConfig{
LondonBlock: big.NewInt(0),
BernoulliBlock: big.NewInt(0),
@@ -88,8 +89,9 @@ func testBatchProposerLimitsCodecV7(t *testing.T) {
cp.TryProposeChunk() // chunk2 contains block2
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: tt.batchTimeoutSec,
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: tt.batchTimeoutSec,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, &params.ChainConfig{
LondonBlock: big.NewInt(0),
BernoulliBlock: big.NewInt(0),
@@ -152,9 +154,10 @@ func testBatchProposerBlobSizeLimitCodecV7(t *testing.T) {
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
MaxBlockNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
blockHeight := uint64(0)
@@ -172,8 +175,9 @@ func testBatchProposerBlobSizeLimitCodecV7(t *testing.T) {
}
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: math.MaxUint32,
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: math.MaxUint32,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
for i := 0; i < 2; i++ {
@@ -223,9 +227,10 @@ func testBatchProposerMaxChunkNumPerBatchLimitCodecV7(t *testing.T) {
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
MaxBlockNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
@@ -238,8 +243,9 @@ func testBatchProposerMaxChunkNumPerBatchLimitCodecV7(t *testing.T) {
}
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunksPerBatch: 45,
BatchTimeoutSec: math.MaxUint32,
MaxChunksPerBatch: 45,
BatchTimeoutSec: math.MaxUint32,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
bp.TryProposeBatch()
@@ -250,3 +256,102 @@ func testBatchProposerMaxChunkNumPerBatchLimitCodecV7(t *testing.T) {
assert.Equal(t, expectedChunkNum, dbBatch.EndChunkIndex)
}
func testBatchProposerUncompressedBatchBytesLimitCodecV8(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
// Add genesis batch
genesisBlock := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
}
genesisChunk := &encoding.Chunk{
Blocks: []*encoding.Block{genesisBlock},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), genesisChunk, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
genesisBatch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{genesisChunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), genesisBatch, encoding.CodecV0, utils.BatchMetrics{})
assert.NoError(t, err)
// Create blocks with large calldata
block := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
// Create large calldata (3KiB per block)
largeCalldata := make([]byte, 3*1024) // 3KiB calldata
for i := range largeCalldata {
largeCalldata[i] = byte(i % 256)
}
// Modify the block to have a transaction with large calldata
block.Transactions[0].Data = "0x" + common.Bytes2Hex(largeCalldata)
chainConfig := &params.ChainConfig{
LondonBlock: big.NewInt(0),
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
DarwinTime: new(uint64),
DarwinV2Time: new(uint64),
EuclidTime: new(uint64),
EuclidV2Time: new(uint64),
FeynmanTime: new(uint64),
}
// Create chunk proposer with no uncompressed batch bytes limit for chunks
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1, // One block per chunk
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint32,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV8, chainConfig, db, nil)
// Insert 2 blocks with large calldata and create 2 chunks
l2BlockOrm := orm.NewL2Block(db)
for i := uint64(1); i <= 2; i++ {
blockCopy := *block
blockCopy.Header = &gethTypes.Header{}
*blockCopy.Header = *block.Header
blockCopy.Header.Number = new(big.Int).SetUint64(i)
blockCopy.Header.Time = i
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{&blockCopy})
assert.NoError(t, err)
cp.TryProposeChunk() // Each call creates one chunk with one block
}
// Create batch proposer with 4KiB uncompressed batch bytes limit
// Each chunk is ~3KiB, so 1 chunk (~3KiB) should fit, but 2 chunks (~6KiB) should exceed the limit
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunksPerBatch: math.MaxInt32, // No chunk count limit
BatchTimeoutSec: math.MaxUint32, // No timeout limit
MaxUncompressedBatchBytesSize: 4 * 1024, // 4KiB limit
}, encoding.CodecV8, chainConfig, db, nil)
bp.TryProposeBatch()
// Check that a batch was created
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, 2) // genesis batch + 1 new batch
// Verify that the batch contains only 1 chunk (not 2) due to the uncompressed batch bytes limit
newBatch := batches[1] // Skip genesis batch
assert.Equal(t, uint64(1), newBatch.StartChunkIndex)
assert.Equal(t, uint64(1), newBatch.EndChunkIndex) // Should only include chunk 1
// Verify that the second chunk is still available for next batch
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 2, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 1) // Second chunk should still be available
assert.Equal(t, "", chunks[0].BatchHash) // Should not be assigned to any batch yet
}

View File

@@ -93,14 +93,16 @@ func testBundleProposerLimitsCodecV7(t *testing.T) {
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint32,
MaxBlockNumPerChunk: 1,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint32,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
bap := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: 0,
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: 0,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
cp.TryProposeChunk() // chunk1 contains block1

View File

@@ -295,17 +295,19 @@ func (p *ChunkProposer) proposeChunk() error {
p.recordTimerChunkMetrics(metrics)
if metrics.L2Gas > p.cfg.MaxL2GasPerChunk || metrics.L1CommitBlobSize > maxBlobSize {
if metrics.L2Gas > p.cfg.MaxL2GasPerChunk || metrics.L1CommitBlobSize > maxBlobSize || metrics.L1CommitUncompressedBatchBytesSize > p.cfg.MaxUncompressedBatchBytesSize {
if i == 0 {
// The first block exceeds hard limits, which indicates a bug in the sequencer; a manual fix is needed.
return fmt.Errorf("the first block exceeds limits; block number: %v, limits: %+v, maxBlobSize: %v", block.Header.Number, metrics, maxBlobSize)
return fmt.Errorf("the first block exceeds limits; block number: %v, limits: %+v, maxBlobSize: %v, maxUncompressedBatchBytesSize: %v", block.Header.Number, metrics, maxBlobSize, p.cfg.MaxUncompressedBatchBytesSize)
}
log.Debug("breaking limit condition in chunking",
"l2Gas", metrics.L2Gas,
"maxL2Gas", p.cfg.MaxL2GasPerChunk,
"l1CommitBlobSize", metrics.L1CommitBlobSize,
"maxBlobSize", maxBlobSize)
"maxBlobSize", maxBlobSize,
"L1CommitUncompressedBatchBytesSize", metrics.L1CommitUncompressedBatchBytesSize,
"maxUncompressedBatchBytesSize", p.cfg.MaxUncompressedBatchBytesSize)
chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1]
chunk.PostL1MessageQueueHash = previousPostL1MessageQueueHash

View File

@@ -6,6 +6,7 @@ import (
"testing"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/math"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/params"
@@ -84,9 +85,10 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: tt.maxBlockNum,
MaxL2GasPerChunk: tt.maxL2Gas,
ChunkTimeoutSec: tt.chunkTimeoutSec,
MaxBlockNumPerChunk: tt.maxBlockNum,
MaxL2GasPerChunk: tt.maxL2Gas,
ChunkTimeoutSec: tt.chunkTimeoutSec,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}, db, nil)
cp.TryProposeChunk()
@@ -128,9 +130,10 @@ func testChunkProposerBlobSizeLimitCodecV7(t *testing.T) {
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 255,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint32,
MaxBlockNumPerChunk: 255,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint32,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
for i := 0; i < 2; i++ {
@@ -153,3 +156,76 @@ func testChunkProposerBlobSizeLimitCodecV7(t *testing.T) {
assert.Equal(t, expected, chunk.EndBlockNumber)
}
}
func testChunkProposerUncompressedBatchBytesLimitCodecV8(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
// Create a block with very large calldata to test the uncompressed batch bytes limit
block := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
// Create a transaction with large calldata (around 3KiB)
largeCalldata := make([]byte, 3*1024) // 3KiB calldata
for i := range largeCalldata {
largeCalldata[i] = byte(i % 256)
}
// Modify the block to have a transaction with large calldata
block.Transactions[0].Data = "0x" + common.Bytes2Hex(largeCalldata)
// Insert two identical blocks with large calldata
l2BlockOrm := orm.NewL2Block(db)
for i := uint64(0); i < 2; i++ {
blockCopy := *block
blockCopy.Header = &gethTypes.Header{}
*blockCopy.Header = *block.Header
blockCopy.Header.Number = new(big.Int).SetUint64(i + 1)
blockCopy.Header.Time = i + 1
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{&blockCopy})
assert.NoError(t, err)
}
// Add genesis chunk
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), &encoding.Chunk{Blocks: []*encoding.Block{{Header: &gethTypes.Header{Number: big.NewInt(0)}}}}, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
chainConfig := &params.ChainConfig{
LondonBlock: big.NewInt(0),
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
DarwinTime: new(uint64),
DarwinV2Time: new(uint64),
EuclidTime: new(uint64),
EuclidV2Time: new(uint64),
FeynmanTime: new(uint64),
}
// Set max_uncompressed_batch_bytes_size to 4KiB (4 * 1024)
// One block (~3KiB) should fit, but two blocks (~6KiB) should exceed the limit
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64, // No block number limit
MaxL2GasPerChunk: math.MaxUint64, // No gas limit
ChunkTimeoutSec: math.MaxUint32, // No timeout limit
MaxUncompressedBatchBytesSize: 4 * 1024, // 4KiB limit
}, encoding.CodecV8, chainConfig, db, nil)
// Try to propose chunk
cp.TryProposeChunk()
// Check that a chunk was created
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 1, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 1)
// Verify that the chunk contains only 1 block (not 2) due to the uncompressed batch bytes limit
chunk := chunks[0]
assert.Equal(t, uint64(1), chunk.StartBlockNumber)
assert.Equal(t, uint64(1), chunk.EndBlockNumber)
// Verify that the second block is still available for next chunk
blockOrm := orm.NewL2Block(db)
blocks, err := blockOrm.GetL2BlocksGEHeight(context.Background(), 2, 0)
assert.NoError(t, err)
assert.Len(t, blocks, 1) // Second block should still be available
}

View File

@@ -103,11 +103,13 @@ func TestFunction(t *testing.T) {
// Run chunk proposer test cases.
t.Run("TestChunkProposerLimitsCodecV7", testChunkProposerLimitsCodecV7)
t.Run("TestChunkProposerBlobSizeLimitCodecV7", testChunkProposerBlobSizeLimitCodecV7)
t.Run("TestChunkProposerUncompressedBatchBytesLimitCodecV8", testChunkProposerUncompressedBatchBytesLimitCodecV8)
// Run batch proposer test cases.
t.Run("TestBatchProposerLimitsCodecV7", testBatchProposerLimitsCodecV7)
t.Run("TestBatchProposerBlobSizeLimitCodecV7", testBatchProposerBlobSizeLimitCodecV7)
t.Run("TestBatchProposerMaxChunkNumPerBatchLimitCodecV7", testBatchProposerMaxChunkNumPerBatchLimitCodecV7)
t.Run("TestBatchProposerUncompressedBatchBytesLimitCodecV8", testBatchProposerUncompressedBatchBytesLimitCodecV8)
// Run bundle proposer test cases.
t.Run("TestBundleProposerLimitsCodecV7", testBundleProposerLimitsCodecV7)

View File

@@ -15,7 +15,8 @@ type ChunkMetrics struct {
L2Gas uint64
FirstBlockTimestamp uint64
L1CommitBlobSize uint64
L1CommitBlobSize uint64
L1CommitUncompressedBatchBytesSize uint64
// timing metrics
EstimateBlobSizeTime time.Duration
@@ -41,7 +42,7 @@ func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVer
}
metrics.EstimateBlobSizeTime, err = measureTime(func() error {
_, metrics.L1CommitBlobSize, err = codec.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codec.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)
return err
})
if err != nil {
@@ -56,7 +57,8 @@ type BatchMetrics struct {
NumChunks uint64
FirstBlockTimestamp uint64
L1CommitBlobSize uint64
L1CommitBlobSize uint64
L1CommitUncompressedBatchBytesSize uint64
// timing metrics
EstimateBlobSizeTime time.Duration
@@ -75,7 +77,7 @@ func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVer
}
metrics.EstimateBlobSizeTime, err = measureTime(func() error {
_, metrics.L1CommitBlobSize, err = codec.EstimateBatchL1CommitBatchSizeAndBlobSize(batch)
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codec.EstimateBatchL1CommitBatchSizeAndBlobSize(batch)
return err
})
if err != nil {
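
Both estimators already returned the uncompressed batch size as their first value; it was previously discarded with a blank identifier. A one-line sketch of the change:

// before: _, blobSize, err := codec.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)
// after:  both return values now feed the metrics compared against the proposer limits.
uncompressedSize, blobSize, err := codec.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)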

View File

@@ -4,11 +4,13 @@
"chunk_proposer_config": {
"max_block_num_per_chunk": 100,
"max_l2_gas_per_chunk": 20000000,
"chunk_timeout_sec": 72000000000
"chunk_timeout_sec": 72000000000,
"max_uncompressed_batch_bytes_size": 4194304
},
"batch_proposer_config": {
"batch_timeout_sec": 72000000000,
"max_chunks_per_batch": 45
"max_chunks_per_batch": 45,
"max_uncompressed_batch_bytes_size": 4194304
},
"bundle_proposer_config": {
"max_batch_num_per_bundle": 45,

View File

@@ -6,7 +6,8 @@
"darwinTime": 0,
"darwinV2Time": 0,
"euclidTime": 0,
"euclidV2Time": 0
"euclidV2Time": 0,
"feynmanTime": 0
},
"nonce": "0x0000000000000033",
"timestamp": "0x0",

View File

@@ -118,14 +118,16 @@ func testCommitBatchAndFinalizeBundleCodecV7(t *testing.T) {
}
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 100,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: 300,
MaxBlockNumPerChunk: 100,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: 300,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
bap := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: 300,
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: 300,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{

rust-toolchain Normal file
View File

@@ -0,0 +1,3 @@
[toolchain]
channel = "nightly-2025-02-14"
targets = ["riscv32im-unknown-none-elf", "x86_64-unknown-linux-gnu"]
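
This new workspace-level toolchain file pins a single nightly for every crate and adds the riscv32im-unknown-none-elf target used when building zkVM guest programs; the per-crate rust-toolchain pin (nightly-2024-12-06, deleted further below) is superseded.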

View File

@@ -1,37 +1,3 @@
[package]
name = "zkp"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["cdylib"]
[patch.crates-io]
# patched add rkyv support & MSRV 1.77
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v0.8.21" }
ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-openvm-v1.0.0-rc.1" }
[dependencies]
euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.4.2", package = "scroll-zkvm-prover" }
euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.4.2", package = "scroll-zkvm-verifier" }
base64 = "0.13.0"
env_logger = "0.9.0"
libc = "0.2"
log = "0.4"
once_cell = "1.19"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0.66"
anyhow = "1.0.86"
[profile.test]
opt-level = 3
[profile.release]
opt-level = 3
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
@@ -63,4 +29,4 @@ p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", t
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }

View File

@@ -1,79 +0,0 @@
[package]
name = "prover"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[patch.crates-io]
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v0.8.21" }
ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-openvm-v1.0.0-rc.1" }
[dependencies]
anyhow = "1.0"
log = "0.4"
env_logger = "0.11.3"
serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116"
futures = "0.3.30"
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.4.2", package = "scroll-zkvm-prover" }
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", branch = "main", features = [
"openvm",
] }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade", features = [
"scroll",
] }
base64 = "0.13.1"
reqwest = { version = "0.12.4", features = ["gzip"] }
reqwest-middleware = "0.3"
reqwest-retry = "0.5"
once_cell = "1.19.0"
hex = "0.4.3"
tiny-keccak = { version = "2.0.0", features = ["sha3", "keccak"] }
rand = "0.8.5"
eth-keystore = "0.5.0"
rlp = "0.5.2"
tokio = "1.37.0"
async-trait = "0.1"
sled = "0.34.7"
http = "1.1.0"
clap = { version = "4.5", features = ["derive"] }
ctor = "0.2.8"
url = "2.5.4"
serde_bytes = "0.11.15"
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
"nightly-features",
], tag = "v0.2.0" }
p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }

View File

@@ -1,9 +1,9 @@
.PHONY: prover lint tests_binary
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
PLONKY3_VERSION=$(shell grep -m 1 "Plonky3.git" ./Cargo.lock | cut -d "#" -f2 | cut -c-7)
PLONKY3_VERSION=$(shell grep -m 1 "Plonky3.git" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
PLONKY3_VERSION=$(shell grep -m 1 "Plonky3.git" ./Cargo.lock | cut -d "\#" -f2 | cut -c-7)
PLONKY3_VERSION=$(shell grep -m 1 "Plonky3.git" ../Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif
ZKVM_VERSION=$(shell ./print_high_zkvm_version.sh)
@@ -40,7 +40,7 @@ prover:
tests_binary:
cargo clean && cargo test --release --no-run
ls target/release/deps/prover* | grep -v "\.d" | xargs -I{} ln -sf {} ./prover.test
ls ../target/release/deps/prover* | grep -v "\.d" | xargs -I{} ln -sf {} ./prover.test
lint:
cargo check --all-features
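
Both version probes here, and the grep in print_high_zkvm_version.sh below, now read ../Cargo.lock because the lockfile moved up to the new Cargo workspace root.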

View File

@@ -0,0 +1,31 @@
{
"sdk_config": {
"prover_name_prefix": "test-prover",
"keys_dir": ".work",
"coordinator": {
"base_url": "<the url of coordinator>",
"retry_count": 10,
"retry_wait_time_sec": 10,
"connection_timeout_sec": 1800
},
"l2geth": {
"endpoint": "<the url of rpc endpoint>"
},
"prover": {
"circuit_type": 2,
"supported_proof_types": [
1,
2,
3
],
"circuit_version": "v0.13.1"
},
"db_path": ".work/db"
},
"circuits": {
"euclidV2": {
"hard_fork_name": "euclidV2",
"workspace_path": ".work"
}
}
}

View File

@@ -1,7 +1,7 @@
#!/bin/bash
set -ue
higher_zkvm_item=`grep "zkvm-prover" ./Cargo.lock | sort | uniq | awk -F "[#=]" '{print $3" "$4}' | sort -k 1 | tail -n 1`
higher_zkvm_item=`grep "zkvm-prover" ../Cargo.lock | sort | uniq | awk -F "[#=]" '{print $3" "$4}' | sort -k 1 | tail -n 1`
higher_version=`echo $higher_zkvm_item | awk '{print $1}'`

View File

@@ -1 +0,0 @@
nightly-2024-12-06

View File

@@ -1,15 +0,0 @@
pub mod euclid;
#[allow(non_snake_case)]
pub mod euclidV2;
use anyhow::Result;
use async_trait::async_trait;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, ProofType};
#[async_trait]
pub trait CircuitsHandler: Sync + Send {
async fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>>;
async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String>;
}