Compare commits


14 Commits

Author SHA1 Message Date
colin
9dc57c6126 feat(rollup-relayer): support codecv8 (#1681)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-06-26 21:17:56 +08:00
Ho
9367565a31 [Refactor] Universal task (#1680)
Co-authored-by: georgehao <haohongfan@gmail.com>
2025-06-25 14:42:51 +08:00
Ho
d2f7663d26 [Refactor] For universal prover task (the update on coordinator) (#1682)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2025-06-19 14:54:08 +09:00
Morty
b0943b1035 refactor(rollup-relayer): simplify construct commit batch payload (#1673) 2025-06-18 21:37:29 +08:00
Morty
5d6b5a89f4 feat(blob-uploader): blob_upload table add batch hash (#1677) 2025-06-11 18:21:14 +08:00
colin
4ee459a602 fix(gas-oracle & rollup-relayer): blob base fee calculation based on excessBlobGas field (#1676)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-06-10 17:38:21 +08:00
Morty
276385fd0a feat: add blob storage service (#1672)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: yiweichi <yiweichi@users.noreply.github.com>
2025-06-10 17:16:16 +08:00
colin
82fb15de3b fix(rollup-relayer): add transaction support to insert l2 blocks (#1674) 2025-06-05 21:27:27 +08:00
colin
5204ad50e0 fix(bridge-history): unclaimed withdrawals (#1671) 2025-05-28 00:54:27 +08:00
colin
f824fb0efc fix(coordinator): remove initialize euclid VKs (#1669) 2025-05-26 15:42:26 +08:00
colin
a55c7bdc77 refactor(coordinator): remove outdated logic (#1668)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-05-26 14:32:39 +08:00
Alejandro Ranchal-Pedrosa
47b1a037a9 Fix bug sequencer submission strategy and log commit price (#1664)
Co-authored-by: ranchalp <ranchalp@users.noreply.github.com>
2025-05-23 10:02:49 +01:00
Alejandro Ranchal-Pedrosa
ae34020c34 perf(relayer): submission strategy fix logs, use blocktime for submission strategy and log metrics. (#1663)
Co-authored-by: ranchalp <ranchalp@users.noreply.github.com>
Co-authored-by: Jonas Theis <4181434+jonastheis@users.noreply.github.com>
2025-05-22 20:38:10 +08:00
Jonas Theis
fa9fab6e98 fix(relayer): ProcessPendingBatches (#1661)
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2025-05-21 18:23:04 +02:00
125 changed files with 4661 additions and 10259 deletions


@@ -41,7 +41,7 @@ jobs:
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "common/libzkp/impl -> target"
workspaces: ". -> target"
# - name: Lint
# working-directory: 'common'
# run: |


@@ -112,7 +112,7 @@ jobs:
- name: Test coordinator packages
working-directory: 'coordinator'
run: |
# go test -exec "env LD_LIBRARY_PATH=${PWD}/verifier/lib" -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="scroll-tech/coordinator" -coverprofile=coverage.txt -covermode=atomic ./...
make libzkp
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic -tags mock_verifier ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3


@@ -99,6 +99,51 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
blob_uploader:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if it does not exist
env:
REPOSITORY: blob-uploader
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: blob-uploader
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/blob_uploader.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
rollup-db-cli:
runs-on: ubuntu-latest
steps:


@@ -38,6 +38,7 @@ jobs:
make dev_docker
make -C rollup mock_abi
make -C common/bytecode all
make -C coordinator/internal/logic/libzkp build
- name: Run integration tests
run: |
go test -v -tags="mock_prover mock_verifier" -p 1 -coverprofile=coverage.txt scroll-tech/integration-test/...

File diff suppressed because it is too large.

Cargo.toml (new file, +66 lines)

@@ -0,0 +1,66 @@
[workspace]
members = [
"crates/libzkp",
"crates/l2geth",
"crates/libzkp_c",
"crates/prover-bin",
]
resolver = "2"
[workspace.package]
authors = ["Scroll developers"]
edition = "2021"
homepage = "https://scroll.io"
readme = "README.md"
repository = "https://github.com/scroll-tech/scroll"
version = "4.5.8"
[workspace.dependencies]
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "29c99de", package = "scroll-zkvm-prover" }
scroll-zkvm-verifier-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "29c99de", package = "scroll-zkvm-verifier" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "29c99de" }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade", features = ["scroll"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade" }
metrics = "0.23.0"
metrics-util = "0.17"
metrics-tracing-context = "0.16.0"
anyhow = "1.0"
alloy = { version = "0.11", default-features = false }
alloy-primitives = { version = "0.8", default-features = false }
# also use this to trigger "serde" feature for primitives
alloy-serde = { version = "0.8", default-features = false }
rkyv = "0.8"
serde = { version = "1", default-features = false, features = ["derive"] }
serde_json = { version = "1.0" }
serde_derive = "1.0"
serde_with = "3.11.0"
itertools = "0.14"
tiny-keccak = "2.0"
tracing = "0.1"
eyre = "0.6"
bincode_v1 = { version = "1.3", package = "bincode"}
snark-verifier-sdk = { version = "0.2.0", default-features = false, features = [
"loader_halo2",
"halo2-axiom",
"display",
] }
once_cell = "1.20"
base64 = "0.22"
#TODO: upgrade when Feynman
vm-zstd = { git = "https://github.com/scroll-tech/rust-zstd-decompressor.git", tag = "v0.1.1" }
[patch.crates-io]
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v0.8.18-euclid-upgrade" }
ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-euclid-upgrade" }
[profile.maxperf]
inherits = "release"
lto = "fat"
codegen-units = 1


@@ -28,7 +28,7 @@ We welcome community contributions to this repository. Before you submit any iss
## Prerequisites
+ Go 1.21
+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
+ Rust (for version, see [rust-toolchain](./rust-toolchain))
+ Hardhat / Foundry
+ Docker


@@ -10,15 +10,15 @@ require (
github.com/go-redis/redis/v8 v8.11.5
github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.11.0
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 // It's a hotfix for the header hash incompatibility issue, pls change this with caution
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c // It's a hotfix for the header hash incompatibility issue, pls change this with caution
require (
dario.cat/mergo v1.0.0 // indirect


@@ -309,10 +309,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54 h1:qVpsVu1J91opTn6HYeuzWcBRVhQmPR8g05i+PlOjlI4=
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 h1:qfOaRflvH1vtnFWloB7BveKlP/VqYgMqLJ6e9TlBJ/8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6 h1:vb2XLvQwCf+F/ifP6P/lfeiQrHY6+Yb/E3R4KHXLqSE=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c h1:IpEBKM6O+xOK2qZVZztGxcobFXkKMb5hAkBEVzfXjVg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626101020-47bc86cd961c/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=


@@ -154,7 +154,7 @@ func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, s
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("tx_status = ?", types.TxStatusTypeSent)
db = db.Where("tx_status in (?)", []types.TxStatusType{types.TxStatusTypeSent, types.TxStatusTypeFailedRelayed, types.TxStatusTypeRelayTxReverted})
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
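The fix above widens what counts as "unclaimed": a withdrawal whose relay failed or whose relay transaction reverted becomes claimable again, not just one that was merely sent. A stand-alone sketch of the same predicate, with hypothetical local enum values (the repo's actual `types.TxStatusType` constants may differ):

```go
package main

import "fmt"

// TxStatusType stands in for the repo's types.TxStatusType; the values
// below are hypothetical and only illustrate the widened predicate.
type TxStatusType int

const (
	TxStatusTypeSent TxStatusType = iota
	TxStatusTypeFailedRelayed
	TxStatusTypeRelayTxReverted
)

// unclaimedStatuses mirrors the slice passed to the new GORM "tx_status in (?)" filter.
var unclaimedStatuses = []TxStatusType{
	TxStatusTypeSent, TxStatusTypeFailedRelayed, TxStatusTypeRelayTxReverted,
}

// isUnclaimed reports whether a withdrawal in the given status should still
// be shown as claimable.
func isUnclaimed(s TxStatusType) bool {
	for _, u := range unclaimedStatuses {
		if s == u {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isUnclaimed(TxStatusTypeFailedRelayed)) // true: failed relays are claimable again
}
```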


@@ -0,0 +1,30 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
# Build blob_uploader
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/blob_uploader/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/blob_uploader
# Pull blob_uploader into a second stage deploy ubuntu container
FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/blob_uploader /bin/
WORKDIR /app
ENTRYPOINT ["blob_uploader"]


@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*


@@ -3,11 +3,13 @@ FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.21-rust-nightly-2023-12-03
WORKDIR app
FROM chef as planner
COPY ./common/libzkp/impl/ .
COPY ./crates ./
COPY ./Cargo.* ./
COPY ./rust-toolchain ./
RUN cargo chef prepare --recipe-path recipe.json
FROM chef as zkp-builder
COPY ./common/libzkp/impl/rust-toolchain ./
COPY ./rust-toolchain ./
COPY --from=planner /app/recipe.json recipe.json
# run scripts to get openvm-gpu
COPY ./build/dockerfiles/coordinator-api/plonky3-gpu /plonky3-gpu
@@ -17,8 +19,9 @@ COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
RUN cargo chef cook --release --recipe-path recipe.json
COPY ./common/libzkp/impl .
RUN cargo build --release
COPY ./crates ./
COPY ./Cargo.* ./
RUN cargo build --release -p libzkp-c
# Download Go dependencies
@@ -37,9 +40,9 @@ RUN go mod download -x
# Build coordinator
FROM base as builder
COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/libzkp/lib/
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api && mv ./build/bin/coordinator_api /bin/coordinator_api
RUN mv coordinator/internal/logic/libzkp/lib /bin/
# Pull coordinator into a second stage deploy ubuntu container
FROM nvidia/cuda:11.7.1-runtime-ubuntu22.04


@@ -18,6 +18,6 @@ RUN cd /src/zkvm-prover && make prover
FROM ubuntu:24.04 AS runtime
COPY --from=builder /src/zkvm-prover/target/release/prover /usr/local/bin/
COPY --from=builder /src/target/release/prover /usr/local/bin/
ENTRYPOINT ["prover"]


@@ -41,7 +41,7 @@ func (g *gormLogger) Error(_ context.Context, msg string, data ...interface{}) {
func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
elapsed := time.Since(begin)
sql, rowsAffected := fc()
g.gethLogger.Debug("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
g.gethLogger.Trace("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
}
// InitDB init the db handler

File diff suppressed because it is too large.


@@ -1,11 +0,0 @@
.PHONY: help fmt clippy test test-ci test-all
build:
@cargo build --release
fmt:
@cargo fmt --all -- --check
clippy:
@cargo check --all-features
@cargo clippy --release -- -D warnings


@@ -1 +0,0 @@
nightly-2024-12-06


@@ -1,76 +0,0 @@
mod utils;
mod verifier;
use std::path::Path;
use crate::utils::{c_char_to_str, c_char_to_vec};
use libc::c_char;
use verifier::{TaskType, VerifierConfig};
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init(config: *const c_char) {
let config_str = c_char_to_str(config);
let verifier_config = serde_json::from_str::<VerifierConfig>(config_str).unwrap();
verifier::init(verifier_config);
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_chunk_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Chunk)
}
fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
let fork_name_str = c_char_to_str(fork_name);
let proof = c_char_to_vec(proof);
let verifier = verifier::get_verifier(fork_name_str);
if let Err(e) = verifier {
log::warn!("failed to get verifier, error: {:#}", e);
return 0 as c_char;
}
match verifier.unwrap().verify(task_type, proof) {
Err(e) => {
log::error!("{:?} verify failed, error: {:#}", task_type, e);
false as c_char
}
Ok(result) => result as c_char,
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Batch)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_bundle_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Bundle)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn dump_vk(fork_name: *const c_char, file: *const c_char) {
_dump_vk(fork_name, file);
}
fn _dump_vk(fork_name: *const c_char, file: *const c_char) {
let fork_name_str = c_char_to_str(fork_name);
let verifier = verifier::get_verifier(fork_name_str);
if let Ok(verifier) = verifier {
verifier.as_ref().dump_vk(Path::new(c_char_to_str(file)));
}
}


@@ -1,27 +0,0 @@
use std::{
ffi::CStr,
os::raw::c_char,
panic::{catch_unwind, AssertUnwindSafe},
};
pub(crate) fn c_char_to_str(c: *const c_char) -> &'static str {
let cstr = unsafe { CStr::from_ptr(c) };
cstr.to_str().unwrap()
}
pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec<u8> {
let cstr = unsafe { CStr::from_ptr(c) };
cstr.to_bytes().to_vec()
}
pub(crate) fn panic_catch<F: FnOnce() -> R, R>(f: F) -> Result<R, String> {
catch_unwind(AssertUnwindSafe(f)).map_err(|err| {
if let Some(s) = err.downcast_ref::<String>() {
s.to_string()
} else if let Some(s) = err.downcast_ref::<&str>() {
s.to_string()
} else {
format!("unable to get panic info {err:?}")
}
})
}


@@ -1,65 +0,0 @@
use super::{ProofVerifier, TaskType, VKDump};
use anyhow::Result;
use crate::utils::panic_catch;
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV1, ChunkVerifier};
use std::{fs::File, path::Path};
pub struct EuclidVerifier {
chunk_verifier: ChunkVerifier,
batch_verifier: BatchVerifier,
bundle_verifier: BundleVerifierEuclidV1,
}
impl EuclidVerifier {
pub fn new(assets_dir: &str) -> Self {
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
let config = Path::new(assets_dir).join("root-verifier-vm-config");
let exe = Path::new(assets_dir).join("root-verifier-committed-exe");
Self {
chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up chunk verifier"),
batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up batch verifier"),
bundle_verifier: BundleVerifierEuclidV1::setup(&config, &exe, &verifier_bin)
.expect("Setting up bundle verifier"),
}
}
}
impl ProofVerifier for EuclidVerifier {
fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
self.chunk_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
self.batch_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
self.bundle_verifier
.verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
}
})
.map_err(|err_str: String| anyhow::anyhow!(err_str))
}
fn dump_vk(&self, file: &Path) {
let f = File::create(file).expect("Failed to open file to dump VK");
let dump = VKDump {
chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
};
serde_json::to_writer(f, &dump).expect("Failed to dump VK");
}
}


@@ -1,12 +0,0 @@
// BatchVerifier is used to:
// - Verify a batch proof
// - Verify a bundle proof
void init(char* config);
char verify_batch_proof(char* proof, char* fork_name);
char verify_bundle_proof(char* proof, char* fork_name);
char verify_chunk_proof(char* proof, char* fork_name);
void dump_vk(char* fork_name, char* file);

common/testdata/blobdata.json (new vendored file, +4 lines)

File diff suppressed because one or more lines are too long.


@@ -326,3 +326,53 @@ func (s TxStatus) String() string {
return fmt.Sprintf("Unknown TxStatus (%d)", int32(s))
}
}
// BlobUploadStatus represents the status of a blob upload
type BlobUploadStatus int
const (
// BlobUploadStatusUndefined indicates an undefined status
BlobUploadStatusUndefined BlobUploadStatus = iota
// BlobUploadStatusPending indicates a pending upload status
BlobUploadStatusPending
// BlobUploadStatusUploaded indicates a successful upload status
BlobUploadStatusUploaded
// BlobUploadStatusFailed indicates a failed upload status
BlobUploadStatusFailed
)
func (s BlobUploadStatus) String() string {
switch s {
case BlobUploadStatusPending:
return "BlobUploadStatusPending"
case BlobUploadStatusUploaded:
return "BlobUploadStatusUploaded"
case BlobUploadStatusFailed:
return "BlobUploadStatusFailed"
default:
return fmt.Sprintf("Unknown BlobUploadStatus (%d)", int32(s))
}
}
// BlobStoragePlatform represents the platform a blob is uploaded to
type BlobStoragePlatform int
const (
// BlobStoragePlatformUndefined indicates an undefined platform
BlobStoragePlatformUndefined BlobStoragePlatform = iota
// BlobStoragePlatformS3 represents AWS S3
BlobStoragePlatformS3
// BlobStoragePlatformArweave represents the storage blockchain Arweave
BlobStoragePlatformArweave
)
func (s BlobStoragePlatform) String() string {
switch s {
case BlobStoragePlatformS3:
return "BlobStoragePlatformS3"
case BlobStoragePlatformArweave:
return "BlobStoragePlatformArweave"
default:
return fmt.Sprintf("Unknown BlobStoragePlatform (%d)", int32(s))
}
}


@@ -11,7 +11,6 @@ import (
)
const (
EuclidFork = "euclid"
EuclidV2Fork = "euclidV2"
EuclidV2ForkNameForProver = "euclidv2"
@@ -46,7 +45,7 @@ const (
// ChunkTaskDetail is a type containing ChunkTask detail for chunk task.
type ChunkTaskDetail struct {
// use one of the string of EuclidFork / EuclidV2Fork
// use one of the string of "euclidv1" / "euclidv2"
ForkName string `json:"fork_name"`
BlockHashes []common.Hash `json:"block_hashes"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
@@ -97,7 +96,7 @@ func (e *Byte48) UnmarshalJSON(input []byte) error {
// BatchTaskDetail is a type containing BatchTask detail.
type BatchTaskDetail struct {
// use one of the string of EuclidFork / EuclidV2Fork
// use one of the string of "euclidv1" / "euclidv2"
ForkName string `json:"fork_name"`
ChunkInfos []*ChunkInfo `json:"chunk_infos"`
ChunkProofs []*OpenVMChunkProof `json:"chunk_proofs"`
@@ -110,7 +109,7 @@ type BatchTaskDetail struct {
// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
type BundleTaskDetail struct {
// use one of the string of EuclidFork / EuclidV2Fork
// use one of the string of "euclidv1" / "euclidv2"
ForkName string `json:"fork_name"`
BatchProofs []*OpenVMBatchProof `json:"batch_proofs"`
BundleInfo *OpenVMBundleInfo `json:"bundle_info,omitempty"`

common/utils/blob.go (new file, +23 lines)

@@ -0,0 +1,23 @@
package utils
import (
"crypto/sha256"
"fmt"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)
// CalculateVersionedBlobHash calculates the kzg4844 versioned blob hash from a blob
func CalculateVersionedBlobHash(blob kzg4844.Blob) ([32]byte, error) {
// calculate kzg4844 commitment from blob
commit, err := kzg4844.BlobToCommitment(&blob)
if err != nil {
return [32]byte{}, fmt.Errorf("failed to get blob commitment, err: %w", err)
}
// calculate kzg4844 versioned blob hash from blob commitment
hasher := sha256.New()
vh := kzg4844.CalcBlobHashV1(hasher, &commit)
return vh, nil
}
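For reference, `kzg4844.CalcBlobHashV1` applies the EIP-4844 versioned-hash rule: hash the 48-byte KZG commitment with SHA-256, then overwrite the first byte with the version tag `0x01`. A minimal self-contained sketch of that rule (the all-zero commitment is a placeholder; a real one comes from `kzg4844.BlobToCommitment`):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// versionedHash shows the EIP-4844 rule that CalcBlobHashV1 implements:
// sha256(commitment) with the first byte replaced by the version tag.
func versionedHash(commitment []byte) [32]byte {
	h := sha256.Sum256(commitment)
	h[0] = 0x01 // VERSIONED_HASH_VERSION_KZG
	return h
}

func main() {
	placeholder := make([]byte, 48) // KZG commitments are 48 bytes; all-zero here
	fmt.Printf("%x\n", versionedHash(placeholder))
}
```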

common/utils/blob_test.go (new file, +51 lines)

@@ -0,0 +1,51 @@
package utils
import (
"encoding/hex"
"encoding/json"
"os"
"testing"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)
type BlobData struct {
VersionedBlobHash string `json:"versionedBlobHash"`
BlobData string `json:"blobData"`
}
// TestCalculateVersionedBlobHash tests the CalculateVersionedBlobHash function
func TestCalculateVersionedBlobHash(t *testing.T) {
// Read the test data
data, err := os.ReadFile("../testdata/blobdata.json")
if err != nil {
t.Fatalf("Failed to read blobdata.json: %v", err)
}
var blobData BlobData
if err := json.Unmarshal(data, &blobData); err != nil {
t.Fatalf("Failed to parse blobdata.json: %v", err)
}
blobBytes, err := hex.DecodeString(blobData.BlobData)
if err != nil {
t.Fatalf("Failed to decode blob data: %v", err)
}
// Convert []byte to kzg4844.Blob
var blob kzg4844.Blob
copy(blob[:], blobBytes)
// Calculate the hash
calculatedHashBytes, err := CalculateVersionedBlobHash(blob)
if err != nil {
t.Fatalf("Failed to calculate versioned blob hash: %v", err)
}
calculatedHash := hex.EncodeToString(calculatedHashBytes[:])
if calculatedHash != blobData.VersionedBlobHash {
t.Fatalf("Hash mismatch: got %s, want %s", calculatedHash, blobData.VersionedBlobHash)
}
}


@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.5.14"
var tag = "v4.5.25"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -23,7 +23,7 @@ var commit = func() string {
return "000000"
}()
// ZkVersion is commit-id of common/libzkp/impl/cargo.lock/scroll-prover and halo2, concatenated by a "-"
// ZkVersion is commit-id of cargo.lock/zkvm-prover and openvm, concatenated by a "-"
// The default `000000-000000` is set for integration test, and will be overwritten by coordinator's & prover's actual compilations (see their Makefiles).
var ZkVersion = "000000-000000"
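The comment above says the default is overwritten by the coordinator's and prover's actual compilations; the mechanism (visible in the coordinator Makefile diff below) is the Go linker's `-X` flag, which replaces a package-level string variable at link time. A tiny illustration with a hypothetical variable name:

```go
package main

import "fmt"

// zkVersion is a stand-in for version.ZkVersion; it can be rewritten at link
// time, e.g.:
//   go build -ldflags "-X main.zkVersion=abc1234-def5678"
var zkVersion = "000000-000000"

func main() {
	fmt.Println("zk version:", zkVersion)
}
```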


@@ -1,3 +1,4 @@
/build/bin
.idea
internal/logic/verifier/lib
internal/libzkp/lib/libzkp.so


@@ -2,25 +2,30 @@
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
LIBZKP_PATH=./internal/logic/libzkp/lib/libzkp.so
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
ZKVM_VERSION=$(shell grep -m 1 "zkvm-prover?" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
OPENVM_VERSION=$(shell grep -m 1 "openvm.git" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
ZKVM_VERSION=$(shell grep -m 1 "zkvm-prover?" ../Cargo.lock | cut -d "\#" -f2 | cut -c-7)
OPENVM_VERSION=$(shell grep -m 1 "openvm.git" ../Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
ZK_VERSION=${ZKVM_VERSION}-${OPENVM_VERSION}
test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
libzkp:
cd ../common/libzkp/impl && cargo clean && cargo build --release && cp ./target/release/libzkp.so ../interface/
rm -rf ./internal/logic/verifier/lib && cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
$(LIBZKP_PATH):
$(MAKE) -C ./internal/logic/libzkp build
coordinator_api: libzkp ## Builds the Coordinator api instance.
clean_libzkp:
$(MAKE) -C ./internal/logic/libzkp clean
libzkp: clean_libzkp $(LIBZKP_PATH)
coordinator_api: $(LIBZKP_PATH) ## Builds the Coordinator api instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
coordinator_cron:
@@ -29,8 +34,8 @@ coordinator_cron:
coordinator_tool:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool
coordinator_api_skip_libzkp:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
#coordinator_api_skip_libzkp:
# go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
mock_coordinator_api: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_api ./cmd/api
@@ -38,14 +43,13 @@ mock_coordinator_api: ## Builds the mocked Coordinator instance.
mock_coordinator_cron: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron
test-verifier: libzkp
test-verifier: $(LIBZKP_PATH)
go test -tags ffi -timeout 0 -v ./internal/logic/verifier
test-gpu-verifier: libzkp
test-gpu-verifier: $(LIBZKP_PATH)
go test -tags="gpu ffi" -timeout 0 -v ./internal/logic/verifier
lint: ## Lint the files - used for CI
cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
GOBIN=$(PWD)/build/bin go run ../build/lint.go
clean: ## Empty out the bin folder


@@ -9,8 +9,8 @@ require (
github.com/google/uuid v1.6.0
github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7
github.com/shopspring/decimal v1.3.1
github.com/stretchr/testify v1.10.0
github.com/urfave/cli/v2 v2.25.7


@@ -177,10 +177,10 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6 h1:vb2XLvQwCf+F/ifP6P/lfeiQrHY6+Yb/E3R4KHXLqSE=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7 h1:1rN1qocsQlOyk1VCpIEF1J5pfQbLAi1pnMZSLQS37jQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=


@@ -28,10 +28,16 @@ type ProverManager struct {
BundleCollectionTimeSec int `json:"bundle_collection_time_sec"`
}
// L2Endpoint holds the l2geth client configuration items
type L2Endpoint struct {
Url string `json:"endpoint"`
}
// L2 loads l2geth configuration items.
type L2 struct {
// l2geth chain_id.
ChainID uint64 `json:"chain_id"`
ChainID uint64 `json:"chain_id"`
Endpoint *L2Endpoint `json:"l2geth"`
}
// Auth provides the auth coordinator
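A sketch of decoding the new `L2` section above, reusing the field tags from the diff; the chain ID and endpoint URL are illustrative values only:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// L2Endpoint and L2 mirror the structs added in the diff.
type L2Endpoint struct {
	Url string `json:"endpoint"`
}

type L2 struct {
	ChainID  uint64      `json:"chain_id"`
	Endpoint *L2Endpoint `json:"l2geth"`
}

func main() {
	// Illustrative values; the real config comes from the coordinator's JSON file.
	raw := `{"chain_id": 534352, "l2geth": {"endpoint": "http://localhost:8545"}}`
	var cfg L2
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.ChainID, cfg.Endpoint.Url)
}
```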


@@ -28,6 +28,17 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
log.Info("verifier created", "openVmVerifier", vf.OpenVMVkMap)
// TODO: enable this when the libzkp has been updated
/*l2cfg := cfg.L2.Endpoint
if l2cfg == nil {
panic("l2geth is not specified")
}
l2cfgBytes, err := json.Marshal(l2cfg)
if err != nil {
panic(err)
}
libzkp.InitL2geth(string(l2cfgBytes))*/
Auth = NewAuthController(db, cfg, vf)
GetTask = NewGetTaskController(cfg, chainCfg, db, reg)
SubmitProof = NewSubmitProofController(cfg, chainCfg, db, vf, reg)


@@ -9,7 +9,6 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
@@ -34,7 +33,6 @@ func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *Logi
var highHardForks []string
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.HighVersionCircuit.ForkName)
highHardForks = append(highHardForks, message.EuclidFork, message.EuclidV2Fork)
proverVersionHardForkMap[cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion] = highHardForks
return &LoginLogic{


@@ -0,0 +1,17 @@
.PHONY: help fmt clippy test test-ci test-all
build:
@cargo build --release -p libzkp-c
@mkdir -p lib
@cp -f ../../../../target/release/libzkp.so lib/
fmt:
@cargo fmt --all -- --check
clean:
@cargo clean --release -p libzkp -p libzkp-c -p l2geth
@rm -f lib/libzkp.so
clippy:
@cargo check --release --all-features
@cargo clippy --release -- -D warnings


@@ -20,7 +20,7 @@ function build_test_bins() {
cd $REPO/coordinator
make libzkp
go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier
cd $REPO/common/libzkp
cd $REPO/coordinator/internal/logic/libzkp
}
build_test_bins


@@ -0,0 +1,145 @@
package libzkp
/*
#cgo LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib -Wl,-rpath=${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#include <stdlib.h>
#include "libzkp.h"
*/
import "C" //nolint:typecheck
import (
"fmt"
"os"
"unsafe"
"scroll-tech/common/types/message"
)
// Helper function to convert a Go string to a C string; the caller must free it via freeCString
func goToCString(s string) *C.char {
return C.CString(s)
}
// Helper function to free C string
func freeCString(s *C.char) {
C.free(unsafe.Pointer(s))
}
// Initialize the verifier
func InitVerifier(configJSON string) {
cConfig := goToCString(configJSON)
defer freeCString(cConfig)
C.init_verifier(cConfig)
}
// Initialize the l2geth client
func InitL2geth(configJSON string) {
cConfig := goToCString(configJSON)
defer freeCString(cConfig)
C.init_l2geth(cConfig)
}
// Verify a chunk proof
func VerifyChunkProof(proofData, forkName string) bool {
cProof := goToCString(proofData)
cForkName := goToCString(forkName)
defer freeCString(cProof)
defer freeCString(cForkName)
result := C.verify_chunk_proof(cProof, cForkName)
return result != 0
}
// Verify a batch proof
func VerifyBatchProof(proofData, forkName string) bool {
cProof := goToCString(proofData)
cForkName := goToCString(forkName)
defer freeCString(cProof)
defer freeCString(cForkName)
result := C.verify_batch_proof(cProof, cForkName)
return result != 0
}
// Verify a bundle proof
func VerifyBundleProof(proofData, forkName string) bool {
cProof := goToCString(proofData)
cForkName := goToCString(forkName)
defer freeCString(cProof)
defer freeCString(cForkName)
result := C.verify_bundle_proof(cProof, cForkName)
return result != 0
}
// TaskType enum values matching the Rust enum
const (
TaskTypeChunk = 0
TaskTypeBatch = 1
TaskTypeBundle = 2
)
func fromMessageTaskType(taskType int) int {
switch message.ProofType(taskType) {
case message.ProofTypeChunk:
return TaskTypeChunk
case message.ProofTypeBatch:
return TaskTypeBatch
case message.ProofTypeBundle:
return TaskTypeBundle
default:
panic(fmt.Sprintf("unsupported proof type: %d", taskType))
}
}
// Generate a universal task
func GenerateUniversalTask(taskType int, taskJSON, forkName string) (bool, string, string, []byte) {
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, forkName)
}
// Generate wrapped proof
func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {
cProofJSON := goToCString(proofJSON)
cMetadata := goToCString(metadata)
defer freeCString(cProofJSON)
defer freeCString(cMetadata)
// Create a C array from Go slice
var cVkData *C.char
if len(vkData) > 0 {
cVkData = (*C.char)(unsafe.Pointer(&vkData[0]))
}
resultPtr := C.gen_wrapped_proof(cProofJSON, cMetadata, cVkData, C.size_t(len(vkData)))
if resultPtr == nil {
return ""
}
// Convert result to Go string and free C memory
result := C.GoString(resultPtr)
C.release_string(resultPtr)
return result
}
// Dumps a verification key to a file
func DumpVk(forkName, filePath string) error {
cForkName := goToCString(forkName)
cFilePath := goToCString(filePath)
defer freeCString(cForkName)
defer freeCString(cFilePath)
// Call the C function to dump the verification key
C.dump_vk(cForkName, cFilePath)
// Check if the file was created successfully
// Note: The C function doesn't return an error code, so we check if the file exists
if _, err := os.Stat(filePath); os.IsNotExist(err) {
return fmt.Errorf("failed to dump verification key: file %s was not created", filePath)
}
return nil
}
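A hypothetical usage sketch of this wrapper. It assumes `libzkp.so` has been built (see the Makefile under `internal/logic/libzkp`) and is on the linker path; the config and proof payloads are placeholders, not the real schemas:

```go
package main

import (
	"fmt"

	"scroll-tech/coordinator/internal/logic/libzkp"
)

func main() {
	// Placeholder config; the real schema is defined by the Rust VerifierConfig.
	libzkp.InitVerifier(`{}`)

	// Placeholder proof payload; a real one comes from a prover submission.
	ok := libzkp.VerifyChunkProof(`{}`, "euclidv2")
	fmt.Println("chunk proof verified:", ok)
}
```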


@@ -0,0 +1,48 @@
// Verifier is used to:
// - Verify a batch proof
// - Verify a bundle proof
// - Verify a chunk proof
#ifndef LIBZKP_H
#define LIBZKP_H
#include <stddef.h> // For size_t
// Initialize the verifier with configuration
void init_verifier(char* config);
// Initialize the l2geth with configuration
void init_l2geth(char* config);
// Verify proofs - returns non-zero for success, zero for failure
char verify_batch_proof(char* proof, char* fork_name);
char verify_bundle_proof(char* proof, char* fork_name);
char verify_chunk_proof(char* proof, char* fork_name);
// Dump verification key to file
void dump_vk(char* fork_name, char* file);
// The result struct to hold data from handling a proving task
typedef struct {
char ok;
char* universal_task;
char* metadata;
char expected_pi_hash[32];
} HandlingResult;
// Generate a universal task based on task type and input JSON
// Returns a struct containing task data, metadata, and expected proof hash
HandlingResult gen_universal_task(int task_type, char* task, char* fork_name);
// Release memory allocated for a HandlingResult returned by gen_universal_task
void release_task_result(HandlingResult result);
// Generate a wrapped proof from the universal prover output and metadata
// Returns a JSON string containing the wrapped proof, or NULL on error
// The caller must call release_string on the returned pointer when done
char* gen_wrapped_proof(char* proof_json, char* metadata, char* vk, size_t vk_len);
// Release memory allocated for a string returned by gen_wrapped_proof
void release_string(char* string_ptr);
#endif /* LIBZKP_H */


@@ -0,0 +1,42 @@
//go:build mock_verifier
package libzkp
import (
"encoding/json"
"fmt"
"scroll-tech/common/types/message"
"github.com/scroll-tech/go-ethereum/common"
)
func generateUniversalTask(taskType int, taskJSON, forkName string) (bool, string, string, []byte) {
fmt.Printf("call mocked generate universal task %d, taskJson %s\n", taskType, taskJSON)
var metadata interface{}
switch taskType {
case TaskTypeChunk:
metadata = struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}
case TaskTypeBatch:
metadata = struct {
BatchInfo *message.OpenVMBatchInfo `json:"batch_info"`
BatchHash common.Hash `json:"batch_hash"`
}{BatchInfo: &message.OpenVMBatchInfo{}}
case TaskTypeBundle:
metadata = struct {
BundleInfo *message.OpenVMBundleInfo `json:"bundle_info"`
BundlePIHash common.Hash `json:"bundle_pi_hash"`
}{BundleInfo: &message.OpenVMBundleInfo{}}
}
encodeData, err := json.Marshal(metadata)
if err != nil {
fmt.Println("mock encoding json fail:", err)
return false, "", "", nil
}
return true, "UniversalTask data is not parsed", string(encodeData), []byte{0}
}


@@ -0,0 +1,36 @@
//go:build !mock_verifier
package libzkp
/*
#include <stdlib.h>
#include "libzkp.h"
*/
import "C" //nolint:typecheck
func generateUniversalTask(taskType int, taskJSON, forkName string) (bool, string, string, []byte) {
cTask := goToCString(taskJSON)
cForkName := goToCString(forkName)
defer freeCString(cTask)
defer freeCString(cForkName)
result := C.gen_universal_task(C.int(taskType), cTask, cForkName)
defer C.release_task_result(result)
// Check if the operation was successful
if result.ok == 0 {
return false, "", "", nil
}
// Convert C strings to Go strings
universalTask := C.GoString(result.universal_task)
metadata := C.GoString(result.metadata)
// Convert C array to Go slice
piHash := make([]byte, 32)
for i := 0; i < 32; i++ {
piHash[i] = byte(result.expected_pi_hash[i])
}
return true, universalTask, metadata, piHash
}


@@ -161,19 +161,31 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
AssignedAt: utils.NowUTC(),
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, batchTask, hardForkName)
if err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("format prover task failure", "task_id", batchTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
if getTaskParameter.Universal {
var metadata []byte
taskMsg, metadata, err = bp.applyUniversal(taskMsg)
if err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("Generate universal prover task failure", "task_id", batchTask.Hash, "type", "batch")
return nil, ErrCoordinatorInternalFailure
}
proverTask.Metadata = metadata
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
// note: the UUID is set as a side effect of InsertProverTask
taskMsg.UUID = proverTask.UUID.String()
bp.batchTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
bp.batchTaskGetTaskProver.With(prometheus.Labels{
@@ -233,7 +245,6 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
}
taskMsg := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBatch),
TaskData: string(chunkProofsBytes),
@@ -266,7 +277,7 @@ func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*
dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
switch dbBatchCodecVersion {
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7:
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7, encoding.CodecV8:
default:
return taskDetail, nil
}
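The Assign changes above move `InsertProverTask` after `formatProverTask`/`applyUniversal`: the metadata returned by the universal step must be attached to the task before it is persisted, and the UUID only exists after the insert. A stand-alone sketch of that invariant with stand-in types (all names here are illustrative, not the repo's real signatures):

```go
package main

import "fmt"

// proverTask stands in for orm.ProverTask; only the fields relevant to the
// ordering are shown.
type proverTask struct {
	UUID     string
	Metadata []byte
}

// applyUniversal stands in for the coordinator's helper: it rewrites the task
// payload and returns metadata that must be persisted with the task.
func applyUniversal(taskData string) (string, []byte, error) {
	return "universal:" + taskData, []byte(`{"meta":true}`), nil
}

// insert stands in for InsertProverTask: persisting assigns the UUID.
func insert(t *proverTask) { t.UUID = "generated-on-insert" }

func main() {
	task := proverTask{}
	data, meta, err := applyUniversal("batch-task")
	if err != nil {
		return
	}
	task.Metadata = meta // must be set before insert so it is persisted
	insert(&task)        // UUID exists only from this point on
	fmt.Println(task.UUID, data, string(task.Metadata))
}
```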


@@ -159,19 +159,33 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
AssignedAt: utils.NowUTC(),
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("insert bundle prover task info fail", "task_id", bundleTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, hardForkName)
if err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("format bundle prover task failure", "task_id", bundleTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
if getTaskParameter.Universal {
var metadata []byte
taskMsg, metadata, err = bp.applyUniversal(taskMsg)
if err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("Generate universal prover task failure", "task_id", bundleTask.Hash, "type", "bundle")
return nil, ErrCoordinatorInternalFailure
}
// bundle proofs require a snark
taskMsg.UseSnark = true
proverTask.Metadata = metadata
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("insert bundle prover task info fail", "task_id", bundleTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
// note: the UUID is set as a side effect of InsertProverTask
taskMsg.UUID = proverTask.UUID.String()
bp.bundleTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
bp.bundleTaskGetTaskProver.With(prometheus.Labels{
@@ -237,7 +251,6 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
}
taskMsg := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBundle),
TaskData: string(batchProofsBytes),


@@ -157,12 +157,6 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
AssignedAt: utils.NowUTC(),
}
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("insert chunk prover task fail", "task_id", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, chunkTask, hardForkName)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
@@ -170,6 +164,25 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, ErrCoordinatorInternalFailure
}
if getTaskParameter.Universal {
var metadata []byte
taskMsg, metadata, err = cp.applyUniversal(taskMsg)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("Generate universal prover task failure", "task_id", chunkTask.Hash, "type", "chunk")
return nil, ErrCoordinatorInternalFailure
}
proverTask.Metadata = metadata
}
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("insert chunk prover task fail", "task_id", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
// note: the UUID is set as a side effect of InsertProverTask
taskMsg.UUID = proverTask.UUID.String()
cp.chunkTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
cp.chunkTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
@@ -207,7 +220,6 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
}
proverTaskSchema := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeChunk),
TaskData: string(taskDetailBytes),


@@ -16,6 +16,7 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/libzkp"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
@@ -185,6 +186,16 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, e
return &ptc, nil
}
func (b *BaseProverTask) applyUniversal(schema *coordinatorType.GetTaskSchema) (*coordinatorType.GetTaskSchema, []byte, error) {
ok, uTaskData, metadata, _ := libzkp.GenerateUniversalTask(schema.TaskType, schema.TaskData, schema.HardForkName)
if !ok {
return nil, nil, fmt.Errorf("can not generate universal task, see coordinator log for the reason")
}
schema.TaskData = uTaskData
return schema, []byte(metadata), nil
}
func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
getTaskCounterInitOnce.Do(func() {
getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{


@@ -19,6 +19,8 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/libzkp"
"scroll-tech/coordinator/internal/logic/provertask"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
@@ -69,6 +71,10 @@ type ProofReceiverLogic struct {
validateFailureProverTaskStatusNotOk prometheus.Counter
validateFailureProverTaskTimeout prometheus.Counter
validateFailureProverTaskHaveVerifier prometheus.Counter
ChunkTask provertask.ProverTask
BundleTask provertask.ProverTask
BatchTask provertask.ProverTask
}
// NewSubmitProofReceiverLogic create a proof receiver logic
@@ -168,6 +174,15 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
if getHardForkErr != nil {
return ErrGetHardForkNameFailed
}
if proofParameter.Universal {
if len(proverTask.Metadata) == 0 {
return errors.New("can not re-wrapping proof: no metadata has been recorded in advance")
}
proofParameter.Proof = libzkp.GenerateWrappedProof(proofParameter.Proof, string(proverTask.Metadata), []byte{})
if proofParameter.Proof == "" {
return errors.New("can not re-wrapping proof, see coordinator log for reason")
}
}
switch message.ProofType(proofParameter.TaskType) {
case message.ProofTypeChunk:


@@ -2,30 +2,23 @@
package verifier
/*
#cgo LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#include <stdlib.h>
#include "./lib/libzkp.h"
*/
import "C" //nolint:typecheck
import (
"encoding/base64"
"encoding/json"
"io"
"os"
"path"
"unsafe"
"path/filepath"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/libzkp"
)
// This struct maps to `CircuitConfig` in common/libzkp/impl/src/verifier.rs
// This struct maps to `CircuitConfig` in libzkp/impl/src/verifier.rs
// Define a brand new struct here is to eliminate side effects in case fields
// in `*config.CircuitConfig` being changed
type rustCircuitConfig struct {
@@ -40,7 +33,7 @@ func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
}
}
// This struct maps to `VerifierConfig` in common/libzkp/impl/src/verifier.rs
// This struct maps to `VerifierConfig` in coordinator/internal/logic/libzkp/impl/src/verifier.rs
// Define a brand new struct here is to eliminate side effects in case fields
// in `*config.VerifierConfig` being changed
type rustVerifierConfig struct {
@@ -67,22 +60,13 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
return nil, err
}
configStr := C.CString(string(configBytes))
defer func() {
C.free(unsafe.Pointer(configStr))
}()
C.init(configStr)
libzkp.InitVerifier(string(configBytes))
v := &Verifier{
cfg: cfg,
OpenVMVkMap: make(map[string]struct{}),
}
if err := v.loadOpenVMVks(message.EuclidFork); err != nil {
return nil, err
}
if err := v.loadOpenVMVks(message.EuclidV2Fork); err != nil {
return nil, err
}
@@ -98,15 +82,7 @@ func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName st
}
log.Info("Start to verify batch proof", "forkName", forkName)
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
verified := C.verify_batch_proof(proofStr, forkNameStr)
return verified != 0, nil
return libzkp.VerifyBatchProof(string(buf), forkName), nil
}
// VerifyChunkProof Verify a ZkProof by marshaling it and sending it to the Verifier.
@@ -117,15 +93,8 @@ func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName st
}
log.Info("Start to verify chunk proof", "forkName", forkName)
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
verified := C.verify_chunk_proof(proofStr, forkNameStr)
return verified != 0, nil
return libzkp.VerifyChunkProof(string(buf), forkName), nil
}
// VerifyBundleProof Verify a ZkProof for a bundle of batches, by marshaling it and verifying it via the EVM verifier.
@@ -135,20 +104,13 @@ func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName
return false, err
}
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
log.Info("Start to verify bundle proof ...")
verified := C.verify_bundle_proof(proofStr, forkNameStr)
return verified != 0, nil
return libzkp.VerifyBundleProof(string(buf), forkName), nil
}
func (v *Verifier) readVK(filePat string) (string, error) {
f, err := os.Open(filePat)
func (v *Verifier) ReadVK(filePat string) (string, error) {
f, err := os.Open(filepath.Clean(filePat))
if err != nil {
return "", err
}
@@ -161,20 +123,12 @@ func (v *Verifier) readVK(filePat string) (string, error) {
func (v *Verifier) loadOpenVMVks(forkName string) error {
tempFile := path.Join(os.TempDir(), "openVmVk.json")
defer func() {
if err := os.Remove(tempFile); err != nil {
log.Error("failed to remove temp file", "err", err)
}
}()
err := libzkp.DumpVk(forkName, tempFile)
if err != nil {
return err
}
forkNameCStr := C.CString(forkName)
defer C.free(unsafe.Pointer(forkNameCStr))
tempFileCStr := C.CString(tempFile)
defer C.free(unsafe.Pointer(tempFileCStr))
C.dump_vk(forkNameCStr, tempFileCStr)
f, err := os.Open(tempFile)
f, err := os.Open(filepath.Clean(tempFile))
if err != nil {
return err
}
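The refactored `loadOpenVMVks` keeps its dump-to-temp-file shape: ask libzkp to write the VK JSON to a temporary path, read it back, and remove the file afterwards. A generic sketch of the pattern, with the `DumpVk` call replaced by a stand-in write:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	tmp := filepath.Join(os.TempDir(), "openVmVk.json")
	defer os.Remove(tmp) // clean up, like the deferred remove in loadOpenVMVks

	// Stand-in for libzkp.DumpVk(forkName, tmp), which writes the VK JSON.
	if err := os.WriteFile(tmp, []byte(`{"chunk_vk":"placeholder"}`), 0o600); err != nil {
		panic(err)
	}

	data, err := os.ReadFile(filepath.Clean(tmp))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}
```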


@@ -11,7 +11,7 @@ import (
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
)
@@ -58,25 +58,25 @@ func TestFFI(t *testing.T) {
t.Log("Verified batch proof")
}
func readBatchProof(filePat string, as *assert.Assertions) *types.OpenVMBatchProof {
func readBatchProof(filePat string, as *assert.Assertions) *message.OpenVMBatchProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)
proof := &types.OpenVMBatchProof{}
proof := &message.OpenVMBatchProof{}
as.NoError(json.Unmarshal(byt, proof))
return proof
}
func readChunkProof(filePat string, as *assert.Assertions) *types.OpenVMChunkProof {
func readChunkProof(filePat string, as *assert.Assertions) *message.OpenVMChunkProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)
proof := &types.OpenVMChunkProof{}
proof := &message.OpenVMChunkProof{}
as.NoError(json.Unmarshal(byt, proof))
return proof


@@ -37,6 +37,7 @@ type ProverTask struct {
FailureType int16 `json:"failure_type" gorm:"column:failure_type;default:0"`
Reward decimal.Decimal `json:"reward" gorm:"column:reward;default:0;type:decimal(78)"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
Metadata []byte `json:"metadata" gorm:"column:metadata;default:NULL"`
AssignedAt time.Time `json:"assigned_at" gorm:"assigned_at"`
// metadata


@@ -4,6 +4,8 @@ package types
type GetTaskParameter struct {
ProverHeight uint64 `form:"prover_height" json:"prover_height"`
TaskTypes []int `form:"task_types" json:"task_types"`
TaskID string `form:"task_id,omitempty" json:"task_id,omitempty"`
Universal bool `form:"universal,omitempty" json:"universal,omitempty"`
}
// GetTaskSchema the schema data return to prover for get prover task
@@ -11,6 +13,7 @@ type GetTaskSchema struct {
UUID string `json:"uuid"`
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
UseSnark bool `json:"use_snark,omitempty"`
TaskData string `json:"task_data"`
HardForkName string `json:"hard_fork_name"`
}
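With these fields, a prover opting into universal tasks sends a request body like the one used by the integration test further below. A minimal sketch of building that body (struct redeclared locally with only the JSON tags from the diff; the task type value is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// GetTaskParameter is redeclared locally; tags follow the diff above.
type GetTaskParameter struct {
	ProverHeight uint64 `json:"prover_height"`
	TaskTypes    []int  `json:"task_types"`
	TaskID       string `json:"task_id,omitempty"`
	Universal    bool   `json:"universal,omitempty"`
}

func main() {
	body, err := json.Marshal(GetTaskParameter{ProverHeight: 100, TaskTypes: []int{2}, Universal: true})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // {"prover_height":100,"task_types":[2],"universal":true}
}
```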


@@ -2,6 +2,7 @@ package types
import (
"fmt"
"scroll-tech/common/types/message"
)


@@ -7,6 +7,7 @@ type SubmitProofParameter struct {
TaskType int `form:"task_type" json:"task_type" binding:"required"`
Status int `form:"status" json:"status"`
Proof string `form:"proof" json:"proof"`
Universal bool `form:"universal,omitempty" json:"universal,omitempty"`
FailureType int `form:"failure_type" json:"failure_type"`
FailureMsg string `form:"failure_msg" json:"failure_msg"`
}


@@ -161,7 +161,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_types": []int{int(proofType)}}).
SetBody(map[string]interface{}{"universal": true, "prover_height": 100, "task_types": []int{int(proofType)}}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err)
@@ -191,7 +191,7 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType)}).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "universal": true}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err)
@@ -207,32 +207,33 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
}
var proof []byte
switch message.ProofType(proverTaskSchema.TaskType) {
case message.ProofTypeChunk:
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{}, MetaData: struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case message.ProofTypeBatch:
encodeData, err := json.Marshal(message.OpenVMBatchProof{VmProof: &message.OpenVMProof{}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
}
if proofStatus == verifiedFailed {
switch proverTaskSchema.TaskType {
case int(message.ProofTypeChunk):
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)}, MetaData: struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}})
if proofStatus != verifiedFailed {
switch message.ProofType(proverTaskSchema.TaskType) {
case message.ProofTypeChunk:
fallthrough
case message.ProofTypeBatch:
encodeData, err := json.Marshal(&message.OpenVMProof{})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case int(message.ProofTypeBatch):
encodeData, err := json.Marshal(&message.OpenVMBatchProof{VmProof: &message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)}})
case message.ProofTypeBundle:
encodeData, err := json.Marshal(&message.OpenVMEvmProof{})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
}
} else {
// in "verifiedFailed" status, we purpose the mockprover submit proof but not valid
switch message.ProofType(proverTaskSchema.TaskType) {
case message.ProofTypeChunk:
fallthrough
case message.ProofTypeBatch:
encodeData, err := json.Marshal(&message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case message.ProofTypeBundle:
encodeData, err := json.Marshal(&message.OpenVMEvmProof{Proof: []byte(verifier.InvalidTestProof)})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
@@ -240,11 +241,12 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
}
submitProof := types.SubmitProofParameter{
UUID: proverTaskSchema.UUID,
TaskID: proverTaskSchema.TaskID,
TaskType: proverTaskSchema.TaskType,
Status: int(proofMsgStatus),
Proof: string(proof),
UUID: proverTaskSchema.UUID,
TaskID: proverTaskSchema.TaskID,
TaskType: proverTaskSchema.TaskType,
Status: int(proofMsgStatus),
Proof: string(proof),
Universal: true,
}
token, authErrCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(message.ProofType(proverTaskSchema.TaskType))})

26
crates/l2geth/Cargo.toml Normal file
View File

@@ -0,0 +1,26 @@
[package]
name = "l2geth"
version.workspace = true
edition.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
tokio = {version = "1", features = ["rt-multi-thread"]}
async-trait = "0.1"
url = ">=2.5.3"
libzkp = { path = "../libzkp" }
alloy = { workspace = true, features = ["provider-http", "transport-http", "reqwest", "reqwest-rustls-tls", "json-rpc"] }
sbv-primitives = { workspace = true, features = ["scroll"] }
sbv-utils = { workspace = true, features = ["scroll"] }
eyre.workspace = true
base64.workspace = true
serde.workspace = true
serde_derive.workspace = true
serde_json = { workspace = true, features = ["raw_value"]}
tracing.workspace = true

19
crates/l2geth/src/lib.rs Normal file
View File

@@ -0,0 +1,19 @@
pub mod rpc_client;
pub use rpc_client::RpcConfig;
use std::sync::{Arc, OnceLock};
static GLOBAL_L2GETH_CLI: OnceLock<Arc<rpc_client::RpcClientCore>> = OnceLock::new();
pub fn init(config: &str) -> eyre::Result<()> {
let cfg: RpcConfig = serde_json::from_str(config)?;
GLOBAL_L2GETH_CLI.get_or_init(|| Arc::new(rpc_client::RpcClientCore::create(&cfg).unwrap()));
Ok(())
}
pub fn get_client() -> rpc_client::RpcClient<'static> {
GLOBAL_L2GETH_CLI
.get()
.expect("must has been inited")
.get_client()
}
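
For orientation, a minimal usage sketch of this global client (hypothetical caller; the endpoint value is a placeholder and only the required `endpoint` field of RpcConfig is set):

fn init_once_sketch() -> eyre::Result<()> {
    // first call wins: configs passed to later init calls are ignored by get_or_init
    l2geth::init(r#"{"endpoint": "http://localhost:8545"}"#)?;
    let _client = l2geth::get_client(); // cheap Copy handle borrowing the global core
    Ok(())
}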

View File

@@ -0,0 +1,241 @@
use alloy::{
providers::{Provider, ProviderBuilder, RootProvider},
rpc::client::ClientBuilder,
transports::layers::RetryBackoffLayer,
};
use eyre::Result;
use libzkp::tasks::ChunkInterpreter;
use sbv_primitives::types::Network;
use serde::{Deserialize, Serialize};
fn default_max_retry() -> u32 {
10
}
fn default_backoff() -> u64 {
100
}
fn default_cups() -> u64 {
100
}
fn default_workers() -> usize {
4
}
fn default_max_concurrency() -> usize {
10
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct RpcConfig {
#[serde(alias = "endpoint")]
pub rpc_url: String,
// The number of worker threads used in the runtime, default 4
#[serde(default = "default_workers")]
pub workers: usize,
// The max number of blocking threads used to handle RPC tasks, default 10
#[serde(default = "default_max_concurrency")]
pub max_concurrency: usize,
// Retry parameters
#[serde(default = "default_max_retry")]
pub max_retry: u32,
// backoff duration in milliseconds, default 100ms
#[serde(default = "default_backoff")]
pub backoff: u64,
// compute units per second: default 100
#[serde(default = "default_cups")]
pub cups: u64,
}
/// An RPC client which carries its own async runtime,
/// so it can run in blocking mode (i.e. inside a dynamic library without a global entry point)
pub struct RpcClientCore {
/// rpc provider
provider: RootProvider<Network>,
rt: tokio::runtime::Runtime,
}
#[derive(Clone, Copy)]
pub struct RpcClient<'a> {
provider: &'a RootProvider<Network>,
handle: &'a tokio::runtime::Handle,
}
impl RpcClientCore {
pub fn create(config: &RpcConfig) -> Result<Self> {
let rpc = url::Url::parse(&config.rpc_url)?;
tracing::info!("Using RPC: {}", rpc);
// note we MUST use a multi-threaded runtime since we have no main thread driving it;
// in each of our methods we can acquire a handle of the runtime to resolve one
// or more async tasks
let rt = tokio::runtime::Builder::new_multi_thread()
.worker_threads(config.workers)
.max_blocking_threads(config.max_concurrency)
.enable_all()
.build()?;
let retry_layer = RetryBackoffLayer::new(config.max_retry, config.backoff, config.cups);
let client = ClientBuilder::default().layer(retry_layer).http(rpc);
Ok(Self {
provider: ProviderBuilder::<_, _, Network>::default().on_client(client),
rt,
})
}
pub fn get_client(&self) -> RpcClient {
RpcClient {
provider: &self.provider,
handle: self.rt.handle(),
}
}
}
impl ChunkInterpreter for RpcClient<'_> {
fn try_fetch_block_witness(
&self,
block_hash: sbv_primitives::B256,
prev_witness: Option<&sbv_primitives::types::BlockWitness>,
) -> Result<sbv_primitives::types::BlockWitness> {
async fn fetch_witness_async(
provider: &RootProvider<Network>,
block_hash: sbv_primitives::B256,
prev_witness: Option<&sbv_primitives::types::BlockWitness>,
) -> Result<sbv_primitives::types::BlockWitness> {
use alloy::network::primitives::BlockTransactionsKind;
use sbv_utils::{rpc::ProviderExt, witness::WitnessBuilder};
let chain_id = provider.get_chain_id().await?;
let block = provider
.get_block_by_hash(block_hash, BlockTransactionsKind::Full)
.await?
.ok_or_else(|| eyre::eyre!("Block not found"))?;
let number = block.header.number;
if number == 0 {
eyre::bail!("no number in header or use block 0");
}
let prev_state_root = if let Some(witness) = prev_witness {
if witness.header.number != number - 1 {
eyre::bail!(
"the ref witness is not the previous block, expected {} get {}",
number - 1,
witness.header.number,
);
}
witness.header.state_root
} else {
provider
.scroll_disk_root((number - 1).into())
.await?
.disk_root
};
let witness = WitnessBuilder::new()
.block(block)
.chain_id(chain_id)
.execution_witness(provider.debug_execution_witness(number.into()).await?)
.state_root(provider.scroll_disk_root(number.into()).await?.disk_root)?
.prev_state_root(prev_state_root)
.build()?;
Ok(witness)
}
tracing::debug!("fetch witness for {block_hash}");
self.handle
.block_on(fetch_witness_async(self.provider, block_hash, prev_witness))
}
fn try_fetch_storage_node(
&self,
node_hash: sbv_primitives::B256,
) -> Result<sbv_primitives::Bytes> {
async fn fetch_storage_node_async(
provider: &RootProvider<Network>,
node_hash: sbv_primitives::B256,
) -> Result<sbv_primitives::Bytes> {
let ret = provider
.client()
.request::<_, sbv_primitives::Bytes>("debug_dbGet", (node_hash,))
.await?;
Ok(ret)
}
tracing::debug!("fetch storage node for {node_hash}");
self.handle
.block_on(fetch_storage_node_async(self.provider, node_hash))
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloy::primitives::hex;
use sbv_primitives::B256;
use std::env;
fn create_config_from_env() -> RpcConfig {
let endpoint =
env::var("L2GETH_ENDPOINT").expect("L2GETH_ENDPOINT environment variable must be set");
let config_json = format!(r#"{{"endpoint": "{}"}}"#, endpoint);
serde_json::from_str(&config_json).expect("Failed to parse RPC config")
}
#[test]
#[ignore = "Requires L2GETH_ENDPOINT environment variable"]
fn test_try_fetch_block_witness() {
let config = create_config_from_env();
let client_core = RpcClientCore::create(&config).expect("Failed to create RPC client");
let client = client_core.get_client();
// latest - 1 block in 2025.6.15
let block_hash = B256::from(
hex::const_decode_to_array(
b"0x9535a6970bc4db9031749331a214e35ed8c8a3f585f6f456d590a0bc780a1368",
)
.unwrap(),
);
// expected to succeed against a live endpoint serving this block
let wit1 = client
.try_fetch_block_witness(block_hash, None)
.expect("should succeed");
// latest block in 2025.6.15
let block_hash = B256::from(
hex::const_decode_to_array(
b"0xd47088cdb6afc68aa082e633bb7da9340d29c73841668afacfb9c1e66e557af0",
)
.unwrap(),
);
let wit2 = client
.try_fetch_block_witness(block_hash, Some(&wit1))
.expect("should success");
println!("{}", serde_json::to_string_pretty(&wit2).unwrap());
}
#[test]
#[ignore = "Requires L2GETH_ENDPOINT environment variable"]
fn test_try_fetch_storage_node() {
let config = create_config_from_env();
let client_core = RpcClientCore::create(&config).expect("Failed to create RPC client");
let client = client_core.get_client();
// the root node (state root) of the block in unittest above
let node_hash = B256::from(
hex::const_decode_to_array(
b"0xb9e67403a2eb35afbb0475fe942918cf9a330a1d7532704c24554506be62b27c",
)
.unwrap(),
);
// expected to succeed: the node hash above exists on a live endpoint
let node = client
.try_fetch_storage_node(node_hash)
.expect("should succeed");
println!("{}", serde_json::to_string_pretty(&node).unwrap());
}
}
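
The runtime-embedding design above lets synchronous callers (e.g. code invoked through a C FFI) drive async RPC work via block_on. A minimal blocking-usage sketch (hypothetical; B256::ZERO is a placeholder, a real call needs an actual block hash):

fn fetch_blocking_sketch() -> eyre::Result<()> {
    let cfg: RpcConfig = serde_json::from_str(r#"{"endpoint": "http://localhost:8545"}"#)?;
    let core = RpcClientCore::create(&cfg)?;
    let client = core.get_client();
    // a synchronous call: block_on inside drives the async fetch
    let _witness = client.try_fetch_block_witness(sbv_primitives::B256::ZERO, None)?;
    Ok(())
}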

23
crates/libzkp/Cargo.toml Normal file
View File

@@ -0,0 +1,23 @@
[package]
name = "libzkp"
version.workspace = true
edition.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
scroll-zkvm-types.workspace = true
scroll-zkvm-verifier-euclid.workspace = true
sbv-primitives.workspace = true
base64.workspace = true
serde.workspace = true
serde_derive.workspace = true
serde_json = { workspace = true, features = ["raw_value"]}
tracing.workspace = true
eyre.workspace = true
git-version = "0.3.5"
serde_stacker = "0.1"
regex = "1.11"
c-kzg = { version = "1.0", features = ["serde"] }

121
crates/libzkp/src/lib.rs Normal file
View File

@@ -0,0 +1,121 @@
pub mod proofs;
pub mod tasks;
pub mod verifier;
pub use verifier::{TaskType, VerifierConfig};
mod utils;
use sbv_primitives::B256;
use scroll_zkvm_types::util::vec_as_base64;
use serde::{Deserialize, Serialize};
use serde_json::value::RawValue;
use std::path::Path;
use tasks::chunk_interpreter::{ChunkInterpreter, TryFromWithInterpreter};
/// Turn the coordinator's chunk task into a json string for formal chunk proving
/// task (with full witnesses)
pub fn checkout_chunk_task(
task_json: &str,
interpreter: impl ChunkInterpreter,
) -> eyre::Result<String> {
let chunk_task = serde_json::from_str::<tasks::ChunkTask>(task_json)?;
let ret = serde_json::to_string(&tasks::ChunkProvingTask::try_from_with_interpret(
chunk_task,
interpreter,
)?)?;
Ok(ret)
}
/// Generate the required artifacts for proving tasks
pub fn gen_universal_task(
task_type: i32,
task_json: &str,
fork_name: &str,
interpreter: Option<impl ChunkInterpreter>,
) -> eyre::Result<(B256, String, String)> {
use proofs::*;
use tasks::*;
/// Wrapper for metadata
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(untagged)]
enum AnyMetaData {
Chunk(ChunkProofMetadata),
Batch(BatchProofMetadata),
Bundle(BundleProofMetadata),
}
let (pi_hash, metadata, u_task) = match task_type {
x if x == TaskType::Chunk as i32 => {
let task = serde_json::from_str::<ChunkProvingTask>(task_json)?;
let (pi_hash, metadata, u_task) =
gen_universal_chunk_task(task, fork_name.into(), interpreter)?;
(pi_hash, AnyMetaData::Chunk(metadata), u_task)
}
x if x == TaskType::Batch as i32 => {
let task = serde_json::from_str::<BatchProvingTask>(task_json)?;
let (pi_hash, metadata, u_task) = gen_universal_batch_task(task, fork_name.into())?;
(pi_hash, AnyMetaData::Batch(metadata), u_task)
}
x if x == TaskType::Bundle as i32 => {
let task = serde_json::from_str::<BundleProvingTask>(task_json)?;
let (pi_hash, metadata, u_task) = gen_universal_bundle_task(task, fork_name.into())?;
(pi_hash, AnyMetaData::Bundle(metadata), u_task)
}
_ => return Err(eyre::eyre!("unrecognized task type {task_type}")),
};
Ok((
pi_hash,
serde_json::to_string(&metadata)?,
serde_json::to_string(&u_task)?,
))
}
/// helper to rearrange the proof return by universal prover into corresponding wrapped proof
pub fn gen_wrapped_proof(proof_json: &str, metadata: &str, vk: &[u8]) -> eyre::Result<String> {
#[derive(Serialize)]
struct RearrangeWrappedProofJson<'a> {
#[serde(borrow)]
pub metadata: &'a RawValue,
#[serde(borrow)]
pub proof: &'a RawValue,
#[serde(with = "vec_as_base64", default)]
pub vk: Vec<u8>,
pub git_version: String,
}
let re_arrange = RearrangeWrappedProofJson {
metadata: serde_json::from_str(metadata)?,
proof: serde_json::from_str(proof_json)?,
vk: vk.to_vec(),
git_version: utils::short_git_version(),
};
let ret = serde_json::to_string(&re_arrange)?;
Ok(ret)
}
/// init verifier
pub fn verifier_init(config: &str) -> eyre::Result<()> {
let cfg: VerifierConfig = serde_json::from_str(config)?;
verifier::init(cfg);
Ok(())
}
/// verify proof
pub fn verify_proof(proof: Vec<u8>, fork_name: &str, task_type: TaskType) -> eyre::Result<bool> {
let verifier = verifier::get_verifier(fork_name)?;
let ret = verifier.verify(task_type, proof)?;
Ok(ret)
}
/// dump vk
pub fn dump_vk(fork_name: &str, file: &str) -> eyre::Result<()> {
let verifier = verifier::get_verifier(fork_name)?;
verifier.dump_vk(Path::new(file));
Ok(())
}
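
Taken together, a minimal sketch of how these entry points chain for a chunk task (hypothetical glue code, not part of the crate; `interp` would be something like the l2geth RpcClient, and the fork name is a placeholder):

use libzkp::tasks::ChunkInterpreter;
use libzkp::{checkout_chunk_task, gen_universal_task, gen_wrapped_proof, TaskType};

fn prove_chunk_flow(
    interp: impl ChunkInterpreter + Copy,
    coordinator_task: &str, // chunk task JSON from the coordinator
    proof_json: &str,       // universal proof JSON returned by the prover
    vk: &[u8],
) -> eyre::Result<String> {
    // 1. expand block hashes into full block witnesses via the interpreter
    let task_json = checkout_chunk_task(coordinator_task, interp)?;
    // 2. derive the universal proving task, metadata and the expected pi hash
    let (_pi_hash, metadata, _u_task) =
        gen_universal_task(TaskType::Chunk as i32, &task_json, "euclidv2", Some(interp))?;
    // 3. once the universal prover returns, re-wrap proof + metadata + vk
    gen_wrapped_proof(proof_json, &metadata, vk)
}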

344
crates/libzkp/src/proofs.rs Normal file
View File

@@ -0,0 +1,344 @@
use std::path::Path;
use crate::utils::short_git_version;
use eyre::Result;
use sbv_primitives::B256;
use scroll_zkvm_types::{
batch::BatchInfo,
bundle::BundleInfo,
chunk::ChunkInfo,
proof::{EvmProof, OpenVmEvmProof, ProofEnum, RootProof},
public_inputs::{ForkName, MultiVersionPublicInputs},
types_agg::{AggregationInput, ProgramCommitment},
util::vec_as_base64,
};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
/// A wrapper around the actual inner proof.
#[derive(Clone, Serialize, Deserialize)]
pub struct WrappedProof<Metadata> {
/// Generic metadata carried by a proof.
pub metadata: Metadata,
/// The inner proof, either a [`RootProof`] or [`EvmProof`] depending on the
/// [`crate::ProverType`].
pub proof: ProofEnum,
/// Represents the verifying key in serialized form. The purpose of including the verifying key
/// along with the proof is to allow a verifier-only mode to identify the source of proof
/// generation.
///
/// For [`RootProof`] the verifying key is denoted by the digest of the VM's program.
///
/// For [`EvmProof`] its the raw bytes of the halo2 circuit's `VerifyingKey`.
///
/// We encode the vk in base64 format during JSON serialization.
#[serde(with = "vec_as_base64", default)]
pub vk: Vec<u8>,
/// Represents the git ref for `zkvm-prover` that was used to construct the proof.
///
/// This is useful for debugging.
pub git_version: String,
}
pub trait AsRootProof {
fn as_root_proof(&self) -> &RootProof;
}
pub trait AsEvmProof {
fn as_evm_proof(&self) -> &EvmProof;
}
pub trait IntoEvmProof {
fn into_evm_proof(self) -> OpenVmEvmProof;
}
/// Alias for convenience.
pub type ChunkProof = WrappedProof<ChunkProofMetadata>;
/// Alias for convenience.
pub type BatchProof = WrappedProof<BatchProofMetadata>;
/// Alias for convenience.
pub type BundleProof = WrappedProof<BundleProofMetadata>;
impl AsRootProof for ChunkProof {
fn as_root_proof(&self) -> &RootProof {
self.proof
.as_root_proof()
.expect("batch proof use root proof")
}
}
impl AsRootProof for BatchProof {
fn as_root_proof(&self) -> &RootProof {
self.proof
.as_root_proof()
.expect("batch proof use root proof")
}
}
impl AsEvmProof for BundleProof {
fn as_evm_proof(&self) -> &EvmProof {
self.proof
.as_evm_proof()
.expect("bundle proof use evm proof")
}
}
impl IntoEvmProof for BundleProof {
fn into_evm_proof(self) -> OpenVmEvmProof {
self.proof
.as_evm_proof()
.expect("bundle proof use evm proof")
.clone()
.into()
}
}
/// Trait to enable operations in metadata
pub trait ProofMetadata: Serialize + DeserializeOwned + std::fmt::Debug {
type PublicInputs: MultiVersionPublicInputs;
fn pi_hash_info(&self) -> &Self::PublicInputs;
fn new_proof<P: Into<ProofEnum>>(self, proof: P, vk: Option<&[u8]>) -> WrappedProof<Self> {
WrappedProof {
metadata: self,
proof: proof.into(),
vk: vk.map(Vec::from).unwrap_or_default(),
git_version: short_git_version(),
}
}
}
pub trait PersistableProof: Sized {
/// Read and deserialize the proof.
fn from_json<P: AsRef<Path>>(path_proof: P) -> Result<Self>;
/// Serialize the proof and dumping at the given path.
fn dump<P: AsRef<Path>>(&self, path_proof: P) -> Result<()>;
}
/// Metadata attached to [`ChunkProof`].
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ChunkProofMetadata {
/// The chunk information describing the list of blocks contained within the chunk.
pub chunk_info: ChunkInfo,
}
impl ProofMetadata for ChunkProofMetadata {
type PublicInputs = ChunkInfo;
fn pi_hash_info(&self) -> &Self::PublicInputs {
&self.chunk_info
}
}
/// Metadata attached to [`BatchProof`].
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BatchProofMetadata {
/// The batch information describing the list of chunks.
pub batch_info: BatchInfo,
/// The [`scroll_zkvm_types::batch::BatchHeader`]'s digest.
pub batch_hash: B256,
}
impl ProofMetadata for BatchProofMetadata {
type PublicInputs = BatchInfo;
fn pi_hash_info(&self) -> &Self::PublicInputs {
&self.batch_info
}
}
/// Metadata attached to [`BundleProof`].
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BundleProofMetadata {
/// The bundle information describing the list of batches to be finalised on-chain.
pub bundle_info: BundleInfo,
/// The public-input digest for the bundle.
pub bundle_pi_hash: B256,
}
impl ProofMetadata for BundleProofMetadata {
type PublicInputs = BundleInfo;
fn pi_hash_info(&self) -> &Self::PublicInputs {
&self.bundle_info
}
}
impl<Metadata> From<&WrappedProof<Metadata>> for AggregationInput {
fn from(value: &WrappedProof<Metadata>) -> Self {
Self {
public_values: value.proof.public_values(),
commitment: ProgramCommitment::deserialize(&value.vk),
}
}
}
impl<Metadata: ProofMetadata> WrappedProof<Metadata> {
/// Sanity checks on the wrapped proof:
///
/// - pi_hash computed in host does in fact match pi_hash computed in guest
pub fn sanity_check(&self, fork_name: ForkName) {
let proof_pi = self.proof.public_values();
let expected_pi = self
.metadata
.pi_hash_info()
.pi_hash_by_fork(fork_name)
.0
.as_ref()
.iter()
.map(|&v| v as u32)
.collect::<Vec<_>>();
assert_eq!(
expected_pi, proof_pi,
"pi mismatch: expected={expected_pi:?}, found={proof_pi:?}"
);
}
}
impl<Metadata: ProofMetadata> PersistableProof for WrappedProof<Metadata> {
fn from_json<P: AsRef<Path>>(path_proof: P) -> Result<Self> {
crate::utils::read_json_deep(path_proof)
}
fn dump<P: AsRef<Path>>(&self, path_proof: P) -> Result<()> {
crate::utils::write_json(path_proof, &self)
}
}
#[cfg(test)]
mod tests {
use base64::{prelude::BASE64_STANDARD, Engine};
use sbv_primitives::B256;
use scroll_zkvm_types::{
bundle::{BundleInfo, BundleInfoV1},
proof::EvmProof,
public_inputs::PublicInputs,
};
use super::*;
#[test]
fn test_roundtrip() -> eyre::Result<()> {
macro_rules! assert_roundtrip {
($fd:expr, $proof:ident) => {
let proof_str_expected =
std::fs::read_to_string(std::path::Path::new("./testdata").join($fd))?;
let proof = serde_json::from_str::<$proof>(&proof_str_expected)?;
let proof_str_got = serde_json::to_string(&proof)?;
assert_eq!(proof_str_got, proof_str_expected);
};
}
assert_roundtrip!("chunk-proof.json", ChunkProof);
assert_roundtrip!("batch-proof.json", BatchProof);
assert_roundtrip!("bundle-proof.json", BundleProof);
Ok(())
}
#[test]
fn test_dummy_proof() -> eyre::Result<()> {
// 1. Metadata
let metadata = {
let bundle_info: BundleInfoV1 = BundleInfo {
chain_id: 12345,
num_batches: 12,
prev_state_root: B256::repeat_byte(1),
prev_batch_hash: B256::repeat_byte(2),
post_state_root: B256::repeat_byte(3),
batch_hash: B256::repeat_byte(4),
withdraw_root: B256::repeat_byte(5),
msg_queue_hash: B256::repeat_byte(6),
}
.into();
let bundle_pi_hash = bundle_info.pi_hash();
BundleProofMetadata {
bundle_info: bundle_info.0,
bundle_pi_hash,
}
};
// 2. Proof
let (proof, proof_base64) = {
let proof = std::iter::empty()
.chain(std::iter::repeat_n(1, 1))
.chain(std::iter::repeat_n(2, 2))
.chain(std::iter::repeat_n(3, 3))
.chain(std::iter::repeat_n(4, 4))
.chain(std::iter::repeat_n(5, 5))
.chain(std::iter::repeat_n(6, 6))
.chain(std::iter::repeat_n(7, 7))
.chain(std::iter::repeat_n(8, 8))
.chain(std::iter::repeat_n(9, 9))
.collect::<Vec<u8>>();
let proof_base64 = BASE64_STANDARD.encode(&proof);
(proof, proof_base64)
};
// 3. Instances
let (instances, instances_base64) = {
// LE: [0x56, 0x34, 0x12, 0x00, 0x00, ..., 0x00]
// LE: [0x32, 0x54, 0x76, 0x98, 0x00, ..., 0x00]
let instances = std::iter::empty()
.chain(std::iter::repeat_n(0x00, 29))
.chain(std::iter::once(0x12))
.chain(std::iter::once(0x34))
.chain(std::iter::once(0x56))
.chain(std::iter::repeat_n(0x00, 28))
.chain(std::iter::once(0x98))
.chain(std::iter::once(0x76))
.chain(std::iter::once(0x54))
.chain(std::iter::once(0x32))
.collect::<Vec<u8>>();
let instances_base64 = BASE64_STANDARD.encode(&instances);
(instances, instances_base64)
};
// 4. VK
let (vk, vk_base64) = {
let vk = std::iter::empty()
.chain(std::iter::repeat_n(1, 9))
.chain(std::iter::repeat_n(2, 8))
.chain(std::iter::repeat_n(3, 7))
.chain(std::iter::repeat_n(4, 6))
.chain(std::iter::repeat_n(5, 5))
.chain(std::iter::repeat_n(6, 4))
.chain(std::iter::repeat_n(7, 3))
.chain(std::iter::repeat_n(8, 2))
.chain(std::iter::repeat_n(9, 1))
.collect::<Vec<u8>>();
let vk_base64 = BASE64_STANDARD.encode(&vk);
(vk, vk_base64)
};
let evm_proof = EvmProof { instances, proof };
let bundle_proof = metadata.new_proof(evm_proof, Some(vk.as_slice()));
let bundle_proof_json = serde_json::to_value(&bundle_proof)?;
assert_eq!(
bundle_proof_json.get("proof").unwrap(),
&serde_json::json!({
"proof": proof_base64,
"instances": instances_base64,
}),
);
assert_eq!(
bundle_proof_json.get("vk").unwrap(),
&serde_json::Value::String(vk_base64),
);
let bundle_proof_de = serde_json::from_value::<BundleProof>(bundle_proof_json)?;
assert_eq!(
bundle_proof_de.proof.as_evm_proof(),
bundle_proof.proof.as_evm_proof()
);
assert_eq!(bundle_proof_de.vk, bundle_proof.vk);
Ok(())
}
}

View File

@@ -0,0 +1,76 @@
pub mod batch;
pub mod bundle;
pub mod chunk;
pub mod chunk_interpreter;
pub use batch::BatchProvingTask;
pub use bundle::BundleProvingTask;
pub use chunk::{ChunkProvingTask, ChunkTask};
pub use chunk_interpreter::ChunkInterpreter;
pub use scroll_zkvm_types::task::ProvingTask;
use crate::proofs::{BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata};
use chunk_interpreter::{DummyInterpreter, TryFromWithInterpreter};
use sbv_primitives::B256;
use scroll_zkvm_types::{
chunk::ChunkInfo,
public_inputs::{ForkName, MultiVersionPublicInputs},
};
/// Generate the required artifacts for chunk proving
pub fn gen_universal_chunk_task(
mut task: ChunkProvingTask,
fork_name: ForkName,
interpreter: Option<impl ChunkInterpreter>,
) -> eyre::Result<(B256, ChunkProofMetadata, ProvingTask)> {
let chunk_info = if let Some(interpreter) = interpreter {
ChunkInfo::try_from_with_interpret(&mut task, interpreter)
} else {
ChunkInfo::try_from_with_interpret(&mut task, DummyInterpreter {})
}?;
let proving_task = task.try_into()?;
let expected_pi_hash = chunk_info.pi_hash_by_fork(fork_name);
Ok((
expected_pi_hash,
ChunkProofMetadata { chunk_info },
proving_task,
))
}
/// Generate the required artifacts for batch proving
pub fn gen_universal_batch_task(
task: BatchProvingTask,
fork_name: ForkName,
) -> eyre::Result<(B256, BatchProofMetadata, ProvingTask)> {
let batch_info = task.precheck_and_build_metadata()?;
let proving_task = task.try_into()?;
let expected_pi_hash = batch_info.pi_hash_by_fork(fork_name);
Ok((
expected_pi_hash,
BatchProofMetadata {
batch_info,
batch_hash: expected_pi_hash,
},
proving_task,
))
}
/// Generate the required artifacts for bundle proving
pub fn gen_universal_bundle_task(
task: BundleProvingTask,
fork_name: ForkName,
) -> eyre::Result<(B256, BundleProofMetadata, ProvingTask)> {
let bundle_info = task.precheck_and_build_metadata()?;
let proving_task = task.try_into()?;
let expected_pi_hash = bundle_info.pi_hash_by_fork(fork_name);
Ok((
expected_pi_hash,
BundleProofMetadata {
bundle_info,
bundle_pi_hash: expected_pi_hash,
},
proving_task,
))
}

View File

@@ -0,0 +1,253 @@
use crate::proofs::ChunkProof;
use c_kzg::Bytes48;
use eyre::Result;
use sbv_primitives::{B256, U256};
use scroll_zkvm_types::{
batch::{
BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchInfo, BatchWitness, EnvelopeV6, EnvelopeV7,
PointEvalWitness, ReferenceHeader, N_BLOB_BYTES,
},
public_inputs::ForkName,
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};
mod utils;
use utils::{base64, point_eval};
/// A variant batch header type. Since a BatchHeaderV6 cannot be
/// decoded as V7, deserialization always resolves to the correct variant.
/// Notice: the V6 header MUST be placed above V7, since an untagged enum
/// tries to decode each variant in order
#[derive(Clone, serde::Deserialize, serde::Serialize)]
#[serde(untagged)]
pub enum BatchHeaderV {
V6(BatchHeaderV6),
V7(BatchHeaderV7),
}
impl From<BatchHeaderV> for ReferenceHeader {
fn from(value: BatchHeaderV) -> Self {
match value {
BatchHeaderV::V6(h) => ReferenceHeader::V6(h),
BatchHeaderV::V7(h) => ReferenceHeader::V7(h),
}
}
}
impl BatchHeaderV {
pub fn batch_hash(&self) -> B256 {
match self {
BatchHeaderV::V6(h) => h.batch_hash(),
BatchHeaderV::V7(h) => h.batch_hash(),
}
}
pub fn must_v6_header(&self) -> &BatchHeaderV6 {
match self {
BatchHeaderV::V6(h) => h,
BatchHeaderV::V7(_) => panic!("try to pick v7 header"),
}
}
pub fn must_v7_header(&self) -> &BatchHeaderV7 {
match self {
BatchHeaderV::V7(h) => h,
BatchHeaderV::V6(_) => panic!("try to pick v6 header"),
}
}
}
/// Defines a proving task for batch proof generation; the format
/// is compatible with both pre-euclidv2 and euclidv2
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub struct BatchProvingTask {
/// Chunk proofs for the contiguous list of chunks within the batch.
pub chunk_proofs: Vec<ChunkProof>,
/// The [`BatchHeaderV6/V7`], as computed on-chain for this batch.
pub batch_header: BatchHeaderV,
/// The bytes encoding the batch data that will finally be published on-chain in the form of an
/// EIP-4844 blob.
#[serde(with = "base64")]
pub blob_bytes: Vec<u8>,
/// Challenge digest computed using the blob's bytes and versioned hash.
pub challenge_digest: Option<U256>,
/// KZG commitment for the blob.
pub kzg_commitment: Option<Bytes48>,
/// KZG proof.
pub kzg_proof: Option<Bytes48>,
/// Fork version specifier, for sanity checks against batch_header and chunk proofs
pub fork_name: String,
}
impl TryFrom<BatchProvingTask> for ProvingTask {
type Error = eyre::Error;
fn try_from(value: BatchProvingTask) -> Result<Self> {
let witness = value.build_guest_input();
Ok(ProvingTask {
identifier: value.batch_header.batch_hash().to_string(),
fork_name: value.fork_name,
aggregated_proofs: value
.chunk_proofs
.into_iter()
.map(|w_proof| w_proof.proof.into_root_proof().expect("expect root proof"))
.collect(),
serialized_witness: vec![to_rkyv_bytes::<RancorError>(&witness)?.into_vec()],
vk: Vec::new(),
})
}
}
impl BatchProvingTask {
fn build_guest_input(&self) -> BatchWitness {
let fork_name = self.fork_name.to_lowercase().as_str().into();
// calculate point eval needed and compare with task input
let (kzg_commitment, kzg_proof, challenge_digest) = {
let blob = point_eval::to_blob(&self.blob_bytes);
let commitment = point_eval::blob_to_kzg_commitment(&blob);
let versioned_hash = point_eval::get_versioned_hash(&commitment);
let challenge_digest = match &self.batch_header {
BatchHeaderV::V6(_) => {
assert_eq!(
fork_name,
ForkName::EuclidV1,
"hardfork mismatch for da-codec@v6 header: found={fork_name:?}, expected={:?}",
ForkName::EuclidV1,
);
EnvelopeV6::from(self.blob_bytes.as_slice()).challenge_digest(versioned_hash)
}
BatchHeaderV::V7(_) => {
assert_eq!(
fork_name,
ForkName::EuclidV2,
"hardfork mismatch for da-codec@v7 header: found={fork_name:?}, expected={:?}",
ForkName::EuclidV2,
);
let padded_blob_bytes = {
let mut padded_blob_bytes = self.blob_bytes.to_vec();
padded_blob_bytes.resize(N_BLOB_BYTES, 0);
padded_blob_bytes
};
EnvelopeV7::from(padded_blob_bytes.as_slice()).challenge_digest(versioned_hash)
}
};
let (proof, _) = point_eval::get_kzg_proof(&blob, challenge_digest);
(commitment.to_bytes(), proof.to_bytes(), challenge_digest)
};
if let Some(k) = self.kzg_commitment {
assert_eq!(k, kzg_commitment);
}
if let Some(c) = self.challenge_digest {
assert_eq!(c, U256::from_be_bytes(challenge_digest.0));
}
if let Some(p) = self.kzg_proof {
assert_eq!(p, kzg_proof);
}
let point_eval_witness = PointEvalWitness {
kzg_commitment: kzg_commitment.into_inner(),
kzg_proof: kzg_proof.into_inner(),
};
let reference_header = self.batch_header.clone().into();
BatchWitness {
fork_name,
chunk_proofs: self.chunk_proofs.iter().map(|proof| proof.into()).collect(),
chunk_infos: self
.chunk_proofs
.iter()
.map(|p| p.metadata.chunk_info.clone())
.collect(),
blob_bytes: self.blob_bytes.clone(),
reference_header,
point_eval_witness,
}
}
pub fn precheck_and_build_metadata(&self) -> Result<BatchInfo> {
let fork_name = ForkName::from(self.fork_name.as_str());
let (parent_state_root, state_root, chain_id, withdraw_root) = (
self.chunk_proofs
.first()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.prev_state_root,
self.chunk_proofs
.last()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.post_state_root,
self.chunk_proofs
.last()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.chain_id,
self.chunk_proofs
.last()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.withdraw_root,
);
let (parent_batch_hash, prev_msg_queue_hash, post_msg_queue_hash) = match self.batch_header
{
BatchHeaderV::V6(h) => {
assert_eq!(
fork_name,
ForkName::EuclidV1,
"hardfork mismatch for da-codec@v6 header: found={fork_name:?}, expected={:?}",
ForkName::EuclidV1,
);
(h.parent_batch_hash, Default::default(), Default::default())
}
BatchHeaderV::V7(h) => {
assert_eq!(
fork_name,
ForkName::EuclidV2,
"hardfork mismatch for da-codec@v7 header: found={fork_name:?}, expected={:?}",
ForkName::EuclidV2,
);
(
h.parent_batch_hash,
self.chunk_proofs
.first()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.prev_msg_queue_hash,
self.chunk_proofs
.last()
.expect("at least one chunk in batch")
.metadata
.chunk_info
.post_msg_queue_hash,
)
}
};
let batch_hash = self.batch_header.batch_hash();
Ok(BatchInfo {
parent_state_root,
parent_batch_hash,
state_root,
batch_hash,
chain_id,
withdraw_root,
prev_msg_queue_hash,
post_msg_queue_hash,
})
}
}

View File

@@ -0,0 +1,77 @@
pub mod base64 {
use base64::prelude::*;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
pub fn serialize<S: Serializer>(v: &Vec<u8>, s: S) -> Result<S::Ok, S::Error> {
let base64 = BASE64_STANDARD.encode(v);
String::serialize(&base64, s)
}
pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<u8>, D::Error> {
let base64 = String::deserialize(d)?;
BASE64_STANDARD
.decode(base64.as_bytes())
.map_err(serde::de::Error::custom)
}
}
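// A minimal usage sketch (hypothetical type) for the helper module above:
// a field annotated with #[serde(with = "base64")] round-trips its Vec<u8>
// as a base64 string in JSON rather than as an integer array, e.g.
//
//     #[derive(serde::Serialize, serde::Deserialize)]
//     struct Envelope {
//         #[serde(with = "base64")]
//         blob_bytes: Vec<u8>, // serialized as "AAEC..." instead of [0, 1, 2]
//     }
//
// which matches how `BatchProvingTask.blob_bytes` is declared in batch.rs.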
pub mod point_eval {
use c_kzg;
use sbv_primitives::{types::eips::eip4844::BLS_MODULUS, B256 as H256, U256};
use scroll_zkvm_types::util::sha256_rv32;
/// Given the blob-envelope, translate it to a fixed size EIP-4844 blob.
///
/// For every 32-bytes chunk in the blob, the most-significant byte is set to 0 while the other
/// 31 bytes are copied from the provided blob-envelope.
pub fn to_blob(envelope_bytes: &[u8]) -> c_kzg::Blob {
let mut blob_bytes = [0u8; c_kzg::BYTES_PER_BLOB];
assert!(
envelope_bytes.len()
<= c_kzg::FIELD_ELEMENTS_PER_BLOB * (c_kzg::BYTES_PER_FIELD_ELEMENT - 1),
"too many bytes in blob envelope",
);
for (i, &byte) in envelope_bytes.iter().enumerate() {
blob_bytes[(i / 31) * 32 + 1 + (i % 31)] = byte;
}
c_kzg::Blob::new(blob_bytes)
}
/// Get the KZG commitment from an EIP-4844 blob.
pub fn blob_to_kzg_commitment(blob: &c_kzg::Blob) -> c_kzg::KzgCommitment {
c_kzg::KzgCommitment::blob_to_kzg_commitment(blob, c_kzg::ethereum_kzg_settings())
.expect("blob to kzg commitment should succeed")
}
/// The version for KZG as per EIP-4844.
const VERSIONED_HASH_VERSION_KZG: u8 = 1;
/// Get the EIP-4844 versioned hash from the KZG commitment.
pub fn get_versioned_hash(commitment: &c_kzg::KzgCommitment) -> H256 {
let mut hash: [u8; 32] = sha256_rv32(commitment.to_bytes().as_slice()).into();
hash[0] = VERSIONED_HASH_VERSION_KZG;
H256::new(hash)
}
/// Get x for kzg proof from challenge hash
pub fn get_x_from_challenge(challenge: H256) -> U256 {
U256::from_be_bytes(challenge.0) % BLS_MODULUS
}
/// Generate KZG proof and evaluation given the blob (polynomial) and a random challenge.
pub fn get_kzg_proof(blob: &c_kzg::Blob, challenge: H256) -> (c_kzg::KzgProof, U256) {
let challenge = get_x_from_challenge(challenge);
let (proof, y) = c_kzg::KzgProof::compute_kzg_proof(
blob,
&c_kzg::Bytes32::new(challenge.to_be_bytes()),
c_kzg::ethereum_kzg_settings(),
)
.expect("kzg proof should succeed");
(proof, U256::from_be_slice(y.as_slice()))
}
}
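
A worked example of the envelope-to-blob mapping described in `to_blob` (a hypothetical test; the index formula is taken directly from the loop above):

#[test]
fn blob_layout_sketch() {
    // envelope byte i lands at blob index (i / 31) * 32 + 1 + (i % 31)
    let idx = |i: usize| (i / 31) * 32 + 1 + (i % 31);
    assert_eq!(idx(0), 1); // blob[0] stays 0: MSB of the first field element
    assert_eq!(idx(30), 31); // last envelope byte packed into the first field element
    assert_eq!(idx(31), 33); // blob[32] stays 0: MSB of the second field element
}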

View File

@@ -0,0 +1,125 @@
use crate::proofs::BatchProof;
use eyre::Result;
use scroll_zkvm_types::{
bundle::{BundleInfo, BundleWitness},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};
/// Message indicating a sanity check failure.
const BUNDLE_SANITY_MSG: &str = "bundle must have at least one batch";
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub struct BundleProvingTask {
pub batch_proofs: Vec<BatchProof>,
/// for sanity check
pub bundle_info: Option<BundleInfo>,
/// Fork name specifier
pub fork_name: String,
}
impl BundleProvingTask {
fn identifier(&self) -> String {
assert!(!self.batch_proofs.is_empty(), "{BUNDLE_SANITY_MSG}",);
let (first, last) = (
self.batch_proofs
.first()
.expect(BUNDLE_SANITY_MSG)
.metadata
.batch_hash,
self.batch_proofs
.last()
.expect(BUNDLE_SANITY_MSG)
.metadata
.batch_hash,
);
format!("{first}-{last}")
}
fn build_guest_input(&self) -> BundleWitness {
BundleWitness {
batch_proofs: self.batch_proofs.iter().map(|proof| proof.into()).collect(),
batch_infos: self
.batch_proofs
.iter()
.map(|wrapped_proof| wrapped_proof.metadata.batch_info.clone())
.collect(),
}
}
pub fn precheck_and_build_metadata(&self) -> Result<BundleInfo> {
use eyre::eyre;
let err_prefix = format!("metadata_with_prechecks for task_id={}", self.identifier());
for w in self.batch_proofs.windows(2) {
if w[1].metadata.batch_info.chain_id != w[0].metadata.batch_info.chain_id {
return Err(eyre!("{err_prefix}: chain_id mismatch"));
}
if w[1].metadata.batch_info.parent_state_root != w[0].metadata.batch_info.state_root {
return Err(eyre!("{err_prefix}: state_root not chained"));
}
if w[1].metadata.batch_info.parent_batch_hash != w[0].metadata.batch_info.batch_hash {
return Err(eyre!("{err_prefix}: batch_hash not chained"));
}
}
let (first_batch, last_batch) = (
&self
.batch_proofs
.first()
.expect("at least one batch in bundle")
.metadata
.batch_info,
&self
.batch_proofs
.last()
.expect("at least one batch in bundle")
.metadata
.batch_info,
);
let chain_id = first_batch.chain_id;
let num_batches = u32::try_from(self.batch_proofs.len()).expect("num_batches: u32");
let prev_state_root = first_batch.parent_state_root;
let prev_batch_hash = first_batch.parent_batch_hash;
let post_state_root = last_batch.state_root;
let batch_hash = last_batch.batch_hash;
let withdraw_root = last_batch.withdraw_root;
let msg_queue_hash = last_batch.post_msg_queue_hash;
Ok(BundleInfo {
chain_id,
msg_queue_hash,
num_batches,
prev_state_root,
prev_batch_hash,
post_state_root,
batch_hash,
withdraw_root,
})
}
}
impl TryFrom<BundleProvingTask> for ProvingTask {
type Error = eyre::Error;
fn try_from(value: BundleProvingTask) -> Result<Self> {
let witness = value.build_guest_input();
Ok(ProvingTask {
identifier: value.identifier(),
fork_name: value.fork_name,
aggregated_proofs: value
.batch_proofs
.into_iter()
.map(|w_proof| w_proof.proof.into_root_proof().expect("expect root proof"))
.collect(),
serialized_witness: vec![to_rkyv_bytes::<RancorError>(&witness)?.to_vec()],
vk: Vec::new(),
})
}
}

View File

@@ -0,0 +1,185 @@
use super::chunk_interpreter::*;
use eyre::Result;
use sbv_primitives::{types::BlockWitness, B256};
use scroll_zkvm_types::{
chunk::{execute, ChunkInfo, ChunkWitness},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};
/// The type aligned with the coordinator's definition
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct ChunkTask {
/// block hashes for a series of block
pub block_hashes: Vec<B256>,
/// The on-chain L1 msg queue hash before applying L1 msg txs from the chunk.
pub prev_msg_queue_hash: B256,
/// Fork name specifier
pub fork_name: String,
}
impl TryFromWithInterpreter<ChunkTask> for ChunkProvingTask {
fn try_from_with_interpret(
value: ChunkTask,
interpreter: impl ChunkInterpreter,
) -> Result<Self> {
let mut block_witnesses = Vec::new();
for block_hash in value.block_hashes {
let witness =
interpreter.try_fetch_block_witness(block_hash, block_witnesses.last())?;
block_witnesses.push(witness);
}
Ok(Self {
block_witnesses,
prev_msg_queue_hash: value.prev_msg_queue_hash,
fork_name: value.fork_name,
})
}
}
/// Message indicating a sanity check failure.
const CHUNK_SANITY_MSG: &str = "chunk must have at least one block";
/// Proving task for the [`ChunkCircuit`][scroll_zkvm_chunk_circuit].
///
/// The identifier for a chunk proving task is:
/// - {first_block_number}-{last_block_number}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct ChunkProvingTask {
/// Witnesses for every block in the chunk.
pub block_witnesses: Vec<BlockWitness>,
/// The on-chain L1 msg queue hash before applying L1 msg txs from the chunk.
pub prev_msg_queue_hash: B256,
/// Fork name specifier
pub fork_name: String,
}
#[derive(Clone, Debug)]
pub struct ChunkDetails {
pub num_blocks: usize,
pub num_txs: usize,
pub total_gas_used: u64,
}
impl TryFrom<ChunkProvingTask> for ProvingTask {
type Error = eyre::Error;
fn try_from(value: ChunkProvingTask) -> Result<Self> {
let witness = value.build_guest_input();
Ok(ProvingTask {
identifier: value.identifier(),
fork_name: value.fork_name,
aggregated_proofs: Vec::new(),
serialized_witness: vec![to_rkyv_bytes::<RancorError>(&witness)?.to_vec()],
vk: Vec::new(),
})
}
}
impl ChunkProvingTask {
pub fn stats(&self) -> ChunkDetails {
let num_blocks = self.block_witnesses.len();
let num_txs = self
.block_witnesses
.iter()
.map(|b| b.transaction.len())
.sum::<usize>();
let total_gas_used = self
.block_witnesses
.iter()
.map(|b| b.header.gas_used)
.sum::<u64>();
ChunkDetails {
num_blocks,
num_txs,
total_gas_used,
}
}
fn identifier(&self) -> String {
assert!(!self.block_witnesses.is_empty(), "{CHUNK_SANITY_MSG}",);
let (first, last) = (
self.block_witnesses
.first()
.expect(CHUNK_SANITY_MSG)
.header
.number,
self.block_witnesses
.last()
.expect(CHUNK_SANITY_MSG)
.header
.number,
);
format!("{first}-{last}")
}
fn build_guest_input(&self) -> ChunkWitness {
ChunkWitness {
blocks: self.block_witnesses.to_vec(),
prev_msg_queue_hash: self.prev_msg_queue_hash,
fork_name: self.fork_name.to_lowercase().as_str().into(),
}
}
fn insert_state(&mut self, node: sbv_primitives::Bytes) {
self.block_witnesses[0].states.push(node);
}
}
const MAX_FETCH_NODES_ATTEMPTS: usize = 15;
impl TryFromWithInterpreter<&mut ChunkProvingTask> for ChunkInfo {
fn try_from_with_interpret(
value: &mut ChunkProvingTask,
interpreter: impl ChunkInterpreter,
) -> eyre::Result<Self> {
use eyre::eyre;
let err_prefix = format!(
"metadata_with_prechecks for task_id={:?}",
value.identifier()
);
if value.block_witnesses.is_empty() {
return Err(eyre!(
"{err_prefix}: chunk should contain at least one block",
));
}
// resume from missing-node errors and keep the execution process going
let pattern = r"SparseTrieError\(BlindedNode \{ path: Nibbles\((0x[0-9a-fA-F]+)\), hash: (0x[0-9a-fA-F]+) \}\)";
let err_parse_re = regex::Regex::new(pattern)?;
let mut attempts = 0;
loop {
match execute(&value.build_guest_input()) {
Ok(chunk_info) => return Ok(chunk_info),
Err(e) => {
if let Some(caps) = err_parse_re.captures(&e) {
let hash = caps[2].to_string();
tracing::debug!("missing trie hash {hash}");
attempts += 1;
if attempts >= MAX_FETCH_NODES_ATTEMPTS {
return Err(eyre!(
"failed to fetch nodes after {MAX_FETCH_NODES_ATTEMPTS} attempts: {e}"
));
}
let node_hash =
hash.parse::<sbv_primitives::B256>().expect("should be hex");
let node = interpreter.try_fetch_storage_node(node_hash)?;
tracing::warn!("missing node fetched: {node}");
value.insert_state(node);
} else {
return Err(eyre!("{err_prefix}: {e}"));
}
}
}
}
}
}
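
For reference, a minimal sketch of the error-string round-trip the resume loop above relies on (the SparseTrieError line is a handcrafted example matching the regex, not output captured from a real run):

#[test]
fn blinded_node_regex_sketch() {
    let pattern = r"SparseTrieError\(BlindedNode \{ path: Nibbles\((0x[0-9a-fA-F]+)\), hash: (0x[0-9a-fA-F]+) \}\)";
    let re = regex::Regex::new(pattern).unwrap();
    let err = "SparseTrieError(BlindedNode { path: Nibbles(0x0102), hash: 0xdeadbeef })";
    let caps = re.captures(err).expect("pattern should match");
    assert_eq!(&caps[2], "0xdeadbeef"); // the missing node hash to fetch
}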

View File

@@ -0,0 +1,26 @@
use eyre::Result;
use sbv_primitives::{types::BlockWitness, Bytes, B256};
/// An interpreter which is critical in translating chunk data,
/// since we need to fetch block witness and storage node data
/// (in rare cases) from an external source
pub trait ChunkInterpreter {
fn try_fetch_block_witness(
&self,
_block_hash: B256,
_prev_witness: Option<&BlockWitness>,
) -> Result<BlockWitness> {
Err(eyre::eyre!("no implement"))
}
fn try_fetch_storage_node(&self, _node_hash: B256) -> Result<Bytes> {
Err(eyre::eyre!("no implement"))
}
}
pub trait TryFromWithInterpreter<T>: Sized {
fn try_from_with_interpret(value: T, interpreter: impl ChunkInterpreter) -> Result<Self>;
}
pub struct DummyInterpreter {}
impl ChunkInterpreter for DummyInterpreter {}

View File

@@ -0,0 +1,53 @@
use std::{
panic::{catch_unwind, AssertUnwindSafe},
path::Path,
};
use git_version::git_version;
use serde::{
de::{Deserialize, DeserializeOwned},
Serialize,
};
use eyre::Result;
const GIT_VERSION: &str = git_version!(args = ["--abbrev=7", "--always"]);
/// Shortened git commit ref from [`scroll_zkvm_prover`].
pub(crate) fn short_git_version() -> String {
let commit_version = GIT_VERSION.split('-').next_back().unwrap();
// Check whether the commit object is used as a fallback.
if commit_version.len() < 8 {
commit_version.to_string()
} else {
commit_version[1..8].to_string()
}
}
/// Wrapper to read JSON that might be deeply nested.
pub(crate) fn read_json_deep<P: AsRef<Path>, T: DeserializeOwned>(path: P) -> Result<T> {
let fd = std::fs::File::open(path)?;
let mut deserializer = serde_json::Deserializer::from_reader(fd);
deserializer.disable_recursion_limit();
let deserializer = serde_stacker::Deserializer::new(&mut deserializer);
Ok(Deserialize::deserialize(deserializer)?)
}
/// Serialize the provided type to JSON format and write to the given path.
pub(crate) fn write_json<P: AsRef<Path>, T: Serialize>(path: P, value: &T) -> Result<()> {
let mut writer = std::fs::File::create(path)?;
Ok(serde_json::to_writer(&mut writer, value)?)
}
pub(crate) fn panic_catch<F: FnOnce() -> R, R>(f: F) -> Result<R, String> {
catch_unwind(AssertUnwindSafe(f)).map_err(|err| {
if let Some(s) = err.downcast_ref::<String>() {
s.to_string()
} else if let Some(s) = err.downcast_ref::<&str>() {
s.to_string()
} else {
format!("unable to get panic info {err:?}")
}
})
}

View File

@@ -1,11 +1,8 @@
#![allow(static_mut_refs)]
mod euclid;
mod euclidv2;
use anyhow::{bail, Result};
use euclid::EuclidVerifier;
use euclidv2::EuclidV2Verifier;
use eyre::Result;
use serde::{Deserialize, Serialize};
use std::{cell::OnceCell, path::Path, rc::Rc};
@@ -16,6 +13,16 @@ pub enum TaskType {
Bundle,
}
impl std::fmt::Display for TaskType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Chunk => write!(f, "chunk"),
Self::Batch => write!(f, "batch"),
Self::Bundle => write!(f, "bundle"),
}
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct VKDump {
pub chunk_vk: String,
@@ -42,26 +49,14 @@ pub struct VerifierConfig {
type HardForkName = String;
struct VerifierPair(HardForkName, Rc<Box<dyn ProofVerifier>>);
static mut VERIFIER_LOW: OnceCell<VerifierPair> = OnceCell::new();
static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();
pub fn init(config: VerifierConfig) {
let verifier = EuclidVerifier::new(&config.high_version_circuit.assets_path);
unsafe {
VERIFIER_LOW
.set(VerifierPair(
"euclid".to_string(),
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();
}
let verifier = EuclidV2Verifier::new(&config.high_version_circuit.assets_path);
unsafe {
VERIFIER_HIGH
.set(VerifierPair(
"euclidV2".to_string(),
config.high_version_circuit.fork_name,
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();
@@ -70,17 +65,14 @@ pub fn init(config: VerifierConfig) {
pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
unsafe {
if let Some(verifier) = VERIFIER_LOW.get() {
if verifier.0 == fork_name {
return Ok(verifier.1.clone());
}
}
if let Some(verifier) = VERIFIER_HIGH.get() {
if verifier.0 == fork_name {
return Ok(verifier.1.clone());
}
}
}
bail!("failed to get verifier, key not found, {}", fork_name)
Err(eyre::eyre!(
"failed to get verifier, key not found, {}",
fork_name
))
}
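
For orientation, a minimal sketch of the verifier lifecycle after this refactor (the config JSON shape is an assumption reconstructed from the fields visible in these hunks, not the full VerifierConfig):

fn verify_sketch(proof_bytes: Vec<u8>) -> eyre::Result<bool> {
    // assumed config shape: only fields visible in this diff are shown
    let cfg = r#"{"high_version_circuit":{"assets_path":"./assets","fork_name":"euclidV2"}}"#;
    libzkp::verifier_init(cfg)?; // installs the verifier under its configured fork name
    libzkp::verify_proof(proof_bytes, "euclidV2", libzkp::TaskType::Bundle)
}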

View File

@@ -1,10 +1,12 @@
use super::{ProofVerifier, TaskType, VKDump};
use anyhow::Result;
use eyre::Result;
use crate::utils::panic_catch;
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
use crate::{
proofs::{AsRootProof, BatchProof, BundleProof, ChunkProof, IntoEvmProof},
utils::panic_catch,
};
use scroll_zkvm_verifier_euclid::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
use std::{fs::File, path::Path};
pub struct EuclidV2Verifier {
@@ -35,30 +37,29 @@ impl ProofVerifier for EuclidV2Verifier {
panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
self.chunk_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
self.chunk_verifier.verify_proof(proof.as_root_proof())
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
self.batch_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
self.batch_verifier.verify_proof(proof.as_root_proof())
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
self.bundle_verifier
.verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
.verify_proof_evm(&proof.into_evm_proof())
}
})
.map_err(|err_str: String| anyhow::anyhow!(err_str))
.map_err(|err_str: String| eyre::eyre!("{err_str}"))
}
fn dump_vk(&self, file: &Path) {
use base64::{prelude::BASE64_STANDARD, Engine};
let f = File::create(file).expect("Failed to open file to dump VK");
let dump = VKDump {
chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
chunk_vk: BASE64_STANDARD.encode(self.chunk_verifier.get_app_vk()),
batch_vk: BASE64_STANDARD.encode(self.batch_verifier.get_app_vk()),
bundle_vk: BASE64_STANDARD.encode(self.bundle_verifier.get_app_vk()),
};
serde_json::to_writer(f, &dump).expect("Failed to dump VK");
}

View File

@@ -0,0 +1,14 @@
[package]
name = "libzkp-c"
version.workspace = true
edition.workspace = true
[lib]
name = "zkp"
crate-type = ["cdylib"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
libzkp = { path = "../libzkp" }
l2geth = { path = "../l2geth"}
tracing.workspace = true

167
crates/libzkp_c/src/lib.rs Normal file
View File

@@ -0,0 +1,167 @@
mod utils;
use std::ffi::{c_char, CString};
use libzkp::TaskType;
use utils::{c_char_to_str, c_char_to_vec};
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_verifier(config: *const c_char) {
let config_str = c_char_to_str(config);
libzkp::verifier_init(config_str).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_l2geth(config: *const c_char) {
let config_str = c_char_to_str(config);
l2geth::init(config_str).unwrap();
}
fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
let fork_name_str = c_char_to_str(fork_name);
let proof = c_char_to_vec(proof);
match libzkp::verify_proof(proof, fork_name_str, task_type) {
Err(e) => {
tracing::error!("{:?} verify failed, error: {:#}", task_type, e);
false as c_char
}
Ok(result) => result as c_char,
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_chunk_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Chunk)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Batch)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_bundle_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Bundle)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn dump_vk(fork_name: *const c_char, file: *const c_char) {
let fork_name_str = c_char_to_str(fork_name);
let file_str = c_char_to_str(file);
libzkp::dump_vk(fork_name_str, file_str).unwrap();
}
// Define a struct to hold handling results
#[repr(C)]
pub struct HandlingResult {
ok: c_char,
universal_task: *mut c_char,
metadata: *mut c_char,
expected_pi_hash: [c_char; 32],
}
fn failed_handling_result() -> HandlingResult {
HandlingResult {
ok: false as c_char,
universal_task: std::ptr::null_mut(),
metadata: std::ptr::null_mut(),
expected_pi_hash: Default::default(),
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_universal_task(
task_type: i32,
task: *const c_char,
fork_name: *const c_char,
) -> HandlingResult {
let mut interpreter = None;
let task_json = if task_type == TaskType::Chunk as i32 {
let pre_task_str = c_char_to_str(task);
let cli = l2geth::get_client();
match libzkp::checkout_chunk_task(pre_task_str, cli) {
Ok(str) => {
interpreter.replace(cli);
str
}
Err(e) => {
tracing::error!("gen_universal_task failed at pre interpret step, error: {e}");
return failed_handling_result();
}
}
} else {
c_char_to_str(task).to_string()
};
let ret =
libzkp::gen_universal_task(task_type, &task_json, c_char_to_str(fork_name), interpreter);
if let Ok((pi_hash, task_json, meta_json)) = ret {
let expected_pi_hash = pi_hash.0.map(|byte| byte as c_char);
HandlingResult {
ok: true as c_char,
universal_task: CString::new(task_json).unwrap().into_raw(),
metadata: CString::new(meta_json).unwrap().into_raw(),
expected_pi_hash,
}
} else {
tracing::error!("gen_universal_task failed, error: {:#}", ret.unwrap_err());
failed_handling_result()
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn release_task_result(result: HandlingResult) {
if !result.universal_task.is_null() {
let _ = CString::from_raw(result.universal_task);
}
if !result.metadata.is_null() {
let _ = CString::from_raw(result.metadata);
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_wrapped_proof(
proof: *const c_char,
metadata: *const c_char,
vk: *const c_char,
vk_len: usize,
) -> *mut c_char {
let proof_str = c_char_to_str(proof);
let metadata_str = c_char_to_str(metadata);
let vk_data = std::slice::from_raw_parts(vk as *const u8, vk_len);
match libzkp::gen_wrapped_proof(proof_str, metadata_str, vk_data) {
Ok(result) => CString::new(result).unwrap().into_raw(),
Err(e) => {
tracing::error!("gen_wrapped_proof failed, error: {:#}", e);
std::ptr::null_mut()
}
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn release_string(ptr: *mut c_char) {
if !ptr.is_null() {
let _ = CString::from_raw(ptr);
}
}

View File

@@ -0,0 +1,11 @@
use std::{ffi::CStr, os::raw::c_char};
pub(crate) fn c_char_to_str(c: *const c_char) -> &'static str {
let cstr = unsafe { CStr::from_ptr(c) };
cstr.to_str().unwrap()
}
pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec<u8> {
let cstr = unsafe { CStr::from_ptr(c) };
cstr.to_bytes().to_vec()
}

View File

@@ -0,0 +1,34 @@
[package]
name = "prover"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
scroll-zkvm-types.workspace = true
scroll-zkvm-prover-euclid.workspace = true
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", branch = "refactor/scroll" }
serde.workspace = true
serde_json.workspace = true
once_cell.workspace =true
base64.workspace = true
tiny-keccak = { workspace = true, features = ["sha3", "keccak"] }
eyre.workspace = true
futures = "0.3.30"
reqwest = { version = "0.12.4", features = ["gzip"] }
reqwest-middleware = "0.3"
reqwest-retry = "0.5"
hex = "0.4.3"
rand = "0.8.5"
tokio = "1.37.0"
async-trait = "0.1"
sled = "0.34.7"
http = "1.1.0"
clap = { version = "4.5", features = ["derive"] }
ctor = "0.2.8"
url = "2.5.4"
serde_bytes = "0.11.15"

View File

@@ -0,0 +1,11 @@
## Prover
A runnable zkvm prover which communicates with the coordinator, receiving proving tasks and generating proofs
## Testing
+ Get the URL of the coordinator endpoint and an RPC endpoint for the corresponding chain
+ Build a `config.json` file from the template in the current directory using the information above
+ Call `make test_run`

View File

@@ -26,7 +26,7 @@ struct Args {
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
async fn main() -> eyre::Result<()> {
init_tracing();
let args = Args::parse();
@@ -39,7 +39,10 @@ async fn main() -> anyhow::Result<()> {
let cfg = LocalProverConfig::from_file(args.config_file)?;
let sdk_config = cfg.sdk_config.clone();
let local_prover = LocalProver::new(cfg);
let prover = ProverBuilder::new(sdk_config, local_prover).build().await?;
let prover = ProverBuilder::new(sdk_config, local_prover)
.build()
.await
.map_err(|e| eyre::eyre!("build prover fail: {e}"))?;
prover.run().await;

View File

@@ -1,8 +1,7 @@
use crate::zk_circuits_handler::{
euclid::EuclidHandler, euclidV2::EuclidV2Handler, CircuitsHandler,
};
use anyhow::{anyhow, Result};
use crate::zk_circuits_handler::{euclidV2::EuclidV2Handler, CircuitsHandler};
use async_trait::async_trait;
use base64::{prelude::BASE64_STANDARD, Engine};
use eyre::Result;
use scroll_proving_sdk::{
config::Config as SdkConfig,
prover::{
@@ -33,7 +32,7 @@ impl LocalProverConfig {
where
R: std::io::Read,
{
serde_json::from_reader(reader).map_err(|e| anyhow!(e))
serde_json::from_reader(reader).map_err(|e| eyre::eyre!(e))
}
pub fn from_file(file_name: String) -> Result<Self> {
@@ -69,7 +68,7 @@ impl ProvingService for LocalProver {
let vk = handler.get_vk(*proof_type).await;
if let Some(vk) = vk {
vks.push(base64::encode(vk));
vks.push(BASE64_STANDARD.encode(vk));
}
}
}
@@ -184,9 +183,6 @@ impl LocalProver {
let config = self.config.circuits.get(hard_fork_name).unwrap();
match hard_fork_name {
"euclid" => Arc::new(Arc::new(Mutex::new(EuclidHandler::new(
&config.workspace_path,
)))) as Arc<dyn CircuitsHandler>,
"euclidV2" => Arc::new(Arc::new(Mutex::new(EuclidV2Handler::new(
&config.workspace_path,
)))) as Arc<dyn CircuitsHandler>,

View File

@@ -1,11 +1,8 @@
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use scroll_proving_sdk::prover::types::CircuitType;
#[derive(Serialize, Deserialize, Default)]
pub struct Task {
#[serde(rename = "type", default)]
pub task_type: CircuitType,
pub task_data: String,
#[serde(default)]
pub hard_fork_name: String,
@@ -15,7 +12,6 @@ pub struct Task {
pub struct ProofDetail {
pub id: String,
#[serde(rename = "type", default)]
pub proof_type: CircuitType,
pub proof_data: String,
pub error: String,
}

View File

@@ -0,0 +1,67 @@
//pub mod euclid;
#[allow(non_snake_case)]
pub mod euclidV2;
use async_trait::async_trait;
use eyre::Result;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, ProofType};
use scroll_zkvm_prover_euclid::ProverConfig;
use std::path::Path;
#[async_trait]
pub trait CircuitsHandler: Sync + Send {
async fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>>;
async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String>;
}
#[derive(Clone, Copy)]
pub(crate) enum Phase {
EuclidV2,
}
impl Phase {
pub fn phase_spec_chunk(&self, workspace_path: &Path) -> ProverConfig {
let dir_cache = Some(workspace_path.join("cache"));
let path_app_exe = workspace_path.join("chunk/app.vmexe");
let path_app_config = workspace_path.join("chunk/openvm.toml");
let segment_len = Some((1 << 22) - 100);
ProverConfig {
dir_cache,
path_app_config,
path_app_exe,
segment_len,
..Default::default()
}
}
pub fn phase_spec_batch(&self, workspace_path: &Path) -> ProverConfig {
let dir_cache = Some(workspace_path.join("cache"));
let path_app_exe = workspace_path.join("batch/app.vmexe");
let path_app_config = workspace_path.join("batch/openvm.toml");
let segment_len = Some((1 << 22) - 100);
ProverConfig {
dir_cache,
path_app_config,
path_app_exe,
segment_len,
..Default::default()
}
}
pub fn phase_spec_bundle(&self, workspace_path: &Path) -> ProverConfig {
let dir_cache = Some(workspace_path.join("cache"));
let path_app_config = workspace_path.join("bundle/openvm.toml");
let segment_len = Some((1 << 22) - 100);
match self {
Phase::EuclidV2 => ProverConfig {
dir_cache,
path_app_config,
segment_len,
path_app_exe: workspace_path.join("bundle/app.vmexe"),
..Default::default()
},
}
}
}

View File

@@ -1,13 +1,11 @@
use std::{path::Path, sync::Arc};
use super::{euclid::Phase, CircuitsHandler};
use anyhow::{anyhow, Result};
use super::{CircuitsHandler, Phase};
use async_trait::async_trait;
use eyre::Result;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, ProofType};
use scroll_zkvm_prover_euclid::{
task::{batch::BatchProvingTask, bundle::BundleProvingTask, chunk::ChunkProvingTask},
BatchProver, BundleProverEuclidV2, ChunkProver,
};
use scroll_zkvm_prover_euclid::{BatchProver, BundleProverEuclidV2, ChunkProver};
use scroll_zkvm_types::ProvingTask;
use tokio::sync::Mutex;
pub struct EuclidV2Handler {
chunk_prover: ChunkProver,
@@ -50,30 +48,26 @@ impl CircuitsHandler for Arc<Mutex<EuclidV2Handler>> {
}
async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
match prove_request.proof_type {
ProofType::Chunk => {
let task: ChunkProvingTask = serde_json::from_str(&prove_request.input)?;
let proof = self.try_lock().unwrap().chunk_prover.gen_proof(&task)?;
let u_task: ProvingTask = serde_json::from_str(&prove_request.input)?;
Ok(serde_json::to_string(&proof)?)
}
ProofType::Batch => {
let task: BatchProvingTask = serde_json::from_str(&prove_request.input)?;
let proof = self.try_lock().unwrap().batch_prover.gen_proof(&task)?;
Ok(serde_json::to_string(&proof)?)
}
ProofType::Bundle => {
let batch_proofs: BundleProvingTask = serde_json::from_str(&prove_request.input)?;
let proof = self
.try_lock()
.unwrap()
.bundle_prover
.gen_proof_evm(&batch_proofs)?;
Ok(serde_json::to_string(&proof)?)
}
_ => Err(anyhow!("Unsupported proof type")),
}
let proof = match prove_request.proof_type {
ProofType::Chunk => self
.try_lock()
.unwrap()
.chunk_prover
.gen_proof_universal(&u_task, false)?,
ProofType::Batch => self
.try_lock()
.unwrap()
.batch_prover
.gen_proof_universal(&u_task, false)?,
ProofType::Bundle => self
.try_lock()
.unwrap()
.bundle_prover
.gen_proof_universal(&u_task, true)?,
_ => return Err(eyre::eyre!("Unsupported proof type")),
};
Ok(serde_json::to_string(&proof)?)
}
}

View File

@@ -59,20 +59,20 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, int64(26), cur)
assert.Equal(t, int64(28), cur)
}
func testMigrate(t *testing.T) {
assert.NoError(t, Migrate(pgDB))
cur, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(26), cur)
assert.Equal(t, int64(28), cur)
}
func testRollback(t *testing.T) {
version, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(26), version)
assert.Equal(t, int64(28), version)
assert.NoError(t, Rollback(pgDB, nil))

View File

@@ -0,0 +1,32 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE blob_upload (
batch_index BIGINT NOT NULL,
batch_hash VARCHAR NOT NULL,
platform SMALLINT NOT NULL,
status SMALLINT NOT NULL,
-- metadata
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
CREATE UNIQUE INDEX IF NOT EXISTS batch_index_batch_hash_platform_uindex
ON blob_upload(batch_index, batch_hash, platform) WHERE deleted_at IS NULL;
COMMENT ON COLUMN blob_upload.status IS 'undefined, pending, uploaded, failed';
CREATE INDEX IF NOT EXISTS idx_blob_upload_status_platform ON blob_upload(status, platform) WHERE deleted_at IS NULL;
CREATE INDEX IF NOT EXISTS idx_blob_upload_batch_index_batch_hash_status_platform
ON blob_upload(batch_index, batch_hash, status, platform) WHERE deleted_at IS NULL;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE blob_upload;
-- +goose StatementEnd
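For illustration only, a hypothetical lookup shaped to use the partial indexes above; the real lookup goes through `GetFirstUnuploadedBatchByPlatform` in the Go code and may differ (batches with no `blob_upload` row yet would presumably be found by joining the batch table), and the numeric codes assume the order given in the status column comment:
SELECT batch_index, batch_hash
FROM blob_upload
WHERE platform = 1          -- assumed code for S3
  AND status <> 2           -- assumed code for 'uploaded'
  AND deleted_at IS NULL
ORDER BY batch_index
LIMIT 1;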

View File

@@ -0,0 +1,15 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE prover_task
ADD COLUMN metadata BYTEA;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE IF EXISTS prover_task
DROP COLUMN IF EXISTS metadata;
-- +goose StatementEnd

View File

@@ -1,6 +1,4 @@
go 1.22
toolchain go1.22.2
go 1.22.4
use (
./bridge-history-api

View File

@@ -555,12 +555,16 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOC
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
github.com/CloudyKit/jet/v6 v6.1.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP/Lzo7Ro4=
github.com/CloudyKit/jet/v6 v6.2.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP/Lzo7Ro4=
github.com/DATA-DOG/go-sqlmock v1.3.3 h1:CWUqKXe0s8A2z6qCgkP4Kru7wC11YoAnoupUKFDnH08=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
github.com/Joker/jade v1.1.3/go.mod h1:T+2WLyt7VH6Lp0TRxQrUYEs64nRc83wkMQrfeIQKduM=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
@@ -572,6 +576,7 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko
github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06/go.mod h1:7erjKLwalezA0k99cWs5L11HWOAPNjdUZ6RxH1BXbbM=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794/go.mod h1:7e+I0LQFUI9AXWxOfsQROs9xPhoJtbsyWcjJqDd4KPY=
github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg=
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
@@ -654,6 +659,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0=
github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bits-and-blooms/bitset v1.14.2/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bketelsen/crypt v0.0.4 h1:w/jqZtC9YD4DS/Vp9GhWfWcCpuAL58oTnLoI8vE9YHU=
@@ -669,7 +675,6 @@ github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPx
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=
@@ -684,6 +689,7 @@ github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY=
@@ -714,7 +720,14 @@ github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XP
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8=
github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/pebble v1.1.1/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/compose-spec/compose-go v1.20.0 h1:h4ZKOst1EF/DwZp7dWkb+wbTVE4nEyT9Lc89to84Ol4=
github.com/compose-spec/compose-go v1.20.0/go.mod h1:+MdqXV4RA7wdFsahh/Kb8U0pAJqkg7mr4PM9tFKU8RM=
@@ -764,6 +777,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI=
github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs=
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c h1:/ovYnF02fwL0kvspmy9AuyKg1JhdTRUgPw4nUxd9oZM=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
@@ -775,6 +789,7 @@ github.com/dchest/blake512 v1.0.0 h1:oDFEQFIqFSeuA34xLtXZ/rWxCXdSjirjzPhey5EUvmA
github.com/dchest/blake512 v1.0.0/go.mod h1:FV1x7xPPLWukZlpDpWQ88rF/SFwZ5qbskrzhLMB92JI=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
@@ -799,11 +814,13 @@ github.com/docker/cli-docs-tool v0.6.0/go.mod h1:zMjqTFCU361PRh8apiXzeAZ1Q/xupbI
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v24.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=
github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0/go.mod h1:56wL82FO0bfMU5RvfXoIwSOP2ggqqxT+tAfNEIyxuHw=
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 h1:iZOop7pqsg+56twTopWgwCGxdB5SI2yDO8Ti7eTRliQ=
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf h1:Yt+4K30SdjOkRoRRm3vYNQgR+/ZIy0RmeUDZo7Y8zeQ=
github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4=
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4=
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7 h1:tYwu/z8Y0NkkzGEh3z21mSWggMg4LwLRFucLS7TjARg=
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
@@ -822,10 +839,12 @@ github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/Ir
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=
github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA=
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c=
github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s=
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0/go.mod h1:D9AJLVXSyZQXJQVk8oh1EwjISE+sJTn2duYIZC0dy3w=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
@@ -834,7 +853,9 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/ferranbt/fastssz v0.1.2/go.mod h1:X5UPrE2u1UJjxHA8X54u04SBwdAQjG2sFtWs39YxyWs=
github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c h1:CndMRAH4JIwxbW8KYq6Q+cGWcGHz0FjGR3QqcInWcW0=
github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY=
github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY=
@@ -854,6 +875,7 @@ github.com/getkin/kin-openapi v0.61.0 h1:6awGqF5nG5zkVpMsAih1QH4VgzS8phTxECUWIFo
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getsentry/sentry-go v0.11.0 h1:qro8uttJGvNAMr5CLcFI9CHR0aDzXl0Vs3Pmw/oTPg8=
github.com/getsentry/sentry-go v0.11.0/go.mod h1:KBQIxiZAetw62Cj8Ri964vAEWVdgfaUCn30Q3bCvANo=
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd h1:r04MMPyLHj/QwZuMJ5+7tJcBr1AQjpiAK/rZWRrQT7o=
@@ -862,6 +884,7 @@ github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/go-chi/chi/v5 v5.0.0 h1:DBPx88FjZJH3FsICfDAfIfnb7XxKIYVGG6lOPlhENAg=
github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72 h1:b+9H1GAsx5RsjvDFLoS5zkNBzIQMuVKUYQDmxU3N5XE=
@@ -873,6 +896,7 @@ github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
@@ -913,6 +937,7 @@ github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+Licev
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219 h1:utua3L2IbQJmauC5IXdEA547bcoU5dozgQAfc8Onsg4=
@@ -1053,7 +1078,10 @@ github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/holiman/uint256 v1.3.0/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150 h1:vlNjIqmUZ9CMAWsbURYl3a6wZbw7q5RHVvlXTNS/Bs8=
@@ -1123,13 +1151,16 @@ github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5 h1:PJr+ZMXIecYc
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef h1:2jNeR4YUziVtswNP9sEFAI913cVrzH85T+8Q6LpYbT0=
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
github.com/karalabe/hid v1.0.1-0.20240306101548-573246063e52/go.mod h1:qk1sX/IBgppQNcGCRoj90u6EGC056EBoIc1oEjCWla8=
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559 h1:0VWDXPNE0brOek1Q8bLfzKkvOzwbQE/snjGojlCr8CY=
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=
github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kataras/blocks v0.0.7/go.mod h1:UJIU97CluDo0f+zEjbnbkeMRlvYORtmc1304EeyXf4I=
github.com/kataras/golog v0.1.7/go.mod h1:jOSQ+C5fUqsNSwurB/oAHq1IFSb0KI3l6GMa7xB6dZA=
github.com/kataras/golog v0.1.8/go.mod h1:rGPAin4hYROfk1qT9wZP6VY2rsb4zzc37QpdPjdkqVw=
github.com/kataras/iris/v12 v12.2.0-beta5/go.mod h1:q26aoWJ0Knx/00iPKg5iizDK7oQQSPjbD8np0XDh6dc=
github.com/kataras/iris/v12 v12.2.0/go.mod h1:BLzBpEunc41GbE68OUaQlqX4jzi791mx5HU04uPb90Y=
github.com/kataras/pio v0.0.11/go.mod h1:38hH6SWH6m4DKSYmRhlrCJ5WItwWgCVrTNU62XZyUvI=
github.com/kataras/sitemap v0.0.6/go.mod h1:dW4dOCNs896OR1HmG+dMLdT7JjDk7mYBzoIRwuj5jA4=
github.com/kataras/tunnel v0.0.4/go.mod h1:9FkU4LaeifdMWqZu7o20ojmW4B7hdhv2CMLwfnHGpYw=
@@ -1143,9 +1174,11 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfM
github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4=
github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada h1:3L+neHp83cTjegPdCiOxVOJtRIy7/8RldvMTsyPYH10=
@@ -1164,9 +1197,11 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+
github.com/labstack/echo/v4 v4.2.1 h1:LF5Iq7t/jrtUuSutNuiEWtB5eiHfZ5gSe2pcu5exjQw=
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
github.com/labstack/echo/v4 v4.9.0/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks=
github.com/labstack/echo/v4 v4.10.0/go.mod h1:S/T/5fy/GigaXnHTkh0ZGe4LpkkQysvRjFMSUTkDRNQ=
github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
github.com/lestrrat-go/backoff/v2 v2.0.8 h1:oNb5E5isby2kiro9AgdHLv5N5tint1AnDVVf2E2un5A=
github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
github.com/lestrrat-go/blackmagic v1.0.0 h1:XzdxDbuQTz0RZZEmdU7cnQxUtFUzgCSPq8RCz4BxIi4=
@@ -1186,6 +1221,7 @@ github.com/lyft/protoc-gen-star/v2 v2.0.3 h1:/3+/2sWyXeMLzKd1bX+ixWKgEMsULrIivpD
github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailgun/raymond/v2 v2.0.46/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18=
github.com/mailgun/raymond/v2 v2.0.48/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd h1:HvFwW+cm9bCbZ/+vuGNq7CRWXql8c0y8nGeYpqmpvmk=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@@ -1207,12 +1243,15 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104 h1:d8RFOZ2IiFtFWBcKEHAFYJcPTf0wY5q0exFNJZVWa1U=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/microcosm-cc/bluemonday v1.0.21/go.mod h1:ytNkv4RrDrLJ2pqlsSI46O6IVXmZOBBD4SaJyDwwTkM=
github.com/microcosm-cc/bluemonday v1.0.23/go.mod h1:mN70sk7UkkF8TUr2IGBpNN0jAgStuPzlK76QuruE/z4=
github.com/microsoft/go-mssqldb v1.6.0 h1:mM3gYdVwEPFrlg/Dvr2DNVEgYFG7L42l+dGc67NNNpc=
github.com/microsoft/go-mssqldb v1.6.0/go.mod h1:00mDtPbeQCRGC1HwOOR5K/gr30P1NcEG0vx6Kbv2aJU=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
@@ -1265,6 +1304,7 @@ github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86 h1:D6paGObi5Wud7xg83MaEFyjxQB1W5bz5d0IFppr+ymk=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c h1:bY6ktFuJkt+ZXkX0RChQch2FtHpWQLVS8Qo1YasiIVk=
@@ -1296,6 +1336,7 @@ github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI=
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
@@ -1316,15 +1357,24 @@ github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7/go.mod h1:IToEjHuttnUzwZI5KBSM/LOOW3qLbbrHOEfp3SbECGY=
github.com/protolambda/bls12-381-util v0.1.0/go.mod h1:cdkysJTRpeFeuUVx/TXGDQNMTiRAalk1vQw3TYTHcE4=
github.com/protolambda/messagediff v1.4.0/go.mod h1:LboJp0EwIbJsePYpzh5Op/9G1/4mIztMRYzzwR0dR2M=
github.com/protolambda/zrnt v0.32.2/go.mod h1:A0fezkp9Tt3GBLATSPIbuY4ywYESyAuc/FFmPKg8Lqs=
github.com/protolambda/ztyp v0.2.2/go.mod h1:9bYgKGqg3wJqT9ac1gI2hnVb0STQq7p/1lapqrqY1dU=
github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220714111606-acbb2962fb48/go.mod h1:4pWaT30XoEx1j8KNJf3TV+E3mQkaufn7mf+jRNb/Fuk=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0=
@@ -1355,9 +1405,14 @@ github.com/scroll-tech/da-codec v0.1.1-0.20241005172014-aca0bef21638 h1:2KIfClLB
github.com/scroll-tech/da-codec v0.1.1-0.20241005172014-aca0bef21638/go.mod h1:6jxEQvNc7GQKMSUi25PthAUY3WnZL8CN0yWivBgAXi0=
github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b h1:5H6V6ybacXFJ2ti+eFwtf+12Otufod6goxK6/u7Nu1k=
github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b/go.mod h1:48uxaqVgpD8ulH8p+nrBtfeLHZ9tX82bVVdPNkW3rPE=
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
github.com/scroll-tech/da-codec v0.1.3-0.20250227072756-a1482833595f h1:YYbhuUwjowqI4oyXtECRofck7Fyj18e1tcRjuQlZpJE=
github.com/scroll-tech/da-codec v0.1.3-0.20250227072756-a1482833595f/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
github.com/scroll-tech/da-codec v0.1.3-0.20250519114140-bfa7133d4ad1/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/da-codec v0.1.3-0.20250313120912-344f2d5e33e1/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/da-codec v0.1.3-0.20250609113414-f33adf0904bd h1:NUol+dPtZ8LzLYrP7CPq9tRI0jAhxFxrYNmKYrTQgKE=
github.com/scroll-tech/da-codec v0.1.3-0.20250609113414-f33adf0904bd/go.mod h1:gz5x3CsLy5htNTbv4PWRPBU9nSAujfx1U2XtFcXoFuk=
github.com/scroll-tech/da-codec v0.1.3-0.20250609154559-8935de62c148 h1:cyK1ifU2fRoMl8YWR9LOsZK4RvJnlG3RODgakj5I8VY=
github.com/scroll-tech/da-codec v0.1.3-0.20250609154559-8935de62c148/go.mod h1:gz5x3CsLy5htNTbv4PWRPBU9nSAujfx1U2XtFcXoFuk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod h1:swB5NSp8pKNDuYsTxfR08bHS6L56i119PBx8fxvV8Cs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=
@@ -1370,6 +1425,7 @@ github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPO
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636 h1:aSISeOcal5irEhJd1M+IrApc0PdcN7e7Aj4yuEnOrfQ=
github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=
@@ -1437,6 +1493,7 @@ github.com/valyala/fasthttp v1.40.0/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxn
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/vektah/gqlparser/v2 v2.4.5 h1:C02NsyEsL4TXJB7ndonqTfuQOL4XPIu0aAWugdmTgmc=
github.com/vektah/gqlparser/v2 v2.4.5/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0=
github.com/veraison/go-cose v1.0.0-rc.1 h1:4qA7dbFJGvt7gcqv5MCIyCQvN+NpHFPkW7do3EeDLb8=
@@ -1549,6 +1606,7 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
@@ -1556,6 +1614,7 @@ golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1570,6 +1629,7 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=
@@ -1598,6 +1658,7 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1637,6 +1698,7 @@ golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1658,7 +1720,6 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
@@ -1718,6 +1779,7 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
@@ -1750,6 +1812,7 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.2.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1800,8 +1863,10 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
@@ -1953,6 +2018,8 @@ gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNj
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=

View File

@@ -11,6 +11,7 @@ mock_abi:
rollup_bins: ## Builds the Rollup bins.
go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/
go build -o $(PWD)/build/bin/blob_uploader ./cmd/blob_uploader/
gas_oracle: ## Builds the gas_oracle bin
go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
@@ -18,6 +19,9 @@ gas_oracle: ## Builds the gas_oracle bin
rollup_relayer: ## Builds the rollup_relayer bin
go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/
blob_uploader: ## Builds the blob_uploader bin
go build -o $(PWD)/build/bin/blob_uploader ./cmd/blob_uploader/
test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...

View File

@@ -0,0 +1,94 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/observability"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/blob_uploader"
)
var app *cli.App
func init() {
// Set up blob-uploader app info.
app = cli.NewApp()
app.Action = action
app.Name = "blob-uploader"
app.Usage = "The Scroll Blob Uploader"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
// Init db connection
db, err := database.InitDB(cfg.DBConfig)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
cancel()
if err = database.CloseDB(db); err != nil {
log.Crit("failed to close db connection", "error", err)
}
}()
registry := prometheus.DefaultRegisterer
observability.Server(ctx, db)
// sanity check config
if cfg.L2Config.BlobUploaderConfig == nil {
log.Crit("cfg.L2Config.BlobUploaderConfig must not be nil")
}
blobUploader, err := blob_uploader.NewBlobUploader(ctx.Context, db, cfg.L2Config.BlobUploaderConfig, registry)
if err != nil {
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
}
go utils.Loop(subCtx, 2*time.Second, blobUploader.UploadBlobToS3)
// Finished starting all blob-uploader functions.
log.Info("Started blob-uploader successfully", "version", version.Version)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run blob uploader cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
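A hypothetical way to build and start the new service, assuming the shared config-file flag provided by `utils.CommonFlags` (the flag name is not shown in this diff):
make blob_uploader
./build/bin/blob_uploader --config ./conf/config.json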

View File

@@ -0,0 +1,7 @@
package main
import "scroll-tech/rollup/cmd/blob_uploader/app"
func main() {
app.Run()
}

View File

@@ -94,16 +94,27 @@
"propose_interval_milliseconds": 100,
"max_block_num_per_chunk": 100,
"max_l2_gas_per_chunk": 20000000,
"chunk_timeout_sec": 300
"chunk_timeout_sec": 300,
"max_uncompressed_batch_bytes_size": 4194304
},
"batch_proposer_config": {
"propose_interval_milliseconds": 1000,
"batch_timeout_sec": 300,
"max_chunks_per_batch": 45
"max_chunks_per_batch": 45,
"max_uncompressed_batch_bytes_size": 4194304
},
"bundle_proposer_config": {
"max_batch_num_per_bundle": 20,
"bundle_timeout_sec": 36000
},
"blob_uploader_config": {
"start_batch": 0,
"aws_s3_config": {
"bucket": "blob-data",
"region": "us-west-2",
"access_key": "ACCESSKEY",
"secret_key": "SECRETKEY"
}
}
},
"db_config": {

View File

@@ -4,6 +4,10 @@ go 1.22
require (
github.com/agiledragon/gomonkey/v2 v2.12.0
github.com/aws/aws-sdk-go-v2 v1.36.3
github.com/aws/aws-sdk-go-v2/config v1.29.14
github.com/aws/aws-sdk-go-v2/credentials v1.17.67
github.com/aws/aws-sdk-go-v2/service/s3 v1.80.0
github.com/consensys/gnark-crypto v0.16.0
github.com/crate-crypto/go-kzg-4844 v1.1.0
github.com/gin-gonic/gin v1.9.1
@@ -11,8 +15,8 @@ require (
github.com/holiman/uint256 v1.3.2
github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/da-codec v0.1.3-0.20250519114140-bfa7133d4ad1
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7
github.com/smartystreets/goconvey v1.8.0
github.com/spf13/viper v1.19.0
github.com/stretchr/testify v1.10.0
@@ -22,6 +26,20 @@ require (
require (
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect
github.com/aws/smithy-go v1.22.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.20.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect

View File

@@ -8,6 +8,42 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14=
github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM=
github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g=
github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM=
github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2 h1:BCG7DCXEXpNCcpwCxg1oi9pkJWH2+eZzTn9MY56MbVw=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.80.0 h1:fV4XIU5sn/x8gjRouoJpDVHj+ExJaUk4prYF+eb6qTs=
github.com/aws/aws-sdk-go-v2/service/s3 v1.80.0/go.mod h1:qbn305Je/IofWBJ4bJz/Q7pDEtnnoInw/dGt71v6rHE=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
@@ -249,10 +285,10 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/scroll-tech/da-codec v0.1.3-0.20250519114140-bfa7133d4ad1 h1:6aKqJSal+QVdB5HMWMs0JTbAIZ6/iAHJx9qizz0w9dU=
github.com/scroll-tech/da-codec v0.1.3-0.20250519114140-bfa7133d4ad1/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6 h1:vb2XLvQwCf+F/ifP6P/lfeiQrHY6+Yb/E3R4KHXLqSE=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7 h1:1rN1qocsQlOyk1VCpIEF1J5pfQbLAi1pnMZSLQS37jQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250626110859-cc9a1dd82de7/go.mod h1:pDCZ4iGvEGmdIe4aSAGBrb7XSrKEML6/L/wEMmNxOdk=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=

View File

@@ -24,21 +24,25 @@ type L2Config struct {
BatchProposerConfig *BatchProposerConfig `json:"batch_proposer_config"`
// The bundle_proposer config
BundleProposerConfig *BundleProposerConfig `json:"bundle_proposer_config"`
// The blob_uploader config
BlobUploaderConfig *BlobUploaderConfig `json:"blob_uploader_config"`
}
// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
MaxBlockNumPerChunk uint64 `json:"max_block_num_per_chunk"`
MaxL2GasPerChunk uint64 `json:"max_l2_gas_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
MaxBlockNumPerChunk uint64 `json:"max_block_num_per_chunk"`
MaxL2GasPerChunk uint64 `json:"max_l2_gas_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"`
}
// BatchProposerConfig loads batch_proposer configuration items.
type BatchProposerConfig struct {
ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
MaxChunksPerBatch int `json:"max_chunks_per_batch"`
ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
MaxChunksPerBatch int `json:"max_chunks_per_batch"`
MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"`
}
// BundleProposerConfig loads bundle_proposer configuration items.
@@ -46,3 +50,17 @@ type BundleProposerConfig struct {
MaxBatchNumPerBundle uint64 `json:"max_batch_num_per_bundle"`
BundleTimeoutSec uint64 `json:"bundle_timeout_sec"`
}
// BlobUploaderConfig loads blob_uploader configuration items.
type BlobUploaderConfig struct {
StartBatch uint64 `json:"start_batch"`
AWSS3Config *AWSS3Config `json:"aws_s3_config"`
}
// AWSS3Config loads s3_uploader configuration items.
type AWSS3Config struct {
Bucket string `json:"bucket"`
Region string `json:"region"`
AccessKey string `json:"access_key"`
SecretKey string `json:"secret_key"`
}
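For orientation, here is a minimal sketch of how this new section could be decoded from a rollup config file (every value below is an illustrative placeholder, not a real deployment setting):
package main
import (
	"encoding/json"
	"fmt"
	"scroll-tech/rollup/internal/config"
)
func main() {
	// hypothetical blob_uploader_config fragment; bucket, region and keys are made up
	raw := []byte(`{
		"start_batch": 100,
		"aws_s3_config": {
			"bucket": "example-scroll-blobs",
			"region": "us-east-1",
			"access_key": "<ACCESS_KEY>",
			"secret_key": "<SECRET_KEY>"
		}
	}`)
	var cfg config.BlobUploaderConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.StartBatch, cfg.AWSS3Config.Bucket)
}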


@@ -0,0 +1 @@
package blob_uploader


@@ -0,0 +1,251 @@
package blob_uploader
import (
"context"
"errors"
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/utils"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
)
// BlobUploader is responsible for uploading blobs to blob storage services.
type BlobUploader struct {
ctx context.Context
cfg *config.BlobUploaderConfig
s3Uploader *S3Uploader
blobUploadOrm *orm.BlobUpload
batchOrm *orm.Batch
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
metrics *blobUploaderMetrics
}
// NewBlobUploader will return a new instance of BlobUploader
func NewBlobUploader(ctx context.Context, db *gorm.DB, cfg *config.BlobUploaderConfig, reg prometheus.Registerer) (*BlobUploader, error) {
var s3Uploader *S3Uploader
var err error
if cfg.AWSS3Config != nil {
s3Uploader, err = NewS3Uploader(cfg.AWSS3Config)
if err != nil {
return nil, fmt.Errorf("new blob uploader failed, err: %w", err)
}
}
blobUploader := &BlobUploader{
ctx: ctx,
cfg: cfg,
s3Uploader: s3Uploader,
batchOrm: orm.NewBatch(db),
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
blobUploadOrm: orm.NewBlobUpload(db),
}
blobUploader.metrics = initBlobUploaderMetrics(reg)
return blobUploader, nil
}
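// UploadBlobToS3 picks the first committed but not yet uploaded batch, reconstructs its blob payload, and uploads it to S3 keyed by the EIP-4844 versioned blob hash.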
func (b *BlobUploader) UploadBlobToS3() {
// skip upload if s3 uploader is not configured
if b.s3Uploader == nil {
return
}
// get the first un-uploaded batch from the database, ascending by batch index.
dbBatch, err := b.GetFirstUnuploadedBatchByPlatform(b.ctx, b.cfg.StartBatch, types.BlobStoragePlatformS3)
if err != nil {
log.Error("Failed to fetch unuploaded batch", "err", err)
return
}
// nothing to do if we don't have any pending batches
if dbBatch == nil {
log.Debug("no pending batches to upload")
return
}
// construct blob
codecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
blob, err := b.constructBlobCodec(dbBatch)
if err != nil {
log.Error("failed to construct constructBlobCodec payload ", "codecVersion", codecVersion, "batch index", dbBatch.Index, "err", err)
b.metrics.rollupBlobUploaderUploadToS3FailedTotal.Inc()
if updateErr := b.blobUploadOrm.InsertOrUpdateBlobUpload(b.ctx, dbBatch.Index, dbBatch.Hash, types.BlobStoragePlatformS3, types.BlobUploadStatusFailed); updateErr != nil {
log.Error("failed to update blob upload status to failed", "batch index", dbBatch.Index, "err", updateErr)
}
return
}
// calculate versioned blob hash
versionedBlobHash, err := utils.CalculateVersionedBlobHash(*blob)
if err != nil {
log.Error("failed to calculate versioned blob hash", "batch index", dbBatch.Index, "err", err)
b.metrics.rollupBlobUploaderUploadToS3FailedTotal.Inc()
// update status to failed
if updateErr := b.blobUploadOrm.InsertOrUpdateBlobUpload(b.ctx, dbBatch.Index, dbBatch.Hash, types.BlobStoragePlatformS3, types.BlobUploadStatusFailed); updateErr != nil {
log.Error("failed to update blob upload status to failed", "batch index", dbBatch.Index, "err", updateErr)
}
return
}
// upload blob data to s3 bucket
key := common.BytesToHash(versionedBlobHash[:]).Hex()
err = b.s3Uploader.UploadData(b.ctx, blob[:], key)
if err != nil {
log.Error("failed to upload blob data to AWS S3", "batch index", dbBatch.Index, "versioned blob hash", key, "err", err)
b.metrics.rollupBlobUploaderUploadToS3FailedTotal.Inc()
// update status to failed
if updateErr := b.blobUploadOrm.InsertOrUpdateBlobUpload(b.ctx, dbBatch.Index, dbBatch.Hash, types.BlobStoragePlatformS3, types.BlobUploadStatusFailed); updateErr != nil {
log.Error("failed to update blob upload status to failed", "batch index", dbBatch.Index, "err", updateErr)
}
return
}
// update status to uploaded
if err = b.blobUploadOrm.InsertOrUpdateBlobUpload(b.ctx, dbBatch.Index, dbBatch.Hash, types.BlobStoragePlatformS3, types.BlobUploadStatusUploaded); err != nil {
log.Error("failed to update blob upload status to uploaded", "batch index", dbBatch.Index, "err", err)
b.metrics.rollupBlobUploaderUploadToS3FailedTotal.Inc()
return
}
b.metrics.rollupBlobUploaderUploadToS3SuccessTotal.Inc()
log.Info("Successfully uploaded blob to S3", "batch index", dbBatch.Index, "versioned blob hash", key)
}
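The versioned-hash step above delegates to utils.CalculateVersionedBlobHash. A minimal sketch of what such a helper computes under EIP-4844, assuming recent go-ethereum kzg4844 signatures (the real implementation lives in scroll-tech/common/utils and may differ):
import (
	"crypto/sha256"
	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)
// versionedBlobHashSketch returns sha256(KZG commitment) with the first
// byte replaced by the version prefix 0x01, as specified by EIP-4844.
func versionedBlobHashSketch(blob kzg4844.Blob) ([32]byte, error) {
	commitment, err := kzg4844.BlobToCommitment(&blob)
	if err != nil {
		return [32]byte{}, err
	}
	return kzg4844.CalcBlobHashV1(sha256.New(), &commitment), nil
}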
func (b *BlobUploader) constructBlobCodec(dbBatch *orm.Batch) (*kzg4844.Blob, error) {
dbChunks, err := b.chunkOrm.GetChunksInRange(b.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
if err != nil {
return nil, fmt.Errorf("failed to get chunks in range: %w", err)
}
// check codec version
for _, dbChunk := range dbChunks {
if dbBatch.CodecVersion != dbChunk.CodecVersion {
return nil, fmt.Errorf("batch codec version is different from chunk codec version, batch index: %d, chunk index: %d, batch codec version: %d, chunk codec version: %d", dbBatch.Index, dbChunk.Index, dbBatch.CodecVersion, dbChunk.CodecVersion)
}
}
chunks := make([]*encoding.Chunk, len(dbChunks))
var allBlocks []*encoding.Block // collect blocks for CodecV7
for i, c := range dbChunks {
blocks, getErr := b.l2BlockOrm.GetL2BlocksInRange(b.ctx, c.StartBlockNumber, c.EndBlockNumber)
if getErr != nil {
return nil, fmt.Errorf("failed to get blocks in range for batch %d: %w", dbBatch.Index, getErr)
}
chunks[i] = &encoding.Chunk{Blocks: blocks}
allBlocks = append(allBlocks, blocks...)
}
var encodingBatch *encoding.Batch
codecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
switch codecVersion {
case encoding.CodecV0:
return nil, fmt.Errorf("codec version 0 doesn't support blob, batch index: %d", dbBatch.Index)
case encoding.CodecV1, encoding.CodecV2, encoding.CodecV3, encoding.CodecV4, encoding.CodecV5, encoding.CodecV6:
encodingBatch = &encoding.Batch{
Index: dbBatch.Index,
TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
ParentBatchHash: common.HexToHash(dbBatch.ParentBatchHash),
Chunks: chunks,
}
case encoding.CodecV7:
encodingBatch = &encoding.Batch{
Index: dbBatch.Index,
ParentBatchHash: common.HexToHash(dbBatch.ParentBatchHash),
Chunks: chunks,
PrevL1MessageQueueHash: common.HexToHash(dbBatch.PrevL1MessageQueueHash),
PostL1MessageQueueHash: common.HexToHash(dbBatch.PostL1MessageQueueHash),
Blocks: allBlocks,
}
default:
return nil, fmt.Errorf("unsupported codec version, batch index: %d, batch codec version: %d", dbBatch.Index, codecVersion)
}
codec, err := encoding.CodecFromVersion(codecVersion)
if err != nil {
return nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
}
daBatch, err := codec.NewDABatch(encodingBatch)
if err != nil {
return nil, fmt.Errorf("failed to create DA batch: %w", err)
}
if daBatch.Blob() == nil {
return nil, fmt.Errorf("codec version doesn't support blob, batch index: %d, batch codec version: %d", dbBatch.Index, dbBatch.CodecVersion)
}
return daBatch.Blob(), nil
}
// GetFirstUnuploadedBatchByPlatform retrieves the first batch that hasn't been uploaded to the corresponding blob storage service yet.
// The batch must have a commit_tx_hash, i.e. it must already be committed.
func (b *BlobUploader) GetFirstUnuploadedBatchByPlatform(ctx context.Context, startBatch uint64, platform types.BlobStoragePlatform) (*orm.Batch, error) {
batchIndex, err := b.blobUploadOrm.GetNextBatchIndexToUploadByPlatform(ctx, startBatch, platform)
if err != nil {
return nil, err
}
var batch *orm.Batch
for {
var err error
batch, err = b.batchOrm.GetBatchByIndex(ctx, batchIndex)
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
log.Debug("got batch not proposed for blob uploading", "batch_index", batchIndex, "platform", platform.String())
return nil, nil
}
return nil, err
}
// check whether the parent batch has been uploaded;
// if not, a batch revert happened and we need to fall back to uploading the previous batch.
// skip the check when the parent batch is the genesis batch or the batch is the configured start batch.
if batchIndex <= 1 || batchIndex == startBatch {
break
}
fields := map[string]interface{}{
"batch_index = ?": batchIndex - 1,
"batch_hash = ?": batch.ParentBatchHash,
"platform = ?": platform,
"status = ?": types.BlobUploadStatusUploaded,
}
blobUpload, err := b.blobUploadOrm.GetBlobUploads(ctx, fields, nil, 1)
if err != nil {
return nil, err
}
if len(blobUpload) == 0 {
batchIndex--
continue
}
break
}
if len(batch.CommitTxHash) == 0 {
log.Debug("got batch not committed for blob uploading", "batch_index", batchIndex, "platform", platform.String())
return nil, nil
}
return batch, nil
}
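To make the fallback concrete: suppose start_batch is 100 and GetNextBatchIndexToUploadByPlatform reports 120 as the next candidate. If batch 119 was reverted and re-proposed, batch 120's ParentBatchHash no longer matches any uploaded record at index 119, so the loop decrements to 119 (and further back if needed) and the uploader resumes from there. The walk stops at index 1, at start_batch, or at the first parent already marked as uploaded.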


@@ -0,0 +1,34 @@
package blob_uploader
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type blobUploaderMetrics struct {
rollupBlobUploaderUploadToS3SuccessTotal prometheus.Counter
rollupBlobUploaderUploadToS3FailedTotal prometheus.Counter
}
var (
initBlobUploaderMetricsOnce sync.Once
blobUploaderMetric *blobUploaderMetrics
)
func initBlobUploaderMetrics(reg prometheus.Registerer) *blobUploaderMetrics {
initBlobUploaderMetricsOnce.Do(func() {
blobUploaderMetric = &blobUploaderMetrics{
rollupBlobUploaderUploadToS3SuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_blob_uploader_upload_to_s3_success_total",
Help: "The total number of upload blob to S3 runs success total",
}),
rollupBlobUploaderUploadToS3FailedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_blob_uploader_upload_to_s3_failed_total",
Help: "The total number of upload blob to S3 runs failed total",
}),
}
})
return blobUploaderMetric
}
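The sync.Once guard matters here: promauto.With(reg).NewCounter panics if a collector with the same name is registered twice, so constructing more than one BlobUploader against the same registry must reuse the singleton rather than re-register the counters.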


@@ -0,0 +1,66 @@
package blob_uploader
import (
"bytes"
"context"
"fmt"
"time"
"scroll-tech/rollup/internal/config"
"github.com/aws/aws-sdk-go-v2/aws"
awsconfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
)
// S3Uploader is responsible for uploading data to AWS S3.
type S3Uploader struct {
client *s3.Client
bucket string
region string
timeout time.Duration
}
func NewS3Uploader(cfg *config.AWSS3Config) (*S3Uploader, error) {
// load AWS config
var opts []func(*awsconfig.LoadOptions) error
opts = append(opts, awsconfig.WithRegion(cfg.Region))
// if AccessKey && SecretKey provided, use it
if cfg.AccessKey != "" && cfg.SecretKey != "" {
opts = append(opts, awsconfig.WithCredentialsProvider(
credentials.NewStaticCredentialsProvider(
cfg.AccessKey,
cfg.SecretKey,
"",
)),
)
}
awsCfg, err := awsconfig.LoadDefaultConfig(context.Background(), opts...)
if err != nil {
return nil, fmt.Errorf("failed to load default config: %w", err)
}
return &S3Uploader{
client: s3.NewFromConfig(awsCfg),
bucket: cfg.Bucket,
region: cfg.Region,
timeout: 30 * time.Second,
}, nil
}
// UploadData uploads data to the S3 bucket
func (u *S3Uploader) UploadData(ctx context.Context, data []byte, objectKey string) error {
uploadCtx, cancel := context.WithTimeout(ctx, u.timeout)
defer cancel()
_, err := u.client.PutObject(uploadCtx, &s3.PutObjectInput{
Bucket: aws.String(u.bucket),
Key: aws.String(objectKey),
Body: bytes.NewReader(data),
ContentType: aws.String("application/octet-stream"),
})
return err
}
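A hypothetical end-to-end use of the two pieces above; bucket and region are placeholders, and the blob and versionedBlobHash parameters stand in for the values produced in UploadBlobToS3:
func uploadOneBlob(ctx context.Context, cfg *config.AWSS3Config, blob *kzg4844.Blob, versionedBlobHash [32]byte) error {
	// leaving AccessKey/SecretKey empty in cfg makes LoadDefaultConfig fall back
	// to the standard AWS credential chain (env vars, shared config, IAM role)
	uploader, err := NewS3Uploader(cfg)
	if err != nil {
		return fmt.Errorf("failed to create S3 uploader: %w", err)
	}
	key := common.BytesToHash(versionedBlobHash[:]).Hex()
	return uploader.UploadData(ctx, blob[:], key)
}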


@@ -179,13 +179,13 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
return
}
hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, data, nil)
txHash, _, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, data, nil)
if err != nil {
log.Error("Failed to send gas oracle update tx to layer2", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "err", err)
return
}
err = r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
err = r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, txHash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
return
@@ -195,7 +195,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
r.lastBlobBaseFee = blobBaseFee
r.metrics.rollupL1RelayerLatestBaseFee.Set(float64(r.lastBaseFee))
r.metrics.rollupL1RelayerLatestBlobBaseFee.Set(float64(r.lastBlobBaseFee))
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee, "blobBaseFee", blobBaseFee)
log.Info("Update l1 base fee", "txHash", txHash.String(), "baseFee", baseFee, "blobBaseFee", blobBaseFee)
}
}
}
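The rename from hash to txHash tracks a wider signature change: sender.SendTransaction now returns three values, (txHash, blobBaseFee, err), instead of two. Call sites that don't price blobs, such as the gas oracle here and finalizeBundle below, discard the second value with _, while the commitBatches path in the L2 relayer keeps it to record the blob base fee the submission was priced at.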


@@ -104,10 +104,15 @@ type StrategyParams struct {
}
// bestParams maps the 2h/5h/12h submission windows to their best-performing rules.
// Timeouts are in seconds: 2, 5 and 12 hours, plus the same values + 20 minutes to
// account for the time it currently takes to create a batch, since time is measured
// from block creation.
var bestParams = map[uint64]StrategyParams{
2 * 3600: {BaselineType: PctMin, BaselineParam: 0.10, Gamma: 0.4, Beta: 8, RelaxType: Exponential},
5 * 3600: {BaselineType: PctMin, BaselineParam: 0.30, Gamma: 0.6, Beta: 20, RelaxType: Sigmoid},
12 * 3600: {BaselineType: PctMin, BaselineParam: 0.50, Gamma: 0.5, Beta: 20, RelaxType: Sigmoid},
7200: {BaselineType: PctMin, BaselineParam: 0.10, Gamma: 0.4, Beta: 8, RelaxType: Exponential},
8400: {BaselineType: PctMin, BaselineParam: 0.10, Gamma: 0.4, Beta: 8, RelaxType: Exponential},
18000: {BaselineType: PctMin, BaselineParam: 0.30, Gamma: 0.6, Beta: 20, RelaxType: Sigmoid},
19200: {BaselineType: PctMin, BaselineParam: 0.30, Gamma: 0.6, Beta: 20, RelaxType: Sigmoid},
43200: {BaselineType: PctMin, BaselineParam: 0.50, Gamma: 0.5, Beta: 20, RelaxType: Sigmoid},
44400: {BaselineType: PctMin, BaselineParam: 0.50, Gamma: 0.5, Beta: 20, RelaxType: Sigmoid},
}
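A practical consequence of the literal keys: only these exact values are valid for BatchSubmission.TimeoutSec. As the constructor change below shows, any other setting, for example 3600 for one hour, now fails fast in NewLayer2Relayer instead of silently falling back to a zero-valued StrategyParams.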
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
@@ -147,6 +152,11 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
return nil, fmt.Errorf("invalid service type for l2_relayer: %v", serviceType)
}
strategy, ok := bestParams[uint64(cfg.BatchSubmission.TimeoutSec)]
if !ok {
return nil, fmt.Errorf("invalid timeout for batch submission: %v", cfg.BatchSubmission.TimeoutSec)
}
layer2Relayer := &Layer2Relayer{
ctx: ctx,
db: db,
@@ -164,7 +174,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
l1RollupABI: bridgeAbi.ScrollChainABI,
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
batchStrategy: bestParams[uint64(cfg.BatchSubmission.TimeoutSec)],
batchStrategy: strategy,
cfg: cfg,
chainCfg: chainCfg,
}
@@ -210,7 +220,7 @@ func (r *Layer2Relayer) initializeGenesis() error {
chunk := &encoding.Chunk{Blocks: []*encoding.Block{{Header: genesis}}}
err = r.db.Transaction(func(dbTX *gorm.DB) error {
if err = r.l2BlockOrm.InsertL2Blocks(r.ctx, chunk.Blocks); err != nil {
if err = r.l2BlockOrm.InsertL2Blocks(r.ctx, chunk.Blocks, dbTX); err != nil {
return fmt.Errorf("failed to insert genesis block: %v", err)
}
@@ -271,11 +281,11 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
}
// submit genesis batch to L1 rollup contract
txHash, err := r.commitSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, calldata, nil)
txHash, _, err := r.commitSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, calldata, nil)
if err != nil {
return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
}
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash.String(), "batchHash", batchHash)
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchHash", batchHash)
// wait for confirmation
// we assume that no other transactions are sent before initializeGenesis completes
@@ -319,8 +329,15 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}
// nothing to do if we don't have any pending batches
if len(dbBatches) == 0 {
return
}
// if the backlog outgrows the max size, force-submit enough of the oldest batches
backlogCount, err := r.batchOrm.GetFailedAndPendingBatchesCount(r.ctx)
if err != nil {
log.Error("Failed to fetch failed and pending batches count", "err", err)
return
}
r.metrics.rollupL2RelayerBacklogCounts.Set(float64(backlogCount))
@@ -328,9 +345,15 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
var forceSubmit bool
oldestBatchTimestamp := dbBatches[0].CreatedAt
startChunk, err := r.chunkOrm.GetChunkByIndex(r.ctx, dbBatches[0].StartChunkIndex)
if err != nil {
log.Error("failed to get first chunk", "err", err, "batch index", dbBatches[0].Index, "chunk index", dbBatches[0].StartChunkIndex)
return
}
oldestBlockTimestamp := time.Unix(int64(startChunk.StartBlockTime), 0)
// if the batch with the oldest index is too old, we force submit all batches that we have so far in the next step
if r.cfg.BatchSubmission.TimeoutSec > 0 && time.Since(oldestBatchTimestamp) > time.Duration(r.cfg.BatchSubmission.TimeoutSec)*time.Second {
if r.cfg.BatchSubmission.TimeoutSec > 0 && time.Since(oldestBlockTimestamp) > time.Duration(r.cfg.BatchSubmission.TimeoutSec)*time.Second {
forceSubmit = true
}
@@ -341,10 +364,12 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
if !forceSubmit {
// check if we should skip submitting the batch based on the fee target
skip, err := r.skipSubmitByFee(oldestBatchTimestamp)
skip, err := r.skipSubmitByFee(oldestBlockTimestamp, r.metrics)
// return if not hitting target price
if skip {
log.Debug("Skipping batch submission", "reason", err)
log.Debug("Skipping batch submission", "first batch index", dbBatches[0].Index, "backlog count", backlogCount, "reason", err)
log.Debug("first batch index", dbBatches[0].Index)
log.Debug("backlog count", backlogCount)
return
}
if err != nil {
@@ -352,7 +377,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
}
}
var batchesToSubmit []*dbBatchWithChunksAndParent
var batchesToSubmit []*dbBatchWithChunks
for i, dbBatch := range dbBatches {
var dbChunks []*orm.Chunk
var dbParentBatch *orm.Batch
@@ -408,10 +433,9 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
}
if batchesToSubmitLen < r.cfg.BatchSubmission.MaxBatches {
batchesToSubmit = append(batchesToSubmit, &dbBatchWithChunksAndParent{
Batch: dbBatch,
Chunks: dbChunks,
ParentBatch: dbParentBatch,
batchesToSubmit = append(batchesToSubmit, &dbBatchWithChunks{
Batch: dbBatch,
Chunks: dbChunks,
})
}
@@ -427,7 +451,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
}
if forceSubmit {
log.Info("Forcing submission of batches due to timeout", "batch index", batchesToSubmit[0].Batch.Index, "created at", batchesToSubmit[0].Batch.CreatedAt)
log.Info("Forcing submission of batches due to timeout", "batch index", batchesToSubmit[0].Batch.Index, "first block created at", oldestBlockTimestamp)
}
// We have at least 1 batch to commit
@@ -441,7 +465,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
codecVersion := encoding.CodecVersion(firstBatch.CodecVersion)
switch codecVersion {
case encoding.CodecV7:
case encoding.CodecV7, encoding.CodecV8:
calldata, blobs, maxBlockHeight, totalGasUsed, err = r.constructCommitBatchPayloadCodecV7(batchesToSubmit, firstBatch, lastBatch)
if err != nil {
log.Error("failed to construct constructCommitBatchPayloadCodecV7 payload for V7", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "err", err)
@@ -452,7 +476,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}
txHash, err := r.commitSender.SendTransaction(r.contextIDFromBatches(batchesToSubmit), &r.cfg.RollupContractAddress, calldata, blobs)
txHash, blobBaseFee, err := r.commitSender.SendTransaction(r.contextIDFromBatches(codecVersion, batchesToSubmit), &r.cfg.RollupContractAddress, calldata, blobs)
if err != nil {
if errors.Is(err, sender.ErrTooManyPendingBlobTxs) {
r.metrics.rollupL2RelayerProcessPendingBatchErrTooManyPendingBlobTxsTotal.Inc()
@@ -492,32 +516,31 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
r.metrics.rollupL2RelayerCommitThroughput.Add(float64(totalGasUsed))
r.metrics.rollupL2RelayerProcessPendingBatchSuccessTotal.Add(float64(len(batchesToSubmit)))
r.metrics.rollupL2RelayerProcessBatchesPerTxCount.Set(float64(len(batchesToSubmit)))
r.metrics.rollupL2RelayerCommitLatency.Set(time.Since(oldestBlockTimestamp).Seconds())
r.metrics.rollupL2RelayerCommitPrice.Set(float64(blobBaseFee))
log.Info("Sent the commitBatches tx to layer1", "batches count", len(batchesToSubmit), "start index", firstBatch.Index, "start hash", firstBatch.Hash, "end index", lastBatch.Index, "end hash", lastBatch.Hash, "tx hash", txHash.String())
}
func (r *Layer2Relayer) contextIDFromBatches(batches []*dbBatchWithChunksAndParent) string {
contextIDs := []string{"v7"}
func (r *Layer2Relayer) contextIDFromBatches(codecVersion encoding.CodecVersion, batches []*dbBatchWithChunks) string {
contextIDs := []string{fmt.Sprintf("v%d", codecVersion)}
for _, batch := range batches {
contextIDs = append(contextIDs, batch.Batch.Hash)
}
return strings.Join(contextIDs, "-")
}
func (r *Layer2Relayer) batchHashesFromContextID(contextID string) []string {
if strings.HasPrefix(contextID, "v7-") {
return strings.Split(contextID, "-")[1:]
parts := strings.SplitN(contextID, "-", 2)
if len(parts) == 2 && strings.HasPrefix(parts[0], "v") {
return strings.Split(parts[1], "-")
}
return []string{contextID}
}
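An illustrative round trip, with shortened placeholder hashes: contextIDFromBatches(encoding.CodecV8, batches) produces "v8-0xaaa-0xbbb", and batchHashesFromContextID("v8-0xaaa-0xbbb") yields ["0xaaa", "0xbbb"]. A contextID without a version prefix, such as a legacy "finalizeBundle-…" ID, is returned unchanged as a single-element slice.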
type dbBatchWithChunksAndParent struct {
Batch *orm.Batch
Chunks []*orm.Chunk
ParentBatch *orm.Batch
type dbBatchWithChunks struct {
Batch *orm.Batch
Chunks []*orm.Chunk
}
// ProcessPendingBundles submits proof to layer 1 rollup contract
@@ -666,7 +689,7 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
var calldata []byte
switch encoding.CodecVersion(bundle.CodecVersion) {
case encoding.CodecV7:
case encoding.CodecV7, encoding.CodecV8:
calldata, err = r.constructFinalizeBundlePayloadCodecV7(dbBatch, endChunk, aggProof)
if err != nil {
return fmt.Errorf("failed to construct finalizeBundle payload codecv7, bundle index: %v, last batch index: %v, err: %w", bundle.Index, dbBatch.Index, err)
@@ -675,7 +698,7 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
return fmt.Errorf("unsupported codec version in finalizeBundle, bundle index: %v, version: %d", bundle.Index, bundle.CodecVersion)
}
txHash, err := r.finalizeSender.SendTransaction("finalizeBundle-"+bundle.Hash, &r.cfg.RollupContractAddress, calldata, nil)
txHash, _, err := r.finalizeSender.SendTransaction("finalizeBundle-"+bundle.Hash, &r.cfg.RollupContractAddress, calldata, nil)
if err != nil {
log.Error("finalizeBundle in layer1 failed", "with proof", withProof, "index", bundle.Index,
"start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex,
@@ -871,7 +894,7 @@ func (r *Layer2Relayer) handleL2RollupRelayerConfirmLoop(ctx context.Context) {
}
}
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*dbBatchWithChunksAndParent, firstBatch, lastBatch *orm.Batch) ([]byte, []*kzg4844.Blob, uint64, uint64, error) {
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*dbBatchWithChunks, firstBatch, lastBatch *orm.Batch) ([]byte, []*kzg4844.Blob, uint64, uint64, error) {
var maxBlockHeight uint64
var totalGasUsed uint64
blobs := make([]*kzg4844.Blob, 0, len(batchesToSubmit))
@@ -902,7 +925,7 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*db
encodingBatch := &encoding.Batch{
Index: b.Batch.Index,
ParentBatchHash: common.HexToHash(b.ParentBatch.Hash),
ParentBatchHash: common.HexToHash(b.Batch.ParentBatchHash),
PrevL1MessageQueueHash: common.HexToHash(b.Batch.PrevL1MessageQueueHash),
PostL1MessageQueueHash: common.HexToHash(b.Batch.PostL1MessageQueueHash),
Blocks: batchBlocks,
@@ -1074,7 +1097,7 @@ func calculateTargetPrice(windowSec uint64, strategy StrategyParams, firstTime t
// skipSubmitByFee returns (true, nil) when submission should be skipped right now
// because the blob fee is above the target and the timeout window hasn't yet elapsed.
// Otherwise returns (false, err)
func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time) (bool, error) {
func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time, metrics *l2RelayerMetrics) (bool, error) {
windowSec := uint64(r.cfg.BatchSubmission.TimeoutSec)
hist, err := r.fetchBlobFeeHistory(windowSec)
@@ -1089,6 +1112,11 @@ func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time) (bool, error) {
target := calculateTargetPrice(windowSec, r.batchStrategy, oldest, hist)
current := hist[len(hist)-1]
currentFloat, _ := current.Float64()
targetFloat, _ := target.Float64()
metrics.rollupL2RelayerCurrentBlobPrice.Set(currentFloat)
metrics.rollupL2RelayerTargetBlobPrice.Set(targetFloat)
// if current fee > target and still inside the timeout window, skip
if current.Cmp(target) > 0 && time.Since(oldest) < time.Duration(windowSec)*time.Second {
return true, fmt.Errorf(

Some files were not shown because too many files have changed in this diff.