Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-11 23:18:07 -05:00

Compare commits: 1 commit, feat/valid ... v4.5.47 (SHA1 9e520e7769)
.github/workflows/common.yml (vendored, 2 changes)

@@ -29,7 +29,7 @@ jobs:
     steps:
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2025-08-18
+          toolchain: nightly-2025-02-14
           override: true
           components: rustfmt, clippy
       - name: Install Go
.github/workflows/coordinator.yml (vendored, 2 changes)

@@ -33,7 +33,7 @@ jobs:
     steps:
      - uses: actions-rs/toolchain@v1
        with:
-          toolchain: nightly-2025-08-18
+          toolchain: nightly-2025-02-14
          override: true
          components: rustfmt, clippy
      - name: Install Go
.github/workflows/intermediate-docker.yml (vendored, 6 changes)

@@ -22,9 +22,11 @@ on:
         required: true
         type: choice
         options:
           - nightly-2023-12-03
           - nightly-2022-12-10
           - 1.86.0
-          - nightly-2025-08-18
-        default: "nightly-2025-08-18"
+          - nightly-2025-02-14
+        default: "nightly-2023-12-03"
       PYTHON_VERSION:
         description: "Python version"
         required: false
Cargo.lock (generated, 2196 changes): diff suppressed because it is too large.
Cargo.toml (43 changes)

@@ -14,16 +14,15 @@ edition = "2021"
 homepage = "https://scroll.io"
 readme = "README.md"
 repository = "https://github.com/scroll-tech/scroll"
-version = "4.5.47"
+version = "4.5.8"

 [workspace.dependencies]
-scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "5c361ad" }
-scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "5c361ad" }
-scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "5c361ad" }
+scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "ad0efe7" }
+scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "ad0efe7" }
+scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "ad0efe7" }

-sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "master", features = ["scroll", "rkyv"] }
-sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "master" }
-sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "master", features = ["scroll"] }
+sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/openvm-1.3", features = ["scroll"] }
+sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/openvm-1.3" }

 metrics = "0.23.0"
 metrics-util = "0.17"

@@ -31,8 +30,7 @@ metrics-tracing-context = "0.16.0"

 anyhow = "1.0"
 alloy = { version = "1", default-features = false }
-alloy-primitives = { version = "1.3", default-features = false, features = ["tiny-keccak"] }
-alloy-sol-types = { version = "1.3", default-features = false }
+alloy-primitives = { version = "1.2", default-features = false, features = ["tiny-keccak"] }
 # also use this to trigger "serde" feature for primitives
 alloy-serde = { version = "1", default-features = false }

@@ -48,20 +46,21 @@ once_cell = "1.20"
 base64 = "0.22"

 [patch.crates-io]
-revm = { git = "https://github.com/scroll-tech/revm" }
-revm-bytecode = { git = "https://github.com/scroll-tech/revm" }
-revm-context = { git = "https://github.com/scroll-tech/revm" }
-revm-context-interface = { git = "https://github.com/scroll-tech/revm" }
-revm-database = { git = "https://github.com/scroll-tech/revm" }
-revm-database-interface = { git = "https://github.com/scroll-tech/revm" }
-revm-handler = { git = "https://github.com/scroll-tech/revm" }
-revm-inspector = { git = "https://github.com/scroll-tech/revm" }
-revm-interpreter = { git = "https://github.com/scroll-tech/revm" }
-revm-precompile = { git = "https://github.com/scroll-tech/revm" }
-revm-primitives = { git = "https://github.com/scroll-tech/revm" }
-revm-state = { git = "https://github.com/scroll-tech/revm" }
+revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
+revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
+revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
+revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
+revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
+revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
+revm-handler = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
+revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
+revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
+revm-precompile = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
+revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
+revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }

-alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "feat/rkyv" }
-ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.15.0" }
+alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v1.2.0" }

 [profile.maxperf]
 inherits = "release"
common/version/version.go

@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )

-var tag = "v4.5.46"
+var tag = "v4.5.47"

 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
@@ -66,7 +66,6 @@ type AssetConfig struct {
 // VerifierConfig load zk verifier config.
 type VerifierConfig struct {
 	MinProverVersion string        `json:"min_prover_version"`
-	Features         string        `json:"features,omitempty"`
 	Verifiers        []AssetConfig `json:"verifiers"`
 }
@@ -140,10 +140,3 @@ func DumpVk(forkName, filePath string) error {

 	return nil
 }
-
-// Set dynamic feature flags that control libzkp runtime behavior
-func SetDynamicFeature(feats string) {
-	cFeats := goToCString(feats)
-	defer freeCString(cFeats)
-	C.set_dynamic_feature(cFeats)
-}
@@ -54,7 +54,4 @@ char* gen_wrapped_proof(char* proof_json, char* metadata, char* vk, size_t vk_le
 // Release memory allocated for a string returned by gen_wrapped_proof
 void release_string(char* string_ptr);

-void set_dynamic_feature(const char* feats);
-
 #endif /* LIBZKP_H */
@@ -67,9 +67,6 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
 		return nil, err
 	}

-	if cfg.Features != "" {
-		libzkp.SetDynamicFeature(cfg.Features)
-	}
 	libzkp.InitVerifier(string(configBytes))

 	v := &Verifier{
crates/gpu_override/.cargo/config.toml (new file, 45 lines)
@@ -0,0 +1,45 @@
+[patch."https://github.com/openvm-org/openvm.git"]
+openvm-build = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
+openvm-circuit = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
+openvm-continuations = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
+openvm-instructions ={ git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
+openvm-native-circuit = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
+openvm-native-compiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
+openvm-native-recursion = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
+openvm-native-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
+openvm-rv32im-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
+openvm-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
+openvm-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
+
+[patch."https://github.com/openvm-org/stark-backend.git"]
+openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
+openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
+
+[patch."https://github.com/Plonky3/Plonky3.git"]
+p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
+    "nightly-features",
+], tag = "v0.2.1" }
+p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
+p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
+p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
crates/gpu_override/Cargo.lock (generated, 2374 changes): diff suppressed because it is too large.
crates/gpu_override/Makefile (new file, 23 lines)
@@ -0,0 +1,23 @@
+.PHONY: build update clean
+
+ZKVM_COMMIT ?= freebuild
+PLONKY3_GPU_VERSION=$(shell ./print_plonky3gpu_version.sh | sed -n '2p')
+$(info PLONKY3_GPU_VERSION is ${PLONKY3_GPU_VERSION})
+
+GIT_REV ?= $(shell git rev-parse --short HEAD)
+GO_TAG ?= $(shell grep "var tag = " ../../common/version/version.go | cut -d "\"" -f2)
+ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION}
+$(info ZK_GPU_VERSION is ${ZK_VERSION})
+
+clean:
+	cargo clean -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock
+
+# build gpu prover, never touch lock file
+build:
+	GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock
+
+version:
+	echo ${GO_TAG}-${GIT_REV}-${ZK_VERSION}
+
+# update Cargo.lock while override config has been updated
+#update:
+#	GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock
crates/gpu_override/print_plonky3gpu_version.sh (new executable file, 10 lines)
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+higher_plonky3_item=`grep "plonky3-gpu" ./Cargo.lock | sort | uniq | awk -F "[#=]" '{print $3" "$4}' | sort -k 1 | tail -n 1`
+
+higher_version=`echo $higher_plonky3_item | awk '{print $1}'`
+
+higher_commit=`echo $higher_plonky3_item | cut -d ' ' -f2 | cut -c-7`
+
+echo "$higher_version"
+echo "$higher_commit"
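For context on the awk invocation above: Cargo.lock records a git dependency's origin in a source line of the form git+<url>?tag=<tag>#<commit>, and the script simply picks the highest tag/commit pair among the plonky3-gpu entries. A minimal Rust sketch of the same extraction (the sample line is illustrative only, not taken from the real lock file):

    /// Extract (tag, short commit) from a Cargo.lock git source line.
    /// A sketch under the assumption that the line contains `tag=...#...`.
    fn parse_git_source(line: &str) -> Option<(String, String)> {
        let (_, rest) = line.split_once("tag=")?;
        let (tag, commit) = rest.split_once('#')?;
        let commit = commit.trim_end_matches('"');
        Some((tag.to_string(), commit.chars().take(7).collect()))
    }

    fn main() {
        // Hypothetical sample entry, shaped like a real Cargo.lock source line:
        let line = r#"source = "git+ssh://git@github.com/scroll-tech/plonky3-gpu.git?tag=v0.2.1#0123456789abcdef""#;
        assert_eq!(
            parse_git_source(line),
            Some(("v0.2.1".into(), "0123456".into()))
        );
    }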
@@ -13,7 +13,6 @@ libzkp = { path = "../libzkp" }
 alloy = { workspace = true, features = ["provider-http", "transport-http", "reqwest", "reqwest-rustls-tls", "json-rpc"] }
 sbv-primitives = { workspace = true, features = ["scroll"] }
 sbv-utils = { workspace = true, features = ["scroll"] }
-sbv-core = { workspace = true, features = ["scroll"] }

 eyre.workspace = true
@@ -11,7 +11,7 @@ pub fn init(config: &str) -> eyre::Result<()> {
     Ok(())
 }

-pub fn get_client() -> impl libzkp::tasks::ChunkInterpreter {
+pub fn get_client() -> rpc_client::RpcClient<'static> {
     GLOBAL_L2GETH_CLI
         .get()
         .expect("must has been inited")
@@ -1,5 +1,5 @@
 use alloy::{
-    providers::{Provider, ProviderBuilder},
+    providers::{Provider, ProviderBuilder, RootProvider},
     rpc::client::ClientBuilder,
     transports::layers::RetryBackoffLayer,
 };
@@ -49,13 +49,13 @@ pub struct RpcConfig {
 /// so it can be run in block mode (i.e. inside dynamic library without a global entry)
 pub struct RpcClientCore {
     /// rpc prover
-    client: alloy::rpc::client::RpcClient,
+    provider: RootProvider<Network>,
     rt: tokio::runtime::Runtime,
 }

 #[derive(Clone, Copy)]
-pub struct RpcClient<'a, T: Provider<Network>> {
-    provider: T,
+pub struct RpcClient<'a> {
+    provider: &'a RootProvider<Network>,
     handle: &'a tokio::runtime::Handle,
 }
@@ -75,78 +75,80 @@ impl RpcClientCore {
         let retry_layer = RetryBackoffLayer::new(config.max_retry, config.backoff, config.cups);
         let client = ClientBuilder::default().layer(retry_layer).http(rpc);

-        Ok(Self { client, rt })
+        Ok(Self {
+            provider: ProviderBuilder::<_, _, Network>::default().connect_client(client),
+            rt,
+        })
     }

-    pub fn get_client(&self) -> RpcClient<'_, impl Provider<Network>> {
+    pub fn get_client(&self) -> RpcClient {
         RpcClient {
-            provider: ProviderBuilder::<_, _, Network>::default()
-                .connect_client(self.client.clone()),
+            provider: &self.provider,
             handle: self.rt.handle(),
         }
     }
 }

-impl<T: Provider<Network>> ChunkInterpreter for RpcClient<'_, T> {
+impl ChunkInterpreter for RpcClient<'_> {
     fn try_fetch_block_witness(
         &self,
         block_hash: sbv_primitives::B256,
-        prev_witness: Option<&sbv_core::BlockWitness>,
-    ) -> Result<sbv_core::BlockWitness> {
+        prev_witness: Option<&sbv_primitives::types::BlockWitness>,
+    ) -> Result<sbv_primitives::types::BlockWitness> {
         async fn fetch_witness_async(
-            provider: impl Provider<Network>,
+            provider: &RootProvider<Network>,
            block_hash: sbv_primitives::B256,
-            prev_witness: Option<&sbv_core::BlockWitness>,
-        ) -> Result<sbv_core::BlockWitness> {
-            use sbv_utils::rpc::ProviderExt;
+            prev_witness: Option<&sbv_primitives::types::BlockWitness>,
+        ) -> Result<sbv_primitives::types::BlockWitness> {
+            use sbv_utils::{rpc::ProviderExt, witness::WitnessBuilder};

-            let (chain_id, block_num, prev_state_root) = if let Some(w) = prev_witness {
-                (w.chain_id, w.header.number + 1, w.header.state_root)
-            } else {
-                let chain_id = provider.get_chain_id().await?;
-                let block = provider
-                    .get_block_by_hash(block_hash)
-                    .full()
-                    .await?
-                    .ok_or_else(|| eyre::eyre!("Block {block_hash} not found"))?;
+            let chain_id = provider.get_chain_id().await?;

-                let parent_block = provider
-                    .get_block_by_hash(block.header.parent_hash)
-                    .await?
-                    .ok_or_else(|| {
-                        eyre::eyre!(
-                            "parent block for block {} should exist",
-                            block.header.number
-                        )
-                    })?;
+            let block = provider
+                .get_block_by_hash(block_hash)
+                .full()
+                .await?
+                .ok_or_else(|| eyre::eyre!("Block {block_hash} not found"))?;

-                (
-                    chain_id,
-                    block.header.number,
-                    parent_block.header.state_root,
-                )
+            let number = block.header.number;
+            let parent_hash = block.header.parent_hash;
+            if number == 0 {
+                eyre::bail!("no number in header or use block 0");
+            }
+
+            let mut witness_builder = WitnessBuilder::new()
+                .block(block)
+                .chain_id(chain_id)
+                .execution_witness(provider.debug_execution_witness(number.into()).await?);
+
+            let prev_state_root = match prev_witness {
+                Some(witness) => {
+                    if witness.header.number != number - 1 {
+                        eyre::bail!(
+                            "the ref witness is not the previous block, expected {} get {}",
+                            number - 1,
+                            witness.header.number,
+                        );
+                    }
+                    witness.header.state_root
+                }
+                None => {
+                    let parent_block = provider
+                        .get_block_by_hash(parent_hash)
+                        .await?
+                        .expect("parent block should exist");
+
+                    parent_block.header.state_root
+                }
+            };
+            witness_builder = witness_builder.prev_state_root(prev_state_root);

-            let req = provider
-                .dump_block_witness(block_num)
-                .with_chain_id(chain_id)
-                .with_prev_state_root(prev_state_root);
-
-            let witness = req
-                .send()
-                .await
-                .transpose()
-                .ok_or_else(|| eyre::eyre!("Block witness {block_num} not available"))??;
-
-            Ok(witness)
+            Ok(witness_builder.build()?)
         }

         tracing::debug!("fetch witness for {block_hash}");
-        self.handle.block_on(fetch_witness_async(
-            &self.provider,
-            block_hash,
-            prev_witness,
-        ))
+        self.handle
+            .block_on(fetch_witness_async(self.provider, block_hash, prev_witness))
     }

     fn try_fetch_storage_node(
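The ownership change above is worth spelling out: previously every get_client() built a fresh provider by cloning the underlying RPC client; now RpcClientCore owns a single RootProvider plus the runtime, and hands out a Copy-able view that borrows both. A minimal sketch of that shape with placeholder types (Provider stands in for the owned provider; none of the names below come from the real alloy API):

    use tokio::runtime::{Handle, Runtime};

    struct Provider; // stand-in for the owned provider type

    struct Core {
        provider: Provider, // built once at startup
        rt: Runtime,        // owned runtime for sync-to-async bridging
    }

    #[derive(Clone, Copy)]
    struct Client<'a> {
        provider: &'a Provider, // cheap to copy: just two borrows
        handle: &'a Handle,
    }

    impl Core {
        fn new() -> Self {
            Self {
                provider: Provider,
                rt: Runtime::new().expect("build runtime"),
            }
        }

        fn get_client(&self) -> Client<'_> {
            Client {
                provider: &self.provider,
                handle: self.rt.handle(),
            }
        }
    }

    fn main() {
        let core = Core::new();
        let client = core.get_client();
        // Synchronous callers drive async work through the stored handle:
        client.handle.block_on(async { /* RPC calls via client.provider */ });
        let _another = client; // Copy: the lightweight view can be passed around freely
    }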
@@ -154,7 +156,7 @@ impl<T: Provider<Network>> ChunkInterpreter for RpcClient<'_, T> {
         node_hash: sbv_primitives::B256,
     ) -> Result<sbv_primitives::Bytes> {
         async fn fetch_storage_node_async(
-            provider: impl Provider<Network>,
+            provider: &RootProvider<Network>,
             node_hash: sbv_primitives::B256,
         ) -> Result<sbv_primitives::Bytes> {
             let ret = provider
@@ -166,7 +168,7 @@ impl<T: Provider<Network>> ChunkInterpreter for RpcClient<'_, T> {

         tracing::debug!("fetch storage node for {node_hash}");
         self.handle
-            .block_on(fetch_storage_node_async(&self.provider, node_hash))
+            .block_on(fetch_storage_node_async(self.provider, node_hash))
     }
 }
@@ -192,10 +194,10 @@ mod tests {
         let client_core = RpcClientCore::create(&config).expect("Failed to create RPC client");
         let client = client_core.get_client();

-        // latest - 1 block in 2025.9.11
+        // latest - 1 block in 2025.6.15
         let block_hash = B256::from(
             hex::const_decode_to_array(
-                b"0x093fb6bf2e556a659b35428ac447cd9f0635382fc40ffad417b5910824f9e932",
+                b"0x9535a6970bc4db9031749331a214e35ed8c8a3f585f6f456d590a0bc780a1368",
             )
             .unwrap(),
         );
@@ -205,10 +207,10 @@ mod tests {
             .try_fetch_block_witness(block_hash, None)
             .expect("should success");

-        // block selected in 2025.9.11
+        // latest block in 2025.6.15
         let block_hash = B256::from(
             hex::const_decode_to_array(
-                b"0x77cc84dd7a4dedf6fe5fb9b443aeb5a4fb0623ad088a365d3232b7b23fc848e5",
+                b"0xd47088cdb6afc68aa082e633bb7da9340d29c73841668afacfb9c1e66e557af0",
             )
             .unwrap(),
         );
@@ -218,4 +220,26 @@ mod tests {

         println!("{}", serde_json::to_string_pretty(&wit2).unwrap());
     }
+
+    #[test]
+    #[ignore = "Requires L2GETH_ENDPOINT environment variable"]
+    fn test_try_fetch_storage_node() {
+        let config = create_config_from_env();
+        let client_core = RpcClientCore::create(&config).expect("Failed to create RPC client");
+        let client = client_core.get_client();
+
+        // the root node (state root) of the block in unittest above
+        let node_hash = B256::from(
+            hex::const_decode_to_array(
+                b"0xb9e67403a2eb35afbb0475fe942918cf9a330a1d7532704c24554506be62b27c",
+            )
+            .unwrap(),
+        );
+
+        // This is expected to fail since we're using a dummy hash, but it tests the code path
+        let node = client
+            .try_fetch_storage_node(node_hash)
+            .expect("should success");
+        println!("{}", serde_json::to_string_pretty(&node).unwrap());
+    }
 }
@@ -10,7 +10,6 @@ scroll-zkvm-verifier.workspace = true

 alloy-primitives.workspace = true #depress the effect of "native-keccak"
 sbv-primitives = {workspace = true, features = ["scroll-compress-ratio", "scroll"]}
-sbv-core = { workspace = true, features = ["scroll"] }
 base64.workspace = true
 serde.workspace = true
 serde_derive.workspace = true
@@ -19,7 +18,6 @@ tracing.workspace = true
 eyre.workspace = true

 git-version = "0.3.5"
-bincode = { version = "2", features = ["serde"] }
 serde_stacker = "0.1"
 regex = "1.11"
 c-kzg = { version = "2.0", features = ["serde"] }
@@ -11,27 +11,6 @@ use serde_json::value::RawValue;
 use std::path::Path;
 use tasks::chunk_interpreter::{ChunkInterpreter, TryFromWithInterpreter};

-/// global features: use legacy encoding for witness
-static mut LEGACY_WITNESS_ENCODING: bool = false;
-pub(crate) fn witness_use_legacy_mode() -> bool {
-    unsafe { LEGACY_WITNESS_ENCODING }
-}
-
-pub fn set_dynamic_feature(feats: &str) {
-    for feat_s in feats.split(':') {
-        match feat_s.trim().to_lowercase().as_str() {
-            "legacy_witness" => {
-                tracing::info!("set witness encoding for legacy mode");
-                unsafe {
-                    // the function is only called while initialize step
-                    LEGACY_WITNESS_ENCODING = true;
-                }
-            }
-            s => tracing::warn!("unrecognized dynamic feature: {s}"),
-        }
-    }
-}
-
 /// Turn the coordinator's chunk task into a json string for formal chunk proving
 /// task (with full witnesses)
 pub fn checkout_chunk_task(
@@ -53,6 +32,7 @@ pub fn gen_universal_task(
     task_json: &str,
     fork_name_str: &str,
     expected_vk: &[u8],
+    interpreter: Option<impl ChunkInterpreter>,
 ) -> eyre::Result<(B256, String, String)> {
     use proofs::*;
     use tasks::*;
@@ -76,9 +56,10 @@ pub fn gen_universal_task(
             if fork_name_str != task.fork_name.as_str() {
                 eyre::bail!("fork name in chunk task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
             }
-            let (pi_hash, metadata, u_task) =
-                utils::panic_catch(move || gen_universal_chunk_task(task, fork_name_str.into()))
-                    .map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
+            let (pi_hash, metadata, u_task) = utils::panic_catch(move || {
+                gen_universal_chunk_task(task, fork_name_str.into(), interpreter)
+            })
+            .map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
             (pi_hash, AnyMetaData::Chunk(metadata), u_task)
         }
         x if x == TaskType::Batch as i32 => {
@@ -9,8 +9,8 @@ use scroll_zkvm_types::{
     chunk::ChunkInfo,
     proof::{EvmProof, OpenVmEvmProof, ProofEnum, StarkProof},
     public_inputs::{ForkName, MultiVersionPublicInputs},
-    types_agg::AggregationInput,
-    utils::{serialize_vk, vec_as_base64},
+    types_agg::{AggregationInput, ProgramCommitment},
+    utils::vec_as_base64,
 };
 use serde::{de::DeserializeOwned, Deserialize, Serialize};
@@ -172,7 +172,7 @@ impl<Metadata> From<&WrappedProof<Metadata>> for AggregationInput {
     fn from(value: &WrappedProof<Metadata>) -> Self {
         Self {
             public_values: value.proof.public_values(),
-            commitment: serialize_vk::deserialize(&value.vk),
+            commitment: ProgramCommitment::deserialize(&value.vk),
         }
     }
 }
@@ -252,7 +252,6 @@ mod tests {
             batch_hash: B256::repeat_byte(4),
             withdraw_root: B256::repeat_byte(5),
             msg_queue_hash: B256::repeat_byte(6),
-            encryption_key: None,
         };
         let bundle_pi_hash = bundle_info.pi_hash(ForkName::EuclidV1);
         BundleProofMetadata {
@@ -16,11 +16,6 @@ use crate::{
 use sbv_primitives::B256;
 use scroll_zkvm_types::public_inputs::{ForkName, MultiVersionPublicInputs};

-fn encode_task_to_witness<T: serde::Serialize>(task: &T) -> eyre::Result<Vec<u8>> {
-    let config = bincode::config::standard();
-    Ok(bincode::serde::encode_to_vec(task, config)?)
-}
-
 fn check_aggregation_proofs<Metadata>(
     proofs: &[proofs::WrappedProof<Metadata>],
     fork_name: ForkName,
@@ -30,9 +25,9 @@ where
 {
     panic_catch(|| {
         for w in proofs.windows(2) {
-            // w[1].metadata
-            //     .pi_hash_info()
-            //     .validate(w[0].metadata.pi_hash_info(), fork_name);
+            w[1].metadata
+                .pi_hash_info()
+                .validate(w[0].metadata.pi_hash_info(), fork_name);
         }
     })
     .map_err(|e| eyre::eyre!("Chunk data validation failed: {}", e))?;
@@ -42,9 +37,13 @@ where

 /// Generate required staff for chunk proving
 pub fn gen_universal_chunk_task(
-    task: ChunkProvingTask,
+    mut task: ChunkProvingTask,
     fork_name: ForkName,
+    interpreter: Option<impl ChunkInterpreter>,
 ) -> eyre::Result<(B256, ChunkProofMetadata, ProvingTask)> {
+    if let Some(interpreter) = interpreter {
+        task.prepare_task_via_interpret(interpreter)?;
+    }
     let chunk_total_gas = task.stats().total_gas_used;
     let chunk_info = task.precheck_and_build_metadata()?;
     let proving_task = task.try_into()?;
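The new interpreter argument is an Option<impl ChunkInterpreter>, so the witness repair step runs only when the caller actually has an l2geth client to hand over. A small, self-contained sketch of this pattern with hypothetical names (one wrinkle: a caller passing None must still pin the type parameter down, e.g. with a turbofish):

    trait ChunkInterpreter {
        fn repair(&self, task: &mut Vec<u8>);
    }

    struct NoopInterpreter;
    impl ChunkInterpreter for NoopInterpreter {
        fn repair(&self, _task: &mut Vec<u8>) {}
    }

    // Hypothetical function mirroring the shape of gen_universal_chunk_task.
    fn gen_task(mut task: Vec<u8>, interpreter: Option<impl ChunkInterpreter>) -> Vec<u8> {
        if let Some(i) = interpreter {
            i.repair(&mut task); // e.g. fetch missing storage nodes before proving
        }
        task
    }

    fn main() {
        // With a concrete interpreter the type is inferred:
        let _ = gen_task(vec![1, 2, 3], Some(NoopInterpreter));
        // Without one, the impl-Trait parameter still needs a concrete type:
        let _ = gen_task(vec![1, 2, 3], None::<NoopInterpreter>);
    }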
@@ -4,9 +4,9 @@ use eyre::Result;
 use sbv_primitives::{B256, U256};
 use scroll_zkvm_types::{
     batch::{
-        build_point_eval_witness, BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderV8,
-        BatchInfo, BatchWitness, Envelope, EnvelopeV6, EnvelopeV7, EnvelopeV8, LegacyBatchWitness,
-        ReferenceHeader, N_BLOB_BYTES,
+        BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderV8, BatchInfo, BatchWitness,
+        Envelope, EnvelopeV6, EnvelopeV7, EnvelopeV8, PointEvalWitness, ReferenceHeader,
+        ToArchievedWitness, N_BLOB_BYTES,
     },
     public_inputs::ForkName,
     task::ProvingTask,
@@ -84,12 +84,6 @@ impl TryFrom<BatchProvingTask> for ProvingTask {

     fn try_from(value: BatchProvingTask) -> Result<Self> {
         let witness = value.build_guest_input();
-        let serialized_witness = if crate::witness_use_legacy_mode() {
-            let legacy_witness = LegacyBatchWitness::from(witness);
-            to_rkyv_bytes::<RancorError>(&legacy_witness)?.into_vec()
-        } else {
-            super::encode_task_to_witness(&witness)?
-        };

         Ok(ProvingTask {
             identifier: value.batch_header.batch_hash().to_string(),
@@ -99,7 +93,7 @@ impl TryFrom<BatchProvingTask> for ProvingTask {
                 .into_iter()
                 .map(|w_proof| w_proof.proof.into_stark_proof().expect("expect root proof"))
                 .collect(),
-            serialized_witness: vec![serialized_witness],
+            serialized_witness: vec![to_rkyv_bytes::<RancorError>(&witness)?.into_vec()],
             vk: Vec::new(),
         })
     }
@@ -167,10 +161,10 @@ impl BatchProvingTask {
             assert_eq!(p, kzg_proof);
         }

-        let point_eval_witness = Some(build_point_eval_witness(
-            kzg_commitment.into_inner(),
-            kzg_proof.into_inner(),
-        ));
+        let point_eval_witness = PointEvalWitness {
+            kzg_commitment: kzg_commitment.into_inner(),
+            kzg_proof: kzg_proof.into_inner(),
+        };

         let reference_header = match fork_name {
             ForkName::EuclidV1 => ReferenceHeader::V6(*self.batch_header.must_v6_header()),
@@ -189,7 +183,6 @@ impl BatchProvingTask {
             blob_bytes: self.blob_bytes.clone(),
             reference_header,
             point_eval_witness,
-            version: 0,
         }
     }
@@ -199,7 +192,12 @@ impl BatchProvingTask {
         // 1. generate data for metadata from the witness
         // 2. validate every adjacent proof pair
         let witness = self.build_guest_input();
-        let metadata = BatchInfo::from(&witness);
+        let archieved = ToArchievedWitness::create(&witness)
+            .map_err(|e| eyre::eyre!("archieve batch witness fail: {e}"))?;
+        let archieved_witness = archieved
+            .access()
+            .map_err(|e| eyre::eyre!("access archieved batch witness fail: {e}"))?;
+        let metadata: BatchInfo = archieved_witness.into();

         super::check_aggregation_proofs(self.chunk_proofs.as_slice(), fork_name)?;
@@ -1,10 +1,9 @@
 use crate::proofs::BatchProof;
 use eyre::Result;
 use scroll_zkvm_types::{
-    bundle::{BundleInfo, BundleWitness},
+    bundle::{BundleInfo, BundleWitness, ToArchievedWitness},
     public_inputs::ForkName,
     task::ProvingTask,
-    utils::{to_rkyv_bytes, RancorError},
 };

 /// Message indicating a sanity check failure.
@@ -41,7 +40,6 @@ impl BundleProvingTask {

     fn build_guest_input(&self) -> BundleWitness {
         BundleWitness {
-            version: 0,
             batch_proofs: self.batch_proofs.iter().map(|proof| proof.into()).collect(),
             batch_infos: self
                 .batch_proofs
|
||||
// 1. generate data for metadata from the witness
|
||||
// 2. validate every adjacent proof pair
|
||||
let witness = self.build_guest_input();
|
||||
let metadata = BundleInfo::from(&witness);
|
||||
let archieved = ToArchievedWitness::create(&witness)
|
||||
.map_err(|e| eyre::eyre!("archieve bundle witness fail: {e}"))?;
|
||||
let archieved_witness = archieved
|
||||
.access()
|
||||
.map_err(|e| eyre::eyre!("access archieved bundle witness fail: {e}"))?;
|
||||
let metadata: BundleInfo = archieved_witness.into();
|
||||
|
||||
super::check_aggregation_proofs(self.batch_proofs.as_slice(), fork_name)?;
|
||||
|
||||
@@ -71,12 +74,6 @@ impl TryFrom<BundleProvingTask> for ProvingTask {

     fn try_from(value: BundleProvingTask) -> Result<Self> {
         let witness = value.build_guest_input();
-        let serialized_witness = if crate::witness_use_legacy_mode() {
-            //to_rkyv_bytes::<RancorError>(&witness)?.into_vec()
-            unimplemented!();
-        } else {
-            super::encode_task_to_witness(&witness)?
-        };

         Ok(ProvingTask {
             identifier: value.identifier(),
@@ -86,7 +83,7 @@ impl TryFrom<BundleProvingTask> for ProvingTask {
                 .into_iter()
                 .map(|w_proof| w_proof.proof.into_stark_proof().expect("expect root proof"))
                 .collect(),
-            serialized_witness: vec![serialized_witness],
+            serialized_witness: vec![witness.rkyv_serialize(None)?.to_vec()],
             vk: Vec::new(),
         })
     }
@@ -1,11 +1,9 @@
 use super::chunk_interpreter::*;
 use eyre::Result;
-use sbv_core::BlockWitness;
-use sbv_primitives::B256;
+use sbv_primitives::{types::BlockWitness, B256};
 use scroll_zkvm_types::{
-    chunk::{execute, ChunkInfo, ChunkWitness, LegacyChunkWitness},
+    chunk::{execute, ChunkInfo, ChunkWitness, ToArchievedWitness},
     task::ProvingTask,
-    utils::{to_rkyv_bytes, RancorError},
 };

 /// The type aligned with coordinator's defination
@@ -68,18 +66,12 @@ impl TryFrom<ChunkProvingTask> for ProvingTask {

     fn try_from(value: ChunkProvingTask) -> Result<Self> {
         let witness = value.build_guest_input();
-        let serialized_witness = if crate::witness_use_legacy_mode() {
-            let legacy_witness = LegacyChunkWitness::from(witness);
-            to_rkyv_bytes::<RancorError>(&legacy_witness)?.into_vec()
-        } else {
-            super::encode_task_to_witness(&witness)?
-        };

         Ok(ProvingTask {
             identifier: value.identifier(),
             fork_name: value.fork_name,
             aggregated_proofs: Vec::new(),
-            serialized_witness: vec![serialized_witness],
+            serialized_witness: vec![witness.rkyv_serialize(None)?.to_vec()],
             vk: Vec::new(),
         })
     }
@@ -91,7 +83,7 @@ impl ChunkProvingTask {
         let num_txs = self
             .block_witnesses
             .iter()
-            .map(|b| b.transactions.len())
+            .map(|b| b.transaction.len())
             .sum::<usize>();
         let total_gas_used = self
             .block_witnesses
@@ -127,11 +119,9 @@ impl ChunkProvingTask {

     fn build_guest_input(&self) -> ChunkWitness {
         ChunkWitness::new(
-            0,
             &self.block_witnesses,
             self.prev_msg_queue_hash,
             self.fork_name.to_lowercase().as_str().into(),
-            None,
         )
     }
@@ -141,14 +131,18 @@ impl ChunkProvingTask {

     pub fn precheck_and_build_metadata(&self) -> Result<ChunkInfo> {
         let witness = self.build_guest_input();
+        let archieved = ToArchievedWitness::create(&witness)
+            .map_err(|e| eyre::eyre!("archieve chunk witness fail: {e}"))?;
+        let archieved_witness = archieved
+            .access()
+            .map_err(|e| eyre::eyre!("access archieved chunk witness fail: {e}"))?;

-        let ret = ChunkInfo::try_from(witness).map_err(|e| eyre::eyre!("{e}"))?;
+        let ret = ChunkInfo::try_from(archieved_witness).map_err(|e| eyre::eyre!("{e}"))?;
         Ok(ret)
     }

     /// this method check the validate of current task (there may be missing storage node)
     /// and try fixing it until everything is ok
     #[deprecated]
     pub fn prepare_task_via_interpret(
         &mut self,
         interpreter: impl ChunkInterpreter,
@@ -172,8 +166,13 @@ impl ChunkProvingTask {
         let mut attempts = 0;
         loop {
             let witness = self.build_guest_input();
+            let archieved = ToArchievedWitness::create(&witness)
+                .map_err(|e| eyre::eyre!("archieve chunk witness fail: {e}"))?;
+            let archieved_witness = archieved
+                .access()
+                .map_err(|e| eyre::eyre!("access archieved chunk witness fail: {e}"))?;

-            match execute(witness) {
+            match execute(archieved_witness) {
                 Ok(_) => return Ok(()),
                 Err(e) => {
                     if let Some(caps) = err_parse_re.captures(&e) {
@@ -1,6 +1,5 @@
 use eyre::Result;
-use sbv_core::BlockWitness;
-use sbv_primitives::{Bytes, B256};
+use sbv_primitives::{types::BlockWitness, Bytes, B256};

 /// An interpreter which is cirtical in translating chunk data
 /// since we need to grep block witness and storage node data
@@ -17,10 +17,10 @@ pub struct Verifier {

 impl Verifier {
     pub fn new(assets_dir: &str, fork: ForkName) -> Self {
-        let verifier_bin = Path::new(assets_dir);
+        let verifier_bin = Path::new(assets_dir).join("verifier.bin");

         Self {
-            verifier: UniversalVerifier::setup(verifier_bin).expect("Setting up chunk verifier"),
+            verifier: UniversalVerifier::setup(&verifier_bin).expect("Setting up chunk verifier"),
             fork,
         }
     }
@@ -32,16 +32,12 @@ impl ProofVerifier for Verifier {
             TaskType::Chunk => {
                 let proof = serde_json::from_slice::<ChunkProof>(proof).unwrap();
                 assert!(proof.pi_hash_check(self.fork));
-                self.verifier
-                    .verify_stark_proof(proof.as_root_proof(), &proof.vk)
-                    .unwrap()
+                UniversalVerifier::verify_stark_proof(proof.as_root_proof(), &proof.vk).unwrap()
             }
             TaskType::Batch => {
                 let proof = serde_json::from_slice::<BatchProof>(proof).unwrap();
                 assert!(proof.pi_hash_check(self.fork));
-                self.verifier
-                    .verify_stark_proof(proof.as_root_proof(), &proof.vk)
-                    .unwrap()
+                UniversalVerifier::verify_stark_proof(proof.as_root_proof(), &proof.vk).unwrap()
             }
             TaskType::Bundle => {
                 let proof = serde_json::from_slice::<BundleProof>(proof).unwrap();
@@ -153,12 +153,17 @@ pub unsafe extern "C" fn gen_universal_task(
     expected_vk: *const u8,
     expected_vk_len: usize,
 ) -> HandlingResult {
+    let mut interpreter = None;
     let task_json = if task_type == TaskType::Chunk as i32 {
         let pre_task_str = c_char_to_str(task);
         let cli = l2geth::get_client();
         match libzkp::checkout_chunk_task(pre_task_str, cli) {
-            Ok(str) => str,
+            Ok(str) => {
+                interpreter.replace(cli);
+                str
+            }
             Err(e) => {
                 println!("gen_universal_task failed at pre interpret step, error: {e}");
                 tracing::error!("gen_universal_task failed at pre interpret step, error: {e}");
                 return failed_handling_result();
             }
@@ -173,8 +178,13 @@ pub unsafe extern "C" fn gen_universal_task(
         &[]
     };

-    let ret =
-        libzkp::gen_universal_task(task_type, &task_json, c_char_to_str(fork_name), expected_vk);
+    let ret = libzkp::gen_universal_task(
+        task_type,
+        &task_json,
+        c_char_to_str(fork_name),
+        expected_vk,
+        interpreter,
+    );

     if let Ok((pi_hash, meta_json, task_json)) = ret {
         let expected_pi_hash = pi_hash.0.map(|byte| byte as c_char);
@@ -245,10 +255,3 @@ pub unsafe extern "C" fn release_string(ptr: *mut c_char) {
         let _ = CString::from_raw(ptr);
     }
 }
-
-/// # Safety
-#[no_mangle]
-pub unsafe extern "C" fn set_dynamic_feature(feats: *const c_char) {
-    let feats_str = c_char_to_str(feats);
-    libzkp::set_dynamic_feature(feats_str);
-}
@@ -33,7 +33,3 @@ clap = { version = "4.5", features = ["derive"] }
 ctor = "0.2.8"
 url = { version = "2.5.4", features = ["serde",] }
 serde_bytes = "0.11.15"
-
-[features]
-default = []
-cuda = ["scroll-zkvm-prover/cuda"]
@@ -1,5 +1,3 @@
-#![allow(dead_code)]
-
 use serde::{Deserialize, Deserializer, Serialize, Serializer};

 #[derive(Serialize, Deserialize, Default)]
@@ -2,6 +2,7 @@ use std::path::Path;

 use super::CircuitsHandler;
 use async_trait::async_trait;
+use base64::{prelude::BASE64_STANDARD, Engine};
 use eyre::Result;
 use scroll_proving_sdk::prover::ProofType;
 use scroll_zkvm_prover::{Prover, ProverConfig};
@@ -11,12 +12,10 @@ pub struct UniversalHandler {
     prover: Prover,
 }

-/// Safe for current usage as `CircuitsHandler` trait (protected inside of Mutex and NEVER extract
-/// the instance out by `into_inner`)
-unsafe impl Send for UniversalHandler {}
-
 impl UniversalHandler {
-    pub fn new(workspace_path: impl AsRef<Path>, _proof_type: ProofType) -> Result<Self> {
+    pub fn new(workspace_path: impl AsRef<Path>, proof_type: ProofType) -> Result<Self> {
         let path_app_exe = workspace_path.as_ref().join("app.vmexe");
         let path_app_config = workspace_path.as_ref().join("openvm.toml");
         let segment_len = Some((1 << 22) - 100);
@@ -26,14 +25,16 @@ impl UniversalHandler {
             segment_len,
         };

-        let prover = Prover::setup(config, None)?;
+        let use_evm = proof_type == ProofType::Bundle;
+
+        let prover = Prover::setup(config, use_evm, None)?;
         Ok(Self { prover })
     }

     /// get_prover get the inner prover, later we would replace chunk/batch/bundle_prover with
     /// universal prover, before that, use bundle_prover as the represent one
-    pub fn get_prover(&mut self) -> &mut Prover {
-        &mut self.prover
+    pub fn get_prover(&self) -> &Prover {
+        &self.prover
     }

     pub fn get_task_from_input(input: &str) -> Result<ProvingTask> {
@@ -44,7 +45,14 @@ impl UniversalHandler {
 #[async_trait]
 impl CircuitsHandler for Mutex<UniversalHandler> {
     async fn get_proof_data(&self, u_task: &ProvingTask, need_snark: bool) -> Result<String> {
-        let mut handler_self = self.lock().await;
+        let handler_self = self.lock().await;
+
+        if need_snark && handler_self.prover.evm_prover.is_none() {
+            eyre::bail!(
+                "do not init prover for evm (vk: {})",
+                BASE64_STANDARD.encode(handler_self.get_prover().get_app_vk())
+            )
+        }

         let proof = handler_self
             .get_prover()
@@ -7,8 +7,14 @@ import (

 // SenderConfig The config for transaction sender
 type SenderConfig struct {
-	// The RPC endpoint of the ethereum or scroll public node.
+	// The RPC endpoint of the ethereum or scroll public node (for backward compatibility).
+	// If WriteEndpoints is specified, this endpoint will be used only for reading.
+	// If WriteEndpoints is empty, this endpoint will be used for both reading and writing.
 	Endpoint string `json:"endpoint"`
+	// The RPC endpoints to send transactions to (optional).
+	// If specified, transactions will be sent to all these endpoints in parallel.
+	// If empty, transactions will be sent to the Endpoint.
+	WriteEndpoints []string `json:"write_endpoints,omitempty"`
 	// The time to trigger check pending txs in sender.
 	CheckPendingTime uint64 `json:"check_pending_time"`
 	// The number of blocks to wait to escalate increase gas price of the transaction.
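For illustration only, a sender config exercising the new field might look like the following JSON (all endpoint values are placeholders, and other SenderConfig fields are omitted):

    {
      "endpoint": "https://rpc.example.com",
      "write_endpoints": [
        "https://rpc-a.example.com",
        "https://rpc-b.example.com"
      ],
      "check_pending_time": 10
    }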
@@ -56,6 +56,7 @@ func setupEnv(t *testing.T) {

 	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetPoSL1EndPoint()
 	assert.NoError(t, err)
+	cfg.L2Config.RelayerConfig.SenderConfig.WriteEndpoints = []string{cfg.L2Config.RelayerConfig.SenderConfig.Endpoint, cfg.L2Config.RelayerConfig.SenderConfig.Endpoint}
 	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL2GethEndPoint()
 	assert.NoError(t, err)
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"math/big"
 	"strings"
+	"sync"
 	"time"

 	"github.com/holiman/uint256"
@@ -67,7 +68,8 @@ type FeeData struct {
 type Sender struct {
 	config     *config.SenderConfig
 	gethClient *gethclient.Client
-	client     *ethclient.Client // The client to retrieve on chain data or send transaction.
+	client       *ethclient.Client   // The client to retrieve on chain data (read-only)
+	writeClients []*ethclient.Client // The clients to send transactions to (write operations)
 	transactionSigner *TransactionSigner
 	chainID           *big.Int // The chain id of the endpoint
 	ctx               context.Context
@@ -90,9 +92,10 @@ func NewSender(ctx context.Context, config *config.SenderConfig, signerConfig *c
 		return nil, fmt.Errorf("invalid params, EscalateMultipleNum; %v, EscalateMultipleDen: %v", config.EscalateMultipleNum, config.EscalateMultipleDen)
 	}

+	// Initialize read client
 	rpcClient, err := rpc.Dial(config.Endpoint)
 	if err != nil {
-		return nil, fmt.Errorf("failed to dial eth client, err: %w", err)
+		return nil, fmt.Errorf("failed to dial read client, err: %w", err)
 	}

 	client := ethclient.NewClient(rpcClient)
@@ -105,12 +108,42 @@ func NewSender(ctx context.Context, config *config.SenderConfig, signerConfig *c
 		return nil, fmt.Errorf("failed to create transaction signer, err: %w", err)
 	}

+	// Initialize write clients
+	var writeClients []*ethclient.Client
+	if len(config.WriteEndpoints) > 0 {
+		// Use specified write endpoints
+		for i, endpoint := range config.WriteEndpoints {
+			writeRpcClient, err := rpc.Dial(endpoint)
+			if err != nil {
+				return nil, fmt.Errorf("failed to dial write client %d (endpoint: %s), err: %w", i, endpoint, err)
+			}
+			writeClient := ethclient.NewClient(writeRpcClient)
+
+			// Verify the write client is connected to the same chain
+			writeChainID, err := writeClient.ChainID(ctx)
+			if err != nil {
+				return nil, fmt.Errorf("failed to get chain ID from write client %d (endpoint: %s), err: %w", i, endpoint, err)
+			}
+			if writeChainID.Cmp(chainID) != 0 {
+				return nil, fmt.Errorf("write client %d (endpoint: %s) has different chain ID %s, expected %s", i, endpoint, writeChainID.String(), chainID.String())
+			}
+
+			writeClients = append(writeClients, writeClient)
+		}
+		log.Info("initialized sender with multiple write clients", "service", service, "name", name, "readEndpoint", config.Endpoint, "writeEndpoints", config.WriteEndpoints)
+	} else {
+		// Use read client for writing (backward compatibility)
+		writeClients = append(writeClients, client)
+		log.Info("initialized sender with single client", "service", service, "name", name, "endpoint", config.Endpoint)
+	}
+
 	// Create sender instance first and then initialize nonce
 	sender := &Sender{
 		ctx:               ctx,
 		config:            config,
 		gethClient:        gethclient.New(rpcClient),
 		client:            client,
+		writeClients:      writeClients,
 		chainID:           chainID,
 		transactionSigner: transactionSigner,
 		db:                db,
@@ -169,6 +202,82 @@ func (s *Sender) getFeeData(target *common.Address, data []byte, sidecar *gethTy
 	}
 }

+// sendTransactionToMultipleClients sends a transaction to all write clients in parallel
+// and returns success if at least one client succeeds
+func (s *Sender) sendTransactionToMultipleClients(signedTx *gethTypes.Transaction) error {
+	ctx, cancel := context.WithTimeout(s.ctx, 15*time.Second)
+	defer cancel()
+
+	if len(s.writeClients) == 1 {
+		// Single client - use direct approach
+		return s.writeClients[0].SendTransaction(ctx, signedTx)
+	}
+
+	// Multiple clients - send in parallel
+	type result struct {
+		endpoint string
+		err      error
+	}
+
+	resultChan := make(chan result, len(s.writeClients))
+	var wg sync.WaitGroup
+
+	// Send transaction to all write clients in parallel
+	for i, client := range s.writeClients {
+		wg.Add(1)
+		// Determine endpoint URL for this client
+		endpoint := s.config.WriteEndpoints[i]
+
+		go func(ep string, writeClient *ethclient.Client) {
+			defer wg.Done()
+			err := writeClient.SendTransaction(ctx, signedTx)
+			resultChan <- result{endpoint: ep, err: err}
+		}(endpoint, client)
+	}
+
+	// Wait for all goroutines to finish
+	go func() {
+		wg.Wait()
+		close(resultChan)
+	}()
+
+	// Collect results
+	var errs []error
+	for res := range resultChan {
+		if res.err != nil {
+			errs = append(errs, fmt.Errorf("%s: %w", res.endpoint, res.err))
+			log.Warn("failed to send transaction to write client",
+				"endpoint", res.endpoint,
+				"txHash", signedTx.Hash().Hex(),
+				"nonce", signedTx.Nonce(),
+				"from", s.transactionSigner.GetAddr().String(),
+				"error", res.err)
+		} else {
+			log.Info("successfully sent transaction to write client",
+				"endpoint", res.endpoint,
+				"txHash", signedTx.Hash().Hex(),
+				"nonce", signedTx.Nonce(),
+				"from", s.transactionSigner.GetAddr().String())
+		}
+	}
+
+	// Check if at least one client succeeded
+	if len(errs) < len(s.writeClients) {
+		successCount := len(s.writeClients) - len(errs)
+		if len(errs) > 0 {
+			log.Info("transaction partially succeeded",
+				"txHash", signedTx.Hash().Hex(),
+				"successCount", successCount,
+				"totalClients", len(s.writeClients),
+				"failures", errors.Join(errs...))
+		}
+		return nil
+	}
+
+	// All clients failed
+	return fmt.Errorf("failed to send transaction to all %d write clients: %w", len(s.writeClients), errors.Join(errs...))
+}
+
 // SendTransaction send a signed L2tL1 transaction.
 func (s *Sender) SendTransaction(contextID string, target *common.Address, data []byte, blobs []*kzg4844.Blob) (common.Hash, uint64, error) {
 	s.metrics.sendTransactionTotal.WithLabelValues(s.service, s.name).Inc()
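The helper above is a fan-out with at-least-one-success semantics: the signed transaction goes to every write client in parallel, per-endpoint failures are logged and collected, and the send counts as failed only if every endpoint rejects it. The same control flow in a compact, self-contained Rust sketch (send_to is a placeholder for the real per-endpoint RPC call; this is a shape illustration, not the project's implementation):

    use std::sync::mpsc;
    use std::thread;

    // Placeholder for the real per-endpoint RPC call.
    fn send_to(_endpoint: &str, _payload: &[u8]) -> Result<(), String> {
        Ok(())
    }

    // Returns Ok if at least one endpoint accepts the payload.
    fn broadcast(endpoints: &[String], payload: &[u8]) -> Result<(), String> {
        let (tx, rx) = mpsc::channel();
        for ep in endpoints {
            let tx = tx.clone();
            let ep = ep.clone();
            let payload = payload.to_vec();
            thread::spawn(move || {
                let _ = tx.send((ep.clone(), send_to(&ep, &payload)));
            });
        }
        drop(tx); // receiver iteration ends once all workers are done

        let mut errs = Vec::new();
        let mut total = 0;
        for (ep, res) in rx {
            total += 1;
            if let Err(e) = res {
                errs.push(format!("{ep}: {e}")); // log-and-collect, like the Go version
            }
        }
        if errs.len() < total {
            Ok(()) // at least one endpoint accepted the transaction
        } else {
            Err(format!("all {total} endpoints failed: {}", errs.join("; ")))
        }
    }

    fn main() {
        let eps = vec!["https://rpc-a.example.com".to_string()];
        broadcast(&eps, b"signed-tx-bytes").expect("at least one endpoint should accept");
    }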
@@ -230,7 +339,7 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
 		return common.Hash{}, 0, fmt.Errorf("failed to insert transaction, err: %w", err)
 	}

-	if err := s.client.SendTransaction(s.ctx, signedTx); err != nil {
+	if err := s.sendTransactionToMultipleClients(signedTx); err != nil {
 		// Delete the transaction from the pending transaction table if it fails to send.
 		if updateErr := s.pendingTransactionOrm.DeleteTransactionByTxHash(s.ctx, signedTx.Hash()); updateErr != nil {
 			log.Error("failed to delete transaction", "tx hash", signedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", signedTx.Nonce(), "err", updateErr)
@@ -645,7 +754,7 @@ func (s *Sender) checkPendingTransaction() {
 			return
 		}

-		if err := s.client.SendTransaction(s.ctx, newSignedTx); err != nil {
+		if err := s.sendTransactionToMultipleClients(newSignedTx); err != nil {
 			if strings.Contains(err.Error(), "nonce too low") {
 				// When we receive a 'nonce too low' error but cannot find the transaction receipt, it indicates another transaction with this nonce has already been processed, so this transaction will never be mined and should be marked as failed.
 				log.Warn("nonce too low detected, marking all non-confirmed transactions with same nonce as failed", "nonce", originalTx.Nonce(), "address", s.transactionSigner.GetAddr().Hex(), "txHash", originalTx.Hash().Hex(), "newTxHash", newSignedTx.Hash().Hex(), "err", err)
@@ -1,4 +1,3 @@
 [toolchain]
-channel = "nightly-2025-08-18"
-targets = ["riscv32im-unknown-none-elf", "x86_64-unknown-linux-gnu"]
-components = ["llvm-tools", "rustc-dev"]
+channel = "nightly-2025-02-14"
+targets = ["riscv32im-unknown-none-elf", "x86_64-unknown-linux-gnu"]
@@ -1,4 +1,4 @@
-.PHONY: prover prover_cpu lint tests_binary test_e2e_run test_run
+.PHONY: prover lint tests_binary

 RUST_MIN_STACK ?= 16777216
 export RUST_MIN_STACK

@@ -36,16 +36,14 @@ E2E_HANDLE_SET ?= ../tests/prover-e2e/testset.json
 DUMP_DIR ?= .work

 prover:
-	GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --locked --release -Z unstable-options --lockfile-path ../crates/gpu_override/Cargo.lock -p prover
+	GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZKVM_COMMIT=${ZKVM_COMMIT} $(MAKE) -C ../crates/gpu_override build

 version:
-	echo ${GO_TAG}-${GIT_REV}-${ZK_VERSION}
+	GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZKVM_COMMIT=${ZKVM_COMMIT} $(MAKE) -C ../crates/gpu_override version

-prover_cpu:
-	GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --locked --release -p prover
-
 clean:
 	cargo clean -Z unstable-options --release -p prover --lockfile-path ../crates/gpu_override/Cargo.lock

 tests_binary:
 	cargo clean && cargo test --release --no-run