Compare commits


7 Commits

Author  SHA1  Message  Date
Morty  3dd9ab6c35  address comment  2025-09-11 23:12:19 +08:00
Morty  94da520beb  Merge branch 'feat-gpo-moving-average' of https://github.com/scroll-tech/scroll into feat-gpo-moving-average  2025-09-11 03:04:45 +08:00
Morty  5004902063  address comments  2025-09-11 03:04:35 +08:00
yiweichi  4d9fd5530e  chore: auto version bump [bot]  2025-09-10 18:58:17 +00:00
Morty  ecb00c1390  Merge branch 'develop' into feat-gpo-moving-average  2025-09-11 02:58:03 +08:00
yiweichi  277e71efec  chore: auto version bump [bot]  2025-09-08 12:00:53 +00:00
Morty  197b0a3dd8  feat(gas-oracle): support moving average base fee  2025-09-08 19:56:28 +08:00
36 changed files with 3037 additions and 2155 deletions


@@ -29,7 +29,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2025-08-18
toolchain: nightly-2025-02-14
override: true
components: rustfmt, clippy
- name: Install Go


@@ -33,7 +33,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2025-08-18
toolchain: nightly-2025-02-14
override: true
components: rustfmt, clippy
- name: Install Go


@@ -22,9 +22,11 @@ on:
required: true
type: choice
options:
- nightly-2023-12-03
- nightly-2022-12-10
- 1.86.0
- nightly-2025-08-18
default: "nightly-2025-08-18"
- nightly-2025-02-14
default: "nightly-2023-12-03"
PYTHON_VERSION:
description: "Python version"
required: false

Cargo.lock (generated, 2196 lines): file diff suppressed because it is too large.


@@ -14,16 +14,15 @@ edition = "2021"
homepage = "https://scroll.io"
readme = "README.md"
repository = "https://github.com/scroll-tech/scroll"
version = "4.5.47"
version = "4.5.8"
[workspace.dependencies]
scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "5c361ad" }
scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "5c361ad" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "5c361ad" }
scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "ad0efe7" }
scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "ad0efe7" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "ad0efe7" }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "master", features = ["scroll", "rkyv"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "master" }
sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "master", features = ["scroll"] }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/openvm-1.3", features = ["scroll"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/openvm-1.3" }
metrics = "0.23.0"
metrics-util = "0.17"
@@ -31,8 +30,7 @@ metrics-tracing-context = "0.16.0"
anyhow = "1.0"
alloy = { version = "1", default-features = false }
alloy-primitives = { version = "1.3", default-features = false, features = ["tiny-keccak"] }
alloy-sol-types = { version = "1.3", default-features = false }
alloy-primitives = { version = "1.2", default-features = false, features = ["tiny-keccak"] }
# also use this to trigger "serde" feature for primitives
alloy-serde = { version = "1", default-features = false }
@@ -48,20 +46,21 @@ once_cell = "1.20"
base64 = "0.22"
[patch.crates-io]
revm = { git = "https://github.com/scroll-tech/revm" }
revm-bytecode = { git = "https://github.com/scroll-tech/revm" }
revm-context = { git = "https://github.com/scroll-tech/revm" }
revm-context-interface = { git = "https://github.com/scroll-tech/revm" }
revm-database = { git = "https://github.com/scroll-tech/revm" }
revm-database-interface = { git = "https://github.com/scroll-tech/revm" }
revm-handler = { git = "https://github.com/scroll-tech/revm" }
revm-inspector = { git = "https://github.com/scroll-tech/revm" }
revm-interpreter = { git = "https://github.com/scroll-tech/revm" }
revm-precompile = { git = "https://github.com/scroll-tech/revm" }
revm-primitives = { git = "https://github.com/scroll-tech/revm" }
revm-state = { git = "https://github.com/scroll-tech/revm" }
revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-handler = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-precompile = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "feat/rkyv" }
ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.15.0" }
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v1.2.0" }
[profile.maxperf]
inherits = "release"


@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.5.46"
var tag = "v4.5.47"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
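
The `commit` initializer above is cut off by the diff context. As orientation only, here is a minimal sketch of how such a value is commonly derived from the build info the Go toolchain embeds; the repository's actual body may differ:

```go
package version

import "runtime/debug"

var tag = "v4.5.47"

// commit resolves the short VCS revision that the Go toolchain embeds in
// the binary, or "" when build info is unavailable (hypothetical completion).
var commit = func() string {
	if info, ok := debug.ReadBuildInfo(); ok {
		for _, s := range info.Settings {
			if s.Key == "vcs.revision" && len(s.Value) >= 7 {
				return s.Value[:7] // short commit hash
			}
		}
	}
	return ""
}()
```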


@@ -66,7 +66,6 @@ type AssetConfig struct {
// VerifierConfig load zk verifier config.
type VerifierConfig struct {
MinProverVersion string `json:"min_prover_version"`
Features string `json:"features,omitempty"`
Verifiers []AssetConfig `json:"verifiers"`
}


@@ -140,10 +140,3 @@ func DumpVk(forkName, filePath string) error {
return nil
}
// Set dynamic feature flags that control libzkp runtime behavior
func SetDynamicFeature(feats string) {
cFeats := goToCString(feats)
defer freeCString(cFeats)
C.set_dynamic_feature(cFeats)
}


@@ -54,7 +54,4 @@ char* gen_wrapped_proof(char* proof_json, char* metadata, char* vk, size_t vk_le
// Release memory allocated for a string returned by gen_wrapped_proof
void release_string(char* string_ptr);
void set_dynamic_feature(const char* feats);
#endif /* LIBZKP_H */


@@ -67,9 +67,6 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
return nil, err
}
if cfg.Features != "" {
libzkp.SetDynamicFeature(cfg.Features)
}
libzkp.InitVerifier(string(configBytes))
v := &Verifier{


@@ -0,0 +1,45 @@
[patch."https://github.com/openvm-org/openvm.git"]
openvm-build = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
openvm-circuit = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
openvm-continuations = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
openvm-instructions ={ git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
openvm-native-circuit = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
openvm-native-compiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
openvm-native-recursion = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
openvm-native-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
openvm-rv32im-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
openvm-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
openvm-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.3.0-pipe", default-features = false }
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
"nightly-features",
], tag = "v0.2.1" }
p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }

File diff suppressed because it is too large.


@@ -0,0 +1,23 @@
.PHONY: build update clean
ZKVM_COMMIT ?= freebuild
PLONKY3_GPU_VERSION=$(shell ./print_plonky3gpu_version.sh | sed -n '2p')
$(info PLONKY3_GPU_VERSION is ${PLONKY3_GPU_VERSION})
GIT_REV ?= $(shell git rev-parse --short HEAD)
GO_TAG ?= $(shell grep "var tag = " ../../common/version/version.go | cut -d "\"" -f2)
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION}
$(info ZK_GPU_VERSION is ${ZK_VERSION})
clean:
cargo clean -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock
# build gpu prover, never touch lock file
build:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock
version:
echo ${GO_TAG}-${GIT_REV}-${ZK_VERSION}
# update Cargo.lock while override config has been updated
#update:
# GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock


@@ -0,0 +1,10 @@
#!/bin/bash
higher_plonky3_item=`grep "plonky3-gpu" ./Cargo.lock | sort | uniq | awk -F "[#=]" '{print $3" "$4}' | sort -k 1 | tail -n 1`
higher_version=`echo $higher_plonky3_item | awk '{print $1}'`
higher_commit=`echo $higher_plonky3_item | cut -d ' ' -f2 | cut -c-7`
echo "$higher_version"
echo "$higher_commit"


@@ -13,7 +13,6 @@ libzkp = { path = "../libzkp" }
alloy = { workspace = true, features = ["provider-http", "transport-http", "reqwest", "reqwest-rustls-tls", "json-rpc"] }
sbv-primitives = { workspace = true, features = ["scroll"] }
sbv-utils = { workspace = true, features = ["scroll"] }
sbv-core = { workspace = true, features = ["scroll"] }
eyre.workspace = true


@@ -11,7 +11,7 @@ pub fn init(config: &str) -> eyre::Result<()> {
Ok(())
}
pub fn get_client() -> impl libzkp::tasks::ChunkInterpreter {
pub fn get_client() -> rpc_client::RpcClient<'static> {
GLOBAL_L2GETH_CLI
.get()
.expect("must has been inited")


@@ -1,5 +1,5 @@
use alloy::{
providers::{Provider, ProviderBuilder},
providers::{Provider, ProviderBuilder, RootProvider},
rpc::client::ClientBuilder,
transports::layers::RetryBackoffLayer,
};
@@ -49,13 +49,13 @@ pub struct RpcConfig {
/// so it can be run in block mode (i.e. inside dynamic library without a global entry)
pub struct RpcClientCore {
/// rpc prover
client: alloy::rpc::client::RpcClient,
provider: RootProvider<Network>,
rt: tokio::runtime::Runtime,
}
#[derive(Clone, Copy)]
pub struct RpcClient<'a, T: Provider<Network>> {
provider: T,
pub struct RpcClient<'a> {
provider: &'a RootProvider<Network>,
handle: &'a tokio::runtime::Handle,
}
@@ -75,78 +75,80 @@ impl RpcClientCore {
let retry_layer = RetryBackoffLayer::new(config.max_retry, config.backoff, config.cups);
let client = ClientBuilder::default().layer(retry_layer).http(rpc);
Ok(Self { client, rt })
Ok(Self {
provider: ProviderBuilder::<_, _, Network>::default().connect_client(client),
rt,
})
}
pub fn get_client(&self) -> RpcClient<'_, impl Provider<Network>> {
pub fn get_client(&self) -> RpcClient {
RpcClient {
provider: ProviderBuilder::<_, _, Network>::default()
.connect_client(self.client.clone()),
provider: &self.provider,
handle: self.rt.handle(),
}
}
}
impl<T: Provider<Network>> ChunkInterpreter for RpcClient<'_, T> {
impl ChunkInterpreter for RpcClient<'_> {
fn try_fetch_block_witness(
&self,
block_hash: sbv_primitives::B256,
prev_witness: Option<&sbv_core::BlockWitness>,
) -> Result<sbv_core::BlockWitness> {
prev_witness: Option<&sbv_primitives::types::BlockWitness>,
) -> Result<sbv_primitives::types::BlockWitness> {
async fn fetch_witness_async(
provider: impl Provider<Network>,
provider: &RootProvider<Network>,
block_hash: sbv_primitives::B256,
prev_witness: Option<&sbv_core::BlockWitness>,
) -> Result<sbv_core::BlockWitness> {
use sbv_utils::rpc::ProviderExt;
prev_witness: Option<&sbv_primitives::types::BlockWitness>,
) -> Result<sbv_primitives::types::BlockWitness> {
use sbv_utils::{rpc::ProviderExt, witness::WitnessBuilder};
let (chain_id, block_num, prev_state_root) = if let Some(w) = prev_witness {
(w.chain_id, w.header.number + 1, w.header.state_root)
} else {
let chain_id = provider.get_chain_id().await?;
let block = provider
.get_block_by_hash(block_hash)
.full()
.await?
.ok_or_else(|| eyre::eyre!("Block {block_hash} not found"))?;
let chain_id = provider.get_chain_id().await?;
let parent_block = provider
.get_block_by_hash(block.header.parent_hash)
.await?
.ok_or_else(|| {
eyre::eyre!(
"parent block for block {} should exist",
block.header.number
)
})?;
let block = provider
.get_block_by_hash(block_hash)
.full()
.await?
.ok_or_else(|| eyre::eyre!("Block {block_hash} not found"))?;
(
chain_id,
block.header.number,
parent_block.header.state_root,
)
let number = block.header.number;
let parent_hash = block.header.parent_hash;
if number == 0 {
eyre::bail!("no number in header or use block 0");
}
let mut witness_builder = WitnessBuilder::new()
.block(block)
.chain_id(chain_id)
.execution_witness(provider.debug_execution_witness(number.into()).await?);
let prev_state_root = match prev_witness {
Some(witness) => {
if witness.header.number != number - 1 {
eyre::bail!(
"the ref witness is not the previous block, expected {} get {}",
number - 1,
witness.header.number,
);
}
witness.header.state_root
}
None => {
let parent_block = provider
.get_block_by_hash(parent_hash)
.await?
.expect("parent block should exist");
parent_block.header.state_root
}
};
witness_builder = witness_builder.prev_state_root(prev_state_root);
let req = provider
.dump_block_witness(block_num)
.with_chain_id(chain_id)
.with_prev_state_root(prev_state_root);
let witness = req
.send()
.await
.transpose()
.ok_or_else(|| eyre::eyre!("Block witness {block_num} not available"))??;
Ok(witness)
Ok(witness_builder.build()?)
}
tracing::debug!("fetch witness for {block_hash}");
self.handle.block_on(fetch_witness_async(
&self.provider,
block_hash,
prev_witness,
))
self.handle
.block_on(fetch_witness_async(self.provider, block_hash, prev_witness))
}
fn try_fetch_storage_node(
@@ -154,7 +156,7 @@ impl<T: Provider<Network>> ChunkInterpreter for RpcClient<'_, T> {
node_hash: sbv_primitives::B256,
) -> Result<sbv_primitives::Bytes> {
async fn fetch_storage_node_async(
provider: impl Provider<Network>,
provider: &RootProvider<Network>,
node_hash: sbv_primitives::B256,
) -> Result<sbv_primitives::Bytes> {
let ret = provider
@@ -166,7 +168,7 @@ impl<T: Provider<Network>> ChunkInterpreter for RpcClient<'_, T> {
tracing::debug!("fetch storage node for {node_hash}");
self.handle
.block_on(fetch_storage_node_async(&self.provider, node_hash))
.block_on(fetch_storage_node_async(self.provider, node_hash))
}
}
@@ -192,10 +194,10 @@ mod tests {
let client_core = RpcClientCore::create(&config).expect("Failed to create RPC client");
let client = client_core.get_client();
// latest - 1 block in 2025.9.11
// latest - 1 block in 2025.6.15
let block_hash = B256::from(
hex::const_decode_to_array(
b"0x093fb6bf2e556a659b35428ac447cd9f0635382fc40ffad417b5910824f9e932",
b"0x9535a6970bc4db9031749331a214e35ed8c8a3f585f6f456d590a0bc780a1368",
)
.unwrap(),
);
@@ -205,10 +207,10 @@ mod tests {
.try_fetch_block_witness(block_hash, None)
.expect("should success");
// block selected in 2025.9.11
// latest block in 2025.6.15
let block_hash = B256::from(
hex::const_decode_to_array(
b"0x77cc84dd7a4dedf6fe5fb9b443aeb5a4fb0623ad088a365d3232b7b23fc848e5",
b"0xd47088cdb6afc68aa082e633bb7da9340d29c73841668afacfb9c1e66e557af0",
)
.unwrap(),
);
@@ -218,4 +220,26 @@ mod tests {
println!("{}", serde_json::to_string_pretty(&wit2).unwrap());
}
#[test]
#[ignore = "Requires L2GETH_ENDPOINT environment variable"]
fn test_try_fetch_storage_node() {
let config = create_config_from_env();
let client_core = RpcClientCore::create(&config).expect("Failed to create RPC client");
let client = client_core.get_client();
// the root node (state root) of the block in unittest above
let node_hash = B256::from(
hex::const_decode_to_array(
b"0xb9e67403a2eb35afbb0475fe942918cf9a330a1d7532704c24554506be62b27c",
)
.unwrap(),
);
// This is expected to fail since we're using a dummy hash, but it tests the code path
let node = client
.try_fetch_storage_node(node_hash)
.expect("should success");
println!("{}", serde_json::to_string_pretty(&node).unwrap());
}
}


@@ -10,7 +10,6 @@ scroll-zkvm-verifier.workspace = true
alloy-primitives.workspace = true # suppress the effect of "native-keccak"
sbv-primitives = {workspace = true, features = ["scroll-compress-ratio", "scroll"]}
sbv-core = { workspace = true, features = ["scroll"] }
base64.workspace = true
serde.workspace = true
serde_derive.workspace = true
@@ -19,7 +18,6 @@ tracing.workspace = true
eyre.workspace = true
git-version = "0.3.5"
bincode = { version = "2", features = ["serde"] }
serde_stacker = "0.1"
regex = "1.11"
c-kzg = { version = "2.0", features = ["serde"] }


@@ -11,27 +11,6 @@ use serde_json::value::RawValue;
use std::path::Path;
use tasks::chunk_interpreter::{ChunkInterpreter, TryFromWithInterpreter};
/// global features: use legacy encoding for witness
static mut LEGACY_WITNESS_ENCODING: bool = false;
pub(crate) fn witness_use_legacy_mode() -> bool {
unsafe { LEGACY_WITNESS_ENCODING }
}
pub fn set_dynamic_feature(feats: &str) {
for feat_s in feats.split(':') {
match feat_s.trim().to_lowercase().as_str() {
"legacy_witness" => {
tracing::info!("set witness encoding for legacy mode");
unsafe {
// the function is only called while initialize step
LEGACY_WITNESS_ENCODING = true;
}
}
s => tracing::warn!("unrecognized dynamic feature: {s}"),
}
}
}
/// Turn the coordinator's chunk task into a json string for formal chunk proving
/// task (with full witnesses)
pub fn checkout_chunk_task(
@@ -53,6 +32,7 @@ pub fn gen_universal_task(
task_json: &str,
fork_name_str: &str,
expected_vk: &[u8],
interpreter: Option<impl ChunkInterpreter>,
) -> eyre::Result<(B256, String, String)> {
use proofs::*;
use tasks::*;
@@ -76,9 +56,10 @@ pub fn gen_universal_task(
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in chunk task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
let (pi_hash, metadata, u_task) =
utils::panic_catch(move || gen_universal_chunk_task(task, fork_name_str.into()))
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
let (pi_hash, metadata, u_task) = utils::panic_catch(move || {
gen_universal_chunk_task(task, fork_name_str.into(), interpreter)
})
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
(pi_hash, AnyMetaData::Chunk(metadata), u_task)
}
x if x == TaskType::Batch as i32 => {


@@ -9,8 +9,8 @@ use scroll_zkvm_types::{
chunk::ChunkInfo,
proof::{EvmProof, OpenVmEvmProof, ProofEnum, StarkProof},
public_inputs::{ForkName, MultiVersionPublicInputs},
types_agg::AggregationInput,
utils::{serialize_vk, vec_as_base64},
types_agg::{AggregationInput, ProgramCommitment},
utils::vec_as_base64,
};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
@@ -172,7 +172,7 @@ impl<Metadata> From<&WrappedProof<Metadata>> for AggregationInput {
fn from(value: &WrappedProof<Metadata>) -> Self {
Self {
public_values: value.proof.public_values(),
commitment: serialize_vk::deserialize(&value.vk),
commitment: ProgramCommitment::deserialize(&value.vk),
}
}
}
@@ -252,7 +252,6 @@ mod tests {
batch_hash: B256::repeat_byte(4),
withdraw_root: B256::repeat_byte(5),
msg_queue_hash: B256::repeat_byte(6),
encryption_key: None,
};
let bundle_pi_hash = bundle_info.pi_hash(ForkName::EuclidV1);
BundleProofMetadata {


@@ -16,11 +16,6 @@ use crate::{
use sbv_primitives::B256;
use scroll_zkvm_types::public_inputs::{ForkName, MultiVersionPublicInputs};
fn encode_task_to_witness<T: serde::Serialize>(task: &T) -> eyre::Result<Vec<u8>> {
let config = bincode::config::standard();
Ok(bincode::serde::encode_to_vec(task, config)?)
}
fn check_aggregation_proofs<Metadata>(
proofs: &[proofs::WrappedProof<Metadata>],
fork_name: ForkName,
@@ -30,9 +25,9 @@ where
{
panic_catch(|| {
for w in proofs.windows(2) {
// w[1].metadata
// .pi_hash_info()
// .validate(w[0].metadata.pi_hash_info(), fork_name);
w[1].metadata
.pi_hash_info()
.validate(w[0].metadata.pi_hash_info(), fork_name);
}
})
.map_err(|e| eyre::eyre!("Chunk data validation failed: {}", e))?;
@@ -42,9 +37,13 @@ where
/// Generate required stuff for chunk proving
pub fn gen_universal_chunk_task(
task: ChunkProvingTask,
mut task: ChunkProvingTask,
fork_name: ForkName,
interpreter: Option<impl ChunkInterpreter>,
) -> eyre::Result<(B256, ChunkProofMetadata, ProvingTask)> {
if let Some(interpreter) = interpreter {
task.prepare_task_via_interpret(interpreter)?;
}
let chunk_total_gas = task.stats().total_gas_used;
let chunk_info = task.precheck_and_build_metadata()?;
let proving_task = task.try_into()?;


@@ -4,9 +4,9 @@ use eyre::Result;
use sbv_primitives::{B256, U256};
use scroll_zkvm_types::{
batch::{
build_point_eval_witness, BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderV8,
BatchInfo, BatchWitness, Envelope, EnvelopeV6, EnvelopeV7, EnvelopeV8, LegacyBatchWitness,
ReferenceHeader, N_BLOB_BYTES,
BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderV8, BatchInfo, BatchWitness,
Envelope, EnvelopeV6, EnvelopeV7, EnvelopeV8, PointEvalWitness, ReferenceHeader,
ToArchievedWitness, N_BLOB_BYTES,
},
public_inputs::ForkName,
task::ProvingTask,
@@ -84,12 +84,6 @@ impl TryFrom<BatchProvingTask> for ProvingTask {
fn try_from(value: BatchProvingTask) -> Result<Self> {
let witness = value.build_guest_input();
let serialized_witness = if crate::witness_use_legacy_mode() {
let legacy_witness = LegacyBatchWitness::from(witness);
to_rkyv_bytes::<RancorError>(&legacy_witness)?.into_vec()
} else {
super::encode_task_to_witness(&witness)?
};
Ok(ProvingTask {
identifier: value.batch_header.batch_hash().to_string(),
@@ -99,7 +93,7 @@ impl TryFrom<BatchProvingTask> for ProvingTask {
.into_iter()
.map(|w_proof| w_proof.proof.into_stark_proof().expect("expect root proof"))
.collect(),
serialized_witness: vec![serialized_witness],
serialized_witness: vec![to_rkyv_bytes::<RancorError>(&witness)?.into_vec()],
vk: Vec::new(),
})
}
@@ -167,10 +161,10 @@ impl BatchProvingTask {
assert_eq!(p, kzg_proof);
}
let point_eval_witness = Some(build_point_eval_witness(
kzg_commitment.into_inner(),
kzg_proof.into_inner(),
));
let point_eval_witness = PointEvalWitness {
kzg_commitment: kzg_commitment.into_inner(),
kzg_proof: kzg_proof.into_inner(),
};
let reference_header = match fork_name {
ForkName::EuclidV1 => ReferenceHeader::V6(*self.batch_header.must_v6_header()),
@@ -189,7 +183,6 @@ impl BatchProvingTask {
blob_bytes: self.blob_bytes.clone(),
reference_header,
point_eval_witness,
version: 0,
}
}
@@ -199,7 +192,12 @@ impl BatchProvingTask {
// 1. generate data for metadata from the witness
// 2. validate every adjacent proof pair
let witness = self.build_guest_input();
let metadata = BatchInfo::from(&witness);
let archieved = ToArchievedWitness::create(&witness)
.map_err(|e| eyre::eyre!("archieve batch witness fail: {e}"))?;
let archieved_witness = archieved
.access()
.map_err(|e| eyre::eyre!("access archieved batch witness fail: {e}"))?;
let metadata: BatchInfo = archieved_witness.into();
super::check_aggregation_proofs(self.chunk_proofs.as_slice(), fork_name)?;


@@ -1,10 +1,9 @@
use crate::proofs::BatchProof;
use eyre::Result;
use scroll_zkvm_types::{
bundle::{BundleInfo, BundleWitness},
bundle::{BundleInfo, BundleWitness, ToArchievedWitness},
public_inputs::ForkName,
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};
/// Message indicating a sanity check failure.
@@ -41,7 +40,6 @@ impl BundleProvingTask {
fn build_guest_input(&self) -> BundleWitness {
BundleWitness {
version: 0,
batch_proofs: self.batch_proofs.iter().map(|proof| proof.into()).collect(),
batch_infos: self
.batch_proofs
@@ -58,7 +56,12 @@ impl BundleProvingTask {
// 1. generate data for metadata from the witness
// 2. validate every adjacent proof pair
let witness = self.build_guest_input();
let metadata = BundleInfo::from(&witness);
let archieved = ToArchievedWitness::create(&witness)
.map_err(|e| eyre::eyre!("archieve bundle witness fail: {e}"))?;
let archieved_witness = archieved
.access()
.map_err(|e| eyre::eyre!("access archieved bundle witness fail: {e}"))?;
let metadata: BundleInfo = archieved_witness.into();
super::check_aggregation_proofs(self.batch_proofs.as_slice(), fork_name)?;
@@ -71,12 +74,6 @@ impl TryFrom<BundleProvingTask> for ProvingTask {
fn try_from(value: BundleProvingTask) -> Result<Self> {
let witness = value.build_guest_input();
let serialized_witness = if crate::witness_use_legacy_mode() {
//to_rkyv_bytes::<RancorError>(&witness)?.into_vec()
unimplemented!();
} else {
super::encode_task_to_witness(&witness)?
};
Ok(ProvingTask {
identifier: value.identifier(),
@@ -86,7 +83,7 @@ impl TryFrom<BundleProvingTask> for ProvingTask {
.into_iter()
.map(|w_proof| w_proof.proof.into_stark_proof().expect("expect root proof"))
.collect(),
serialized_witness: vec![serialized_witness],
serialized_witness: vec![witness.rkyv_serialize(None)?.to_vec()],
vk: Vec::new(),
})
}


@@ -1,11 +1,9 @@
use super::chunk_interpreter::*;
use eyre::Result;
use sbv_core::BlockWitness;
use sbv_primitives::B256;
use sbv_primitives::{types::BlockWitness, B256};
use scroll_zkvm_types::{
chunk::{execute, ChunkInfo, ChunkWitness, LegacyChunkWitness},
chunk::{execute, ChunkInfo, ChunkWitness, ToArchievedWitness},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};
/// The type aligned with the coordinator's definition
@@ -68,18 +66,12 @@ impl TryFrom<ChunkProvingTask> for ProvingTask {
fn try_from(value: ChunkProvingTask) -> Result<Self> {
let witness = value.build_guest_input();
let serialized_witness = if crate::witness_use_legacy_mode() {
let legacy_witness = LegacyChunkWitness::from(witness);
to_rkyv_bytes::<RancorError>(&legacy_witness)?.into_vec()
} else {
super::encode_task_to_witness(&witness)?
};
Ok(ProvingTask {
identifier: value.identifier(),
fork_name: value.fork_name,
aggregated_proofs: Vec::new(),
serialized_witness: vec![serialized_witness],
serialized_witness: vec![witness.rkyv_serialize(None)?.to_vec()],
vk: Vec::new(),
})
}
@@ -91,7 +83,7 @@ impl ChunkProvingTask {
let num_txs = self
.block_witnesses
.iter()
.map(|b| b.transactions.len())
.map(|b| b.transaction.len())
.sum::<usize>();
let total_gas_used = self
.block_witnesses
@@ -127,11 +119,9 @@ impl ChunkProvingTask {
fn build_guest_input(&self) -> ChunkWitness {
ChunkWitness::new(
0,
&self.block_witnesses,
self.prev_msg_queue_hash,
self.fork_name.to_lowercase().as_str().into(),
None,
)
}
@@ -141,14 +131,18 @@ impl ChunkProvingTask {
pub fn precheck_and_build_metadata(&self) -> Result<ChunkInfo> {
let witness = self.build_guest_input();
let archieved = ToArchievedWitness::create(&witness)
.map_err(|e| eyre::eyre!("archieve chunk witness fail: {e}"))?;
let archieved_witness = archieved
.access()
.map_err(|e| eyre::eyre!("access archieved chunk witness fail: {e}"))?;
let ret = ChunkInfo::try_from(witness).map_err(|e| eyre::eyre!("{e}"))?;
let ret = ChunkInfo::try_from(archieved_witness).map_err(|e| eyre::eyre!("{e}"))?;
Ok(ret)
}
/// this method checks the validity of the current task (there may be missing storage nodes)
/// and tries to fix it until everything is ok
#[deprecated]
pub fn prepare_task_via_interpret(
&mut self,
interpreter: impl ChunkInterpreter,
@@ -172,8 +166,13 @@ impl ChunkProvingTask {
let mut attempts = 0;
loop {
let witness = self.build_guest_input();
let archieved = ToArchievedWitness::create(&witness)
.map_err(|e| eyre::eyre!("archieve chunk witness fail: {e}"))?;
let archieved_witness = archieved
.access()
.map_err(|e| eyre::eyre!("access archieved chunk witness fail: {e}"))?;
match execute(witness) {
match execute(archieved_witness) {
Ok(_) => return Ok(()),
Err(e) => {
if let Some(caps) = err_parse_re.captures(&e) {


@@ -1,6 +1,5 @@
use eyre::Result;
use sbv_core::BlockWitness;
use sbv_primitives::{Bytes, B256};
use sbv_primitives::{types::BlockWitness, Bytes, B256};
/// An interpreter which is critical in translating chunk data
/// since we need to grep block witness and storage node data


@@ -17,10 +17,10 @@ pub struct Verifier {
impl Verifier {
pub fn new(assets_dir: &str, fork: ForkName) -> Self {
let verifier_bin = Path::new(assets_dir);
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
Self {
verifier: UniversalVerifier::setup(verifier_bin).expect("Setting up chunk verifier"),
verifier: UniversalVerifier::setup(&verifier_bin).expect("Setting up chunk verifier"),
fork,
}
}
@@ -32,16 +32,12 @@ impl ProofVerifier for Verifier {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof).unwrap();
assert!(proof.pi_hash_check(self.fork));
self.verifier
.verify_stark_proof(proof.as_root_proof(), &proof.vk)
.unwrap()
UniversalVerifier::verify_stark_proof(proof.as_root_proof(), &proof.vk).unwrap()
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof).unwrap();
assert!(proof.pi_hash_check(self.fork));
self.verifier
.verify_stark_proof(proof.as_root_proof(), &proof.vk)
.unwrap()
UniversalVerifier::verify_stark_proof(proof.as_root_proof(), &proof.vk).unwrap()
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof).unwrap();


@@ -153,12 +153,17 @@ pub unsafe extern "C" fn gen_universal_task(
expected_vk: *const u8,
expected_vk_len: usize,
) -> HandlingResult {
let mut interpreter = None;
let task_json = if task_type == TaskType::Chunk as i32 {
let pre_task_str = c_char_to_str(task);
let cli = l2geth::get_client();
match libzkp::checkout_chunk_task(pre_task_str, cli) {
Ok(str) => str,
Ok(str) => {
interpreter.replace(cli);
str
}
Err(e) => {
println!("gen_universal_task failed at pre interpret step, error: {e}");
tracing::error!("gen_universal_task failed at pre interpret step, error: {e}");
return failed_handling_result();
}
@@ -173,8 +178,13 @@ pub unsafe extern "C" fn gen_universal_task(
&[]
};
let ret =
libzkp::gen_universal_task(task_type, &task_json, c_char_to_str(fork_name), expected_vk);
let ret = libzkp::gen_universal_task(
task_type,
&task_json,
c_char_to_str(fork_name),
expected_vk,
interpreter,
);
if let Ok((pi_hash, meta_json, task_json)) = ret {
let expected_pi_hash = pi_hash.0.map(|byte| byte as c_char);
@@ -245,10 +255,3 @@ pub unsafe extern "C" fn release_string(ptr: *mut c_char) {
let _ = CString::from_raw(ptr);
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn set_dynamic_feature(feats: *const c_char) {
let feats_str = c_char_to_str(feats);
libzkp::set_dynamic_feature(feats_str);
}


@@ -33,7 +33,3 @@ clap = { version = "4.5", features = ["derive"] }
ctor = "0.2.8"
url = { version = "2.5.4", features = ["serde",] }
serde_bytes = "0.11.15"
[features]
default = []
cuda = ["scroll-zkvm-prover/cuda"]


@@ -1,5 +1,3 @@
#![allow(dead_code)]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[derive(Serialize, Deserialize, Default)]


@@ -2,6 +2,7 @@ use std::path::Path;
use super::CircuitsHandler;
use async_trait::async_trait;
use base64::{prelude::BASE64_STANDARD, Engine};
use eyre::Result;
use scroll_proving_sdk::prover::ProofType;
use scroll_zkvm_prover::{Prover, ProverConfig};
@@ -11,12 +12,10 @@ pub struct UniversalHandler {
prover: Prover,
}
/// Safe for current usage as `CircuitsHandler` trait (protected inside a Mutex; the
/// instance is NEVER extracted via `into_inner`)
unsafe impl Send for UniversalHandler {}
impl UniversalHandler {
pub fn new(workspace_path: impl AsRef<Path>, _proof_type: ProofType) -> Result<Self> {
pub fn new(workspace_path: impl AsRef<Path>, proof_type: ProofType) -> Result<Self> {
let path_app_exe = workspace_path.as_ref().join("app.vmexe");
let path_app_config = workspace_path.as_ref().join("openvm.toml");
let segment_len = Some((1 << 22) - 100);
@@ -26,14 +25,16 @@ impl UniversalHandler {
segment_len,
};
let prover = Prover::setup(config, None)?;
let use_evm = proof_type == ProofType::Bundle;
let prover = Prover::setup(config, use_evm, None)?;
Ok(Self { prover })
}
/// get_prover gets the inner prover; later we would replace chunk/batch/bundle_prover with a
/// universal prover, but before that, use bundle_prover as the representative one
pub fn get_prover(&mut self) -> &mut Prover {
&mut self.prover
pub fn get_prover(&self) -> &Prover {
&self.prover
}
pub fn get_task_from_input(input: &str) -> Result<ProvingTask> {
@@ -44,7 +45,14 @@ impl UniversalHandler {
#[async_trait]
impl CircuitsHandler for Mutex<UniversalHandler> {
async fn get_proof_data(&self, u_task: &ProvingTask, need_snark: bool) -> Result<String> {
let mut handler_self = self.lock().await;
let handler_self = self.lock().await;
if need_snark && handler_self.prover.evm_prover.is_none() {
eyre::bail!(
"do not init prover for evm (vk: {})",
BASE64_STANDARD.encode(handler_self.get_prover().get_app_vk())
)
}
let proof = handler_self
.get_prover()


@@ -21,7 +21,8 @@
"check_committed_batches_window_minutes": 5,
"l1_base_fee_default": 15000000000,
"l1_blob_base_fee_default": 1,
"l1_blob_base_fee_threshold": 0
"l1_blob_base_fee_threshold": 0,
"calculate_average_fees_window_size": 100
},
"gas_oracle_sender_signer_config": {
"signer_type": "PrivateKey",


@@ -103,6 +103,9 @@ type GasOracleConfig struct {
// L1BlobBaseFeeThreshold the threshold of L1 blob base fee to enter the default gas price mode
L1BlobBaseFeeThreshold uint64 `json:"l1_blob_base_fee_threshold"`
// CalculateAverageFeesWindowSize the number of blocks used for average fee calculation
CalculateAverageFeesWindowSize int `json:"calculate_average_fees_window_size"`
}
// SignerConfig - config of signer, contains type and config corresponding to type
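
The new `calculate_average_fees_window_size` key from the JSON config hunk above maps onto this struct field through its `json` tag. A minimal, self-contained sketch of that decoding (the trimmed struct below is an illustration, not the real `GasOracleConfig`):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// gasOracleConfig is a trimmed stand-in for the real GasOracleConfig; only
// the fields relevant to the moving-average feature are included here.
type gasOracleConfig struct {
	L1BaseFeeDefault               uint64 `json:"l1_base_fee_default"`
	L1BlobBaseFeeDefault           uint64 `json:"l1_blob_base_fee_default"`
	L1BlobBaseFeeThreshold         uint64 `json:"l1_blob_base_fee_threshold"`
	CalculateAverageFeesWindowSize int    `json:"calculate_average_fees_window_size"`
}

func main() {
	raw := []byte(`{
		"l1_base_fee_default": 15000000000,
		"l1_blob_base_fee_default": 1,
		"l1_blob_base_fee_threshold": 0,
		"calculate_average_fees_window_size": 100
	}`)
	var cfg gasOracleConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.CalculateAverageFeesWindowSize) // 100
}
```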


@@ -105,23 +105,20 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
// ProcessGasPriceOracle imports gas price to layer2
func (r *Layer1Relayer) ProcessGasPriceOracle() {
r.metrics.rollupL1RelayerGasPriceOraclerRunTotal.Inc()
latestBlockHeight, err := r.l1BlockOrm.GetLatestL1BlockHeight(r.ctx)
limit := r.cfg.GasOracleConfig.CalculateAverageFeesWindowSize
blocks, err := r.l1BlockOrm.GetLatestL1Blocks(r.ctx, limit)
if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err)
log.Error("Failed to GetLatestL1Blocks from db", "limit", limit, "err", err)
return
}
blocks, err := r.l1BlockOrm.GetL1Blocks(r.ctx, map[string]interface{}{
"number": latestBlockHeight,
})
if err != nil {
log.Error("Failed to GetL1Blocks from db", "height", latestBlockHeight, "err", err)
return
}
if len(blocks) != 1 {
log.Error("Block not exist", "height", latestBlockHeight)
// nothing to do if we don't have any l1 blocks
if len(blocks) == 0 {
log.Warn("No l1 blocks to process", "limit", limit)
return
}
block := blocks[0]
if types.GasOracleStatus(block.GasOracleStatus) == types.GasOraclePending {
@@ -130,8 +127,8 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
return
}
baseFee := block.BaseFee
blobBaseFee := block.BlobBaseFee
// calculate the average base fee and blob base fee of the last N blocks
baseFee, blobBaseFee := r.calculateAverageFees(blocks)
// include the token exchange rate in the fee data if alternative gas token enabled
if r.cfg.GasOracleConfig.AlternativeGasTokenConfig != nil && r.cfg.GasOracleConfig.AlternativeGasTokenConfig.Enabled {
@@ -154,8 +151,24 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
log.Error("Invalid exchange rate", "exchangeRate", exchangeRate)
return
}
baseFee = uint64(math.Ceil(float64(baseFee) / exchangeRate))
blobBaseFee = uint64(math.Ceil(float64(blobBaseFee) / exchangeRate))
// Check for overflow in exchange rate calculation
adjustedBaseFee := math.Ceil(float64(baseFee) / exchangeRate)
adjustedBlobBaseFee := math.Ceil(float64(blobBaseFee) / exchangeRate)
if adjustedBaseFee > float64(^uint64(0)) {
log.Error("Base fee overflow after exchange rate adjustment", "originalBaseFee", baseFee, "exchangeRate", exchangeRate, "adjustedBaseFee", adjustedBaseFee)
baseFee = ^uint64(0) // Set to max uint64
} else {
baseFee = uint64(adjustedBaseFee)
}
if adjustedBlobBaseFee > float64(^uint64(0)) {
log.Error("Blob base fee overflow after exchange rate adjustment", "originalBlobBaseFee", blobBaseFee, "exchangeRate", exchangeRate, "adjustedBlobBaseFee", adjustedBlobBaseFee)
blobBaseFee = ^uint64(0) // Set to max uint64
} else {
blobBaseFee = uint64(adjustedBlobBaseFee)
}
}
if r.shouldUpdateGasOracle(baseFee, blobBaseFee) {
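
The overflow guard in the hunk above follows a simple pattern: compute the adjusted fee in `float64`, then clamp to the maximum `uint64` instead of letting the conversion wrap. A standalone sketch of that pattern (the helper name `adjustForExchangeRate` is hypothetical):

```go
package main

import (
	"fmt"
	"math"
)

// adjustForExchangeRate divides a fee by the exchange rate, rounding up,
// and caps the result at math.MaxUint64 instead of overflowing on the
// float-to-uint conversion. (Hypothetical helper distilled from the hunk above.)
func adjustForExchangeRate(fee uint64, exchangeRate float64) uint64 {
	adjusted := math.Ceil(float64(fee) / exchangeRate)
	if adjusted > float64(math.MaxUint64) {
		return math.MaxUint64
	}
	return uint64(adjusted)
}

func main() {
	fmt.Println(adjustForExchangeRate(15_000_000_000, 0.5)) // 30000000000
	fmt.Println(adjustForExchangeRate(math.MaxUint64, 0.5)) // capped at max uint64
}
```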
@@ -163,7 +176,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
// If we are not committing batches due to high fees then we shouldn't update fees to prevent users from paying high l1_data_fee
// Also, set fees to some default value, because we have already updated fees to some high values, probably
var reachTimeout bool
if reachTimeout, err = r.commitBatchReachTimeout(); reachTimeout && block.BlobBaseFee > r.cfg.GasOracleConfig.L1BlobBaseFeeThreshold && err == nil {
if reachTimeout, err = r.commitBatchReachTimeout(); reachTimeout && blobBaseFee > r.cfg.GasOracleConfig.L1BlobBaseFeeThreshold && err == nil {
if r.lastBaseFee == r.cfg.GasOracleConfig.L1BaseFeeDefault && r.lastBlobBaseFee == r.cfg.GasOracleConfig.L1BlobBaseFeeDefault {
return
}
@@ -175,13 +188,13 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
}
data, err := r.l1GasOracleABI.Pack("setL1BaseFeeAndBlobBaseFee", new(big.Int).SetUint64(baseFee), new(big.Int).SetUint64(blobBaseFee))
if err != nil {
log.Error("Failed to pack setL1BaseFeeAndBlobBaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "err", err)
log.Error("Failed to pack setL1BaseFeeAndBlobBaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "baseFee", baseFee, "blobBaseFee", blobBaseFee, "err", err)
return
}
txHash, _, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, data, nil)
if err != nil {
log.Error("Failed to send gas oracle update tx to layer2", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "err", err)
log.Error("Failed to send gas oracle update tx to layer2", "block.Hash", block.Hash, "block.Height", block.Number, "baseFee", baseFee, "blobBaseFee", blobBaseFee, "err", err)
return
}
@@ -287,3 +300,44 @@ func (r *Layer1Relayer) commitBatchReachTimeout() (bool, error) {
// Because batches[0].CommittedAt is nil in this case, this will only continue for a short time window.
return len(batches) == 0 || (batches[0].Index != 0 && batches[0].CommittedAt != nil && utils.NowUTC().Sub(*batches[0].CommittedAt) > time.Duration(r.cfg.GasOracleConfig.CheckCommittedBatchesWindowMinutes)*time.Minute), nil
}
// calculateAverageFees returns the average base fee and blob base fee.
// Uses big.Int for intermediate calculations to avoid overflow.
func (r *Layer1Relayer) calculateAverageFees(blocks []orm.L1Block) (avgBaseFee uint64, avgBlobBaseFee uint64) {
if len(blocks) == 0 {
return 0, 0
}
// Use big.Int to handle large sums without overflow
totalBaseFee := big.NewInt(0)
totalBlobBaseFee := big.NewInt(0)
count := big.NewInt(int64(len(blocks)))
for _, b := range blocks {
totalBaseFee.Add(totalBaseFee, big.NewInt(0).SetUint64(b.BaseFee))
totalBlobBaseFee.Add(totalBlobBaseFee, big.NewInt(0).SetUint64(b.BlobBaseFee))
}
// Calculate averages
avgBaseFeeBig := big.NewInt(0).Div(totalBaseFee, count)
avgBlobBaseFeeBig := big.NewInt(0).Div(totalBlobBaseFee, count)
// Check if results fit in uint64
maxUint64 := big.NewInt(0).SetUint64(^uint64(0))
if avgBaseFeeBig.Cmp(maxUint64) > 0 {
log.Error("Average base fee exceeds uint64 max, capping at max value", "calculatedAvg", avgBaseFeeBig.String())
avgBaseFee = ^uint64(0)
} else {
avgBaseFee = avgBaseFeeBig.Uint64()
}
if avgBlobBaseFeeBig.Cmp(maxUint64) > 0 {
log.Error("Average blob base fee exceeds uint64 max, capping at max value", "calculatedAvg", avgBlobBaseFeeBig.String())
avgBlobBaseFee = ^uint64(0)
} else {
avgBlobBaseFee = avgBlobBaseFeeBig.Uint64()
}
return avgBaseFee, avgBlobBaseFee
}
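
As a quick sanity check of the averaging logic, a self-contained sketch mirroring the `big.Int` accumulation above (the `l1Block` struct stands in for `orm.L1Block`):

```go
package main

import (
	"fmt"
	"math/big"
)

// l1Block mimics the two fee fields of orm.L1Block used by the average.
type l1Block struct {
	BaseFee     uint64
	BlobBaseFee uint64
}

// averageFees mirrors calculateAverageFees: sum each fee into a big.Int
// so the running totals cannot overflow, then divide by the block count.
func averageFees(blocks []l1Block) (uint64, uint64) {
	if len(blocks) == 0 {
		return 0, 0
	}
	totalBase, totalBlob := new(big.Int), new(big.Int)
	for _, b := range blocks {
		totalBase.Add(totalBase, new(big.Int).SetUint64(b.BaseFee))
		totalBlob.Add(totalBlob, new(big.Int).SetUint64(b.BlobBaseFee))
	}
	count := big.NewInt(int64(len(blocks)))
	return new(big.Int).Div(totalBase, count).Uint64(),
		new(big.Int).Div(totalBlob, count).Uint64()
}

func main() {
	window := []l1Block{{BaseFee: 10, BlobBaseFee: 1}, {BaseFee: 20, BlobBaseFee: 3}}
	base, blob := averageFees(window)
	fmt.Println(base, blob) // 15 2
}
```

Note that an average of `uint64` values can never exceed the largest element, so the cap-at-max-uint64 branch in `calculateAverageFees` is purely defensive.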


@@ -71,6 +71,20 @@ func (o *L1Block) GetL1Blocks(ctx context.Context, fields map[string]interface{}
return l1Blocks, nil
}
// GetLatestL1Blocks gets the latest N l1 blocks ordered by block number descending
func (o *L1Block) GetLatestL1Blocks(ctx context.Context, limit int) ([]L1Block, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L1Block{})
db = db.Order("number DESC")
db = db.Limit(limit)
var l1Blocks []L1Block
if err := db.Find(&l1Blocks).Error; err != nil {
return nil, fmt.Errorf("L1Block.GetLatestL1Blocks error: %w, limit: %d", err, limit)
}
return l1Blocks, nil
}
// GetBlobFeesInRange returns all blob_base_fee values for blocks
// with number ∈ [startBlock..endBlock], ordered by block number ascending.
func (o *L1Block) GetBlobFeesInRange(ctx context.Context, startBlock, endBlock uint64) ([]uint64, error) {
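
The GORM chain in `GetLatestL1Blocks` amounts to `SELECT * FROM l1_blocks ORDER BY number DESC LIMIT ?`. A runnable sketch against an in-memory SQLite database (the trimmed model and the `glebarez/sqlite` driver choice are assumptions for the example):

```go
package main

import (
	"context"
	"fmt"

	"github.com/glebarez/sqlite" // pure-Go SQLite dialector; any GORM driver works
	"gorm.io/gorm"
)

// l1Block is a stand-in for orm.L1Block with only the columns the query needs.
type l1Block struct {
	Number  uint64 `gorm:"primaryKey"`
	BaseFee uint64
}

func main() {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	if err := db.AutoMigrate(&l1Block{}); err != nil {
		panic(err)
	}
	db.Create(&[]l1Block{{Number: 1, BaseFee: 7}, {Number: 2, BaseFee: 9}, {Number: 3, BaseFee: 11}})

	// Same shape as GetLatestL1Blocks: newest blocks first, capped by the window size.
	var window []l1Block
	if err := db.WithContext(context.Background()).
		Model(&l1Block{}).
		Order("number DESC").
		Limit(2).
		Find(&window).Error; err != nil {
		panic(err)
	}
	fmt.Println(window) // [{3 11} {2 9}]
}
```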


@@ -1,4 +1,3 @@
[toolchain]
channel = "nightly-2025-08-18"
targets = ["riscv32im-unknown-none-elf", "x86_64-unknown-linux-gnu"]
components = ["llvm-tools", "rustc-dev"]
channel = "nightly-2025-02-14"
targets = ["riscv32im-unknown-none-elf", "x86_64-unknown-linux-gnu"]


@@ -1,4 +1,4 @@
.PHONY: prover prover_cpu lint tests_binary test_e2e_run test_run
.PHONY: prover lint tests_binary
RUST_MIN_STACK ?= 16777216
export RUST_MIN_STACK
@@ -36,16 +36,14 @@ E2E_HANDLE_SET ?= ../tests/prover-e2e/testset.json
DUMP_DIR ?= .work
prover:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --locked --release -Z unstable-options --lockfile-path ../crates/gpu_override/Cargo.lock -p prover
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZKVM_COMMIT=${ZKVM_COMMIT} $(MAKE) -C ../crates/gpu_override build
version:
echo ${GO_TAG}-${GIT_REV}-${ZK_VERSION}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZKVM_COMMIT=${ZKVM_COMMIT} $(MAKE) -C ../crates/gpu_override version
prover_cpu:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --locked --release -p prover
clean:
cargo clean -Z unstable-options --release -p prover --lockfile-path ../crates/gpu_override/Cargo.lock
tests_binary:
cargo clean && cargo test --release --no-run