diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs
index 1eacfef6c1..2870d3dccc 100644
--- a/crates/engine/primitives/src/config.rs
+++ b/crates/engine/primitives/src/config.rs
@@ -137,6 +137,8 @@ pub struct TreeConfig {
     account_worker_count: usize,
     /// Whether to enable V2 storage proofs.
     enable_proof_v2: bool,
+    /// Whether to disable cache metrics recording (can be expensive with large cached state).
+    disable_cache_metrics: bool,
 }
 
 impl Default for TreeConfig {
@@ -166,6 +168,7 @@ impl Default for TreeConfig {
             storage_worker_count: default_storage_worker_count(),
             account_worker_count: default_account_worker_count(),
             enable_proof_v2: false,
+            disable_cache_metrics: false,
         }
     }
 }
@@ -198,6 +201,7 @@ impl TreeConfig {
         storage_worker_count: usize,
         account_worker_count: usize,
         enable_proof_v2: bool,
+        disable_cache_metrics: bool,
     ) -> Self {
         Self {
             persistence_threshold,
@@ -224,6 +228,7 @@ impl TreeConfig {
             storage_worker_count,
             account_worker_count,
             enable_proof_v2,
+            disable_cache_metrics,
         }
     }
 
@@ -516,4 +521,15 @@ impl TreeConfig {
         self.enable_proof_v2 = enable_proof_v2;
         self
     }
+
+    /// Returns whether cache metrics recording is disabled.
+    pub const fn disable_cache_metrics(&self) -> bool {
+        self.disable_cache_metrics
+    }
+
+    /// Setter for whether to disable cache metrics recording.
+    pub const fn without_cache_metrics(mut self, disable_cache_metrics: bool) -> Self {
+        self.disable_cache_metrics = disable_cache_metrics;
+        self
+    }
 }
diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs
index 2a45b15c18..0f0b23b4ea 100644
--- a/crates/engine/tree/src/tree/cached_state.rs
+++ b/crates/engine/tree/src/tree/cached_state.rs
@@ -606,12 +606,21 @@ pub(crate) struct SavedCache {
     /// A guard to track in-flight usage of this cache.
     /// The cache is considered available if the strong count is 1.
     usage_guard: Arc<()>,
+
+    /// Whether to skip cache metrics recording (can be expensive with large cached state).
+    disable_cache_metrics: bool,
 }
 
 impl SavedCache {
     /// Creates a new instance with the internals
     pub(super) fn new(hash: B256, caches: ExecutionCache, metrics: CachedStateMetrics) -> Self {
-        Self { hash, caches, metrics, usage_guard: Arc::new(()) }
+        Self { hash, caches, metrics, usage_guard: Arc::new(()), disable_cache_metrics: false }
+    }
+
+    /// Sets whether to disable cache metrics recording.
+    pub(super) const fn with_disable_cache_metrics(mut self, disable: bool) -> Self {
+        self.disable_cache_metrics = disable;
+        self
     }
 
     /// Returns the hash for this cache
@@ -619,9 +628,9 @@
         self.hash
     }
 
-    /// Splits the cache into its caches and metrics, consuming it.
-    pub(crate) fn split(self) -> (ExecutionCache, CachedStateMetrics) {
-        (self.caches, self.metrics)
+    /// Splits the cache into its caches, metrics, and `disable_cache_metrics` flag, consuming it.
+    pub(crate) fn split(self) -> (ExecutionCache, CachedStateMetrics, bool) {
+        (self.caches, self.metrics, self.disable_cache_metrics)
     }
 
     /// Returns true if the cache is available for use (no other tasks are currently using it).
@@ -645,7 +654,13 @@
     }
 
     /// Updates the metrics for the [`ExecutionCache`].
+    ///
+    /// Note: This can be expensive with large cached state as it iterates over
+    /// all storage entries. Use `with_disable_cache_metrics(true)` to skip.
     pub(crate) fn update_metrics(&self) {
+        if self.disable_cache_metrics {
+            return;
+        }
         self.metrics.storage_cache_size.set(self.caches.total_storage_slots() as f64);
         self.metrics.account_cache_size.set(self.caches.account_cache.entry_count() as f64);
         self.metrics.code_cache_size.set(self.caches.code_cache.entry_count() as f64);
diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs
index ed179afa8b..1803929c89 100644
--- a/crates/engine/tree/src/tree/payload_processor/mod.rs
+++ b/crates/engine/tree/src/tree/payload_processor/mod.rs
@@ -139,6 +139,8 @@ where
     disable_parallel_sparse_trie: bool,
     /// Maximum concurrency for prewarm task.
     prewarm_max_concurrency: usize,
+    /// Whether to disable cache metrics recording.
+    disable_cache_metrics: bool,
 }
 
 impl PayloadProcessor
@@ -171,6 +173,7 @@ where
             sparse_state_trie: Arc::default(),
             disable_parallel_sparse_trie: config.disable_parallel_sparse_trie(),
             prewarm_max_concurrency: config.prewarm_max_concurrency(),
+            disable_cache_metrics: config.disable_cache_metrics(),
         }
     }
 }
@@ -300,7 +303,7 @@ where
         // Build a state provider for the multiproof task
         let provider = provider_builder.build().expect("failed to build provider");
         let provider = if let Some(saved_cache) = saved_cache {
-            let (cache, metrics) = saved_cache.split();
+            let (cache, metrics, _) = saved_cache.split();
             Box::new(CachedStateProvider::new(provider, cache, metrics))
                 as Box
         } else {
@@ -477,6 +480,7 @@ where
             debug!("creating new execution cache on cache miss");
             let cache = ExecutionCacheBuilder::default().build_caches(self.cross_block_cache_size);
             SavedCache::new(parent_hash, cache, CachedStateMetrics::zeroed())
+                .with_disable_cache_metrics(self.disable_cache_metrics)
         }
     }
 
@@ -558,6 +562,7 @@ where
         block_with_parent: BlockWithParent,
         bundle_state: &BundleState,
     ) {
+        let disable_cache_metrics = self.disable_cache_metrics;
        self.execution_cache.update_with_guard(|cached| {
            if cached.as_ref().is_some_and(|c| c.executed_block_hash() != block_with_parent.parent) {
                debug!(
@@ -571,7 +576,8 @@ where
            // Take existing cache (if any) or create fresh caches
            let (caches, cache_metrics) = match cached.take() {
                Some(existing) => {
-                    existing.split()
+                    let (c, m, _) = existing.split();
+                    (c, m)
                }
                None => (
                    ExecutionCacheBuilder::default().build_caches(self.cross_block_cache_size),
@@ -580,7 +586,8 @@ where
            };
 
            // Insert the block's bundle state into cache
-            let new_cache = SavedCache::new(block_with_parent.block.hash, caches, cache_metrics);
+            let new_cache = SavedCache::new(block_with_parent.block.hash, caches, cache_metrics)
+                .with_disable_cache_metrics(disable_cache_metrics);
            if new_cache.cache().insert_state(bundle_state).is_err() {
                *cached = None;
                debug!(target: "engine::caching", "cleared execution cache on update error");
diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs
index 494e2d0f26..6021098627 100644
--- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs
+++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs
@@ -278,8 +278,9 @@ where
         execution_cache.update_with_guard(|cached| {
             // consumes the `SavedCache` held by the prewarming task, which releases its usage
             // guard
-            let (caches, cache_metrics) = saved_cache.split();
-            let new_cache = SavedCache::new(hash, caches, cache_metrics);
+            let (caches, cache_metrics, disable_cache_metrics) = saved_cache.split();
+            let new_cache = SavedCache::new(hash, caches, cache_metrics)
+                .with_disable_cache_metrics(disable_cache_metrics);
 
             // Insert state into cache while holding the lock
             // Access the BundleState through the shared ExecutionOutcome
diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs
index 8662f797c7..d7c320fc52 100644
--- a/crates/node/core/src/args/engine.rs
+++ b/crates/node/core/src/args/engine.rs
@@ -37,6 +37,7 @@ pub struct DefaultEngineValues {
     storage_worker_count: Option<usize>,
     account_worker_count: Option<usize>,
     enable_proof_v2: bool,
+    cache_metrics_disabled: bool,
 }
 
 impl DefaultEngineValues {
@@ -172,6 +173,12 @@ impl DefaultEngineValues {
         self.enable_proof_v2 = v;
         self
     }
+
+    /// Set whether to disable cache metrics by default
+    pub const fn with_cache_metrics_disabled(mut self, v: bool) -> Self {
+        self.cache_metrics_disabled = v;
+        self
+    }
 }
 
 impl Default for DefaultEngineValues {
@@ -197,6 +204,7 @@ impl Default for DefaultEngineValues {
             storage_worker_count: None,
             account_worker_count: None,
             enable_proof_v2: false,
+            cache_metrics_disabled: false,
         }
     }
 }
@@ -320,6 +328,10 @@ pub struct EngineArgs {
     /// Enable V2 storage proofs for state root calculations
     #[arg(long = "engine.enable-proof-v2", default_value_t = DefaultEngineValues::get_global().enable_proof_v2)]
     pub enable_proof_v2: bool,
+
+    /// Disable cache metrics recording, which can take up to 50ms with large cached state.
+    #[arg(long = "engine.disable-cache-metrics", default_value_t = DefaultEngineValues::get_global().cache_metrics_disabled)]
+    pub cache_metrics_disabled: bool,
 }
 
 #[allow(deprecated)]
@@ -346,6 +358,7 @@ impl Default for EngineArgs {
             storage_worker_count,
             account_worker_count,
             enable_proof_v2,
+            cache_metrics_disabled,
         } = DefaultEngineValues::get_global().clone();
         Self {
             persistence_threshold,
@@ -371,6 +384,7 @@ impl Default for EngineArgs {
             storage_worker_count,
             account_worker_count,
             enable_proof_v2,
+            cache_metrics_disabled,
         }
     }
 }
@@ -407,6 +421,7 @@ impl EngineArgs {
         }
 
         config = config.with_enable_proof_v2(self.enable_proof_v2);
+        config = config.without_cache_metrics(self.cache_metrics_disabled);
 
         config
     }
@@ -458,6 +473,7 @@ mod tests {
             storage_worker_count: Some(16),
             account_worker_count: Some(8),
             enable_proof_v2: false,
+            cache_metrics_disabled: true,
         };
 
         let parsed_args = CommandParser::<EngineArgs>::parse_from([
@@ -488,6 +504,7 @@ mod tests {
             "16",
             "--engine.account-worker-count",
             "8",
+            "--engine.disable-cache-metrics",
         ])
         .args;
 
diff --git a/docs/vocs/docs/pages/cli/op-reth/node.mdx b/docs/vocs/docs/pages/cli/op-reth/node.mdx
index cba041cbe7..c2de9fae56 100644
--- a/docs/vocs/docs/pages/cli/op-reth/node.mdx
+++ b/docs/vocs/docs/pages/cli/op-reth/node.mdx
@@ -993,6 +993,9 @@ Engine:
       --engine.enable-proof-v2
           Enable V2 storage proofs for state root calculations
 
+      --engine.disable-cache-metrics
+          Disable cache metrics recording, which can take up to 50ms with large cached state
+
 ERA:
       --era.enable
           Enable import from ERA1 files
diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx
index 328a22c445..3766d7ed9d 100644
--- a/docs/vocs/docs/pages/cli/reth/node.mdx
+++ b/docs/vocs/docs/pages/cli/reth/node.mdx
@@ -993,6 +993,9 @@ Engine:
       --engine.enable-proof-v2
           Enable V2 storage proofs for state root calculations
 
+      --engine.disable-cache-metrics
+          Disable cache metrics recording, which can take up to 50ms with large cached state
+
 ERA:
       --era.enable
           Enable import from ERA1 files
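
For reference, a minimal sketch of how the new knob could be set programmatically rather than via `--engine.disable-cache-metrics`. This is not part of the diff: the `reth_engine_primitives` crate path is an assumption, and the function names around it are hypothetical; only `TreeConfig::default`, `without_cache_metrics`, and `disable_cache_metrics` come from the patch above.

```rust
// Hypothetical usage sketch: build a TreeConfig with cache metrics recording
// disabled, mirroring what the new CLI flag does through EngineArgs.
use reth_engine_primitives::TreeConfig; // assumed crate path

fn tree_config_without_cache_metrics() -> TreeConfig {
    // `without_cache_metrics(true)` sets the new `disable_cache_metrics` flag,
    // so `SavedCache::update_metrics` returns early instead of walking all
    // cached storage entries.
    TreeConfig::default().without_cache_metrics(true)
}

fn main() {
    let config = tree_config_without_cache_metrics();
    // The getter added in the same patch reflects the flag.
    assert!(config.disable_cache_metrics());
}
```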