diff --git a/Cargo.lock b/Cargo.lock
index dd503bc12b..ca2435d4a7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8100,6 +8100,7 @@ dependencies = [
  "sysinfo",
  "tempfile",
  "thiserror 2.0.18",
+ "tracing",
 ]
 
 [[package]]
diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs
index 905c4fbb78..0d45eda7fd 100644
--- a/crates/engine/tree/src/persistence.rs
+++ b/crates/engine/tree/src/persistence.rs
@@ -21,7 +21,7 @@ use std::{
     time::Instant,
 };
 use thiserror::Error;
-use tracing::{debug, error};
+use tracing::{debug, error, instrument};
 
 /// Writes parts of reth's in memory tree state to the database and static files.
 ///
@@ -77,6 +77,7 @@ where
 
     /// Prunes block data before the given block number according to the configured prune
     /// configuration.
+    #[instrument(level = "debug", target = "engine::persistence", skip_all, fields(block_num))]
     fn prune_before(&mut self, block_num: u64) -> Result<PrunerOutput, PrunerError> {
         debug!(target: "engine::persistence", ?block_num, "Running pruner");
         let start_time = Instant::now();
@@ -135,6 +136,7 @@ where
         Ok(())
     }
 
+    #[instrument(level = "debug", target = "engine::persistence", skip_all, fields(new_tip_num))]
     fn on_remove_blocks_above(
         &self,
         new_tip_num: u64,
@@ -152,6 +154,7 @@ where
         Ok(new_tip_hash.map(|hash| BlockNumHash { hash, number: new_tip_num }))
     }
 
+    #[instrument(level = "debug", target = "engine::persistence", skip_all, fields(block_count = blocks.len()))]
     fn on_save_blocks(
         &mut self,
         blocks: Vec<ExecutedBlockWithTrieUpdates<N::Primitives>>,
diff --git a/crates/prune/prune/src/pruner.rs b/crates/prune/prune/src/pruner.rs
index 3232a78532..c445e52f12 100644
--- a/crates/prune/prune/src/pruner.rs
+++ b/crates/prune/prune/src/pruner.rs
@@ -15,7 +15,7 @@ use reth_stages_types::StageId;
 use reth_tokio_util::{EventSender, EventStream};
 use std::time::{Duration, Instant};
 use tokio::sync::watch;
-use tracing::debug;
+use tracing::{debug, instrument};
 
 /// Result of [`Pruner::run`] execution.
 pub type PrunerResult = Result<PrunerOutput, PrunerError>;
@@ -114,6 +114,12 @@ where
     ///
     /// Returns a [`PruneProgress`], indicating whether pruning is finished, or there is more data
     /// to prune.
+    #[instrument(
+        name = "Pruner::run_with_provider",
+        level = "debug",
+        target = "pruner",
+        skip(self, provider)
+    )]
     pub fn run_with_provider(
         &mut self,
         provider: &Provider,
@@ -162,6 +168,7 @@ where
     ///
     /// Returns a list of stats per pruned segment, total number of entries pruned, and
     /// [`PruneProgress`].
+    #[instrument(level = "debug", target = "pruner", skip_all, fields(segments = self.segments.len()))]
     fn prune_segments(
         &mut self,
         provider: &Provider,
@@ -330,6 +337,7 @@ where
     ///
     /// Returns a [`PruneProgress`], indicating whether pruning is finished, or there is more data
     /// to prune.
+    #[instrument(name = "Pruner::run", level = "debug", target = "pruner", skip(self))]
     pub fn run(&mut self, tip_block_number: BlockNumber) -> PrunerResult {
         let provider = self.provider_factory.database_provider_rw()?;
         let result = self.run_with_provider(&provider, tip_block_number);
diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml
index 2dd2517acf..ccb3f5b0b1 100644
--- a/crates/storage/db/Cargo.toml
+++ b/crates/storage/db/Cargo.toml
@@ -18,6 +18,7 @@ reth-fs-util.workspace = true
 reth-storage-errors.workspace = true
 reth-nippy-jar.workspace = true
 reth-tracing.workspace = true
+tracing.workspace = true
 reth-static-file-types.workspace = true
 
 # ethereum
diff --git a/crates/storage/db/src/implementation/mdbx/tx.rs b/crates/storage/db/src/implementation/mdbx/tx.rs
index 5b4acad700..769473ec97 100644
--- a/crates/storage/db/src/implementation/mdbx/tx.rs
+++ b/crates/storage/db/src/implementation/mdbx/tx.rs
@@ -11,7 +11,7 @@ use reth_db_api::{
 };
 use reth_libmdbx::{ffi::MDBX_dbi, CommitLatency, Transaction, TransactionKind, WriteFlags, RW};
 use reth_storage_errors::db::{DatabaseWriteError, DatabaseWriteOperation};
-use reth_tracing::tracing::{debug, trace, warn};
+use reth_tracing::tracing::{debug, instrument, trace, warn};
 use std::{
     backtrace::Backtrace,
     collections::HashMap,
@@ -307,6 +307,7 @@ impl<K: TransactionKind> DbTx for Tx<K> {
         })
     }
 
+    #[instrument(name = "Tx::commit", level = "debug", target = "providers::db", skip_all)]
     fn commit(self) -> Result<(), DatabaseError> {
         self.execute_with_close_transaction_metric(TransactionOutcome::Commit, |this| {
             match this.inner.commit().map_err(|e| DatabaseError::Commit(e.into())) {
diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs
index 83443d2692..2fd840c015 100644
--- a/crates/storage/provider/src/providers/database/provider.rs
+++ b/crates/storage/provider/src/providers/database/provider.rs
@@ -3513,6 +3513,12 @@ impl DBProvider for DatabaseProvider
     }
 
     /// Commit database transaction, static files, and pending `RocksDB` batches.
+    #[instrument(
+        name = "DatabaseProvider::commit",
+        level = "debug",
+        target = "providers::db",
+        skip_all
+    )]
     fn commit(self) -> ProviderResult<()> {
         // For unwinding it makes more sense to commit the database first, since if
         // it is interrupted before the static files commit, we can just
diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs
index 3d42feedb0..ed0b53b80b 100644
--- a/crates/storage/provider/src/providers/state/historical.rs
+++ b/crates/storage/provider/src/providers/state/historical.rs
@@ -187,7 +187,7 @@ impl<'b, Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + Block
 
         if self.check_distance_against_limit(EPOCH_SLOTS)? {
             tracing::warn!(
-                target: "provider::historical_sp",
+                target: "providers::historical_sp",
                 target = self.block_number,
                 "Attempt to calculate state root for an old block might result in OOM"
             );
@@ -204,7 +204,7 @@ impl<'b, Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + Block
 
         if self.check_distance_against_limit(EPOCH_SLOTS * 10)? {
             tracing::warn!(
-                target: "provider::historical_sp",
+                target: "providers::historical_sp",
                 target = self.block_number,
                 "Attempt to calculate storage root for an old block might result in OOM"
             );
diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs
index cd2cfb216d..65da2e6217 100644
--- a/crates/storage/provider/src/providers/static_file/manager.rs
+++ b/crates/storage/provider/src/providers/static_file/manager.rs
@@ -932,7 +932,7 @@ impl StaticFileProvider {
         }
 
         debug!(
-            target: "provider::static_file",
+            target: "providers::static_file",
             ?segment,
             ?block_height,
             "Deleting static file below block"
@@ -941,7 +941,7 @@ impl StaticFileProvider {
         // now we need to wipe the static file, this will take care of updating the index and
         // advance the lowest tracked block height for the segment.
         let header = self.delete_jar(segment, block_height).inspect_err(|err| {
-            warn!( target: "provider::static_file", ?segment, %block_height, ?err, "Failed to delete static file below block")
+            warn!( target: "providers::static_file", ?segment, %block_height, ?err, "Failed to delete static file below block")
         })?;
 
         deleted_headers.push(header);
@@ -967,7 +967,7 @@ impl StaticFileProvider {
         } else {
             let file = self.path.join(segment.filename(&fixed_block_range));
             debug!(
-                target: "provider::static_file",
+                target: "providers::static_file",
                 ?file,
                 ?fixed_block_range,
                 ?block,
@@ -998,14 +998,14 @@ impl StaticFileProvider {
 
         while let Some(block_height) = self.get_highest_static_file_block(segment) {
             debug!(
-                target: "provider::static_file",
+                target: "providers::static_file",
                 ?segment,
                 ?block_height,
                 "Deleting static file jar"
             );
 
             let header = self.delete_jar(segment, block_height).inspect_err(|err| {
-                warn!(target: "provider::static_file", ?segment, %block_height, ?err, "Failed to delete static file jar")
+                warn!(target: "providers::static_file", ?segment, %block_height, ?err, "Failed to delete static file jar")
             })?;
 
             deleted_headers.push(header);
@@ -1025,12 +1025,12 @@ impl StaticFileProvider {
         let key = (fixed_block_range.end(), segment);
 
         // Avoid using `entry` directly to avoid a write lock in the common case.
-        trace!(target: "provider::static_file", ?segment, ?fixed_block_range, "Getting provider");
+        trace!(target: "providers::static_file", ?segment, ?fixed_block_range, "Getting provider");
         let mut provider: StaticFileJarProvider<'_, N> = if let Some(jar) = self.map.get(&key) {
-            trace!(target: "provider::static_file", ?segment, ?fixed_block_range, "Jar found in cache");
+            trace!(target: "providers::static_file", ?segment, ?fixed_block_range, "Jar found in cache");
             jar.into()
         } else {
-            trace!(target: "provider::static_file", ?segment, ?fixed_block_range, "Creating jar from scratch");
+            trace!(target: "providers::static_file", ?segment, ?fixed_block_range, "Creating jar from scratch");
             let path = self.path.join(segment.filename(fixed_block_range));
             let jar = NippyJar::load(&path).map_err(ProviderError::other)?;
             self.map.entry(key).insert(LoadedJar::new(jar)?).downgrade().into()
@@ -1105,7 +1105,7 @@ impl StaticFileProvider {
         segment_max_block: Option<BlockNumber>,
     ) -> ProviderResult<()> {
         debug!(
-            target: "provider::static_file",
+            target: "providers::static_file",
             ?segment,
             ?segment_max_block,
             "Updating provider index"
@@ -1217,20 +1217,20 @@ impl StaticFileProvider {
                 }
 
                 // Update the cached provider.
-                debug!(target: "provider::static_file", ?segment, "Inserting updated jar into cache");
+                debug!(target: "providers::static_file", ?segment, "Inserting updated jar into cache");
                 self.map.insert((fixed_range.end(), segment), LoadedJar::new(jar)?);
 
                 // Delete any cached provider that no longer has an associated jar.
-                debug!(target: "provider::static_file", ?segment, "Cleaning up jar map");
+                debug!(target: "providers::static_file", ?segment, "Cleaning up jar map");
                 self.map.retain(|(end, seg), _| !(*seg == segment && *end > fixed_range.end()));
             }
             None => {
-                debug!(target: "provider::static_file", ?segment, "Removing segment from index");
+                debug!(target: "providers::static_file", ?segment, "Removing segment from index");
                 indexes.remove(segment);
             }
         };
 
-        debug!(target: "provider::static_file", ?segment, "Updated provider index");
+        debug!(target: "providers::static_file", ?segment, "Updated provider index");
 
         Ok(())
     }
@@ -2335,7 +2335,7 @@ impl StaticFileWriter for StaticFileProvider {
             return Err(ProviderError::ReadOnlyStaticFileAccess);
         }
 
-        trace!(target: "provider::static_file", ?block, ?segment, "Getting static file writer.");
+        trace!(target: "providers::static_file", ?block, ?segment, "Getting static file writer.");
         self.writers.get_or_create(segment, || {
             StaticFileProviderRW::new(segment, block, Arc::downgrade(&self.0), self.metrics.clone())
         })
@@ -2437,7 +2437,7 @@ impl ChangeSetReader for StaticFileProvider {
             // This is not expected but means we are out of the range / file somehow, and can't
             // continue
             debug!(
-                target: "provider::static_file",
+                target: "providers::static_file",
                 ?low,
                 ?mid,
                 ?high,
@@ -2556,7 +2556,7 @@ impl StorageChangeSetReader for StaticFileProvider {
                 }
             } else {
                 debug!(
-                    target: "provider::static_file",
+                    target: "providers::static_file",
                     ?low,
                     ?mid,
                     ?high,
diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs
index 272724057b..5c16293da7 100644
--- a/crates/storage/provider/src/providers/static_file/writer.rs
+++ b/crates/storage/provider/src/providers/static_file/writer.rs
@@ -20,7 +20,7 @@ use std::{
     sync::{Arc, Weak},
     time::Instant,
 };
-use tracing::debug;
+use tracing::{debug, instrument};
 
 /// Represents different pruning strategies for various static file segments.
 #[derive(Debug, Clone, Copy)]
@@ -112,8 +112,14 @@ impl StaticFileWriters {
         Ok(StaticFileProviderRWRefMut(write_guard))
     }
 
+    #[instrument(
+        name = "StaticFileWriters::commit",
+        level = "debug",
+        target = "providers::static_file",
+        skip_all
+    )]
     pub(crate) fn commit(&self) -> ProviderResult<()> {
-        debug!(target: "provider::static_file", "Committing all static file segments");
+        debug!(target: "providers::static_file", "Committing all static file segments");
 
         for writer_lock in [
             &self.headers,
@@ -129,7 +135,7 @@ impl StaticFileWriters {
             }
         }
 
-        debug!(target: "provider::static_file", "Committed all static file segments");
+        debug!(target: "providers::static_file", "Committed all static file segments");
 
         Ok(())
     }
@@ -156,8 +162,14 @@ impl StaticFileWriters {
     ///
     /// Must be called after `sync_all` was called on individual writers.
     /// Returns an error if any writer has prune queued.
+    #[instrument(
+        name = "StaticFileWriters::finalize",
+        level = "debug",
+        target = "providers::static_file",
+        skip_all
+    )]
     pub(crate) fn finalize(&self) -> ProviderResult<()> {
-        debug!(target: "provider::static_file", "Finalizing all static file segments into disk");
+        debug!(target: "providers::static_file", "Finalizing all static file segments into disk");
 
         for writer_lock in [
             &self.headers,
@@ -173,7 +185,7 @@ impl StaticFileWriters {
             }
         }
 
-        debug!(target: "provider::static_file", "Finalized all static file segments into disk");
+        debug!(target: "providers::static_file", "Finalized all static file segments into disk");
         Ok(())
     }
 }
@@ -318,7 +330,7 @@ impl StaticFileProviderRW {
         }
 
         debug!(
-            target: "provider::static_file",
+            target: "providers::static_file",
             segment = ?self.writer.user_header().segment(),
             path = ?self.data_path,
             pruned_rows,
@@ -359,6 +371,12 @@ impl StaticFileProviderRW {
     /// If `sync_all()` was not called, this will call it first to ensure data is persisted.
     ///
     /// Returns an error if prune is queued (use [`Self::commit`] instead).
+    #[instrument(
+        name = "StaticFileProviderRW::finalize",
+        level = "debug",
+        target = "providers::static_file",
+        skip_all
+    )]
     pub fn finalize(&mut self) -> ProviderResult<()> {
         if self.prune_on_commit.is_some() {
             return Err(StaticFileWriterError::FinalizeWithPruneQueued.into());
@@ -376,13 +394,19 @@ impl StaticFileProviderRW {
     }
 
     /// Commits configuration changes to disk and updates the reader index with the new changes.
+    #[instrument(
+        name = "StaticFileProviderRW::commit",
+        level = "debug",
+        target = "providers::static_file",
+        skip_all
+    )]
     pub fn commit(&mut self) -> ProviderResult<()> {
         let start = Instant::now();
 
         // Truncates the data file if instructed to.
         if let Some(strategy) = self.prune_on_commit.take() {
             debug!(
-                target: "provider::static_file",
+                target: "providers::static_file",
                 segment = ?self.writer.user_header().segment(),
                 "Pruning data on commit"
             );
@@ -408,7 +432,7 @@ impl StaticFileProviderRW {
 
         if self.writer.is_dirty() {
             debug!(
-                target: "provider::static_file",
+                target: "providers::static_file",
                 segment = ?self.writer.user_header().segment(),
                 "Committing writer to disk"
             );
@@ -425,7 +449,7 @@ impl StaticFileProviderRW {
         }
 
         debug!(
-            target: "provider::static_file",
+            target: "providers::static_file",
             segment = ?self.writer.user_header().segment(),
             path = ?self.data_path,
             duration = ?start.elapsed(),
@@ -446,7 +470,7 @@ impl StaticFileProviderRW {
         let start = Instant::now();
 
         debug!(
-            target: "provider::static_file",
+            target: "providers::static_file",
            segment = ?self.writer.user_header().segment(),
             "Committing writer to disk (without sync)"
         );
@@ -463,7 +487,7 @@ impl StaticFileProviderRW {
         }
 
         debug!(
-            target: "provider::static_file",
+            target: "providers::static_file",
             segment = ?self.writer.user_header().segment(),
             path = ?self.data_path,
             duration = ?start.elapsed(),
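
Note: every span added in this diff follows the same `tracing` pattern: `#[instrument]` with an explicit `name`/`target`, `skip_all` or `skip(...)` to keep heavyweight arguments out of the span, and `fields(...)` to record only cheap summary values. The standalone sketch below illustrates that pattern; the target `example` and the function `save_blocks` are illustrative names (not reth APIs), and it assumes the `tracing` and `tracing-subscriber` crates as dependencies.

```rust
use tracing::{debug, instrument, Level};

// `skip_all` prevents the macro from recording every argument (useful when
// arguments are large); `fields(...)` records just the block count instead.
#[instrument(level = "debug", target = "example", skip_all, fields(block_count = blocks.len()))]
fn save_blocks(blocks: Vec<u64>) {
    // This event is emitted inside the `save_blocks` span, so subscribers
    // see it annotated with the span name and its `block_count` field.
    debug!(target: "example", "saving blocks");
}

fn main() {
    // Print spans and events at DEBUG and below to stdout.
    tracing_subscriber::fmt().with_max_level(Level::DEBUG).init();
    save_blocks(vec![1, 2, 3]);
}
```

Filtering by the span's target works the same way as for plain events, e.g. `RUST_LOG=example=debug`, which is presumably why this diff also normalizes the old `provider::*` targets to `providers::*`: one consistent prefix to filter on.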