diff --git a/Cargo.lock b/Cargo.lock index 7d6e821472..68308a1ee3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7907,6 +7907,7 @@ dependencies = [ "reth-stages-types", "reth-static-file", "reth-static-file-types", + "reth-storage-api", "reth-tasks", "reth-trie", "reth-trie-common", diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index fa9d54b4fa..1847e24e2f 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -50,6 +50,7 @@ reth-stages-types = { workspace = true, optional = true } reth-static-file-types = { workspace = true, features = ["clap"] } reth-static-file.workspace = true reth-tasks.workspace = true +reth-storage-api.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-trie-db = { workspace = true, features = ["metrics"] } reth-trie-common.workspace = true diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index 13584c4baa..1b3739bb46 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -21,6 +21,7 @@ use reth_node_builder::NodeTypesWithDB; use reth_primitives_traits::ValueWithSubKey; use reth_provider::{providers::ProviderNodeTypes, ChangeSetReader, StaticFileProviderFactory}; use reth_static_file_types::StaticFileSegment; +use reth_storage_api::StorageChangeSetReader; use tracing::error; /// The arguments for the `reth db get` command @@ -82,6 +83,33 @@ impl Command { table.view(&GetValueViewer { tool, key, subkey, end_key, end_subkey, raw })? 
} Subcommand::StaticFile { segment, key, subkey, raw } => { + if let StaticFileSegment::StorageChangeSets = segment { + let storage_key = + table_subkey::(subkey.as_deref()).ok(); + let key = table_key::(&key)?; + + let provider = tool.provider_factory.static_file_provider(); + + if let Some(storage_key) = storage_key { + let entry = provider.get_storage_before_block( + key.block_number(), + key.address(), + storage_key, + )?; + + if let Some(entry) = entry { + println!("{}", serde_json::to_string_pretty(&entry)?); + } else { + error!(target: "reth::cli", "No content for the given table key."); + } + return Ok(()); + } + + let changesets = provider.storage_changeset(key.block_number())?; + println!("{}", serde_json::to_string_pretty(&changesets)?); + return Ok(()); + } + let (key, subkey, mask): (u64, _, _) = match segment { StaticFileSegment::Headers => ( table_key::(&key)?, @@ -112,6 +140,9 @@ impl Command { AccountChangesetMask::MASK, ) } + StaticFileSegment::StorageChangeSets => { + unreachable!("storage changesets handled above"); + } }; // handle account changesets differently if a subkey is provided. 
@@ -190,6 +221,9 @@ impl Command { StaticFileSegment::AccountChangeSets => { unreachable!("account changeset static files are special cased before this match") } + StaticFileSegment::StorageChangeSets => { + unreachable!("storage changeset static files are special cased before this match") + } } } } diff --git a/crates/cli/commands/src/db/settings.rs b/crates/cli/commands/src/db/settings.rs index 9bfe01b52a..82578e3410 100644 --- a/crates/cli/commands/src/db/settings.rs +++ b/crates/cli/commands/src/db/settings.rs @@ -69,6 +69,11 @@ pub enum SetCommand { #[clap(action(ArgAction::Set))] value: bool, }, + /// Store storage changesets in static files instead of the database + StorageChangesets { + #[clap(action(ArgAction::Set))] + value: bool, + }, } impl Command { @@ -115,6 +120,7 @@ impl Command { transaction_hash_numbers_in_rocksdb: _, account_history_in_rocksdb: _, account_changesets_in_static_files: _, + storage_changesets_in_static_files: _, } = settings.unwrap_or_else(StorageSettings::legacy); // Update the setting based on the key @@ -167,6 +173,14 @@ impl Command { settings.account_history_in_rocksdb = value; println!("Set account_history_in_rocksdb = {}", value); } + SetCommand::StorageChangesets { value } => { + if settings.storage_changesets_in_static_files == value { + println!("storage_changesets_in_static_files is already set to {}", value); + return Ok(()); + } + settings.storage_changesets_in_static_files = value; + println!("Set storage_changesets_in_static_files = {}", value); + } } // Write updated settings diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs index a23b4374ab..c1f43fef89 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -91,6 +91,9 @@ impl Command { StaticFileSegment::AccountChangeSets => { writer.prune_account_changesets(highest_block)?; } + StaticFileSegment::StorageChangeSets => { + writer.prune_storage_changesets(highest_block)?; + } } } } 
diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 3f979f7d65..b7e9d705de 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -438,6 +438,8 @@ pub struct BlocksPerFileConfig { pub transaction_senders: Option, /// Number of blocks per file for the account changesets segment. pub account_change_sets: Option, + /// Number of blocks per file for the storage changesets segment. + pub storage_change_sets: Option, } impl StaticFilesConfig { @@ -451,6 +453,7 @@ impl StaticFilesConfig { receipts, transaction_senders, account_change_sets, + storage_change_sets, } = self.blocks_per_file; eyre::ensure!(headers != Some(0), "Headers segment blocks per file must be greater than 0"); eyre::ensure!( @@ -469,6 +472,10 @@ impl StaticFilesConfig { account_change_sets != Some(0), "Account changesets segment blocks per file must be greater than 0" ); + eyre::ensure!( + storage_change_sets != Some(0), + "Storage changesets segment blocks per file must be greater than 0" + ); Ok(()) } @@ -480,6 +487,7 @@ impl StaticFilesConfig { receipts, transaction_senders, account_change_sets, + storage_change_sets, } = self.blocks_per_file; let mut map = StaticFileMap::default(); @@ -492,6 +500,7 @@ impl StaticFilesConfig { StaticFileSegment::Receipts => receipts, StaticFileSegment::TransactionSenders => transaction_senders, StaticFileSegment::AccountChangeSets => account_change_sets, + StaticFileSegment::StorageChangeSets => storage_change_sets, }; if let Some(blocks_per_file) = blocks_per_file { diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 3d4913780c..1e04d19dba 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -32,7 +32,8 @@ use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock, Sealed use reth_provider::{ BlockExecutionOutput, BlockExecutionResult, BlockNumReader, BlockReader, ChangeSetReader, DatabaseProviderFactory, 
HashedPostStateProvider, ProviderError, StageCheckpointReader, - StateProviderBox, StateProviderFactory, StateReader, TransactionVariant, + StateProviderBox, StateProviderFactory, StateReader, StorageChangeSetReader, + TransactionVariant, }; use reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; @@ -317,6 +318,7 @@ where

::Provider: BlockReader + StageCheckpointReader + ChangeSetReader + + StorageChangeSetReader + BlockNumReader, C: ConfigureEvm + 'static, T: PayloadTypes>, diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index b5f1272b67..4664c1906c 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -1323,7 +1323,7 @@ mod tests { use reth_provider::{ providers::OverlayStateProviderFactory, test_utils::create_test_provider_factory, BlockNumReader, BlockReader, ChangeSetReader, DatabaseProviderFactory, LatestStateProvider, - PruneCheckpointReader, StageCheckpointReader, StateProviderBox, + PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StorageChangeSetReader, }; use reth_trie::MultiProof; use reth_trie_db::ChangesetCache; @@ -1350,6 +1350,7 @@ mod tests { + StageCheckpointReader + PruneCheckpointReader + ChangeSetReader + + StorageChangeSetReader + BlockNumReader, > + Clone + Send diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 138fad5b16..2372256cab 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -39,7 +39,7 @@ use reth_provider::{ providers::OverlayStateProviderFactory, BlockExecutionOutput, BlockNumReader, BlockReader, ChangeSetReader, DatabaseProviderFactory, DatabaseProviderROFactory, HashedPostStateProvider, ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProvider, - StateProviderFactory, StateReader, + StateProviderFactory, StateReader, StorageChangeSetReader, }; use reth_revm::db::{states::bundle_state::BundleRetention, State}; use reth_trie::{updates::TrieUpdates, HashedPostState, StateRoot}; @@ -144,6 +144,7 @@ where + StageCheckpointReader + PruneCheckpointReader + ChangeSetReader + + StorageChangeSetReader + 
BlockNumReader, > + BlockReader

+ ChangeSetReader @@ -1336,6 +1337,7 @@ where + StageCheckpointReader + PruneCheckpointReader + ChangeSetReader + + StorageChangeSetReader + BlockNumReader, > + BlockReader
+ StateProviderFactory diff --git a/crates/node/core/src/args/static_files.rs b/crates/node/core/src/args/static_files.rs index d0048022cf..ac710d0c9e 100644 --- a/crates/node/core/src/args/static_files.rs +++ b/crates/node/core/src/args/static_files.rs @@ -2,6 +2,7 @@ use clap::Args; use reth_config::config::{BlocksPerFileConfig, StaticFilesConfig}; +use reth_storage_api::StorageSettings; /// Blocks per static file when running in `--minimal` node. /// @@ -40,6 +41,10 @@ pub struct StaticFilesArgs { #[arg(long = "static-files.blocks-per-file.account-change-sets")] pub blocks_per_file_account_change_sets: Option, + /// Number of blocks per file for the storage changesets segment. + #[arg(long = "static-files.blocks-per-file.storage-change-sets")] + pub blocks_per_file_storage_change_sets: Option, + /// Store receipts in static files instead of the database. /// /// When enabled, receipts will be written to static files on disk instead of the database. @@ -68,6 +73,16 @@ pub struct StaticFilesArgs { /// the node has been initialized, changing this flag requires re-syncing from scratch. #[arg(long = "static-files.account-change-sets", default_value_t = default_static_file_flag(), action = clap::ArgAction::Set)] pub account_changesets: bool, + + /// Store storage changesets in static files. + /// + /// When enabled, storage changesets will be written to static files on disk instead of the + /// database. + /// + /// Note: This setting can only be configured at genesis initialization. Once + /// the node has been initialized, changing this flag requires re-syncing from scratch. 
+ #[arg(long = "static-files.storage-change-sets", default_value_t = default_static_file_flag(), action = clap::ArgAction::Set)] + pub storage_changesets: bool, } impl StaticFilesArgs { @@ -98,9 +113,25 @@ impl StaticFilesArgs { account_change_sets: self .blocks_per_file_account_change_sets .or(config.blocks_per_file.account_change_sets), + storage_change_sets: self + .blocks_per_file_storage_change_sets + .or(config.blocks_per_file.storage_change_sets), }, } } + + /// Converts the static files arguments into [`StorageSettings`]. + pub const fn to_settings(&self) -> StorageSettings { + #[cfg(feature = "edge")] + let base = StorageSettings::edge(); + #[cfg(not(feature = "edge"))] + let base = StorageSettings::legacy(); + + base.with_receipts_in_static_files(self.receipts) + .with_transaction_senders_in_static_files(self.transaction_senders) + .with_account_changesets_in_static_files(self.account_changesets) + .with_storage_changesets_in_static_files(self.storage_changesets) + } } impl Default for StaticFilesArgs { @@ -111,9 +142,11 @@ impl Default for StaticFilesArgs { blocks_per_file_receipts: None, blocks_per_file_transaction_senders: None, blocks_per_file_account_change_sets: None, + blocks_per_file_storage_change_sets: None, receipts: default_static_file_flag(), transaction_senders: default_static_file_flag(), account_changesets: default_static_file_flag(), + storage_changesets: default_static_file_flag(), } } } diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 98502fdd11..225c957c1c 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -363,6 +363,7 @@ impl NodeConfig { .with_receipts_in_static_files(self.static_files.receipts) .with_transaction_senders_in_static_files(self.static_files.transaction_senders) .with_account_changesets_in_static_files(self.static_files.account_changesets) + .with_storage_changesets_in_static_files(self.static_files.storage_changesets) 
.with_transaction_hash_numbers_in_rocksdb(self.rocksdb.all || self.rocksdb.tx_hash) .with_storages_history_in_rocksdb(self.rocksdb.all || self.rocksdb.storages_history) .with_account_history_in_rocksdb(self.rocksdb.all || self.rocksdb.account_history) diff --git a/crates/stages/stages/src/stages/index_storage_history.rs b/crates/stages/stages/src/stages/index_storage_history.rs index e37dbaa441..36f2f6ede6 100644 --- a/crates/stages/stages/src/stages/index_storage_history.rs +++ b/crates/stages/stages/src/stages/index_storage_history.rs @@ -1,4 +1,4 @@ -use super::collect_history_indices; +use super::{collect_history_indices, collect_storage_history_indices}; use crate::{stages::utils::load_storage_history, StageCheckpoint, StageId}; use reth_config::config::{EtlConfig, IndexHistoryConfig}; use reth_db_api::{ @@ -8,7 +8,8 @@ use reth_db_api::{ }; use reth_provider::{ DBProvider, EitherWriter, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter, - RocksDBProviderFactory, StorageSettingsCache, + RocksDBProviderFactory, StaticFileProviderFactory, StorageChangeSetReader, + StorageSettingsCache, }; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment}; use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; @@ -54,6 +55,8 @@ where + PruneCheckpointWriter + StorageSettingsCache + RocksDBProviderFactory + + StorageChangeSetReader + + StaticFileProviderFactory + reth_provider::NodePrimitivesProvider, { /// Return the id of the stage @@ -121,7 +124,9 @@ where } info!(target: "sync::stages::index_storage_history::exec", ?first_sync, ?use_rocksdb, "Collecting indices"); - let collector = + let collector = if provider.cached_storage_settings().storage_changesets_in_static_files { + collect_storage_history_indices(provider, range.clone(), &self.etl_config)? 
+ } else { collect_history_indices::<_, tables::StorageChangeSets, tables::StoragesHistory, _>( provider, BlockNumberAddress::range(range.clone()), @@ -130,7 +135,8 @@ where }, |(key, value)| (key.block_number(), AddressStorageKey((key.address(), value.key))), &self.etl_config, - )?; + )? + }; info!(target: "sync::stages::index_storage_history::exec", "Loading indices into database"); diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index c7ab0f7f01..cd9d5ebd43 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -9,7 +9,7 @@ use reth_db_api::{ use reth_primitives_traits::{GotExpected, SealedHeader}; use reth_provider::{ ChangeSetReader, DBProvider, HeaderProvider, ProviderError, StageCheckpointReader, - StageCheckpointWriter, StatsReader, TrieWriter, + StageCheckpointWriter, StatsReader, StorageChangeSetReader, TrieWriter, }; use reth_stages_api::{ BlockErrorKind, EntitiesCheckpoint, ExecInput, ExecOutput, MerkleCheckpoint, Stage, @@ -159,6 +159,7 @@ where + StatsReader + HeaderProvider + ChangeSetReader + + StorageChangeSetReader + StageCheckpointReader + StageCheckpointWriter, { diff --git a/crates/stages/stages/src/stages/merkle_changesets.rs b/crates/stages/stages/src/stages/merkle_changesets.rs index e81f8f1856..c4345fedb9 100644 --- a/crates/stages/stages/src/stages/merkle_changesets.rs +++ b/crates/stages/stages/src/stages/merkle_changesets.rs @@ -6,7 +6,7 @@ use reth_primitives_traits::{GotExpected, SealedHeader}; use reth_provider::{ BlockNumReader, ChainStateBlockReader, ChangeSetReader, DBProvider, HeaderProvider, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, StageCheckpointReader, - StageCheckpointWriter, TrieWriter, + StageCheckpointWriter, StorageChangeSetReader, TrieWriter, }; use reth_prune_types::{ PruneCheckpoint, PruneMode, PruneSegment, MERKLE_CHANGESETS_RETENTION_BLOCKS, @@ -167,7 +167,8 @@ impl MerkleChangeSets { + 
HeaderProvider + ChainStateBlockReader + BlockNumReader - + ChangeSetReader, + + ChangeSetReader + + StorageChangeSetReader, { let target_start = target_range.start; let target_end = target_range.end; @@ -308,6 +309,7 @@ where + PruneCheckpointReader + PruneCheckpointWriter + ChangeSetReader + + StorageChangeSetReader + BlockNumReader, { fn id(&self) -> StageId { diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index c5a8dee347..cd447ba9b4 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -5,7 +5,7 @@ use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, models::{ sharded_key::NUM_OF_INDICES_IN_SHARD, storage_sharded_key::StorageShardedKey, - AccountBeforeTx, ShardedKey, + AccountBeforeTx, AddressStorageKey, BlockNumberAddress, ShardedKey, }, table::{Decode, Decompress, Table}, transaction::DbTx, @@ -19,7 +19,7 @@ use reth_provider::{ }; use reth_stages_api::StageError; use reth_static_file_types::StaticFileSegment; -use reth_storage_api::ChangeSetReader; +use reth_storage_api::{ChangeSetReader, StorageChangeSetReader}; use std::{collections::HashMap, hash::Hash, ops::RangeBounds}; use tracing::info; @@ -102,15 +102,15 @@ where } /// Allows collecting indices from a cache with a custom insert fn -fn collect_indices( - cache: impl Iterator)>, +fn collect_indices( + cache: impl Iterator)>, mut insert_fn: F, ) -> Result<(), StageError> where - F: FnMut(Address, Vec) -> Result<(), StageError>, + F: FnMut(K, Vec) -> Result<(), StageError>, { - for (address, indices) in cache { - insert_fn(address, indices)? + for (key, indices) in cache { + insert_fn(key, indices)? } Ok(()) } @@ -174,6 +174,62 @@ where Ok(collector) } +/// Collects storage history indices using a provider that implements `StorageChangeSetReader`. 
+pub(crate) fn collect_storage_history_indices( + provider: &Provider, + range: impl RangeBounds, + etl_config: &EtlConfig, +) -> Result, StageError> +where + Provider: DBProvider + StorageChangeSetReader + StaticFileProviderFactory, +{ + let mut collector = Collector::new(etl_config.file_size, etl_config.dir.clone()); + let mut cache: HashMap> = HashMap::default(); + + let mut insert_fn = |key: AddressStorageKey, indices: Vec| { + let last = indices.last().expect("qed"); + collector.insert( + StorageShardedKey::new(key.0 .0, key.0 .1, *last), + BlockNumberList::new_pre_sorted(indices.into_iter()), + )?; + Ok::<(), StageError>(()) + }; + + let range = to_range(range); + let static_file_provider = provider.static_file_provider(); + + let total_changesets = static_file_provider.storage_changeset_count()?; + let interval = (total_changesets / 1000).max(1); + + let walker = static_file_provider.walk_storage_changeset_range(range); + + let mut flush_counter = 0; + let mut current_block_number = u64::MAX; + + for (idx, changeset_result) in walker.enumerate() { + let (BlockNumberAddress((block_number, address)), storage) = changeset_result?; + cache.entry(AddressStorageKey((address, storage.key))).or_default().push(block_number); + + if idx > 0 && idx % interval == 0 && total_changesets > 1000 { + info!(target: "sync::stages::index_history", progress = %format!("{:.4}%", (idx as f64 / total_changesets as f64) * 100.0), "Collecting indices"); + } + + if block_number != current_block_number { + current_block_number = block_number; + flush_counter += 1; + } + + if flush_counter > DEFAULT_CACHE_THRESHOLD { + collect_indices(cache.drain(), &mut insert_fn)?; + flush_counter = 0; + } + } + + collect_indices(cache.into_iter(), insert_fn)?; + + Ok(collector) +} + /// Loads account history indices into the database via `EitherWriter`. /// /// Works with [`EitherWriter`] to support both MDBX and `RocksDB` backends. 
diff --git a/crates/static-file/types/src/segment.rs b/crates/static-file/types/src/segment.rs index 9a3e5e35c8..791172be70 100644 --- a/crates/static-file/types/src/segment.rs +++ b/crates/static-file/types/src/segment.rs @@ -55,6 +55,11 @@ pub enum StaticFileSegment { /// * address 0xbb, account info /// * address 0xcc, account info AccountChangeSets, + /// Static File segment responsible for the `StorageChangeSets` table. + /// + /// Storage changeset static files append block-by-block changesets sorted by address and + /// storage slot. + StorageChangeSets, } impl StaticFileSegment { @@ -71,6 +76,7 @@ impl StaticFileSegment { Self::Receipts => "receipts", Self::TransactionSenders => "transaction-senders", Self::AccountChangeSets => "account-change-sets", + Self::StorageChangeSets => "storage-change-sets", } } @@ -83,6 +89,7 @@ impl StaticFileSegment { Self::Receipts, Self::TransactionSenders, Self::AccountChangeSets, + Self::StorageChangeSets, ] .into_iter() } @@ -99,7 +106,8 @@ impl StaticFileSegment { Self::Transactions | Self::Receipts | Self::TransactionSenders | - Self::AccountChangeSets => 1, + Self::AccountChangeSets | + Self::StorageChangeSets => 1, } } @@ -161,14 +169,14 @@ impl StaticFileSegment { pub const fn is_tx_based(&self) -> bool { match self { Self::Receipts | Self::Transactions | Self::TransactionSenders => true, - Self::Headers | Self::AccountChangeSets => false, + Self::Headers | Self::AccountChangeSets | Self::StorageChangeSets => false, } } - /// Returns `true` if the segment is [`StaticFileSegment::AccountChangeSets`] + /// Returns `true` if the segment is change-based. 
pub const fn is_change_based(&self) -> bool { match self { - Self::AccountChangeSets => true, + Self::AccountChangeSets | Self::StorageChangeSets => true, Self::Receipts | Self::Transactions | Self::Headers | Self::TransactionSenders => false, } } @@ -180,7 +188,8 @@ impl StaticFileSegment { Self::Receipts | Self::Transactions | Self::TransactionSenders | - Self::AccountChangeSets => false, + Self::AccountChangeSets | + Self::StorageChangeSets => false, } } @@ -259,10 +268,10 @@ impl<'de> Visitor<'de> for SegmentHeaderVisitor { let tx_range = seq.next_element()?.ok_or_else(|| serde::de::Error::invalid_length(2, &self))?; - let segment = + let segment: StaticFileSegment = seq.next_element()?.ok_or_else(|| serde::de::Error::invalid_length(3, &self))?; - let changeset_offsets = if segment == StaticFileSegment::AccountChangeSets { + let changeset_offsets = if segment.is_change_based() { // Try to read the 5th field (changeset_offsets) // If it doesn't exist (old format), this will return None match seq.next_element()? 
{ @@ -309,8 +318,8 @@ impl Serialize for SegmentHeader { where S: Serializer, { - // We serialize an extra field, the changeset offsets, for account changesets - let len = if self.segment.is_account_change_sets() { 5 } else { 4 }; + // We serialize an extra field, the changeset offsets, for change-based segments + let len = if self.segment.is_change_based() { 5 } else { 4 }; let mut state = serializer.serialize_struct("SegmentHeader", len)?; state.serialize_field("expected_block_range", &self.expected_block_range)?; @@ -318,7 +327,7 @@ impl Serialize for SegmentHeader { state.serialize_field("tx_range", &self.tx_range)?; state.serialize_field("segment", &self.segment)?; - if self.segment.is_account_change_sets() { + if self.segment.is_change_based() { state.serialize_field("changeset_offsets", &self.changeset_offsets)?; } @@ -672,6 +681,12 @@ mod tests { "static_file_account-change-sets_1123233_11223233", None, ), + ( + StaticFileSegment::StorageChangeSets, + 1_123_233..=11_223_233, + "static_file_storage-change-sets_1123233_11223233", + None, + ), ( StaticFileSegment::Headers, 2..=30, @@ -755,6 +770,13 @@ mod tests { segment: StaticFileSegment::AccountChangeSets, changeset_offsets: Some(vec![ChangesetOffset { offset: 1, num_changes: 1 }; 100]), }, + SegmentHeader { + expected_block_range: SegmentRangeInclusive::new(0, 200), + block_range: Some(SegmentRangeInclusive::new(0, 100)), + tx_range: None, + segment: StaticFileSegment::StorageChangeSets, + changeset_offsets: Some(vec![ChangesetOffset { offset: 1, num_changes: 1 }; 100]), + }, ]; // Check that we test all segments assert_eq!( @@ -788,6 +810,7 @@ mod tests { StaticFileSegment::Receipts => "receipts", StaticFileSegment::TransactionSenders => "transaction-senders", StaticFileSegment::AccountChangeSets => "account-change-sets", + StaticFileSegment::StorageChangeSets => "storage-change-sets", }; assert_eq!(static_str, expected_str); } @@ -806,6 +829,7 @@ mod tests { StaticFileSegment::Receipts => "Receipts", 
StaticFileSegment::TransactionSenders => "TransactionSenders", StaticFileSegment::AccountChangeSets => "AccountChangeSets", + StaticFileSegment::StorageChangeSets => "StorageChangeSets", }; assert_eq!(ser, format!("\"{expected_str}\"")); } diff --git a/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__StorageChangeSets.snap b/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__StorageChangeSets.snap new file mode 100644 index 0000000000..c1b94903bd --- /dev/null +++ b/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__StorageChangeSets.snap @@ -0,0 +1,5 @@ +--- +source: crates/static-file/types/src/segment.rs +expression: "Bytes::from(serialized)" +--- +0x01000000000000000000000000000000c8000000000000000100000000000000006400000000000000000500000001640000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000
00000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000
000000000001000000000000000100000000000000010000000000000001000000000000000000000000000000000000000000000000 diff --git a/crates/storage/db-api/src/models/metadata.rs b/crates/storage/db-api/src/models/metadata.rs index a12e9b6dab..b17dccabfb 100644 --- a/crates/storage/db-api/src/models/metadata.rs +++ b/crates/storage/db-api/src/models/metadata.rs @@ -31,6 +31,9 @@ pub struct StorageSettings { /// Whether this node should read and write account changesets from static files. #[serde(default)] pub account_changesets_in_static_files: bool, + /// Whether this node should read and write storage changesets from static files. + #[serde(default)] + pub storage_changesets_in_static_files: bool, } impl StorageSettings { @@ -59,6 +62,7 @@ impl StorageSettings { receipts_in_static_files: true, transaction_senders_in_static_files: true, account_changesets_in_static_files: true, + storage_changesets_in_static_files: true, storages_history_in_rocksdb: false, transaction_hash_numbers_in_rocksdb: true, account_history_in_rocksdb: false, @@ -78,6 +82,7 @@ impl StorageSettings { transaction_hash_numbers_in_rocksdb: false, account_history_in_rocksdb: false, account_changesets_in_static_files: false, + storage_changesets_in_static_files: false, } } @@ -117,6 +122,12 @@ impl StorageSettings { self } + /// Sets the `storage_changesets_in_static_files` flag to the provided value. + pub const fn with_storage_changesets_in_static_files(mut self, value: bool) -> Self { + self.storage_changesets_in_static_files = value; + self + } + /// Returns `true` if any tables are configured to be stored in `RocksDB`. 
pub const fn any_in_rocksdb(&self) -> bool { self.transaction_hash_numbers_in_rocksdb || diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 8d2e31e875..67bc3ea0d1 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -29,8 +29,8 @@ pub use blocks::*; pub use integer_list::IntegerList; pub use metadata::*; pub use reth_db_models::{ - AccountBeforeTx, ClientVersion, StaticFileBlockWithdrawals, StoredBlockBodyIndices, - StoredBlockWithdrawals, + AccountBeforeTx, ClientVersion, StaticFileBlockWithdrawals, StorageBeforeTx, + StoredBlockBodyIndices, StoredBlockWithdrawals, }; pub use sharded_key::ShardedKey; @@ -230,6 +230,7 @@ impl_compression_for_compact!( StaticFileBlockWithdrawals, Bytecode, AccountBeforeTx, + StorageBeforeTx, TransactionSigned, CompactU256, StageCheckpoint, diff --git a/crates/storage/db-models/src/lib.rs b/crates/storage/db-models/src/lib.rs index db1c99b5e1..49bcfe7a3b 100644 --- a/crates/storage/db-models/src/lib.rs +++ b/crates/storage/db-models/src/lib.rs @@ -19,6 +19,10 @@ pub use accounts::AccountBeforeTx; pub mod blocks; pub use blocks::{StaticFileBlockWithdrawals, StoredBlockBodyIndices, StoredBlockWithdrawals}; +/// Storage +pub mod storage; +pub use storage::StorageBeforeTx; + /// Client Version pub mod client_version; pub use client_version::ClientVersion; diff --git a/crates/storage/db-models/src/storage.rs b/crates/storage/db-models/src/storage.rs new file mode 100644 index 0000000000..4de05901aa --- /dev/null +++ b/crates/storage/db-models/src/storage.rs @@ -0,0 +1,48 @@ +use alloy_primitives::{Address, B256, U256}; +use reth_primitives_traits::ValueWithSubKey; + +/// Storage entry as it is saved in the static files. +/// +/// [`B256`] is the subkey. 
+#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +pub struct StorageBeforeTx { + /// Address for the storage entry. Acts as `DupSort::SubKey` in static files. + pub address: Address, + /// Storage key. + pub key: B256, + /// Value on storage key. + pub value: U256, +} + +impl ValueWithSubKey for StorageBeforeTx { + type SubKey = B256; + + fn get_subkey(&self) -> Self::SubKey { + self.key + } +} + +// NOTE: Removing reth_codec and manually encode subkey +// and compress second part of the value. If we have compression +// over whole value (Even SubKey) that would mess up fetching of values with seek_by_key_subkey +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for StorageBeforeTx { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + buf.put_slice(self.address.as_slice()); + buf.put_slice(&self.key[..]); + self.value.to_compact(buf) + 52 + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let address = Address::from_slice(&buf[..20]); + let key = B256::from_slice(&buf[20..52]); + let (value, out) = U256::from_compact(&buf[52..], len - 52); + (Self { address, key, value }, out) + } +} diff --git a/crates/storage/db/src/static_file/masks.rs b/crates/storage/db/src/static_file/masks.rs index 4b0eae7f7a..d6a01283dd 100644 --- a/crates/storage/db/src/static_file/masks.rs +++ b/crates/storage/db/src/static_file/masks.rs @@ -4,7 +4,7 @@ use crate::{ HeaderTerminalDifficulties, }; use alloy_primitives::{Address, BlockHash}; -use reth_db_api::{table::Table, AccountChangeSets}; +use reth_db_api::{models::StorageBeforeTx, table::Table, AccountChangeSets}; // HEADER MASKS add_static_file_mask! { @@ -54,3 +54,9 @@ add_static_file_mask! 
{ #[doc = "Mask for selecting a single changeset from `AccountChangesets` static file segment"] AccountChangesetMask, ::Value, 0b1 } + +// STORAGE CHANGESET MASKS +add_static_file_mask! { + #[doc = "Mask for selecting a single changeset from `StorageChangesets` static file segment"] + StorageChangesetMask, StorageBeforeTx, 0b1 +} diff --git a/crates/storage/provider/src/changeset_walker.rs b/crates/storage/provider/src/changeset_walker.rs index f31eed8e8a..5eb521e3a7 100644 --- a/crates/storage/provider/src/changeset_walker.rs +++ b/crates/storage/provider/src/changeset_walker.rs @@ -1,10 +1,12 @@ -//! Account changeset iteration support for walking through historical account state changes in +//! Account/storage changeset iteration support for walking through historical state changes in //! static files. use crate::ProviderResult; use alloy_primitives::BlockNumber; use reth_db::models::AccountBeforeTx; -use reth_storage_api::ChangeSetReader; +use reth_db_api::models::BlockNumberAddress; +use reth_primitives_traits::StorageEntry; +use reth_storage_api::{ChangeSetReader, StorageChangeSetReader}; use std::ops::{Bound, RangeBounds}; /// Iterator that walks account changesets from static files in a block range. @@ -97,3 +99,78 @@ where None } } + +/// Iterator that walks storage changesets from static files in a block range. +#[derive(Debug)] +pub struct StaticFileStorageChangesetWalker

{ + /// Static file provider + provider: P, + /// End block (exclusive). `None` means iterate until exhausted. + end_block: Option, + /// Current block being processed + current_block: BlockNumber, + /// Changesets for current block + current_changesets: Vec<(BlockNumberAddress, StorageEntry)>, + /// Index within current block's changesets + changeset_index: usize, +} + +impl

StaticFileStorageChangesetWalker

{ + /// Create a new static file storage changeset walker. + pub fn new(provider: P, range: impl RangeBounds) -> Self { + let start = match range.start_bound() { + Bound::Included(&n) => n, + Bound::Excluded(&n) => n + 1, + Bound::Unbounded => 0, + }; + + let end_block = match range.end_bound() { + Bound::Included(&n) => Some(n + 1), + Bound::Excluded(&n) => Some(n), + Bound::Unbounded => None, + }; + + Self { + provider, + end_block, + current_block: start, + current_changesets: Vec::new(), + changeset_index: 0, + } + } +} + +impl

Iterator for StaticFileStorageChangesetWalker

+where + P: StorageChangeSetReader, +{ + type Item = ProviderResult<(BlockNumberAddress, StorageEntry)>; + + fn next(&mut self) -> Option { + if let Some(changeset) = self.current_changesets.get(self.changeset_index).copied() { + self.changeset_index += 1; + return Some(Ok(changeset)); + } + + if !self.current_changesets.is_empty() { + self.current_block += 1; + } + + while self.end_block.is_none_or(|end| self.current_block < end) { + match self.provider.storage_changeset(self.current_block) { + Ok(changesets) if !changesets.is_empty() => { + self.current_changesets = changesets; + self.changeset_index = 1; + return Some(Ok(self.current_changesets[0])); + } + Ok(_) => self.current_block += 1, + Err(e) => { + self.current_block += 1; + return Some(Err(e)); + } + } + } + + None + } +} diff --git a/crates/storage/provider/src/either_writer.rs b/crates/storage/provider/src/either_writer.rs index c6ba79d031..efa1032420 100644 --- a/crates/storage/provider/src/either_writer.rs +++ b/crates/storage/provider/src/either_writer.rs @@ -17,20 +17,20 @@ use alloy_primitives::{map::HashMap, Address, BlockNumber, TxHash, TxNumber, B25 use rayon::slice::ParallelSliceMut; use reth_db::{ cursor::{DbCursorRO, DbDupCursorRW}, - models::AccountBeforeTx, + models::{AccountBeforeTx, StorageBeforeTx}, static_file::TransactionSenderMask, table::Value, transaction::{CursorMutTy, CursorTy, DbTx, DbTxMut, DupCursorMutTy, DupCursorTy}, }; use reth_db_api::{ cursor::DbCursorRW, - models::{storage_sharded_key::StorageShardedKey, ShardedKey}, + models::{storage_sharded_key::StorageShardedKey, BlockNumberAddress, ShardedKey}, tables, tables::BlockNumberList, }; use reth_errors::ProviderError; use reth_node_types::NodePrimitives; -use reth_primitives_traits::ReceiptTy; +use reth_primitives_traits::{ReceiptTy, StorageEntry}; use reth_static_file_types::StaticFileSegment; use reth_storage_api::{ChangeSetReader, DBProvider, NodePrimitivesProvider, StorageSettingsCache}; use 
reth_storage_errors::provider::ProviderResult; @@ -171,6 +171,27 @@ impl<'a> EitherWriter<'a, (), ()> { } } + /// Creates a new [`EitherWriter`] for storage changesets based on storage settings. + pub fn new_storage_changesets

( + provider: &'a P, + block_number: BlockNumber, + ) -> ProviderResult> + where + P: DBProvider + NodePrimitivesProvider + StorageSettingsCache + StaticFileProviderFactory, + P::Tx: DbTxMut, + { + if provider.cached_storage_settings().storage_changesets_in_static_files { + Ok(EitherWriter::StaticFile( + provider + .get_static_file_writer(block_number, StaticFileSegment::StorageChangeSets)?, + )) + } else { + Ok(EitherWriter::Database( + provider.tx_ref().cursor_dup_write::()?, + )) + } + } + /// Returns the destination for writing receipts. /// /// The rules are as follows: @@ -208,6 +229,19 @@ impl<'a> EitherWriter<'a, (), ()> { } } + /// Returns the destination for writing storage changesets. + /// + /// This determines the destination based solely on storage settings. + pub fn storage_changesets_destination( + provider: &P, + ) -> EitherWriterDestination { + if provider.cached_storage_settings().storage_changesets_in_static_files { + EitherWriterDestination::StaticFile + } else { + EitherWriterDestination::Database + } + } + /// Creates a new [`EitherWriter`] for storages history based on storage settings. pub fn new_storages_history

( provider: &P, @@ -651,6 +685,41 @@ where } } +impl<'a, CURSOR, N: NodePrimitives> EitherWriter<'a, CURSOR, N> +where + CURSOR: DbDupCursorRW, +{ + /// Append storage changeset for a block. + /// + /// NOTE: This _sorts_ the changesets by address and storage key before appending. + pub fn append_storage_changeset( + &mut self, + block_number: BlockNumber, + mut changeset: Vec, + ) -> ProviderResult<()> { + changeset.par_sort_by_key(|change| (change.address, change.key)); + + match self { + Self::Database(cursor) => { + for change in changeset { + let storage_id = BlockNumberAddress((block_number, change.address)); + cursor.append_dup( + storage_id, + StorageEntry { key: change.key, value: change.value }, + )?; + } + } + Self::StaticFile(writer) => { + writer.append_storage_changeset(changeset, block_number)?; + } + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(_) => return Err(ProviderError::UnsupportedProvider), + } + + Ok(()) + } +} + /// Represents a source for reading data, either from database, static files, or `RocksDB`. #[derive(Debug, Display)] pub enum EitherReader<'a, CURSOR, N> { @@ -987,6 +1056,19 @@ impl EitherWriterDestination { Self::Database } } + + /// Returns the destination for writing storage changesets based on storage settings. + pub fn storage_changesets

(provider: &P) -> Self + where + P: StorageSettingsCache, + { + // Write storage changesets to static files only if they're explicitly enabled + if provider.cached_storage_settings().storage_changesets_in_static_files { + Self::StaticFile + } else { + Self::Database + } + } } #[cfg(test)] diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index fecd87a0e8..005b94915b 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -711,6 +711,26 @@ impl StorageChangeSetReader for BlockchainProvider { ) -> ProviderResult> { self.consistent_provider()?.storage_changeset(block_number) } + + fn get_storage_before_block( + &self, + block_number: BlockNumber, + address: Address, + storage_key: B256, + ) -> ProviderResult> { + self.consistent_provider()?.get_storage_before_block(block_number, address, storage_key) + } + + fn storage_changesets_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.consistent_provider()?.storage_changesets_range(range) + } + + fn storage_changeset_count(&self) -> ProviderResult { + self.consistent_provider()?.storage_changeset_count() + } } impl ChangeSetReader for BlockchainProvider { diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index e0c503eae0..7fadea95da 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -1347,6 +1347,138 @@ impl StorageChangeSetReader for ConsistentProvider { self.storage_provider.storage_changeset(block_number) } } + + fn get_storage_before_block( + &self, + block_number: BlockNumber, + address: Address, + storage_key: B256, + ) -> ProviderResult> { + if let Some(state) = + self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into())) + { + let changeset = state + .block_ref() 
+ .execution_output + .state + .reverts + .clone() + .to_plain_state_reverts() + .storage + .into_iter() + .flatten() + .find_map(|revert: PlainStorageRevert| { + if revert.address != address { + return None + } + revert.storage_revert.into_iter().find_map(|(key, value)| { + let key = key.into(); + (key == storage_key) + .then(|| StorageEntry { key, value: value.to_previous_value() }) + }) + }); + Ok(changeset) + } else { + let storage_history_exists = self + .storage_provider + .get_prune_checkpoint(PruneSegment::StorageHistory)? + .and_then(|checkpoint| { + checkpoint.block_number.map(|checkpoint| block_number > checkpoint) + }) + .unwrap_or(true); + + if !storage_history_exists { + return Err(ProviderError::StateAtBlockPruned(block_number)) + } + + self.storage_provider.get_storage_before_block(block_number, address, storage_key) + } + } + + fn storage_changesets_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + let range = to_range(range); + let mut changesets = Vec::new(); + let database_start = range.start; + let mut database_end = range.end; + + if let Some(head_block) = &self.head_block { + database_end = head_block.anchor().number; + + let chain = head_block.chain().collect::>(); + for state in chain { + let block_changesets = state + .block_ref() + .execution_output + .state + .reverts + .clone() + .to_plain_state_reverts() + .storage + .into_iter() + .flatten() + .flat_map(|revert: PlainStorageRevert| { + revert.storage_revert.into_iter().map(move |(key, value)| { + ( + BlockNumberAddress((state.number(), revert.address)), + StorageEntry { key: key.into(), value: value.to_previous_value() }, + ) + }) + }); + + changesets.extend(block_changesets); + } + } + + if database_start < database_end { + let storage_history_exists = self + .storage_provider + .get_prune_checkpoint(PruneSegment::StorageHistory)? 
+ .and_then(|checkpoint| { + checkpoint.block_number.map(|checkpoint| database_start > checkpoint) + }) + .unwrap_or(true); + + if !storage_history_exists { + return Err(ProviderError::StateAtBlockPruned(database_start)) + } + + let db_changesets = self + .storage_provider + .storage_changesets_range(database_start..=database_end - 1)?; + changesets.extend(db_changesets); + } + + changesets.sort_by_key(|(block_address, _)| block_address.block_number()); + + Ok(changesets) + } + + fn storage_changeset_count(&self) -> ProviderResult { + let mut count = 0; + if let Some(head_block) = &self.head_block { + for state in head_block.chain() { + count += state + .block_ref() + .execution_output + .state + .reverts + .clone() + .to_plain_state_reverts() + .storage + .into_iter() + .flatten() + .map(|revert: PlainStorageRevert| revert.storage_revert.len()) + .sum::(); + } + } + + count += self.storage_provider.storage_changeset_count()?; + + Ok(count) + } } impl ChangeSetReader for ConsistentProvider { diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index e9e4e82c68..39f1e35473 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -40,7 +40,8 @@ use reth_db_api::{ database::Database, models::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, - BlockNumberHashedAddress, ShardedKey, StorageSettings, StoredBlockBodyIndices, + BlockNumberHashedAddress, ShardedKey, StorageBeforeTx, StorageSettings, + StoredBlockBodyIndices, }, table::Table, tables, @@ -463,6 +464,8 @@ impl DatabaseProvider StorageChangeSetReader for DatabaseProvider &self, block_number: BlockNumber, ) -> ProviderResult> { - let range = block_number..=block_number; - let storage_range = BlockNumberAddress::range(range); - self.tx - .cursor_dup_read::()? - .walk_range(storage_range)? 
- .map(|result| -> ProviderResult<_> { Ok(result?) }) - .collect() + if self.cached_storage_settings().storage_changesets_in_static_files { + self.static_file_provider.storage_changeset(block_number) + } else { + let range = block_number..=block_number; + let storage_range = BlockNumberAddress::range(range); + self.tx + .cursor_dup_read::()? + .walk_range(storage_range)? + .map(|result| -> ProviderResult<_> { Ok(result?) }) + .collect() + } + } + + fn get_storage_before_block( + &self, + block_number: BlockNumber, + address: Address, + storage_key: B256, + ) -> ProviderResult> { + if self.cached_storage_settings().storage_changesets_in_static_files { + self.static_file_provider.get_storage_before_block(block_number, address, storage_key) + } else { + self.tx + .cursor_dup_read::()? + .seek_by_key_subkey(BlockNumberAddress((block_number, address)), storage_key)? + .filter(|entry| entry.key == storage_key) + .map(Ok) + .transpose() + } + } + + fn storage_changesets_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + if self.cached_storage_settings().storage_changesets_in_static_files { + self.static_file_provider.storage_changesets_range(range) + } else { + self.tx + .cursor_dup_read::()? + .walk_range(BlockNumberAddress::range(range))? + .map(|result| -> ProviderResult<_> { Ok(result?) }) + .collect() + } + } + + fn storage_changeset_count(&self) -> ProviderResult { + if self.cached_storage_settings().storage_changesets_in_static_files { + self.static_file_provider.storage_changeset_count() + } else { + Ok(self.tx.entries::()?) + } } } @@ -2072,38 +2120,67 @@ impl StorageReader for DatabaseProvider &self, range: RangeInclusive, ) -> ProviderResult>> { - self.tx - .cursor_read::()? - .walk_range(BlockNumberAddress::range(range))? - // fold all storages and save its old state so we can remove it from HashedStorage - // it is needed as it is dup table. 
- .try_fold(BTreeMap::new(), |mut accounts: BTreeMap>, entry| { - let (BlockNumberAddress((_, address)), storage_entry) = entry?; - accounts.entry(address).or_default().insert(storage_entry.key); - Ok(accounts) - }) + if self.cached_storage_settings().storage_changesets_in_static_files { + self.storage_changesets_range(range)?.into_iter().try_fold( + BTreeMap::new(), + |mut accounts: BTreeMap>, entry| { + let (BlockNumberAddress((_, address)), storage_entry) = entry; + accounts.entry(address).or_default().insert(storage_entry.key); + Ok(accounts) + }, + ) + } else { + self.tx + .cursor_read::()? + .walk_range(BlockNumberAddress::range(range))? + // fold all storages and save its old state so we can remove it from HashedStorage + // it is needed as it is dup table. + .try_fold( + BTreeMap::new(), + |mut accounts: BTreeMap>, entry| { + let (BlockNumberAddress((_, address)), storage_entry) = entry?; + accounts.entry(address).or_default().insert(storage_entry.key); + Ok(accounts) + }, + ) + } } fn changed_storages_and_blocks_with_range( &self, range: RangeInclusive, ) -> ProviderResult>> { - let mut changeset_cursor = self.tx.cursor_read::()?; - - let storage_changeset_lists = - changeset_cursor.walk_range(BlockNumberAddress::range(range))?.try_fold( + if self.cached_storage_settings().storage_changesets_in_static_files { + self.storage_changesets_range(range)?.into_iter().try_fold( BTreeMap::new(), - |mut storages: BTreeMap<(Address, B256), Vec>, entry| -> ProviderResult<_> { - let (index, storage) = entry?; + |mut storages: BTreeMap<(Address, B256), Vec>, (index, storage)| { storages .entry((index.address(), storage.key)) .or_default() .push(index.block_number()); Ok(storages) }, - )?; + ) + } else { + let mut changeset_cursor = self.tx.cursor_read::()?; - Ok(storage_changeset_lists) + let storage_changeset_lists = + changeset_cursor.walk_range(BlockNumberAddress::range(range))?.try_fold( + BTreeMap::new(), + |mut storages: BTreeMap<(Address, B256), Vec>, + entry| + 
-> ProviderResult<_> { + let (index, storage) = entry?; + storages + .entry((index.address(), storage.key)) + .or_default() + .push(index.block_number()); + Ok(storages) + }, + )?; + + Ok(storage_changeset_lists) + } } } @@ -2226,17 +2303,16 @@ impl StateWriter // Write storage changes tracing::trace!("Writing storage changes"); let mut storages_cursor = self.tx_ref().cursor_dup_write::()?; - let mut storage_changeset_cursor = - self.tx_ref().cursor_dup_write::()?; for (block_index, mut storage_changes) in reverts.storage.into_iter().enumerate() { let block_number = first_block + block_index as BlockNumber; tracing::trace!(block_number, "Writing block change"); // sort changes by address. storage_changes.par_sort_unstable_by_key(|a| a.address); + let total_changes = + storage_changes.iter().map(|change| change.storage_revert.len()).sum(); + let mut changeset = Vec::with_capacity(total_changes); for PlainStorageRevert { address, wiped, storage_revert } in storage_changes { - let storage_id = BlockNumberAddress((block_number, address)); - let mut storage = storage_revert .into_iter() .map(|(k, v)| (B256::new(k.to_be_bytes()), v)) @@ -2264,9 +2340,13 @@ impl StateWriter tracing::trace!(?address, ?storage, "Writing storage reverts"); for (key, value) in StorageRevertsIter::new(storage, wiped_storage) { - storage_changeset_cursor.append_dup(storage_id, StorageEntry { key, value })?; + changeset.push(StorageBeforeTx { address, key, value }); } } + + let mut storage_changesets_writer = + EitherWriter::new_storage_changesets(self, block_number)?; + storage_changesets_writer.append_storage_changeset(block_number, changeset)?; } if !config.write_account_changesets { @@ -2427,8 +2507,19 @@ impl StateWriter block_bodies.first().expect("already checked if there are blocks").first_tx_num(); let storage_range = BlockNumberAddress::range(range.clone()); - - let storage_changeset = self.take::(storage_range)?; + let storage_changeset = if let Some(_highest_block) = self + 
.static_file_provider + .get_highest_static_file_block(StaticFileSegment::StorageChangeSets) && + self.cached_storage_settings().storage_changesets_in_static_files + { + let changesets = self.storage_changesets_range(range.clone())?; + let mut changeset_writer = + self.static_file_provider.latest_writer(StaticFileSegment::StorageChangeSets)?; + changeset_writer.prune_storage_changesets(block)?; + changesets + } else { + self.take::(storage_range)? + }; let account_changeset = self.take::(range)?; // This is not working for blocks that are not at tip. as plain state is not the last @@ -2523,8 +2614,19 @@ impl StateWriter block_bodies.last().expect("already checked if there are blocks").last_tx_num(); let storage_range = BlockNumberAddress::range(range.clone()); - - let storage_changeset = self.take::(storage_range)?; + let storage_changeset = if let Some(highest_block) = self + .static_file_provider + .get_highest_static_file_block(StaticFileSegment::StorageChangeSets) && + self.cached_storage_settings().storage_changesets_in_static_files + { + let changesets = self.storage_changesets_range(block + 1..=highest_block)?; + let mut changeset_writer = + self.static_file_provider.latest_writer(StaticFileSegment::StorageChangeSets)?; + changeset_writer.prune_storage_changesets(block)?; + changesets + } else { + self.take::(storage_range)? + }; // This is not working for blocks that are not at tip. as plain state is not the last // state of end range. 
We should rename the functions or add support to access diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 359fcdd7ae..3d42feedb0 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -14,7 +14,7 @@ use reth_db_api::{ use reth_primitives_traits::{Account, Bytecode}; use reth_storage_api::{ BlockNumReader, BytecodeReader, DBProvider, NodePrimitivesProvider, StateProofProvider, - StorageRootProvider, StorageSettingsCache, + StorageChangeSetReader, StorageRootProvider, StorageSettingsCache, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ @@ -26,8 +26,8 @@ use reth_trie::{ TrieInputSorted, }; use reth_trie_db::{ - DatabaseHashedPostState, DatabaseHashedStorage, DatabaseProof, DatabaseStateRoot, - DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness, + hashed_storage_from_reverts_with_provider, DatabaseHashedPostState, DatabaseProof, + DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness, }; use std::fmt::Debug; @@ -109,7 +109,7 @@ pub struct HistoricalStateProviderRef<'b, Provider> { lowest_available_blocks: LowestAvailableBlocks, } -impl<'b, Provider: DBProvider + ChangeSetReader + BlockNumReader> +impl<'b, Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + BlockNumReader> HistoricalStateProviderRef<'b, Provider> { /// Create new `StateProvider` for historical block number @@ -210,7 +210,7 @@ impl<'b, Provider: DBProvider + ChangeSetReader + BlockNumReader> ); } - Ok(HashedStorage::from_reverts(self.tx(), address, self.block_number)?) + hashed_storage_from_reverts_with_provider(self.provider, address, self.block_number) } /// Set the lowest block number at which the account history is available. 
@@ -242,6 +242,7 @@ impl< Provider: DBProvider + BlockNumReader + ChangeSetReader + + StorageChangeSetReader + StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider, @@ -285,8 +286,8 @@ impl BlockHashReader } } -impl StateRootProvider - for HistoricalStateProviderRef<'_, Provider> +impl + StateRootProvider for HistoricalStateProviderRef<'_, Provider> { fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { let mut revert_state = self.revert_state()?; @@ -322,8 +323,8 @@ impl StateRootProvider } } -impl StorageRootProvider - for HistoricalStateProviderRef<'_, Provider> +impl + StorageRootProvider for HistoricalStateProviderRef<'_, Provider> { fn storage_root( &self, @@ -361,8 +362,8 @@ impl StorageRootProvide } } -impl StateProofProvider - for HistoricalStateProviderRef<'_, Provider> +impl + StateProofProvider for HistoricalStateProviderRef<'_, Provider> { /// Get account and storage proofs. fn proof( @@ -405,6 +406,7 @@ impl< + BlockNumReader + BlockHashReader + ChangeSetReader + + StorageChangeSetReader + StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider, @@ -418,18 +420,16 @@ impl< ) -> ProviderResult> { match self.storage_history_lookup(address, storage_key)? { HistoryInfo::NotYetWritten => Ok(None), - HistoryInfo::InChangeset(changeset_block_number) => Ok(Some( - self.tx() - .cursor_dup_read::()? - .seek_by_key_subkey((changeset_block_number, address).into(), storage_key)? - .filter(|entry| entry.key == storage_key) - .ok_or_else(|| ProviderError::StorageChangesetNotFound { - block_number: changeset_block_number, - address, - storage_key: Box::new(storage_key), - })? - .value, - )), + HistoryInfo::InChangeset(changeset_block_number) => self + .provider + .get_storage_before_block(changeset_block_number, address, storage_key)? 
+ .ok_or_else(|| ProviderError::StorageChangesetNotFound { + block_number: changeset_block_number, + address, + storage_key: Box::new(storage_key), + }) + .map(|entry| entry.value) + .map(Some), HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => Ok(self .tx() .cursor_dup_read::()? @@ -462,7 +462,9 @@ pub struct HistoricalStateProvider { lowest_available_blocks: LowestAvailableBlocks, } -impl HistoricalStateProvider { +impl + HistoricalStateProvider +{ /// Create new `StateProvider` for historical block number pub fn new(provider: Provider, block_number: BlockNumber) -> Self { Self { provider, block_number, lowest_available_blocks: Default::default() } @@ -498,7 +500,7 @@ impl HistoricalStatePro } // Delegates all provider impls to [HistoricalStateProviderRef] -reth_storage_api::macros::delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader + StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider]); +reth_storage_api::macros::delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader + StorageChangeSetReader + StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider]); /// Lowest blocks at which different parts of the state are available. /// They may be [Some] if pruning is enabled. 
@@ -631,7 +633,7 @@ mod tests { use reth_primitives_traits::{Account, StorageEntry}; use reth_storage_api::{ BlockHashReader, BlockNumReader, ChangeSetReader, DBProvider, DatabaseProviderFactory, - NodePrimitivesProvider, StorageSettingsCache, + NodePrimitivesProvider, StorageChangeSetReader, StorageSettingsCache, }; use reth_storage_errors::provider::ProviderError; @@ -647,6 +649,7 @@ mod tests { + BlockNumReader + BlockHashReader + ChangeSetReader + + StorageChangeSetReader + StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider, diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs index 97baab150e..8f7919f7f3 100644 --- a/crates/storage/provider/src/providers/state/overlay.rs +++ b/crates/storage/provider/src/providers/state/overlay.rs @@ -10,6 +10,7 @@ use reth_stages_types::StageId; use reth_storage_api::{ BlockNumReader, ChangeSetReader, DBProvider, DatabaseProviderFactory, DatabaseProviderROFactory, PruneCheckpointReader, StageCheckpointReader, + StorageChangeSetReader, }; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, @@ -196,6 +197,7 @@ where F::Provider: StageCheckpointReader + PruneCheckpointReader + ChangeSetReader + + StorageChangeSetReader + DBProvider + BlockNumReader, { @@ -446,7 +448,11 @@ where impl DatabaseProviderROFactory for OverlayStateProviderFactory where F: DatabaseProviderFactory, - F::Provider: StageCheckpointReader + PruneCheckpointReader + BlockNumReader + ChangeSetReader, + F::Provider: StageCheckpointReader + + PruneCheckpointReader + + BlockNumReader + + ChangeSetReader + + StorageChangeSetReader, { type Provider = OverlayStateProvider; diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 8c7b5fb50a..dff1b6d303 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ 
b/crates/storage/provider/src/providers/static_file/manager.rs @@ -3,10 +3,10 @@ use super::{ StaticFileJarProvider, StaticFileProviderRW, StaticFileProviderRWRefMut, }; use crate::{ - changeset_walker::StaticFileAccountChangesetWalker, to_range, BlockHashReader, BlockNumReader, - BlockReader, BlockSource, EitherWriter, EitherWriterDestination, HeaderProvider, - ReceiptProvider, StageCheckpointReader, StatsReader, TransactionVariant, TransactionsProvider, - TransactionsProviderExt, + changeset_walker::{StaticFileAccountChangesetWalker, StaticFileStorageChangesetWalker}, + to_range, BlockHashReader, BlockNumReader, BlockReader, BlockSource, EitherWriter, + EitherWriterDestination, HeaderProvider, ReceiptProvider, StageCheckpointReader, StatsReader, + TransactionVariant, TransactionsProvider, TransactionsProviderExt, }; use alloy_consensus::{transaction::TransactionMeta, Header}; use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; @@ -20,12 +20,12 @@ use reth_db::{ lockfile::StorageLock, static_file::{ iter_static_files, BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, - StaticFileCursor, TransactionMask, TransactionSenderMask, + StaticFileCursor, StorageChangesetMask, TransactionMask, TransactionSenderMask, }, }; use reth_db_api::{ cursor::DbCursorRO, - models::{AccountBeforeTx, StoredBlockBodyIndices}, + models::{AccountBeforeTx, BlockNumberAddress, StorageBeforeTx, StoredBlockBodyIndices}, table::{Decompress, Table, Value}, tables, transaction::DbTx, @@ -35,6 +35,7 @@ use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; use reth_node_types::NodePrimitives; use reth_primitives_traits::{ AlloyBlockHeader as _, BlockBody as _, RecoveredBlock, SealedHeader, SignedTransaction, + StorageEntry, }; use reth_stages_types::{PipelineTarget, StageId}; use reth_static_file_types::{ @@ -42,7 +43,8 @@ use reth_static_file_types::{ StaticFileSegment, DEFAULT_BLOCKS_PER_STATIC_FILE, }; use reth_storage_api::{ - BlockBodyIndicesProvider, 
ChangeSetReader, DBProvider, StorageSettingsCache, + BlockBodyIndicesProvider, ChangeSetReader, DBProvider, StorageChangeSetReader, + StorageSettingsCache, }; use reth_storage_errors::provider::{ProviderError, ProviderResult, StaticFileWriterError}; use std::{ @@ -92,6 +94,8 @@ pub struct StaticFileWriteCtx { pub write_receipts: bool, /// Whether account changesets should be written to static files. pub write_account_changesets: bool, + /// Whether storage changesets should be written to static files. + pub write_storage_changesets: bool, /// The current chain tip block number (for pruning). pub tip: BlockNumber, /// The prune mode for receipts, if any. @@ -622,6 +626,35 @@ impl StaticFileProvider { Ok(()) } + /// Writes storage changesets for all blocks to the static file segment. + #[instrument(level = "debug", target = "providers::db", skip_all)] + fn write_storage_changesets( + w: &mut StaticFileProviderRWRefMut<'_, N>, + blocks: &[ExecutedBlock], + ) -> ProviderResult<()> { + for block in blocks { + let block_number = block.recovered_block().number(); + let reverts = block.execution_outcome().state.reverts.to_plain_state_reverts(); + + for storage_block_reverts in reverts.storage { + let changeset = storage_block_reverts + .into_iter() + .flat_map(|revert| { + revert.storage_revert.into_iter().map(move |(key, revert_to_slot)| { + StorageBeforeTx { + address: revert.address, + key: B256::new(key.to_be_bytes()), + value: revert_to_slot.to_previous_value(), + } + }) + }) + .collect::>(); + w.append_storage_changeset(changeset, block_number)?; + } + } + Ok(()) + } + /// Spawns a scoped thread that writes to a static file segment using the provided closure. /// /// The closure receives a mutable reference to the segment writer. 
After the closure completes, @@ -697,6 +730,15 @@ impl StaticFileProvider { ) }); + let h_storage_changesets = ctx.write_storage_changesets.then(|| { + self.spawn_segment_writer( + s, + StaticFileSegment::StorageChangeSets, + first_block_number, + |w| Self::write_storage_changesets(w, blocks), + ) + }); + h_headers.join().map_err(|_| StaticFileWriterError::ThreadPanic("headers"))??; h_txs.join().map_err(|_| StaticFileWriterError::ThreadPanic("transactions"))??; if let Some(h) = h_senders { @@ -709,6 +751,10 @@ impl StaticFileProvider { h.join() .map_err(|_| StaticFileWriterError::ThreadPanic("account_changesets"))??; } + if let Some(h) = h_storage_changesets { + h.join() + .map_err(|_| StaticFileWriterError::ThreadPanic("storage_changesets"))??; + } Ok(()) }) } @@ -1381,6 +1427,13 @@ impl StaticFileProvider { highest_tx, highest_block, )?, + StaticFileSegment::StorageChangeSets => self + .ensure_changeset_invariants_by_block::<_, tables::StorageChangeSets, _>( + provider, + segment, + highest_block, + |key| key.block_number(), + )?, } { debug!(target: "reth::providers::static_file", ?segment, unwind_target=unwind, "Invariants check returned unwind target"); update_unwind_target(unwind); @@ -1462,6 +1515,13 @@ impl StaticFileProvider { } true } + StaticFileSegment::StorageChangeSets => { + if EitherWriter::storage_changesets_destination(provider).is_database() { + debug!(target: "reth::providers::static_file", ?segment, "Skipping storage changesets segment: changesets stored in database"); + return false + } + true + } } } @@ -1594,9 +1654,9 @@ impl StaticFileProvider { let stage_id = match segment { StaticFileSegment::Headers => StageId::Headers, StaticFileSegment::Transactions => StageId::Bodies, - StaticFileSegment::Receipts | StaticFileSegment::AccountChangeSets => { - StageId::Execution - } + StaticFileSegment::Receipts | + StaticFileSegment::AccountChangeSets | + StaticFileSegment::StorageChangeSets => StageId::Execution, StaticFileSegment::TransactionSenders 
=> StageId::SenderRecovery, }; let checkpoint_block_number = @@ -1651,7 +1711,9 @@ impl StaticFileProvider { StaticFileSegment::TransactionSenders => { writer.prune_transaction_senders(number, checkpoint_block_number)? } - StaticFileSegment::Headers | StaticFileSegment::AccountChangeSets => { + StaticFileSegment::Headers | + StaticFileSegment::AccountChangeSets | + StaticFileSegment::StorageChangeSets => { unreachable!() } } @@ -1662,6 +1724,9 @@ impl StaticFileProvider { StaticFileSegment::AccountChangeSets => { writer.prune_account_changesets(checkpoint_block_number)?; } + StaticFileSegment::StorageChangeSets => { + writer.prune_storage_changesets(checkpoint_block_number)?; + } } debug!(target: "reth::providers::static_file", ?segment, "Committing writer after pruning"); writer.commit()?; @@ -1672,6 +1737,105 @@ impl StaticFileProvider { Ok(None) } + fn ensure_changeset_invariants_by_block( + &self, + provider: &Provider, + segment: StaticFileSegment, + highest_static_file_block: Option, + block_from_key: F, + ) -> ProviderResult> + where + Provider: DBProvider + BlockReader + StageCheckpointReader, + T: Table, + F: Fn(&T::Key) -> BlockNumber, + { + debug!( + target: "reth::providers::static_file", + ?segment, + ?highest_static_file_block, + "Ensuring changeset invariants" + ); + let mut db_cursor = provider.tx_ref().cursor_read::()?; + + if let Some((db_first_key, _)) = db_cursor.first()? { + let db_first_block = block_from_key(&db_first_key); + if let Some(highest_block) = highest_static_file_block && + !(db_first_block <= highest_block || highest_block + 1 == db_first_block) + { + info!( + target: "reth::providers::static_file", + ?db_first_block, + ?highest_block, + unwind_target = highest_block, + ?segment, + "Setting unwind target." + ); + return Ok(Some(highest_block)) + } + + if let Some((db_last_key, _)) = db_cursor.last()? 
&& + highest_static_file_block + .is_none_or(|highest_block| block_from_key(&db_last_key) > highest_block) + { + debug!( + target: "reth::providers::static_file", + ?segment, + "Database has entries beyond static files, no unwind needed" + ); + return Ok(None) + } + } else { + debug!(target: "reth::providers::static_file", ?segment, "No database entries found"); + } + + let highest_static_file_block = highest_static_file_block.unwrap_or_default(); + + let stage_id = match segment { + StaticFileSegment::Headers => StageId::Headers, + StaticFileSegment::Transactions => StageId::Bodies, + StaticFileSegment::Receipts | + StaticFileSegment::AccountChangeSets | + StaticFileSegment::StorageChangeSets => StageId::Execution, + StaticFileSegment::TransactionSenders => StageId::SenderRecovery, + }; + let checkpoint_block_number = + provider.get_stage_checkpoint(stage_id)?.unwrap_or_default().block_number; + + if checkpoint_block_number > highest_static_file_block { + info!( + target: "reth::providers::static_file", + checkpoint_block_number, + unwind_target = highest_static_file_block, + ?segment, + "Setting unwind target." + ); + return Ok(Some(highest_static_file_block)) + } + + if checkpoint_block_number < highest_static_file_block { + info!( + target: "reth::providers", + ?segment, + from = highest_static_file_block, + to = checkpoint_block_number, + "Unwinding static file segment." + ); + let mut writer = self.latest_writer(segment)?; + match segment { + StaticFileSegment::AccountChangeSets => { + writer.prune_account_changesets(checkpoint_block_number)?; + } + StaticFileSegment::StorageChangeSets => { + writer.prune_storage_changesets(checkpoint_block_number)?; + } + _ => unreachable!("invalid segment for changeset invariants"), + } + writer.commit()?; + } + + Ok(None) + } + /// Returns the earliest available block number that has not been expired and is still /// available. 
/// @@ -2212,6 +2376,124 @@ impl ChangeSetReader for StaticFileProvider { } } +impl StorageChangeSetReader for StaticFileProvider { + fn storage_changeset( + &self, + block_number: BlockNumber, + ) -> ProviderResult> { + let provider = match self.get_segment_provider_for_block( + StaticFileSegment::StorageChangeSets, + block_number, + None, + ) { + Ok(provider) => provider, + Err(ProviderError::MissingStaticFileBlock(_, _)) => return Ok(Vec::new()), + Err(err) => return Err(err), + }; + + if let Some(offset) = provider.user_header().changeset_offset(block_number) { + let mut cursor = provider.cursor()?; + let mut changeset = Vec::with_capacity(offset.num_changes() as usize); + + for i in offset.changeset_range() { + if let Some(change) = cursor.get_one::(i.into())? { + let block_address = BlockNumberAddress((block_number, change.address)); + let entry = StorageEntry { key: change.key, value: change.value }; + changeset.push((block_address, entry)); + } + } + Ok(changeset) + } else { + Ok(Vec::new()) + } + } + + fn get_storage_before_block( + &self, + block_number: BlockNumber, + address: Address, + storage_key: B256, + ) -> ProviderResult> { + let provider = match self.get_segment_provider_for_block( + StaticFileSegment::StorageChangeSets, + block_number, + None, + ) { + Ok(provider) => provider, + Err(ProviderError::MissingStaticFileBlock(_, _)) => return Ok(None), + Err(err) => return Err(err), + }; + + let user_header = provider.user_header(); + let Some(offset) = user_header.changeset_offset(block_number) else { + return Ok(None); + }; + + let mut cursor = provider.cursor()?; + let range = offset.changeset_range(); + let mut low = range.start; + let mut high = range.end; + + while low < high { + let mid = low + (high - low) / 2; + if let Some(change) = cursor.get_one::(mid.into())? 
{ + match (change.address, change.key).cmp(&(address, storage_key)) { + std::cmp::Ordering::Less => low = mid + 1, + _ => high = mid, + } + } else { + debug!( + target: "provider::static_file", + ?low, + ?mid, + ?high, + ?range, + ?block_number, + ?address, + ?storage_key, + "Cannot continue binary search for storage changeset fetch" + ); + low = range.end; + break; + } + } + + if low < range.end && + let Some(change) = cursor + .get_one::(low.into())? + .filter(|change| change.address == address && change.key == storage_key) + { + return Ok(Some(StorageEntry { key: change.key, value: change.value })); + } + + Ok(None) + } + + fn storage_changesets_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.walk_storage_changeset_range(range).collect() + } + + fn storage_changeset_count(&self) -> ProviderResult { + let mut count = 0; + + let static_files = iter_static_files(&self.path).map_err(ProviderError::other)?; + if let Some(changeset_segments) = static_files.get(StaticFileSegment::StorageChangeSets) { + for (_, header) in changeset_segments { + if let Some(changeset_offsets) = header.changeset_offsets() { + for offset in changeset_offsets { + count += offset.num_changes() as usize; + } + } + } + } + + Ok(count) + } +} + impl StaticFileProvider { /// Creates an iterator for walking through account changesets in the specified block range. /// @@ -2228,6 +2510,14 @@ impl StaticFileProvider { ) -> StaticFileAccountChangesetWalker { StaticFileAccountChangesetWalker::new(self.clone(), range) } + + /// Creates an iterator for walking through storage changesets in the specified block range. 
+ pub fn walk_storage_changeset_range( + &self, + range: impl RangeBounds, + ) -> StaticFileStorageChangesetWalker { + StaticFileStorageChangesetWalker::new(self.clone(), range) + } } impl> HeaderProvider for StaticFileProvider { diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index aa5b61171a..9fdcc1aee1 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -69,14 +69,19 @@ mod tests { use alloy_consensus::{Header, SignableTransaction, Transaction, TxLegacy}; use alloy_primitives::{Address, BlockHash, Signature, TxNumber, B256, U160, U256}; use rand::seq::SliceRandom; - use reth_db::{models::AccountBeforeTx, test_utils::create_test_static_files_dir}; + use reth_db::{ + models::{AccountBeforeTx, StorageBeforeTx}, + test_utils::create_test_static_files_dir, + }; use reth_db_api::{transaction::DbTxMut, CanonicalHeaders, HeaderNumbers, Headers}; use reth_ethereum_primitives::{EthPrimitives, Receipt, TransactionSigned}; use reth_primitives_traits::Account; use reth_static_file_types::{ find_fixed_range, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE, }; - use reth_storage_api::{ChangeSetReader, ReceiptProvider, TransactionsProvider}; + use reth_storage_api::{ + ChangeSetReader, ReceiptProvider, StorageChangeSetReader, TransactionsProvider, + }; use reth_testing_utils::generators::{self, random_header_range}; use std::{collections::BTreeMap, fmt::Debug, fs, ops::Range, path::Path}; @@ -321,7 +326,9 @@ mod tests { // Append transaction/receipt if there's still a transaction count to append if tx_count > 0 { match segment { - StaticFileSegment::Headers | StaticFileSegment::AccountChangeSets => { + StaticFileSegment::Headers | + StaticFileSegment::AccountChangeSets | + StaticFileSegment::StorageChangeSets => { panic!("non tx based segment") } StaticFileSegment::Transactions => { @@ -438,7 +445,9 @@ mod 
tests { // Prune transactions or receipts based on the segment type match segment { - StaticFileSegment::Headers | StaticFileSegment::AccountChangeSets => { + StaticFileSegment::Headers | + StaticFileSegment::AccountChangeSets | + StaticFileSegment::StorageChangeSets => { panic!("non tx based segment") } StaticFileSegment::Transactions => { @@ -463,7 +472,9 @@ mod tests { // cumulative_gas_used & nonce as ids. if let Some(id) = expected_tx_tip { match segment { - StaticFileSegment::Headers | StaticFileSegment::AccountChangeSets => { + StaticFileSegment::Headers | + StaticFileSegment::AccountChangeSets | + StaticFileSegment::StorageChangeSets => { panic!("non tx based segment") } StaticFileSegment::Transactions => assert_eyre( @@ -1033,4 +1044,311 @@ mod tests { } } } + + #[test] + fn test_storage_changeset_static_files() { + let (static_dir, _) = create_test_static_files_dir(); + + let sf_rw = StaticFileProvider::::read_write(&static_dir) + .expect("Failed to create static file provider"); + + // Test writing and reading storage changesets + { + let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap(); + + // Create test data for multiple blocks + let test_blocks = 10u64; + let entries_per_block = 5; + + for block_num in 0..test_blocks { + let changeset = (0..entries_per_block) + .map(|i| { + let mut addr = Address::ZERO; + addr.0[0] = block_num as u8; + addr.0[1] = i as u8; + StorageBeforeTx { + address: addr, + key: B256::with_last_byte(i as u8), + value: U256::from(block_num * 1000 + i as u64), + } + }) + .collect::>(); + + writer.append_storage_changeset(changeset, block_num).unwrap(); + } + + writer.commit().unwrap(); + } + + // Verify data can be read back correctly + { + let provider = sf_rw + .get_segment_provider_for_block(StaticFileSegment::StorageChangeSets, 5, None) + .unwrap(); + + // Check that the segment header has changeset offsets + assert!(provider.user_header().changeset_offsets().is_some()); + let offsets = 
provider.user_header().changeset_offsets().unwrap(); + assert_eq!(offsets.len(), 10); // Should have 10 blocks worth of offsets + + // Verify each block has the expected number of changes + for (i, offset) in offsets.iter().enumerate() { + assert_eq!(offset.num_changes(), 5, "Block {} should have 5 changes", i); + } + } + } + + #[test] + fn test_get_storage_before_block() { + let (static_dir, _) = create_test_static_files_dir(); + + let sf_rw = StaticFileProvider::::read_write(&static_dir) + .expect("Failed to create static file provider"); + + let test_address = Address::from([1u8; 20]); + let other_address = Address::from([2u8; 20]); + let missing_address = Address::from([3u8; 20]); + let test_key = B256::with_last_byte(1); + let other_key = B256::with_last_byte(2); + + // Write changesets for multiple blocks + { + let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap(); + + // Block 0: test_address and other_address change + writer + .append_storage_changeset( + vec![ + StorageBeforeTx { address: test_address, key: test_key, value: U256::ZERO }, + StorageBeforeTx { + address: other_address, + key: other_key, + value: U256::from(5), + }, + ], + 0, + ) + .unwrap(); + + // Block 1: only other_address changes + writer + .append_storage_changeset( + vec![StorageBeforeTx { + address: other_address, + key: other_key, + value: U256::from(7), + }], + 1, + ) + .unwrap(); + + // Block 2: test_address changes again + writer + .append_storage_changeset( + vec![StorageBeforeTx { + address: test_address, + key: test_key, + value: U256::from(9), + }], + 2, + ) + .unwrap(); + + writer.commit().unwrap(); + } + + // Test get_storage_before_block + { + let result = sf_rw.get_storage_before_block(0, test_address, test_key).unwrap(); + assert!(result.is_some()); + let entry = result.unwrap(); + assert_eq!(entry.key, test_key); + assert_eq!(entry.value, U256::ZERO); + + let result = sf_rw.get_storage_before_block(2, test_address, test_key).unwrap(); + 
assert!(result.is_some()); + let entry = result.unwrap(); + assert_eq!(entry.key, test_key); + assert_eq!(entry.value, U256::from(9)); + + let result = sf_rw.get_storage_before_block(1, test_address, test_key).unwrap(); + assert!(result.is_none()); + + let result = sf_rw.get_storage_before_block(2, missing_address, test_key).unwrap(); + assert!(result.is_none()); + + let result = sf_rw.get_storage_before_block(1, other_address, other_key).unwrap(); + assert!(result.is_some()); + let entry = result.unwrap(); + assert_eq!(entry.key, other_key); + } + } + + #[test] + fn test_storage_changeset_truncation() { + let (static_dir, _) = create_test_static_files_dir(); + + let blocks_per_file = 10; + let files_per_range = 3; + let file_set_count = 3; + let initial_file_count = files_per_range * file_set_count; + let tip = blocks_per_file * file_set_count - 1; + + // Setup: Create storage changesets for multiple blocks + { + let sf_rw: StaticFileProvider = + StaticFileProviderBuilder::read_write(&static_dir) + .with_blocks_per_file(blocks_per_file) + .build() + .expect("failed to create static file provider"); + + let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap(); + + for block_num in 0..=tip { + let num_changes = ((block_num % 5) + 1) as usize; + let mut changeset = Vec::with_capacity(num_changes); + + for i in 0..num_changes { + let mut address = Address::ZERO; + address.0[0] = block_num as u8; + address.0[1] = i as u8; + + changeset.push(StorageBeforeTx { + address, + key: B256::with_last_byte(i as u8), + value: U256::from(block_num * 1000 + i as u64), + }); + } + + writer.append_storage_changeset(changeset, block_num).unwrap(); + } + + writer.commit().unwrap(); + } + + fn validate_truncation( + sf_rw: &StaticFileProvider, + static_dir: impl AsRef, + expected_tip: Option, + expected_file_count: u64, + ) -> eyre::Result<()> { + let highest_block = + sf_rw.get_highest_static_file_block(StaticFileSegment::StorageChangeSets); + 
assert_eyre(highest_block, expected_tip, "block tip mismatch")?; + + assert_eyre( + count_files_without_lockfile(static_dir)?, + expected_file_count as usize, + "file count mismatch", + )?; + + if let Some(tip) = expected_tip { + let provider = sf_rw.get_segment_provider_for_block( + StaticFileSegment::StorageChangeSets, + tip, + None, + )?; + let offsets = provider.user_header().changeset_offsets(); + assert!(offsets.is_some(), "Should have changeset offsets"); + } + + Ok(()) + } + + let sf_rw = StaticFileProviderBuilder::read_write(&static_dir) + .with_blocks_per_file(blocks_per_file) + .build() + .expect("failed to create static file provider"); + + sf_rw.initialize_index().expect("Failed to initialize index"); + + // Case 1: Truncate to block 20 + { + let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap(); + writer.prune_storage_changesets(20).unwrap(); + writer.commit().unwrap(); + + validate_truncation(&sf_rw, &static_dir, Some(20), initial_file_count) + .expect("Truncation validation failed"); + } + + // Case 2: Truncate to block 9 + { + let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap(); + writer.prune_storage_changesets(9).unwrap(); + writer.commit().unwrap(); + + validate_truncation(&sf_rw, &static_dir, Some(9), files_per_range) + .expect("Truncation validation failed"); + } + + // Case 3: Truncate all (should keep block 0) + { + let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap(); + writer.prune_storage_changesets(0).unwrap(); + writer.commit().unwrap(); + + validate_truncation(&sf_rw, &static_dir, Some(0), files_per_range) + .expect("Truncation validation failed"); + } + } + + #[test] + fn test_storage_changeset_binary_search() { + let (static_dir, _) = create_test_static_files_dir(); + + let sf_rw = StaticFileProvider::::read_write(&static_dir) + .expect("Failed to create static file provider"); + + let block_num = 0u64; + let num_slots = 100; + let address = 
Address::from([4u8; 20]); + + let mut keys: Vec = Vec::with_capacity(num_slots); + for i in 0..num_slots { + keys.push(B256::with_last_byte(i as u8)); + } + + { + let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap(); + let changeset = keys + .iter() + .enumerate() + .map(|(i, key)| StorageBeforeTx { address, key: *key, value: U256::from(i as u64) }) + .collect::>(); + + writer.append_storage_changeset(changeset, block_num).unwrap(); + writer.commit().unwrap(); + } + + { + let result = sf_rw.get_storage_before_block(block_num, address, keys[0]).unwrap(); + assert!(result.is_some()); + let entry = result.unwrap(); + assert_eq!(entry.key, keys[0]); + assert_eq!(entry.value, U256::from(0)); + + let result = + sf_rw.get_storage_before_block(block_num, address, keys[num_slots - 1]).unwrap(); + assert!(result.is_some()); + let entry = result.unwrap(); + assert_eq!(entry.key, keys[num_slots - 1]); + + let mid = num_slots / 2; + let result = sf_rw.get_storage_before_block(block_num, address, keys[mid]).unwrap(); + assert!(result.is_some()); + let entry = result.unwrap(); + assert_eq!(entry.key, keys[mid]); + + let missing_key = B256::with_last_byte(255); + let result = sf_rw.get_storage_before_block(block_num, address, missing_key).unwrap(); + assert!(result.is_none()); + + for i in (0..num_slots).step_by(10) { + let result = sf_rw.get_storage_before_block(block_num, address, keys[i]).unwrap(); + assert!(result.is_some()); + assert_eq!(result.unwrap().key, keys[i]); + } + } + } } diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 869554cc79..6c4eb97c4c 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -6,7 +6,7 @@ use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber, TxNumber, U256}; use parking_lot::{lock_api::RwLockWriteGuard, 
RawRwLock, RwLock}; use reth_codecs::Compact; -use reth_db::models::AccountBeforeTx; +use reth_db::models::{AccountBeforeTx, StorageBeforeTx}; use reth_db_api::models::CompactU256; use reth_nippy_jar::{NippyJar, NippyJarError, NippyJarWriter}; use reth_node_types::NodePrimitives; @@ -56,6 +56,11 @@ enum PruneStrategy { /// The target block number to prune to. last_block: BlockNumber, }, + /// Prune storage changesets to a target block number. + StorageChangeSets { + /// The target block number to prune to. + last_block: BlockNumber, + }, } /// Static file writers for every known [`StaticFileSegment`]. @@ -69,6 +74,7 @@ pub(crate) struct StaticFileWriters { receipts: RwLock>>, transaction_senders: RwLock>>, account_change_sets: RwLock>>, + storage_change_sets: RwLock>>, } impl Default for StaticFileWriters { @@ -79,6 +85,7 @@ impl Default for StaticFileWriters { receipts: Default::default(), transaction_senders: Default::default(), account_change_sets: Default::default(), + storage_change_sets: Default::default(), } } } @@ -95,6 +102,7 @@ impl StaticFileWriters { StaticFileSegment::Receipts => self.receipts.write(), StaticFileSegment::TransactionSenders => self.transaction_senders.write(), StaticFileSegment::AccountChangeSets => self.account_change_sets.write(), + StaticFileSegment::StorageChangeSets => self.storage_change_sets.write(), }; if write_guard.is_none() { @@ -113,6 +121,7 @@ impl StaticFileWriters { &self.receipts, &self.transaction_senders, &self.account_change_sets, + &self.storage_change_sets, ] { let mut writer = writer_lock.write(); if let Some(writer) = writer.as_mut() { @@ -131,6 +140,7 @@ impl StaticFileWriters { &self.receipts, &self.transaction_senders, &self.account_change_sets, + &self.storage_change_sets, ] { let writer = writer_lock.read(); if let Some(writer) = writer.as_ref() && @@ -155,6 +165,7 @@ impl StaticFileWriters { &self.receipts, &self.transaction_senders, &self.account_change_sets, + &self.storage_change_sets, ] { let mut writer 
= writer_lock.write(); if let Some(writer) = writer.as_mut() { @@ -388,6 +399,9 @@ impl StaticFileProviderRW { PruneStrategy::AccountChangeSets { last_block } => { self.prune_account_changeset_data(last_block)? } + PruneStrategy::StorageChangeSets { last_block } => { + self.prune_storage_changeset_data(last_block)? + } } } @@ -596,7 +610,7 @@ impl StaticFileProviderRW { /// Commits to the configuration file at the end fn truncate_changesets(&mut self, last_block: u64) -> ProviderResult<()> { let segment = self.writer.user_header().segment(); - debug_assert_eq!(segment, StaticFileSegment::AccountChangeSets); + debug_assert!(segment.is_change_based()); // Get the current block range let current_block_end = self @@ -1076,6 +1090,41 @@ impl StaticFileProviderRW { Ok(()) } + /// Appends a block storage changeset to the static file. + /// + /// It **CALLS** `increment_block()`. + pub fn append_storage_changeset( + &mut self, + mut changeset: Vec, + block_number: u64, + ) -> ProviderResult<()> { + debug_assert!(self.writer.user_header().segment() == StaticFileSegment::StorageChangeSets); + let start = Instant::now(); + + self.increment_block(block_number)?; + self.ensure_no_queued_prune()?; + + // sort by address + storage key + changeset.sort_by_key(|change| (change.address, change.key)); + + let mut count: u64 = 0; + for change in changeset { + self.append_change(&change)?; + count += 1; + } + + if let Some(metrics) = &self.metrics { + metrics.record_segment_operations( + StaticFileSegment::StorageChangeSets, + StaticFileProviderOperation::Append, + count, + Some(start.elapsed()), + ); + } + + Ok(()) + } + /// Adds an instruction to prune `to_delete` transactions during commit. /// /// Note: `last_block` refers to the block the unwinds ends at. @@ -1127,6 +1176,12 @@ impl StaticFileProviderRW { self.queue_prune(PruneStrategy::AccountChangeSets { last_block }) } + /// Adds an instruction to prune storage changesets until the given block. 
+ pub fn prune_storage_changesets(&mut self, last_block: u64) -> ProviderResult<()> { + debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::StorageChangeSets); + self.queue_prune(PruneStrategy::StorageChangeSets { last_block }) + } + /// Adds an instruction to prune elements during commit using the specified strategy. fn queue_prune(&mut self, strategy: PruneStrategy) -> ProviderResult<()> { self.ensure_no_queued_prune()?; @@ -1186,6 +1241,25 @@ impl StaticFileProviderRW { Ok(()) } + /// Prunes the last storage changesets from the data file. + fn prune_storage_changeset_data(&mut self, last_block: BlockNumber) -> ProviderResult<()> { + let start = Instant::now(); + + debug_assert!(self.writer.user_header().segment() == StaticFileSegment::StorageChangeSets); + + self.truncate_changesets(last_block)?; + + if let Some(metrics) = &self.metrics { + metrics.record_segment_operation( + StaticFileSegment::StorageChangeSets, + StaticFileProviderOperation::Prune, + Some(start.elapsed()), + ); + } + + Ok(()) + } + /// Prunes the last `to_delete` receipts from the data file. 
fn prune_receipt_data( &mut self, diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index fba585cc79..f9a2f980ef 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -27,14 +27,14 @@ use reth_ethereum_primitives::EthPrimitives; use reth_execution_types::ExecutionOutcome; use reth_primitives_traits::{ Account, Block, BlockBody, Bytecode, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader, - SignerRecoverable, + SignerRecoverable, StorageEntry, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ BlockBodyIndicesProvider, BytecodeReader, DBProvider, DatabaseProviderFactory, HashedPostStateProvider, NodePrimitivesProvider, StageCheckpointReader, StateProofProvider, - StorageRootProvider, + StorageChangeSetReader, StorageRootProvider, }; use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ @@ -989,6 +989,37 @@ impl ChangeSetReader for MockEthProvi } } +impl StorageChangeSetReader + for MockEthProvider +{ + fn storage_changeset( + &self, + _block_number: BlockNumber, + ) -> ProviderResult> { + Ok(Vec::default()) + } + + fn get_storage_before_block( + &self, + _block_number: BlockNumber, + _address: Address, + _storage_key: B256, + ) -> ProviderResult> { + Ok(None) + } + + fn storage_changesets_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult> { + Ok(Vec::default()) + } + + fn storage_changeset_count(&self) -> ProviderResult { + Ok(0) + } +} + impl StateReader for MockEthProvider { type Receipt = T::Receipt; diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 67c633559c..8fb9c38706 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -10,14 +10,18 @@ use reth_chain_state::{ 
CanonStateSubscriptions, ForkChoiceSubscriptions, PersistedBlockSubscriptions, }; use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; -use reth_storage_api::NodePrimitivesProvider; +use reth_storage_api::{NodePrimitivesProvider, StorageChangeSetReader}; use std::fmt::Debug; /// Helper trait to unify all provider traits for simplicity. pub trait FullProvider: DatabaseProviderFactory< DB = N::DB, - Provider: BlockReader + StageCheckpointReader + PruneCheckpointReader + ChangeSetReader, + Provider: BlockReader + + StageCheckpointReader + + PruneCheckpointReader + + ChangeSetReader + + StorageChangeSetReader, > + NodePrimitivesProvider + StaticFileProviderFactory + RocksDBProviderFactory @@ -32,6 +36,7 @@ pub trait FullProvider: + HashedPostStateProvider + ChainSpecProvider + ChangeSetReader + + StorageChangeSetReader + CanonStateSubscriptions + ForkChoiceSubscriptions

> + PersistedBlockSubscriptions @@ -46,7 +51,11 @@ pub trait FullProvider: impl FullProvider for T where T: DatabaseProviderFactory< DB = N::DB, - Provider: BlockReader + StageCheckpointReader + PruneCheckpointReader + ChangeSetReader, + Provider: BlockReader + + StageCheckpointReader + + PruneCheckpointReader + + ChangeSetReader + + StorageChangeSetReader, > + NodePrimitivesProvider + StaticFileProviderFactory + RocksDBProviderFactory @@ -61,6 +70,7 @@ impl FullProvider for T where + HashedPostStateProvider + ChainSpecProvider + ChangeSetReader + + StorageChangeSetReader + CanonStateSubscriptions + ForkChoiceSubscriptions
> + PersistedBlockSubscriptions diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index beb9d23165..a12a6cfcc9 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -10,7 +10,7 @@ use crate::{ }; #[cfg(feature = "db-api")] -use crate::{DBProvider, DatabaseProviderFactory}; +use crate::{DBProvider, DatabaseProviderFactory, StorageChangeSetReader}; use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec}; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; @@ -28,7 +28,9 @@ use reth_db_api::mock::{DatabaseMock, TxMock}; use reth_db_models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_ethereum_primitives::EthPrimitives; use reth_execution_types::ExecutionOutcome; -use reth_primitives_traits::{Account, Bytecode, NodePrimitives, RecoveredBlock, SealedHeader}; +use reth_primitives_traits::{ + Account, Bytecode, NodePrimitives, RecoveredBlock, SealedHeader, StorageEntry, +}; #[cfg(feature = "db-api")] use reth_prune_types::PruneModes; use reth_prune_types::{PruneCheckpoint, PruneSegment}; @@ -408,6 +410,36 @@ impl ChangeSetReader for NoopProvider { } } +#[cfg(feature = "db-api")] +impl StorageChangeSetReader for NoopProvider { + fn storage_changeset( + &self, + _block_number: BlockNumber, + ) -> ProviderResult> { + Ok(Vec::default()) + } + + fn get_storage_before_block( + &self, + _block_number: BlockNumber, + _address: Address, + _storage_key: B256, + ) -> ProviderResult> { + Ok(None) + } + + fn storage_changesets_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult> { + Ok(Vec::default()) + } + + fn storage_changeset_count(&self) -> ProviderResult { + Ok(0) + } +} + impl StateRootProvider for NoopProvider { fn state_root(&self, _state: HashedPostState) -> ProviderResult { Ok(B256::default()) diff --git a/crates/storage/storage-api/src/storage.rs b/crates/storage/storage-api/src/storage.rs index 
51a9c5e5e5..ecd47ff50d 100644 --- a/crates/storage/storage-api/src/storage.rs +++ b/crates/storage/storage-api/src/storage.rs @@ -4,6 +4,7 @@ use alloc::{ }; use alloy_primitives::{Address, BlockNumber, B256}; use core::ops::RangeInclusive; +use reth_db_models::StorageBeforeTx; use reth_primitives_traits::StorageEntry; use reth_storage_errors::provider::ProviderResult; @@ -41,4 +42,44 @@ pub trait StorageChangeSetReader: Send { &self, block_number: BlockNumber, ) -> ProviderResult>; + + /// Search the block's changesets for the given address and storage key, and return the result. + /// + /// Returns `None` if the storage slot was not changed in this block. + fn get_storage_before_block( + &self, + block_number: BlockNumber, + address: Address, + storage_key: B256, + ) -> ProviderResult>; + + /// Get all storage changesets in a range of blocks. + /// + /// NOTE: Get inclusive range of blocks. + fn storage_changesets_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>; + + /// Get the total count of all storage changes. + fn storage_changeset_count(&self) -> ProviderResult; + + /// Get storage changesets for a block as static-file rows. + /// + /// Default implementation uses `storage_changeset` and maps to `StorageBeforeTx`. 
+ fn storage_block_changeset( + &self, + block_number: BlockNumber, + ) -> ProviderResult> { + self.storage_changeset(block_number).map(|changesets| { + changesets + .into_iter() + .map(|(block_address, entry)| StorageBeforeTx { + address: block_address.address(), + key: entry.key, + value: entry.value, + }) + .collect() + }) + } } diff --git a/crates/trie/db/src/changesets.rs b/crates/trie/db/src/changesets.rs index fe9558e3bc..deccb1df45 100644 --- a/crates/trie/db/src/changesets.rs +++ b/crates/trie/db/src/changesets.rs @@ -10,7 +10,9 @@ use crate::{DatabaseHashedPostState, DatabaseStateRoot, DatabaseTrieCursorFactory}; use alloy_primitives::{map::B256Map, BlockNumber, B256}; use parking_lot::RwLock; -use reth_storage_api::{BlockNumReader, ChangeSetReader, DBProvider, StageCheckpointReader}; +use reth_storage_api::{ + BlockNumReader, ChangeSetReader, DBProvider, StageCheckpointReader, StorageChangeSetReader, +}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::{ changesets::compute_trie_changesets, @@ -65,7 +67,11 @@ pub fn compute_block_trie_changesets( block_number: BlockNumber, ) -> Result where - Provider: DBProvider + StageCheckpointReader + ChangeSetReader + BlockNumReader, + Provider: DBProvider + + StageCheckpointReader + + ChangeSetReader + + StorageChangeSetReader + + BlockNumReader, { debug!( target: "trie::changeset_cache", @@ -175,7 +181,11 @@ pub fn compute_block_trie_updates( block_number: BlockNumber, ) -> ProviderResult where - Provider: DBProvider + StageCheckpointReader + ChangeSetReader + BlockNumReader, + Provider: DBProvider + + StageCheckpointReader + + ChangeSetReader + + StorageChangeSetReader + + BlockNumReader, { let tx = provider.tx_ref(); @@ -323,7 +333,11 @@ impl ChangesetCache { provider: &P, ) -> ProviderResult> where - P: DBProvider + StageCheckpointReader + ChangeSetReader + BlockNumReader, + P: DBProvider + + StageCheckpointReader + + ChangeSetReader + + StorageChangeSetReader + + 
BlockNumReader, { // Try cache first (with read lock) { @@ -408,7 +422,11 @@ impl ChangesetCache { range: RangeInclusive, ) -> ProviderResult where - P: DBProvider + StageCheckpointReader + ChangeSetReader + BlockNumReader, + P: DBProvider + + StageCheckpointReader + + ChangeSetReader + + StorageChangeSetReader + + BlockNumReader, { // Get the database tip block number let db_tip_block = provider diff --git a/crates/trie/db/src/lib.rs b/crates/trie/db/src/lib.rs index f509702c2e..ce49539fd4 100644 --- a/crates/trie/db/src/lib.rs +++ b/crates/trie/db/src/lib.rs @@ -18,7 +18,9 @@ pub use hashed_cursor::{ pub use prefix_set::{load_prefix_sets_with_provider, PrefixSetLoader}; pub use proof::{DatabaseProof, DatabaseStorageProof}; pub use state::{DatabaseHashedPostState, DatabaseStateRoot}; -pub use storage::{DatabaseHashedStorage, DatabaseStorageRoot}; +pub use storage::{ + hashed_storage_from_reverts_with_provider, DatabaseHashedStorage, DatabaseStorageRoot, +}; pub use trie_cursor::{ DatabaseAccountTrieCursor, DatabaseStorageTrieCursor, DatabaseTrieCursorFactory, }; diff --git a/crates/trie/db/src/prefix_set.rs b/crates/trie/db/src/prefix_set.rs index b92afa4201..eec1bad362 100644 --- a/crates/trie/db/src/prefix_set.rs +++ b/crates/trie/db/src/prefix_set.rs @@ -14,7 +14,7 @@ use reth_db_api::{ DatabaseError, }; use reth_primitives_traits::StorageEntry; -use reth_storage_api::{ChangeSetReader, DBProvider}; +use reth_storage_api::{ChangeSetReader, DBProvider, StorageChangeSetReader}; use reth_storage_errors::provider::ProviderError; use reth_trie::{ prefix_set::{PrefixSetMut, TriePrefixSets}, @@ -93,7 +93,7 @@ pub fn load_prefix_sets_with_provider( range: RangeInclusive, ) -> Result where - Provider: ChangeSetReader + DBProvider, + Provider: ChangeSetReader + StorageChangeSetReader + DBProvider, KH: KeyHasher, { let tx = provider.tx_ref(); @@ -118,12 +118,9 @@ where } } - // Walk storage changeset and insert storage prefixes - // Note: Storage changesets don't have 
static files yet, so we still use direct cursor - let mut storage_cursor = tx.cursor_dup_read::()?; - let storage_range = BlockNumberAddress::range(range); - for storage_entry in storage_cursor.walk_range(storage_range)? { - let (BlockNumberAddress((_, address)), StorageEntry { key, .. }) = storage_entry?; + // Walk storage changesets using the provider (handles static files + database) + let storage_changesets = provider.storage_changesets_range(range)?; + for (BlockNumberAddress((_, address)), StorageEntry { key, .. }) in storage_changesets { let hashed_address = KH::hash_key(address); account_prefix_set.insert(Nibbles::unpack(hashed_address)); storage_prefix_sets diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index a4f7640256..3be7464a92 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -3,13 +3,11 @@ use crate::{ }; use alloy_primitives::{map::B256Map, BlockNumber, B256}; use reth_db_api::{ - cursor::DbCursorRO, - models::{AccountBeforeTx, BlockNumberAddress, BlockNumberAddressRange}, - tables, + models::{AccountBeforeTx, BlockNumberAddress}, transaction::DbTx, }; use reth_execution_errors::StateRootError; -use reth_storage_api::{BlockNumReader, ChangeSetReader, DBProvider}; +use reth_storage_api::{BlockNumReader, ChangeSetReader, DBProvider, StorageChangeSetReader}; use reth_storage_errors::provider::ProviderError; use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, trie_cursor::InMemoryTrieCursorFactory, @@ -34,7 +32,7 @@ pub trait DatabaseStateRoot<'a, TX>: Sized { /// /// An instance of state root calculator with account and storage prefixes loaded. fn incremental_root_calculator( - provider: &'a (impl ChangeSetReader + DBProvider), + provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider), range: RangeInclusive, ) -> Result; @@ -45,7 +43,7 @@ pub trait DatabaseStateRoot<'a, TX>: Sized { /// /// The updated state root. 
fn incremental_root( - provider: &'a (impl ChangeSetReader + DBProvider), + provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider), range: RangeInclusive, ) -> Result; @@ -58,7 +56,7 @@ pub trait DatabaseStateRoot<'a, TX>: Sized { /// /// The updated state root and the trie updates. fn incremental_root_with_updates( - provider: &'a (impl ChangeSetReader + DBProvider), + provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider), range: RangeInclusive, ) -> Result<(B256, TrieUpdates), StateRootError>; @@ -69,7 +67,7 @@ pub trait DatabaseStateRoot<'a, TX>: Sized { /// /// The intermediate progress of state root computation. fn incremental_root_with_progress( - provider: &'a (impl ChangeSetReader + DBProvider), + provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider), range: RangeInclusive, ) -> Result; @@ -133,7 +131,7 @@ pub trait DatabaseHashedPostState: Sized { /// Initializes [`HashedPostStateSorted`] from reverts. Iterates over state reverts in the /// specified range and aggregates them into sorted hashed state. 
fn from_reverts( - provider: &(impl ChangeSetReader + BlockNumReader + DBProvider), + provider: &(impl ChangeSetReader + StorageChangeSetReader + BlockNumReader + DBProvider), range: impl RangeBounds, ) -> Result; } @@ -146,7 +144,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> } fn incremental_root_calculator( - provider: &'a (impl ChangeSetReader + DBProvider), + provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider), range: RangeInclusive, ) -> Result { let loaded_prefix_sets = @@ -155,7 +153,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> } fn incremental_root( - provider: &'a (impl ChangeSetReader + DBProvider), + provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider), range: RangeInclusive, ) -> Result { debug!(target: "trie::loader", ?range, "incremental state root"); @@ -163,7 +161,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> } fn incremental_root_with_updates( - provider: &'a (impl ChangeSetReader + DBProvider), + provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider), range: RangeInclusive, ) -> Result<(B256, TrieUpdates), StateRootError> { debug!(target: "trie::loader", ?range, "incremental state root"); @@ -171,7 +169,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> } fn incremental_root_with_progress( - provider: &'a (impl ChangeSetReader + DBProvider), + provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider), range: RangeInclusive, ) -> Result { debug!(target: "trie::loader", ?range, "incremental state root with progress"); @@ -248,11 +246,9 @@ impl DatabaseHashedPostState for HashedPostStateSorted { /// - Hashes keys and returns them already ordered for trie iteration. 
#[instrument(target = "trie::db", skip(provider), fields(range))] fn from_reverts( - provider: &(impl ChangeSetReader + BlockNumReader + DBProvider), + provider: &(impl ChangeSetReader + StorageChangeSetReader + BlockNumReader + DBProvider), range: impl RangeBounds, ) -> Result { - let tx = provider.tx_ref(); - // Extract concrete start/end values to use for both account and storage changesets. let start = match range.start_bound() { Bound::Included(&n) => n, @@ -266,9 +262,6 @@ impl DatabaseHashedPostState for HashedPostStateSorted { Bound::Unbounded => BlockNumber::MAX, }; - // Convert to BlockNumberAddressRange for storage changesets. - let storage_range: BlockNumberAddressRange = (start..end).into(); - // Iterate over account changesets and record value before first occurring account change let mut accounts = Vec::new(); let mut seen_accounts = HashSet::new(); @@ -280,20 +273,23 @@ impl DatabaseHashedPostState for HashedPostStateSorted { } accounts.sort_unstable_by_key(|(hash, _)| *hash); - // Read storages directly into B256Map> with HashSet to track seen keys. + // Read storages into B256Map> with HashSet to track seen keys. // Only keep the first (oldest) occurrence of each (address, slot) pair. let mut storages = B256Map::>::default(); let mut seen_storage_keys = HashSet::new(); - let mut storage_changesets_cursor = tx.cursor_read::()?; - for entry in storage_changesets_cursor.walk_range(storage_range)? { - let (BlockNumberAddress((_, address)), storage) = entry?; - if seen_storage_keys.insert((address, storage.key)) { - let hashed_address = KH::hash_key(address); - storages - .entry(hashed_address) - .or_default() - .push((KH::hash_key(storage.key), storage.value)); + if start < end { + let end_inclusive = end.saturating_sub(1); + for (BlockNumberAddress((_, address)), storage) in + provider.storage_changesets_range(start..=end_inclusive)? 
+ { + if seen_storage_keys.insert((address, storage.key)) { + let hashed_address = KH::hash_key(address); + storages + .entry(hashed_address) + .or_default() + .push((KH::hash_key(storage.key), storage.value)); + } } } diff --git a/crates/trie/db/src/storage.rs b/crates/trie/db/src/storage.rs index 42d0d464c7..b4614eb15b 100644 --- a/crates/trie/db/src/storage.rs +++ b/crates/trie/db/src/storage.rs @@ -4,6 +4,8 @@ use reth_db_api::{ cursor::DbCursorRO, models::BlockNumberAddress, tables, transaction::DbTx, DatabaseError, }; use reth_execution_errors::StorageRootError; +use reth_storage_api::{BlockNumReader, StorageChangeSetReader}; +use reth_storage_errors::provider::ProviderResult; use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, HashedPostState, HashedStorage, StorageRoot, }; @@ -34,6 +36,36 @@ pub trait DatabaseHashedStorage: Sized { fn from_reverts(tx: &TX, address: Address, from: BlockNumber) -> Result; } +/// Initializes [`HashedStorage`] from reverts using a provider. +pub fn hashed_storage_from_reverts_with_provider

( + provider: &P, + address: Address, + from: BlockNumber, +) -> ProviderResult +where + P: StorageChangeSetReader + BlockNumReader, +{ + let mut storage = HashedStorage::new(false); + let tip = provider.last_block_number()?; + + if from > tip { + return Ok(storage) + } + + for (BlockNumberAddress((_, storage_address)), storage_change) in + provider.storage_changesets_range(from..=tip)? + { + if storage_address == address { + let hashed_slot = keccak256(storage_change.key); + if let hash_map::Entry::Vacant(entry) = storage.storage.entry(hashed_slot) { + entry.insert(storage_change.value); + } + } + } + + Ok(storage) +} + impl<'a, TX: DbTx> DatabaseStorageRoot<'a, TX> for StorageRoot, DatabaseHashedCursorFactory<&'a TX>> { diff --git a/docs/vocs/docs/pages/cli/SUMMARY.mdx b/docs/vocs/docs/pages/cli/SUMMARY.mdx index 89a390f3f7..ac193f4d95 100644 --- a/docs/vocs/docs/pages/cli/SUMMARY.mdx +++ b/docs/vocs/docs/pages/cli/SUMMARY.mdx @@ -35,6 +35,7 @@ - [`reth db settings set storages_history`](./reth/db/settings/set/storages_history.mdx) - [`reth db settings set transaction_hash_numbers`](./reth/db/settings/set/transaction_hash_numbers.mdx) - [`reth db settings set account_history`](./reth/db/settings/set/account_history.mdx) + - [`reth db settings set storage_changesets`](./reth/db/settings/set/storage_changesets.mdx) - [`reth db account-storage`](./reth/db/account-storage.mdx) - [`reth download`](./reth/download.mdx) - [`reth stage`](./reth/stage.mdx) @@ -93,6 +94,7 @@ - [`op-reth db settings set storages_history`](./op-reth/db/settings/set/storages_history.mdx) - [`op-reth db settings set transaction_hash_numbers`](./op-reth/db/settings/set/transaction_hash_numbers.mdx) - [`op-reth db settings set account_history`](./op-reth/db/settings/set/account_history.mdx) + - [`op-reth db settings set storage_changesets`](./op-reth/db/settings/set/storage_changesets.mdx) - [`op-reth db account-storage`](./op-reth/db/account-storage.mdx) - [`op-reth stage`](./op-reth/stage.mdx) 
- [`op-reth stage run`](./op-reth/stage/run.mdx) diff --git a/docs/vocs/docs/pages/cli/op-reth/db.mdx b/docs/vocs/docs/pages/cli/op-reth/db.mdx index 9d5fd0032a..d8a816e23a 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db.mdx @@ -124,6 +124,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -154,6 +157,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/db/checksum/static-file.mdx b/docs/vocs/docs/pages/cli/op-reth/db/checksum/static-file.mdx index 0515b99883..a9939730d1 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/checksum/static-file.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/checksum/static-file.mdx @@ -18,6 +18,7 @@ Arguments: - receipts: Static File segment responsible for the `Receipts` table - transaction-senders: Static File segment responsible for the `TransactionSenders` table - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + - storage-change-sets: Static File segment responsible for the `StorageChangeSets` table Options: --start-block diff --git a/docs/vocs/docs/pages/cli/op-reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/op-reth/db/clear/static-file.mdx index 5ee09d1b05..a0ceb8ee50 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/clear/static-file.mdx @@ -16,6 +16,7 @@ Arguments: - receipts: Static File segment responsible for the `Receipts` table - transaction-senders: Static File segment responsible for the `TransactionSenders` table - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + - storage-change-sets: Static File segment responsible for the `StorageChangeSets` table Options: -h, --help diff --git a/docs/vocs/docs/pages/cli/op-reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/op-reth/db/get/static-file.mdx index cd979cffde..af64e2e00a 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/get/static-file.mdx @@ -16,6 +16,7 @@ Arguments: - receipts: Static File segment responsible for the `Receipts` table - transaction-senders: Static File segment responsible for the `TransactionSenders` table - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + - storage-change-sets: Static File segment 
responsible for the `StorageChangeSets` table The key to get content for diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx index c804080a0d..8aa2ae3cdc 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx @@ -15,6 +15,7 @@ Commands: storages_history Store storage history in rocksdb instead of MDBX transaction_hash_numbers Store transaction hash to number mapping in rocksdb instead of MDBX account_history Store account history in rocksdb instead of MDBX + storage_changesets Store storage changesets in static files instead of the database help Print this message or the help of the given subcommand(s) Options: diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings/set/storage_changesets.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/storage_changesets.mdx new file mode 100644 index 0000000000..d84b848a6b --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/storage_changesets.mdx @@ -0,0 +1,170 @@ +# op-reth db settings set storage_changesets + +Store storage changesets in static files instead of the database + +```bash +$ op-reth db settings set storage_changesets --help +``` +```txt +Usage: op-reth db settings set storage_changesets [OPTIONS] + +Arguments: + + [possible values: true, false] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces and logs. + + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/block.mdx b/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/block.mdx index a95bbcfbca..bdf56a9804 100644 --- a/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/block.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/block.mdx @@ -18,6 +18,7 @@ Arguments: - receipts: Static File segment responsible for the `Receipts` table - transaction-senders: Static File segment responsible for the `TransactionSenders` table - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + - storage-change-sets: Static File segment responsible for the `StorageChangeSets` table Block number to query diff --git a/docs/vocs/docs/pages/cli/op-reth/import-op.mdx b/docs/vocs/docs/pages/cli/op-reth/import-op.mdx index 95ef59d63e..c5affadf9f 100644 --- a/docs/vocs/docs/pages/cli/op-reth/import-op.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/import-op.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx b/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx index 499017a379..398086f9dc 100644 --- a/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/op-reth/init-state.mdx b/docs/vocs/docs/pages/cli/op-reth/init-state.mdx index 9637e30cd6..3e3e1ba019 100644 --- a/docs/vocs/docs/pages/cli/op-reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/init-state.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. 
+ + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/op-reth/init.mdx b/docs/vocs/docs/pages/cli/op-reth/init.mdx index 01cd9d866a..9a0930b4fe 100644 --- a/docs/vocs/docs/pages/cli/op-reth/init.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/init.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/node.mdx b/docs/vocs/docs/pages/cli/op-reth/node.mdx index 3493c9ac4b..98205ad008 100644 --- a/docs/vocs/docs/pages/cli/op-reth/node.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/node.mdx @@ -1036,6 +1036,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -1066,6 +1069,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + Rollup: --rollup.sequencer Endpoint for the sequencer mempool (can be both HTTP and WS) diff --git a/docs/vocs/docs/pages/cli/op-reth/prune.mdx b/docs/vocs/docs/pages/cli/op-reth/prune.mdx index 1409abf05f..603af5d99e 100644 --- a/docs/vocs/docs/pages/cli/op-reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/prune.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. 
+ + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx b/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx index 484805486e..c185b91027 100644 --- a/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx index effc3dfe6b..d5034f0d4b 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx index 9843a02256..9150154c31 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx @@ -115,6 +115,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -145,6 +148,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. 
+ + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx index 3130a06819..75b39f76c7 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx index 496417aeb4..37852456cf 100644 --- a/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx @@ -113,6 +113,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -143,6 +146,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index 5fd0ef4199..11f25e6973 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -124,6 +124,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -154,6 +157,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. 
+ + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum/static-file.mdx index 04bd067b27..274d060dab 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum/static-file.mdx @@ -18,6 +18,7 @@ Arguments: - receipts: Static File segment responsible for the `Receipts` table - transaction-senders: Static File segment responsible for the `TransactionSenders` table - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + - storage-change-sets: Static File segment responsible for the `StorageChangeSets` table Options: --start-block diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index e9a7792cf0..1fb9e442d4 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -16,6 +16,7 @@ Arguments: - receipts: Static File segment responsible for the `Receipts` table - transaction-senders: Static File segment responsible for the `TransactionSenders` table - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + - storage-change-sets: Static File segment responsible for the `StorageChangeSets` table Options: -h, --help diff --git a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index 2a79431014..0e0d6e95c5 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -16,6 +16,7 @@ Arguments: - receipts: Static File segment 
responsible for the `Receipts` table - transaction-senders: Static File segment responsible for the `TransactionSenders` table - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + - storage-change-sets: Static File segment responsible for the `StorageChangeSets` table The key to get content for diff --git a/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx b/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx index 53a1b8aea0..ecc8163de1 100644 --- a/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx @@ -15,6 +15,7 @@ Commands: storages_history Store storage history in rocksdb instead of MDBX transaction_hash_numbers Store transaction hash to number mapping in rocksdb instead of MDBX account_history Store account history in rocksdb instead of MDBX + storage_changesets Store storage changesets in static files instead of the database help Print this message or the help of the given subcommand(s) Options: diff --git a/docs/vocs/docs/pages/cli/reth/db/settings/set/storage_changesets.mdx b/docs/vocs/docs/pages/cli/reth/db/settings/set/storage_changesets.mdx new file mode 100644 index 0000000000..d160b89518 --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/db/settings/set/storage_changesets.mdx @@ -0,0 +1,170 @@ +# reth db settings set storage_changesets + +Store storage changesets in static files instead of the database + +```bash +$ reth db settings set storage_changesets --help +``` +```txt +Usage: reth db settings set storage_changesets [OPTIONS] + +Arguments: + + [possible values: true, false] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + + --logs-otlp[=] + Enable `Opentelemetry` logs export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317` + + Example: --logs-otlp=http://collector:4318/v1/logs + + [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=] + + --logs-otlp.filter + Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --logs-otlp.filter=info,reth=debug + + Defaults to INFO if not specified. + + [default: info] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces and logs. + + - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to DEBUG if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/static-file-header/block.mdx b/docs/vocs/docs/pages/cli/reth/db/static-file-header/block.mdx index dfa69b1281..54808d46f4 100644 --- a/docs/vocs/docs/pages/cli/reth/db/static-file-header/block.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/static-file-header/block.mdx @@ -18,6 +18,7 @@ Arguments: - receipts: Static File segment responsible for the `Receipts` table - transaction-senders: Static File segment responsible for the `TransactionSenders` table - account-change-sets: Static File segment responsible for the `AccountChangeSets` table + - storage-change-sets: Static File segment responsible for the `StorageChangeSets` table Block number to query diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index dfc81c0bf5..02ff7298c7 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + 
Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index 64bc803824..9275a12059 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index 3821881662..aa13fd5f56 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index ad81cc3d18..50ed891bcf 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. 
+ + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index b1d05e4b5f..cbaf086f7c 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index b6c5ee0539..bc4fe2c30c 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index b076f3eee4..75ee10f7c1 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -1036,6 +1036,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -1066,6 +1069,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. 
+ + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + Ress: --ress.enable Enable support for `ress` subprotocol diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index 8e33c02504..a40d116b5f 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index aa0615070c..30f2f8fc21 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. 
@@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index 15318efa4c..9835286266 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index f78ed561f9..9aefa35542 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -115,6 +115,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -145,6 +148,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 8752c5e526..c06c786879 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -108,6 +108,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -138,6 +141,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. 
+ + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index 8c4f9ef9f7..af442b243d 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -113,6 +113,9 @@ Static Files: --static-files.blocks-per-file.account-change-sets Number of blocks per file for the account changesets segment + --static-files.blocks-per-file.storage-change-sets + Number of blocks per file for the storage changesets segment + --static-files.receipts Store receipts in static files instead of the database. @@ -143,6 +146,16 @@ Static Files: [default: false] [possible values: true, false] + --static-files.storage-change-sets + Store storage changesets in static files. + + When enabled, storage changesets will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + [default: false] + [possible values: true, false] + RocksDB: --rocksdb.all Route all supported tables to `RocksDB` instead of MDBX. 
diff --git a/docs/vocs/sidebar-cli-op-reth.ts b/docs/vocs/sidebar-cli-op-reth.ts index 58d814f0e3..7a15d76437 100644 --- a/docs/vocs/sidebar-cli-op-reth.ts +++ b/docs/vocs/sidebar-cli-op-reth.ts @@ -159,6 +159,10 @@ export const opRethCliSidebar: SidebarItem = { { text: "op-reth db settings set account_history", link: "/cli/op-reth/db/settings/set/account_history" + }, + { + text: "op-reth db settings set storage_changesets", + link: "/cli/op-reth/db/settings/set/storage_changesets" } ] } diff --git a/docs/vocs/sidebar-cli-reth.ts b/docs/vocs/sidebar-cli-reth.ts index 91c39eabb6..5b5a74c2a6 100644 --- a/docs/vocs/sidebar-cli-reth.ts +++ b/docs/vocs/sidebar-cli-reth.ts @@ -163,6 +163,10 @@ export const rethCliSidebar: SidebarItem = { { text: "reth db settings set account_history", link: "/cli/reth/db/settings/set/account_history" + }, + { + text: "reth db settings set storage_changesets", + link: "/cli/reth/db/settings/set/storage_changesets" } ] }