feat: add StaticFileSegment::StorageChangeSets (#20896)

This commit is contained in:
Dan Cline
2026-01-22 15:03:47 +00:00
committed by GitHub
parent cc242f83fd
commit ebe2ca1366
82 changed files with 2391 additions and 165 deletions

1
Cargo.lock generated
View File

@@ -7907,6 +7907,7 @@ dependencies = [
"reth-stages-types",
"reth-static-file",
"reth-static-file-types",
"reth-storage-api",
"reth-tasks",
"reth-trie",
"reth-trie-common",

View File

@@ -50,6 +50,7 @@ reth-stages-types = { workspace = true, optional = true }
reth-static-file-types = { workspace = true, features = ["clap"] }
reth-static-file.workspace = true
reth-tasks.workspace = true
reth-storage-api.workspace = true
reth-trie = { workspace = true, features = ["metrics"] }
reth-trie-db = { workspace = true, features = ["metrics"] }
reth-trie-common.workspace = true

View File

@@ -21,6 +21,7 @@ use reth_node_builder::NodeTypesWithDB;
use reth_primitives_traits::ValueWithSubKey;
use reth_provider::{providers::ProviderNodeTypes, ChangeSetReader, StaticFileProviderFactory};
use reth_static_file_types::StaticFileSegment;
use reth_storage_api::StorageChangeSetReader;
use tracing::error;
/// The arguments for the `reth db get` command
@@ -82,6 +83,33 @@ impl Command {
table.view(&GetValueViewer { tool, key, subkey, end_key, end_subkey, raw })?
}
Subcommand::StaticFile { segment, key, subkey, raw } => {
if let StaticFileSegment::StorageChangeSets = segment {
let storage_key =
table_subkey::<tables::StorageChangeSets>(subkey.as_deref()).ok();
let key = table_key::<tables::StorageChangeSets>(&key)?;
let provider = tool.provider_factory.static_file_provider();
if let Some(storage_key) = storage_key {
let entry = provider.get_storage_before_block(
key.block_number(),
key.address(),
storage_key,
)?;
if let Some(entry) = entry {
println!("{}", serde_json::to_string_pretty(&entry)?);
} else {
error!(target: "reth::cli", "No content for the given table key.");
}
return Ok(());
}
let changesets = provider.storage_changeset(key.block_number())?;
println!("{}", serde_json::to_string_pretty(&changesets)?);
return Ok(());
}
let (key, subkey, mask): (u64, _, _) = match segment {
StaticFileSegment::Headers => (
table_key::<tables::Headers>(&key)?,
@@ -112,6 +140,9 @@ impl Command {
AccountChangesetMask::MASK,
)
}
StaticFileSegment::StorageChangeSets => {
unreachable!("storage changesets handled above");
}
};
// handle account changesets differently if a subkey is provided.
@@ -190,6 +221,9 @@ impl Command {
StaticFileSegment::AccountChangeSets => {
unreachable!("account changeset static files are special cased before this match")
}
StaticFileSegment::StorageChangeSets => {
unreachable!("storage changeset static files are special cased before this match")
}
}
}
}

View File

@@ -69,6 +69,11 @@ pub enum SetCommand {
#[clap(action(ArgAction::Set))]
value: bool,
},
/// Store storage changesets in static files instead of the database
StorageChangesets {
#[clap(action(ArgAction::Set))]
value: bool,
},
}
impl Command {
@@ -115,6 +120,7 @@ impl Command {
transaction_hash_numbers_in_rocksdb: _,
account_history_in_rocksdb: _,
account_changesets_in_static_files: _,
storage_changesets_in_static_files: _,
} = settings.unwrap_or_else(StorageSettings::legacy);
// Update the setting based on the key
@@ -167,6 +173,14 @@ impl Command {
settings.account_history_in_rocksdb = value;
println!("Set account_history_in_rocksdb = {}", value);
}
SetCommand::StorageChangesets { value } => {
if settings.storage_changesets_in_static_files == value {
println!("storage_changesets_in_static_files is already set to {}", value);
return Ok(());
}
settings.storage_changesets_in_static_files = value;
println!("Set storage_changesets_in_static_files = {}", value);
}
}
// Write updated settings

View File

@@ -91,6 +91,9 @@ impl<C: ChainSpecParser> Command<C> {
StaticFileSegment::AccountChangeSets => {
writer.prune_account_changesets(highest_block)?;
}
StaticFileSegment::StorageChangeSets => {
writer.prune_storage_changesets(highest_block)?;
}
}
}
}

View File

@@ -438,6 +438,8 @@ pub struct BlocksPerFileConfig {
pub transaction_senders: Option<u64>,
/// Number of blocks per file for the account changesets segment.
pub account_change_sets: Option<u64>,
/// Number of blocks per file for the storage changesets segment.
pub storage_change_sets: Option<u64>,
}
impl StaticFilesConfig {
@@ -451,6 +453,7 @@ impl StaticFilesConfig {
receipts,
transaction_senders,
account_change_sets,
storage_change_sets,
} = self.blocks_per_file;
eyre::ensure!(headers != Some(0), "Headers segment blocks per file must be greater than 0");
eyre::ensure!(
@@ -469,6 +472,10 @@ impl StaticFilesConfig {
account_change_sets != Some(0),
"Account changesets segment blocks per file must be greater than 0"
);
eyre::ensure!(
storage_change_sets != Some(0),
"Storage changesets segment blocks per file must be greater than 0"
);
Ok(())
}
@@ -480,6 +487,7 @@ impl StaticFilesConfig {
receipts,
transaction_senders,
account_change_sets,
storage_change_sets,
} = self.blocks_per_file;
let mut map = StaticFileMap::default();
@@ -492,6 +500,7 @@ impl StaticFilesConfig {
StaticFileSegment::Receipts => receipts,
StaticFileSegment::TransactionSenders => transaction_senders,
StaticFileSegment::AccountChangeSets => account_change_sets,
StaticFileSegment::StorageChangeSets => storage_change_sets,
};
if let Some(blocks_per_file) = blocks_per_file {

View File

@@ -32,7 +32,8 @@ use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock, Sealed
use reth_provider::{
BlockExecutionOutput, BlockExecutionResult, BlockNumReader, BlockReader, ChangeSetReader,
DatabaseProviderFactory, HashedPostStateProvider, ProviderError, StageCheckpointReader,
StateProviderBox, StateProviderFactory, StateReader, TransactionVariant,
StateProviderBox, StateProviderFactory, StateReader, StorageChangeSetReader,
TransactionVariant,
};
use reth_revm::database::StateProviderDatabase;
use reth_stages_api::ControlFlow;
@@ -317,6 +318,7 @@ where
<P as DatabaseProviderFactory>::Provider: BlockReader<Block = N::Block, Header = N::BlockHeader>
+ StageCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader,
C: ConfigureEvm<Primitives = N> + 'static,
T: PayloadTypes<BuiltPayload: BuiltPayload<Primitives = N>>,

View File

@@ -1323,7 +1323,7 @@ mod tests {
use reth_provider::{
providers::OverlayStateProviderFactory, test_utils::create_test_provider_factory,
BlockNumReader, BlockReader, ChangeSetReader, DatabaseProviderFactory, LatestStateProvider,
PruneCheckpointReader, StageCheckpointReader, StateProviderBox,
PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StorageChangeSetReader,
};
use reth_trie::MultiProof;
use reth_trie_db::ChangesetCache;
@@ -1350,6 +1350,7 @@ mod tests {
+ StageCheckpointReader
+ PruneCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader,
> + Clone
+ Send

View File

@@ -39,7 +39,7 @@ use reth_provider::{
providers::OverlayStateProviderFactory, BlockExecutionOutput, BlockNumReader, BlockReader,
ChangeSetReader, DatabaseProviderFactory, DatabaseProviderROFactory, HashedPostStateProvider,
ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProvider,
StateProviderFactory, StateReader,
StateProviderFactory, StateReader, StorageChangeSetReader,
};
use reth_revm::db::{states::bundle_state::BundleRetention, State};
use reth_trie::{updates::TrieUpdates, HashedPostState, StateRoot};
@@ -144,6 +144,7 @@ where
+ StageCheckpointReader
+ PruneCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader,
> + BlockReader<Header = N::BlockHeader>
+ ChangeSetReader
@@ -1336,6 +1337,7 @@ where
+ StageCheckpointReader
+ PruneCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader,
> + BlockReader<Header = N::BlockHeader>
+ StateProviderFactory

View File

@@ -2,6 +2,7 @@
use clap::Args;
use reth_config::config::{BlocksPerFileConfig, StaticFilesConfig};
use reth_storage_api::StorageSettings;
/// Blocks per static file when running in `--minimal` node.
///
@@ -40,6 +41,10 @@ pub struct StaticFilesArgs {
#[arg(long = "static-files.blocks-per-file.account-change-sets")]
pub blocks_per_file_account_change_sets: Option<u64>,
/// Number of blocks per file for the storage changesets segment.
#[arg(long = "static-files.blocks-per-file.storage-change-sets")]
pub blocks_per_file_storage_change_sets: Option<u64>,
/// Store receipts in static files instead of the database.
///
/// When enabled, receipts will be written to static files on disk instead of the database.
@@ -68,6 +73,16 @@ pub struct StaticFilesArgs {
/// the node has been initialized, changing this flag requires re-syncing from scratch.
#[arg(long = "static-files.account-change-sets", default_value_t = default_static_file_flag(), action = clap::ArgAction::Set)]
pub account_changesets: bool,
/// Store storage changesets in static files.
///
/// When enabled, storage changesets will be written to static files on disk instead of the
/// database.
///
/// Note: This setting can only be configured at genesis initialization. Once
/// the node has been initialized, changing this flag requires re-syncing from scratch.
#[arg(long = "static-files.storage-change-sets", default_value_t = default_static_file_flag(), action = clap::ArgAction::Set)]
pub storage_changesets: bool,
}
impl StaticFilesArgs {
@@ -98,9 +113,25 @@ impl StaticFilesArgs {
account_change_sets: self
.blocks_per_file_account_change_sets
.or(config.blocks_per_file.account_change_sets),
storage_change_sets: self
.blocks_per_file_storage_change_sets
.or(config.blocks_per_file.storage_change_sets),
},
}
}
/// Converts the static files arguments into [`StorageSettings`].
///
/// Starts from a feature-dependent baseline (`edge()` when the `edge` feature
/// is enabled, `legacy()` otherwise) and layers the CLI flags for receipts,
/// transaction senders, account changesets and storage changesets on top.
pub const fn to_settings(&self) -> StorageSettings {
    // Exactly one of these two bindings is compiled in, selected by the
    // `edge` cargo feature; both assign the same `base` name.
    #[cfg(feature = "edge")]
    let base = StorageSettings::edge();
    #[cfg(not(feature = "edge"))]
    let base = StorageSettings::legacy();
    base.with_receipts_in_static_files(self.receipts)
        .with_transaction_senders_in_static_files(self.transaction_senders)
        .with_account_changesets_in_static_files(self.account_changesets)
        .with_storage_changesets_in_static_files(self.storage_changesets)
}
}
impl Default for StaticFilesArgs {
@@ -111,9 +142,11 @@ impl Default for StaticFilesArgs {
blocks_per_file_receipts: None,
blocks_per_file_transaction_senders: None,
blocks_per_file_account_change_sets: None,
blocks_per_file_storage_change_sets: None,
receipts: default_static_file_flag(),
transaction_senders: default_static_file_flag(),
account_changesets: default_static_file_flag(),
storage_changesets: default_static_file_flag(),
}
}
}

View File

@@ -363,6 +363,7 @@ impl<ChainSpec> NodeConfig<ChainSpec> {
.with_receipts_in_static_files(self.static_files.receipts)
.with_transaction_senders_in_static_files(self.static_files.transaction_senders)
.with_account_changesets_in_static_files(self.static_files.account_changesets)
.with_storage_changesets_in_static_files(self.static_files.storage_changesets)
.with_transaction_hash_numbers_in_rocksdb(self.rocksdb.all || self.rocksdb.tx_hash)
.with_storages_history_in_rocksdb(self.rocksdb.all || self.rocksdb.storages_history)
.with_account_history_in_rocksdb(self.rocksdb.all || self.rocksdb.account_history)

View File

@@ -1,4 +1,4 @@
use super::collect_history_indices;
use super::{collect_history_indices, collect_storage_history_indices};
use crate::{stages::utils::load_storage_history, StageCheckpoint, StageId};
use reth_config::config::{EtlConfig, IndexHistoryConfig};
use reth_db_api::{
@@ -8,7 +8,8 @@ use reth_db_api::{
};
use reth_provider::{
DBProvider, EitherWriter, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter,
RocksDBProviderFactory, StorageSettingsCache,
RocksDBProviderFactory, StaticFileProviderFactory, StorageChangeSetReader,
StorageSettingsCache,
};
use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment};
use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput};
@@ -54,6 +55,8 @@ where
+ PruneCheckpointWriter
+ StorageSettingsCache
+ RocksDBProviderFactory
+ StorageChangeSetReader
+ StaticFileProviderFactory
+ reth_provider::NodePrimitivesProvider,
{
/// Return the id of the stage
@@ -121,7 +124,9 @@ where
}
info!(target: "sync::stages::index_storage_history::exec", ?first_sync, ?use_rocksdb, "Collecting indices");
let collector =
let collector = if provider.cached_storage_settings().storage_changesets_in_static_files {
collect_storage_history_indices(provider, range.clone(), &self.etl_config)?
} else {
collect_history_indices::<_, tables::StorageChangeSets, tables::StoragesHistory, _>(
provider,
BlockNumberAddress::range(range.clone()),
@@ -130,7 +135,8 @@ where
},
|(key, value)| (key.block_number(), AddressStorageKey((key.address(), value.key))),
&self.etl_config,
)?;
)?
};
info!(target: "sync::stages::index_storage_history::exec", "Loading indices into database");

View File

@@ -9,7 +9,7 @@ use reth_db_api::{
use reth_primitives_traits::{GotExpected, SealedHeader};
use reth_provider::{
ChangeSetReader, DBProvider, HeaderProvider, ProviderError, StageCheckpointReader,
StageCheckpointWriter, StatsReader, TrieWriter,
StageCheckpointWriter, StatsReader, StorageChangeSetReader, TrieWriter,
};
use reth_stages_api::{
BlockErrorKind, EntitiesCheckpoint, ExecInput, ExecOutput, MerkleCheckpoint, Stage,
@@ -159,6 +159,7 @@ where
+ StatsReader
+ HeaderProvider
+ ChangeSetReader
+ StorageChangeSetReader
+ StageCheckpointReader
+ StageCheckpointWriter,
{

View File

@@ -6,7 +6,7 @@ use reth_primitives_traits::{GotExpected, SealedHeader};
use reth_provider::{
BlockNumReader, ChainStateBlockReader, ChangeSetReader, DBProvider, HeaderProvider,
ProviderError, PruneCheckpointReader, PruneCheckpointWriter, StageCheckpointReader,
StageCheckpointWriter, TrieWriter,
StageCheckpointWriter, StorageChangeSetReader, TrieWriter,
};
use reth_prune_types::{
PruneCheckpoint, PruneMode, PruneSegment, MERKLE_CHANGESETS_RETENTION_BLOCKS,
@@ -167,7 +167,8 @@ impl MerkleChangeSets {
+ HeaderProvider
+ ChainStateBlockReader
+ BlockNumReader
+ ChangeSetReader,
+ ChangeSetReader
+ StorageChangeSetReader,
{
let target_start = target_range.start;
let target_end = target_range.end;
@@ -308,6 +309,7 @@ where
+ PruneCheckpointReader
+ PruneCheckpointWriter
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader,
{
fn id(&self) -> StageId {

View File

@@ -5,7 +5,7 @@ use reth_db_api::{
cursor::{DbCursorRO, DbCursorRW},
models::{
sharded_key::NUM_OF_INDICES_IN_SHARD, storage_sharded_key::StorageShardedKey,
AccountBeforeTx, ShardedKey,
AccountBeforeTx, AddressStorageKey, BlockNumberAddress, ShardedKey,
},
table::{Decode, Decompress, Table},
transaction::DbTx,
@@ -19,7 +19,7 @@ use reth_provider::{
};
use reth_stages_api::StageError;
use reth_static_file_types::StaticFileSegment;
use reth_storage_api::ChangeSetReader;
use reth_storage_api::{ChangeSetReader, StorageChangeSetReader};
use std::{collections::HashMap, hash::Hash, ops::RangeBounds};
use tracing::info;
@@ -102,15 +102,15 @@ where
}
/// Allows collecting indices from a cache with a custom insert fn
fn collect_indices<F>(
cache: impl Iterator<Item = (Address, Vec<u64>)>,
fn collect_indices<K, F>(
cache: impl Iterator<Item = (K, Vec<u64>)>,
mut insert_fn: F,
) -> Result<(), StageError>
where
F: FnMut(Address, Vec<u64>) -> Result<(), StageError>,
F: FnMut(K, Vec<u64>) -> Result<(), StageError>,
{
for (address, indices) in cache {
insert_fn(address, indices)?
for (key, indices) in cache {
insert_fn(key, indices)?
}
Ok(())
}
@@ -174,6 +174,62 @@ where
Ok(collector)
}
/// Collects storage history indices using a provider that implements `StorageChangeSetReader`.
///
/// Walks storage changesets from static files over `range`, accumulates the
/// block numbers at which each `(address, slot)` pair changed, and writes them
/// into an ETL [`Collector`] keyed by [`StorageShardedKey`] for later loading
/// into the history index.
pub(crate) fn collect_storage_history_indices<Provider>(
    provider: &Provider,
    range: impl RangeBounds<BlockNumber>,
    etl_config: &EtlConfig,
) -> Result<Collector<StorageShardedKey, BlockNumberList>, StageError>
where
    Provider: DBProvider + StorageChangeSetReader + StaticFileProviderFactory,
{
    let mut collector = Collector::new(etl_config.file_size, etl_config.dir.clone());
    // In-memory accumulator: (address, slot) -> sorted list of block numbers.
    let mut cache: HashMap<AddressStorageKey, Vec<u64>> = HashMap::default();
    // Flushes one key's accumulated indices into the collector. The shard key
    // embeds the highest block number in the batch, which is how sharded
    // history keys are ordered.
    let mut insert_fn = |key: AddressStorageKey, indices: Vec<u64>| {
        // `indices` is only ever built by pushing at least one element before
        // this closure is called, so `last()` cannot be empty.
        let last = indices.last().expect("qed");
        collector.insert(
            StorageShardedKey::new(key.0 .0, key.0 .1, *last),
            BlockNumberList::new_pre_sorted(indices.into_iter()),
        )?;
        Ok::<(), StageError>(())
    };
    let range = to_range(range);
    let static_file_provider = provider.static_file_provider();
    let total_changesets = static_file_provider.storage_changeset_count()?;
    // Log progress roughly every 0.1% of the total changesets.
    let interval = (total_changesets / 1000).max(1);
    let walker = static_file_provider.walk_storage_changeset_range(range);
    // Counts distinct blocks seen since the last flush, bounding cache growth.
    let mut flush_counter = 0;
    // Sentinel: u64::MAX guarantees the first entry registers a block change.
    let mut current_block_number = u64::MAX;
    for (idx, changeset_result) in walker.enumerate() {
        let (BlockNumberAddress((block_number, address)), storage) = changeset_result?;
        cache.entry(AddressStorageKey((address, storage.key))).or_default().push(block_number);
        if idx > 0 && idx % interval == 0 && total_changesets > 1000 {
            info!(target: "sync::stages::index_history", progress = %format!("{:.4}%", (idx as f64 / total_changesets as f64) * 100.0), "Collecting indices");
        }
        if block_number != current_block_number {
            current_block_number = block_number;
            flush_counter += 1;
        }
        // Flush on block boundaries once enough blocks have accumulated; a
        // key split across flushes simply produces multiple shards, which the
        // sharded-key scheme supports.
        if flush_counter > DEFAULT_CACHE_THRESHOLD {
            collect_indices(cache.drain(), &mut insert_fn)?;
            flush_counter = 0;
        }
    }
    // Flush whatever remains after the walk completes.
    collect_indices(cache.into_iter(), insert_fn)?;
    Ok(collector)
}
/// Loads account history indices into the database via `EitherWriter`.
///
/// Works with [`EitherWriter`] to support both MDBX and `RocksDB` backends.

View File

@@ -55,6 +55,11 @@ pub enum StaticFileSegment {
/// * address 0xbb, account info
/// * address 0xcc, account info
AccountChangeSets,
/// Static File segment responsible for the `StorageChangeSets` table.
///
/// Storage changeset static files append block-by-block changesets sorted by address and
/// storage slot.
StorageChangeSets,
}
impl StaticFileSegment {
@@ -71,6 +76,7 @@ impl StaticFileSegment {
Self::Receipts => "receipts",
Self::TransactionSenders => "transaction-senders",
Self::AccountChangeSets => "account-change-sets",
Self::StorageChangeSets => "storage-change-sets",
}
}
@@ -83,6 +89,7 @@ impl StaticFileSegment {
Self::Receipts,
Self::TransactionSenders,
Self::AccountChangeSets,
Self::StorageChangeSets,
]
.into_iter()
}
@@ -99,7 +106,8 @@ impl StaticFileSegment {
Self::Transactions |
Self::Receipts |
Self::TransactionSenders |
Self::AccountChangeSets => 1,
Self::AccountChangeSets |
Self::StorageChangeSets => 1,
}
}
@@ -161,14 +169,14 @@ impl StaticFileSegment {
pub const fn is_tx_based(&self) -> bool {
match self {
Self::Receipts | Self::Transactions | Self::TransactionSenders => true,
Self::Headers | Self::AccountChangeSets => false,
Self::Headers | Self::AccountChangeSets | Self::StorageChangeSets => false,
}
}
/// Returns `true` if the segment is [`StaticFileSegment::AccountChangeSets`]
/// Returns `true` if the segment is change-based.
pub const fn is_change_based(&self) -> bool {
match self {
Self::AccountChangeSets => true,
Self::AccountChangeSets | Self::StorageChangeSets => true,
Self::Receipts | Self::Transactions | Self::Headers | Self::TransactionSenders => false,
}
}
@@ -180,7 +188,8 @@ impl StaticFileSegment {
Self::Receipts |
Self::Transactions |
Self::TransactionSenders |
Self::AccountChangeSets => false,
Self::AccountChangeSets |
Self::StorageChangeSets => false,
}
}
@@ -259,10 +268,10 @@ impl<'de> Visitor<'de> for SegmentHeaderVisitor {
let tx_range =
seq.next_element()?.ok_or_else(|| serde::de::Error::invalid_length(2, &self))?;
let segment =
let segment: StaticFileSegment =
seq.next_element()?.ok_or_else(|| serde::de::Error::invalid_length(3, &self))?;
let changeset_offsets = if segment == StaticFileSegment::AccountChangeSets {
let changeset_offsets = if segment.is_change_based() {
// Try to read the 5th field (changeset_offsets)
// If it doesn't exist (old format), this will return None
match seq.next_element()? {
@@ -309,8 +318,8 @@ impl Serialize for SegmentHeader {
where
S: Serializer,
{
// We serialize an extra field, the changeset offsets, for account changesets
let len = if self.segment.is_account_change_sets() { 5 } else { 4 };
// We serialize an extra field, the changeset offsets, for change-based segments
let len = if self.segment.is_change_based() { 5 } else { 4 };
let mut state = serializer.serialize_struct("SegmentHeader", len)?;
state.serialize_field("expected_block_range", &self.expected_block_range)?;
@@ -318,7 +327,7 @@ impl Serialize for SegmentHeader {
state.serialize_field("tx_range", &self.tx_range)?;
state.serialize_field("segment", &self.segment)?;
if self.segment.is_account_change_sets() {
if self.segment.is_change_based() {
state.serialize_field("changeset_offsets", &self.changeset_offsets)?;
}
@@ -672,6 +681,12 @@ mod tests {
"static_file_account-change-sets_1123233_11223233",
None,
),
(
StaticFileSegment::StorageChangeSets,
1_123_233..=11_223_233,
"static_file_storage-change-sets_1123233_11223233",
None,
),
(
StaticFileSegment::Headers,
2..=30,
@@ -755,6 +770,13 @@ mod tests {
segment: StaticFileSegment::AccountChangeSets,
changeset_offsets: Some(vec![ChangesetOffset { offset: 1, num_changes: 1 }; 100]),
},
SegmentHeader {
expected_block_range: SegmentRangeInclusive::new(0, 200),
block_range: Some(SegmentRangeInclusive::new(0, 100)),
tx_range: None,
segment: StaticFileSegment::StorageChangeSets,
changeset_offsets: Some(vec![ChangesetOffset { offset: 1, num_changes: 1 }; 100]),
},
];
// Check that we test all segments
assert_eq!(
@@ -788,6 +810,7 @@ mod tests {
StaticFileSegment::Receipts => "receipts",
StaticFileSegment::TransactionSenders => "transaction-senders",
StaticFileSegment::AccountChangeSets => "account-change-sets",
StaticFileSegment::StorageChangeSets => "storage-change-sets",
};
assert_eq!(static_str, expected_str);
}
@@ -806,6 +829,7 @@ mod tests {
StaticFileSegment::Receipts => "Receipts",
StaticFileSegment::TransactionSenders => "TransactionSenders",
StaticFileSegment::AccountChangeSets => "AccountChangeSets",
StaticFileSegment::StorageChangeSets => "StorageChangeSets",
};
assert_eq!(ser, format!("\"{expected_str}\""));
}

View File

@@ -0,0 +1,5 @@
---
source: crates/static-file/types/src/segment.rs
expression: "Bytes::from(serialized)"
---
0x01000000000000000000000000000000c800000000000000010000000000000000640000000000000000050000000164000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000
010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000100000000000000010000000000000001000000000000000000000000000000000000000000000000

View File

@@ -31,6 +31,9 @@ pub struct StorageSettings {
/// Whether this node should read and write account changesets from static files.
#[serde(default)]
pub account_changesets_in_static_files: bool,
/// Whether this node should read and write storage changesets from static files.
#[serde(default)]
pub storage_changesets_in_static_files: bool,
}
impl StorageSettings {
@@ -59,6 +62,7 @@ impl StorageSettings {
receipts_in_static_files: true,
transaction_senders_in_static_files: true,
account_changesets_in_static_files: true,
storage_changesets_in_static_files: true,
storages_history_in_rocksdb: false,
transaction_hash_numbers_in_rocksdb: true,
account_history_in_rocksdb: false,
@@ -78,6 +82,7 @@ impl StorageSettings {
transaction_hash_numbers_in_rocksdb: false,
account_history_in_rocksdb: false,
account_changesets_in_static_files: false,
storage_changesets_in_static_files: false,
}
}
@@ -117,6 +122,12 @@ impl StorageSettings {
self
}
/// Sets the `storage_changesets_in_static_files` flag to the provided value.
///
/// Builder-style setter: consumes `self` and returns the updated settings so
/// calls can be chained with the other `with_*` setters.
pub const fn with_storage_changesets_in_static_files(mut self, value: bool) -> Self {
    self.storage_changesets_in_static_files = value;
    self
}
/// Returns `true` if any tables are configured to be stored in `RocksDB`.
pub const fn any_in_rocksdb(&self) -> bool {
self.transaction_hash_numbers_in_rocksdb ||

View File

@@ -29,8 +29,8 @@ pub use blocks::*;
pub use integer_list::IntegerList;
pub use metadata::*;
pub use reth_db_models::{
AccountBeforeTx, ClientVersion, StaticFileBlockWithdrawals, StoredBlockBodyIndices,
StoredBlockWithdrawals,
AccountBeforeTx, ClientVersion, StaticFileBlockWithdrawals, StorageBeforeTx,
StoredBlockBodyIndices, StoredBlockWithdrawals,
};
pub use sharded_key::ShardedKey;
@@ -230,6 +230,7 @@ impl_compression_for_compact!(
StaticFileBlockWithdrawals,
Bytecode,
AccountBeforeTx,
StorageBeforeTx,
TransactionSigned,
CompactU256,
StageCheckpoint,

View File

@@ -19,6 +19,10 @@ pub use accounts::AccountBeforeTx;
pub mod blocks;
pub use blocks::{StaticFileBlockWithdrawals, StoredBlockBodyIndices, StoredBlockWithdrawals};
/// Storage
pub mod storage;
pub use storage::StorageBeforeTx;
/// Client Version
pub mod client_version;
pub use client_version::ClientVersion;

View File

@@ -0,0 +1,48 @@
use alloy_primitives::{Address, B256, U256};
use reth_primitives_traits::ValueWithSubKey;
/// Storage entry as it is saved in the static files.
///
/// [`B256`] is the subkey.
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
pub struct StorageBeforeTx {
    /// Address for the storage entry. Acts as `DupSort::SubKey` in static files.
    pub address: Address,
    /// Storage key.
    pub key: B256,
    /// Value on storage key.
    pub value: U256,
}

impl ValueWithSubKey for StorageBeforeTx {
    type SubKey = B256;

    /// Returns the storage slot key, used as the subkey for lookups.
    fn get_subkey(&self) -> Self::SubKey {
        self.key
    }
}

// NOTE: Removing reth_codec and manually encode subkey
// and compress second part of the value. If we have compression
// over whole value (Even SubKey) that would mess up fetching of values with seek_by_key_subkey
#[cfg(any(test, feature = "reth-codec"))]
impl reth_codecs::Compact for StorageBeforeTx {
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        // Fixed-width prefix: 20-byte address then 32-byte slot key (52 bytes
        // total), followed by the compacted value.
        buf.put_slice(self.address.as_slice());
        buf.put_slice(&self.key[..]);
        self.value.to_compact(buf) + 52
    }

    fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
        // Mirror of `to_compact`: read the fixed 52-byte prefix, then decode
        // the value from the remaining `len - 52` compact bytes.
        let address = Address::from_slice(&buf[..20]);
        let key = B256::from_slice(&buf[20..52]);
        let (value, out) = U256::from_compact(&buf[52..], len - 52);
        (Self { address, key, value }, out)
    }
}

View File

@@ -4,7 +4,7 @@ use crate::{
HeaderTerminalDifficulties,
};
use alloy_primitives::{Address, BlockHash};
use reth_db_api::{table::Table, AccountChangeSets};
use reth_db_api::{models::StorageBeforeTx, table::Table, AccountChangeSets};
// HEADER MASKS
add_static_file_mask! {
@@ -54,3 +54,9 @@ add_static_file_mask! {
#[doc = "Mask for selecting a single changeset from `AccountChangesets` static file segment"]
AccountChangesetMask, <AccountChangeSets as Table>::Value, 0b1
}
// STORAGE CHANGESET MASKS
add_static_file_mask! {
#[doc = "Mask for selecting a single changeset from `StorageChangesets` static file segment"]
StorageChangesetMask, StorageBeforeTx, 0b1
}

View File

@@ -1,10 +1,12 @@
//! Account changeset iteration support for walking through historical account state changes in
//! Account/storage changeset iteration support for walking through historical state changes in
//! static files.
use crate::ProviderResult;
use alloy_primitives::BlockNumber;
use reth_db::models::AccountBeforeTx;
use reth_storage_api::ChangeSetReader;
use reth_db_api::models::BlockNumberAddress;
use reth_primitives_traits::StorageEntry;
use reth_storage_api::{ChangeSetReader, StorageChangeSetReader};
use std::ops::{Bound, RangeBounds};
/// Iterator that walks account changesets from static files in a block range.
@@ -97,3 +99,78 @@ where
None
}
}
/// Iterator that walks storage changesets from static files in a block range.
#[derive(Debug)]
pub struct StaticFileStorageChangesetWalker<P> {
    /// Static file provider
    provider: P,
    /// End block (exclusive). `None` means iterate until exhausted.
    end_block: Option<BlockNumber>,
    /// Current block being processed
    current_block: BlockNumber,
    /// Changesets for current block
    current_changesets: Vec<(BlockNumberAddress, StorageEntry)>,
    /// Index within current block's changesets
    changeset_index: usize,
}

impl<P> StaticFileStorageChangesetWalker<P> {
    /// Create a new static file storage changeset walker.
    ///
    /// `range` is any block range; the start bound is normalized to the first
    /// block to visit and the end bound to an exclusive upper bound
    /// (`None` for an unbounded range).
    pub fn new(provider: P, range: impl RangeBounds<BlockNumber>) -> Self {
        let start = match range.start_bound() {
            Bound::Included(&n) => n,
            Bound::Excluded(&n) => n + 1,
            Bound::Unbounded => 0,
        };
        let end_block = match range.end_bound() {
            Bound::Included(&n) => Some(n + 1),
            Bound::Excluded(&n) => Some(n),
            Bound::Unbounded => None,
        };
        Self {
            provider,
            end_block,
            current_block: start,
            current_changesets: Vec::new(),
            changeset_index: 0,
        }
    }
}

impl<P> Iterator for StaticFileStorageChangesetWalker<P>
where
    P: StorageChangeSetReader,
{
    type Item = ProviderResult<(BlockNumberAddress, StorageEntry)>;

    fn next(&mut self) -> Option<Self::Item> {
        // Drain any buffered entries for the current block first.
        if let Some(changeset) = self.current_changesets.get(self.changeset_index).copied() {
            self.changeset_index += 1;
            return Some(Ok(changeset));
        }
        // The buffered block is exhausted: drop the stale buffer and advance.
        // Clearing the buffer here (rather than only bumping the block) is
        // required — otherwise, after the `Err` arm below returns with a stale
        // non-empty buffer, this branch would fire again on the next call and
        // increment `current_block` a second time, silently skipping a block.
        if !self.current_changesets.is_empty() {
            self.current_changesets.clear();
            self.changeset_index = 0;
            self.current_block += 1;
        }
        // Scan forward until a block with changesets is found or the range ends.
        while self.end_block.is_none_or(|end| self.current_block < end) {
            match self.provider.storage_changeset(self.current_block) {
                Ok(changesets) if !changesets.is_empty() => {
                    // Buffer this block's changesets and hand out the first one.
                    self.current_changesets = changesets;
                    self.changeset_index = 1;
                    return Some(Ok(self.current_changesets[0]));
                }
                // Block has no changesets: keep scanning.
                Ok(_) => self.current_block += 1,
                Err(e) => {
                    // Surface the error, but move past the failing block so a
                    // caller that keeps iterating does not loop on it forever.
                    self.current_block += 1;
                    return Some(Err(e));
                }
            }
        }
        None
    }
}

View File

@@ -17,20 +17,20 @@ use alloy_primitives::{map::HashMap, Address, BlockNumber, TxHash, TxNumber, B25
use rayon::slice::ParallelSliceMut;
use reth_db::{
cursor::{DbCursorRO, DbDupCursorRW},
models::AccountBeforeTx,
models::{AccountBeforeTx, StorageBeforeTx},
static_file::TransactionSenderMask,
table::Value,
transaction::{CursorMutTy, CursorTy, DbTx, DbTxMut, DupCursorMutTy, DupCursorTy},
};
use reth_db_api::{
cursor::DbCursorRW,
models::{storage_sharded_key::StorageShardedKey, ShardedKey},
models::{storage_sharded_key::StorageShardedKey, BlockNumberAddress, ShardedKey},
tables,
tables::BlockNumberList,
};
use reth_errors::ProviderError;
use reth_node_types::NodePrimitives;
use reth_primitives_traits::ReceiptTy;
use reth_primitives_traits::{ReceiptTy, StorageEntry};
use reth_static_file_types::StaticFileSegment;
use reth_storage_api::{ChangeSetReader, DBProvider, NodePrimitivesProvider, StorageSettingsCache};
use reth_storage_errors::provider::ProviderResult;
@@ -171,6 +171,27 @@ impl<'a> EitherWriter<'a, (), ()> {
}
}
/// Creates a new [`EitherWriter`] for storage changesets based on storage settings.
pub fn new_storage_changesets<P>(
    provider: &'a P,
    block_number: BlockNumber,
) -> ProviderResult<DupEitherWriterTy<'a, P, tables::StorageChangeSets>>
where
    P: DBProvider + NodePrimitivesProvider + StorageSettingsCache + StaticFileProviderFactory,
    P::Tx: DbTxMut,
{
    // Storage changesets go to static files only when the setting is enabled; otherwise
    // they are written through a dup-sorted database cursor.
    if !provider.cached_storage_settings().storage_changesets_in_static_files {
        return Ok(EitherWriter::Database(
            provider.tx_ref().cursor_dup_write::<tables::StorageChangeSets>()?,
        ))
    }
    Ok(EitherWriter::StaticFile(
        provider.get_static_file_writer(block_number, StaticFileSegment::StorageChangeSets)?,
    ))
}
/// Returns the destination for writing receipts.
///
/// The rules are as follows:
@@ -208,6 +229,19 @@ impl<'a> EitherWriter<'a, (), ()> {
}
}
/// Returns the destination for writing storage changesets.
///
/// This determines the destination based solely on storage settings. It delegates to
/// [`EitherWriterDestination::storage_changesets`] so the
/// `storage_changesets_in_static_files` setting is interpreted in exactly one place.
pub fn storage_changesets_destination<P: DBProvider + StorageSettingsCache>(
    provider: &P,
) -> EitherWriterDestination {
    EitherWriterDestination::storage_changesets(provider)
}
/// Creates a new [`EitherWriter`] for storages history based on storage settings.
pub fn new_storages_history<P>(
provider: &P,
@@ -651,6 +685,41 @@ where
}
}
impl<'a, CURSOR, N: NodePrimitives> EitherWriter<'a, CURSOR, N>
where
CURSOR: DbDupCursorRW<tables::StorageChangeSets>,
{
/// Append storage changeset for a block.
///
/// NOTE: This _sorts_ the changesets by address and storage key before appending.
pub fn append_storage_changeset(
&mut self,
block_number: BlockNumber,
mut changeset: Vec<StorageBeforeTx>,
) -> ProviderResult<()> {
changeset.par_sort_by_key(|change| (change.address, change.key));
match self {
Self::Database(cursor) => {
for change in changeset {
let storage_id = BlockNumberAddress((block_number, change.address));
cursor.append_dup(
storage_id,
StorageEntry { key: change.key, value: change.value },
)?;
}
}
Self::StaticFile(writer) => {
writer.append_storage_changeset(changeset, block_number)?;
}
#[cfg(all(unix, feature = "rocksdb"))]
Self::RocksDB(_) => return Err(ProviderError::UnsupportedProvider),
}
Ok(())
}
}
/// Represents a source for reading data, either from database, static files, or `RocksDB`.
#[derive(Debug, Display)]
pub enum EitherReader<'a, CURSOR, N> {
@@ -987,6 +1056,19 @@ impl EitherWriterDestination {
Self::Database
}
}
/// Returns the destination for writing storage changesets based on storage settings.
pub fn storage_changesets<P>(provider: &P) -> Self
where
    P: StorageSettingsCache,
{
    // Static files are used only when storage changesets are explicitly enabled there.
    match provider.cached_storage_settings().storage_changesets_in_static_files {
        true => Self::StaticFile,
        false => Self::Database,
    }
}
}
#[cfg(test)]

View File

@@ -711,6 +711,26 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for BlockchainProvider<N> {
) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
self.consistent_provider()?.storage_changeset(block_number)
}
/// Looks up the pre-state value of `storage_key` for `address` at `block_number`,
/// delegating to a consistent view that merges database and in-memory state.
fn get_storage_before_block(
    &self,
    block_number: BlockNumber,
    address: Address,
    storage_key: B256,
) -> ProviderResult<Option<StorageEntry>> {
    self.consistent_provider()?.get_storage_before_block(block_number, address, storage_key)
}
/// Collects storage changesets over an inclusive block range, delegating to a
/// consistent view that merges database and in-memory state.
fn storage_changesets_range(
    &self,
    range: RangeInclusive<BlockNumber>,
) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
    self.consistent_provider()?.storage_changesets_range(range)
}
/// Total number of storage changeset entries, delegating to a consistent view
/// that merges database and in-memory state.
fn storage_changeset_count(&self) -> ProviderResult<usize> {
    self.consistent_provider()?.storage_changeset_count()
}
}
impl<N: ProviderNodeTypes> ChangeSetReader for BlockchainProvider<N> {

View File

@@ -1347,6 +1347,138 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for ConsistentProvider<N> {
self.storage_provider.storage_changeset(block_number)
}
}
/// Returns the value `storage_key` held for `address` before `block_number` executed,
/// reading from the in-memory chain when the block has not been persisted yet and
/// falling back to the storage provider otherwise.
fn get_storage_before_block(
    &self,
    block_number: BlockNumber,
    address: Address,
    storage_key: B256,
) -> ProviderResult<Option<StorageEntry>> {
    // If the block is still part of the in-memory canonical chain, derive the entry
    // from that block's execution reverts instead of touching persistent storage.
    if let Some(state) =
        self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into()))
    {
        let changeset = state
            .block_ref()
            .execution_output
            .state
            .reverts
            .clone()
            .to_plain_state_reverts()
            .storage
            .into_iter()
            .flatten()
            .find_map(|revert: PlainStorageRevert| {
                // Only the reverts of the requested account are relevant.
                if revert.address != address {
                    return None
                }
                // Match the requested slot and surface its pre-block value.
                revert.storage_revert.into_iter().find_map(|(key, value)| {
                    let key = key.into();
                    (key == storage_key)
                        .then(|| StorageEntry { key, value: value.to_previous_value() })
                })
            });
        Ok(changeset)
    } else {
        // Historical block: verify storage history has not been pruned past it.
        // `unwrap_or(true)` treats a missing checkpoint as "history fully available".
        let storage_history_exists = self
            .storage_provider
            .get_prune_checkpoint(PruneSegment::StorageHistory)?
            .and_then(|checkpoint| {
                // Available only when the requested block is newer than the prune point.
                checkpoint.block_number.map(|checkpoint| block_number > checkpoint)
            })
            .unwrap_or(true);

        if !storage_history_exists {
            return Err(ProviderError::StateAtBlockPruned(block_number))
        }

        self.storage_provider.get_storage_before_block(block_number, address, storage_key)
    }
}
/// Collects storage changesets over `range`, merging in-memory (unpersisted) blocks
/// with persisted database/static-file entries, sorted by block number.
fn storage_changesets_range(
    &self,
    range: RangeInclusive<BlockNumber>,
) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
    // Normalize to a half-open range: `start..end` with `end` exclusive.
    let range = to_range(range);
    let mut changesets = Vec::new();

    let database_start = range.start;
    let mut database_end = range.end;

    if let Some(head_block) = &self.head_block {
        // Persisted data only covers up to the in-memory chain's anchor; everything
        // above the anchor is served from the in-memory chain below.
        // NOTE(review): this clamps to the anchor regardless of `range.end`, and the
        // loop below appends every in-memory block without checking that its number
        // falls inside `range` — TODO confirm callers always request ranges that end
        // at the tip (the unwind paths in this commit do).
        database_end = head_block.anchor().number;
        let chain = head_block.chain().collect::<Vec<_>>();
        for state in chain {
            // Expand each block's execution reverts into (block, address) keyed entries.
            let block_changesets = state
                .block_ref()
                .execution_output
                .state
                .reverts
                .clone()
                .to_plain_state_reverts()
                .storage
                .into_iter()
                .flatten()
                .flat_map(|revert: PlainStorageRevert| {
                    revert.storage_revert.into_iter().map(move |(key, value)| {
                        (
                            BlockNumberAddress((state.number(), revert.address)),
                            StorageEntry { key: key.into(), value: value.to_previous_value() },
                        )
                    })
                });
            changesets.extend(block_changesets);
        }
    }

    if database_start < database_end {
        // Ensure the persisted portion of the range has not been pruned away.
        let storage_history_exists = self
            .storage_provider
            .get_prune_checkpoint(PruneSegment::StorageHistory)?
            .and_then(|checkpoint| {
                checkpoint.block_number.map(|checkpoint| database_start > checkpoint)
            })
            .unwrap_or(true);

        if !storage_history_exists {
            return Err(ProviderError::StateAtBlockPruned(database_start))
        }

        // `database_end` is exclusive; convert back to the provider's inclusive range.
        let db_changesets = self
            .storage_provider
            .storage_changesets_range(database_start..=database_end - 1)?;
        changesets.extend(db_changesets);
    }

    // Stable sort keeps the per-block (address-sorted) database order intact.
    changesets.sort_by_key(|(block_address, _)| block_address.block_number());
    Ok(changesets)
}
/// Total number of storage changeset entries: in-memory (unpersisted) blocks plus
/// everything already persisted by the underlying storage provider.
fn storage_changeset_count(&self) -> ProviderResult<usize> {
    let mut count = 0;
    if let Some(head_block) = &self.head_block {
        // Count every reverted slot of every in-memory block.
        for state in head_block.chain() {
            count += state
                .block_ref()
                .execution_output
                .state
                .reverts
                .clone()
                .to_plain_state_reverts()
                .storage
                .into_iter()
                .flatten()
                .map(|revert: PlainStorageRevert| revert.storage_revert.len())
                .sum::<usize>();
        }
    }
    // Add the persisted entries (database or static files, per storage settings).
    count += self.storage_provider.storage_changeset_count()?;
    Ok(count)
}
}
impl<N: ProviderNodeTypes> ChangeSetReader for ConsistentProvider<N> {

View File

@@ -40,7 +40,8 @@ use reth_db_api::{
database::Database,
models::{
sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress,
BlockNumberHashedAddress, ShardedKey, StorageSettings, StoredBlockBodyIndices,
BlockNumberHashedAddress, ShardedKey, StorageBeforeTx, StorageSettings,
StoredBlockBodyIndices,
},
table::Table,
tables,
@@ -463,6 +464,8 @@ impl<TX: DbTx + DbTxMut + 'static, N: NodeTypesForProvider> DatabaseProvider<TX,
EitherWriter::receipts_destination(self).is_static_file(),
write_account_changesets: save_mode.with_state() &&
EitherWriterDestination::account_changesets(self).is_static_file(),
write_storage_changesets: save_mode.with_state() &&
EitherWriterDestination::storage_changesets(self).is_static_file(),
tip,
receipts_prune_mode: self.prune_modes.receipts,
// Receipts are prunable if no receipts exist in SF yet and within pruning distance
@@ -1344,13 +1347,58 @@ impl<TX: DbTx, N: NodeTypes> StorageChangeSetReader for DatabaseProvider<TX, N>
&self,
block_number: BlockNumber,
) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
let range = block_number..=block_number;
let storage_range = BlockNumberAddress::range(range);
self.tx
.cursor_dup_read::<tables::StorageChangeSets>()?
.walk_range(storage_range)?
.map(|result| -> ProviderResult<_> { Ok(result?) })
.collect()
if self.cached_storage_settings().storage_changesets_in_static_files {
self.static_file_provider.storage_changeset(block_number)
} else {
let range = block_number..=block_number;
let storage_range = BlockNumberAddress::range(range);
self.tx
.cursor_dup_read::<tables::StorageChangeSets>()?
.walk_range(storage_range)?
.map(|result| -> ProviderResult<_> { Ok(result?) })
.collect()
}
}
/// Reads the pre-state of `storage_key` at `block_number` from whichever backend
/// currently owns storage changesets per the cached storage settings.
fn get_storage_before_block(
    &self,
    block_number: BlockNumber,
    address: Address,
    storage_key: B256,
) -> ProviderResult<Option<StorageEntry>> {
    // Static-file-backed changesets take precedence when enabled.
    if self.cached_storage_settings().storage_changesets_in_static_files {
        return self
            .static_file_provider
            .get_storage_before_block(block_number, address, storage_key)
    }
    // A dup-sorted seek may land on a later subkey, so confirm the exact slot matched.
    let entry = self
        .tx
        .cursor_dup_read::<tables::StorageChangeSets>()?
        .seek_by_key_subkey(BlockNumberAddress((block_number, address)), storage_key)?;
    Ok(entry.filter(|entry| entry.key == storage_key))
}
/// Collects storage changesets over `range` from static files or the database,
/// depending on the cached storage settings.
fn storage_changesets_range(
    &self,
    range: RangeInclusive<BlockNumber>,
) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
    if self.cached_storage_settings().storage_changesets_in_static_files {
        return self.static_file_provider.storage_changesets_range(range)
    }
    // Walk the dup-sorted table over the (block, address) keyed range.
    let mut cursor = self.tx.cursor_dup_read::<tables::StorageChangeSets>()?;
    cursor
        .walk_range(BlockNumberAddress::range(range))?
        .map(|result| -> ProviderResult<_> { Ok(result?) })
        .collect()
}
/// Total storage changeset entry count from whichever backend owns the data.
fn storage_changeset_count(&self) -> ProviderResult<usize> {
    match self.cached_storage_settings().storage_changesets_in_static_files {
        true => self.static_file_provider.storage_changeset_count(),
        false => Ok(self.tx.entries::<tables::StorageChangeSets>()?),
    }
}
}
@@ -2072,38 +2120,67 @@ impl<TX: DbTx + 'static, N: NodeTypes> StorageReader for DatabaseProvider<TX, N>
&self,
range: RangeInclusive<BlockNumber>,
) -> ProviderResult<BTreeMap<Address, BTreeSet<B256>>> {
self.tx
.cursor_read::<tables::StorageChangeSets>()?
.walk_range(BlockNumberAddress::range(range))?
// fold all storages and save its old state so we can remove it from HashedStorage
// it is needed as it is dup table.
.try_fold(BTreeMap::new(), |mut accounts: BTreeMap<Address, BTreeSet<B256>>, entry| {
let (BlockNumberAddress((_, address)), storage_entry) = entry?;
accounts.entry(address).or_default().insert(storage_entry.key);
Ok(accounts)
})
if self.cached_storage_settings().storage_changesets_in_static_files {
self.storage_changesets_range(range)?.into_iter().try_fold(
BTreeMap::new(),
|mut accounts: BTreeMap<Address, BTreeSet<B256>>, entry| {
let (BlockNumberAddress((_, address)), storage_entry) = entry;
accounts.entry(address).or_default().insert(storage_entry.key);
Ok(accounts)
},
)
} else {
self.tx
.cursor_read::<tables::StorageChangeSets>()?
.walk_range(BlockNumberAddress::range(range))?
// fold all storages and save its old state so we can remove it from HashedStorage
// it is needed as it is dup table.
.try_fold(
BTreeMap::new(),
|mut accounts: BTreeMap<Address, BTreeSet<B256>>, entry| {
let (BlockNumberAddress((_, address)), storage_entry) = entry?;
accounts.entry(address).or_default().insert(storage_entry.key);
Ok(accounts)
},
)
}
}
fn changed_storages_and_blocks_with_range(
&self,
range: RangeInclusive<BlockNumber>,
) -> ProviderResult<BTreeMap<(Address, B256), Vec<u64>>> {
let mut changeset_cursor = self.tx.cursor_read::<tables::StorageChangeSets>()?;
let storage_changeset_lists =
changeset_cursor.walk_range(BlockNumberAddress::range(range))?.try_fold(
if self.cached_storage_settings().storage_changesets_in_static_files {
self.storage_changesets_range(range)?.into_iter().try_fold(
BTreeMap::new(),
|mut storages: BTreeMap<(Address, B256), Vec<u64>>, entry| -> ProviderResult<_> {
let (index, storage) = entry?;
|mut storages: BTreeMap<(Address, B256), Vec<u64>>, (index, storage)| {
storages
.entry((index.address(), storage.key))
.or_default()
.push(index.block_number());
Ok(storages)
},
)?;
)
} else {
let mut changeset_cursor = self.tx.cursor_read::<tables::StorageChangeSets>()?;
Ok(storage_changeset_lists)
let storage_changeset_lists =
changeset_cursor.walk_range(BlockNumberAddress::range(range))?.try_fold(
BTreeMap::new(),
|mut storages: BTreeMap<(Address, B256), Vec<u64>>,
entry|
-> ProviderResult<_> {
let (index, storage) = entry?;
storages
.entry((index.address(), storage.key))
.or_default()
.push(index.block_number());
Ok(storages)
},
)?;
Ok(storage_changeset_lists)
}
}
}
@@ -2226,17 +2303,16 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
// Write storage changes
tracing::trace!("Writing storage changes");
let mut storages_cursor = self.tx_ref().cursor_dup_write::<tables::PlainStorageState>()?;
let mut storage_changeset_cursor =
self.tx_ref().cursor_dup_write::<tables::StorageChangeSets>()?;
for (block_index, mut storage_changes) in reverts.storage.into_iter().enumerate() {
let block_number = first_block + block_index as BlockNumber;
tracing::trace!(block_number, "Writing block change");
// sort changes by address.
storage_changes.par_sort_unstable_by_key(|a| a.address);
let total_changes =
storage_changes.iter().map(|change| change.storage_revert.len()).sum();
let mut changeset = Vec::with_capacity(total_changes);
for PlainStorageRevert { address, wiped, storage_revert } in storage_changes {
let storage_id = BlockNumberAddress((block_number, address));
let mut storage = storage_revert
.into_iter()
.map(|(k, v)| (B256::new(k.to_be_bytes()), v))
@@ -2264,9 +2340,13 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
tracing::trace!(?address, ?storage, "Writing storage reverts");
for (key, value) in StorageRevertsIter::new(storage, wiped_storage) {
storage_changeset_cursor.append_dup(storage_id, StorageEntry { key, value })?;
changeset.push(StorageBeforeTx { address, key, value });
}
}
let mut storage_changesets_writer =
EitherWriter::new_storage_changesets(self, block_number)?;
storage_changesets_writer.append_storage_changeset(block_number, changeset)?;
}
if !config.write_account_changesets {
@@ -2427,8 +2507,19 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
block_bodies.first().expect("already checked if there are blocks").first_tx_num();
let storage_range = BlockNumberAddress::range(range.clone());
let storage_changeset = self.take::<tables::StorageChangeSets>(storage_range)?;
let storage_changeset = if let Some(_highest_block) = self
.static_file_provider
.get_highest_static_file_block(StaticFileSegment::StorageChangeSets) &&
self.cached_storage_settings().storage_changesets_in_static_files
{
let changesets = self.storage_changesets_range(range.clone())?;
let mut changeset_writer =
self.static_file_provider.latest_writer(StaticFileSegment::StorageChangeSets)?;
changeset_writer.prune_storage_changesets(block)?;
changesets
} else {
self.take::<tables::StorageChangeSets>(storage_range)?
};
let account_changeset = self.take::<tables::AccountChangeSets>(range)?;
// This is not working for blocks that are not at tip. as plain state is not the last
@@ -2523,8 +2614,19 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
block_bodies.last().expect("already checked if there are blocks").last_tx_num();
let storage_range = BlockNumberAddress::range(range.clone());
let storage_changeset = self.take::<tables::StorageChangeSets>(storage_range)?;
let storage_changeset = if let Some(highest_block) = self
.static_file_provider
.get_highest_static_file_block(StaticFileSegment::StorageChangeSets) &&
self.cached_storage_settings().storage_changesets_in_static_files
{
let changesets = self.storage_changesets_range(block + 1..=highest_block)?;
let mut changeset_writer =
self.static_file_provider.latest_writer(StaticFileSegment::StorageChangeSets)?;
changeset_writer.prune_storage_changesets(block)?;
changesets
} else {
self.take::<tables::StorageChangeSets>(storage_range)?
};
// This is not working for blocks that are not at tip. as plain state is not the last
// state of end range. We should rename the functions or add support to access

View File

@@ -14,7 +14,7 @@ use reth_db_api::{
use reth_primitives_traits::{Account, Bytecode};
use reth_storage_api::{
BlockNumReader, BytecodeReader, DBProvider, NodePrimitivesProvider, StateProofProvider,
StorageRootProvider, StorageSettingsCache,
StorageChangeSetReader, StorageRootProvider, StorageSettingsCache,
};
use reth_storage_errors::provider::ProviderResult;
use reth_trie::{
@@ -26,8 +26,8 @@ use reth_trie::{
TrieInputSorted,
};
use reth_trie_db::{
DatabaseHashedPostState, DatabaseHashedStorage, DatabaseProof, DatabaseStateRoot,
DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness,
hashed_storage_from_reverts_with_provider, DatabaseHashedPostState, DatabaseProof,
DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness,
};
use std::fmt::Debug;
@@ -109,7 +109,7 @@ pub struct HistoricalStateProviderRef<'b, Provider> {
lowest_available_blocks: LowestAvailableBlocks,
}
impl<'b, Provider: DBProvider + ChangeSetReader + BlockNumReader>
impl<'b, Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + BlockNumReader>
HistoricalStateProviderRef<'b, Provider>
{
/// Create new `StateProvider` for historical block number
@@ -210,7 +210,7 @@ impl<'b, Provider: DBProvider + ChangeSetReader + BlockNumReader>
);
}
Ok(HashedStorage::from_reverts(self.tx(), address, self.block_number)?)
hashed_storage_from_reverts_with_provider(self.provider, address, self.block_number)
}
/// Set the lowest block number at which the account history is available.
@@ -242,6 +242,7 @@ impl<
Provider: DBProvider
+ BlockNumReader
+ ChangeSetReader
+ StorageChangeSetReader
+ StorageSettingsCache
+ RocksDBProviderFactory
+ NodePrimitivesProvider,
@@ -285,8 +286,8 @@ impl<Provider: DBProvider + BlockNumReader + BlockHashReader> BlockHashReader
}
}
impl<Provider: DBProvider + ChangeSetReader + BlockNumReader> StateRootProvider
for HistoricalStateProviderRef<'_, Provider>
impl<Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + BlockNumReader>
StateRootProvider for HistoricalStateProviderRef<'_, Provider>
{
fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult<B256> {
let mut revert_state = self.revert_state()?;
@@ -322,8 +323,8 @@ impl<Provider: DBProvider + ChangeSetReader + BlockNumReader> StateRootProvider
}
}
impl<Provider: DBProvider + ChangeSetReader + BlockNumReader> StorageRootProvider
for HistoricalStateProviderRef<'_, Provider>
impl<Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + BlockNumReader>
StorageRootProvider for HistoricalStateProviderRef<'_, Provider>
{
fn storage_root(
&self,
@@ -361,8 +362,8 @@ impl<Provider: DBProvider + ChangeSetReader + BlockNumReader> StorageRootProvide
}
}
impl<Provider: DBProvider + ChangeSetReader + BlockNumReader> StateProofProvider
for HistoricalStateProviderRef<'_, Provider>
impl<Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + BlockNumReader>
StateProofProvider for HistoricalStateProviderRef<'_, Provider>
{
/// Get account and storage proofs.
fn proof(
@@ -405,6 +406,7 @@ impl<
+ BlockNumReader
+ BlockHashReader
+ ChangeSetReader
+ StorageChangeSetReader
+ StorageSettingsCache
+ RocksDBProviderFactory
+ NodePrimitivesProvider,
@@ -418,18 +420,16 @@ impl<
) -> ProviderResult<Option<StorageValue>> {
match self.storage_history_lookup(address, storage_key)? {
HistoryInfo::NotYetWritten => Ok(None),
HistoryInfo::InChangeset(changeset_block_number) => Ok(Some(
self.tx()
.cursor_dup_read::<tables::StorageChangeSets>()?
.seek_by_key_subkey((changeset_block_number, address).into(), storage_key)?
.filter(|entry| entry.key == storage_key)
.ok_or_else(|| ProviderError::StorageChangesetNotFound {
block_number: changeset_block_number,
address,
storage_key: Box::new(storage_key),
})?
.value,
)),
HistoryInfo::InChangeset(changeset_block_number) => self
.provider
.get_storage_before_block(changeset_block_number, address, storage_key)?
.ok_or_else(|| ProviderError::StorageChangesetNotFound {
block_number: changeset_block_number,
address,
storage_key: Box::new(storage_key),
})
.map(|entry| entry.value)
.map(Some),
HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => Ok(self
.tx()
.cursor_dup_read::<tables::PlainStorageState>()?
@@ -462,7 +462,9 @@ pub struct HistoricalStateProvider<Provider> {
lowest_available_blocks: LowestAvailableBlocks,
}
impl<Provider: DBProvider + ChangeSetReader + BlockNumReader> HistoricalStateProvider<Provider> {
impl<Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + BlockNumReader>
HistoricalStateProvider<Provider>
{
/// Create new `StateProvider` for historical block number
pub fn new(provider: Provider, block_number: BlockNumber) -> Self {
Self { provider, block_number, lowest_available_blocks: Default::default() }
@@ -498,7 +500,7 @@ impl<Provider: DBProvider + ChangeSetReader + BlockNumReader> HistoricalStatePro
}
// Delegates all provider impls to [HistoricalStateProviderRef]
reth_storage_api::macros::delegate_provider_impls!(HistoricalStateProvider<Provider> where [Provider: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader + StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider]);
reth_storage_api::macros::delegate_provider_impls!(HistoricalStateProvider<Provider> where [Provider: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader + StorageChangeSetReader + StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider]);
/// Lowest blocks at which different parts of the state are available.
/// They may be [Some] if pruning is enabled.
@@ -631,7 +633,7 @@ mod tests {
use reth_primitives_traits::{Account, StorageEntry};
use reth_storage_api::{
BlockHashReader, BlockNumReader, ChangeSetReader, DBProvider, DatabaseProviderFactory,
NodePrimitivesProvider, StorageSettingsCache,
NodePrimitivesProvider, StorageChangeSetReader, StorageSettingsCache,
};
use reth_storage_errors::provider::ProviderError;
@@ -647,6 +649,7 @@ mod tests {
+ BlockNumReader
+ BlockHashReader
+ ChangeSetReader
+ StorageChangeSetReader
+ StorageSettingsCache
+ RocksDBProviderFactory
+ NodePrimitivesProvider,

View File

@@ -10,6 +10,7 @@ use reth_stages_types::StageId;
use reth_storage_api::{
BlockNumReader, ChangeSetReader, DBProvider, DatabaseProviderFactory,
DatabaseProviderROFactory, PruneCheckpointReader, StageCheckpointReader,
StorageChangeSetReader,
};
use reth_trie::{
hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory},
@@ -196,6 +197,7 @@ where
F::Provider: StageCheckpointReader
+ PruneCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ DBProvider
+ BlockNumReader,
{
@@ -446,7 +448,11 @@ where
impl<F> DatabaseProviderROFactory for OverlayStateProviderFactory<F>
where
F: DatabaseProviderFactory,
F::Provider: StageCheckpointReader + PruneCheckpointReader + BlockNumReader + ChangeSetReader,
F::Provider: StageCheckpointReader
+ PruneCheckpointReader
+ BlockNumReader
+ ChangeSetReader
+ StorageChangeSetReader,
{
type Provider = OverlayStateProvider<F::Provider>;

View File

@@ -3,10 +3,10 @@ use super::{
StaticFileJarProvider, StaticFileProviderRW, StaticFileProviderRWRefMut,
};
use crate::{
changeset_walker::StaticFileAccountChangesetWalker, to_range, BlockHashReader, BlockNumReader,
BlockReader, BlockSource, EitherWriter, EitherWriterDestination, HeaderProvider,
ReceiptProvider, StageCheckpointReader, StatsReader, TransactionVariant, TransactionsProvider,
TransactionsProviderExt,
changeset_walker::{StaticFileAccountChangesetWalker, StaticFileStorageChangesetWalker},
to_range, BlockHashReader, BlockNumReader, BlockReader, BlockSource, EitherWriter,
EitherWriterDestination, HeaderProvider, ReceiptProvider, StageCheckpointReader, StatsReader,
TransactionVariant, TransactionsProvider, TransactionsProviderExt,
};
use alloy_consensus::{transaction::TransactionMeta, Header};
use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber};
@@ -20,12 +20,12 @@ use reth_db::{
lockfile::StorageLock,
static_file::{
iter_static_files, BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask,
StaticFileCursor, TransactionMask, TransactionSenderMask,
StaticFileCursor, StorageChangesetMask, TransactionMask, TransactionSenderMask,
},
};
use reth_db_api::{
cursor::DbCursorRO,
models::{AccountBeforeTx, StoredBlockBodyIndices},
models::{AccountBeforeTx, BlockNumberAddress, StorageBeforeTx, StoredBlockBodyIndices},
table::{Decompress, Table, Value},
tables,
transaction::DbTx,
@@ -35,6 +35,7 @@ use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION};
use reth_node_types::NodePrimitives;
use reth_primitives_traits::{
AlloyBlockHeader as _, BlockBody as _, RecoveredBlock, SealedHeader, SignedTransaction,
StorageEntry,
};
use reth_stages_types::{PipelineTarget, StageId};
use reth_static_file_types::{
@@ -42,7 +43,8 @@ use reth_static_file_types::{
StaticFileSegment, DEFAULT_BLOCKS_PER_STATIC_FILE,
};
use reth_storage_api::{
BlockBodyIndicesProvider, ChangeSetReader, DBProvider, StorageSettingsCache,
BlockBodyIndicesProvider, ChangeSetReader, DBProvider, StorageChangeSetReader,
StorageSettingsCache,
};
use reth_storage_errors::provider::{ProviderError, ProviderResult, StaticFileWriterError};
use std::{
@@ -92,6 +94,8 @@ pub struct StaticFileWriteCtx {
pub write_receipts: bool,
/// Whether account changesets should be written to static files.
pub write_account_changesets: bool,
/// Whether storage changesets should be written to static files.
pub write_storage_changesets: bool,
/// The current chain tip block number (for pruning).
pub tip: BlockNumber,
/// The prune mode for receipts, if any.
@@ -622,6 +626,35 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
Ok(())
}
/// Writes storage changesets for all blocks to the static file segment.
#[instrument(level = "debug", target = "providers::db", skip_all)]
fn write_storage_changesets(
    w: &mut StaticFileProviderRWRefMut<'_, N>,
    blocks: &[ExecutedBlock<N>],
) -> ProviderResult<()> {
    for block in blocks {
        let block_number = block.recovered_block().number();
        // Convert the bundle-state reverts into plain per-slot "previous value" form.
        let reverts = block.execution_outcome().state.reverts.to_plain_state_reverts();
        // NOTE(review): `reverts.storage` is presumably a single group per executed
        // block here; if it ever held several, each group would be appended under the
        // same `block_number` — confirm the writer tolerates repeated appends.
        for storage_block_reverts in reverts.storage {
            let changeset = storage_block_reverts
                .into_iter()
                .flat_map(|revert| {
                    // One `StorageBeforeTx` per reverted slot of the account.
                    revert.storage_revert.into_iter().map(move |(key, revert_to_slot)| {
                        StorageBeforeTx {
                            address: revert.address,
                            key: B256::new(key.to_be_bytes()),
                            value: revert_to_slot.to_previous_value(),
                        }
                    })
                })
                .collect::<Vec<_>>();
            w.append_storage_changeset(changeset, block_number)?;
        }
    }
    Ok(())
}
/// Spawns a scoped thread that writes to a static file segment using the provided closure.
///
/// The closure receives a mutable reference to the segment writer. After the closure completes,
@@ -697,6 +730,15 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
)
});
let h_storage_changesets = ctx.write_storage_changesets.then(|| {
self.spawn_segment_writer(
s,
StaticFileSegment::StorageChangeSets,
first_block_number,
|w| Self::write_storage_changesets(w, blocks),
)
});
h_headers.join().map_err(|_| StaticFileWriterError::ThreadPanic("headers"))??;
h_txs.join().map_err(|_| StaticFileWriterError::ThreadPanic("transactions"))??;
if let Some(h) = h_senders {
@@ -709,6 +751,10 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
h.join()
.map_err(|_| StaticFileWriterError::ThreadPanic("account_changesets"))??;
}
if let Some(h) = h_storage_changesets {
h.join()
.map_err(|_| StaticFileWriterError::ThreadPanic("storage_changesets"))??;
}
Ok(())
})
}
@@ -1381,6 +1427,13 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
highest_tx,
highest_block,
)?,
StaticFileSegment::StorageChangeSets => self
.ensure_changeset_invariants_by_block::<_, tables::StorageChangeSets, _>(
provider,
segment,
highest_block,
|key| key.block_number(),
)?,
} {
debug!(target: "reth::providers::static_file", ?segment, unwind_target=unwind, "Invariants check returned unwind target");
update_unwind_target(unwind);
@@ -1462,6 +1515,13 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
}
true
}
StaticFileSegment::StorageChangeSets => {
if EitherWriter::storage_changesets_destination(provider).is_database() {
debug!(target: "reth::providers::static_file", ?segment, "Skipping storage changesets segment: changesets stored in database");
return false
}
true
}
}
}
@@ -1594,9 +1654,9 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
let stage_id = match segment {
StaticFileSegment::Headers => StageId::Headers,
StaticFileSegment::Transactions => StageId::Bodies,
StaticFileSegment::Receipts | StaticFileSegment::AccountChangeSets => {
StageId::Execution
}
StaticFileSegment::Receipts |
StaticFileSegment::AccountChangeSets |
StaticFileSegment::StorageChangeSets => StageId::Execution,
StaticFileSegment::TransactionSenders => StageId::SenderRecovery,
};
let checkpoint_block_number =
@@ -1651,7 +1711,9 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
StaticFileSegment::TransactionSenders => {
writer.prune_transaction_senders(number, checkpoint_block_number)?
}
StaticFileSegment::Headers | StaticFileSegment::AccountChangeSets => {
StaticFileSegment::Headers |
StaticFileSegment::AccountChangeSets |
StaticFileSegment::StorageChangeSets => {
unreachable!()
}
}
@@ -1662,6 +1724,9 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
StaticFileSegment::AccountChangeSets => {
writer.prune_account_changesets(checkpoint_block_number)?;
}
StaticFileSegment::StorageChangeSets => {
writer.prune_storage_changesets(checkpoint_block_number)?;
}
}
debug!(target: "reth::providers::static_file", ?segment, "Committing writer after pruning");
writer.commit()?;
@@ -1672,6 +1737,105 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
Ok(None)
}
/// Checks that the per-block changeset table `T` in the database is consistent with the
/// static files of `segment`, returning a block number to unwind to when they diverge.
///
/// `block_from_key` extracts the block number from a `T` key so the same check works for
/// both account and storage changeset tables.
///
/// Returns `Ok(Some(block))` when the caller should unwind to `block`, `Ok(None)` when no
/// unwind is needed. When the static files are ahead of the stage checkpoint, this prunes
/// the static file segment itself instead of requesting an unwind.
fn ensure_changeset_invariants_by_block<Provider, T, F>(
    &self,
    provider: &Provider,
    segment: StaticFileSegment,
    highest_static_file_block: Option<BlockNumber>,
    block_from_key: F,
) -> ProviderResult<Option<BlockNumber>>
where
    Provider: DBProvider + BlockReader + StageCheckpointReader,
    T: Table,
    F: Fn(&T::Key) -> BlockNumber,
{
    debug!(
        target: "reth::providers::static_file",
        ?segment,
        ?highest_static_file_block,
        "Ensuring changeset invariants"
    );
    let mut db_cursor = provider.tx_ref().cursor_read::<T>()?;
    if let Some((db_first_key, _)) = db_cursor.first()? {
        let db_first_block = block_from_key(&db_first_key);
        // The first database entry must either overlap the static file range or start at
        // exactly the next block. A gap between the two stores means data is missing, so
        // request an unwind back to the static file tip.
        if let Some(highest_block) = highest_static_file_block &&
            !(db_first_block <= highest_block || highest_block + 1 == db_first_block)
        {
            info!(
                target: "reth::providers::static_file",
                ?db_first_block,
                ?highest_block,
                unwind_target = highest_block,
                ?segment,
                "Setting unwind target."
            );
            return Ok(Some(highest_block))
        }
        // If the database already has entries past the static file tip, it is strictly
        // ahead of the static files and nothing needs unwinding or pruning.
        if let Some((db_last_key, _)) = db_cursor.last()? &&
            highest_static_file_block
                .is_none_or(|highest_block| block_from_key(&db_last_key) > highest_block)
        {
            debug!(
                target: "reth::providers::static_file",
                ?segment,
                "Database has entries beyond static files, no unwind needed"
            );
            return Ok(None)
        }
    } else {
        debug!(target: "reth::providers::static_file", ?segment, "No database entries found");
    }
    let highest_static_file_block = highest_static_file_block.unwrap_or_default();
    // Compare against the checkpoint of the stage responsible for producing this segment.
    let stage_id = match segment {
        StaticFileSegment::Headers => StageId::Headers,
        StaticFileSegment::Transactions => StageId::Bodies,
        StaticFileSegment::Receipts |
        StaticFileSegment::AccountChangeSets |
        StaticFileSegment::StorageChangeSets => StageId::Execution,
        StaticFileSegment::TransactionSenders => StageId::SenderRecovery,
    };
    let checkpoint_block_number =
        provider.get_stage_checkpoint(stage_id)?.unwrap_or_default().block_number;
    // Stage checkpoint is ahead of the static files: unwind down to the static file tip.
    if checkpoint_block_number > highest_static_file_block {
        info!(
            target: "reth::providers::static_file",
            checkpoint_block_number,
            unwind_target = highest_static_file_block,
            ?segment,
            "Setting unwind target."
        );
        return Ok(Some(highest_static_file_block))
    }
    // Static files are ahead of the stage checkpoint: prune the segment back down so both
    // stores agree on the tip. Only changeset segments are expected here.
    if checkpoint_block_number < highest_static_file_block {
        info!(
            target: "reth::providers",
            ?segment,
            from = highest_static_file_block,
            to = checkpoint_block_number,
            "Unwinding static file segment."
        );
        let mut writer = self.latest_writer(segment)?;
        match segment {
            StaticFileSegment::AccountChangeSets => {
                writer.prune_account_changesets(checkpoint_block_number)?;
            }
            StaticFileSegment::StorageChangeSets => {
                writer.prune_storage_changesets(checkpoint_block_number)?;
            }
            _ => unreachable!("invalid segment for changeset invariants"),
        }
        writer.commit()?;
    }
    Ok(None)
}
/// Returns the earliest available block number that has not been expired and is still
/// available.
///
@@ -2212,6 +2376,124 @@ impl<N: NodePrimitives> ChangeSetReader for StaticFileProvider<N> {
}
}
// Static-file-backed implementation: storage changesets are located through the per-block
// offsets recorded in each segment's user header.
impl<N: NodePrimitives> StorageChangeSetReader for StaticFileProvider<N> {
    fn storage_changeset(
        &self,
        block_number: BlockNumber,
    ) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
        // A missing static file for the block means there are no changeset rows here
        // (they may still live in the database), so report an empty set instead of erroring.
        let provider = match self.get_segment_provider_for_block(
            StaticFileSegment::StorageChangeSets,
            block_number,
            None,
        ) {
            Ok(provider) => provider,
            Err(ProviderError::MissingStaticFileBlock(_, _)) => return Ok(Vec::new()),
            Err(err) => return Err(err),
        };
        // The segment header records, per block, the row range its changes occupy.
        if let Some(offset) = provider.user_header().changeset_offset(block_number) {
            let mut cursor = provider.cursor()?;
            let mut changeset = Vec::with_capacity(offset.num_changes() as usize);
            for i in offset.changeset_range() {
                if let Some(change) = cursor.get_one::<StorageChangesetMask>(i.into())? {
                    let block_address = BlockNumberAddress((block_number, change.address));
                    let entry = StorageEntry { key: change.key, value: change.value };
                    changeset.push((block_address, entry));
                }
            }
            Ok(changeset)
        } else {
            Ok(Vec::new())
        }
    }
    fn get_storage_before_block(
        &self,
        block_number: BlockNumber,
        address: Address,
        storage_key: B256,
    ) -> ProviderResult<Option<StorageEntry>> {
        // No static file for this block means there are no changeset rows to search.
        let provider = match self.get_segment_provider_for_block(
            StaticFileSegment::StorageChangeSets,
            block_number,
            None,
        ) {
            Ok(provider) => provider,
            Err(ProviderError::MissingStaticFileBlock(_, _)) => return Ok(None),
            Err(err) => return Err(err),
        };
        let user_header = provider.user_header();
        let Some(offset) = user_header.changeset_offset(block_number) else {
            return Ok(None);
        };
        let mut cursor = provider.cursor()?;
        let range = offset.changeset_range();
        // Rows within a block are written sorted by (address, storage key) — see the
        // writer's `append_storage_changeset` — so a lower-bound binary search over the
        // block's row range finds the first row >= the requested pair.
        let mut low = range.start;
        let mut high = range.end;
        while low < high {
            let mid = low + (high - low) / 2;
            if let Some(change) = cursor.get_one::<StorageChangesetMask>(mid.into())? {
                match (change.address, change.key).cmp(&(address, storage_key)) {
                    std::cmp::Ordering::Less => low = mid + 1,
                    _ => high = mid,
                }
            } else {
                // A row inside the recorded range could not be read; abandon the search
                // and fall through to the not-found path below.
                debug!(
                    target: "provider::static_file",
                    ?low,
                    ?mid,
                    ?high,
                    ?range,
                    ?block_number,
                    ?address,
                    ?storage_key,
                    "Cannot continue binary search for storage changeset fetch"
                );
                low = range.end;
                break;
            }
        }
        // `low` is only a lower bound; confirm it is an exact (address, key) match.
        if low < range.end &&
            let Some(change) = cursor
                .get_one::<StorageChangesetMask>(low.into())?
                .filter(|change| change.address == address && change.key == storage_key)
        {
            return Ok(Some(StorageEntry { key: change.key, value: change.value }));
        }
        Ok(None)
    }
    fn storage_changesets_range(
        &self,
        range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
        // Delegate to the block-range walker and gather everything into one vector.
        self.walk_storage_changeset_range(range).collect()
    }
    fn storage_changeset_count(&self) -> ProviderResult<usize> {
        let mut count = 0;
        // Sum `num_changes` over every per-block offset of every storage-changeset static
        // file, without touching the row data itself.
        let static_files = iter_static_files(&self.path).map_err(ProviderError::other)?;
        if let Some(changeset_segments) = static_files.get(StaticFileSegment::StorageChangeSets) {
            for (_, header) in changeset_segments {
                if let Some(changeset_offsets) = header.changeset_offsets() {
                    for offset in changeset_offsets {
                        count += offset.num_changes() as usize;
                    }
                }
            }
        }
        Ok(count)
    }
}
impl<N: NodePrimitives> StaticFileProvider<N> {
/// Creates an iterator for walking through account changesets in the specified block range.
///
@@ -2228,6 +2510,14 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
) -> StaticFileAccountChangesetWalker<Self> {
StaticFileAccountChangesetWalker::new(self.clone(), range)
}
/// Returns a walker that iterates over the storage changesets stored in static files for
/// every block covered by `range`.
pub fn walk_storage_changeset_range(
    &self,
    range: impl RangeBounds<BlockNumber>,
) -> StaticFileStorageChangesetWalker<Self> {
    // The walker owns its own handle to the provider.
    let provider = self.clone();
    StaticFileStorageChangesetWalker::new(provider, range)
}
}
impl<N: NodePrimitives<BlockHeader: Value>> HeaderProvider for StaticFileProvider<N> {

View File

@@ -69,14 +69,19 @@ mod tests {
use alloy_consensus::{Header, SignableTransaction, Transaction, TxLegacy};
use alloy_primitives::{Address, BlockHash, Signature, TxNumber, B256, U160, U256};
use rand::seq::SliceRandom;
use reth_db::{models::AccountBeforeTx, test_utils::create_test_static_files_dir};
use reth_db::{
models::{AccountBeforeTx, StorageBeforeTx},
test_utils::create_test_static_files_dir,
};
use reth_db_api::{transaction::DbTxMut, CanonicalHeaders, HeaderNumbers, Headers};
use reth_ethereum_primitives::{EthPrimitives, Receipt, TransactionSigned};
use reth_primitives_traits::Account;
use reth_static_file_types::{
find_fixed_range, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE,
};
use reth_storage_api::{ChangeSetReader, ReceiptProvider, TransactionsProvider};
use reth_storage_api::{
ChangeSetReader, ReceiptProvider, StorageChangeSetReader, TransactionsProvider,
};
use reth_testing_utils::generators::{self, random_header_range};
use std::{collections::BTreeMap, fmt::Debug, fs, ops::Range, path::Path};
@@ -321,7 +326,9 @@ mod tests {
// Append transaction/receipt if there's still a transaction count to append
if tx_count > 0 {
match segment {
StaticFileSegment::Headers | StaticFileSegment::AccountChangeSets => {
StaticFileSegment::Headers |
StaticFileSegment::AccountChangeSets |
StaticFileSegment::StorageChangeSets => {
panic!("non tx based segment")
}
StaticFileSegment::Transactions => {
@@ -438,7 +445,9 @@ mod tests {
// Prune transactions or receipts based on the segment type
match segment {
StaticFileSegment::Headers | StaticFileSegment::AccountChangeSets => {
StaticFileSegment::Headers |
StaticFileSegment::AccountChangeSets |
StaticFileSegment::StorageChangeSets => {
panic!("non tx based segment")
}
StaticFileSegment::Transactions => {
@@ -463,7 +472,9 @@ mod tests {
// cumulative_gas_used & nonce as ids.
if let Some(id) = expected_tx_tip {
match segment {
StaticFileSegment::Headers | StaticFileSegment::AccountChangeSets => {
StaticFileSegment::Headers |
StaticFileSegment::AccountChangeSets |
StaticFileSegment::StorageChangeSets => {
panic!("non tx based segment")
}
StaticFileSegment::Transactions => assert_eyre(
@@ -1033,4 +1044,311 @@ mod tests {
}
}
}
#[test]
fn test_storage_changeset_static_files() {
    let (static_dir, _) = create_test_static_files_dir();
    let sf_rw = StaticFileProvider::<EthPrimitives>::read_write(&static_dir)
        .expect("Failed to create static file provider");

    const BLOCK_COUNT: u64 = 10;
    const CHANGES_PER_BLOCK: u64 = 5;

    // Write `BLOCK_COUNT` blocks of changesets with `CHANGES_PER_BLOCK` entries each.
    {
        let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap();
        for block in 0..BLOCK_COUNT {
            let mut changes = Vec::with_capacity(CHANGES_PER_BLOCK as usize);
            for entry_idx in 0..CHANGES_PER_BLOCK {
                // Derive a unique address per (block, entry) pair.
                let mut address = Address::ZERO;
                address.0[0] = block as u8;
                address.0[1] = entry_idx as u8;
                changes.push(StorageBeforeTx {
                    address,
                    key: B256::with_last_byte(entry_idx as u8),
                    value: U256::from(block * 1000 + entry_idx),
                });
            }
            writer.append_storage_changeset(changes, block).unwrap();
        }
        writer.commit().unwrap();
    }

    // Read back and verify the per-block changeset offsets recorded in the header.
    {
        let provider = sf_rw
            .get_segment_provider_for_block(StaticFileSegment::StorageChangeSets, 5, None)
            .unwrap();
        let header = provider.user_header();
        assert!(header.changeset_offsets().is_some());
        let offsets = header.changeset_offsets().unwrap();
        // One offset per written block.
        assert_eq!(offsets.len(), 10);
        for (block, offset) in offsets.iter().enumerate() {
            assert_eq!(offset.num_changes(), 5, "Block {} should have 5 changes", block);
        }
    }
}
#[test]
fn test_get_storage_before_block() {
    let (static_dir, _) = create_test_static_files_dir();
    let sf_rw = StaticFileProvider::<EthPrimitives>::read_write(&static_dir)
        .expect("Failed to create static file provider");
    let test_address = Address::from([1u8; 20]);
    let other_address = Address::from([2u8; 20]);
    let missing_address = Address::from([3u8; 20]);
    let test_key = B256::with_last_byte(1);
    let other_key = B256::with_last_byte(2);
    // Write changesets for multiple blocks
    {
        let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap();
        // Block 0: test_address and other_address change
        writer
            .append_storage_changeset(
                vec![
                    StorageBeforeTx { address: test_address, key: test_key, value: U256::ZERO },
                    StorageBeforeTx {
                        address: other_address,
                        key: other_key,
                        value: U256::from(5),
                    },
                ],
                0,
            )
            .unwrap();
        // Block 1: only other_address changes
        writer
            .append_storage_changeset(
                vec![StorageBeforeTx {
                    address: other_address,
                    key: other_key,
                    value: U256::from(7),
                }],
                1,
            )
            .unwrap();
        // Block 2: test_address changes again
        writer
            .append_storage_changeset(
                vec![StorageBeforeTx {
                    address: test_address,
                    key: test_key,
                    value: U256::from(9),
                }],
                2,
            )
            .unwrap();
        writer.commit().unwrap();
    }
    // Test get_storage_before_block
    {
        // Block 0 recorded the slot's pre-state as zero.
        let result = sf_rw.get_storage_before_block(0, test_address, test_key).unwrap();
        assert!(result.is_some());
        let entry = result.unwrap();
        assert_eq!(entry.key, test_key);
        assert_eq!(entry.value, U256::ZERO);
        // Block 2 recorded a later change for the same slot.
        let result = sf_rw.get_storage_before_block(2, test_address, test_key).unwrap();
        assert!(result.is_some());
        let entry = result.unwrap();
        assert_eq!(entry.key, test_key);
        assert_eq!(entry.value, U256::from(9));
        // The slot did not change in block 1, so the lookup misses.
        let result = sf_rw.get_storage_before_block(1, test_address, test_key).unwrap();
        assert!(result.is_none());
        // An address that never changed storage misses in every block.
        let result = sf_rw.get_storage_before_block(2, missing_address, test_key).unwrap();
        assert!(result.is_none());
        // The other slot is found in the block where it changed.
        let result = sf_rw.get_storage_before_block(1, other_address, other_key).unwrap();
        assert!(result.is_some());
        let entry = result.unwrap();
        assert_eq!(entry.key, other_key);
    }
}
#[test]
fn test_storage_changeset_truncation() {
    let (static_dir, _) = create_test_static_files_dir();
    // Use small files (10 blocks each) so pruning exercises both truncation within a file
    // and deletion of whole file sets.
    let blocks_per_file = 10;
    let files_per_range = 3;
    let file_set_count = 3;
    let initial_file_count = files_per_range * file_set_count;
    let tip = blocks_per_file * file_set_count - 1;
    // Setup: Create storage changesets for multiple blocks
    {
        let sf_rw: StaticFileProvider<EthPrimitives> =
            StaticFileProviderBuilder::read_write(&static_dir)
                .with_blocks_per_file(blocks_per_file)
                .build()
                .expect("failed to create static file provider");
        let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap();
        for block_num in 0..=tip {
            // Vary the changeset size per block (1..=5 entries) so offsets differ.
            let num_changes = ((block_num % 5) + 1) as usize;
            let mut changeset = Vec::with_capacity(num_changes);
            for i in 0..num_changes {
                let mut address = Address::ZERO;
                address.0[0] = block_num as u8;
                address.0[1] = i as u8;
                changeset.push(StorageBeforeTx {
                    address,
                    key: B256::with_last_byte(i as u8),
                    value: U256::from(block_num * 1000 + i as u64),
                });
            }
            writer.append_storage_changeset(changeset, block_num).unwrap();
        }
        writer.commit().unwrap();
    }
    // Asserts the segment's block tip, the number of files left on disk, and — when a tip
    // remains — that the header still carries changeset offsets.
    fn validate_truncation(
        sf_rw: &StaticFileProvider<EthPrimitives>,
        static_dir: impl AsRef<Path>,
        expected_tip: Option<u64>,
        expected_file_count: u64,
    ) -> eyre::Result<()> {
        let highest_block =
            sf_rw.get_highest_static_file_block(StaticFileSegment::StorageChangeSets);
        assert_eyre(highest_block, expected_tip, "block tip mismatch")?;
        assert_eyre(
            count_files_without_lockfile(static_dir)?,
            expected_file_count as usize,
            "file count mismatch",
        )?;
        if let Some(tip) = expected_tip {
            let provider = sf_rw.get_segment_provider_for_block(
                StaticFileSegment::StorageChangeSets,
                tip,
                None,
            )?;
            let offsets = provider.user_header().changeset_offsets();
            assert!(offsets.is_some(), "Should have changeset offsets");
        }
        Ok(())
    }
    // Reopen the provider fresh and rebuild its in-memory index before pruning.
    let sf_rw = StaticFileProviderBuilder::read_write(&static_dir)
        .with_blocks_per_file(blocks_per_file)
        .build()
        .expect("failed to create static file provider");
    sf_rw.initialize_index().expect("Failed to initialize index");
    // Case 1: Truncate to block 20
    {
        let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap();
        writer.prune_storage_changesets(20).unwrap();
        writer.commit().unwrap();
        validate_truncation(&sf_rw, &static_dir, Some(20), initial_file_count)
            .expect("Truncation validation failed");
    }
    // Case 2: Truncate to block 9
    {
        let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap();
        writer.prune_storage_changesets(9).unwrap();
        writer.commit().unwrap();
        validate_truncation(&sf_rw, &static_dir, Some(9), files_per_range)
            .expect("Truncation validation failed");
    }
    // Case 3: Truncate all (should keep block 0)
    {
        let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap();
        writer.prune_storage_changesets(0).unwrap();
        writer.commit().unwrap();
        validate_truncation(&sf_rw, &static_dir, Some(0), files_per_range)
            .expect("Truncation validation failed");
    }
}
#[test]
fn test_storage_changeset_binary_search() {
    let (static_dir, _) = create_test_static_files_dir();
    let sf_rw = StaticFileProvider::<EthPrimitives>::read_write(&static_dir)
        .expect("Failed to create static file provider");

    let block_num = 0u64;
    let num_slots = 100;
    let address = Address::from([4u8; 20]);
    // One key per slot, distinguished by the last byte.
    let keys: Vec<B256> = (0..num_slots).map(|i| B256::with_last_byte(i as u8)).collect();

    // Persist a single block containing all slots.
    {
        let mut writer = sf_rw.latest_writer(StaticFileSegment::StorageChangeSets).unwrap();
        let mut changeset = Vec::with_capacity(num_slots);
        for (slot, key) in keys.iter().enumerate() {
            changeset.push(StorageBeforeTx {
                address,
                key: *key,
                value: U256::from(slot as u64),
            });
        }
        writer.append_storage_changeset(changeset, block_num).unwrap();
        writer.commit().unwrap();
    }

    // Probe the lookup at the boundaries, the middle, a missing key, and a stride of keys.
    {
        // First key.
        let found = sf_rw.get_storage_before_block(block_num, address, keys[0]).unwrap();
        assert!(found.is_some());
        let entry = found.unwrap();
        assert_eq!(entry.key, keys[0]);
        assert_eq!(entry.value, U256::from(0));

        // Last key.
        let found =
            sf_rw.get_storage_before_block(block_num, address, keys[num_slots - 1]).unwrap();
        assert!(found.is_some());
        assert_eq!(found.unwrap().key, keys[num_slots - 1]);

        // Middle key.
        let mid = num_slots / 2;
        let found = sf_rw.get_storage_before_block(block_num, address, keys[mid]).unwrap();
        assert!(found.is_some());
        assert_eq!(found.unwrap().key, keys[mid]);

        // A key that was never written must miss.
        let absent_key = B256::with_last_byte(255);
        let found = sf_rw.get_storage_before_block(block_num, address, absent_key).unwrap();
        assert!(found.is_none());

        // Spot-check every tenth key.
        for slot in (0..num_slots).step_by(10) {
            let found = sf_rw.get_storage_before_block(block_num, address, keys[slot]).unwrap();
            assert!(found.is_some());
            assert_eq!(found.unwrap().key, keys[slot]);
        }
    }
}
}

View File

@@ -6,7 +6,7 @@ use alloy_consensus::BlockHeader;
use alloy_primitives::{BlockHash, BlockNumber, TxNumber, U256};
use parking_lot::{lock_api::RwLockWriteGuard, RawRwLock, RwLock};
use reth_codecs::Compact;
use reth_db::models::AccountBeforeTx;
use reth_db::models::{AccountBeforeTx, StorageBeforeTx};
use reth_db_api::models::CompactU256;
use reth_nippy_jar::{NippyJar, NippyJarError, NippyJarWriter};
use reth_node_types::NodePrimitives;
@@ -56,6 +56,11 @@ enum PruneStrategy {
/// The target block number to prune to.
last_block: BlockNumber,
},
/// Prune storage changesets to a target block number.
StorageChangeSets {
/// The target block number to prune to.
last_block: BlockNumber,
},
}
/// Static file writers for every known [`StaticFileSegment`].
@@ -69,6 +74,7 @@ pub(crate) struct StaticFileWriters<N> {
receipts: RwLock<Option<StaticFileProviderRW<N>>>,
transaction_senders: RwLock<Option<StaticFileProviderRW<N>>>,
account_change_sets: RwLock<Option<StaticFileProviderRW<N>>>,
storage_change_sets: RwLock<Option<StaticFileProviderRW<N>>>,
}
impl<N> Default for StaticFileWriters<N> {
@@ -79,6 +85,7 @@ impl<N> Default for StaticFileWriters<N> {
receipts: Default::default(),
transaction_senders: Default::default(),
account_change_sets: Default::default(),
storage_change_sets: Default::default(),
}
}
}
@@ -95,6 +102,7 @@ impl<N: NodePrimitives> StaticFileWriters<N> {
StaticFileSegment::Receipts => self.receipts.write(),
StaticFileSegment::TransactionSenders => self.transaction_senders.write(),
StaticFileSegment::AccountChangeSets => self.account_change_sets.write(),
StaticFileSegment::StorageChangeSets => self.storage_change_sets.write(),
};
if write_guard.is_none() {
@@ -113,6 +121,7 @@ impl<N: NodePrimitives> StaticFileWriters<N> {
&self.receipts,
&self.transaction_senders,
&self.account_change_sets,
&self.storage_change_sets,
] {
let mut writer = writer_lock.write();
if let Some(writer) = writer.as_mut() {
@@ -131,6 +140,7 @@ impl<N: NodePrimitives> StaticFileWriters<N> {
&self.receipts,
&self.transaction_senders,
&self.account_change_sets,
&self.storage_change_sets,
] {
let writer = writer_lock.read();
if let Some(writer) = writer.as_ref() &&
@@ -155,6 +165,7 @@ impl<N: NodePrimitives> StaticFileWriters<N> {
&self.receipts,
&self.transaction_senders,
&self.account_change_sets,
&self.storage_change_sets,
] {
let mut writer = writer_lock.write();
if let Some(writer) = writer.as_mut() {
@@ -388,6 +399,9 @@ impl<N: NodePrimitives> StaticFileProviderRW<N> {
PruneStrategy::AccountChangeSets { last_block } => {
self.prune_account_changeset_data(last_block)?
}
PruneStrategy::StorageChangeSets { last_block } => {
self.prune_storage_changeset_data(last_block)?
}
}
}
@@ -596,7 +610,7 @@ impl<N: NodePrimitives> StaticFileProviderRW<N> {
/// Commits to the configuration file at the end
fn truncate_changesets(&mut self, last_block: u64) -> ProviderResult<()> {
let segment = self.writer.user_header().segment();
debug_assert_eq!(segment, StaticFileSegment::AccountChangeSets);
debug_assert!(segment.is_change_based());
// Get the current block range
let current_block_end = self
@@ -1076,6 +1090,41 @@ impl<N: NodePrimitives> StaticFileProviderRW<N> {
Ok(())
}
/// Appends a block storage changeset to the static file.
///
/// Entries are sorted by `(address, storage key)` before being written so that readers can
/// binary-search within a block's row range.
///
/// It **CALLS** `increment_block()`.
pub fn append_storage_changeset(
    &mut self,
    mut changeset: Vec<StorageBeforeTx>,
    block_number: u64,
) -> ProviderResult<()> {
    // `debug_assert_eq!` reports both segments on failure, consistent with the other
    // segment guards in this file.
    debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::StorageChangeSets);
    let start = Instant::now();

    // Register the new block in the header before writing its rows.
    self.increment_block(block_number)?;
    self.ensure_no_queued_prune()?;

    // sort by address + storage key
    changeset.sort_by_key(|change| (change.address, change.key));

    // Rows either all land or we bail on the first error, so the count on the success
    // path is simply the changeset length.
    let count = changeset.len() as u64;
    for change in &changeset {
        self.append_change(change)?;
    }

    if let Some(metrics) = &self.metrics {
        metrics.record_segment_operations(
            StaticFileSegment::StorageChangeSets,
            StaticFileProviderOperation::Append,
            count,
            Some(start.elapsed()),
        );
    }

    Ok(())
}
/// Adds an instruction to prune `to_delete` transactions during commit.
///
/// Note: `last_block` refers to the block the unwinds ends at.
@@ -1127,6 +1176,12 @@ impl<N: NodePrimitives> StaticFileProviderRW<N> {
self.queue_prune(PruneStrategy::AccountChangeSets { last_block })
}
/// Adds an instruction to prune storage changesets until the given block.
///
/// The deletion is deferred: this only queues a prune strategy that is applied on the
/// next commit. `last_block` is the block the unwind ends at (it is kept).
pub fn prune_storage_changesets(&mut self, last_block: u64) -> ProviderResult<()> {
    // Only valid on a writer that owns the storage-changesets segment.
    debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::StorageChangeSets);
    self.queue_prune(PruneStrategy::StorageChangeSets { last_block })
}
/// Adds an instruction to prune elements during commit using the specified strategy.
fn queue_prune(&mut self, strategy: PruneStrategy) -> ProviderResult<()> {
self.ensure_no_queued_prune()?;
@@ -1186,6 +1241,25 @@ impl<N: NodePrimitives> StaticFileProviderRW<N> {
Ok(())
}
/// Prunes the last storage changesets from the data file.
///
/// Truncates the segment down to `last_block` and records prune metrics when metrics are
/// enabled.
fn prune_storage_changeset_data(&mut self, last_block: BlockNumber) -> ProviderResult<()> {
    let start = Instant::now();
    // `debug_assert_eq!` reports both segments on failure, consistent with the guard in
    // `prune_storage_changesets`.
    debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::StorageChangeSets);

    self.truncate_changesets(last_block)?;

    if let Some(metrics) = &self.metrics {
        metrics.record_segment_operation(
            StaticFileSegment::StorageChangeSets,
            StaticFileProviderOperation::Prune,
            Some(start.elapsed()),
        );
    }

    Ok(())
}
/// Prunes the last `to_delete` receipts from the data file.
fn prune_receipt_data(
&mut self,

View File

@@ -27,14 +27,14 @@ use reth_ethereum_primitives::EthPrimitives;
use reth_execution_types::ExecutionOutcome;
use reth_primitives_traits::{
Account, Block, BlockBody, Bytecode, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader,
SignerRecoverable,
SignerRecoverable, StorageEntry,
};
use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment};
use reth_stages_types::{StageCheckpoint, StageId};
use reth_storage_api::{
BlockBodyIndicesProvider, BytecodeReader, DBProvider, DatabaseProviderFactory,
HashedPostStateProvider, NodePrimitivesProvider, StageCheckpointReader, StateProofProvider,
StorageRootProvider,
StorageChangeSetReader, StorageRootProvider,
};
use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult};
use reth_trie::{
@@ -989,6 +989,37 @@ impl<T: NodePrimitives, ChainSpec: Send + Sync> ChangeSetReader for MockEthProvi
}
}
// The mock provider does not track storage changesets; every query reports "nothing" so
// callers that require the trait bound can still use the mock.
impl<T: NodePrimitives, ChainSpec: Send + Sync> StorageChangeSetReader
    for MockEthProvider<T, ChainSpec>
{
    fn storage_changeset(
        &self,
        _block_number: BlockNumber,
    ) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, StorageEntry)>> {
        // No changesets tracked: always empty.
        Ok(Vec::default())
    }
    fn get_storage_before_block(
        &self,
        _block_number: BlockNumber,
        _address: Address,
        _storage_key: B256,
    ) -> ProviderResult<Option<StorageEntry>> {
        // No changesets tracked: lookup always misses.
        Ok(None)
    }
    fn storage_changesets_range(
        &self,
        _range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, StorageEntry)>> {
        // No changesets tracked: always empty.
        Ok(Vec::default())
    }
    fn storage_changeset_count(&self) -> ProviderResult<usize> {
        // No changesets tracked: count is zero.
        Ok(0)
    }
}
impl<T: NodePrimitives, ChainSpec: Send + Sync> StateReader for MockEthProvider<T, ChainSpec> {
type Receipt = T::Receipt;

View File

@@ -10,14 +10,18 @@ use reth_chain_state::{
CanonStateSubscriptions, ForkChoiceSubscriptions, PersistedBlockSubscriptions,
};
use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy};
use reth_storage_api::NodePrimitivesProvider;
use reth_storage_api::{NodePrimitivesProvider, StorageChangeSetReader};
use std::fmt::Debug;
/// Helper trait to unify all provider traits for simplicity.
pub trait FullProvider<N: NodeTypesWithDB>:
DatabaseProviderFactory<
DB = N::DB,
Provider: BlockReader + StageCheckpointReader + PruneCheckpointReader + ChangeSetReader,
Provider: BlockReader
+ StageCheckpointReader
+ PruneCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader,
> + NodePrimitivesProvider<Primitives = N::Primitives>
+ StaticFileProviderFactory<Primitives = N::Primitives>
+ RocksDBProviderFactory
@@ -32,6 +36,7 @@ pub trait FullProvider<N: NodeTypesWithDB>:
+ HashedPostStateProvider
+ ChainSpecProvider<ChainSpec = N::ChainSpec>
+ ChangeSetReader
+ StorageChangeSetReader
+ CanonStateSubscriptions
+ ForkChoiceSubscriptions<Header = HeaderTy<N>>
+ PersistedBlockSubscriptions
@@ -46,7 +51,11 @@ pub trait FullProvider<N: NodeTypesWithDB>:
impl<T, N: NodeTypesWithDB> FullProvider<N> for T where
T: DatabaseProviderFactory<
DB = N::DB,
Provider: BlockReader + StageCheckpointReader + PruneCheckpointReader + ChangeSetReader,
Provider: BlockReader
+ StageCheckpointReader
+ PruneCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader,
> + NodePrimitivesProvider<Primitives = N::Primitives>
+ StaticFileProviderFactory<Primitives = N::Primitives>
+ RocksDBProviderFactory
@@ -61,6 +70,7 @@ impl<T, N: NodeTypesWithDB> FullProvider<N> for T where
+ HashedPostStateProvider
+ ChainSpecProvider<ChainSpec = N::ChainSpec>
+ ChangeSetReader
+ StorageChangeSetReader
+ CanonStateSubscriptions
+ ForkChoiceSubscriptions<Header = HeaderTy<N>>
+ PersistedBlockSubscriptions

View File

@@ -10,7 +10,7 @@ use crate::{
};
#[cfg(feature = "db-api")]
use crate::{DBProvider, DatabaseProviderFactory};
use crate::{DBProvider, DatabaseProviderFactory, StorageChangeSetReader};
use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec};
use alloy_consensus::transaction::TransactionMeta;
use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag};
@@ -28,7 +28,9 @@ use reth_db_api::mock::{DatabaseMock, TxMock};
use reth_db_models::{AccountBeforeTx, StoredBlockBodyIndices};
use reth_ethereum_primitives::EthPrimitives;
use reth_execution_types::ExecutionOutcome;
use reth_primitives_traits::{Account, Bytecode, NodePrimitives, RecoveredBlock, SealedHeader};
use reth_primitives_traits::{
Account, Bytecode, NodePrimitives, RecoveredBlock, SealedHeader, StorageEntry,
};
#[cfg(feature = "db-api")]
use reth_prune_types::PruneModes;
use reth_prune_types::{PruneCheckpoint, PruneSegment};
@@ -408,6 +410,36 @@ impl<C: Send + Sync, N: NodePrimitives> ChangeSetReader for NoopProvider<C, N> {
}
}
// `NoopProvider` stores nothing, so every changeset query reports an empty result.
#[cfg(feature = "db-api")]
impl<C: Send + Sync, N: NodePrimitives> StorageChangeSetReader for NoopProvider<C, N> {
    fn storage_changeset(
        &self,
        _block_number: BlockNumber,
    ) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, StorageEntry)>> {
        // Nothing stored: always empty.
        Ok(Vec::default())
    }
    fn get_storage_before_block(
        &self,
        _block_number: BlockNumber,
        _address: Address,
        _storage_key: B256,
    ) -> ProviderResult<Option<StorageEntry>> {
        // Nothing stored: lookup always misses.
        Ok(None)
    }
    fn storage_changesets_range(
        &self,
        _range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, StorageEntry)>> {
        // Nothing stored: always empty.
        Ok(Vec::default())
    }
    fn storage_changeset_count(&self) -> ProviderResult<usize> {
        // Nothing stored: count is zero.
        Ok(0)
    }
}
impl<C: Send + Sync, N: NodePrimitives> StateRootProvider for NoopProvider<C, N> {
fn state_root(&self, _state: HashedPostState) -> ProviderResult<B256> {
Ok(B256::default())

View File

@@ -4,6 +4,7 @@ use alloc::{
};
use alloy_primitives::{Address, BlockNumber, B256};
use core::ops::RangeInclusive;
use reth_db_models::StorageBeforeTx;
use reth_primitives_traits::StorageEntry;
use reth_storage_errors::provider::ProviderResult;
@@ -41,4 +42,44 @@ pub trait StorageChangeSetReader: Send {
&self,
block_number: BlockNumber,
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, StorageEntry)>>;
/// Search the block's changesets for the given address and storage key, and return the result.
///
/// Returns `None` if the storage slot was not changed in this block.
fn get_storage_before_block(
&self,
block_number: BlockNumber,
address: Address,
storage_key: B256,
) -> ProviderResult<Option<StorageEntry>>;
/// Get all storage changesets in a range of blocks.
///
/// NOTE: Get inclusive range of blocks.
fn storage_changesets_range(
&self,
range: RangeInclusive<BlockNumber>,
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, StorageEntry)>>;
/// Get the total count of all storage changes.
fn storage_changeset_count(&self) -> ProviderResult<usize>;
/// Returns the block's storage changesets in the row format used by static files.
///
/// Provided method: fetches via `storage_changeset` and converts each entry into a
/// `StorageBeforeTx`.
fn storage_block_changeset(
    &self,
    block_number: BlockNumber,
) -> ProviderResult<Vec<StorageBeforeTx>> {
    let changesets = self.storage_changeset(block_number)?;
    let mut rows = Vec::with_capacity(changesets.len());
    for (block_address, entry) in changesets {
        rows.push(StorageBeforeTx {
            address: block_address.address(),
            key: entry.key,
            value: entry.value,
        });
    }
    Ok(rows)
}
}

View File

@@ -10,7 +10,9 @@
use crate::{DatabaseHashedPostState, DatabaseStateRoot, DatabaseTrieCursorFactory};
use alloy_primitives::{map::B256Map, BlockNumber, B256};
use parking_lot::RwLock;
use reth_storage_api::{BlockNumReader, ChangeSetReader, DBProvider, StageCheckpointReader};
use reth_storage_api::{
BlockNumReader, ChangeSetReader, DBProvider, StageCheckpointReader, StorageChangeSetReader,
};
use reth_storage_errors::provider::{ProviderError, ProviderResult};
use reth_trie::{
changesets::compute_trie_changesets,
@@ -65,7 +67,11 @@ pub fn compute_block_trie_changesets<Provider>(
block_number: BlockNumber,
) -> Result<TrieUpdatesSorted, ProviderError>
where
Provider: DBProvider + StageCheckpointReader + ChangeSetReader + BlockNumReader,
Provider: DBProvider
+ StageCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader,
{
debug!(
target: "trie::changeset_cache",
@@ -175,7 +181,11 @@ pub fn compute_block_trie_updates<Provider>(
block_number: BlockNumber,
) -> ProviderResult<TrieUpdatesSorted>
where
Provider: DBProvider + StageCheckpointReader + ChangeSetReader + BlockNumReader,
Provider: DBProvider
+ StageCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader,
{
let tx = provider.tx_ref();
@@ -323,7 +333,11 @@ impl ChangesetCache {
provider: &P,
) -> ProviderResult<Arc<TrieUpdatesSorted>>
where
P: DBProvider + StageCheckpointReader + ChangeSetReader + BlockNumReader,
P: DBProvider
+ StageCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader,
{
// Try cache first (with read lock)
{
@@ -408,7 +422,11 @@ impl ChangesetCache {
range: RangeInclusive<BlockNumber>,
) -> ProviderResult<TrieUpdatesSorted>
where
P: DBProvider + StageCheckpointReader + ChangeSetReader + BlockNumReader,
P: DBProvider
+ StageCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader,
{
// Get the database tip block number
let db_tip_block = provider

View File

@@ -18,7 +18,9 @@ pub use hashed_cursor::{
pub use prefix_set::{load_prefix_sets_with_provider, PrefixSetLoader};
pub use proof::{DatabaseProof, DatabaseStorageProof};
pub use state::{DatabaseHashedPostState, DatabaseStateRoot};
pub use storage::{DatabaseHashedStorage, DatabaseStorageRoot};
pub use storage::{
hashed_storage_from_reverts_with_provider, DatabaseHashedStorage, DatabaseStorageRoot,
};
pub use trie_cursor::{
DatabaseAccountTrieCursor, DatabaseStorageTrieCursor, DatabaseTrieCursorFactory,
};

View File

@@ -14,7 +14,7 @@ use reth_db_api::{
DatabaseError,
};
use reth_primitives_traits::StorageEntry;
use reth_storage_api::{ChangeSetReader, DBProvider};
use reth_storage_api::{ChangeSetReader, DBProvider, StorageChangeSetReader};
use reth_storage_errors::provider::ProviderError;
use reth_trie::{
prefix_set::{PrefixSetMut, TriePrefixSets},
@@ -93,7 +93,7 @@ pub fn load_prefix_sets_with_provider<Provider, KH>(
range: RangeInclusive<BlockNumber>,
) -> Result<TriePrefixSets, ProviderError>
where
Provider: ChangeSetReader + DBProvider,
Provider: ChangeSetReader + StorageChangeSetReader + DBProvider,
KH: KeyHasher,
{
let tx = provider.tx_ref();
@@ -118,12 +118,9 @@ where
}
}
// Walk storage changeset and insert storage prefixes
// Note: Storage changesets don't have static files yet, so we still use direct cursor
let mut storage_cursor = tx.cursor_dup_read::<tables::StorageChangeSets>()?;
let storage_range = BlockNumberAddress::range(range);
for storage_entry in storage_cursor.walk_range(storage_range)? {
let (BlockNumberAddress((_, address)), StorageEntry { key, .. }) = storage_entry?;
// Walk storage changesets using the provider (handles static files + database)
let storage_changesets = provider.storage_changesets_range(range)?;
for (BlockNumberAddress((_, address)), StorageEntry { key, .. }) in storage_changesets {
let hashed_address = KH::hash_key(address);
account_prefix_set.insert(Nibbles::unpack(hashed_address));
storage_prefix_sets

View File

@@ -3,13 +3,11 @@ use crate::{
};
use alloy_primitives::{map::B256Map, BlockNumber, B256};
use reth_db_api::{
cursor::DbCursorRO,
models::{AccountBeforeTx, BlockNumberAddress, BlockNumberAddressRange},
tables,
models::{AccountBeforeTx, BlockNumberAddress},
transaction::DbTx,
};
use reth_execution_errors::StateRootError;
use reth_storage_api::{BlockNumReader, ChangeSetReader, DBProvider};
use reth_storage_api::{BlockNumReader, ChangeSetReader, DBProvider, StorageChangeSetReader};
use reth_storage_errors::provider::ProviderError;
use reth_trie::{
hashed_cursor::HashedPostStateCursorFactory, trie_cursor::InMemoryTrieCursorFactory,
@@ -34,7 +32,7 @@ pub trait DatabaseStateRoot<'a, TX>: Sized {
///
/// An instance of state root calculator with account and storage prefixes loaded.
fn incremental_root_calculator(
provider: &'a (impl ChangeSetReader + DBProvider<Tx = TX>),
provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider<Tx = TX>),
range: RangeInclusive<BlockNumber>,
) -> Result<Self, StateRootError>;
@@ -45,7 +43,7 @@ pub trait DatabaseStateRoot<'a, TX>: Sized {
///
/// The updated state root.
fn incremental_root(
provider: &'a (impl ChangeSetReader + DBProvider<Tx = TX>),
provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider<Tx = TX>),
range: RangeInclusive<BlockNumber>,
) -> Result<B256, StateRootError>;
@@ -58,7 +56,7 @@ pub trait DatabaseStateRoot<'a, TX>: Sized {
///
/// The updated state root and the trie updates.
fn incremental_root_with_updates(
provider: &'a (impl ChangeSetReader + DBProvider<Tx = TX>),
provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider<Tx = TX>),
range: RangeInclusive<BlockNumber>,
) -> Result<(B256, TrieUpdates), StateRootError>;
@@ -69,7 +67,7 @@ pub trait DatabaseStateRoot<'a, TX>: Sized {
///
/// The intermediate progress of state root computation.
fn incremental_root_with_progress(
provider: &'a (impl ChangeSetReader + DBProvider<Tx = TX>),
provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider<Tx = TX>),
range: RangeInclusive<BlockNumber>,
) -> Result<StateRootProgress, StateRootError>;
@@ -133,7 +131,7 @@ pub trait DatabaseHashedPostState: Sized {
/// Initializes [`HashedPostStateSorted`] from reverts. Iterates over state reverts in the
/// specified range and aggregates them into sorted hashed state.
fn from_reverts<KH: KeyHasher>(
provider: &(impl ChangeSetReader + BlockNumReader + DBProvider),
provider: &(impl ChangeSetReader + StorageChangeSetReader + BlockNumReader + DBProvider),
range: impl RangeBounds<BlockNumber>,
) -> Result<HashedPostStateSorted, ProviderError>;
}
@@ -146,7 +144,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX>
}
fn incremental_root_calculator(
provider: &'a (impl ChangeSetReader + DBProvider<Tx = TX>),
provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider<Tx = TX>),
range: RangeInclusive<BlockNumber>,
) -> Result<Self, StateRootError> {
let loaded_prefix_sets =
@@ -155,7 +153,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX>
}
fn incremental_root(
provider: &'a (impl ChangeSetReader + DBProvider<Tx = TX>),
provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider<Tx = TX>),
range: RangeInclusive<BlockNumber>,
) -> Result<B256, StateRootError> {
debug!(target: "trie::loader", ?range, "incremental state root");
@@ -163,7 +161,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX>
}
fn incremental_root_with_updates(
provider: &'a (impl ChangeSetReader + DBProvider<Tx = TX>),
provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider<Tx = TX>),
range: RangeInclusive<BlockNumber>,
) -> Result<(B256, TrieUpdates), StateRootError> {
debug!(target: "trie::loader", ?range, "incremental state root");
@@ -171,7 +169,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX>
}
fn incremental_root_with_progress(
provider: &'a (impl ChangeSetReader + DBProvider<Tx = TX>),
provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider<Tx = TX>),
range: RangeInclusive<BlockNumber>,
) -> Result<StateRootProgress, StateRootError> {
debug!(target: "trie::loader", ?range, "incremental state root with progress");
@@ -248,11 +246,9 @@ impl DatabaseHashedPostState for HashedPostStateSorted {
/// - Hashes keys and returns them already ordered for trie iteration.
#[instrument(target = "trie::db", skip(provider), fields(range))]
fn from_reverts<KH: KeyHasher>(
provider: &(impl ChangeSetReader + BlockNumReader + DBProvider),
provider: &(impl ChangeSetReader + StorageChangeSetReader + BlockNumReader + DBProvider),
range: impl RangeBounds<BlockNumber>,
) -> Result<Self, ProviderError> {
let tx = provider.tx_ref();
// Extract concrete start/end values to use for both account and storage changesets.
let start = match range.start_bound() {
Bound::Included(&n) => n,
@@ -266,9 +262,6 @@ impl DatabaseHashedPostState for HashedPostStateSorted {
Bound::Unbounded => BlockNumber::MAX,
};
// Convert to BlockNumberAddressRange for storage changesets.
let storage_range: BlockNumberAddressRange = (start..end).into();
// Iterate over account changesets and record value before first occurring account change
let mut accounts = Vec::new();
let mut seen_accounts = HashSet::new();
@@ -280,20 +273,23 @@ impl DatabaseHashedPostState for HashedPostStateSorted {
}
accounts.sort_unstable_by_key(|(hash, _)| *hash);
// Read storages directly into B256Map<Vec<_>> with HashSet to track seen keys.
// Read storages into B256Map<Vec<_>> with HashSet to track seen keys.
// Only keep the first (oldest) occurrence of each (address, slot) pair.
let mut storages = B256Map::<Vec<_>>::default();
let mut seen_storage_keys = HashSet::new();
let mut storage_changesets_cursor = tx.cursor_read::<tables::StorageChangeSets>()?;
for entry in storage_changesets_cursor.walk_range(storage_range)? {
let (BlockNumberAddress((_, address)), storage) = entry?;
if seen_storage_keys.insert((address, storage.key)) {
let hashed_address = KH::hash_key(address);
storages
.entry(hashed_address)
.or_default()
.push((KH::hash_key(storage.key), storage.value));
if start < end {
let end_inclusive = end.saturating_sub(1);
for (BlockNumberAddress((_, address)), storage) in
provider.storage_changesets_range(start..=end_inclusive)?
{
if seen_storage_keys.insert((address, storage.key)) {
let hashed_address = KH::hash_key(address);
storages
.entry(hashed_address)
.or_default()
.push((KH::hash_key(storage.key), storage.value));
}
}
}

View File

@@ -4,6 +4,8 @@ use reth_db_api::{
cursor::DbCursorRO, models::BlockNumberAddress, tables, transaction::DbTx, DatabaseError,
};
use reth_execution_errors::StorageRootError;
use reth_storage_api::{BlockNumReader, StorageChangeSetReader};
use reth_storage_errors::provider::ProviderResult;
use reth_trie::{
hashed_cursor::HashedPostStateCursorFactory, HashedPostState, HashedStorage, StorageRoot,
};
@@ -34,6 +36,36 @@ pub trait DatabaseHashedStorage<TX>: Sized {
fn from_reverts(tx: &TX, address: Address, from: BlockNumber) -> Result<Self, DatabaseError>;
}
/// Initializes [`HashedStorage`] from reverts using a provider.
///
/// Collects, for `address`, the pre-change value of every storage slot touched in
/// blocks `from..=tip` (where `tip` is the provider's last block number), keeping only
/// the first occurrence seen per slot. Slot keys are hashed with `keccak256` before
/// insertion, so the returned [`HashedStorage`] is keyed by hashed slot.
///
/// # Arguments
/// * `provider` - source of storage changesets and the chain tip
///   (handles both static files and the database via `StorageChangeSetReader`)
/// * `address` - the account whose storage reverts are collected
/// * `from` - first block (inclusive) of the revert range
///
/// # Errors
/// Propagates any [`ProviderResult`] error from `last_block_number` or
/// `storage_changesets_range`.
pub fn hashed_storage_from_reverts_with_provider<P>(
    provider: &P,
    address: Address,
    from: BlockNumber,
) -> ProviderResult<HashedStorage>
where
    P: StorageChangeSetReader + BlockNumReader,
{
    // `false` -> storage is not marked as wiped; we are reconstructing values, not deleting.
    let mut storage = HashedStorage::new(false);
    let tip = provider.last_block_number()?;
    // Empty range: nothing to revert past the tip, return the empty (non-wiped) storage.
    if from > tip {
        return Ok(storage)
    }
    // NOTE(review): this fetches changesets for ALL addresses in the range and filters
    // per-entry; a per-address range query (if the provider offers one) would avoid the
    // full scan — confirm whether this path is hot enough to matter.
    for (BlockNumberAddress((_, storage_address)), storage_change) in
        provider.storage_changesets_range(from..=tip)?
    {
        if storage_address == address {
            let hashed_slot = keccak256(storage_change.key);
            // Vacant-only insert keeps the FIRST occurrence per slot — presumably the
            // oldest revert (the value at block `from`); this relies on
            // `storage_changesets_range` yielding entries in ascending block order —
            // TODO confirm that ordering guarantee against the trait contract.
            if let hash_map::Entry::Vacant(entry) = storage.storage.entry(hashed_slot) {
                entry.insert(storage_change.value);
            }
        }
    }
    Ok(storage)
}
impl<'a, TX: DbTx> DatabaseStorageRoot<'a, TX>
for StorageRoot<DatabaseTrieCursorFactory<&'a TX>, DatabaseHashedCursorFactory<&'a TX>>
{

View File

@@ -35,6 +35,7 @@
- [`reth db settings set storages_history`](./reth/db/settings/set/storages_history.mdx)
- [`reth db settings set transaction_hash_numbers`](./reth/db/settings/set/transaction_hash_numbers.mdx)
- [`reth db settings set account_history`](./reth/db/settings/set/account_history.mdx)
- [`reth db settings set storage_changesets`](./reth/db/settings/set/storage_changesets.mdx)
- [`reth db account-storage`](./reth/db/account-storage.mdx)
- [`reth download`](./reth/download.mdx)
- [`reth stage`](./reth/stage.mdx)
@@ -93,6 +94,7 @@
- [`op-reth db settings set storages_history`](./op-reth/db/settings/set/storages_history.mdx)
- [`op-reth db settings set transaction_hash_numbers`](./op-reth/db/settings/set/transaction_hash_numbers.mdx)
- [`op-reth db settings set account_history`](./op-reth/db/settings/set/account_history.mdx)
- [`op-reth db settings set storage_changesets`](./op-reth/db/settings/set/storage_changesets.mdx)
- [`op-reth db account-storage`](./op-reth/db/account-storage.mdx)
- [`op-reth stage`](./op-reth/stage.mdx)
- [`op-reth stage run`](./op-reth/stage/run.mdx)

View File

@@ -124,6 +124,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -154,6 +157,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -18,6 +18,7 @@ Arguments:
- receipts: Static File segment responsible for the `Receipts` table
- transaction-senders: Static File segment responsible for the `TransactionSenders` table
- account-change-sets: Static File segment responsible for the `AccountChangeSets` table
- storage-change-sets: Static File segment responsible for the `StorageChangeSets` table
Options:
--start-block <START_BLOCK>

View File

@@ -16,6 +16,7 @@ Arguments:
- receipts: Static File segment responsible for the `Receipts` table
- transaction-senders: Static File segment responsible for the `TransactionSenders` table
- account-change-sets: Static File segment responsible for the `AccountChangeSets` table
- storage-change-sets: Static File segment responsible for the `StorageChangeSets` table
Options:
-h, --help

View File

@@ -16,6 +16,7 @@ Arguments:
- receipts: Static File segment responsible for the `Receipts` table
- transaction-senders: Static File segment responsible for the `TransactionSenders` table
- account-change-sets: Static File segment responsible for the `AccountChangeSets` table
- storage-change-sets: Static File segment responsible for the `StorageChangeSets` table
<KEY>
The key to get content for

View File

@@ -15,6 +15,7 @@ Commands:
storages_history Store storage history in rocksdb instead of MDBX
transaction_hash_numbers Store transaction hash to number mapping in rocksdb instead of MDBX
account_history Store account history in rocksdb instead of MDBX
storage_changesets Store storage changesets in static files instead of the database
help Print this message or the help of the given subcommand(s)
Options:

View File

@@ -0,0 +1,170 @@
# op-reth db settings set storage_changesets
Store storage changesets in static files instead of the database
```bash
$ op-reth db settings set storage_changesets --help
```
```txt
Usage: op-reth db settings set storage_changesets [OPTIONS] <VALUE>
Arguments:
<VALUE>
[possible values: true, false]
Options:
-h, --help
Print help (see a summary with '-h')
Datadir:
--chain <CHAIN_OR_PATH>
The chain this node is running.
Possible values are either a built-in chain or the path to a chain specification file.
Built-in chains:
optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev
[default: optimism]
Logging:
--log.stdout.format <FORMAT>
The format to use for logs written to stdout
Possible values:
- json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- terminal: Represents terminal-friendly formatting for logs
[default: terminal]
--log.stdout.filter <FILTER>
The filter to use for logs written to stdout
[default: ]
--log.file.format <FORMAT>
The format to use for logs written to the log file
Possible values:
- json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- terminal: Represents terminal-friendly formatting for logs
[default: terminal]
--log.file.filter <FILTER>
The filter to use for logs written to the log file
[default: debug]
--log.file.directory <PATH>
The path to put log files in
[default: <CACHE_DIR>/logs]
--log.file.name <NAME>
The prefix name of the log files
[default: reth.log]
--log.file.max-size <SIZE>
The maximum size (in MB) of one log file
[default: 200]
--log.file.max-files <COUNT>
The maximum amount of log files that will be stored. If set to 0, background file logging is disabled
[default: 5]
--log.journald
Write logs to journald
--log.journald.filter <FILTER>
The filter to use for logs written to journald
[default: error]
--color <COLOR>
Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting
Possible values:
- always: Colors on
- auto: Auto-detect
- never: Colors off
[default: always]
--logs-otlp[=<URL>]
Enable `Opentelemetry` logs export to an OTLP endpoint.
If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317`
Example: --logs-otlp=http://collector:4318/v1/logs
[env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=]
--logs-otlp.filter <FILTER>
Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.
Example: --logs-otlp.filter=info,reth=debug
Defaults to INFO if not specified.
[default: info]
Display:
-v, --verbosity...
Set the minimum log level.
-v Errors
-vv Warnings
-vvv Info
-vvvv Debug
-vvvvv Traces (warning: very verbose!)
-q, --quiet
Silence all log output
Tracing:
--tracing-otlp[=<URL>]
Enable `Opentelemetry` tracing export to an OTLP endpoint.
If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317`
Example: --tracing-otlp=http://collector:4318/v1/traces
[env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=]
--tracing-otlp-protocol <PROTOCOL>
OTLP transport protocol to use for exporting traces and logs.
- `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path
Defaults to HTTP if not specified.
Possible values:
- http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path
- grpc: gRPC transport, port 4317
[env: OTEL_EXPORTER_OTLP_PROTOCOL=]
[default: http]
--tracing-otlp.filter <FILTER>
Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.
Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off
Defaults to TRACE if not specified.
[default: debug]
--tracing-otlp.sample-ratio <RATIO>
Trace sampling ratio to control the percentage of traces to export.
Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling
Example: --tracing-otlp.sample-ratio=0.0.
[env: OTEL_TRACES_SAMPLER_ARG=]
```

View File

@@ -18,6 +18,7 @@ Arguments:
- receipts: Static File segment responsible for the `Receipts` table
- transaction-senders: Static File segment responsible for the `TransactionSenders` table
- account-change-sets: Static File segment responsible for the `AccountChangeSets` table
- storage-change-sets: Static File segment responsible for the `StorageChangeSets` table
<BLOCK>
Block number to query

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -1036,6 +1036,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -1066,6 +1069,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
Rollup:
--rollup.sequencer <SEQUENCER>
Endpoint for the sequencer mempool (can be both HTTP and WS)

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -115,6 +115,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -145,6 +148,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -113,6 +113,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -143,6 +146,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -124,6 +124,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -154,6 +157,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -18,6 +18,7 @@ Arguments:
- receipts: Static File segment responsible for the `Receipts` table
- transaction-senders: Static File segment responsible for the `TransactionSenders` table
- account-change-sets: Static File segment responsible for the `AccountChangeSets` table
- storage-change-sets: Static File segment responsible for the `StorageChangeSets` table
Options:
--start-block <START_BLOCK>

View File

@@ -16,6 +16,7 @@ Arguments:
- receipts: Static File segment responsible for the `Receipts` table
- transaction-senders: Static File segment responsible for the `TransactionSenders` table
- account-change-sets: Static File segment responsible for the `AccountChangeSets` table
- storage-change-sets: Static File segment responsible for the `StorageChangeSets` table
Options:
-h, --help

View File

@@ -16,6 +16,7 @@ Arguments:
- receipts: Static File segment responsible for the `Receipts` table
- transaction-senders: Static File segment responsible for the `TransactionSenders` table
- account-change-sets: Static File segment responsible for the `AccountChangeSets` table
- storage-change-sets: Static File segment responsible for the `StorageChangeSets` table
<KEY>
The key to get content for

View File

@@ -15,6 +15,7 @@ Commands:
storages_history Store storage history in rocksdb instead of MDBX
transaction_hash_numbers Store transaction hash to number mapping in rocksdb instead of MDBX
account_history Store account history in rocksdb instead of MDBX
storage_changesets Store storage changesets in static files instead of the database
help Print this message or the help of the given subcommand(s)
Options:

View File

@@ -0,0 +1,170 @@
# reth db settings set storage_changesets
Store storage changesets in static files instead of the database
```bash
$ reth db settings set storage_changesets --help
```
```txt
Usage: reth db settings set storage_changesets [OPTIONS] <VALUE>
Arguments:
<VALUE>
[possible values: true, false]
Options:
-h, --help
Print help (see a summary with '-h')
Datadir:
--chain <CHAIN_OR_PATH>
The chain this node is running.
Possible values are either a built-in chain or the path to a chain specification file.
Built-in chains:
mainnet, sepolia, holesky, hoodi, dev
[default: mainnet]
Logging:
--log.stdout.format <FORMAT>
The format to use for logs written to stdout
Possible values:
- json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- terminal: Represents terminal-friendly formatting for logs
[default: terminal]
--log.stdout.filter <FILTER>
The filter to use for logs written to stdout
[default: ]
--log.file.format <FORMAT>
The format to use for logs written to the log file
Possible values:
- json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- terminal: Represents terminal-friendly formatting for logs
[default: terminal]
--log.file.filter <FILTER>
The filter to use for logs written to the log file
[default: debug]
--log.file.directory <PATH>
The path to put log files in
[default: <CACHE_DIR>/logs]
--log.file.name <NAME>
The prefix name of the log files
[default: reth.log]
--log.file.max-size <SIZE>
The maximum size (in MB) of one log file
[default: 200]
--log.file.max-files <COUNT>
The maximum amount of log files that will be stored. If set to 0, background file logging is disabled
[default: 5]
--log.journald
Write logs to journald
--log.journald.filter <FILTER>
The filter to use for logs written to journald
[default: error]
--color <COLOR>
Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting
Possible values:
- always: Colors on
- auto: Auto-detect
- never: Colors off
[default: always]
--logs-otlp[=<URL>]
Enable `Opentelemetry` logs export to an OTLP endpoint.
If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317`
Example: --logs-otlp=http://collector:4318/v1/logs
[env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=]
--logs-otlp.filter <FILTER>
Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.
Example: --logs-otlp.filter=info,reth=debug
Defaults to INFO if not specified.
[default: info]
Display:
-v, --verbosity...
Set the minimum log level.
-v Errors
-vv Warnings
-vvv Info
-vvvv Debug
-vvvvv Traces (warning: very verbose!)
-q, --quiet
Silence all log output
Tracing:
--tracing-otlp[=<URL>]
Enable `Opentelemetry` tracing export to an OTLP endpoint.
If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317`
Example: --tracing-otlp=http://collector:4318/v1/traces
[env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=]
--tracing-otlp-protocol <PROTOCOL>
OTLP transport protocol to use for exporting traces and logs.
- `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path
Defaults to HTTP if not specified.
Possible values:
- http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path
- grpc: gRPC transport, port 4317
[env: OTEL_EXPORTER_OTLP_PROTOCOL=]
[default: http]
--tracing-otlp.filter <FILTER>
Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.
Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off
Defaults to DEBUG if not specified.
[default: debug]
--tracing-otlp.sample-ratio <RATIO>
Trace sampling ratio to control the percentage of traces to export.
Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling
Example: --tracing-otlp.sample-ratio=0.0.
[env: OTEL_TRACES_SAMPLER_ARG=]
```

View File

@@ -18,6 +18,7 @@ Arguments:
- receipts: Static File segment responsible for the `Receipts` table
- transaction-senders: Static File segment responsible for the `TransactionSenders` table
- account-change-sets: Static File segment responsible for the `AccountChangeSets` table
- storage-change-sets: Static File segment responsible for the `StorageChangeSets` table
<BLOCK>
Block number to query

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -1036,6 +1036,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -1066,6 +1069,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
Ress:
--ress.enable
Enable support for `ress` subprotocol

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -115,6 +115,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -145,6 +148,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -108,6 +108,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -138,6 +141,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -113,6 +113,9 @@ Static Files:
--static-files.blocks-per-file.account-change-sets <BLOCKS_PER_FILE_ACCOUNT_CHANGE_SETS>
Number of blocks per file for the account changesets segment
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
@@ -143,6 +146,16 @@ Static Files:
[default: false]
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
[default: false]
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.

View File

@@ -159,6 +159,10 @@ export const opRethCliSidebar: SidebarItem = {
{
text: "op-reth db settings set account_history",
link: "/cli/op-reth/db/settings/set/account_history"
},
{
text: "op-reth db settings set storage_changesets",
link: "/cli/op-reth/db/settings/set/storage_changesets"
}
]
}

View File

@@ -163,6 +163,10 @@ export const rethCliSidebar: SidebarItem = {
{
text: "reth db settings set account_history",
link: "/cli/reth/db/settings/set/account_history"
},
{
text: "reth db settings set storage_changesets",
link: "/cli/reth/db/settings/set/storage_changesets"
}
]
}