chore(storage): rm storage settings, use only one (#22042)

Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com>
Dan Cline
2026-02-12 21:17:05 +00:00
committed by GitHub
parent e3d520b24f
commit e3fe6326bc
55 changed files with 396 additions and 2291 deletions

View File

@@ -19,7 +19,7 @@ use reth_node_builder::{
Node, NodeComponents, NodeComponentsBuilder, NodeTypes, NodeTypesWithDBAdapter,
};
use reth_node_core::{
args::{DatabaseArgs, DatadirArgs, RocksDbArgs, StaticFilesArgs, StorageArgs},
args::{DatabaseArgs, DatadirArgs, StaticFilesArgs, StorageArgs},
dirs::{ChainPath, DataDirPath},
};
use reth_provider::{
@@ -67,62 +67,23 @@ pub struct EnvironmentArgs<C: ChainSpecParser> {
#[command(flatten)]
pub static_files: StaticFilesArgs,
/// All `RocksDB` related arguments
#[command(flatten)]
pub rocksdb: RocksDbArgs,
/// Storage mode configuration (v2 vs v1/legacy)
#[command(flatten)]
pub storage: StorageArgs,
}
impl<C: ChainSpecParser> EnvironmentArgs<C> {
/// Returns the effective storage settings derived from `--storage.v2`, static-file, and
/// `RocksDB` CLI args.
/// Returns the effective storage settings derived from `--storage.v2`.
///
/// The base storage mode is determined by `--storage.v2`:
/// - When `--storage.v2` is set: uses [`StorageSettings::v2()`] defaults
/// - Otherwise: uses [`StorageSettings::v1()`] defaults
///
/// Individual `--static-files.*` and `--rocksdb.*` flags override the base when explicitly set.
/// - Otherwise: uses [`StorageSettings::base()`] defaults
pub fn storage_settings(&self) -> StorageSettings {
let mut s = if self.storage.v2 { StorageSettings::v2() } else { StorageSettings::base() };
// Apply static files overrides (only when explicitly set)
if let Some(v) = self.static_files.receipts {
s = s.with_receipts_in_static_files(v);
if self.storage.v2 {
StorageSettings::v2()
} else {
StorageSettings::base()
}
if let Some(v) = self.static_files.transaction_senders {
s = s.with_transaction_senders_in_static_files(v);
}
if let Some(v) = self.static_files.account_changesets {
s = s.with_account_changesets_in_static_files(v);
}
if let Some(v) = self.static_files.storage_changesets {
s = s.with_storage_changesets_in_static_files(v);
}
// Apply rocksdb overrides
// --rocksdb.all sets all rocksdb flags to true
if self.rocksdb.all {
s = s
.with_transaction_hash_numbers_in_rocksdb(true)
.with_storages_history_in_rocksdb(true)
.with_account_history_in_rocksdb(true);
}
// Individual rocksdb flags override --rocksdb.all when explicitly set
if let Some(v) = self.rocksdb.tx_hash {
s = s.with_transaction_hash_numbers_in_rocksdb(v);
}
if let Some(v) = self.rocksdb.storages_history {
s = s.with_storages_history_in_rocksdb(v);
}
if let Some(v) = self.rocksdb.account_history {
s = s.with_account_history_in_rocksdb(v);
}
s
}
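For illustration, a minimal sketch (not part of this diff) of the consolidated derivation; `StorageSettings::base()` is assumed to be the v1/legacy defaults, as the doc comment above implies, and the hypothetical `settings_for` helper stands in for the method:

```rust
use reth_storage_api::StorageSettings;

// Hypothetical free-function version of the selection above, for illustration only.
fn settings_for(v2_flag: bool) -> StorageSettings {
    if v2_flag { StorageSettings::v2() } else { StorageSettings::base() }
}

#[test]
fn v2_flag_is_the_only_input() {
    // With the per-table `--static-files.*` and `--rocksdb.*` overrides removed,
    // the single flag is the whole story.
    assert!(settings_for(true).is_v2());
    assert!(!settings_for(false).is_v2());
}
```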
/// Initializes environment according to [`AccessRights`] and returns an instance of

View File

@@ -23,7 +23,7 @@ impl Command {
/// Execute `db account-storage` command
pub fn execute<N: NodeTypesWithDB>(self, tool: &DbTool<N>) -> eyre::Result<()> {
let address = self.address;
let use_hashed_state = tool.provider_factory.cached_storage_settings().use_hashed_state;
let use_hashed_state = tool.provider_factory.cached_storage_settings().use_hashed_state();
let (slot_count, storage_size) = if use_hashed_state {
let hashed_address = keccak256(address);

View File

@@ -39,50 +39,12 @@ enum Subcommands {
#[derive(Debug, Clone, Copy, Subcommand)]
#[clap(rename_all = "snake_case")]
pub enum SetCommand {
/// Store receipts in static files instead of the database
Receipts {
#[clap(action(ArgAction::Set))]
value: bool,
},
/// Store transaction senders in static files instead of the database
TransactionSenders {
#[clap(action(ArgAction::Set))]
value: bool,
},
/// Store account changesets in static files instead of the database
AccountChangesets {
#[clap(action(ArgAction::Set))]
value: bool,
},
/// Store storage history in rocksdb instead of MDBX
StoragesHistory {
#[clap(action(ArgAction::Set))]
value: bool,
},
/// Store transaction hash to number mapping in rocksdb instead of MDBX
TransactionHashNumbers {
#[clap(action(ArgAction::Set))]
value: bool,
},
/// Store account history in rocksdb instead of MDBX
AccountHistory {
#[clap(action(ArgAction::Set))]
value: bool,
},
/// Store storage changesets in static files instead of the database
StorageChangesets {
#[clap(action(ArgAction::Set))]
value: bool,
},
/// Use hashed state tables (HashedAccounts/HashedStorages) as canonical state
/// Enable or disable v2 storage layout
///
When enabled, execution writes directly to hashed tables, eliminating the need for
/// separate hashing stages. State reads come from hashed tables.
///
/// WARNING: Changing this setting in either direction requires re-syncing the database.
/// Enabling on an existing plain-state database leaves hashed tables empty.
/// Disabling on an existing hashed-state database leaves plain tables empty.
UseHashedState {
/// When enabled, uses static files for receipts/senders/changesets and RocksDB for
/// history indices and transaction hashes. When disabled, uses v1/legacy layout (everything in
/// MDBX).
V2 {
#[clap(action(ArgAction::Set))]
value: bool,
},
@@ -125,87 +87,18 @@ impl Command {
println!("No storage settings found, creating new settings.");
}
let mut settings @ StorageSettings {
receipts_in_static_files: _,
transaction_senders_in_static_files: _,
storages_history_in_rocksdb: _,
transaction_hash_numbers_in_rocksdb: _,
account_history_in_rocksdb: _,
account_changesets_in_static_files: _,
storage_changesets_in_static_files: _,
use_hashed_state: _,
} = settings.unwrap_or_else(StorageSettings::v1);
let mut settings @ StorageSettings { storage_v2: _ } =
settings.unwrap_or_else(StorageSettings::v1);
// Update the setting based on the key
match cmd {
SetCommand::Receipts { value } => {
if settings.receipts_in_static_files == value {
println!("receipts_in_static_files is already set to {}", value);
SetCommand::V2 { value } => {
if settings.storage_v2 == value {
println!("storage_v2 is already set to {}", value);
return Ok(());
}
settings.receipts_in_static_files = value;
println!("Set receipts_in_static_files = {}", value);
}
SetCommand::TransactionSenders { value } => {
if settings.transaction_senders_in_static_files == value {
println!("transaction_senders_in_static_files is already set to {}", value);
return Ok(());
}
settings.transaction_senders_in_static_files = value;
println!("Set transaction_senders_in_static_files = {}", value);
}
SetCommand::AccountChangesets { value } => {
if settings.account_changesets_in_static_files == value {
println!("account_changesets_in_static_files is already set to {}", value);
return Ok(());
}
settings.account_changesets_in_static_files = value;
println!("Set account_changesets_in_static_files = {}", value);
}
SetCommand::StoragesHistory { value } => {
if settings.storages_history_in_rocksdb == value {
println!("storages_history_in_rocksdb is already set to {}", value);
return Ok(());
}
settings.storages_history_in_rocksdb = value;
println!("Set storages_history_in_rocksdb = {}", value);
}
SetCommand::TransactionHashNumbers { value } => {
if settings.transaction_hash_numbers_in_rocksdb == value {
println!("transaction_hash_numbers_in_rocksdb is already set to {}", value);
return Ok(());
}
settings.transaction_hash_numbers_in_rocksdb = value;
println!("Set transaction_hash_numbers_in_rocksdb = {}", value);
}
SetCommand::AccountHistory { value } => {
if settings.account_history_in_rocksdb == value {
println!("account_history_in_rocksdb is already set to {}", value);
return Ok(());
}
settings.account_history_in_rocksdb = value;
println!("Set account_history_in_rocksdb = {}", value);
}
SetCommand::StorageChangesets { value } => {
if settings.storage_changesets_in_static_files == value {
println!("storage_changesets_in_static_files is already set to {}", value);
return Ok(());
}
settings.storage_changesets_in_static_files = value;
println!("Set storage_changesets_in_static_files = {}", value);
}
SetCommand::UseHashedState { value } => {
if settings.use_hashed_state == value {
println!("use_hashed_state is already set to {}", value);
return Ok(());
}
if settings.use_hashed_state && !value {
println!("WARNING: Disabling use_hashed_state on an existing hashed-state database requires a full resync.");
} else {
println!("WARNING: Enabling use_hashed_state on an existing plain-state database requires a full resync.");
}
settings.use_hashed_state = value;
println!("Set use_hashed_state = {}", value);
settings.storage_v2 = value;
println!("Set storage_v2 = {}", value);
}
}
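The `settings @ StorageSettings { storage_v2: _ }` binding above is an exhaustive destructure: it names every field, so adding a field to `StorageSettings` later turns this line into a compile error and forces the settings command to be revisited. A standalone sketch of the same pattern, using a hypothetical type for illustration:

```rust
struct Settings {
    storage_v2: bool,
}

fn load(maybe: Option<Settings>) -> Settings {
    // `all @ Pattern { .. }` binds the whole value while also spelling out every
    // field; a new field on `Settings` breaks this destructure at compile time.
    let all @ Settings { storage_v2: _ } = maybe.unwrap_or(Settings { storage_v2: false });
    all
}
```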

View File

@@ -63,7 +63,7 @@ impl Command {
address: Address,
limit: usize,
) -> eyre::Result<()> {
let use_hashed_state = tool.provider_factory.cached_storage_settings().use_hashed_state;
let use_hashed_state = tool.provider_factory.cached_storage_settings().use_hashed_state();
let entries = tool.provider_factory.db_ref().view(|tx| {
let (account, walker_entries) = if use_hashed_state {
@@ -145,7 +145,7 @@ impl Command {
// Check storage settings to determine where history is stored
let storage_settings = tool.provider_factory.cached_storage_settings();
let history_in_rocksdb = storage_settings.storages_history_in_rocksdb;
let history_in_rocksdb = storage_settings.storage_v2;
// For historical queries, enumerate keys from history indices only
// (not PlainStorageState, which reflects current state)

View File

@@ -10,8 +10,8 @@ use reth_node_builder::NodeBuilder;
use reth_node_core::{
args::{
DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, EraArgs, MetricArgs,
NetworkArgs, PayloadBuilderArgs, PruningArgs, RocksDbArgs, RpcServerArgs, StaticFilesArgs,
StorageArgs, TxPoolArgs,
NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, StaticFilesArgs, StorageArgs,
TxPoolArgs,
},
node_config::NodeConfig,
version,
@@ -103,10 +103,6 @@ pub struct NodeCommand<C: ChainSpecParser, Ext: clap::Args + fmt::Debug = NoArgs
#[command(flatten)]
pub pruning: PruningArgs,
/// All `RocksDB` table routing arguments
#[command(flatten)]
pub rocksdb: RocksDbArgs,
/// Engine cli arguments
#[command(flatten, next_help_heading = "Engine")]
pub engine: EngineArgs,
@@ -175,7 +171,6 @@ where
db,
dev,
pruning,
rocksdb,
engine,
era,
static_files,
@@ -183,9 +178,6 @@ where
ext,
} = self;
// Validate RocksDB arguments
rocksdb.validate()?;
// set up node config
let mut node_config = NodeConfig {
datadir,
@@ -201,7 +193,6 @@ where
db,
dev,
pruning,
rocksdb,
engine,
era,
static_files,

View File

@@ -133,7 +133,7 @@ impl<C: ChainSpecParser> Command<C> {
reset_stage_checkpoint(tx, StageId::SenderRecovery)?;
}
StageEnum::Execution => {
if provider_rw.cached_storage_settings().use_hashed_state {
if provider_rw.cached_storage_settings().use_hashed_state() {
tx.clear::<tables::HashedAccounts>()?;
tx.clear::<tables::HashedStorages>()?;
reset_stage_checkpoint(tx, StageId::AccountHashing)?;
@@ -187,7 +187,7 @@ impl<C: ChainSpecParser> Command<C> {
let settings = provider_rw.cached_storage_settings();
let rocksdb = tool.provider_factory.rocksdb_provider();
if settings.account_history_in_rocksdb {
if settings.storage_v2 {
rocksdb.clear::<tables::AccountsHistory>()?;
} else {
tx.clear::<tables::AccountsHistory>()?;
@@ -204,7 +204,7 @@ impl<C: ChainSpecParser> Command<C> {
let settings = provider_rw.cached_storage_settings();
let rocksdb = tool.provider_factory.rocksdb_provider();
if settings.storages_history_in_rocksdb {
if settings.storage_v2 {
rocksdb.clear::<tables::StoragesHistory>()?;
} else {
tx.clear::<tables::StoragesHistory>()?;
@@ -218,7 +218,7 @@ impl<C: ChainSpecParser> Command<C> {
)?;
}
StageEnum::TxLookup => {
if provider_rw.cached_storage_settings().transaction_hash_numbers_in_rocksdb {
if provider_rw.cached_storage_settings().storage_v2 {
tool.provider_factory
.rocksdb_provider()
.clear::<tables::TransactionHashNumbers>()?;

View File

@@ -10,7 +10,6 @@ use jsonrpsee::core::client::ClientT;
use reth_chainspec::{ChainSpec, ChainSpecBuilder, MAINNET};
use reth_db::tables;
use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet, E2ETestSetupBuilder};
use reth_node_core::args::RocksDbArgs;
use reth_node_ethereum::EthereumNode;
use reth_payload_builder::EthPayloadBuilderAttributes;
use reth_provider::RocksDBProviderFactory;
@@ -96,22 +95,6 @@ fn test_attributes_generator(timestamp: u64) -> EthPayloadBuilderAttributes {
EthPayloadBuilderAttributes::new(B256::ZERO, attributes)
}
/// Verifies that `RocksDB` CLI defaults are `None` (deferred to storage mode).
#[test]
fn test_rocksdb_defaults_are_none() {
let args = RocksDbArgs::default();
assert!(args.tx_hash.is_none(), "tx_hash default should be None (deferred to --storage.v2)");
assert!(
args.storages_history.is_none(),
"storages_history default should be None (deferred to --storage.v2)"
);
assert!(
args.account_history.is_none(),
"account_history default should be None (deferred to --storage.v2)"
);
}
/// Smoke test: node boots with `RocksDB` routing enabled.
#[tokio::test]
async fn test_rocksdb_node_startup() -> Result<()> {
@@ -477,7 +460,7 @@ async fn test_rocksdb_pending_tx_not_in_storage() -> Result<()> {
///
/// This test exercises `unwind_trie_state_from` which previously failed with
/// `UnsortedInput` errors because it read changesets directly from MDBX tables
/// instead of using storage-aware methods that check `storage_changesets_in_static_files`.
/// instead of using storage-aware methods that check `is_v2()`.
#[tokio::test]
async fn test_rocksdb_reorg_unwind() -> Result<()> {
reth_tracing::init_test_tracing();

View File

@@ -94,7 +94,7 @@ where
if chain_spec.is_optimism() { EngineApiKind::OpStack } else { EngineApiKind::Ethereum };
let downloader = BasicBlockDownloader::new(client, consensus.clone());
let use_hashed_state = provider.cached_storage_settings().use_hashed_state;
let use_hashed_state = provider.cached_storage_settings().use_hashed_state();
let persistence_handle =
PersistenceHandle::<N::Primitives>::spawn_service(provider, pruner, sync_metrics_tx);

View File

@@ -221,7 +221,7 @@ impl TestHarness {
EngineApiKind::Ethereum,
evm_config,
changeset_cache,
provider.cached_storage_settings().use_hashed_state,
provider.cached_storage_settings().use_hashed_state(),
);
let block_builder = TestBlockBuilder::default().with_chain_spec((*chain_spec).clone());

View File

@@ -76,10 +76,6 @@ pub use era::{DefaultEraHost, EraArgs, EraSourceArgs};
mod static_files;
pub use static_files::{StaticFilesArgs, MINIMAL_BLOCKS_PER_FILE};
/// `RocksDbArgs` for configuring RocksDB table routing.
mod rocksdb;
pub use rocksdb::{RocksDbArgs, RocksDbArgsError};
/// `StorageArgs` for configuring storage settings.
mod storage;
pub use storage::StorageArgs;

View File

@@ -1,160 +0,0 @@
//! clap [Args](clap::Args) for `RocksDB` table routing configuration
use clap::{ArgAction, Args};
/// Parameters for `RocksDB` table routing configuration.
///
/// These flags control which database tables are stored in `RocksDB` instead of MDBX.
/// All flags are genesis-initialization-only: changing them after genesis requires a re-sync.
///
/// When `--storage.v2` is used, the defaults for these flags change to enable `RocksDB` routing.
/// Individual flags can still override those defaults when explicitly set.
#[derive(Debug, Args, PartialEq, Eq, Clone, Copy, Default)]
#[command(next_help_heading = "RocksDB")]
pub struct RocksDbArgs {
/// Route all supported tables to `RocksDB` instead of MDBX.
///
/// This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables.
/// Cannot be combined with individual flags set to false.
#[arg(long = "rocksdb.all", action = ArgAction::SetTrue)]
pub all: bool,
/// Route tx hash -> number table to `RocksDB` instead of MDBX.
///
/// This is a genesis-initialization-only flag: changing it after genesis requires a re-sync.
/// Defaults to the base storage mode (v1: false, v2: true).
#[arg(long = "rocksdb.tx-hash", action = ArgAction::Set)]
pub tx_hash: Option<bool>,
/// Route storages history tables to `RocksDB` instead of MDBX.
///
/// This is a genesis-initialization-only flag: changing it after genesis requires a re-sync.
/// Defaults to the base storage mode (v1: false, v2: true).
#[arg(long = "rocksdb.storages-history", action = ArgAction::Set)]
pub storages_history: Option<bool>,
/// Route account history tables to `RocksDB` instead of MDBX.
///
/// This is a genesis-initialization-only flag: changing it after genesis requires a re-sync.
/// Defaults to the base storage mode (v1: false, v2: true).
#[arg(long = "rocksdb.account-history", action = ArgAction::Set)]
pub account_history: Option<bool>,
}
impl RocksDbArgs {
/// Validates the `RocksDB` arguments.
///
/// Returns an error if `--rocksdb.all` is used with any individual flag explicitly set to
/// `false`.
pub const fn validate(&self) -> Result<(), RocksDbArgsError> {
if self.all {
if matches!(self.tx_hash, Some(false)) {
return Err(RocksDbArgsError::ConflictingFlags("tx-hash"));
}
if matches!(self.storages_history, Some(false)) {
return Err(RocksDbArgsError::ConflictingFlags("storages-history"));
}
if matches!(self.account_history, Some(false)) {
return Err(RocksDbArgsError::ConflictingFlags("account-history"));
}
}
Ok(())
}
}
/// Error type for `RocksDB` argument validation.
#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
pub enum RocksDbArgsError {
/// `--rocksdb.all` cannot be combined with an individual flag set to false.
#[error("--rocksdb.all cannot be combined with --rocksdb.{0}=false")]
ConflictingFlags(&'static str),
}
#[cfg(test)]
mod tests {
use super::*;
use clap::Parser;
#[derive(Parser)]
struct CommandParser<T: Args> {
#[command(flatten)]
args: T,
}
#[test]
fn test_default_rocksdb_args() {
let args = CommandParser::<RocksDbArgs>::parse_from(["reth"]).args;
assert_eq!(args, RocksDbArgs::default());
assert!(!args.all);
assert!(args.tx_hash.is_none());
assert!(args.storages_history.is_none());
assert!(args.account_history.is_none());
}
#[test]
fn test_parse_all_flag() {
let args = CommandParser::<RocksDbArgs>::parse_from(["reth", "--rocksdb.all"]).args;
assert!(args.all);
assert!(args.tx_hash.is_none());
}
#[test]
fn test_parse_individual_flags() {
let args = CommandParser::<RocksDbArgs>::parse_from([
"reth",
"--rocksdb.tx-hash=true",
"--rocksdb.storages-history=false",
"--rocksdb.account-history=true",
])
.args;
assert!(!args.all);
assert_eq!(args.tx_hash, Some(true));
assert_eq!(args.storages_history, Some(false));
assert_eq!(args.account_history, Some(true));
}
#[test]
fn test_validate_all_with_none_ok() {
let args =
RocksDbArgs { all: true, tx_hash: None, storages_history: None, account_history: None };
assert!(args.validate().is_ok());
}
#[test]
fn test_validate_all_with_true_ok() {
let args = RocksDbArgs {
all: true,
tx_hash: Some(true),
storages_history: Some(true),
account_history: Some(true),
};
assert!(args.validate().is_ok());
}
#[test]
fn test_validate_all_with_false_errors() {
let args = RocksDbArgs {
all: true,
tx_hash: Some(false),
storages_history: None,
account_history: None,
};
assert_eq!(args.validate(), Err(RocksDbArgsError::ConflictingFlags("tx-hash")));
let args = RocksDbArgs {
all: true,
tx_hash: None,
storages_history: Some(false),
account_history: None,
};
assert_eq!(args.validate(), Err(RocksDbArgsError::ConflictingFlags("storages-history")));
let args = RocksDbArgs {
all: true,
tx_hash: None,
storages_history: None,
account_history: Some(false),
};
assert_eq!(args.validate(), Err(RocksDbArgsError::ConflictingFlags("account-history")));
}
}

View File

@@ -9,9 +9,6 @@ use reth_config::config::{BlocksPerFileConfig, StaticFilesConfig};
pub const MINIMAL_BLOCKS_PER_FILE: u64 = 10000;
/// Parameters for static files configuration
///
/// When `--storage.v2` is used, the defaults for the storage flags change to enable static file
/// storage. Individual flags can still override those defaults when explicitly set.
#[derive(Debug, Args, PartialEq, Eq, Clone, Copy, Default)]
#[command(next_help_heading = "Static Files")]
pub struct StaticFilesArgs {
@@ -38,53 +35,6 @@ pub struct StaticFilesArgs {
/// Number of blocks per file for the storage changesets segment.
#[arg(long = "static-files.blocks-per-file.storage-change-sets")]
pub blocks_per_file_storage_change_sets: Option<u64>,
/// Store receipts in static files instead of the database.
///
/// When enabled, receipts will be written to static files on disk instead of the database.
///
/// Note: This setting can only be configured at genesis initialization. Once
/// the node has been initialized, changing this flag requires re-syncing from scratch.
///
/// Defaults to the base storage mode (v1: false, v2: true).
#[arg(long = "static-files.receipts", action = clap::ArgAction::Set)]
pub receipts: Option<bool>,
/// Store transaction senders in static files instead of the database.
///
/// When enabled, transaction senders will be written to static files on disk instead of the
/// database.
///
/// Note: This setting can only be configured at genesis initialization. Once
/// the node has been initialized, changing this flag requires re-syncing from scratch.
///
/// Defaults to the base storage mode (v1: false, v2: true).
#[arg(long = "static-files.transaction-senders", action = clap::ArgAction::Set)]
pub transaction_senders: Option<bool>,
/// Store account changesets in static files.
///
/// When enabled, account changesets will be written to static files on disk instead of the
/// database.
///
/// Note: This setting can only be configured at genesis initialization. Once
/// the node has been initialized, changing this flag requires re-syncing from scratch.
///
/// Defaults to the base storage mode (v1: false, v2: true).
#[arg(long = "static-files.account-change-sets", action = clap::ArgAction::Set)]
pub account_changesets: Option<bool>,
/// Store storage changesets in static files.
///
/// When enabled, storage changesets will be written to static files on disk instead of the
/// database.
///
/// Note: This setting can only be configured at genesis initialization. Once
/// the node has been initialized, changing this flag requires re-syncing from scratch.
///
/// Defaults to the base storage mode (v1: false, v2: true).
#[arg(long = "static-files.storage-change-sets", action = clap::ArgAction::Set)]
pub storage_changesets: Option<bool>,
}
impl StaticFilesArgs {

View File

@@ -25,16 +25,6 @@ pub struct StorageArgs {
/// flags.
#[arg(long = "storage.v2", action = ArgAction::SetTrue)]
pub v2: bool,
/// Use hashed state tables (`HashedAccounts`/`HashedStorages`) as canonical state
/// representation instead of plain state tables.
///
/// When enabled, execution writes directly to hashed tables, eliminating the need for
/// separate hashing stages. This should only be enabled for new databases.
///
/// WARNING: Changing this setting on an existing database requires a full resync.
#[arg(long = "storage.use-hashed-state", default_value_t = false)]
pub use_hashed_state: bool,
}
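For illustration only, a parse sketch modeled on the removed `RocksDbArgs` tests earlier in this diff; the `CommandParser` wrapper is the same test-only helper shown in that deleted file and is an assumption here:

```rust
use clap::{Args, Parser};

#[derive(Parser)]
struct CommandParser<T: Args> {
    #[command(flatten)]
    args: T,
}

#[test]
fn storage_v2_is_the_only_storage_flag() {
    // Omitting the flag keeps the v1/legacy layout.
    let defaults = CommandParser::<StorageArgs>::parse_from(["reth"]).args;
    assert!(!defaults.v2);

    let v2 = CommandParser::<StorageArgs>::parse_from(["reth", "--storage.v2"]).args;
    assert!(v2.v2);
}
```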
#[cfg(test)]

View File

@@ -3,7 +3,7 @@
use crate::{
args::{
DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, NetworkArgs, PayloadBuilderArgs,
PruningArgs, RocksDbArgs, RpcServerArgs, StaticFilesArgs, StorageArgs, TxPoolArgs,
PruningArgs, RpcServerArgs, StaticFilesArgs, StorageArgs, TxPoolArgs,
},
dirs::{ChainPath, DataDirPath},
utils::get_single_header,
@@ -152,9 +152,6 @@ pub struct NodeConfig<ChainSpec> {
/// All static files related arguments
pub static_files: StaticFilesArgs,
/// All `RocksDB` table routing arguments
pub rocksdb: RocksDbArgs,
/// All storage related arguments with --storage prefix
pub storage: StorageArgs,
}
@@ -188,7 +185,6 @@ impl<ChainSpec> NodeConfig<ChainSpec> {
engine: EngineArgs::default(),
era: EraArgs::default(),
static_files: StaticFilesArgs::default(),
rocksdb: RocksDbArgs::default(),
storage: StorageArgs::default(),
}
}
@@ -264,7 +260,6 @@ impl<ChainSpec> NodeConfig<ChainSpec> {
engine,
era,
static_files,
rocksdb,
storage,
..
} = self;
@@ -285,7 +280,6 @@ impl<ChainSpec> NodeConfig<ChainSpec> {
engine,
era,
static_files,
rocksdb,
storage,
}
}
@@ -369,49 +363,17 @@ impl<ChainSpec> NodeConfig<ChainSpec> {
self.pruning.prune_config(&self.chain)
}
/// Returns the effective storage settings derived from `--storage.v2`, static-file, and
/// `RocksDB` CLI args.
/// Returns the effective storage settings derived from `--storage.v2`.
///
/// The base storage mode is determined by `--storage.v2`:
/// - When `--storage.v2` is set: uses [`StorageSettings::v2()`] defaults
/// - Otherwise: uses [`StorageSettings::v1()`] defaults
///
/// Individual `--static-files.*` and `--rocksdb.*` flags override the base when explicitly set.
/// - Otherwise: uses [`StorageSettings::base()`] defaults
pub const fn storage_settings(&self) -> StorageSettings {
let mut s = if self.storage.v2 { StorageSettings::v2() } else { StorageSettings::base() };
// Apply static files overrides (only when explicitly set)
s = s
.with_receipts_in_static_files_opt(self.static_files.receipts)
.with_transaction_senders_in_static_files_opt(self.static_files.transaction_senders)
.with_account_changesets_in_static_files_opt(self.static_files.account_changesets)
.with_storage_changesets_in_static_files_opt(self.static_files.storage_changesets);
// Apply rocksdb overrides
// --rocksdb.all sets all rocksdb flags to true
if self.rocksdb.all {
s = s
.with_transaction_hash_numbers_in_rocksdb(true)
.with_storages_history_in_rocksdb(true)
.with_account_history_in_rocksdb(true);
if self.storage.v2 {
StorageSettings::v2()
} else {
StorageSettings::base()
}
// Individual rocksdb flags override --rocksdb.all when explicitly set
s = s
.with_transaction_hash_numbers_in_rocksdb_opt(self.rocksdb.tx_hash)
.with_storages_history_in_rocksdb_opt(self.rocksdb.storages_history)
.with_account_history_in_rocksdb_opt(self.rocksdb.account_history);
s = s.with_use_hashed_state(self.storage.use_hashed_state);
if s.use_hashed_state {
s = s.with_storage_changesets_in_static_files(true);
}
if s.storage_changesets_in_static_files {
s = s.with_use_hashed_state(true);
}
s
}
/// Returns the max block that the node should run to, looking it up from the network if
@@ -608,7 +570,6 @@ impl<ChainSpec> NodeConfig<ChainSpec> {
engine: self.engine,
era: self.era,
static_files: self.static_files,
rocksdb: self.rocksdb,
storage: self.storage,
}
}
@@ -651,7 +612,6 @@ impl<ChainSpec> Clone for NodeConfig<ChainSpec> {
engine: self.engine.clone(),
era: self.era.clone(),
static_files: self.static_files,
rocksdb: self.rocksdb,
storage: self.storage,
}
}

View File

@@ -75,7 +75,7 @@ where
// Check where account history indices are stored
#[cfg(all(unix, feature = "rocksdb"))]
if provider.cached_storage_settings().account_history_in_rocksdb {
if provider.cached_storage_settings().storage_v2 {
return self.prune_rocksdb(provider, input, range, range_end);
}
@@ -405,9 +405,7 @@ mod tests {
let segment = AccountHistory::new(prune_mode);
let provider = db.factory.database_provider_rw().unwrap();
provider.set_storage_settings_cache(
StorageSettings::default().with_account_changesets_in_static_files(false),
);
provider.set_storage_settings_cache(StorageSettings::v1());
let result = segment.prune(&provider, input).unwrap();
limiter.increment_deleted_entries_count_by(result.pruned);
@@ -508,7 +506,11 @@ mod tests {
test_prune(1400, 3, (PruneProgress::Finished, 804));
}
/// Tests the `prune_static_files` code path. On unix with rocksdb feature, v2 storage
/// routes to `prune_rocksdb` instead, so this test only runs without rocksdb (the
/// `prune_rocksdb_path` test covers that configuration).
#[test]
#[cfg(not(all(unix, feature = "rocksdb")))]
fn prune_static_file() {
let db = TestStageDB::default();
let mut rng = generators::rng();
@@ -564,9 +566,7 @@ mod tests {
let segment = AccountHistory::new(prune_mode);
let provider = db.factory.database_provider_rw().unwrap();
provider.set_storage_settings_cache(
StorageSettings::default().with_account_changesets_in_static_files(true),
);
provider.set_storage_settings_cache(StorageSettings::v2());
let result = segment.prune(&provider, input).unwrap();
limiter.increment_deleted_entries_count_by(result.pruned);
@@ -714,11 +714,7 @@ mod tests {
PruneInput { previous_checkpoint: None, to_block, limiter: PruneLimiter::default() };
let segment = AccountHistory::new(prune_mode);
db.factory.set_storage_settings_cache(
StorageSettings::default()
.with_account_changesets_in_static_files(true)
.with_account_history_in_rocksdb(true),
);
db.factory.set_storage_settings_cache(StorageSettings::v2());
let provider = db.factory.database_provider_rw().unwrap();
let result = segment.prune(&provider, input).unwrap();
@@ -832,9 +828,7 @@ mod tests {
let segment = AccountHistory::new(prune_mode);
let provider = db.factory.database_provider_rw().unwrap();
provider.set_storage_settings_cache(
StorageSettings::default().with_account_changesets_in_static_files(false),
);
provider.set_storage_settings_cache(StorageSettings::v1());
let result = segment.prune(&provider, input).unwrap();
// Should report that there's more data
@@ -892,9 +886,7 @@ mod tests {
};
let provider2 = db.factory.database_provider_rw().unwrap();
provider2.set_storage_settings_cache(
StorageSettings::default().with_account_changesets_in_static_files(false),
);
provider2.set_storage_settings_cache(StorageSettings::v1());
let result2 = segment.prune(&provider2, input2).unwrap();
assert!(result2.progress.is_finished(), "Second run should complete");

View File

@@ -76,7 +76,7 @@ where
// Check where storage history indices are stored
#[cfg(all(unix, feature = "rocksdb"))]
if provider.cached_storage_settings().storages_history_in_rocksdb {
if provider.cached_storage_settings().storage_v2 {
return self.prune_rocksdb(provider, input, range, range_end);
}
@@ -413,9 +413,7 @@ mod tests {
let segment = StorageHistory::new(prune_mode);
let provider = db.factory.database_provider_rw().unwrap();
provider.set_storage_settings_cache(
StorageSettings::default().with_storage_changesets_in_static_files(false),
);
provider.set_storage_settings_cache(StorageSettings::v1());
let result = segment.prune(&provider, input).unwrap();
limiter.increment_deleted_entries_count_by(result.pruned);
@@ -520,7 +518,11 @@ mod tests {
test_prune(1200, 3, (PruneProgress::Finished, 202));
}
/// Tests the `prune_static_files` code path. On unix with rocksdb feature, v2 storage
/// routes to `prune_rocksdb` instead, so this test only runs without rocksdb (the
/// `prune_rocksdb_path` test covers that configuration).
#[test]
#[cfg(not(all(unix, feature = "rocksdb")))]
fn prune_static_file() {
let db = TestStageDB::default();
let mut rng = generators::rng();
@@ -577,9 +579,7 @@ mod tests {
let segment = StorageHistory::new(prune_mode);
let provider = db.factory.database_provider_rw().unwrap();
provider.set_storage_settings_cache(
StorageSettings::default().with_storage_changesets_in_static_files(true),
);
provider.set_storage_settings_cache(StorageSettings::v2());
let result = segment.prune(&provider, input).unwrap();
limiter.increment_deleted_entries_count_by(result.pruned);
@@ -739,9 +739,7 @@ mod tests {
let segment = StorageHistory::new(prune_mode);
let provider = db.factory.database_provider_rw().unwrap();
provider.set_storage_settings_cache(
StorageSettings::default().with_storage_changesets_in_static_files(false),
);
provider.set_storage_settings_cache(StorageSettings::v1());
let result = segment.prune(&provider, input).unwrap();
// Should report that there's more data
@@ -793,9 +791,7 @@ mod tests {
};
let provider2 = db.factory.database_provider_rw().unwrap();
provider2.set_storage_settings_cache(
StorageSettings::default().with_storage_changesets_in_static_files(false),
);
provider2.set_storage_settings_cache(StorageSettings::v1());
let result2 = segment.prune(&provider2, input2).unwrap();
assert!(result2.progress.is_finished(), "Second run should complete");
@@ -895,11 +891,7 @@ mod tests {
let segment = StorageHistory::new(prune_mode);
let provider = db.factory.database_provider_rw().unwrap();
provider.set_storage_settings_cache(
StorageSettings::default()
.with_storage_changesets_in_static_files(true)
.with_storages_history_in_rocksdb(true),
);
provider.set_storage_settings_cache(StorageSettings::v2());
let result = segment.prune(&provider, input).unwrap();
provider.commit().expect("commit");

View File

@@ -96,7 +96,7 @@ where
// Check where transaction hash numbers are stored
#[cfg(all(unix, feature = "rocksdb"))]
if provider.cached_storage_settings().transaction_hash_numbers_in_rocksdb {
if provider.cached_storage_settings().storage_v2 {
return self.prune_rocksdb(provider, input, start, end);
}
@@ -491,9 +491,7 @@ mod tests {
let segment = TransactionLookup::new(prune_mode);
// Enable RocksDB storage for transaction hash numbers
db.factory.set_storage_settings_cache(
StorageSettings::v1().with_transaction_hash_numbers_in_rocksdb(true),
);
db.factory.set_storage_settings_cache(StorageSettings::v2());
let provider = db.factory.database_provider_rw().unwrap();
let result = segment.prune(&provider, input).unwrap();
@@ -578,9 +576,7 @@ mod tests {
}
// Enable RocksDB storage for transaction hash numbers
db.factory.set_storage_settings_cache(
StorageSettings::v1().with_transaction_hash_numbers_in_rocksdb(true),
);
db.factory.set_storage_settings_cache(StorageSettings::v2());
let to_block: BlockNumber = 6;
let prune_mode = PruneMode::Before(to_block);

View File

@@ -467,7 +467,7 @@ where
// state is then written separately below.
provider.write_state(&state, OriginalValuesKnown::Yes, StateWriteConfig::default())?;
if provider.cached_storage_settings().use_hashed_state {
if provider.cached_storage_settings().use_hashed_state() {
let hashed_state = state.hash_state_slow::<KeccakKeyHasher>();
provider.write_hashed_state(&hashed_state.into_sorted())?;
}
@@ -1269,8 +1269,7 @@ mod tests {
// but no receipt data is written.
let factory = create_test_provider_factory();
factory
.set_storage_settings_cache(StorageSettings::v1().with_receipts_in_static_files(true));
factory.set_storage_settings_cache(StorageSettings::v2());
// Setup with block 1
let provider_rw = factory.database_provider_rw().unwrap();

View File

@@ -159,7 +159,7 @@ where
// If using hashed state as canonical, execution already writes to `HashedAccounts`,
// so this stage becomes a no-op.
if provider.cached_storage_settings().use_hashed_state {
if provider.cached_storage_settings().use_hashed_state() {
return Ok(ExecOutput::done(input.checkpoint().with_block_number(input.target())));
}

View File

@@ -89,7 +89,7 @@ where
// If use_hashed_state is enabled, execution writes directly to `HashedStorages`,
// so this stage becomes a no-op.
if provider.cached_storage_settings().use_hashed_state {
if provider.cached_storage_settings().use_hashed_state() {
return Ok(ExecOutput::done(input.checkpoint().with_block_number(input.target())));
}

View File

@@ -103,7 +103,7 @@ where
let mut range = input.next_block_range();
let first_sync = input.checkpoint().block_number == 0;
let use_rocksdb = provider.cached_storage_settings().account_history_in_rocksdb;
let use_rocksdb = provider.cached_storage_settings().storage_v2;
// On first sync we might have history coming from genesis. We clear the table since it's
// faster to rebuild from scratch.
@@ -122,7 +122,7 @@ where
info!(target: "sync::stages::index_account_history::exec", ?first_sync, ?use_rocksdb, "Collecting indices");
let collector = if provider.cached_storage_settings().account_changesets_in_static_files {
let collector = if provider.cached_storage_settings().storage_v2 {
// Use the provider-based collection that can read from static files.
collect_account_history_indices(provider, range.clone(), &self.etl_config)?
} else {
@@ -666,32 +666,43 @@ mod tests {
#[cfg(all(unix, feature = "rocksdb"))]
mod rocksdb_tests {
use super::*;
use reth_provider::RocksDBProviderFactory;
use reth_provider::{
providers::StaticFileWriter, RocksDBProviderFactory, StaticFileProviderFactory,
};
use reth_static_file_types::StaticFileSegment;
use reth_storage_api::StorageSettings;
/// Sets up v2 account test data: writes block body indices to MDBX and
/// account changesets to static files (matching realistic v2 layout).
fn setup_v2_account_data(db: &TestStageDB, block_range: std::ops::RangeInclusive<u64>) {
db.factory.set_storage_settings_cache(StorageSettings::v2());
db.commit(|tx| {
for block in block_range.clone() {
tx.put::<tables::BlockBodyIndices>(
block,
StoredBlockBodyIndices { tx_count: 3, ..Default::default() },
)?;
}
Ok(())
})
.unwrap();
let static_file_provider = db.factory.static_file_provider();
let mut writer =
static_file_provider.latest_writer(StaticFileSegment::AccountChangeSets).unwrap();
for block in block_range {
writer.append_account_changeset(vec![acc()], block).unwrap();
}
writer.commit().unwrap();
}
/// Test that when `account_history_in_rocksdb` is enabled, the stage
/// writes account history indices to `RocksDB` instead of MDBX.
#[tokio::test]
async fn execute_writes_to_rocksdb_when_enabled() {
// init
let db = TestStageDB::default();
// Enable RocksDB for account history
db.factory.set_storage_settings_cache(
StorageSettings::v1().with_account_history_in_rocksdb(true),
);
db.commit(|tx| {
for block in 0..=10 {
tx.put::<tables::BlockBodyIndices>(
block,
StoredBlockBodyIndices { tx_count: 3, ..Default::default() },
)?;
tx.put::<tables::AccountChangeSets>(block, acc())?;
}
Ok(())
})
.unwrap();
setup_v2_account_data(&db, 0..=10);
let input = ExecInput { target: Some(10), ..Default::default() };
let mut stage = IndexAccountHistoryStage::default();
@@ -721,22 +732,7 @@ mod tests {
#[tokio::test]
async fn unwind_works_when_rocksdb_enabled() {
let db = TestStageDB::default();
db.factory.set_storage_settings_cache(
StorageSettings::v1().with_account_history_in_rocksdb(true),
);
db.commit(|tx| {
for block in 0..=10 {
tx.put::<tables::BlockBodyIndices>(
block,
StoredBlockBodyIndices { tx_count: 3, ..Default::default() },
)?;
tx.put::<tables::AccountChangeSets>(block, acc())?;
}
Ok(())
})
.unwrap();
setup_v2_account_data(&db, 0..=10);
let input = ExecInput { target: Some(10), ..Default::default() };
let mut stage = IndexAccountHistoryStage::default();
@@ -772,22 +768,7 @@ mod tests {
#[tokio::test]
async fn execute_incremental_sync() {
let db = TestStageDB::default();
db.factory.set_storage_settings_cache(
StorageSettings::v1().with_account_history_in_rocksdb(true),
);
db.commit(|tx| {
for block in 0..=5 {
tx.put::<tables::BlockBodyIndices>(
block,
StoredBlockBodyIndices { tx_count: 3, ..Default::default() },
)?;
tx.put::<tables::AccountChangeSets>(block, acc())?;
}
Ok(())
})
.unwrap();
setup_v2_account_data(&db, 0..=10);
let input = ExecInput { target: Some(5), ..Default::default() };
let mut stage = IndexAccountHistoryStage::default();
@@ -802,18 +783,6 @@ mod tests {
let blocks: Vec<u64> = result.unwrap().iter().collect();
assert_eq!(blocks, (0..=5).collect::<Vec<_>>());
db.commit(|tx| {
for block in 6..=10 {
tx.put::<tables::BlockBodyIndices>(
block,
StoredBlockBodyIndices { tx_count: 3, ..Default::default() },
)?;
tx.put::<tables::AccountChangeSets>(block, acc())?;
}
Ok(())
})
.unwrap();
let input = ExecInput { target: Some(10), checkpoint: Some(StageCheckpoint::new(5)) };
let provider = db.factory.database_provider_rw().unwrap();
let out = stage.execute(&provider, input).unwrap();

View File

@@ -107,7 +107,7 @@ where
let mut range = input.next_block_range();
let first_sync = input.checkpoint().block_number == 0;
let use_rocksdb = provider.cached_storage_settings().storages_history_in_rocksdb;
let use_rocksdb = provider.cached_storage_settings().storage_v2;
// On first sync we might have history coming from genesis. We clear the table since it's
// faster to rebuild from scratch.
@@ -125,7 +125,7 @@ where
}
info!(target: "sync::stages::index_storage_history::exec", ?first_sync, ?use_rocksdb, "Collecting indices");
let collector = if provider.cached_storage_settings().storage_changesets_in_static_files {
let collector = if provider.cached_storage_settings().storage_v2 {
collect_storage_history_indices(provider, range.clone(), &self.etl_config)?
} else {
collect_history_indices::<_, tables::StorageChangeSets, tables::StoragesHistory, _>(
@@ -694,33 +694,51 @@ mod tests {
#[cfg(all(unix, feature = "rocksdb"))]
mod rocksdb_tests {
use super::*;
use reth_provider::RocksDBProviderFactory;
use reth_db_api::models::StorageBeforeTx;
use reth_provider::{providers::StaticFileWriter, RocksDBProviderFactory};
use reth_static_file_types::StaticFileSegment;
use reth_storage_api::StorageSettings;
/// Sets up v2 storage test data: writes block body indices to MDBX and
/// storage changesets to static files (matching realistic v2 layout).
fn setup_v2_storage_data(db: &TestStageDB, block_range: std::ops::RangeInclusive<u64>) {
db.factory.set_storage_settings_cache(StorageSettings::v2());
db.commit(|tx| {
for block in block_range.clone() {
tx.put::<tables::BlockBodyIndices>(
block,
StoredBlockBodyIndices { tx_count: 3, ..Default::default() },
)?;
}
Ok(())
})
.unwrap();
let static_file_provider = db.factory.static_file_provider();
let mut writer =
static_file_provider.latest_writer(StaticFileSegment::StorageChangeSets).unwrap();
for block in block_range {
writer
.append_storage_changeset(
vec![StorageBeforeTx {
address: ADDRESS,
key: STORAGE_KEY,
value: U256::ZERO,
}],
block,
)
.unwrap();
}
writer.commit().unwrap();
}
/// Test that when `storages_history_in_rocksdb` is enabled, the stage
/// writes storage history indices to `RocksDB` instead of MDBX.
#[tokio::test]
async fn execute_writes_to_rocksdb_when_enabled() {
let db = TestStageDB::default();
db.factory.set_storage_settings_cache(
StorageSettings::v1().with_storages_history_in_rocksdb(true),
);
db.commit(|tx| {
for block in 0..=10 {
tx.put::<tables::BlockBodyIndices>(
block,
StoredBlockBodyIndices { tx_count: 3, ..Default::default() },
)?;
tx.put::<tables::StorageChangeSets>(
block_number_address(block),
storage(STORAGE_KEY),
)?;
}
Ok(())
})
.unwrap();
setup_v2_storage_data(&db, 0..=10);
let input = ExecInput { target: Some(10), ..Default::default() };
let mut stage = IndexStorageHistoryStage::default();
@@ -748,25 +766,7 @@ mod tests {
#[tokio::test]
async fn unwind_works_when_rocksdb_enabled() {
let db = TestStageDB::default();
db.factory.set_storage_settings_cache(
StorageSettings::v1().with_storages_history_in_rocksdb(true),
);
db.commit(|tx| {
for block in 0..=10 {
tx.put::<tables::BlockBodyIndices>(
block,
StoredBlockBodyIndices { tx_count: 3, ..Default::default() },
)?;
tx.put::<tables::StorageChangeSets>(
block_number_address(block),
storage(STORAGE_KEY),
)?;
}
Ok(())
})
.unwrap();
setup_v2_storage_data(&db, 0..=10);
let input = ExecInput { target: Some(10), ..Default::default() };
let mut stage = IndexStorageHistoryStage::default();
@@ -803,25 +803,7 @@ mod tests {
#[tokio::test]
async fn unwind_to_zero_keeps_block_zero() {
let db = TestStageDB::default();
db.factory.set_storage_settings_cache(
StorageSettings::v1().with_storages_history_in_rocksdb(true),
);
db.commit(|tx| {
for block in 0..=5 {
tx.put::<tables::BlockBodyIndices>(
block,
StoredBlockBodyIndices { tx_count: 3, ..Default::default() },
)?;
tx.put::<tables::StorageChangeSets>(
block_number_address(block),
storage(STORAGE_KEY),
)?;
}
Ok(())
})
.unwrap();
setup_v2_storage_data(&db, 0..=5);
let input = ExecInput { target: Some(5), ..Default::default() };
let mut stage = IndexStorageHistoryStage::default();
@@ -852,25 +834,7 @@ mod tests {
#[tokio::test]
async fn execute_incremental_sync() {
let db = TestStageDB::default();
db.factory.set_storage_settings_cache(
StorageSettings::v1().with_storages_history_in_rocksdb(true),
);
db.commit(|tx| {
for block in 0..=5 {
tx.put::<tables::BlockBodyIndices>(
block,
StoredBlockBodyIndices { tx_count: 3, ..Default::default() },
)?;
tx.put::<tables::StorageChangeSets>(
block_number_address(block),
storage(STORAGE_KEY),
)?;
}
Ok(())
})
.unwrap();
setup_v2_storage_data(&db, 0..=10);
let input = ExecInput { target: Some(5), ..Default::default() };
let mut stage = IndexStorageHistoryStage::default();
@@ -885,21 +849,6 @@ mod tests {
let blocks: Vec<u64> = result.unwrap().iter().collect();
assert_eq!(blocks, (0..=5).collect::<Vec<_>>());
db.commit(|tx| {
for block in 6..=10 {
tx.put::<tables::BlockBodyIndices>(
block,
StoredBlockBodyIndices { tx_count: 3, ..Default::default() },
)?;
tx.put::<tables::StorageChangeSets>(
block_number_address(block),
storage(STORAGE_KEY),
)?;
}
Ok(())
})
.unwrap();
let input = ExecInput { target: Some(10), checkpoint: Some(StageCheckpoint::new(5)) };
let provider = db.factory.database_provider_rw().unwrap();
let out = stage.execute(&provider, input).unwrap();
@@ -919,27 +868,8 @@ mod tests {
use reth_db_api::models::sharded_key::NUM_OF_INDICES_IN_SHARD;
let db = TestStageDB::default();
db.factory.set_storage_settings_cache(
StorageSettings::v1().with_storages_history_in_rocksdb(true),
);
let num_blocks = (NUM_OF_INDICES_IN_SHARD * 2 + 100) as u64;
db.commit(|tx| {
for block in 0..num_blocks {
tx.put::<tables::BlockBodyIndices>(
block,
StoredBlockBodyIndices { tx_count: 3, ..Default::default() },
)?;
tx.put::<tables::StorageChangeSets>(
block_number_address(block),
storage(STORAGE_KEY),
)?;
}
Ok(())
})
.unwrap();
setup_v2_storage_data(&db, 0..=num_blocks - 1);
let input = ExecInput { target: Some(num_blocks - 1), ..Default::default() };
let mut stage = IndexStorageHistoryStage::default();

View File

@@ -540,9 +540,7 @@ mod tests {
let mut rng = generators::rng();
let runner = SenderRecoveryTestRunner::default();
runner.db.factory.set_storage_settings_cache(
StorageSettings::v1().with_transaction_senders_in_static_files(true),
);
runner.db.factory.set_storage_settings_cache(StorageSettings::v2());
let input = ExecInput {
target: Some(target),
checkpoint: Some(StageCheckpoint::new(stage_progress)),

View File

@@ -200,7 +200,7 @@ where
}
#[cfg(all(unix, feature = "rocksdb"))]
if provider.cached_storage_settings().transaction_hash_numbers_in_rocksdb {
if provider.cached_storage_settings().storage_v2 {
provider.commit_pending_rocksdb_batches()?;
provider.rocksdb_provider().flush(&[Tables::TransactionHashNumbers.name()])?;
}
@@ -618,9 +618,7 @@ mod tests {
let runner = TransactionLookupTestRunner::default();
// Enable RocksDB for transaction hash numbers
runner.db.factory.set_storage_settings_cache(
StorageSettings::v1().with_transaction_hash_numbers_in_rocksdb(true),
);
runner.db.factory.set_storage_settings_cache(StorageSettings::v2());
let input = ExecInput {
target: Some(previous_stage),
@@ -686,9 +684,7 @@ mod tests {
let runner = TransactionLookupTestRunner::default();
// Enable RocksDB for transaction hash numbers
runner.db.factory.set_storage_settings_cache(
StorageSettings::v1().with_transaction_hash_numbers_in_rocksdb(true),
);
runner.db.factory.set_storage_settings_cache(StorageSettings::v2());
// Insert blocks with transactions
let blocks = random_block_range(

View File

@@ -79,7 +79,7 @@ fn assert_changesets_queryable(
let settings = provider.cached_storage_settings();
// Verify storage changesets
if settings.storage_changesets_in_static_files {
if settings.storage_v2 {
let static_file_provider = provider_factory.static_file_provider();
static_file_provider.initialize_index()?;
let storage_changesets =
@@ -118,7 +118,7 @@ fn assert_changesets_queryable(
}
// Verify account changesets
if settings.account_changesets_in_static_files {
if settings.storage_v2 {
let static_file_provider = provider_factory.static_file_provider();
static_file_provider.initialize_index()?;
let account_changesets =
@@ -604,7 +604,7 @@ async fn test_pipeline() -> eyre::Result<()> {
}
/// Same as [`test_pipeline`] but runs with v2 storage settings (`use_hashed_state=true`,
/// `storage_changesets_in_static_files=true`, etc.).
/// `is_v2()=true`, etc.).
///
/// In v2 mode:
/// - The execution stage writes directly to `HashedAccounts`/`HashedStorages`

View File

@@ -5,39 +5,25 @@ use serde::{Deserialize, Serialize};
/// Storage configuration settings for this node.
///
/// Controls whether this node uses v2 storage layout (static files + `RocksDB` routing)
/// or v1/legacy layout (everything in MDBX).
///
These should be set during `init_genesis` or `init_db`, depending on whether we want to dictate
the behaviour of new or old nodes respectively.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize, Compact)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Compact, Serialize, Deserialize)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(compact)]
pub struct StorageSettings {
/// Whether this node always writes receipts to static files.
/// Whether this node uses v2 storage layout.
///
/// If this is set to FALSE AND receipt pruning IS ENABLED, all receipts should be written to DB. Otherwise, they should be written to static files. This ensures that older nodes do not need to migrate their current DB tables to static files. For more, read: <https://github.com/paradigmxyz/reth/issues/18890#issuecomment-3457760097>
#[serde(default)]
pub receipts_in_static_files: bool,
/// Whether this node always writes transaction senders to static files.
#[serde(default)]
pub transaction_senders_in_static_files: bool,
/// Whether `StoragesHistory` is stored in `RocksDB`.
#[serde(default)]
pub storages_history_in_rocksdb: bool,
/// Whether `TransactionHashNumbers` is stored in `RocksDB`.
#[serde(default)]
pub transaction_hash_numbers_in_rocksdb: bool,
/// Whether `AccountsHistory` is stored in `RocksDB`.
#[serde(default)]
pub account_history_in_rocksdb: bool,
/// Whether this node should read and write account changesets from static files.
#[serde(default)]
pub account_changesets_in_static_files: bool,
/// Whether this node should read and write storage changesets from static files.
#[serde(default)]
pub storage_changesets_in_static_files: bool,
/// Whether to use hashed state tables (`HashedAccounts`/`HashedStorages`) as the canonical
/// state representation instead of plain state tables.
#[serde(default)]
pub use_hashed_state: bool,
/// When `true`, enables all v2 storage features:
/// - Receipts and transaction senders in static files
/// - History indices in `RocksDB` (accounts, storages, transaction hashes)
/// - Account and storage changesets in static files
/// - Hashed state tables as canonical state representation
///
/// When `false`, uses v1/legacy layout (everything in MDBX).
pub storage_v2: bool,
}
impl StorageSettings {
@@ -62,159 +48,58 @@ impl StorageSettings {
/// - Receipts and transaction senders in static files
/// - History indices in `RocksDB` (storages, accounts, transaction hashes)
/// - Account and storage changesets in static files
/// - Hashed state as canonical state representation
///
/// Use this when the `--storage.v2` CLI flag is set.
pub const fn v2() -> Self {
Self {
receipts_in_static_files: true,
transaction_senders_in_static_files: true,
account_changesets_in_static_files: true,
storage_changesets_in_static_files: true,
storages_history_in_rocksdb: true,
transaction_hash_numbers_in_rocksdb: true,
account_history_in_rocksdb: true,
use_hashed_state: true,
}
Self { storage_v2: true }
}
/// Creates `StorageSettings` for v1/legacy nodes.
///
/// This explicitly sets `receipts_in_static_files` and `transaction_senders_in_static_files` to
/// `false`, ensuring older nodes continue writing receipts and transaction senders to the
/// database when receipt pruning is enabled.
/// This keeps all data in MDBX, matching the original storage layout.
pub const fn v1() -> Self {
Self {
receipts_in_static_files: false,
transaction_senders_in_static_files: false,
storages_history_in_rocksdb: false,
transaction_hash_numbers_in_rocksdb: false,
account_history_in_rocksdb: false,
account_changesets_in_static_files: false,
storage_changesets_in_static_files: false,
use_hashed_state: false,
}
Self { storage_v2: false }
}
/// Sets the `receipts_in_static_files` flag to the provided value.
pub const fn with_receipts_in_static_files(mut self, value: bool) -> Self {
self.receipts_in_static_files = value;
self
/// Returns `true` if this node uses v2 storage layout.
pub const fn is_v2(&self) -> bool {
self.storage_v2
}
/// Sets the `transaction_senders_in_static_files` flag to the provided value.
pub const fn with_transaction_senders_in_static_files(mut self, value: bool) -> Self {
self.transaction_senders_in_static_files = value;
self
/// Whether receipts are stored in static files.
pub const fn receipts_in_static_files(&self) -> bool {
self.storage_v2
}
/// Sets the `storages_history_in_rocksdb` flag to the provided value.
pub const fn with_storages_history_in_rocksdb(mut self, value: bool) -> Self {
self.storages_history_in_rocksdb = value;
self
/// Whether transaction senders are stored in static files.
pub const fn transaction_senders_in_static_files(&self) -> bool {
self.storage_v2
}
/// Sets the `transaction_hash_numbers_in_rocksdb` flag to the provided value.
pub const fn with_transaction_hash_numbers_in_rocksdb(mut self, value: bool) -> Self {
self.transaction_hash_numbers_in_rocksdb = value;
self
/// Whether storages history is stored in `RocksDB`.
pub const fn storages_history_in_rocksdb(&self) -> bool {
self.storage_v2
}
/// Sets the `account_history_in_rocksdb` flag to the provided value.
pub const fn with_account_history_in_rocksdb(mut self, value: bool) -> Self {
self.account_history_in_rocksdb = value;
self
/// Whether transaction hash numbers are stored in `RocksDB`.
pub const fn transaction_hash_numbers_in_rocksdb(&self) -> bool {
self.storage_v2
}
/// Sets the `account_changesets_in_static_files` flag to the provided value.
pub const fn with_account_changesets_in_static_files(mut self, value: bool) -> Self {
self.account_changesets_in_static_files = value;
self
/// Whether account history is stored in `RocksDB`.
pub const fn account_history_in_rocksdb(&self) -> bool {
self.storage_v2
}
/// Sets the `storage_changesets_in_static_files` flag to the provided value.
pub const fn with_storage_changesets_in_static_files(mut self, value: bool) -> Self {
self.storage_changesets_in_static_files = value;
self
}
/// Sets the `use_hashed_state` flag to the provided value.
pub const fn with_use_hashed_state(mut self, value: bool) -> Self {
self.use_hashed_state = value;
self
}
/// Sets `receipts_in_static_files` if `value` is `Some`.
pub const fn with_receipts_in_static_files_opt(mut self, value: Option<bool>) -> Self {
if let Some(v) = value {
self.receipts_in_static_files = v;
}
self
}
/// Sets `transaction_senders_in_static_files` if `value` is `Some`.
pub const fn with_transaction_senders_in_static_files_opt(
mut self,
value: Option<bool>,
) -> Self {
if let Some(v) = value {
self.transaction_senders_in_static_files = v;
}
self
}
/// Sets `account_changesets_in_static_files` if `value` is `Some`.
pub const fn with_account_changesets_in_static_files_opt(
mut self,
value: Option<bool>,
) -> Self {
if let Some(v) = value {
self.account_changesets_in_static_files = v;
}
self
}
/// Sets `storage_changesets_in_static_files` if `value` is `Some`.
pub const fn with_storage_changesets_in_static_files_opt(
mut self,
value: Option<bool>,
) -> Self {
if let Some(v) = value {
self.storage_changesets_in_static_files = v;
}
self
}
/// Sets `transaction_hash_numbers_in_rocksdb` if `value` is `Some`.
pub const fn with_transaction_hash_numbers_in_rocksdb_opt(
mut self,
value: Option<bool>,
) -> Self {
if let Some(v) = value {
self.transaction_hash_numbers_in_rocksdb = v;
}
self
}
/// Sets `storages_history_in_rocksdb` if `value` is `Some`.
pub const fn with_storages_history_in_rocksdb_opt(mut self, value: Option<bool>) -> Self {
if let Some(v) = value {
self.storages_history_in_rocksdb = v;
}
self
}
/// Sets `account_history_in_rocksdb` if `value` is `Some`.
pub const fn with_account_history_in_rocksdb_opt(mut self, value: Option<bool>) -> Self {
if let Some(v) = value {
self.account_history_in_rocksdb = v;
}
self
/// Whether to use hashed state tables (`HashedAccounts`/`HashedStorages`) as the canonical
/// state representation instead of plain state tables. Implied by v2 storage layout.
pub const fn use_hashed_state(&self) -> bool {
self.storage_v2
}
/// Returns `true` if any tables are configured to be stored in `RocksDB`.
pub const fn any_in_rocksdb(&self) -> bool {
self.transaction_hash_numbers_in_rocksdb ||
self.account_history_in_rocksdb ||
self.storages_history_in_rocksdb
self.storage_v2
}
}
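For illustration, a hedged sketch of what callers can rely on after this consolidation: every legacy per-table query is now an accessor over the single `storage_v2` bit (all method names are the ones defined above):

```rust
use reth_storage_api::StorageSettings;

#[test]
fn accessors_track_the_single_flag() {
    let v2 = StorageSettings::v2();
    let v1 = StorageSettings::v1();

    // Every former per-table flag is derived from `storage_v2`.
    assert!(v2.is_v2() && v2.receipts_in_static_files() && v2.use_hashed_state());
    assert!(v2.account_history_in_rocksdb() && v2.any_in_rocksdb());
    assert!(!v1.is_v2() && !v1.receipts_in_static_files() && !v1.any_in_rocksdb());
}
```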

View File

@@ -214,13 +214,13 @@ where
// not the genesis block number. This would cause increment_block(N) to fail.
let static_file_provider = provider_rw.static_file_provider();
if genesis_block_number > 0 {
if genesis_storage_settings.account_changesets_in_static_files {
if genesis_storage_settings.storage_v2 {
static_file_provider
.get_writer(genesis_block_number, StaticFileSegment::AccountChangeSets)?
.user_header_mut()
.set_expected_block_start(genesis_block_number);
}
if genesis_storage_settings.storage_changesets_in_static_files {
if genesis_storage_settings.storage_v2 {
static_file_provider
.get_writer(genesis_block_number, StaticFileSegment::StorageChangeSets)?
.user_header_mut()
@@ -259,7 +259,7 @@ where
.user_header_mut()
.set_block_range(genesis_block_number, genesis_block_number);
if genesis_storage_settings.transaction_senders_in_static_files {
if genesis_storage_settings.storage_v2 {
static_file_provider
.get_writer(genesis_block_number, StaticFileSegment::TransactionSenders)?
.user_header_mut()
@@ -1052,7 +1052,7 @@ mod tests {
)
};
let (accounts, storages) = if settings.account_history_in_rocksdb {
let (accounts, storages) = if settings.storage_v2 {
collect_rocksdb(&rocksdb)
} else {
collect_from_mdbx(&factory)
@@ -1075,10 +1075,7 @@ mod tests {
init_genesis_with_settings(&factory, StorageSettings::v1()).unwrap();
// Request different settings - should warn but succeed
let result = init_genesis_with_settings(
&factory,
StorageSettings::v1().with_receipts_in_static_files(true),
);
let result = init_genesis_with_settings(&factory, StorageSettings::v2());
// Should succeed (warning is logged, not an error)
assert!(result.is_ok());
@@ -1087,7 +1084,7 @@ mod tests {
#[test]
fn allow_same_storage_settings() {
let factory = create_test_provider_factory_with_chain_spec(MAINNET.clone());
let settings = StorageSettings::v1().with_receipts_in_static_files(true);
let settings = StorageSettings::v2();
init_genesis_with_settings(&factory, settings).unwrap();
let result = init_genesis_with_settings(&factory, settings);

View File

@@ -162,7 +162,7 @@ impl<'a> EitherWriter<'a, (), ()> {
P: DBProvider + NodePrimitivesProvider + StorageSettingsCache + StaticFileProviderFactory,
P::Tx: DbTxMut,
{
if provider.cached_storage_settings().account_changesets_in_static_files {
if provider.cached_storage_settings().storage_v2 {
Ok(EitherWriter::StaticFile(
provider
.get_static_file_writer(block_number, StaticFileSegment::AccountChangeSets)?,
@@ -183,7 +183,7 @@ impl<'a> EitherWriter<'a, (), ()> {
P: DBProvider + NodePrimitivesProvider + StorageSettingsCache + StaticFileProviderFactory,
P::Tx: DbTxMut,
{
if provider.cached_storage_settings().storage_changesets_in_static_files {
if provider.cached_storage_settings().storage_v2 {
Ok(EitherWriter::StaticFile(
provider
.get_static_file_writer(block_number, StaticFileSegment::StorageChangeSets)?,
@@ -206,7 +206,7 @@ impl<'a> EitherWriter<'a, (), ()> {
pub fn receipts_destination<P: DBProvider + StorageSettingsCache>(
provider: &P,
) -> EitherWriterDestination {
let receipts_in_static_files = provider.cached_storage_settings().receipts_in_static_files;
let receipts_in_static_files = provider.cached_storage_settings().storage_v2;
let prune_modes = provider.prune_modes_ref();
if !receipts_in_static_files && prune_modes.has_receipts_pruning() ||
@@ -225,7 +225,7 @@ impl<'a> EitherWriter<'a, (), ()> {
pub fn account_changesets_destination<P: DBProvider + StorageSettingsCache>(
provider: &P,
) -> EitherWriterDestination {
if provider.cached_storage_settings().account_changesets_in_static_files {
if provider.cached_storage_settings().storage_v2 {
EitherWriterDestination::StaticFile
} else {
EitherWriterDestination::Database
@@ -238,7 +238,7 @@ impl<'a> EitherWriter<'a, (), ()> {
pub fn storage_changesets_destination<P: DBProvider + StorageSettingsCache>(
provider: &P,
) -> EitherWriterDestination {
if provider.cached_storage_settings().storage_changesets_in_static_files {
if provider.cached_storage_settings().storage_v2 {
EitherWriterDestination::StaticFile
} else {
EitherWriterDestination::Database
@@ -255,7 +255,7 @@ impl<'a> EitherWriter<'a, (), ()> {
P::Tx: DbTxMut,
{
#[cfg(all(unix, feature = "rocksdb"))]
if provider.cached_storage_settings().storages_history_in_rocksdb {
if provider.cached_storage_settings().storage_v2 {
return Ok(EitherWriter::RocksDB(_rocksdb_batch));
}
@@ -272,7 +272,7 @@ impl<'a> EitherWriter<'a, (), ()> {
P::Tx: DbTxMut,
{
#[cfg(all(unix, feature = "rocksdb"))]
if provider.cached_storage_settings().transaction_hash_numbers_in_rocksdb {
if provider.cached_storage_settings().storage_v2 {
return Ok(EitherWriter::RocksDB(_rocksdb_batch));
}
@@ -291,7 +291,7 @@ impl<'a> EitherWriter<'a, (), ()> {
P::Tx: DbTxMut,
{
#[cfg(all(unix, feature = "rocksdb"))]
if provider.cached_storage_settings().account_history_in_rocksdb {
if provider.cached_storage_settings().storage_v2 {
return Ok(EitherWriter::RocksDB(_rocksdb_batch));
}
@@ -764,7 +764,7 @@ impl<'a> EitherReader<'a, (), ()> {
P::Tx: DbTx,
{
#[cfg(all(unix, feature = "rocksdb"))]
if provider.cached_storage_settings().storages_history_in_rocksdb {
if provider.cached_storage_settings().storage_v2 {
return Ok(EitherReader::RocksDB(
_rocksdb_tx.expect("storages_history_in_rocksdb requires rocksdb tx"),
));
@@ -786,7 +786,7 @@ impl<'a> EitherReader<'a, (), ()> {
P::Tx: DbTx,
{
#[cfg(all(unix, feature = "rocksdb"))]
if provider.cached_storage_settings().transaction_hash_numbers_in_rocksdb {
if provider.cached_storage_settings().storage_v2 {
return Ok(EitherReader::RocksDB(
_rocksdb_tx.expect("transaction_hash_numbers_in_rocksdb requires rocksdb tx"),
));
@@ -808,7 +808,7 @@ impl<'a> EitherReader<'a, (), ()> {
P::Tx: DbTx,
{
#[cfg(all(unix, feature = "rocksdb"))]
if provider.cached_storage_settings().account_history_in_rocksdb {
if provider.cached_storage_settings().storage_v2 {
return Ok(EitherReader::RocksDB(
_rocksdb_tx.expect("account_history_in_rocksdb requires rocksdb tx"),
));
@@ -1046,7 +1046,7 @@ impl EitherWriterDestination {
P: StorageSettingsCache,
{
// Write senders to static files only if they're explicitly enabled
if provider.cached_storage_settings().transaction_senders_in_static_files {
if provider.cached_storage_settings().storage_v2 {
Self::StaticFile
} else {
Self::Database
@@ -1059,7 +1059,7 @@ impl EitherWriterDestination {
P: StorageSettingsCache,
{
// Write account changesets to static files only if they're explicitly enabled
if provider.cached_storage_settings().account_changesets_in_static_files {
if provider.cached_storage_settings().storage_v2 {
Self::StaticFile
} else {
Self::Database
@@ -1072,7 +1072,7 @@ impl EitherWriterDestination {
P: StorageSettingsCache,
{
// Write storage changesets to static files only if they're explicitly enabled
if provider.cached_storage_settings().storage_changesets_in_static_files {
if provider.cached_storage_settings().storage_v2 {
Self::StaticFile
} else {
Self::Database
@@ -1127,9 +1127,7 @@ mod tests {
writer.commit().unwrap();
}
factory.set_storage_settings_cache(
StorageSettings::default().with_account_changesets_in_static_files(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
let provider = factory.database_provider_ro().unwrap();
@@ -1161,10 +1159,11 @@ mod tests {
];
for transaction_senders_in_static_files in [false, true] {
factory.set_storage_settings_cache(
factory.set_storage_settings_cache(if transaction_senders_in_static_files {
StorageSettings::v2()
} else {
StorageSettings::v1()
.with_transaction_senders_in_static_files(transaction_senders_in_static_files),
);
});
let provider = factory.database_provider_rw().unwrap();
let mut writer = EitherWriter::new_senders(&provider, 0).unwrap();
@@ -1234,9 +1233,7 @@ mod rocksdb_tests {
let factory = create_test_provider_factory();
// Enable RocksDB for transaction hash numbers
factory.set_storage_settings_cache(
StorageSettings::v1().with_transaction_hash_numbers_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
let hash1 = B256::from([1u8; 32]);
let hash2 = B256::from([2u8; 32]);
@@ -1278,9 +1275,7 @@ mod rocksdb_tests {
let factory = create_test_provider_factory();
// Enable RocksDB for transaction hash numbers
factory.set_storage_settings_cache(
StorageSettings::v1().with_transaction_hash_numbers_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
let hash = B256::from([1u8; 32]);
let tx_num = 100u64;
@@ -1832,9 +1827,7 @@ mod rocksdb_tests {
let factory = create_test_provider_factory();
// Enable RocksDB for transaction hash numbers
factory.set_storage_settings_cache(
StorageSettings::v1().with_transaction_hash_numbers_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
let hash1 = B256::from([1u8; 32]);
let hash2 = B256::from([2u8; 32]);
@@ -1892,9 +1885,7 @@ mod rocksdb_tests {
fn test_settings_mismatch_panics() {
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1().with_account_history_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
let provider = factory.database_provider_ro().unwrap();
let _ = EitherReader::<(), ()>::new_accounts_history(&provider, None);
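The routing changes in this file all follow the same shape, which the following illustrative sketch condenses; the `Backend` enum and the two helpers are invented for the sketch rather than taken from the crate, and the receipts case is left out because `receipts_destination` above additionally consults the prune modes:

```rust
/// Illustrative stand-in for the Database / StaticFile / RocksDB routing that
/// `EitherWriter` and `EitherReader` perform above, collapsed to one flag.
#[derive(Debug, PartialEq, Eq)]
enum Backend {
    Mdbx,       // legacy layout keeps these tables in the database
    StaticFile, // v2: changesets and transaction senders
    RocksDb,    // v2: history indices and tx-hash -> number lookups
}

fn changesets_and_senders(storage_v2: bool) -> Backend {
    if storage_v2 { Backend::StaticFile } else { Backend::Mdbx }
}

fn history_indices_and_tx_hash(storage_v2: bool) -> Backend {
    if storage_v2 { Backend::RocksDb } else { Backend::Mdbx }
}

fn main() {
    assert_eq!(changesets_and_senders(true), Backend::StaticFile);
    assert_eq!(history_indices_and_tx_hash(true), Backend::RocksDb);
    assert_eq!(changesets_and_senders(false), Backend::Mdbx);
    assert_eq!(history_indices_and_tx_hash(false), Backend::Mdbx);
}
```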

View File

@@ -1313,7 +1313,7 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for ConsistentProvider<N> {
&self,
block_number: BlockNumber,
) -> ProviderResult<Vec<(BlockNumberAddress, ChangesetEntry)>> {
let use_hashed = self.storage_provider.cached_storage_settings().use_hashed_state;
let use_hashed = self.storage_provider.cached_storage_settings().use_hashed_state();
if let Some(state) =
self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into()))
{
@@ -1368,7 +1368,7 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for ConsistentProvider<N> {
address: Address,
storage_key: B256,
) -> ProviderResult<Option<ChangesetEntry>> {
let use_hashed = self.storage_provider.cached_storage_settings().use_hashed_state;
let use_hashed = self.storage_provider.cached_storage_settings().use_hashed_state();
if let Some(state) =
self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into()))
{
@@ -1421,7 +1421,7 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for ConsistentProvider<N> {
let database_start = range.start;
let mut database_end = range.end;
let use_hashed = self.storage_provider.cached_storage_settings().use_hashed_state;
let use_hashed = self.storage_provider.cached_storage_settings().use_hashed_state();
if let Some(head_block) = &self.head_block {
database_end = head_block.anchor().number;

View File

@@ -559,7 +559,7 @@ impl<TX: DbTx + DbTxMut + 'static, N: NodeTypesForProvider> DatabaseProvider<TX,
#[cfg(all(unix, feature = "rocksdb"))]
let rocksdb_ctx = self.rocksdb_write_ctx(first_number);
#[cfg(all(unix, feature = "rocksdb"))]
let rocksdb_enabled = rocksdb_ctx.storage_settings.any_in_rocksdb();
let rocksdb_enabled = rocksdb_ctx.storage_settings.storage_v2;
let mut sf_result = None;
#[cfg(all(unix, feature = "rocksdb"))]
@@ -595,7 +595,7 @@ impl<TX: DbTx + DbTxMut + 'static, N: NodeTypesForProvider> DatabaseProvider<TX,
let mdbx_start = Instant::now();
// Collect all transaction hashes across all blocks, sort them, and write in batch
if !self.cached_storage_settings().transaction_hash_numbers_in_rocksdb &&
if !self.cached_storage_settings().storage_v2 &&
self.prune_modes.transaction_lookup.is_none_or(|m| !m.is_full())
{
let start = Instant::now();
@@ -1396,7 +1396,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypes> DatabaseProvider<TX, N> {
impl<TX: DbTx, N: NodeTypes> AccountReader for DatabaseProvider<TX, N> {
fn basic_account(&self, address: &Address) -> ProviderResult<Option<Account>> {
if self.cached_storage_settings().use_hashed_state {
if self.cached_storage_settings().use_hashed_state() {
let hashed_address = keccak256(address);
Ok(self.tx.get_by_encoded_key::<tables::HashedAccounts>(&hashed_address)?)
} else {
@@ -1419,7 +1419,7 @@ impl<TX: DbTx + 'static, N: NodeTypes> AccountExtReader for DatabaseProvider<TX,
&self,
iter: impl IntoIterator<Item = Address>,
) -> ProviderResult<Vec<(Address, Option<Account>)>> {
if self.cached_storage_settings().use_hashed_state {
if self.cached_storage_settings().use_hashed_state() {
let mut hashed_accounts = self.tx.cursor_read::<tables::HashedAccounts>()?;
Ok(iter
.into_iter()
@@ -1448,7 +1448,7 @@ impl<TX: DbTx + 'static, N: NodeTypes> AccountExtReader for DatabaseProvider<TX,
.get_highest_static_file_block(StaticFileSegment::AccountChangeSets);
if let Some(highest) = highest_static_block &&
self.cached_storage_settings().account_changesets_in_static_files
self.cached_storage_settings().storage_v2
{
let start = *range.start();
let static_end = (*range.end()).min(highest);
@@ -1489,7 +1489,7 @@ impl<TX: DbTx, N: NodeTypes> StorageChangeSetReader for DatabaseProvider<TX, N>
&self,
block_number: BlockNumber,
) -> ProviderResult<Vec<(BlockNumberAddress, ChangesetEntry)>> {
if self.cached_storage_settings().storage_changesets_in_static_files {
if self.cached_storage_settings().storage_v2 {
self.static_file_provider.storage_changeset(block_number)
} else {
let range = block_number..=block_number;
@@ -1517,7 +1517,7 @@ impl<TX: DbTx, N: NodeTypes> StorageChangeSetReader for DatabaseProvider<TX, N>
address: Address,
storage_key: B256,
) -> ProviderResult<Option<ChangesetEntry>> {
if self.cached_storage_settings().storage_changesets_in_static_files {
if self.cached_storage_settings().storage_v2 {
self.static_file_provider.get_storage_before_block(block_number, address, storage_key)
} else {
Ok(self
@@ -1536,7 +1536,7 @@ impl<TX: DbTx, N: NodeTypes> StorageChangeSetReader for DatabaseProvider<TX, N>
&self,
range: impl RangeBounds<BlockNumber>,
) -> ProviderResult<Vec<(BlockNumberAddress, ChangesetEntry)>> {
if self.cached_storage_settings().storage_changesets_in_static_files {
if self.cached_storage_settings().storage_v2 {
self.static_file_provider.storage_changesets_range(range)
} else {
self.tx
@@ -1557,7 +1557,7 @@ impl<TX: DbTx, N: NodeTypes> StorageChangeSetReader for DatabaseProvider<TX, N>
}
fn storage_changeset_count(&self) -> ProviderResult<usize> {
if self.cached_storage_settings().storage_changesets_in_static_files {
if self.cached_storage_settings().storage_v2 {
self.static_file_provider.storage_changeset_count()
} else {
Ok(self.tx.entries::<tables::StorageChangeSets>()?)
@@ -1570,7 +1570,7 @@ impl<TX: DbTx, N: NodeTypes> ChangeSetReader for DatabaseProvider<TX, N> {
&self,
block_number: BlockNumber,
) -> ProviderResult<Vec<AccountBeforeTx>> {
if self.cached_storage_settings().account_changesets_in_static_files {
if self.cached_storage_settings().storage_v2 {
let static_changesets =
self.static_file_provider.account_block_changeset(block_number)?;
Ok(static_changesets)
@@ -1592,7 +1592,7 @@ impl<TX: DbTx, N: NodeTypes> ChangeSetReader for DatabaseProvider<TX, N> {
block_number: BlockNumber,
address: Address,
) -> ProviderResult<Option<AccountBeforeTx>> {
if self.cached_storage_settings().account_changesets_in_static_files {
if self.cached_storage_settings().storage_v2 {
Ok(self.static_file_provider.get_account_before_block(block_number, address)?)
} else {
self.tx
@@ -1608,7 +1608,7 @@ impl<TX: DbTx, N: NodeTypes> ChangeSetReader for DatabaseProvider<TX, N> {
&self,
range: impl core::ops::RangeBounds<BlockNumber>,
) -> ProviderResult<Vec<(BlockNumber, AccountBeforeTx)>> {
if self.cached_storage_settings().account_changesets_in_static_files {
if self.cached_storage_settings().storage_v2 {
self.static_file_provider.account_changesets_range(range)
} else {
self.tx
@@ -1622,7 +1622,7 @@ impl<TX: DbTx, N: NodeTypes> ChangeSetReader for DatabaseProvider<TX, N> {
fn account_changeset_count(&self) -> ProviderResult<usize> {
// check if account changesets are in static files, otherwise just count the changeset
// entries in the DB
if self.cached_storage_settings().account_changesets_in_static_files {
if self.cached_storage_settings().storage_v2 {
self.static_file_provider.account_changeset_count()
} else {
Ok(self.tx.entries::<tables::AccountChangeSets>()?)
@@ -2243,7 +2243,7 @@ impl<TX: DbTx + 'static, N: NodeTypes> StorageReader for DatabaseProvider<TX, N>
&self,
addresses_with_keys: impl IntoIterator<Item = (Address, impl IntoIterator<Item = B256>)>,
) -> ProviderResult<Vec<(Address, Vec<StorageEntry>)>> {
if self.cached_storage_settings().use_hashed_state {
if self.cached_storage_settings().use_hashed_state() {
let mut hashed_storage = self.tx.cursor_dup_read::<tables::HashedStorages>()?;
addresses_with_keys
@@ -2290,7 +2290,7 @@ impl<TX: DbTx + 'static, N: NodeTypes> StorageReader for DatabaseProvider<TX, N>
&self,
range: RangeInclusive<BlockNumber>,
) -> ProviderResult<BTreeMap<Address, BTreeSet<B256>>> {
if self.cached_storage_settings().storage_changesets_in_static_files {
if self.cached_storage_settings().storage_v2 {
self.storage_changesets_range(range)?.into_iter().try_fold(
BTreeMap::new(),
|mut accounts: BTreeMap<Address, BTreeSet<B256>>, entry| {
@@ -2320,7 +2320,7 @@ impl<TX: DbTx + 'static, N: NodeTypes> StorageReader for DatabaseProvider<TX, N>
&self,
range: RangeInclusive<BlockNumber>,
) -> ProviderResult<BTreeMap<(Address, B256), Vec<u64>>> {
if self.cached_storage_settings().storage_changesets_in_static_files {
if self.cached_storage_settings().storage_v2 {
self.storage_changesets_range(range)?.into_iter().try_fold(
BTreeMap::new(),
|mut storages: BTreeMap<(Address, B256), Vec<u64>>, (index, storage)| {
@@ -2470,7 +2470,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
first_block: BlockNumber,
config: StateWriteConfig,
) -> ProviderResult<()> {
let use_hashed_state = self.cached_storage_settings().use_hashed_state;
let use_hashed_state = self.cached_storage_settings().use_hashed_state();
// Write storage changes
if config.write_storage_changesets {
@@ -2575,7 +2575,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
// When use_hashed_state is enabled, skip plain state writes for accounts and storage.
// The hashed state is already written by the separate `write_hashed_state()` call.
// Bytecode writes remain unconditional since Bytecodes is not a plain/hashed table.
if !self.cached_storage_settings().use_hashed_state {
if !self.cached_storage_settings().use_hashed_state() {
// Write new account state
tracing::trace!(len = changes.accounts.len(), "Writing new account state");
let mut accounts_cursor = self.tx_ref().cursor_write::<tables::PlainAccountState>()?;
@@ -2709,8 +2709,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
block_bodies.first().expect("already checked if there are blocks").first_tx_num();
let storage_range = BlockNumberAddress::range(range.clone());
let storage_changeset = if self.cached_storage_settings().storage_changesets_in_static_files
{
let storage_changeset = if self.cached_storage_settings().storage_v2 {
let changesets = self.storage_changesets_range(range.clone())?;
let mut changeset_writer =
self.static_file_provider.latest_writer(StaticFileSegment::StorageChangeSets)?;
@@ -2724,8 +2723,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
})
.collect()
};
let account_changeset = if self.cached_storage_settings().account_changesets_in_static_files
{
let account_changeset = if self.cached_storage_settings().storage_v2 {
let changesets = self.account_changesets_range(range)?;
let mut changeset_writer =
self.static_file_provider.latest_writer(StaticFileSegment::AccountChangeSets)?;
@@ -2735,7 +2733,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
self.take::<tables::AccountChangeSets>(range)?
};
if self.cached_storage_settings().use_hashed_state {
if self.cached_storage_settings().use_hashed_state() {
let mut hashed_accounts_cursor = self.tx.cursor_write::<tables::HashedAccounts>()?;
let mut hashed_storage_cursor = self.tx.cursor_dup_write::<tables::HashedStorages>()?;
@@ -2868,7 +2866,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
let storage_changeset = if let Some(highest_block) = self
.static_file_provider
.get_highest_static_file_block(StaticFileSegment::StorageChangeSets) &&
self.cached_storage_settings().storage_changesets_in_static_files
self.cached_storage_settings().storage_v2
{
let changesets = self.storage_changesets_range(block + 1..=highest_block)?;
let mut changeset_writer =
@@ -2889,7 +2887,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
.static_file_provider
.get_highest_static_file_block(StaticFileSegment::AccountChangeSets);
let account_changeset = if let Some(highest_block) = highest_changeset_block &&
self.cached_storage_settings().account_changesets_in_static_files
self.cached_storage_settings().storage_v2
{
// TODO: add a `take` method that removes and returns the items instead of doing this
let changesets = self.account_changesets_range(block + 1..highest_block + 1)?;
@@ -2904,7 +2902,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
self.take::<tables::AccountChangeSets>(range)?
};
let (state, reverts) = if self.cached_storage_settings().use_hashed_state {
let (state, reverts) = if self.cached_storage_settings().use_hashed_state() {
let mut hashed_accounts_cursor = self.tx.cursor_write::<tables::HashedAccounts>()?;
let mut hashed_storage_cursor = self.tx.cursor_dup_write::<tables::HashedStorages>()?;
@@ -3260,7 +3258,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypes> HistoryWriter for DatabaseProvi
.collect::<Vec<_>>();
last_indices.sort_unstable_by_key(|(a, _)| *a);
if self.cached_storage_settings().account_history_in_rocksdb {
if self.cached_storage_settings().storage_v2 {
#[cfg(all(unix, feature = "rocksdb"))]
{
let batch = self.rocksdb_provider.unwind_account_history_indices(&last_indices)?;
@@ -3322,7 +3320,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypes> HistoryWriter for DatabaseProvi
.collect::<Vec<_>>();
storage_changesets.sort_by_key(|(address, key, _)| (*address, *key));
if self.cached_storage_settings().storages_history_in_rocksdb {
if self.cached_storage_settings().storage_v2 {
#[cfg(all(unix, feature = "rocksdb"))]
{
let batch =
@@ -3381,12 +3379,12 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypes> HistoryWriter for DatabaseProvi
#[instrument(level = "debug", target = "providers::db", skip_all)]
fn update_history_indices(&self, range: RangeInclusive<BlockNumber>) -> ProviderResult<()> {
let storage_settings = self.cached_storage_settings();
if !storage_settings.account_history_in_rocksdb {
if !storage_settings.storage_v2 {
let indices = self.changed_accounts_and_blocks_with_range(range.clone())?;
self.insert_account_history_index(indices)?;
}
if !storage_settings.storages_history_in_rocksdb {
if !storage_settings.storage_v2 {
let indices = self.changed_storages_and_blocks_with_range(range)?;
self.insert_storage_history_index(indices)?;
}
@@ -3645,7 +3643,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> BlockWriter
// This is necessary because with edge storage, changesets are written to static files
// whose index isn't updated until commit, making them invisible to subsequent reads
// within the same transaction.
let use_hashed = self.cached_storage_settings().use_hashed_state;
let use_hashed = self.cached_storage_settings().use_hashed_state();
let (account_transitions, storage_transitions) = {
let mut account_transitions: BTreeMap<Address, Vec<u64>> = BTreeMap::new();
let mut storage_transitions: BTreeMap<(Address, B256), Vec<u64>> = BTreeMap::new();
@@ -3681,7 +3679,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> BlockWriter
// Note: For MDBX we use insert_*_history_index. For RocksDB we use
// append_*_history_shard which handles read-merge-write internally.
let storage_settings = self.cached_storage_settings();
if storage_settings.account_history_in_rocksdb {
if storage_settings.storage_v2 {
#[cfg(all(unix, feature = "rocksdb"))]
self.with_rocksdb_batch(|mut batch| {
for (address, blocks) in account_transitions {
@@ -3692,7 +3690,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> BlockWriter
} else {
self.insert_account_history_index(account_transitions)?;
}
if storage_settings.storages_history_in_rocksdb {
if storage_settings.storage_v2 {
#[cfg(all(unix, feature = "rocksdb"))]
self.with_rocksdb_batch(|mut batch| {
for ((address, key), blocks) in storage_transitions {
@@ -4445,7 +4443,7 @@ mod tests {
// Static files mode
{
let factory = create_test_provider_factory();
let storage_settings = StorageSettings::v1().with_receipts_in_static_files(true);
let storage_settings = StorageSettings::v2();
factory.set_storage_settings_cache(storage_settings);
let factory = factory.with_prune_modes(PruneModes {
receipts: Some(PruneMode::Before(2)),
@@ -4603,7 +4601,7 @@ mod tests {
fn test_write_and_remove_state_roundtrip_legacy() {
let factory = create_test_provider_factory();
let storage_settings = StorageSettings::v1();
assert!(!storage_settings.use_hashed_state);
assert!(!storage_settings.use_hashed_state());
factory.set_storage_settings_cache(storage_settings);
let address = Address::with_last_byte(1);
@@ -4771,7 +4769,7 @@ mod tests {
fn test_unwind_storage_hashing_legacy() {
let factory = create_test_provider_factory();
let storage_settings = StorageSettings::v1();
assert!(!storage_settings.use_hashed_state);
assert!(!storage_settings.use_hashed_state());
factory.set_storage_settings_cache(storage_settings);
let address = Address::random();
@@ -5205,7 +5203,7 @@ mod tests {
fn test_write_and_remove_state_roundtrip_v2() {
let factory = create_test_provider_factory();
let storage_settings = StorageSettings::v2();
assert!(storage_settings.use_hashed_state);
assert!(storage_settings.use_hashed_state());
factory.set_storage_settings_cache(storage_settings);
let address = Address::with_last_byte(1);
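A note on the `use_hashed_state()` branches threaded through this file: the only difference a reader sees is which table is consulted and how its key is derived. A hedged sketch with a hypothetical helper that is not part of the crate; `keccak256` and the table names come from the code above:

```rust
use alloy_primitives::{keccak256, Address, B256};

/// Hypothetical helper mirroring the `basic_account` branch above: which
/// table and key an account read resolves to under each layout.
fn account_read_target(address: Address, use_hashed_state: bool) -> (&'static str, B256) {
    if use_hashed_state {
        // v2: HashedAccounts is keyed by keccak256(address)
        ("HashedAccounts", keccak256(address))
    } else {
        // legacy: PlainAccountState is keyed by the raw 20-byte address,
        // left-padded here only so both arms share one return type
        ("PlainAccountState", B256::left_padding_from(address.as_slice()))
    }
}

fn main() {
    let addr = Address::with_last_byte(1);
    let (table, key) = account_read_target(addr, true);
    assert_eq!(table, "HashedAccounts");
    assert_eq!(key, keccak256(addr));
}
```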

View File

@@ -62,21 +62,21 @@ impl RocksDBProvider {
let mut unwind_target: Option<BlockNumber> = None;
// Heal TransactionHashNumbers if stored in RocksDB
if provider.cached_storage_settings().transaction_hash_numbers_in_rocksdb &&
if provider.cached_storage_settings().storage_v2 &&
let Some(target) = self.heal_transaction_hash_numbers(provider)?
{
unwind_target = Some(unwind_target.map_or(target, |t| t.min(target)));
}
// Heal StoragesHistory if stored in RocksDB
if provider.cached_storage_settings().storages_history_in_rocksdb &&
if provider.cached_storage_settings().storage_v2 &&
let Some(target) = self.heal_storages_history(provider)?
{
unwind_target = Some(unwind_target.map_or(target, |t| t.min(target)));
}
// Heal AccountsHistory if stored in RocksDB
if provider.cached_storage_settings().account_history_in_rocksdb &&
if provider.cached_storage_settings().storage_v2 &&
let Some(target) = self.heal_accounts_history(provider)?
{
unwind_target = Some(unwind_target.map_or(target, |t| t.min(target)));
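The `map_or` expression repeated in the three heal branches above is a running minimum over optional unwind targets; an equivalent standalone sketch (the helper name is ours, not the provider's):

```rust
/// Equivalent of `unwind_target = Some(unwind_target.map_or(target, |t| t.min(target)))`
/// across the heal steps: keep the lowest proposed block so every
/// RocksDB-backed table ends up consistent after a single unwind.
fn fold_unwind_target(current: Option<u64>, proposed: Option<u64>) -> Option<u64> {
    match (current, proposed) {
        (Some(cur), Some(new)) => Some(cur.min(new)),
        (None, new) => new,
        (cur, None) => cur,
    }
}

fn main() {
    let mut target = None;
    for proposed in [Some(120), None, Some(100)] {
        target = fold_unwind_target(target, proposed);
    }
    assert_eq!(target, Some(100));
}
```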
@@ -496,19 +496,11 @@ mod tests {
#[test]
fn test_check_consistency_empty_rocksdb_no_checkpoint_is_ok() {
let temp_dir = TempDir::new().unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path())
.with_table::<tables::TransactionHashNumbers>()
.with_table::<tables::StoragesHistory>()
.build()
.unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
// Create a test provider factory for MDBX
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1()
.with_transaction_hash_numbers_in_rocksdb(true)
.with_storages_history_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
let provider = factory.database_provider_ro().unwrap();
@@ -520,16 +512,11 @@ mod tests {
#[test]
fn test_check_consistency_empty_rocksdb_with_checkpoint_is_first_run() {
let temp_dir = TempDir::new().unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path())
.with_table::<tables::TransactionHashNumbers>()
.build()
.unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
// Create a test provider factory for MDBX
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1().with_transaction_hash_numbers_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
// Set a checkpoint indicating we should have processed up to block 100
{
@@ -553,15 +540,10 @@ mod tests {
#[test]
fn test_check_consistency_checkpoint_zero_with_rocksdb_data_prunes_all() {
let temp_dir = TempDir::new().unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path())
.with_table::<tables::TransactionHashNumbers>()
.build()
.unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1().with_transaction_hash_numbers_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
// Generate blocks with real transactions and insert them
let mut rng = generators::rng();
@@ -589,12 +571,18 @@ mod tests {
provider.commit().unwrap();
}
// Explicitly clear the TransactionLookup checkpoint to simulate crash recovery
// Explicitly clear the checkpoints to simulate crash recovery
{
let provider = factory.database_provider_rw().unwrap();
provider
.save_stage_checkpoint(StageId::TransactionLookup, StageCheckpoint::new(0))
.unwrap();
provider
.save_stage_checkpoint(StageId::IndexStorageHistory, StageCheckpoint::new(0))
.unwrap();
provider
.save_stage_checkpoint(StageId::IndexAccountHistory, StageCheckpoint::new(0))
.unwrap();
provider.commit().unwrap();
}
@@ -620,16 +608,11 @@ mod tests {
#[test]
fn test_check_consistency_storages_history_empty_with_checkpoint_is_first_run() {
let temp_dir = TempDir::new().unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path())
.with_table::<tables::StoragesHistory>()
.build()
.unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
// Create a test provider factory for MDBX
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1().with_storages_history_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
// Set a checkpoint indicating we should have processed up to block 100
{
@@ -652,10 +635,7 @@ mod tests {
#[test]
fn test_check_consistency_storages_history_has_data_no_checkpoint_prunes_data() {
let temp_dir = TempDir::new().unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path())
.with_table::<tables::StoragesHistory>()
.build()
.unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
// Insert data into RocksDB
let key = StorageShardedKey::new(Address::ZERO, B256::ZERO, 50);
@@ -667,9 +647,7 @@ mod tests {
// Create a test provider factory for MDBX with NO checkpoint
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1().with_storages_history_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
let provider = factory.database_provider_ro().unwrap();
@@ -687,15 +665,10 @@ mod tests {
#[test]
fn test_check_consistency_mdbx_behind_checkpoint_needs_unwind() {
let temp_dir = TempDir::new().unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path())
.with_table::<tables::TransactionHashNumbers>()
.build()
.unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1().with_transaction_hash_numbers_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
// Generate blocks with real transactions (blocks 0-2, 6 transactions total)
let mut rng = generators::rng();
@@ -729,6 +702,13 @@ mod tests {
provider
.save_stage_checkpoint(StageId::TransactionLookup, StageCheckpoint::new(10))
.unwrap();
// Reset history checkpoints so they don't interfere
provider
.save_stage_checkpoint(StageId::IndexStorageHistory, StageCheckpoint::new(0))
.unwrap();
provider
.save_stage_checkpoint(StageId::IndexAccountHistory, StageCheckpoint::new(0))
.unwrap();
provider.commit().unwrap();
}
@@ -742,16 +722,11 @@ mod tests {
#[test]
fn test_check_consistency_rocksdb_ahead_of_checkpoint_prunes_excess() {
let temp_dir = TempDir::new().unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path())
.with_table::<tables::TransactionHashNumbers>()
.build()
.unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
// Create a test provider factory for MDBX
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1().with_transaction_hash_numbers_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
// Generate blocks with real transactions:
// Blocks 0-5, each with 2 transactions = 12 total transactions (0-11)
@@ -809,6 +784,13 @@ mod tests {
provider
.save_stage_checkpoint(StageId::TransactionLookup, StageCheckpoint::new(2))
.unwrap();
// Reset history checkpoints so they don't interfere
provider
.save_stage_checkpoint(StageId::IndexStorageHistory, StageCheckpoint::new(0))
.unwrap();
provider
.save_stage_checkpoint(StageId::IndexAccountHistory, StageCheckpoint::new(0))
.unwrap();
provider.commit().unwrap();
}
@@ -843,10 +825,7 @@ mod tests {
#[test]
fn test_check_consistency_storages_history_sentinel_only_with_checkpoint_is_first_run() {
let temp_dir = TempDir::new().unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path())
.with_table::<tables::StoragesHistory>()
.build()
.unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
// Insert ONLY sentinel entries (highest_block_number = u64::MAX)
// This simulates a scenario where history tracking started but no shards were completed
@@ -861,9 +840,7 @@ mod tests {
// Create a test provider factory for MDBX
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1().with_storages_history_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
// Set a checkpoint indicating we should have processed up to block 100
{
@@ -888,10 +865,7 @@ mod tests {
use reth_db_api::models::ShardedKey;
let temp_dir = TempDir::new().unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path())
.with_table::<tables::AccountsHistory>()
.build()
.unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
// Insert ONLY sentinel entries (highest_block_number = u64::MAX)
let key_sentinel_1 = ShardedKey::new(Address::ZERO, u64::MAX);
@@ -905,9 +879,7 @@ mod tests {
// Create a test provider factory for MDBX
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1().with_account_history_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
// Set a checkpoint indicating we should have processed up to block 100
{
@@ -940,9 +912,7 @@ mod tests {
// Create a test provider factory for MDBX
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1().with_transaction_hash_numbers_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
// Generate random blocks with unique transactions
// Block 0 (genesis) has no transactions
@@ -1046,16 +1016,11 @@ mod tests {
#[test]
fn test_check_consistency_accounts_history_empty_with_checkpoint_is_first_run() {
let temp_dir = TempDir::new().unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path())
.with_table::<tables::AccountsHistory>()
.build()
.unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
// Create a test provider factory for MDBX
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1().with_account_history_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
// Set a checkpoint indicating we should have processed up to block 100
{
@@ -1080,10 +1045,7 @@ mod tests {
use reth_db_api::models::ShardedKey;
let temp_dir = TempDir::new().unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path())
.with_table::<tables::AccountsHistory>()
.build()
.unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
// Insert data into RocksDB
let key = ShardedKey::new(Address::ZERO, 50);
@@ -1095,9 +1057,7 @@ mod tests {
// Create a test provider factory for MDBX with NO checkpoint
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1().with_account_history_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
let provider = factory.database_provider_ro().unwrap();
@@ -1120,10 +1080,7 @@ mod tests {
use reth_static_file_types::StaticFileSegment;
let temp_dir = TempDir::new().unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path())
.with_table::<tables::AccountsHistory>()
.build()
.unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
// Insert some AccountsHistory entries with various highest_block_numbers
let key1 = ShardedKey::new(Address::ZERO, 50);
@@ -1143,9 +1100,7 @@ mod tests {
// Create a test provider factory for MDBX
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1().with_account_history_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
// Write account changesets to static files for blocks 0-100
{
@@ -1224,17 +1179,10 @@ mod tests {
const CHECKPOINT_BLOCK: u64 = 5_000;
let temp_dir = TempDir::new().unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path())
.with_table::<tables::StoragesHistory>()
.build()
.unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1()
.with_storages_history_in_rocksdb(true)
.with_storage_changesets_in_static_files(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
// Helper to generate address from block number (reuses stack arrays)
#[inline]
@@ -1348,17 +1296,10 @@ mod tests {
const SF_TIP: u64 = 200;
let temp_dir = TempDir::new().unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path())
.with_table::<tables::StoragesHistory>()
.build()
.unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1()
.with_storages_history_in_rocksdb(true)
.with_storage_changesets_in_static_files(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
let checkpoint_addr = Address::repeat_byte(0xAA);
let checkpoint_slot = B256::repeat_byte(0xBB);
@@ -1453,18 +1394,11 @@ mod tests {
use reth_static_file_types::StaticFileSegment;
let temp_dir = TempDir::new().unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path())
.with_table::<tables::AccountsHistory>()
.build()
.unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
// Create test provider factory
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1()
.with_account_history_in_rocksdb(true)
.with_account_changesets_in_static_files(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
const TOTAL_BLOCKS: u64 = 15_000;
const CHECKPOINT_BLOCK: u64 = 5_000;
@@ -1575,17 +1509,10 @@ mod tests {
const SF_TIP: u64 = 200;
let temp_dir = TempDir::new().unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path())
.with_table::<tables::AccountsHistory>()
.build()
.unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1()
.with_account_history_in_rocksdb(true)
.with_account_changesets_in_static_files(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
let checkpoint_addr = Address::repeat_byte(0xAA);
let stale_addr = Address::repeat_byte(0xCC);
@@ -1652,10 +1579,7 @@ mod tests {
use reth_static_file_types::StaticFileSegment;
let temp_dir = TempDir::new().unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path())
.with_table::<tables::StoragesHistory>()
.build()
.unwrap();
let rocksdb = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
// Insert StoragesHistory entries into RocksDB
let key1 = StorageShardedKey::new(Address::ZERO, B256::ZERO, 50);
@@ -1671,9 +1595,7 @@ mod tests {
// Create a test provider factory
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(
StorageSettings::v1().with_storages_history_in_rocksdb(true),
);
factory.set_storage_settings_cache(StorageSettings::v2());
// Write storage changesets to static files for blocks 0-100
{

View File

@@ -1204,7 +1204,7 @@ impl RocksDBProvider {
ctx: RocksDBWriteCtx,
runtime: &reth_tasks::Runtime,
) -> ProviderResult<()> {
if !ctx.storage_settings.any_in_rocksdb() {
if !ctx.storage_settings.storage_v2 {
return Ok(());
}
@@ -1212,10 +1212,10 @@ impl RocksDBProvider {
let mut r_account_history = None;
let mut r_storage_history = None;
let write_tx_hash = ctx.storage_settings.transaction_hash_numbers_in_rocksdb &&
ctx.prune_tx_lookup.is_none_or(|m| !m.is_full());
let write_account_history = ctx.storage_settings.account_history_in_rocksdb;
let write_storage_history = ctx.storage_settings.storages_history_in_rocksdb;
let write_tx_hash =
ctx.storage_settings.storage_v2 && ctx.prune_tx_lookup.is_none_or(|m| !m.is_full());
let write_account_history = ctx.storage_settings.storage_v2;
let write_storage_history = ctx.storage_settings.storage_v2;
runtime.storage_pool().in_place_scope(|s| {
if write_tx_hash {

View File

@@ -159,7 +159,7 @@ impl<'b, Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + Block
return Err(ProviderError::StateAtBlockPruned(self.block_number))
}
let lookup_key = if self.provider.cached_storage_settings().use_hashed_state {
let lookup_key = if self.provider.cached_storage_settings().use_hashed_state() {
storage_key.to_hashed()
} else {
debug_assert!(
@@ -193,7 +193,7 @@ impl<'b, Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + Block
where
Provider: StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider,
{
let lookup_key = if self.provider.cached_storage_settings().use_hashed_state {
let lookup_key = if self.provider.cached_storage_settings().use_hashed_state() {
storage_key.to_hashed()
} else {
debug_assert!(
@@ -216,7 +216,7 @@ impl<'b, Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + Block
.map(|entry| entry.value)
.map(Some),
HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => {
if self.provider.cached_storage_settings().use_hashed_state {
if self.provider.cached_storage_settings().use_hashed_state() {
let hashed_address = alloy_primitives::keccak256(address);
Ok(self
.tx()
@@ -337,7 +337,7 @@ impl<
.map(|account_before| account_before.info)
}
HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => {
if self.provider.cached_storage_settings().use_hashed_state {
if self.provider.cached_storage_settings().use_hashed_state() {
let hashed_address = alloy_primitives::keccak256(address);
Ok(self.tx().get_by_encoded_key::<tables::HashedAccounts>(&hashed_address)?)
} else {
@@ -520,7 +520,7 @@ impl<
address: Address,
hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
if !self.provider.cached_storage_settings().use_hashed_state {
if !self.provider.cached_storage_settings().use_hashed_state() {
return Err(ProviderError::UnsupportedProvider)
}
self.storage_by_lookup_key(address, StorageSlotKey::hashed(hashed_storage_key))
@@ -1033,7 +1033,7 @@ mod tests {
fn history_provider_get_storage_legacy() {
let factory = create_test_provider_factory();
assert!(!factory.provider().unwrap().cached_storage_settings().use_hashed_state);
assert!(!factory.provider().unwrap().cached_storage_settings().use_hashed_state());
let tx = factory.provider_rw().unwrap().into_tx();
@@ -1277,7 +1277,7 @@ mod tests {
#[test]
fn test_historical_storage_by_hashed_key_unsupported_in_v1() {
let factory = create_test_provider_factory();
assert!(!factory.provider().unwrap().cached_storage_settings().use_hashed_state);
assert!(!factory.provider().unwrap().cached_storage_settings().use_hashed_state());
let db = factory.provider().unwrap();
let provider = HistoricalStateProviderRef::new(&db, 1);

View File

@@ -54,7 +54,7 @@ impl<Provider: DBProvider + StorageSettingsCache> AccountReader
{
/// Get basic account information.
fn basic_account(&self, address: &Address) -> ProviderResult<Option<Account>> {
if self.0.cached_storage_settings().use_hashed_state {
if self.0.cached_storage_settings().use_hashed_state() {
let hashed_address = alloy_primitives::keccak256(address);
self.tx()
.get_by_encoded_key::<tables::HashedAccounts>(&hashed_address)
@@ -180,7 +180,7 @@ impl<Provider: DBProvider + BlockHashReader + StorageSettingsCache> StateProvide
account: Address,
storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
if self.0.cached_storage_settings().use_hashed_state {
if self.0.cached_storage_settings().use_hashed_state() {
self.hashed_storage_lookup(
alloy_primitives::keccak256(account),
alloy_primitives::keccak256(storage_key),
@@ -201,7 +201,7 @@ impl<Provider: DBProvider + BlockHashReader + StorageSettingsCache> StateProvide
address: Address,
hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
if self.0.cached_storage_settings().use_hashed_state {
if self.0.cached_storage_settings().use_hashed_state() {
self.hashed_storage_lookup(alloy_primitives::keccak256(address), hashed_storage_key)
} else {
Err(ProviderError::UnsupportedProvider)
@@ -321,7 +321,7 @@ mod tests {
#[test]
fn test_latest_storage_legacy() {
let factory = create_test_provider_factory();
assert!(!factory.provider().unwrap().cached_storage_settings().use_hashed_state);
assert!(!factory.provider().unwrap().cached_storage_settings().use_hashed_state());
let address = address!("0x0000000000000000000000000000000000000001");
let slot = b256!("0x0000000000000000000000000000000000000000000000000000000000000005");
@@ -347,7 +347,7 @@ mod tests {
#[test]
fn test_latest_storage_legacy_does_not_read_hashed() {
let factory = create_test_provider_factory();
assert!(!factory.provider().unwrap().cached_storage_settings().use_hashed_state);
assert!(!factory.provider().unwrap().cached_storage_settings().use_hashed_state());
let address = address!("0x0000000000000000000000000000000000000001");
let slot = b256!("0x0000000000000000000000000000000000000000000000000000000000000005");
@@ -400,7 +400,7 @@ mod tests {
#[test]
fn test_latest_storage_by_hashed_key_unsupported_in_v1() {
let factory = create_test_provider_factory();
assert!(!factory.provider().unwrap().cached_storage_settings().use_hashed_state);
assert!(!factory.provider().unwrap().cached_storage_settings().use_hashed_state());
let address = address!("0x0000000000000000000000000000000000000001");
let slot = b256!("0x0000000000000000000000000000000000000000000000000000000000000001");
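For context on the `use_hashed_state()` branch in `storage()` above: under v2 both the account and the slot are hashed before the lookup, which the legacy tests here deliberately never do. A small sketch of the key pair; the helper is illustrative rather than the crate's API:

```rust
use alloy_primitives::{address, b256, keccak256, Address, B256};

/// Illustrative: the (hashed account, hashed slot) pair that the v2 branch of
/// `storage()` hands to `hashed_storage_lookup` above.
fn v2_storage_lookup_keys(account: Address, slot: B256) -> (B256, B256) {
    (keccak256(account), keccak256(slot))
}

fn main() {
    let account = address!("0x0000000000000000000000000000000000000001");
    let slot = b256!("0x0000000000000000000000000000000000000000000000000000000000000005");
    let (hashed_account, hashed_slot) = v2_storage_lookup_keys(account, slot);
    // The legacy layout would query with the plain (account, slot) pair instead.
    assert_ne!(hashed_slot, slot);
    assert_ne!(hashed_account, B256::left_padding_from(account.as_slice()));
}
```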

View File

@@ -39,7 +39,7 @@ pub trait RocksDBProviderFactory {
{
#[cfg(all(unix, feature = "rocksdb"))]
{
if self.cached_storage_settings().any_in_rocksdb() {
if self.cached_storage_settings().storage_v2 {
let rocksdb = self.rocksdb_provider();
let tx = rocksdb.tx();
return f(Some(&tx));
@@ -183,8 +183,7 @@ mod tests {
#[test]
fn test_rocksdb_settings_create_tx() {
let settings =
StorageSettings { account_history_in_rocksdb: true, ..StorageSettings::v1() };
let settings = StorageSettings::v2();
let provider = TestProvider::new(settings);
let result = provider.with_rocksdb_tx(|tx| {

View File

@@ -528,15 +528,12 @@ mod tests {
#[test]
fn from_reverts_with_hashed_state() {
use reth_db_api::models::StorageBeforeTx;
use reth_db_api::models::{StorageBeforeTx, StorageSettings};
use reth_provider::{StaticFileProviderFactory, StaticFileSegment, StaticFileWriter};
let factory = create_test_provider_factory();
let mut settings = factory.cached_storage_settings();
settings.use_hashed_state = true;
settings.storage_changesets_in_static_files = true;
factory.set_storage_settings_cache(settings);
factory.set_storage_settings_cache(StorageSettings::v2());
let provider = factory.provider_rw().unwrap();
@@ -548,33 +545,32 @@ mod tests {
let hashed_slot1 = keccak256(plain_slot1);
let hashed_slot2 = keccak256(plain_slot2);
provider
.tx_ref()
.put::<tables::AccountChangeSets>(
1,
AccountBeforeTx {
address: address1,
info: Some(Account { nonce: 1, ..Default::default() }),
},
)
.unwrap();
provider
.tx_ref()
.put::<tables::AccountChangeSets>(
2,
AccountBeforeTx {
address: address1,
info: Some(Account { nonce: 2, ..Default::default() }),
},
)
.unwrap();
provider
.tx_ref()
.put::<tables::AccountChangeSets>(3, AccountBeforeTx { address: address2, info: None })
.unwrap();
{
let sf = factory.static_file_provider();
// Write account changesets to static files (v2 reads from here)
let mut aw = sf.latest_writer(StaticFileSegment::AccountChangeSets).unwrap();
aw.append_account_changeset(vec![], 0).unwrap();
aw.append_account_changeset(
vec![AccountBeforeTx {
address: address1,
info: Some(Account { nonce: 1, ..Default::default() }),
}],
1,
)
.unwrap();
aw.append_account_changeset(
vec![AccountBeforeTx {
address: address1,
info: Some(Account { nonce: 2, ..Default::default() }),
}],
2,
)
.unwrap();
aw.append_account_changeset(vec![AccountBeforeTx { address: address2, info: None }], 3)
.unwrap();
aw.commit().unwrap();
let mut writer = sf.latest_writer(StaticFileSegment::StorageChangeSets).unwrap();
writer.append_storage_changeset(vec![], 0).unwrap();
writer

View File

@@ -149,7 +149,7 @@ mod tests {
let factory = create_test_provider_factory();
let provider = factory.provider_rw().unwrap();
assert!(!provider.cached_storage_settings().use_hashed_state);
assert!(!provider.cached_storage_settings().use_hashed_state());
let address = Address::with_last_byte(42);
let slot1 = B256::from(U256::from(100));
@@ -191,18 +191,15 @@ mod tests {
#[test]
fn test_hashed_storage_from_reverts_hashed_state() {
use reth_db_api::models::StorageBeforeTx;
use reth_db_api::models::{StorageBeforeTx, StorageSettings};
let factory = create_test_provider_factory();
let mut settings = factory.cached_storage_settings();
settings.use_hashed_state = true;
settings.storage_changesets_in_static_files = true;
factory.set_storage_settings_cache(settings);
factory.set_storage_settings_cache(StorageSettings::v2());
let provider = factory.provider_rw().unwrap();
assert!(provider.cached_storage_settings().use_hashed_state);
assert!(provider.cached_storage_settings().storage_changesets_in_static_files);
assert!(provider.cached_storage_settings().use_hashed_state());
assert!(provider.cached_storage_settings().is_v2());
let address = Address::with_last_byte(42);
let plain_slot1 = B256::from(U256::from(100));

View File

@@ -31,14 +31,7 @@
- [`reth db settings`](./reth/db/settings.mdx)
- [`reth db settings get`](./reth/db/settings/get.mdx)
- [`reth db settings set`](./reth/db/settings/set.mdx)
- [`reth db settings set receipts`](./reth/db/settings/set/receipts.mdx)
- [`reth db settings set transaction_senders`](./reth/db/settings/set/transaction_senders.mdx)
- [`reth db settings set account_changesets`](./reth/db/settings/set/account_changesets.mdx)
- [`reth db settings set storages_history`](./reth/db/settings/set/storages_history.mdx)
- [`reth db settings set transaction_hash_numbers`](./reth/db/settings/set/transaction_hash_numbers.mdx)
- [`reth db settings set account_history`](./reth/db/settings/set/account_history.mdx)
- [`reth db settings set storage_changesets`](./reth/db/settings/set/storage_changesets.mdx)
- [`reth db settings set use_hashed_state`](./reth/db/settings/set/use_hashed_state.mdx)
- [`reth db settings set v2`](./reth/db/settings/set/v2.mdx)
- [`reth db account-storage`](./reth/db/account-storage.mdx)
- [`reth db state`](./reth/db/state.mdx)
- [`reth download`](./reth/download.mdx)

View File

@@ -129,77 +129,6 @@ Static Files:
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
When enabled, receipts will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.transaction-senders <TRANSACTION_SENDERS>
Store transaction senders in static files instead of the database.
When enabled, transaction senders will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.account-change-sets <ACCOUNT_CHANGESETS>
Store account changesets in static files.
When enabled, account changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.
This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false.
--rocksdb.tx-hash <TX_HASH>
Route tx hash -> number table to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.storages-history <STORAGES_HISTORY>
Route storages history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.account-history <ACCOUNT_HISTORY>
Route account history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
Storage:
--storage.v2
Enable v2 storage defaults (static files + `RocksDB` routing).
@@ -210,13 +139,6 @@ Storage:
Individual settings can still be overridden with `--static-files.*` and `--rocksdb.*` flags.
--storage.use-hashed-state
Use hashed state tables (`HashedAccounts`/`HashedStorages`) as canonical state representation instead of plain state tables.
When enabled, execution writes directly to hashed tables, eliminating the need for separate hashing stages. This should only be enabled for new databases.
WARNING: Changing this setting on an existing database requires a full resync.
Logging:
--log.stdout.format <FORMAT>
The format to use for logs written to stdout

View File

@@ -9,15 +9,8 @@ $ reth db settings set --help
Usage: reth db settings set [OPTIONS] <COMMAND>
Commands:
receipts Store receipts in static files instead of the database
transaction_senders Store transaction senders in static files instead of the database
account_changesets Store account changesets in static files instead of the database
storages_history Store storage history in rocksdb instead of MDBX
transaction_hash_numbers Store transaction hash to number mapping in rocksdb instead of MDBX
account_history Store account history in rocksdb instead of MDBX
storage_changesets Store storage changesets in static files instead of the database
use_hashed_state Use hashed state tables (HashedAccounts/HashedStorages) as canonical state
help Print this message or the help of the given subcommand(s)
v2 Enable or disable v2 storage layout
help Print this message or the help of the given subcommand(s)
Options:
-h, --help

View File

@@ -1,12 +1,12 @@
# reth db settings set account_changesets_in_static_files
# reth db settings set v2
Store account changesets in static files instead of the database
Enable or disable v2 storage layout
```bash
$ reth db settings set account_changesets_in_static_files --help
$ reth db settings set v2 --help
```
```txt
Usage: reth db settings set account_changesets_in_static_files [OPTIONS] <VALUE>
Usage: reth db settings set v2 [OPTIONS] <VALUE>
Arguments:
<VALUE>
@@ -95,6 +95,24 @@ Logging:
[default: always]
--logs-otlp[=<URL>]
Enable `Opentelemetry` logs export to an OTLP endpoint.
If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317`
Example: --logs-otlp=http://collector:4318/v1/logs
[env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=]
--logs-otlp.filter <FILTER>
Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.
Example: --logs-otlp.filter=info,reth=debug
Defaults to INFO if not specified.
[default: info]
Display:
-v, --verbosity...
Set the minimum log level.
@@ -119,9 +137,9 @@ Tracing:
[env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=]
--tracing-otlp-protocol <PROTOCOL>
OTLP transport protocol to use for exporting traces.
OTLP transport protocol to use for exporting traces and logs.
- `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path
- `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path
Defaults to HTTP if not specified.
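A minimal usage sketch for the consolidated `reth db settings set v2` command documented in this file. It assumes `<VALUE>` accepts `true`/`false` (the usage line above only shows `<VALUE>`) and that `--datadir` selects the target database as in other `reth db` subcommands; flag placement is illustrative, not taken from this diff.

```bash
# Sketch only: toggle the v2 storage layout flag on a datadir.
# Note the help text above warns that storage settings are effectively
# genesis-time choices; changing them on an initialized node requires a re-sync.
reth db --datadir /path/to/datadir settings set v2 true   # adopt the v2 layout
reth db --datadir /path/to/datadir settings set v2 false  # fall back to v1/legacy
```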

View File

@@ -111,77 +111,6 @@ Static Files:
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
When enabled, receipts will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.transaction-senders <TRANSACTION_SENDERS>
Store transaction senders in static files instead of the database.
When enabled, transaction senders will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.account-change-sets <ACCOUNT_CHANGESETS>
Store account changesets in static files.
When enabled, account changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.
This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false.
--rocksdb.tx-hash <TX_HASH>
Route tx hash -> number table to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.storages-history <STORAGES_HISTORY>
Route storages history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.account-history <ACCOUNT_HISTORY>
Route account history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
Storage:
--storage.v2
Enable v2 storage defaults (static files + `RocksDB` routing).
@@ -192,13 +121,6 @@ Storage:
Individual settings can still be overridden with `--static-files.*` and `--rocksdb.*` flags.
--storage.use-hashed-state
Use hashed state tables (`HashedAccounts`/`HashedStorages`) as canonical state representation instead of plain state tables.
When enabled, execution writes directly to hashed tables, eliminating the need for separate hashing stages. This should only be enabled for new databases.
WARNING: Changing this setting on an existing database requires a full resync.
-u, --url <URL>
Specify a snapshot URL or let the command propose a default one.

View File

@@ -111,77 +111,6 @@ Static Files:
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
When enabled, receipts will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.transaction-senders <TRANSACTION_SENDERS>
Store transaction senders in static files instead of the database.
When enabled, transaction senders will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.account-change-sets <ACCOUNT_CHANGESETS>
Store account changesets in static files.
When enabled, account changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.
This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false.
--rocksdb.tx-hash <TX_HASH>
Route tx hash -> number table to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.storages-history <STORAGES_HISTORY>
Route storages history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.account-history <ACCOUNT_HISTORY>
Route account history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
Storage:
--storage.v2
Enable v2 storage defaults (static files + `RocksDB` routing).
@@ -192,13 +121,6 @@ Storage:
Individual settings can still be overridden with `--static-files.*` and `--rocksdb.*` flags.
--storage.use-hashed-state
Use hashed state tables (`HashedAccounts`/`HashedStorages`) as canonical state representation instead of plain state tables.
When enabled, execution writes directly to hashed tables, eliminating the need for separate hashing stages. This should only be enabled for new databases.
WARNING: Changing this setting on an existing database requires a full resync.
--first-block-number <first-block-number>
Optional first block number to export from the db.
It is by default 0.

View File

@@ -111,77 +111,6 @@ Static Files:
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
When enabled, receipts will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.transaction-senders <TRANSACTION_SENDERS>
Store transaction senders in static files instead of the database.
When enabled, transaction senders will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.account-change-sets <ACCOUNT_CHANGESETS>
Store account changesets in static files.
When enabled, account changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.
This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false.
--rocksdb.tx-hash <TX_HASH>
Route tx hash -> number table to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.storages-history <STORAGES_HISTORY>
Route storages history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.account-history <ACCOUNT_HISTORY>
Route account history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
Storage:
--storage.v2
Enable v2 storage defaults (static files + `RocksDB` routing).
@@ -192,13 +121,6 @@ Storage:
Individual settings can still be overridden with `--static-files.*` and `--rocksdb.*` flags.
--storage.use-hashed-state
Use hashed state tables (`HashedAccounts`/`HashedStorages`) as canonical state representation instead of plain state tables.
When enabled, execution writes directly to hashed tables, eliminating the need for separate hashing stages. This should only be enabled for new databases.
WARNING: Changing this setting on an existing database requires a full resync.
--path <IMPORT_ERA_PATH>
The path to a directory for import.

View File

@@ -111,77 +111,6 @@ Static Files:
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
When enabled, receipts will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.transaction-senders <TRANSACTION_SENDERS>
Store transaction senders in static files instead of the database.
When enabled, transaction senders will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.account-change-sets <ACCOUNT_CHANGESETS>
Store account changesets in static files.
When enabled, account changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.
This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false.
--rocksdb.tx-hash <TX_HASH>
Route tx hash -> number table to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.storages-history <STORAGES_HISTORY>
Route storages history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.account-history <ACCOUNT_HISTORY>
Route account history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
Storage:
--storage.v2
Enable v2 storage defaults (static files + `RocksDB` routing).
@@ -192,13 +121,6 @@ Storage:
Individual settings can still be overridden with `--static-files.*` and `--rocksdb.*` flags.
--storage.use-hashed-state
Use hashed state tables (`HashedAccounts`/`HashedStorages`) as canonical state representation instead of plain state tables.
When enabled, execution writes directly to hashed tables, eliminating the need for separate hashing stages. This should only be enabled for new databases.
WARNING: Changing this setting on an existing database requires a full resync.
--no-state
Disables stages that require state.

View File

@@ -111,77 +111,6 @@ Static Files:
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
When enabled, receipts will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.transaction-senders <TRANSACTION_SENDERS>
Store transaction senders in static files instead of the database.
When enabled, transaction senders will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.account-change-sets <ACCOUNT_CHANGESETS>
Store account changesets in static files.
When enabled, account changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.
This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false.
--rocksdb.tx-hash <TX_HASH>
Route tx hash -> number table to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.storages-history <STORAGES_HISTORY>
Route storages history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.account-history <ACCOUNT_HISTORY>
Route account history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
Storage:
--storage.v2
Enable v2 storage defaults (static files + `RocksDB` routing).
@@ -192,13 +121,6 @@ Storage:
Individual settings can still be overridden with `--static-files.*` and `--rocksdb.*` flags.
--storage.use-hashed-state
Use hashed state tables (`HashedAccounts`/`HashedStorages`) as canonical state representation instead of plain state tables.
When enabled, execution writes directly to hashed tables, eliminating the need for separate hashing stages. This should only be enabled for new databases.
WARNING: Changing this setting on an existing database requires a full resync.
--without-evm
Specifies whether to initialize the state without relying on EVM historical data.

View File

@@ -111,77 +111,6 @@ Static Files:
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
When enabled, receipts will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.transaction-senders <TRANSACTION_SENDERS>
Store transaction senders in static files instead of the database.
When enabled, transaction senders will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.account-change-sets <ACCOUNT_CHANGESETS>
Store account changesets in static files.
When enabled, account changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.
This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false.
--rocksdb.tx-hash <TX_HASH>
Route tx hash -> number table to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.storages-history <STORAGES_HISTORY>
Route storages history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.account-history <ACCOUNT_HISTORY>
Route account history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
Storage:
--storage.v2
Enable v2 storage defaults (static files + `RocksDB` routing).
@@ -192,13 +121,6 @@ Storage:
Individual settings can still be overridden with `--static-files.*` and `--rocksdb.*` flags.
--storage.use-hashed-state
Use hashed state tables (`HashedAccounts`/`HashedStorages`) as canonical state representation instead of plain state tables.
When enabled, execution writes directly to hashed tables, eliminating the need for separate hashing stages. This should only be enabled for new databases.
WARNING: Changing this setting on an existing database requires a full resync.
Logging:
--log.stdout.format <FORMAT>
The format to use for logs written to stdout

View File

@@ -912,33 +912,6 @@ Pruning:
--prune.bodies.before <BLOCK_NUMBER>
Prune block bodies before the specified block number. The specified block number is not pruned
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.
This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false.
--rocksdb.tx-hash <TX_HASH>
Route tx hash -> number table to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.storages-history <STORAGES_HISTORY>
Route storages history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.account-history <ACCOUNT_HISTORY>
Route account history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
Engine:
--engine.persistence-threshold <PERSISTENCE_THRESHOLD>
Configure persistence threshold for the engine. This determines how many canonical blocks must be in-memory, ahead of the last persisted block, before flushing canonical blocks to disk again.
@@ -1073,50 +1046,6 @@ Static Files:
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
When enabled, receipts will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.transaction-senders <TRANSACTION_SENDERS>
Store transaction senders in static files instead of the database.
When enabled, transaction senders will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.account-change-sets <ACCOUNT_CHANGESETS>
Store account changesets in static files.
When enabled, account changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
Storage:
--storage.v2
Enable v2 storage defaults (static files + `RocksDB` routing).
@@ -1127,13 +1056,6 @@ Storage:
Individual settings can still be overridden with `--static-files.*` and `--rocksdb.*` flags.
--storage.use-hashed-state
Use hashed state tables (`HashedAccounts`/`HashedStorages`) as canonical state representation instead of plain state tables.
When enabled, execution writes directly to hashed tables, eliminating the need for separate hashing stages. This should only be enabled for new databases.
WARNING: Changing this setting on an existing database requires a full resync.
Logging:
--log.stdout.format <FORMAT>
The format to use for logs written to stdout
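As a usage sketch for the `--storage.v2` and `--storage.use-hashed-state` flags retained in the node help above: enabling the v2 layout when initializing a fresh node. The datadir path and flag combinations are illustrative assumptions, not commands taken from this diff.

```bash
# Sketch: start a node with v2 storage defaults on a fresh datadir.
# Per the help text above, this should be chosen at genesis; switching an
# existing database would require a full re-sync.
reth node --datadir /path/to/new/datadir --storage.v2

# Optionally also use hashed state tables as the canonical representation
# (new databases only, per the --storage.use-hashed-state warning above).
reth node --datadir /path/to/new/datadir --storage.v2 --storage.use-hashed-state
```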

View File

@@ -111,77 +111,6 @@ Static Files:
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
When enabled, receipts will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.transaction-senders <TRANSACTION_SENDERS>
Store transaction senders in static files instead of the database.
When enabled, transaction senders will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.account-change-sets <ACCOUNT_CHANGESETS>
Store account changesets in static files.
When enabled, account changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.
This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false.
--rocksdb.tx-hash <TX_HASH>
Route tx hash -> number table to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.storages-history <STORAGES_HISTORY>
Route storages history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.account-history <ACCOUNT_HISTORY>
Route account history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
Storage:
--storage.v2
Enable v2 storage defaults (static files + `RocksDB` routing).
@@ -192,13 +121,6 @@ Storage:
Individual settings can still be overridden with `--static-files.*` and `--rocksdb.*` flags.
--storage.use-hashed-state
Use hashed state tables (`HashedAccounts`/`HashedStorages`) as canonical state representation instead of plain state tables.
When enabled, execution writes directly to hashed tables, eliminating the need for separate hashing stages. This should only be enabled for new databases.
WARNING: Changing this setting on an existing database requires a full resync.
Metrics:
--metrics <PROMETHEUS>
Enable Prometheus metrics.

View File

@@ -111,77 +111,6 @@ Static Files:
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
When enabled, receipts will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.transaction-senders <TRANSACTION_SENDERS>
Store transaction senders in static files instead of the database.
When enabled, transaction senders will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.account-change-sets <ACCOUNT_CHANGESETS>
Store account changesets in static files.
When enabled, account changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.
This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false.
--rocksdb.tx-hash <TX_HASH>
Route tx hash -> number table to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.storages-history <STORAGES_HISTORY>
Route storages history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.account-history <ACCOUNT_HISTORY>
Route account history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
Storage:
--storage.v2
Enable v2 storage defaults (static files + `RocksDB` routing).
@@ -192,13 +121,6 @@ Storage:
Individual settings can still be overridden with `--static-files.*` and `--rocksdb.*` flags.
--storage.use-hashed-state
Use hashed state tables (`HashedAccounts`/`HashedStorages`) as canonical state representation instead of plain state tables.
When enabled, execution writes directly to hashed tables, eliminating the need for separate hashing stages. This should only be enabled for new databases.
WARNING: Changing this setting on an existing database requires a full resync.
--from <FROM>
The height to start at

View File

@@ -111,77 +111,6 @@ Static Files:
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
When enabled, receipts will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.transaction-senders <TRANSACTION_SENDERS>
Store transaction senders in static files instead of the database.
When enabled, transaction senders will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.account-change-sets <ACCOUNT_CHANGESETS>
Store account changesets in static files.
When enabled, account changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.
This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false.
--rocksdb.tx-hash <TX_HASH>
Route tx hash -> number table to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.storages-history <STORAGES_HISTORY>
Route storages history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.account-history <ACCOUNT_HISTORY>
Route account history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
Storage:
--storage.v2
Enable v2 storage defaults (static files + `RocksDB` routing).
@@ -192,13 +121,6 @@ Storage:
Individual settings can still be overridden with `--static-files.*` and `--rocksdb.*` flags.
--storage.use-hashed-state
Use hashed state tables (`HashedAccounts`/`HashedStorages`) as canonical state representation instead of plain state tables.
When enabled, execution writes directly to hashed tables, eliminating the need for separate hashing stages. This should only be enabled for new databases.
WARNING: Changing this setting on an existing database requires a full resync.
<STAGE>
Possible values:
- headers: The headers stage within the pipeline

View File

@@ -118,77 +118,6 @@ Static Files:
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
When enabled, receipts will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.transaction-senders <TRANSACTION_SENDERS>
Store transaction senders in static files instead of the database.
When enabled, transaction senders will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.account-change-sets <ACCOUNT_CHANGESETS>
Store account changesets in static files.
When enabled, account changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.
This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false.
--rocksdb.tx-hash <TX_HASH>
Route tx hash -> number table to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.storages-history <STORAGES_HISTORY>
Route storages history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.account-history <ACCOUNT_HISTORY>
Route account history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
Storage:
--storage.v2
Enable v2 storage defaults (static files + `RocksDB` routing).
@@ -199,13 +128,6 @@ Storage:
Individual settings can still be overridden with `--static-files.*` and `--rocksdb.*` flags.
--storage.use-hashed-state
Use hashed state tables (`HashedAccounts`/`HashedStorages`) as canonical state representation instead of plain state tables.
When enabled, execution writes directly to hashed tables, eliminating the need for separate hashing stages. This should only be enabled for new databases.
WARNING: Changing this setting on an existing database requires a full resync.
Logging:
--log.stdout.format <FORMAT>
The format to use for logs written to stdout

View File

@@ -111,77 +111,6 @@ Static Files:
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
When enabled, receipts will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.transaction-senders <TRANSACTION_SENDERS>
Store transaction senders in static files instead of the database.
When enabled, transaction senders will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.account-change-sets <ACCOUNT_CHANGESETS>
Store account changesets in static files.
When enabled, account changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.
This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false.
--rocksdb.tx-hash <TX_HASH>
Route tx hash -> number table to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.storages-history <STORAGES_HISTORY>
Route storages history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.account-history <ACCOUNT_HISTORY>
Route account history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
Storage:
--storage.v2
Enable v2 storage defaults (static files + `RocksDB` routing).
@@ -192,13 +121,6 @@ Storage:
Individual settings can still be overridden with `--static-files.*` and `--rocksdb.*` flags.
--storage.use-hashed-state
Use hashed state tables (`HashedAccounts`/`HashedStorages`) as canonical state representation instead of plain state tables.
When enabled, execution writes directly to hashed tables, eliminating the need for separate hashing stages. This should only be enabled for new databases.
WARNING: Changing this setting on an existing database requires a full resync.
--metrics <SOCKET>
Enable Prometheus metrics.

View File

@@ -116,77 +116,6 @@ Static Files:
--static-files.blocks-per-file.storage-change-sets <BLOCKS_PER_FILE_STORAGE_CHANGE_SETS>
Number of blocks per file for the storage changesets segment
--static-files.receipts <RECEIPTS>
Store receipts in static files instead of the database.
When enabled, receipts will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.transaction-senders <TRANSACTION_SENDERS>
Store transaction senders in static files instead of the database.
When enabled, transaction senders will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.account-change-sets <ACCOUNT_CHANGESETS>
Store account changesets in static files.
When enabled, account changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--static-files.storage-change-sets <STORAGE_CHANGESETS>
Store storage changesets in static files.
When enabled, storage changesets will be written to static files on disk instead of the database.
Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch.
Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
RocksDB:
--rocksdb.all
Route all supported tables to `RocksDB` instead of MDBX.
This enables `RocksDB` for `tx-hash`, `storages-history`, and `account-history` tables. Cannot be combined with individual flags set to false.
--rocksdb.tx-hash <TX_HASH>
Route tx hash -> number table to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.storages-history <STORAGES_HISTORY>
Route storages history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
--rocksdb.account-history <ACCOUNT_HISTORY>
Route account history tables to `RocksDB` instead of MDBX.
This is a genesis-initialization-only flag: changing it after genesis requires a re-sync. Defaults to the base storage mode (v1: false, v2: true).
[possible values: true, false]
Storage:
--storage.v2
Enable v2 storage defaults (static files + `RocksDB` routing).
@@ -197,13 +126,6 @@ Storage:
Individual settings can still be overridden with `--static-files.*` and `--rocksdb.*` flags.
--storage.use-hashed-state
Use hashed state tables (`HashedAccounts`/`HashedStorages`) as canonical state representation instead of plain state tables.
When enabled, execution writes directly to hashed tables, eliminating the need for separate hashing stages. This should only be enabled for new databases.
WARNING: Changing this setting on an existing database requires a full resync.
--offline
If this is enabled, then all stages except headers, bodies, and sender recovery will be unwound

View File

@@ -149,36 +149,8 @@ export const rethCliSidebar: SidebarItem = {
collapsed: true,
items: [
{
text: "reth db settings set receipts",
link: "/cli/reth/db/settings/set/receipts"
},
{
text: "reth db settings set transaction_senders",
link: "/cli/reth/db/settings/set/transaction_senders"
},
{
text: "reth db settings set account_changesets",
link: "/cli/reth/db/settings/set/account_changesets"
},
{
text: "reth db settings set storages_history",
link: "/cli/reth/db/settings/set/storages_history"
},
{
text: "reth db settings set transaction_hash_numbers",
link: "/cli/reth/db/settings/set/transaction_hash_numbers"
},
{
text: "reth db settings set account_history",
link: "/cli/reth/db/settings/set/account_history"
},
{
text: "reth db settings set storage_changesets",
link: "/cli/reth/db/settings/set/storage_changesets"
},
{
text: "reth db settings set use_hashed_state",
link: "/cli/reth/db/settings/set/use_hashed_state"
text: "reth db settings set v2",
link: "/cli/reth/db/settings/set/v2"
}
]
}