refactor(db): use hashed state as canonical state representation (#21115)

Co-authored-by: Amp <amp@ampcode.com>
Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com>
Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com>
Georgios Konstantopoulos
2026-02-12 13:02:02 -05:00
committed by GitHub
parent 7ff78ca082
commit 121160d248
60 changed files with 4341 additions and 484 deletions

View File

@@ -1061,6 +1061,14 @@ mod tests {
) -> ProviderResult<Option<StorageValue>> {
Ok(None)
}
fn storage_by_hashed_key(
&self,
_address: Address,
_hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
Ok(None)
}
}
impl BytecodeReader for MockStateProvider {

View File

@@ -223,6 +223,26 @@ impl<N: NodePrimitives> StateProvider for MemoryOverlayStateProviderRef<'_, N> {
self.historical.storage(address, storage_key)
}
fn storage_by_hashed_key(
&self,
address: Address,
hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
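// Check the in-memory overlay (trie input) first; anything not shadowed there
// falls through to the historical provider below.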
let hashed_address = keccak256(address);
let state = &self.trie_input().state;
if let Some(hs) = state.storages.get(&hashed_address) {
if let Some(value) = hs.storage.get(&hashed_storage_key) {
return Ok(Some(*value));
}
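// The overlay wiped this account's storage (e.g. the account was destroyed),
// so any slot not explicitly written above reads as zero.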
if hs.wiped {
return Ok(Some(StorageValue::ZERO));
}
}
self.historical.storage_by_hashed_key(address, hashed_storage_key)
}
}
impl<N: NodePrimitives> BytecodeReader for MemoryOverlayStateProviderRef<'_, N> {

View File

@@ -5,6 +5,7 @@ use reth_codecs::Compact;
use reth_db_api::{cursor::DbDupCursorRO, database::Database, tables, transaction::DbTx};
use reth_db_common::DbTool;
use reth_node_builder::NodeTypesWithDB;
use reth_storage_api::StorageSettingsCache;
use std::time::{Duration, Instant};
use tracing::info;
@@ -22,52 +23,94 @@ impl Command {
/// Execute `db account-storage` command
pub fn execute<N: NodeTypesWithDB>(self, tool: &DbTool<N>) -> eyre::Result<()> {
let address = self.address;
let use_hashed_state = tool.provider_factory.cached_storage_settings().use_hashed_state;
let (slot_count, storage_size) = if use_hashed_state {
    let hashed_address = keccak256(address);
    tool.provider_factory.db_ref().view(|tx| {
        let mut cursor = tx.cursor_dup_read::<tables::HashedStorages>()?;
        let mut count = 0usize;
        let mut total_value_bytes = 0usize;
        let mut last_log = Instant::now();
        let walker = cursor.walk_dup(Some(hashed_address), None)?;
        for entry in walker {
            let (_, storage_entry) = entry?;
            count += 1;
            let mut buf = Vec::new();
            let entry_len = storage_entry.to_compact(&mut buf);
            total_value_bytes += entry_len;
            if last_log.elapsed() >= LOG_INTERVAL {
                info!(
                    target: "reth::cli",
                    address = %address,
                    slots = count,
                    key = %storage_entry.key,
                    "Processing hashed storage slots"
                );
                last_log = Instant::now();
            }
        }
        // Add 32 bytes for the hashed address key (stored once per account in dupsort)
        let total_size = if count > 0 { 32 + total_value_bytes } else { 0 };
        Ok::<_, eyre::Report>((count, total_size))
    })??
} else {
    tool.provider_factory.db_ref().view(|tx| {
        let mut cursor = tx.cursor_dup_read::<tables::PlainStorageState>()?;
        let mut count = 0usize;
        let mut total_value_bytes = 0usize;
        let mut last_log = Instant::now();
        // Walk all storage entries for this address
        let walker = cursor.walk_dup(Some(address), None)?;
        for entry in walker {
            let (_, storage_entry) = entry?;
            count += 1;
            let mut buf = Vec::new();
            // StorageEntry encodes as: 32 bytes (key/subkey uncompressed) + compressed U256
            let entry_len = storage_entry.to_compact(&mut buf);
            total_value_bytes += entry_len;
            if last_log.elapsed() >= LOG_INTERVAL {
                info!(
                    target: "reth::cli",
                    address = %address,
                    slots = count,
                    key = %storage_entry.key,
                    "Processing storage slots"
                );
                last_log = Instant::now();
            }
        }
        // Add 20 bytes for the Address key (stored once per account in dupsort)
        let total_size = if count > 0 { 20 + total_value_bytes } else { 0 };
        Ok::<_, eyre::Report>((count, total_size))
    })??
};
let hashed_address = keccak256(address);
println!("Account: {address}");
println!("Hashed address: {hashed_address}");
println!("Storage slots: {slot_count}");
if use_hashed_state {
    println!("Hashed storage size: {} (estimated)", human_bytes(storage_size as f64));
} else {
    // Estimate hashed storage size: 32-byte B256 key instead of 20-byte Address
    let hashed_size_estimate = if slot_count > 0 { storage_size + 12 } else { 0 };
    let total_estimate = storage_size + hashed_size_estimate;
    println!("Plain storage size: {} (estimated)", human_bytes(storage_size as f64));
    println!(
        "Hashed storage size: {} (estimated)",
        human_bytes(hashed_size_estimate as f64)
    );
    println!("Total estimated size: {}", human_bytes(total_estimate as f64));
}
Ok(())
}

View File

@@ -98,7 +98,8 @@ impl Command {
)?;
if let Some(entry) = entry {
println!("{}", serde_json::to_string_pretty(&entry)?);
let se: reth_primitives_traits::StorageEntry = entry.into();
println!("{}", serde_json::to_string_pretty(&se)?);
} else {
error!(target: "reth::cli", "No content for the given table key.");
}
@@ -106,7 +107,14 @@ impl Command {
}
let changesets = provider.storage_changeset(key.block_number())?;
println!("{}", serde_json::to_string_pretty(&changesets)?);
let serializable: Vec<_> = changesets
.into_iter()
.map(|(addr, entry)| {
let se: reth_primitives_traits::StorageEntry = entry.into();
(addr, se)
})
.collect();
println!("{}", serde_json::to_string_pretty(&serializable)?);
return Ok(());
}

View File

@@ -1,4 +1,4 @@
use alloy_primitives::{Address, BlockNumber, B256, U256};
use alloy_primitives::{keccak256, Address, BlockNumber, B256, U256};
use clap::Parser;
use parking_lot::Mutex;
use reth_db_api::{
@@ -63,39 +63,65 @@ impl Command {
address: Address,
limit: usize,
) -> eyre::Result<()> {
let use_hashed_state = tool.provider_factory.cached_storage_settings().use_hashed_state;
let entries = tool.provider_factory.db_ref().view(|tx| {
let (account, walker_entries) = if use_hashed_state {
    let hashed_address = keccak256(address);
    let account = tx.get::<tables::HashedAccounts>(hashed_address)?;
    let mut cursor = tx.cursor_dup_read::<tables::HashedStorages>()?;
    let walker = cursor.walk_dup(Some(hashed_address), None)?;
    let mut entries = Vec::new();
    let mut last_log = Instant::now();
    for (idx, entry) in walker.enumerate() {
        let (_, storage_entry) = entry?;
        if storage_entry.value != U256::ZERO {
            entries.push((storage_entry.key, storage_entry.value));
        }
        if entries.len() >= limit {
            break;
        }
        if last_log.elapsed() >= LOG_INTERVAL {
            info!(
                target: "reth::cli",
                address = %address,
                slots_scanned = idx,
                "Scanning storage slots"
            );
            last_log = Instant::now();
        }
    }
    (account, entries)
} else {
    // Get account info
    let account = tx.get::<tables::PlainAccountState>(address)?;
    // Get storage entries
    let mut cursor = tx.cursor_dup_read::<tables::PlainStorageState>()?;
    let walker = cursor.walk_dup(Some(address), None)?;
    let mut entries = Vec::new();
    let mut last_log = Instant::now();
    for (idx, entry) in walker.enumerate() {
        let (_, storage_entry) = entry?;
        if storage_entry.value != U256::ZERO {
            entries.push((storage_entry.key, storage_entry.value));
        }
        if entries.len() >= limit {
            break;
        }
        if last_log.elapsed() >= LOG_INTERVAL {
            info!(
                target: "reth::cli",
                address = %address,
                slots_scanned = idx,
                "Scanning storage slots"
            );
            last_log = Instant::now();
        }
    }
    (account, entries)
};
Ok::<_, eyre::Report>((account, walker_entries))
})??;
let (account, storage_entries) = entries;

View File

@@ -119,8 +119,8 @@ pub struct NodeCommand<C: ChainSpecParser, Ext: clap::Args + fmt::Debug = NoArgs
#[command(flatten, next_help_heading = "Static Files")]
pub static_files: StaticFilesArgs,
/// All storage related arguments with --storage prefix
#[command(flatten, next_help_heading = "Storage")]
pub storage: StorageArgs,
/// Additional cli arguments

View File

@@ -131,8 +131,15 @@ impl<C: ChainSpecParser> Command<C> {
reset_stage_checkpoint(tx, StageId::SenderRecovery)?;
}
StageEnum::Execution => {
if provider_rw.cached_storage_settings().use_hashed_state {
tx.clear::<tables::HashedAccounts>()?;
tx.clear::<tables::HashedStorages>()?;
reset_stage_checkpoint(tx, StageId::AccountHashing)?;
reset_stage_checkpoint(tx, StageId::StorageHashing)?;
} else {
tx.clear::<tables::PlainAccountState>()?;
tx.clear::<tables::PlainStorageState>()?;
}
tx.clear::<tables::AccountChangeSets>()?;
tx.clear::<tables::StorageChangeSets>()?;
tx.clear::<tables::Bytecodes>()?;

View File

@@ -1,6 +1,6 @@
//! Test setup utilities for configuring the initial state.
use crate::{testsuite::Environment, E2ETestSetupBuilder, NodeBuilderHelper};
use alloy_eips::BlockNumberOrTag;
use alloy_primitives::B256;
use alloy_rpc_types_engine::{ForkchoiceState, PayloadAttributes};
@@ -38,6 +38,8 @@ pub struct Setup<I> {
shutdown_tx: Option<mpsc::Sender<()>>,
/// Is this setup in dev mode
pub is_dev: bool,
/// Whether to use v2 storage mode (hashed keys, static file changesets, rocksdb history)
pub storage_v2: bool,
/// Tracks instance generic.
_phantom: PhantomData<I>,
/// Holds the import result to keep nodes alive when using imported chain
@@ -58,6 +60,7 @@ impl<I> Default for Setup<I> {
tree_config: TreeConfig::default(),
shutdown_tx: None,
is_dev: true,
storage_v2: false,
_phantom: Default::default(),
import_result_holder: None,
import_rlp_path: None,
@@ -126,6 +129,12 @@ where
self
}
/// Enable v2 storage mode (hashed keys, static file changesets, rocksdb history)
pub const fn with_storage_v2(mut self) -> Self {
self.storage_v2 = true;
self
}
/// Apply setup using pre-imported chain data from RLP file
pub async fn apply_with_import<N>(
&mut self,
@@ -194,19 +203,28 @@ where
self.shutdown_tx = Some(shutdown_tx);
let is_dev = self.is_dev;
let storage_v2 = self.storage_v2;
let node_count = self.network.node_count;
let tree_config = self.tree_config.clone();
let attributes_generator = Self::create_static_attributes_generator::<N>();
let mut builder = E2ETestSetupBuilder::<N, _>::new(
    node_count,
    Arc::<N::ChainSpec>::new((*chain_spec).clone().into()),
    attributes_generator,
)
.with_tree_config_modifier(move |base| {
    tree_config.clone().with_cross_block_cache_size(base.cross_block_cache_size())
})
.with_node_config_modifier(move |config| config.set_dev(is_dev))
.with_connect_nodes(self.network.connect_nodes);
if storage_v2 {
    builder = builder.with_storage_v2();
}
let result = builder.build().await;
let mut node_clients = Vec::new();
match result {

View File

@@ -20,7 +20,7 @@ use reth_node_types::{BlockTy, NodeTypes};
use reth_payload_builder::PayloadBuilderHandle;
use reth_provider::{
providers::{BlockchainProvider, ProviderNodeTypes},
ProviderFactory,
ProviderFactory, StorageSettingsCache,
};
use reth_prune::PrunerWithFactory;
use reth_stages_api::{MetricEventsSender, Pipeline};
@@ -94,6 +94,7 @@ where
if chain_spec.is_optimism() { EngineApiKind::OpStack } else { EngineApiKind::Ethereum };
let downloader = BasicBlockDownloader::new(client, consensus.clone());
let use_hashed_state = provider.cached_storage_settings().use_hashed_state;
let persistence_handle =
PersistenceHandle::<N::Primitives>::spawn_service(provider, pruner, sync_metrics_tx);
@@ -111,6 +112,7 @@ where
engine_kind,
evm_config,
changeset_cache,
use_hashed_state,
);
let engine_handler = EngineApiRequestHandler::new(to_tree_tx, from_tree);

View File

@@ -143,6 +143,13 @@ test-utils = [
"reth-evm-ethereum/test-utils",
"reth-tasks/test-utils",
]
rocksdb = [
"reth-provider/rocksdb",
"reth-prune/rocksdb",
"reth-stages?/rocksdb",
"reth-e2e-test-utils/rocksdb",
]
edge = ["rocksdb"]
[[test]]
name = "e2e_testsuite"

View File

@@ -351,6 +351,14 @@ impl<S: StateProvider, const PREWARM: bool> StateProvider for CachedStateProvide
self.state_provider.storage(account, storage_key)
}
}
fn storage_by_hashed_key(
&self,
address: Address,
hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
self.state_provider.storage_by_hashed_key(address, hashed_storage_key)
}
}
impl<S: BytecodeReader, const PREWARM: bool> BytecodeReader for CachedStateProvider<S, PREWARM> {

View File

@@ -199,6 +199,17 @@ impl<S: StateProvider> StateProvider for InstrumentedStateProvider<S> {
self.record_storage_fetch(start.elapsed());
res
}
fn storage_by_hashed_key(
&self,
address: Address,
hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
let start = Instant::now();
let res = self.state_provider.storage_by_hashed_key(address, hashed_storage_key);
self.record_storage_fetch(start.elapsed());
res
}
}
impl<S: BytecodeReader> BytecodeReader for InstrumentedStateProvider<S> {

View File

@@ -32,7 +32,7 @@ use reth_provider::{
BlockExecutionOutput, BlockExecutionResult, BlockReader, ChangeSetReader,
DatabaseProviderFactory, HashedPostStateProvider, ProviderError, StageCheckpointReader,
StateProviderBox, StateProviderFactory, StateReader, StorageChangeSetReader,
StorageSettingsCache, TransactionVariant,
};
use reth_revm::database::StateProviderDatabase;
use reth_stages_api::ControlFlow;
@@ -271,6 +271,9 @@ where
evm_config: C,
/// Changeset cache for in-memory trie changesets
changeset_cache: ChangesetCache,
/// Whether the node uses hashed state as canonical storage (v2 mode).
/// Cached at construction to avoid threading `StorageSettingsCache` bounds everywhere.
use_hashed_state: bool,
}
impl<N, P: Debug, T: PayloadTypes + Debug, V: Debug, C> std::fmt::Debug
@@ -296,6 +299,7 @@ where
.field("engine_kind", &self.engine_kind)
.field("evm_config", &self.evm_config)
.field("changeset_cache", &self.changeset_cache)
.field("use_hashed_state", &self.use_hashed_state)
.finish()
}
}
@@ -313,7 +317,8 @@ where
P::Provider: BlockReader<Block = N::Block, Header = N::BlockHeader>
+ StageCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ StorageSettingsCache,
C: ConfigureEvm<Primitives = N> + 'static,
T: PayloadTypes<BuiltPayload: BuiltPayload<Primitives = N>>,
V: EngineValidator<T>,
@@ -334,6 +339,7 @@ where
engine_kind: EngineApiKind,
evm_config: C,
changeset_cache: ChangesetCache,
use_hashed_state: bool,
) -> Self {
let (incoming_tx, incoming) = crossbeam_channel::unbounded();
@@ -355,6 +361,7 @@ where
engine_kind,
evm_config,
changeset_cache,
use_hashed_state,
}
}
@@ -375,6 +382,7 @@ where
kind: EngineApiKind,
evm_config: C,
changeset_cache: ChangesetCache,
use_hashed_state: bool,
) -> (Sender<FromEngine<EngineApiRequest<T, N>, N::Block>>, UnboundedReceiver<EngineApiEvent<N>>)
{
let best_block_number = provider.best_block_number().unwrap_or(0);
@@ -407,6 +415,7 @@ where
kind,
evm_config,
changeset_cache,
use_hashed_state,
);
let incoming = task.incoming_tx.clone();
spawn_os_thread("engine", || task.run());
@@ -2379,7 +2388,12 @@ where
self.update_reorg_metrics(old.len(), old_first);
self.reinsert_reorged_blocks(new.clone());
// When use_hashed_state is enabled, skip reinserting the old chain — the
// bundle state references plain state reverts which don't exist.
if !self.use_hashed_state {
self.reinsert_reorged_blocks(old.clone());
}
}
// update the tracked in-memory state with the new chain

View File

@@ -1541,6 +1541,7 @@ mod tests {
providers::OverlayStateProviderFactory, test_utils::create_test_provider_factory,
BlockNumReader, BlockReader, ChangeSetReader, DatabaseProviderFactory, LatestStateProvider,
PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StorageChangeSetReader,
StorageSettingsCache,
};
use reth_trie::MultiProof;
use reth_trie_db::ChangesetCache;
@@ -1562,6 +1563,7 @@ mod tests {
+ PruneCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ StorageSettingsCache
+ BlockNumReader,
> + Clone
+ Send
@@ -1581,7 +1583,10 @@ mod tests {
fn create_cached_provider<F>(factory: F) -> CachedStateProvider<StateProviderBox>
where
F: DatabaseProviderFactory<
Provider: BlockReader
+ StageCheckpointReader
+ PruneCheckpointReader
+ reth_provider::StorageSettingsCache,
> + Clone
+ Send
+ 'static,

View File

@@ -38,7 +38,7 @@ use reth_provider::{
providers::OverlayStateProviderFactory, BlockExecutionOutput, BlockNumReader, BlockReader,
ChangeSetReader, DatabaseProviderFactory, DatabaseProviderROFactory, HashedPostStateProvider,
ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProvider,
StateProviderFactory, StateReader, StorageChangeSetReader, StorageSettingsCache,
};
use reth_revm::db::{states::bundle_state::BundleRetention, State};
use reth_trie::{updates::TrieUpdates, HashedPostState, StateRoot};
@@ -146,7 +146,8 @@ where
+ PruneCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader
+ StorageSettingsCache,
> + BlockReader<Header = N::BlockHeader>
+ ChangeSetReader
+ BlockNumReader
@@ -1526,7 +1527,8 @@ where
+ PruneCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader
+ StorageSettingsCache,
> + BlockReader<Header = N::BlockHeader>
+ StateProviderFactory
+ StateReader

View File

@@ -221,6 +221,7 @@ impl TestHarness {
EngineApiKind::Ethereum,
evm_config,
changeset_cache,
provider.cached_storage_settings().use_hashed_state,
);
let block_builder = TestBlockBuilder::default().with_chain_spec((*chain_spec).clone());

View File

@@ -2,13 +2,15 @@
mod fcu_finalized_blocks;
use alloy_rpc_types_engine::PayloadStatusEnum;
use eyre::Result;
use reth_chainspec::{ChainSpecBuilder, MAINNET};
use reth_e2e_test_utils::testsuite::{
actions::{
BlockReference, CaptureBlock, CompareNodeChainTips, CreateFork, ExpectFcuStatus,
MakeCanonical, ProduceBlocks, ProduceBlocksLocally, ProduceInvalidBlocks, ReorgTo,
SelectActiveNode, SendForkchoiceUpdate, SendNewPayloads, SetForkBase, UpdateBlockInfo,
ValidateCanonicalTag, WaitForSync,
},
setup::{NetworkSetup, Setup},
TestBuilder,
@@ -39,6 +41,14 @@ fn default_engine_tree_setup() -> Setup<EthEngineTypes> {
)
}
/// Creates a v2 storage mode setup for engine tree e2e tests.
///
/// v2 mode uses keccak256-hashed slot keys in static file changesets and rocksdb history
/// instead of plain keys in MDBX.
fn v2_engine_tree_setup() -> Setup<EthEngineTypes> {
default_engine_tree_setup().with_storage_v2()
}
/// Test that verifies forkchoice update and canonical chain insertion functionality.
#[tokio::test]
async fn test_engine_tree_fcu_canon_chain_insertion_e2e() -> Result<()> {
@@ -334,3 +344,152 @@ async fn test_engine_tree_live_sync_transition_eventually_canonical_e2e() -> Res
Ok(())
}
// ==================== v2 storage mode variants ====================
/// v2 variant: Verifies forkchoice update and canonical chain insertion in v2 storage mode.
///
/// Exercises the full `save_blocks` → `write_state` → static file changeset path with hashed keys.
#[tokio::test]
async fn test_engine_tree_fcu_canon_chain_insertion_v2_e2e() -> Result<()> {
reth_tracing::init_test_tracing();
let test = TestBuilder::new()
.with_setup(v2_engine_tree_setup())
.with_action(ProduceBlocks::<EthEngineTypes>::new(1))
.with_action(MakeCanonical::new())
.with_action(ProduceBlocks::<EthEngineTypes>::new(3))
.with_action(MakeCanonical::new());
test.run::<EthereumNode>().await?;
Ok(())
}
/// v2 variant: Verifies forkchoice update with a reorg where all blocks are already available.
///
/// Exercises `write_state_reverts` path with hashed changeset keys during CL-driven reorgs.
#[tokio::test]
async fn test_engine_tree_fcu_reorg_with_all_blocks_v2_e2e() -> Result<()> {
reth_tracing::init_test_tracing();
let test = TestBuilder::new()
.with_setup(v2_engine_tree_setup())
.with_action(ProduceBlocks::<EthEngineTypes>::new(5))
.with_action(MakeCanonical::new())
.with_action(CreateFork::<EthEngineTypes>::new(2, 3))
.with_action(CaptureBlock::new("fork_tip"))
.with_action(ReorgTo::<EthEngineTypes>::new_from_tag("fork_tip"));
test.run::<EthereumNode>().await?;
Ok(())
}
/// v2 variant: Verifies progressive canonical chain extension in v2 storage mode.
#[tokio::test]
async fn test_engine_tree_fcu_extends_canon_chain_v2_e2e() -> Result<()> {
reth_tracing::init_test_tracing();
let test = TestBuilder::new()
.with_setup(v2_engine_tree_setup())
.with_action(ProduceBlocks::<EthEngineTypes>::new(1))
.with_action(MakeCanonical::new())
.with_action(ProduceBlocks::<EthEngineTypes>::new(10))
.with_action(CaptureBlock::new("target_block"))
.with_action(ReorgTo::<EthEngineTypes>::new_from_tag("target_block"))
.with_action(MakeCanonical::new());
test.run::<EthereumNode>().await?;
Ok(())
}
/// Creates a 2-node setup for disk-level reorg testing.
///
/// Uses unconnected nodes so fork blocks can be produced independently on Node 1 and then
/// sent to Node 0 via newPayload only (no FCU), keeping Node 0's persisted chain intact
/// until the final `ReorgTo` triggers `find_disk_reorg`.
fn disk_reorg_setup(storage_v2: bool) -> Setup<EthEngineTypes> {
let mut setup = Setup::default()
.with_chain_spec(Arc::new(
ChainSpecBuilder::default()
.chain(MAINNET.chain)
.genesis(
serde_json::from_str(include_str!(
"../../../../e2e-test-utils/src/testsuite/assets/genesis.json"
))
.unwrap(),
)
.cancun_activated()
.build(),
))
.with_network(NetworkSetup::multi_node_unconnected(2))
.with_tree_config(
TreeConfig::default().with_legacy_state_root(false).with_has_enough_parallelism(true),
);
if storage_v2 {
setup = setup.with_storage_v2();
}
setup
}
/// Builds a disk-level reorg test scenario.
///
/// 1. Both nodes receive 3 shared blocks
/// 2. Node 0 extends to 10 blocks locally (persisted to disk)
/// 3. Node 1 builds an 8-block fork from block 3 (its canonical head)
/// 4. Fork blocks are sent to Node 0 via newPayload (no FCU, old chain stays on disk)
/// 5. FCU to fork tip on Node 0 triggers `find_disk_reorg` → `RemoveBlocksAbove(3)`
fn disk_reorg_test(storage_v2: bool) -> TestBuilder<EthEngineTypes> {
TestBuilder::new()
.with_setup(disk_reorg_setup(storage_v2))
.with_action(SelectActiveNode::new(0))
.with_action(ProduceBlocks::<EthEngineTypes>::new(3))
.with_action(MakeCanonical::new())
.with_action(ProduceBlocksLocally::<EthEngineTypes>::new(7))
.with_action(MakeCanonical::with_active_node())
.with_action(SelectActiveNode::new(1))
.with_action(SetForkBase::new(3))
.with_action(ProduceBlocksLocally::<EthEngineTypes>::new(8))
.with_action(MakeCanonical::with_active_node())
.with_action(CaptureBlock::new("fork_tip"))
.with_action(
SendNewPayloads::<EthEngineTypes>::new()
.with_source_node(1)
.with_target_node(0)
.with_start_block(4)
.with_total_blocks(8),
)
.with_action(
SendForkchoiceUpdate::<EthEngineTypes>::new(
BlockReference::Tag("fork_tip".into()),
BlockReference::Tag("fork_tip".into()),
BlockReference::Tag("fork_tip".into()),
)
.with_expected_status(PayloadStatusEnum::Valid)
.with_node_idx(0),
)
}
/// Verifies disk-level reorg in v1 (plain key) storage mode.
///
/// Confirms `find_disk_reorg()` detects persisted blocks on the wrong fork and calls
/// `RemoveBlocksAbove` to truncate, then re-persists the correct fork chain.
#[tokio::test]
async fn test_engine_tree_disk_reorg_v1_e2e() -> Result<()> {
reth_tracing::init_test_tracing();
disk_reorg_test(false).run::<EthereumNode>().await?;
Ok(())
}
/// v2 variant: Verifies disk-level reorg in v2 storage mode.
///
/// Same scenario as v1 but with hashed changeset keys in static files and rocksdb history.
/// Exercises `find_disk_reorg()` → `RemoveBlocksAbove` with v2 hashed key format.
#[tokio::test]
async fn test_engine_tree_disk_reorg_v2_e2e() -> Result<()> {
reth_tracing::init_test_tracing();
disk_reorg_test(true).run::<EthereumNode>().await?;
Ok(())
}

View File

@@ -18,6 +18,7 @@ use reth_provider::{
};
use reth_revm::database::StateProviderDatabase;
use reth_testing_utils::generators::sign_tx_with_key_pair;
use reth_trie_common::KeccakKeyHasher;
use secp256k1::Keypair;
pub(crate) fn to_execution_outcome(
@@ -77,12 +78,9 @@ where
let execution_outcome = to_execution_outcome(block.number(), &block_execution_output);
// Commit the block's execution outcome to the database
let hashed_state = execution_outcome.hash_state_slow::<KeccakKeyHasher>().into_sorted();
let provider_rw = provider_factory.provider_rw()?;
provider_rw.append_blocks_with_state(vec![block.clone()], &execution_outcome, hashed_state)?;
provider_rw.commit()?;
Ok(block_execution_output)
@@ -210,11 +208,12 @@ where
execution_outcome.state_mut().reverts.sort();
// Commit the block's execution outcome to the database
let hashed_state = execution_outcome.hash_state_slow::<KeccakKeyHasher>().into_sorted();
let provider_rw = provider_factory.provider_rw()?;
provider_rw.append_blocks_with_state(
vec![block1.clone(), block2.clone()],
&execution_outcome,
hashed_state,
)?;
provider_rw.commit()?;

View File

@@ -80,7 +80,7 @@ pub use static_files::{StaticFilesArgs, MINIMAL_BLOCKS_PER_FILE};
mod rocksdb;
pub use rocksdb::{RocksDbArgs, RocksDbArgsError};
/// `StorageArgs` for configuring storage settings.
mod storage;
pub use storage::StorageArgs;

View File

@@ -1,11 +1,13 @@
//! clap [Args](clap::Args) for storage configuration
use clap::{ArgAction, Args};
/// Parameters for storage configuration.
///
/// This controls whether the node uses v2 storage defaults (with `RocksDB` and static file
/// optimizations) or v1/legacy storage defaults.
///
/// Individual storage settings can be overridden with `--static-files.*` and `--rocksdb.*` flags.
#[derive(Debug, Args, PartialEq, Eq, Clone, Copy, Default)]
#[command(next_help_heading = "Storage")]
pub struct StorageArgs {
@@ -40,21 +42,24 @@ mod tests {
use super::*;
use clap::Parser;
/// A helper type to parse Args more easily
#[derive(Parser)]
struct CommandParser<T: Args> {
#[command(flatten)]
args: T,
}
#[test]
fn test_default_storage_args() {
let args = CommandParser::parse_from(["reth"]).args;
let default_args = StorageArgs::default();
let args = CommandParser::<StorageArgs>::parse_from(["reth"]).args;
assert_eq!(args, default_args);
assert!(!args.v2);
}
#[test]
fn test_parse_v2_flag() {
let args = CommandParser::parse_from(["reth", "--storage.v2"]).args;
let args = CommandParser::<StorageArgs>::parse_from(["reth", "--storage.v2"]).args;
assert!(args.v2);
}
}

View File

@@ -155,7 +155,7 @@ pub struct NodeConfig<ChainSpec> {
/// All `RocksDB` table routing arguments
pub rocksdb: RocksDbArgs,
/// All storage related arguments with --storage prefix
pub storage: StorageArgs,
}
@@ -355,6 +355,12 @@ impl<ChainSpec> NodeConfig<ChainSpec> {
self
}
/// Set the storage args for the node
pub const fn with_storage(mut self, storage: StorageArgs) -> Self {
self.storage = storage;
self
}
/// Returns pruning configuration.
pub fn prune_config(&self) -> Option<PruneConfig>
where
@@ -398,6 +404,13 @@ impl<ChainSpec> NodeConfig<ChainSpec> {
s = s.with_use_hashed_state(self.storage.use_hashed_state);
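// The two settings below are coupled: static-file changesets are keyed by
// hashed slots, so enabling either `use_hashed_state` or
// `storage_changesets_in_static_files` implies the other.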
if s.use_hashed_state {
s = s.with_storage_changesets_in_static_files(true);
}
if s.storage_changesets_in_static_files {
s = s.with_use_hashed_state(true);
}
s
}

View File

@@ -164,7 +164,7 @@ pub use alloy_primitives::{logs_bloom, Log, LogData};
pub mod proofs;
mod storage;
pub use storage::{StorageEntry, StorageSlotKey, ValueWithSubKey};
pub mod sync;

View File

@@ -1,4 +1,4 @@
use alloy_primitives::{keccak256, B256, U256};
/// Trait for `DupSort` table values that contain a subkey.
///
@@ -12,6 +12,117 @@ pub trait ValueWithSubKey {
fn get_subkey(&self) -> Self::SubKey;
}
/// A storage slot key that tracks whether it holds a plain (unhashed) EVM slot
/// or a keccak256-hashed slot.
///
/// This enum replaces the `use_hashed_state: bool` parameter pattern by carrying
/// provenance with the key itself. Once tagged at a read/write boundary, downstream
/// code can call [`Self::to_hashed`] without risk of double-hashing — hashing a
/// [`StorageSlotKey::Hashed`] is a no-op.
///
/// The on-disk encoding is unchanged (raw 32-byte [`B256`]). The variant is set
/// by the code that knows the context (which table, which storage mode).
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum StorageSlotKey {
/// An unhashed EVM storage slot, as produced by REVM execution.
Plain(B256),
/// A keccak256-hashed storage slot, as stored in `HashedStorages` and
/// in v2-mode `StorageChangeSets`.
Hashed(B256),
}
impl Default for StorageSlotKey {
fn default() -> Self {
Self::Plain(B256::ZERO)
}
}
impl StorageSlotKey {
/// Create a plain slot key from a REVM [`U256`] storage index.
pub const fn from_u256(slot: U256) -> Self {
Self::Plain(B256::new(slot.to_be_bytes()))
}
/// Create a plain slot key from a raw [`B256`].
pub const fn plain(key: B256) -> Self {
Self::Plain(key)
}
/// Create a hashed slot key from a raw [`B256`].
pub const fn hashed(key: B256) -> Self {
Self::Hashed(key)
}
/// Tag a raw [`B256`] based on the storage mode.
///
/// When `use_hashed_state` is true the key is assumed already hashed.
/// When false it is assumed to be a plain slot.
pub const fn from_raw(key: B256, use_hashed_state: bool) -> Self {
if use_hashed_state {
Self::Hashed(key)
} else {
Self::Plain(key)
}
}
/// Returns the raw [`B256`] regardless of variant.
pub const fn as_b256(&self) -> B256 {
match *self {
Self::Plain(b) | Self::Hashed(b) => b,
}
}
/// Returns `true` if this key is already hashed.
pub const fn is_hashed(&self) -> bool {
matches!(self, Self::Hashed(_))
}
/// Returns `true` if this key is plain (unhashed).
pub const fn is_plain(&self) -> bool {
matches!(self, Self::Plain(_))
}
/// Produce the keccak256-hashed form of this slot key.
///
/// - If already [`Hashed`](Self::Hashed), returns the inner value as-is (no double-hash).
/// - If [`Plain`](Self::Plain), applies keccak256 and returns the result.
pub fn to_hashed(&self) -> B256 {
match *self {
Self::Hashed(b) => b,
Self::Plain(b) => keccak256(b),
}
}
/// Convert a plain slot to its changeset representation.
///
/// In v2 mode (`use_hashed_state = true`), the changeset stores hashed keys,
/// so the plain key is hashed. In v1 mode, the plain key is stored as-is.
///
/// Panics (debug) if called on an already-hashed key.
pub fn to_changeset_key(self, use_hashed_state: bool) -> B256 {
debug_assert!(self.is_plain(), "to_changeset_key called on already-hashed key");
if use_hashed_state {
self.to_hashed()
} else {
self.as_b256()
}
}
/// Like [`to_changeset_key`](Self::to_changeset_key) but returns a tagged
/// [`StorageSlotKey`] instead of a raw [`B256`].
///
/// Panics (debug) if called on an already-hashed key.
pub fn to_changeset(self, use_hashed_state: bool) -> Self {
Self::from_raw(self.to_changeset_key(use_hashed_state), use_hashed_state)
}
}
impl From<StorageSlotKey> for B256 {
fn from(key: StorageSlotKey) -> Self {
key.as_b256()
}
}
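// Illustrative sketch (not part of this commit): the tagging invariants above,
// expressed as a small test. `to_hashed` is idempotent once a key is tagged,
// and `to_changeset_key` hashes only in v2 mode.
#[cfg(test)]
mod slot_key_sketch {
    use super::*;
    use alloy_primitives::U256;

    #[test]
    fn hashing_applies_exactly_once() {
        let plain = StorageSlotKey::from_u256(U256::from(1));
        let hashed = StorageSlotKey::hashed(plain.to_hashed());
        // Hashing an already-`Hashed` key is a no-op: both forms agree.
        assert_eq!(plain.to_hashed(), hashed.to_hashed());
        // v2 changesets store the hashed key; v1 keeps the plain key.
        assert_eq!(plain.to_changeset_key(true), plain.to_hashed());
        assert_eq!(plain.to_changeset_key(false), plain.as_b256());
    }
}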
/// Account storage entry.
///
/// `key` is the subkey when used as a value in the `StorageChangeSets` table.
@@ -31,6 +142,14 @@ impl StorageEntry {
pub const fn new(key: B256, value: U256) -> Self {
Self { key, value }
}
/// Tag this entry's key as a [`StorageSlotKey`] based on the storage mode.
///
/// When `use_hashed_state` is true, the key is tagged as already-hashed.
/// When false, it is tagged as plain.
pub const fn slot_key(&self, use_hashed_state: bool) -> StorageSlotKey {
StorageSlotKey::from_raw(self.key, use_hashed_state)
}
}
impl ValueWithSubKey for StorageEntry {

View File

@@ -135,7 +135,7 @@ impl StorageHistory {
let (block_address, entry) = result?;
let block_number = block_address.block_number();
let address = block_address.address();
highest_deleted_storages.insert((address, entry.key.as_b256()), block_number);
last_changeset_pruned_block = Some(block_number);
pruned_changesets += 1;
limiter.increment_deleted_entries_count();
@@ -273,7 +273,7 @@ impl StorageHistory {
let (block_address, entry) = result?;
let block_number = block_address.block_number();
let address = block_address.address();
highest_deleted_storages.insert((address, entry.key.as_b256()), block_number);
last_changeset_pruned_block = Some(block_number);
changesets_processed += 1;
limiter.increment_deleted_entries_count();

View File

@@ -160,6 +160,14 @@ impl StateProvider for StateProviderTest {
) -> ProviderResult<Option<alloy_primitives::StorageValue>> {
Ok(self.accounts.get(&account).and_then(|(storage, _)| storage.get(&storage_key).copied()))
}
fn storage_by_hashed_key(
&self,
_address: Address,
_hashed_storage_key: StorageKey,
) -> ProviderResult<Option<alloy_primitives::StorageValue>> {
Ok(None)
}
}
impl BytecodeReader for StateProviderTest {

View File

@@ -154,6 +154,14 @@ impl StateProvider for StateProviderTraitObjWrapper {
self.0.storage(account, storage_key)
}
fn storage_by_hashed_key(
&self,
address: Address,
hashed_storage_key: alloy_primitives::StorageKey,
) -> reth_errors::ProviderResult<Option<alloy_primitives::StorageValue>> {
self.0.storage_by_hashed_key(address, hashed_storage_key)
}
fn account_code(
&self,
addr: &Address,

View File

@@ -22,6 +22,7 @@ use reth_stages_api::{
UnwindInput, UnwindOutput,
};
use reth_static_file_types::StaticFileSegment;
use reth_trie::KeccakKeyHasher;
use std::{
cmp::{max, Ordering},
collections::BTreeMap,
@@ -461,9 +462,16 @@ where
}
}
// Write output. When `use_hashed_state` is enabled, `write_state` skips writing to
// plain account/storage tables and only writes bytecodes and changesets. The hashed
// state is then written separately below.
provider.write_state(&state, OriginalValuesKnown::Yes, StateWriteConfig::default())?;
if provider.cached_storage_settings().use_hashed_state {
let hashed_state = state.hash_state_slow::<KeccakKeyHasher>();
provider.write_hashed_state(&hashed_state.into_sorted())?;
}
let db_write_duration = time.elapsed();
debug!(
target: "sync::stages::execution",

View File

@@ -9,7 +9,9 @@ use reth_db_api::{
};
use reth_etl::Collector;
use reth_primitives_traits::Account;
use reth_provider::{
AccountExtReader, DBProvider, HashingWriter, StatsReader, StorageSettingsCache,
};
use reth_stages_api::{
AccountHashingCheckpoint, EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint,
StageError, StageId, UnwindInput, UnwindOutput,
@@ -134,7 +136,11 @@ impl Default for AccountHashingStage {
impl<Provider> Stage<Provider> for AccountHashingStage
where
Provider: DBProvider<Tx: DbTxMut>
+ HashingWriter
+ AccountExtReader
+ StatsReader
+ StorageSettingsCache,
{
/// Return the id of the stage
fn id(&self) -> StageId {
@@ -142,11 +148,21 @@ where
}
/// Execute the stage.
///
/// When `use_hashed_state` is enabled, this stage is a no-op because the execution stage
/// writes directly to `HashedAccounts`. Otherwise, it hashes plain state to populate hashed
/// tables.
fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result<ExecOutput, StageError> {
if input.target_reached() {
return Ok(ExecOutput::done(input.checkpoint()))
}
// If using hashed state as canonical, execution already writes to `HashedAccounts`,
// so this stage becomes a no-op.
if provider.cached_storage_settings().use_hashed_state {
return Ok(ExecOutput::done(input.checkpoint().with_block_number(input.target())));
}
let (from_block, to_block) = input.next_block_range().into_inner();
// if there are more blocks than the threshold, it is faster to go over plain state and hash all
@@ -234,10 +250,14 @@ where
provider: &Provider,
input: UnwindInput,
) -> Result<UnwindOutput, StageError> {
// NOTE: this runs in both v1 and v2 mode. In v2 mode, execution writes
// directly to `HashedAccounts`, but the unwind must still revert those
// entries here because `MerkleUnwind` runs after this stage (in unwind
// order) and needs `HashedAccounts` to reflect the target block state
// before it can verify the state root.
let (range, unwind_progress, _) =
input.unwind_block_range_with_threshold(self.commit_threshold);
// Aggregate all transition changesets and make a list of accounts that have been changed.
provider.unwind_account_hashing_range(range)?;
let mut stage_checkpoint =

View File

@@ -15,6 +15,7 @@ use reth_stages_api::{
EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId,
StorageHashingCheckpoint, UnwindInput, UnwindOutput,
};
use reth_storage_api::StorageSettingsCache;
use reth_storage_errors::provider::ProviderResult;
use std::{
fmt::Debug,
@@ -68,7 +69,11 @@ impl Default for StorageHashingStage {
impl<Provider> Stage<Provider> for StorageHashingStage
where
Provider: DBProvider<Tx: DbTxMut>
+ StorageReader
+ HashingWriter
+ StatsReader
+ StorageSettingsCache,
{
/// Return the id of the stage
fn id(&self) -> StageId {
@@ -82,6 +87,12 @@ where
return Ok(ExecOutput::done(input.checkpoint()))
}
// If use_hashed_state is enabled, execution writes directly to `HashedStorages`,
// so this stage becomes a no-op.
if provider.cached_storage_settings().use_hashed_state {
return Ok(ExecOutput::done(input.checkpoint().with_block_number(input.target())));
}
let (from_block, to_block) = input.next_block_range().into_inner();
// if there are more blocks than the threshold, it is faster to go over plain state and hash all
@@ -176,6 +187,11 @@ where
provider: &Provider,
input: UnwindInput,
) -> Result<UnwindOutput, StageError> {
// NOTE: this runs in both v1 and v2 mode. In v2 mode, execution writes
// directly to `HashedStorages`, but the unwind must still revert those
// entries here because `MerkleUnwind` runs after this stage (in unwind
// order) and needs `HashedStorages` to reflect the target block state
// before it can verify the state root.
let (range, unwind_progress, _) =
input.unwind_block_range_with_threshold(self.commit_threshold);

View File

@@ -9,7 +9,7 @@ use reth_db_api::{
use reth_primitives_traits::{GotExpected, SealedHeader};
use reth_provider::{
ChangeSetReader, DBProvider, HeaderProvider, ProviderError, StageCheckpointReader,
StageCheckpointWriter, StatsReader, StorageChangeSetReader, StorageSettingsCache, TrieWriter,
};
use reth_stages_api::{
BlockErrorKind, EntitiesCheckpoint, ExecInput, ExecOutput, MerkleCheckpoint, Stage,
@@ -160,6 +160,7 @@ where
+ HeaderProvider
+ ChangeSetReader
+ StorageChangeSetReader
+ StorageSettingsCache
+ StageCheckpointReader
+ StageCheckpointWriter,
{

View File

@@ -208,7 +208,10 @@ where
for (idx, changeset_result) in walker.enumerate() {
let (BlockNumberAddress((block_number, address)), storage) = changeset_result?;
cache
.entry(AddressStorageKey((address, storage.key.as_b256())))
.or_default()
.push(block_number);
if idx > 0 && idx % interval == 0 && total_changesets > 1000 {
info!(target: "sync::stages::index_history", progress = %format!("{:.4}%", (idx as f64 / total_changesets as f64) * 100.0), "Collecting indices");

View File

@@ -3,7 +3,7 @@
use alloy_consensus::{constants::ETH_TO_WEI, Header, TxEip1559, TxReceipt};
use alloy_eips::eip1559::INITIAL_BASE_FEE;
use alloy_genesis::{Genesis, GenesisAccount};
use alloy_primitives::{bytes, keccak256, Address, Bytes, TxKind, B256, U256};
use reth_chainspec::{ChainSpecBuilder, ChainSpecProvider, MAINNET};
use reth_config::config::StageConfig;
use reth_consensus::noop::NoopConsensus;
@@ -36,7 +36,7 @@ use reth_stages::sets::DefaultStages;
use reth_stages_api::{Pipeline, StageId};
use reth_static_file::StaticFileProducer;
use reth_storage_api::{
ChangeSetReader, StateProvider, StorageChangeSetReader, StorageSettings, StorageSettingsCache,
};
use reth_testing_utils::generators::{self, generate_key, sign_tx_with_key_pair};
use reth_trie::{HashedPostState, KeccakKeyHasher, StateRoot};
@@ -89,6 +89,11 @@ fn assert_changesets_queryable(
"storage changesets should be queryable from static files for blocks {:?}",
block_range
);
// Verify keys are in hashed format (v2 mode)
for (_, entry) in &storage_changesets {
assert!(entry.key.is_hashed(), "v2: storage changeset keys should be tagged as hashed");
}
} else {
let storage_changesets: Vec<_> = provider
.tx_ref()
@@ -100,6 +105,16 @@ fn assert_changesets_queryable(
"storage changesets should be queryable from MDBX for blocks {:?}",
block_range
);
// Verify keys are plain (not hashed) in v1 mode
for (_, entry) in &storage_changesets {
let key = entry.key;
assert_ne!(
key,
keccak256(key),
"v1: storage changeset key should be plain (not its own keccak256)"
);
}
}
// Verify account changesets
@@ -201,19 +216,22 @@ where
pipeline
}
/// Shared helper for pipeline forward sync and unwind tests.
///
/// This test:
/// 1. Pre-funds a signer account and deploys a Counter contract in genesis
/// 2. Each block contains two transactions:
/// - ETH transfer to a recipient (account state changes)
/// - Counter `increment()` call (storage state changes)
/// 3. Runs the full pipeline with ALL stages enabled
/// 4. Forward syncs to `num_blocks`, unwinds to `unwind_target`, then re-syncs back to `num_blocks`
///
/// This exercises both account and storage hashing/history stages.
/// When `storage_settings` is `Some`, the pipeline provider factory is configured with the given
/// settings before genesis initialization (e.g. v2 storage mode).
async fn run_pipeline_forward_and_unwind(
storage_settings: Option<StorageSettings>,
num_blocks: u64,
unwind_target: u64,
) -> eyre::Result<()> {
reth_tracing::init_test_tracing();
// Generate a keypair for signing transactions
@@ -259,7 +277,6 @@ async fn test_pipeline() -> eyre::Result<()> {
let evm_config = EthEvmConfig::new(chain_spec.clone());
// Build blocks by actually executing transactions to get correct state roots
let mut blocks: Vec<SealedBlock<Block>> = Vec::new();
let mut parent_hash = genesis.hash();
@@ -384,11 +401,15 @@ async fn test_pipeline() -> eyre::Result<()> {
// This is needed because we wrote state during block generation for computing state roots
let pipeline_provider_factory =
create_test_provider_factory_with_chain_spec(chain_spec.clone());
if let Some(settings) = storage_settings {
pipeline_provider_factory.set_storage_settings_cache(settings);
}
init_genesis(&pipeline_provider_factory).expect("init genesis");
let pipeline_genesis =
pipeline_provider_factory.sealed_header(0)?.expect("genesis should exist");
let pipeline_consensus = NoopConsensus::arc();
let blocks_clone = blocks.clone();
let file_client = create_file_client_from_blocks(blocks);
let max_block = file_client.max_block().unwrap();
let tip = file_client.tip().expect("tip");
@@ -417,7 +438,7 @@ async fn test_pipeline() -> eyre::Result<()> {
{
let provider = pipeline_provider_factory.provider()?;
let last_block = provider.last_block_number()?;
assert_eq!(last_block, num_blocks, "should have synced {num_blocks} blocks");
for stage_id in [
StageId::Headers,
@@ -435,29 +456,28 @@ async fn test_pipeline() -> eyre::Result<()> {
let checkpoint = provider.get_stage_checkpoint(stage_id)?;
assert_eq!(
checkpoint.map(|c| c.block_number),
Some(num_blocks),
"{stage_id} checkpoint should be at block {num_blocks}"
);
}
// Verify the counter contract's storage was updated
// After num_blocks blocks with 1 increment each, slot 0 should be num_blocks
let state = provider.latest();
let counter_storage = state.storage(CONTRACT_ADDRESS, B256::ZERO)?;
assert_eq!(
counter_storage,
Some(U256::from(num_blocks)),
"Counter storage slot 0 should be {num_blocks} after {num_blocks} increments"
);
}
// Verify changesets are queryable before unwind
// This validates that the #21561 fix works - unwind needs to read changesets from the correct
// source
assert_changesets_queryable(&pipeline_provider_factory, 1..=num_blocks)?;
// Unwind to unwind_target
pipeline.unwind(unwind_target, None)?;
// Verify unwind
@@ -484,7 +504,114 @@ async fn test_pipeline() -> eyre::Result<()> {
);
}
}
let state = provider.latest();
let counter_storage = state.storage(CONTRACT_ADDRESS, B256::ZERO)?;
assert_eq!(
counter_storage,
Some(U256::from(unwind_target)),
"Counter storage slot 0 should be {unwind_target} after unwinding to block {unwind_target}"
);
}
// Re-sync: build a new pipeline starting from unwind_target and sync back to num_blocks
let resync_file_client = create_file_client_from_blocks(blocks_clone);
let resync_consensus = NoopConsensus::arc();
let resync_stages_config = StageConfig::default();
let unwind_head = pipeline_provider_factory
.sealed_header(unwind_target)?
.expect("unwind target header should exist");
let mut resync_header_downloader =
ReverseHeadersDownloaderBuilder::new(resync_stages_config.headers)
.build(resync_file_client.clone(), resync_consensus.clone())
.into_task();
resync_header_downloader.update_local_head(unwind_head);
resync_header_downloader.update_sync_target(SyncTarget::Tip(tip));
let mut resync_body_downloader = BodiesDownloaderBuilder::new(resync_stages_config.bodies)
.build(resync_file_client, resync_consensus, pipeline_provider_factory.clone())
.into_task();
resync_body_downloader
.set_download_range(unwind_target + 1..=max_block)
.expect("set download range");
let resync_pipeline = build_pipeline(
pipeline_provider_factory.clone(),
resync_header_downloader,
resync_body_downloader,
max_block,
tip,
);
let (_resync_pipeline, resync_result) = resync_pipeline.run_as_fut(None).await;
resync_result?;
// Verify re-sync
{
let provider = pipeline_provider_factory.provider()?;
let last_block = provider.last_block_number()?;
assert_eq!(last_block, num_blocks, "should have re-synced to {num_blocks} blocks");
for stage_id in [
StageId::Headers,
StageId::Bodies,
StageId::SenderRecovery,
StageId::Execution,
StageId::AccountHashing,
StageId::StorageHashing,
StageId::MerkleExecute,
StageId::TransactionLookup,
StageId::IndexAccountHistory,
StageId::IndexStorageHistory,
StageId::Finish,
] {
let checkpoint = provider.get_stage_checkpoint(stage_id)?;
assert_eq!(
checkpoint.map(|c| c.block_number),
Some(num_blocks),
"{stage_id} checkpoint should be at block {num_blocks} after re-sync"
);
}
let state = provider.latest();
let counter_storage = state.storage(CONTRACT_ADDRESS, B256::ZERO)?;
assert_eq!(
counter_storage,
Some(U256::from(num_blocks)),
"Counter storage slot 0 should be {num_blocks} after re-sync"
);
}
Ok(())
}
/// Tests pipeline with ALL stages enabled using both ETH transfers and contract storage changes.
///
/// This test:
/// 1. Pre-funds a signer account and deploys a Counter contract in genesis
/// 2. Each block contains two transactions:
/// - ETH transfer to a recipient (account state changes)
/// - Counter `increment()` call (storage state changes)
/// 3. Runs the full pipeline with ALL stages enabled
/// 4. Forward syncs to block 5, unwinds to block 2, then re-syncs to block 5
///
/// This exercises both account and storage hashing/history stages.
#[tokio::test(flavor = "multi_thread")]
async fn test_pipeline() -> eyre::Result<()> {
run_pipeline_forward_and_unwind(None, 5, 2).await
}
/// Same as [`test_pipeline`] but runs with v2 storage settings (`use_hashed_state=true`,
/// `storage_changesets_in_static_files=true`, etc.).
///
/// In v2 mode:
/// - The execution stage writes directly to `HashedAccounts`/`HashedStorages`
/// - `AccountHashingStage` and `StorageHashingStage` are no-ops during forward execution
/// - Changesets are stored in static files with pre-hashed storage keys
/// - Unwind must still revert hashed state via the hashing stages before `MerkleUnwind` validates
#[tokio::test(flavor = "multi_thread")]
async fn test_pipeline_v2() -> eyre::Result<()> {
run_pipeline_forward_and_unwind(Some(StorageSettings::v2()), 5, 2).await
}

View File

@@ -43,11 +43,19 @@ pub struct StorageSettings {
impl StorageSettings {
/// Returns the default base `StorageSettings`.
///
/// When the `edge` feature is enabled, returns [`Self::v2()`] so that CI and
/// edge builds automatically use v2 storage defaults. Otherwise returns
/// [`Self::v1()`]. The `--storage.v2` CLI flag can also opt into v2 at runtime
/// regardless of feature flags.
pub const fn base() -> Self {
#[cfg(feature = "edge")]
{
Self::v2()
}
#[cfg(not(feature = "edge"))]
{
Self::v1()
}
}
/// Creates `StorageSettings` for v2 nodes with all storage features enabled:
@@ -65,7 +73,7 @@ impl StorageSettings {
storages_history_in_rocksdb: true,
transaction_hash_numbers_in_rocksdb: true,
account_history_in_rocksdb: true,
use_hashed_state: true,
}
}

View File

@@ -5,8 +5,7 @@ use crate::ProviderResult;
use alloy_primitives::BlockNumber;
use reth_db::models::AccountBeforeTx;
use reth_db_api::models::BlockNumberAddress;
use reth_storage_api::{ChangeSetReader, ChangesetEntry, StorageChangeSetReader};
use std::ops::{Bound, RangeBounds};
/// Iterator that walks account changesets from static files in a block range.
@@ -110,7 +109,7 @@ pub struct StaticFileStorageChangesetWalker<P> {
/// Current block being processed
current_block: BlockNumber,
/// Changesets for current block
current_changesets: Vec<(BlockNumberAddress, ChangesetEntry)>,
/// Index within current block's changesets
changeset_index: usize,
}
@@ -144,7 +143,7 @@ impl<P> Iterator for StaticFileStorageChangesetWalker<P>
where
P: StorageChangeSetReader,
{
type Item = ProviderResult<(BlockNumberAddress, ChangesetEntry)>;
fn next(&mut self) -> Option<Self::Item> {
if let Some(changeset) = self.current_changesets.get(self.changeset_index).copied() {

View File

@@ -23,11 +23,13 @@ use reth_chainspec::ChainInfo;
use reth_db_api::models::{AccountBeforeTx, BlockNumberAddress, StoredBlockBodyIndices};
use reth_execution_types::ExecutionOutcome;
use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy};
use reth_primitives_traits::{Account, RecoveredBlock, SealedHeader};
use reth_prune_types::{PruneCheckpoint, PruneSegment};
use reth_stages_types::{StageCheckpoint, StageId};
use reth_static_file_types::StaticFileSegment;
use reth_storage_api::{
BlockBodyIndicesProvider, ChangesetEntry, NodePrimitivesProvider, StorageChangeSetReader,
};
use reth_storage_errors::provider::ProviderResult;
use reth_trie::{HashedPostState, KeccakKeyHasher};
use revm_database::BundleState;
@@ -713,7 +715,7 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for BlockchainProvider<N> {
fn storage_changeset(
&self,
block_number: BlockNumber,
) -> ProviderResult<Vec<(BlockNumberAddress, ChangesetEntry)>> {
self.consistent_provider()?.storage_changeset(block_number)
}
@@ -722,14 +724,14 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for BlockchainProvider<N> {
block_number: BlockNumber,
address: Address,
storage_key: B256,
) -> ProviderResult<Option<ChangesetEntry>> {
self.consistent_provider()?.get_storage_before_block(block_number, address, storage_key)
}
fn storage_changesets_range(
&self,
range: impl RangeBounds<BlockNumber>,
) -> ProviderResult<Vec<(BlockNumberAddress, ChangesetEntry)>> {
self.consistent_provider()?.storage_changesets_range(range)
}

View File

@@ -21,13 +21,16 @@ use reth_chainspec::ChainInfo;
use reth_db_api::models::{AccountBeforeTx, BlockNumberAddress, StoredBlockBodyIndices};
use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit};
use reth_node_types::{BlockTy, HeaderTy, ReceiptTy, TxTy};
use reth_primitives_traits::{
Account, BlockBody, RecoveredBlock, SealedHeader, StorageEntry, StorageSlotKey,
};
use reth_prune_types::{PruneCheckpoint, PruneSegment};
use reth_stages_types::{StageCheckpoint, StageId};
use reth_static_file_types::StaticFileSegment;
use reth_storage_api::{
BlockBodyIndicesProvider, ChangesetEntry, DatabaseProviderFactory, NodePrimitivesProvider,
StateProvider, StateProviderBox, StorageChangeSetReader, StorageSettingsCache,
TryIntoHistoricalStateProvider,
};
use reth_storage_errors::provider::ProviderResult;
use revm_database::states::PlainStorageRevert;
@@ -214,13 +217,16 @@ impl<N: ProviderNodeTypes> ConsistentProvider<N> {
)))
}
/// Populate a [`BundleStateInit`] and [`RevertsInit`] based on the given storage and account
/// changesets.
///
/// When `use_hashed_state` is enabled, storage changeset keys are already hashed, so current
/// values are read directly from [`reth_db_api::tables::HashedStorages`]. Otherwise, values
/// are read via [`StateProvider::storage`] which queries plain state tables.
fn populate_bundle_state(
&self,
account_changeset: Vec<(u64, AccountBeforeTx)>,
storage_changeset: Vec<(BlockNumberAddress, StorageEntry)>,
storage_changeset: Vec<(BlockNumberAddress, ChangesetEntry)>,
block_range_end: BlockNumber,
) -> ProviderResult<(BundleStateInit, RevertsInit)> {
let mut state: BundleStateInit = HashMap::default();
@@ -257,10 +263,16 @@ impl<N: ProviderNodeTypes> ConsistentProvider<N> {
};
// match storage.
match account_state.2.entry(old_storage.key) {
match account_state.2.entry(old_storage.key.as_b256()) {
hash_map::Entry::Vacant(entry) => {
let new_storage_value =
state_provider.storage(address, old_storage.key)?.unwrap_or_default();
let new_storage_value = match old_storage.key {
StorageSlotKey::Hashed(_) => state_provider
.storage_by_hashed_key(address, old_storage.key.as_b256())?
.unwrap_or_default(),
StorageSlotKey::Plain(_) => state_provider
.storage(address, old_storage.key.as_b256())?
.unwrap_or_default(),
};
entry.insert((old_storage.value, new_storage_value));
}
hash_map::Entry::Occupied(mut entry) => {
@@ -274,7 +286,7 @@ impl<N: ProviderNodeTypes> ConsistentProvider<N> {
.entry(address)
.or_default()
.1
.push(old_storage);
.push(StorageEntry::from(old_storage));
}
Ok((state, reverts))
@@ -1300,7 +1312,8 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for ConsistentProvider<N> {
fn storage_changeset(
&self,
block_number: BlockNumber,
) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
) -> ProviderResult<Vec<(BlockNumberAddress, ChangesetEntry)>> {
let use_hashed = self.storage_provider.cached_storage_settings().use_hashed_state;
if let Some(state) =
self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into()))
{
@@ -1316,9 +1329,10 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for ConsistentProvider<N> {
.flatten()
.flat_map(|revert: PlainStorageRevert| {
revert.storage_revert.into_iter().map(move |(key, value)| {
let tagged_key = StorageSlotKey::from_u256(key).to_changeset(use_hashed);
(
BlockNumberAddress((block_number, revert.address)),
StorageEntry { key: key.into(), value: value.to_previous_value() },
ChangesetEntry { key: tagged_key, value: value.to_previous_value() },
)
})
})
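A quick round-trip check of the tagging helper under both modes, using the sketched types above (illustrative):

let key = StorageSlotKey::from_u256(U256::from(0x42));
// Plain mode: the changeset key is the raw 32-byte slot.
assert_eq!(key.to_changeset(false).as_b256(), B256::from(U256::from(0x42)));
// Hashed mode: the changeset key is keccak256 of the 32-byte slot.
assert_eq!(key.to_changeset(true).as_b256(), keccak256(B256::from(U256::from(0x42))));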
@@ -1353,7 +1367,8 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for ConsistentProvider<N> {
block_number: BlockNumber,
address: Address,
storage_key: B256,
) -> ProviderResult<Option<StorageEntry>> {
) -> ProviderResult<Option<ChangesetEntry>> {
let use_hashed = self.storage_provider.cached_storage_settings().use_hashed_state;
if let Some(state) =
self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into()))
{
@@ -1372,9 +1387,11 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for ConsistentProvider<N> {
return None
}
revert.storage_revert.into_iter().find_map(|(key, value)| {
let key = key.into();
(key == storage_key)
.then(|| StorageEntry { key, value: value.to_previous_value() })
let tagged_key = StorageSlotKey::from_u256(key).to_changeset(use_hashed);
(tagged_key.as_b256() == storage_key).then(|| ChangesetEntry {
key: tagged_key,
value: value.to_previous_value(),
})
})
});
Ok(changeset)
@@ -1398,12 +1415,14 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for ConsistentProvider<N> {
fn storage_changesets_range(
&self,
range: impl RangeBounds<BlockNumber>,
) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
) -> ProviderResult<Vec<(BlockNumberAddress, ChangesetEntry)>> {
let range = to_range(range);
let mut changesets = Vec::new();
let database_start = range.start;
let mut database_end = range.end;
let use_hashed = self.storage_provider.cached_storage_settings().use_hashed_state;
if let Some(head_block) = &self.head_block {
database_end = head_block.anchor().number;
@@ -1421,9 +1440,14 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for ConsistentProvider<N> {
.flatten()
.flat_map(|revert: PlainStorageRevert| {
revert.storage_revert.into_iter().map(move |(key, value)| {
let tagged_key =
StorageSlotKey::from_u256(key).to_changeset(use_hashed);
(
BlockNumberAddress((state.number(), revert.address)),
StorageEntry { key: key.into(), value: value.to_previous_value() },
ChangesetEntry {
key: tagged_key,
value: value.to_previous_value(),
},
)
})
});
@@ -2060,4 +2084,648 @@ mod tests {
Ok(())
}
#[test]
fn test_get_state_storage_value_hashed_state() -> eyre::Result<()> {
use alloy_primitives::{keccak256, U256};
use reth_db_api::{models::StorageSettings, tables, transaction::DbTxMut};
use reth_primitives_traits::StorageEntry;
use reth_storage_api::StorageSettingsCache;
use std::collections::HashMap;
let address = alloy_primitives::Address::with_last_byte(1);
let account = reth_primitives_traits::Account {
nonce: 1,
balance: U256::from(1000),
bytecode_hash: None,
};
let slot = U256::from(0x42);
let slot_b256 = B256::from(slot);
let hashed_address = keccak256(address);
let hashed_slot = keccak256(slot_b256);
let mut rng = generators::rng();
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v2());
let blocks = random_block_range(
&mut rng,
0..=1,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() },
);
let provider_rw = factory.provider_rw()?;
provider_rw.append_blocks_with_state(
blocks
.into_iter()
.map(|b| b.try_recover().expect("failed to seal block with senders"))
.collect(),
&ExecutionOutcome {
bundle: BundleState::new(
[(address, None, Some(account.into()), {
let mut s = HashMap::default();
s.insert(slot, (U256::ZERO, U256::from(100)));
s
})],
[
Vec::new(),
vec![(address, Some(Some(account.into())), vec![(slot, U256::ZERO)])],
],
[],
),
first_block: 0,
..Default::default()
},
Default::default(),
)?;
provider_rw.tx_ref().put::<tables::HashedStorages>(
hashed_address,
StorageEntry { key: hashed_slot, value: U256::from(100) },
)?;
provider_rw.tx_ref().put::<tables::HashedAccounts>(hashed_address, account)?;
provider_rw.commit()?;
let provider = BlockchainProvider::new(factory)?;
let consistent_provider = provider.consistent_provider()?;
let outcome =
consistent_provider.get_state(1..=1)?.expect("should return execution outcome");
let state = &outcome.bundle.state;
let account_state = state.get(&address).expect("should have account in bundle state");
let storage = &account_state.storage;
let slot_as_u256 = U256::from_be_bytes(*hashed_slot);
let storage_slot = storage.get(&slot_as_u256).expect("should have the slot in storage");
assert_eq!(
storage_slot.present_value,
U256::from(100),
"present_value should be 100 (the actual value in HashedStorages)"
);
Ok(())
}
#[test]
#[cfg(all(unix, feature = "rocksdb"))]
fn test_get_state_storage_value_hashed_state_historical() -> eyre::Result<()> {
use alloy_primitives::{keccak256, U256};
use reth_db_api::{models::StorageSettings, tables, transaction::DbTxMut};
use reth_primitives_traits::StorageEntry;
use reth_storage_api::StorageSettingsCache;
use std::collections::HashMap;
let address = alloy_primitives::Address::with_last_byte(1);
let account = reth_primitives_traits::Account {
nonce: 1,
balance: U256::from(1000),
bytecode_hash: None,
};
let slot = U256::from(0x42);
let slot_b256 = B256::from(slot);
let hashed_address = keccak256(address);
let hashed_slot = keccak256(slot_b256);
let mut rng = generators::rng();
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v2());
let blocks = random_block_range(
&mut rng,
0..=3,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() },
);
let provider_rw = factory.provider_rw()?;
provider_rw.append_blocks_with_state(
blocks
.into_iter()
.map(|b| b.try_recover().expect("failed to seal block with senders"))
.collect(),
&ExecutionOutcome {
bundle: BundleState::new(
[(address, None, Some(account.into()), {
let mut s = HashMap::default();
s.insert(slot, (U256::ZERO, U256::from(300)));
s
})],
[
Vec::new(),
vec![(address, Some(Some(account.into())), vec![(slot, U256::ZERO)])],
vec![(address, Some(Some(account.into())), vec![(slot, U256::from(100))])],
vec![(address, Some(Some(account.into())), vec![(slot, U256::from(200))])],
],
[],
),
first_block: 0,
..Default::default()
},
Default::default(),
)?;
provider_rw.tx_ref().put::<tables::HashedStorages>(
hashed_address,
StorageEntry { key: hashed_slot, value: U256::from(300) },
)?;
provider_rw.tx_ref().put::<tables::HashedAccounts>(hashed_address, account)?;
provider_rw.commit()?;
let provider = BlockchainProvider::new(factory)?;
let consistent_provider = provider.consistent_provider()?;
let outcome =
consistent_provider.get_state(1..=2)?.expect("should return execution outcome");
let state = &outcome.bundle.state;
let account_state = state.get(&address).expect("should have account in bundle state");
let storage = &account_state.storage;
let slot_as_u256 = U256::from_be_bytes(*hashed_slot);
let storage_slot = storage.get(&slot_as_u256).expect("should have the slot in storage");
assert_eq!(
storage_slot.present_value,
U256::from(200),
"present_value should be 200 (the value at block 2, not 300 which is the latest)"
);
Ok(())
}
#[test]
fn test_get_state_storage_value_plain_state() -> eyre::Result<()> {
use alloy_primitives::U256;
use reth_db_api::{models::StorageSettings, tables, transaction::DbTxMut};
use reth_primitives_traits::StorageEntry;
use reth_storage_api::StorageSettingsCache;
use std::collections::HashMap;
let address = alloy_primitives::Address::with_last_byte(1);
let account = reth_primitives_traits::Account {
nonce: 1,
balance: U256::from(1000),
bytecode_hash: None,
};
let slot = U256::from(0x42);
let slot_b256 = B256::from(slot);
let mut rng = generators::rng();
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v1());
let blocks = random_block_range(
&mut rng,
0..=1,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() },
);
let provider_rw = factory.provider_rw()?;
provider_rw.append_blocks_with_state(
blocks
.into_iter()
.map(|b| b.try_recover().expect("failed to seal block with senders"))
.collect(),
&ExecutionOutcome {
bundle: BundleState::new(
[(address, None, Some(account.into()), {
let mut s = HashMap::default();
s.insert(slot, (U256::ZERO, U256::from(100)));
s
})],
[
Vec::new(),
vec![(address, Some(Some(account.into())), vec![(slot, U256::ZERO)])],
],
[],
),
first_block: 0,
..Default::default()
},
Default::default(),
)?;
provider_rw.tx_ref().put::<tables::PlainStorageState>(
address,
StorageEntry { key: slot_b256, value: U256::from(100) },
)?;
provider_rw.tx_ref().put::<tables::PlainAccountState>(address, account)?;
provider_rw.commit()?;
let provider = BlockchainProvider::new(factory)?;
let consistent_provider = provider.consistent_provider()?;
let outcome =
consistent_provider.get_state(1..=1)?.expect("should return execution outcome");
let state = &outcome.bundle.state;
let account_state = state.get(&address).expect("should have account in bundle state");
let storage = &account_state.storage;
let storage_slot = storage.get(&slot).expect("should have the slot in storage");
assert_eq!(
storage_slot.present_value,
U256::from(100),
"present_value should be 100 (the actual value in PlainStorageState)"
);
Ok(())
}
#[test]
fn test_storage_changeset_consistent_keys_hashed_state() -> eyre::Result<()> {
use alloy_primitives::{keccak256, U256};
use reth_db_api::models::StorageSettings;
use reth_storage_api::{StorageChangeSetReader, StorageSettingsCache};
use std::collections::HashMap;
let mut rng = generators::rng();
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v2());
let (database_blocks, in_memory_blocks) = random_blocks(&mut rng, 1, 1, None, None, 0..1);
let address = alloy_primitives::Address::with_last_byte(1);
let account = reth_primitives_traits::Account {
nonce: 1,
balance: U256::from(1000),
bytecode_hash: None,
};
let slot = U256::from(0x42);
let provider_rw = factory.provider_rw()?;
provider_rw.append_blocks_with_state(
database_blocks
.into_iter()
.map(|b| b.try_recover().expect("failed to seal block with senders"))
.collect(),
&ExecutionOutcome {
bundle: BundleState::new(
[(address, None, Some(account.into()), {
let mut s = HashMap::default();
s.insert(slot, (U256::ZERO, U256::from(100)));
s
})],
[[(address, Some(Some(account.into())), vec![(slot, U256::ZERO)])]],
[],
),
first_block: 0,
..Default::default()
},
Default::default(),
)?;
provider_rw.commit()?;
let provider = BlockchainProvider::new(factory)?;
let in_mem_block = in_memory_blocks.first().unwrap();
let senders = in_mem_block.senders().expect("failed to recover senders");
let chain = NewCanonicalChain::Commit {
new: vec![ExecutedBlock {
recovered_block: Arc::new(RecoveredBlock::new_sealed(
in_mem_block.clone(),
senders,
)),
execution_output: Arc::new(BlockExecutionOutput {
state: BundleState::new(
[(address, None, Some(account.into()), {
let mut s = HashMap::default();
s.insert(slot, (U256::from(100), U256::from(200)));
s
})],
[[(address, Some(Some(account.into())), vec![(slot, U256::from(100))])]],
[],
),
result: BlockExecutionResult {
receipts: Default::default(),
requests: Default::default(),
gas_used: 0,
blob_gas_used: 0,
},
}),
..Default::default()
}],
};
provider.canonical_in_memory_state.update_chain(chain);
let consistent_provider = provider.consistent_provider()?;
let db_changeset = consistent_provider.storage_changeset(0)?;
let mem_changeset = consistent_provider.storage_changeset(1)?;
let slot_b256 = B256::from(slot);
let hashed_slot_b256 = keccak256(slot_b256);
assert_eq!(db_changeset.len(), 1);
assert_eq!(mem_changeset.len(), 1);
let db_key = db_changeset[0].1.key;
let mem_key = mem_changeset[0].1.key;
assert_eq!(
db_key.as_b256(),
hashed_slot_b256,
"changeset keys should be hashed when use_hashed_state is enabled"
);
assert_eq!(
db_key, mem_key,
"DB and in-memory changesets should return the same key format (hashed) for the same logical slot"
);
Ok(())
}
#[test]
fn test_storage_changeset_consistent_keys_plain_state() -> eyre::Result<()> {
use alloy_primitives::U256;
use reth_db_api::models::StorageSettings;
use reth_storage_api::{StorageChangeSetReader, StorageSettingsCache};
use std::collections::HashMap;
let mut rng = generators::rng();
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v1());
let (database_blocks, in_memory_blocks) = random_blocks(&mut rng, 1, 1, None, None, 0..1);
let address = alloy_primitives::Address::with_last_byte(1);
let account = reth_primitives_traits::Account {
nonce: 1,
balance: U256::from(1000),
bytecode_hash: None,
};
let slot = U256::from(0x42);
let provider_rw = factory.provider_rw()?;
provider_rw.append_blocks_with_state(
database_blocks
.into_iter()
.map(|b| b.try_recover().expect("failed to seal block with senders"))
.collect(),
&ExecutionOutcome {
bundle: BundleState::new(
[(address, None, Some(account.into()), {
let mut s = HashMap::default();
s.insert(slot, (U256::ZERO, U256::from(100)));
s
})],
[[(address, Some(Some(account.into())), vec![(slot, U256::ZERO)])]],
[],
),
first_block: 0,
..Default::default()
},
Default::default(),
)?;
provider_rw.commit()?;
let provider = BlockchainProvider::new(factory)?;
let in_mem_block = in_memory_blocks.first().unwrap();
let senders = in_mem_block.senders().expect("failed to recover senders");
let chain = NewCanonicalChain::Commit {
new: vec![ExecutedBlock {
recovered_block: Arc::new(RecoveredBlock::new_sealed(
in_mem_block.clone(),
senders,
)),
execution_output: Arc::new(BlockExecutionOutput {
state: BundleState::new(
[(address, None, Some(account.into()), {
let mut s = HashMap::default();
s.insert(slot, (U256::from(100), U256::from(200)));
s
})],
[[(address, Some(Some(account.into())), vec![(slot, U256::from(100))])]],
[],
),
result: BlockExecutionResult {
receipts: Default::default(),
requests: Default::default(),
gas_used: 0,
blob_gas_used: 0,
},
}),
..Default::default()
}],
};
provider.canonical_in_memory_state.update_chain(chain);
let consistent_provider = provider.consistent_provider()?;
let db_changeset = consistent_provider.storage_changeset(0)?;
let mem_changeset = consistent_provider.storage_changeset(1)?;
let slot_b256 = B256::from(slot);
assert_eq!(db_changeset.len(), 1);
assert_eq!(mem_changeset.len(), 1);
let db_key = db_changeset[0].1.key.as_b256();
let mem_key = mem_changeset[0].1.key.as_b256();
assert_eq!(db_key, slot_b256, "DB changeset should use plain (unhashed) key");
assert_eq!(mem_key, slot_b256, "In-memory changeset should use plain (unhashed) key");
assert_eq!(
db_key, mem_key,
"DB and in-memory changesets should return the same key format (plain) for the same logical slot"
);
Ok(())
}
#[test]
fn test_storage_changesets_range_consistent_keys_hashed_state() -> eyre::Result<()> {
use alloy_primitives::U256;
use reth_db_api::models::StorageSettings;
use reth_storage_api::{StorageChangeSetReader, StorageSettingsCache};
use std::collections::HashMap;
let mut rng = generators::rng();
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v2());
let (database_blocks, in_memory_blocks) = random_blocks(&mut rng, 2, 1, None, None, 0..1);
let address = alloy_primitives::Address::with_last_byte(1);
let account = reth_primitives_traits::Account {
nonce: 1,
balance: U256::from(1000),
bytecode_hash: None,
};
let slot = U256::from(0x42);
let provider_rw = factory.provider_rw()?;
provider_rw.append_blocks_with_state(
database_blocks
.into_iter()
.map(|b| b.try_recover().expect("failed to seal block with senders"))
.collect(),
&ExecutionOutcome {
bundle: BundleState::new(
[(address, None, Some(account.into()), {
let mut s = HashMap::default();
s.insert(slot, (U256::ZERO, U256::from(100)));
s
})],
vec![
vec![(address, Some(Some(account.into())), vec![(slot, U256::ZERO)])],
vec![],
],
[],
),
first_block: 0,
..Default::default()
},
Default::default(),
)?;
provider_rw.commit()?;
let provider = BlockchainProvider::new(factory)?;
let in_mem_block = in_memory_blocks.first().unwrap();
let senders = in_mem_block.senders().expect("failed to recover senders");
let chain = NewCanonicalChain::Commit {
new: vec![ExecutedBlock {
recovered_block: Arc::new(RecoveredBlock::new_sealed(
in_mem_block.clone(),
senders,
)),
execution_output: Arc::new(BlockExecutionOutput {
state: BundleState::new(
[(address, None, Some(account.into()), {
let mut s = HashMap::default();
s.insert(slot, (U256::from(100), U256::from(200)));
s
})],
[[(address, Some(Some(account.into())), vec![(slot, U256::from(100))])]],
[],
),
result: BlockExecutionResult {
receipts: Default::default(),
requests: Default::default(),
gas_used: 0,
blob_gas_used: 0,
},
}),
..Default::default()
}],
};
provider.canonical_in_memory_state.update_chain(chain);
let consistent_provider = provider.consistent_provider()?;
let all_changesets = consistent_provider.storage_changesets_range(0..=2)?;
assert_eq!(all_changesets.len(), 2, "should have one changeset entry from the DB and one from memory");
let keys: Vec<B256> = all_changesets.iter().map(|(_, entry)| entry.key.as_b256()).collect();
assert_eq!(
keys[0], keys[1],
"same logical slot should produce identical keys whether from DB or memory"
);
Ok(())
}
#[test]
fn test_storage_changesets_range_consistent_keys_plain_state() -> eyre::Result<()> {
use alloy_primitives::U256;
use reth_db_api::models::StorageSettings;
use reth_storage_api::{StorageChangeSetReader, StorageSettingsCache};
use std::collections::HashMap;
let mut rng = generators::rng();
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v1());
let (database_blocks, in_memory_blocks) = random_blocks(&mut rng, 2, 1, None, None, 0..1);
let address = alloy_primitives::Address::with_last_byte(1);
let account = reth_primitives_traits::Account {
nonce: 1,
balance: U256::from(1000),
bytecode_hash: None,
};
let slot = U256::from(0x42);
let provider_rw = factory.provider_rw()?;
provider_rw.append_blocks_with_state(
database_blocks
.into_iter()
.map(|b| b.try_recover().expect("failed to seal block with senders"))
.collect(),
&ExecutionOutcome {
bundle: BundleState::new(
[(address, None, Some(account.into()), {
let mut s = HashMap::default();
s.insert(slot, (U256::ZERO, U256::from(100)));
s
})],
vec![
vec![(address, Some(Some(account.into())), vec![(slot, U256::ZERO)])],
vec![],
],
[],
),
first_block: 0,
..Default::default()
},
Default::default(),
)?;
provider_rw.commit()?;
let provider = BlockchainProvider::new(factory)?;
let in_mem_block = in_memory_blocks.first().unwrap();
let senders = in_mem_block.senders().expect("failed to recover senders");
let chain = NewCanonicalChain::Commit {
new: vec![ExecutedBlock {
recovered_block: Arc::new(RecoveredBlock::new_sealed(
in_mem_block.clone(),
senders,
)),
execution_output: Arc::new(BlockExecutionOutput {
state: BundleState::new(
[(address, None, Some(account.into()), {
let mut s = HashMap::default();
s.insert(slot, (U256::from(100), U256::from(200)));
s
})],
[[(address, Some(Some(account.into())), vec![(slot, U256::from(100))])]],
[],
),
result: BlockExecutionResult {
receipts: Default::default(),
requests: Default::default(),
gas_used: 0,
blob_gas_used: 0,
},
}),
..Default::default()
}],
};
provider.canonical_in_memory_state.update_chain(chain);
let consistent_provider = provider.consistent_provider()?;
let all_changesets = consistent_provider.storage_changesets_range(0..=2)?;
assert_eq!(all_changesets.len(), 2, "should have one changeset entry from the DB and one from memory");
let slot_b256 = B256::from(slot);
let keys: Vec<B256> = all_changesets.iter().map(|(_, entry)| entry.key.as_b256()).collect();
assert_eq!(
keys[0], keys[1],
"same logical slot should produce identical keys whether from DB or memory"
);
assert_eq!(
keys[0], slot_b256,
"keys should be plain/unhashed when use_hashed_state is false"
);
Ok(())
}
}

File diff suppressed because it is too large

View File

@@ -317,7 +317,10 @@ impl RocksDBProvider {
let unique_keys: HashSet<_> = changesets
.into_iter()
.map(|(block_addr, entry)| (block_addr.address(), entry.key, checkpoint + 1))
.map(|(block_addr, entry)| {
// entry.key is a hashed storage key
(block_addr.address(), entry.key.as_b256(), checkpoint + 1)
})
.collect();
let indices: Vec<_> = unique_keys.into_iter().collect();

View File

@@ -2,6 +2,7 @@ use super::metrics::{RocksDBMetrics, RocksDBOperation, ROCKSDB_TABLES};
use crate::providers::{compute_history_rank, needs_prev_shard_check, HistoryInfo};
use alloy_consensus::transaction::TxHashRef;
use alloy_primitives::{
keccak256,
map::{AddressMap, HashMap},
Address, BlockNumber, TxNumber, B256,
};
@@ -1336,7 +1337,8 @@ impl RocksDBProvider {
for storage_block_reverts in reverts.storage {
for revert in storage_block_reverts {
for (slot, _) in revert.storage_revert {
let key = B256::new(slot.to_be_bytes());
let plain_key = B256::new(slot.to_be_bytes());
let key = keccak256(plain_key);
storage_history
.entry((revert.address, key))
.or_default()

View File

@@ -11,7 +11,7 @@ use reth_db_api::{
transaction::DbTx,
BlockNumberList,
};
use reth_primitives_traits::{Account, Bytecode};
use reth_primitives_traits::{Account, Bytecode, StorageSlotKey};
use reth_storage_api::{
BlockNumReader, BytecodeReader, DBProvider, NodePrimitivesProvider, StateProofProvider,
StorageChangeSetReader, StorageRootProvider, StorageSettingsCache,
@@ -26,8 +26,8 @@ use reth_trie::{
TrieInputSorted,
};
use reth_trie_db::{
hashed_storage_from_reverts_with_provider, DatabaseHashedPostState, DatabaseProof,
DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness,
hashed_storage_from_reverts_with_provider, DatabaseProof, DatabaseStateRoot,
DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness,
};
use std::fmt::Debug;
@@ -150,7 +150,7 @@ impl<'b, Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + Block
pub fn storage_history_lookup(
&self,
address: Address,
storage_key: StorageKey,
storage_key: StorageSlotKey,
) -> ProviderResult<HistoryInfo>
where
Provider: StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider,
@@ -159,17 +159,85 @@ impl<'b, Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + Block
return Err(ProviderError::StateAtBlockPruned(self.block_number))
}
let lookup_key = if self.provider.cached_storage_settings().use_hashed_state {
storage_key.to_hashed()
} else {
debug_assert!(
storage_key.is_plain(),
"expected plain storage key when use_hashed_state is false"
);
storage_key.as_b256()
};
self.provider.with_rocksdb_tx(|rocks_tx_ref| {
let mut reader = EitherReader::new_storages_history(self.provider, rocks_tx_ref)?;
reader.storage_history_info(
address,
storage_key,
lookup_key,
self.block_number,
self.lowest_available_blocks.storage_history_block_number,
)
})
}
/// Resolves a storage value by looking up the given key in history, changesets, or
/// plain state.
///
/// Accepts a [`StorageSlotKey`]; the correct lookup key is derived internally
/// based on the storage mode.
fn storage_by_lookup_key(
&self,
address: Address,
storage_key: StorageSlotKey,
) -> ProviderResult<Option<StorageValue>>
where
Provider: StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider,
{
let lookup_key = if self.provider.cached_storage_settings().use_hashed_state {
storage_key.to_hashed()
} else {
debug_assert!(
storage_key.is_plain(),
"expected plain storage key when use_hashed_state is false"
);
storage_key.as_b256()
};
match self.storage_history_lookup(address, storage_key)? {
HistoryInfo::NotYetWritten => Ok(None),
HistoryInfo::InChangeset(changeset_block_number) => self
.provider
.get_storage_before_block(changeset_block_number, address, lookup_key)?
.ok_or_else(|| ProviderError::StorageChangesetNotFound {
block_number: changeset_block_number,
address,
storage_key: Box::new(lookup_key),
})
.map(|entry| entry.value)
.map(Some),
HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => {
if self.provider.cached_storage_settings().use_hashed_state {
let hashed_address = alloy_primitives::keccak256(address);
Ok(self
.tx()
.cursor_dup_read::<tables::HashedStorages>()?
.seek_by_key_subkey(hashed_address, lookup_key)?
.filter(|entry| entry.key == lookup_key)
.map(|entry| entry.value)
.or(Some(StorageValue::ZERO)))
} else {
Ok(self
.tx()
.cursor_dup_read::<tables::PlainStorageState>()?
.seek_by_key_subkey(address, lookup_key)?
.filter(|entry| entry.key == lookup_key)
.map(|entry| entry.value)
.or(Some(StorageValue::ZERO)))
}
}
}
}
/// Checks and returns `true` if distance to historical block exceeds the provided limit.
fn check_distance_against_limit(&self, limit: u64) -> ProviderResult<bool> {
let tip = self.provider.last_block_number()?;
@@ -178,7 +246,10 @@ impl<'b, Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + Block
}
/// Retrieve revert hashed state for this history provider.
fn revert_state(&self) -> ProviderResult<HashedPostStateSorted> {
fn revert_state(&self) -> ProviderResult<HashedPostStateSorted>
where
Provider: StorageSettingsCache,
{
if !self.lowest_available_blocks.is_account_history_available(self.block_number) ||
!self.lowest_available_blocks.is_storage_history_available(self.block_number)
{
@@ -193,11 +264,14 @@ impl<'b, Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + Block
);
}
HashedPostStateSorted::from_reverts::<KeccakKeyHasher>(self.provider, self.block_number..)
reth_trie_db::from_reverts_auto(self.provider, self.block_number..)
}
/// Retrieve revert hashed storage for this history provider and target address.
fn revert_storage(&self, address: Address) -> ProviderResult<HashedStorage> {
fn revert_storage(&self, address: Address) -> ProviderResult<HashedStorage>
where
Provider: StorageSettingsCache,
{
if !self.lowest_available_blocks.is_storage_history_available(self.block_number) {
return Err(ProviderError::StateAtBlockPruned(self.block_number))
}
@@ -263,7 +337,12 @@ impl<
.map(|account_before| account_before.info)
}
HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => {
Ok(self.tx().get_by_encoded_key::<tables::PlainAccountState>(address)?)
if self.provider.cached_storage_settings().use_hashed_state {
let hashed_address = alloy_primitives::keccak256(address);
Ok(self.tx().get_by_encoded_key::<tables::HashedAccounts>(&hashed_address)?)
} else {
Ok(self.tx().get_by_encoded_key::<tables::PlainAccountState>(address)?)
}
}
}
}
@@ -286,8 +365,13 @@ impl<Provider: DBProvider + BlockNumReader + BlockHashReader> BlockHashReader
}
}
impl<Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + BlockNumReader>
StateRootProvider for HistoricalStateProviderRef<'_, Provider>
impl<
Provider: DBProvider
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader
+ StorageSettingsCache,
> StateRootProvider for HistoricalStateProviderRef<'_, Provider>
{
fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult<B256> {
let mut revert_state = self.revert_state()?;
@@ -323,8 +407,13 @@ impl<Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + BlockNumR
}
}
impl<Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + BlockNumReader>
StorageRootProvider for HistoricalStateProviderRef<'_, Provider>
impl<
Provider: DBProvider
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader
+ StorageSettingsCache,
> StorageRootProvider for HistoricalStateProviderRef<'_, Provider>
{
fn storage_root(
&self,
@@ -362,8 +451,13 @@ impl<Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + BlockNumR
}
}
impl<Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + BlockNumReader>
StateProofProvider for HistoricalStateProviderRef<'_, Provider>
impl<
Provider: DBProvider
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader
+ StorageSettingsCache,
> StateProofProvider for HistoricalStateProviderRef<'_, Provider>
{
/// Get account and storage proofs.
fn proof(
@@ -412,32 +506,24 @@ impl<
+ NodePrimitivesProvider,
> StateProvider for HistoricalStateProviderRef<'_, Provider>
{
/// Get storage.
/// Expects a plain (unhashed) storage slot key.
fn storage(
&self,
address: Address,
storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
match self.storage_history_lookup(address, storage_key)? {
HistoryInfo::NotYetWritten => Ok(None),
HistoryInfo::InChangeset(changeset_block_number) => self
.provider
.get_storage_before_block(changeset_block_number, address, storage_key)?
.ok_or_else(|| ProviderError::StorageChangesetNotFound {
block_number: changeset_block_number,
address,
storage_key: Box::new(storage_key),
})
.map(|entry| entry.value)
.map(Some),
HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => Ok(self
.tx()
.cursor_dup_read::<tables::PlainStorageState>()?
.seek_by_key_subkey(address, storage_key)?
.filter(|entry| entry.key == storage_key)
.map(|entry| entry.value)
.or(Some(StorageValue::ZERO))),
self.storage_by_lookup_key(address, StorageSlotKey::plain(storage_key))
}
fn storage_by_hashed_key(
&self,
address: Address,
hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
if !self.provider.cached_storage_settings().use_hashed_state {
return Err(ProviderError::UnsupportedProvider)
}
self.storage_by_lookup_key(address, StorageSlotKey::hashed(hashed_storage_key))
}
}
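Taken together, an illustrative read path against the historical provider. This mirrors the test harness below; `address` and `slot` are placeholders inside a function returning a `Result`:

let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v2());
let db = factory.provider()?;
let state = HistoricalStateProviderRef::new(&db, 10);
// Plain keys work in both layouts; under v2 the key is hashed internally.
let by_plain = state.storage(address, slot)?;
// Pre-hashed keys are only valid under v2; a v1 node returns
// ProviderError::UnsupportedProvider instead.
let by_hashed = state.storage_by_hashed_key(address, keccak256(slot))?;
assert_eq!(by_plain, by_hashed);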
@@ -630,7 +716,7 @@ mod tests {
transaction::{DbTx, DbTxMut},
BlockNumberList,
};
use reth_primitives_traits::{Account, StorageEntry};
use reth_primitives_traits::{Account, StorageEntry, StorageSlotKey};
use reth_storage_api::{
BlockHashReader, BlockNumReader, ChangeSetReader, DBProvider, DatabaseProviderFactory,
NodePrimitivesProvider, StorageChangeSetReader, StorageSettingsCache,
@@ -885,7 +971,7 @@ mod tests {
Err(ProviderError::StateAtBlockPruned(number)) if number == provider.block_number
));
assert!(matches!(
provider.storage_history_lookup(ADDRESS, STORAGE),
provider.storage_history_lookup(ADDRESS, StorageSlotKey::plain(STORAGE)),
Err(ProviderError::StateAtBlockPruned(number)) if number == provider.block_number
));
@@ -904,7 +990,7 @@ mod tests {
Ok(HistoryInfo::MaybeInPlainState)
));
assert!(matches!(
provider.storage_history_lookup(ADDRESS, STORAGE),
provider.storage_history_lookup(ADDRESS, StorageSlotKey::plain(STORAGE)),
Ok(HistoryInfo::MaybeInPlainState)
));
@@ -923,7 +1009,7 @@ mod tests {
Ok(HistoryInfo::MaybeInPlainState)
));
assert!(matches!(
provider.storage_history_lookup(ADDRESS, STORAGE),
provider.storage_history_lookup(ADDRESS, StorageSlotKey::plain(STORAGE)),
Ok(HistoryInfo::MaybeInPlainState)
));
}
@@ -943,6 +1029,242 @@ mod tests {
assert_eq!(HistoryInfo::from_lookup(None, false, None), HistoryInfo::InPlainState);
}
#[test]
fn history_provider_get_storage_legacy() {
let factory = create_test_provider_factory();
assert!(!factory.provider().unwrap().cached_storage_settings().use_hashed_state);
let tx = factory.provider_rw().unwrap().into_tx();
tx.put::<tables::StoragesHistory>(
StorageShardedKey {
address: ADDRESS,
sharded_key: ShardedKey { key: STORAGE, highest_block_number: 7 },
},
BlockNumberList::new([3, 7]).unwrap(),
)
.unwrap();
tx.put::<tables::StoragesHistory>(
StorageShardedKey {
address: ADDRESS,
sharded_key: ShardedKey { key: STORAGE, highest_block_number: u64::MAX },
},
BlockNumberList::new([10, 15]).unwrap(),
)
.unwrap();
tx.put::<tables::StoragesHistory>(
StorageShardedKey {
address: HIGHER_ADDRESS,
sharded_key: ShardedKey { key: STORAGE, highest_block_number: u64::MAX },
},
BlockNumberList::new([4]).unwrap(),
)
.unwrap();
let higher_entry_plain = StorageEntry { key: STORAGE, value: U256::from(1000) };
let higher_entry_at4 = StorageEntry { key: STORAGE, value: U256::from(0) };
let entry_plain = StorageEntry { key: STORAGE, value: U256::from(100) };
let entry_at15 = StorageEntry { key: STORAGE, value: U256::from(15) };
let entry_at10 = StorageEntry { key: STORAGE, value: U256::from(10) };
let entry_at7 = StorageEntry { key: STORAGE, value: U256::from(7) };
let entry_at3 = StorageEntry { key: STORAGE, value: U256::from(0) };
tx.put::<tables::StorageChangeSets>((3, ADDRESS).into(), entry_at3).unwrap();
tx.put::<tables::StorageChangeSets>((4, HIGHER_ADDRESS).into(), higher_entry_at4).unwrap();
tx.put::<tables::StorageChangeSets>((7, ADDRESS).into(), entry_at7).unwrap();
tx.put::<tables::StorageChangeSets>((10, ADDRESS).into(), entry_at10).unwrap();
tx.put::<tables::StorageChangeSets>((15, ADDRESS).into(), entry_at15).unwrap();
tx.put::<tables::PlainStorageState>(ADDRESS, entry_plain).unwrap();
tx.put::<tables::PlainStorageState>(HIGHER_ADDRESS, higher_entry_plain).unwrap();
tx.commit().unwrap();
let db = factory.provider().unwrap();
assert!(matches!(
HistoricalStateProviderRef::new(&db, 0).storage(ADDRESS, STORAGE),
Ok(None)
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 3).storage(ADDRESS, STORAGE),
Ok(Some(U256::ZERO))
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 4).storage(ADDRESS, STORAGE),
Ok(Some(expected_value)) if expected_value == entry_at7.value
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 7).storage(ADDRESS, STORAGE),
Ok(Some(expected_value)) if expected_value == entry_at7.value
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 9).storage(ADDRESS, STORAGE),
Ok(Some(expected_value)) if expected_value == entry_at10.value
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 10).storage(ADDRESS, STORAGE),
Ok(Some(expected_value)) if expected_value == entry_at10.value
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 11).storage(ADDRESS, STORAGE),
Ok(Some(expected_value)) if expected_value == entry_at15.value
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 16).storage(ADDRESS, STORAGE),
Ok(Some(expected_value)) if expected_value == entry_plain.value
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 1).storage(HIGHER_ADDRESS, STORAGE),
Ok(None)
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 1000).storage(HIGHER_ADDRESS, STORAGE),
Ok(Some(expected_value)) if expected_value == higher_entry_plain.value
));
}
#[test]
#[cfg(all(unix, feature = "rocksdb"))]
fn history_provider_get_storage_hashed_state() {
use crate::BlockWriter;
use alloy_primitives::keccak256;
use reth_db_api::models::StorageSettings;
use reth_execution_types::ExecutionOutcome;
use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams};
use revm_database::BundleState;
use std::collections::HashMap;
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v2());
let slot = U256::from_be_bytes(*STORAGE);
let account: revm_state::AccountInfo =
Account { nonce: 1, balance: U256::from(1000), bytecode_hash: None }.into();
let higher_account: revm_state::AccountInfo =
Account { nonce: 1, balance: U256::from(2000), bytecode_hash: None }.into();
let mut rng = generators::rng();
let blocks = random_block_range(
&mut rng,
0..=15,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() },
);
let mut addr_storage = HashMap::default();
addr_storage.insert(slot, (U256::ZERO, U256::from(100)));
let mut higher_storage = HashMap::default();
higher_storage.insert(slot, (U256::ZERO, U256::from(1000)));
type Revert = Vec<(Address, Option<Option<revm_state::AccountInfo>>, Vec<(U256, U256)>)>;
let mut reverts: Vec<Revert> = vec![Vec::new(); 16];
reverts[3] = vec![(ADDRESS, Some(Some(account.clone())), vec![(slot, U256::ZERO)])];
reverts[4] =
vec![(HIGHER_ADDRESS, Some(Some(higher_account.clone())), vec![(slot, U256::ZERO)])];
reverts[7] = vec![(ADDRESS, Some(Some(account.clone())), vec![(slot, U256::from(7))])];
reverts[10] = vec![(ADDRESS, Some(Some(account.clone())), vec![(slot, U256::from(10))])];
reverts[15] = vec![(ADDRESS, Some(Some(account.clone())), vec![(slot, U256::from(15))])];
let bundle = BundleState::new(
[
(ADDRESS, None, Some(account), addr_storage),
(HIGHER_ADDRESS, None, Some(higher_account), higher_storage),
],
reverts,
[],
);
let provider_rw = factory.provider_rw().unwrap();
provider_rw
.append_blocks_with_state(
blocks
.into_iter()
.map(|b| b.try_recover().expect("failed to seal block with senders"))
.collect(),
&ExecutionOutcome { bundle, first_block: 0, ..Default::default() },
Default::default(),
)
.unwrap();
let hashed_address = keccak256(ADDRESS);
let hashed_higher_address = keccak256(HIGHER_ADDRESS);
let hashed_storage = keccak256(STORAGE);
provider_rw
.tx_ref()
.put::<tables::HashedStorages>(
hashed_address,
StorageEntry { key: hashed_storage, value: U256::from(100) },
)
.unwrap();
provider_rw
.tx_ref()
.put::<tables::HashedStorages>(
hashed_higher_address,
StorageEntry { key: hashed_storage, value: U256::from(1000) },
)
.unwrap();
provider_rw
.tx_ref()
.put::<tables::HashedAccounts>(
hashed_address,
Account { nonce: 1, balance: U256::from(1000), bytecode_hash: None },
)
.unwrap();
provider_rw
.tx_ref()
.put::<tables::HashedAccounts>(
hashed_higher_address,
Account { nonce: 1, balance: U256::from(2000), bytecode_hash: None },
)
.unwrap();
provider_rw.commit().unwrap();
let db = factory.provider().unwrap();
assert!(matches!(
HistoricalStateProviderRef::new(&db, 0).storage(ADDRESS, STORAGE),
Ok(None)
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 3).storage(ADDRESS, STORAGE),
Ok(Some(U256::ZERO))
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 4).storage(ADDRESS, STORAGE),
Ok(Some(v)) if v == U256::from(7)
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 7).storage(ADDRESS, STORAGE),
Ok(Some(v)) if v == U256::from(7)
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 9).storage(ADDRESS, STORAGE),
Ok(Some(v)) if v == U256::from(10)
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 10).storage(ADDRESS, STORAGE),
Ok(Some(v)) if v == U256::from(10)
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 11).storage(ADDRESS, STORAGE),
Ok(Some(v)) if v == U256::from(15)
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 16).storage(ADDRESS, STORAGE),
Ok(Some(v)) if v == U256::from(100)
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 1).storage(HIGHER_ADDRESS, STORAGE),
Ok(None)
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 1000).storage(HIGHER_ADDRESS, STORAGE),
Ok(Some(v)) if v == U256::from(1000)
));
}
#[test]
fn test_needs_prev_shard_check() {
// Only needs check when rank == 0 and found_block != block_number
@@ -951,4 +1273,105 @@ mod tests {
assert!(!needs_prev_shard_check(0, Some(5), 5)); // found_block == block_number
assert!(!needs_prev_shard_check(1, Some(10), 5)); // rank > 0
}
#[test]
fn test_historical_storage_by_hashed_key_unsupported_in_v1() {
let factory = create_test_provider_factory();
assert!(!factory.provider().unwrap().cached_storage_settings().use_hashed_state);
let db = factory.provider().unwrap();
let provider = HistoricalStateProviderRef::new(&db, 1);
assert!(matches!(
provider.storage_by_hashed_key(ADDRESS, STORAGE),
Err(ProviderError::UnsupportedProvider)
));
}
#[test]
#[cfg(all(unix, feature = "rocksdb"))]
fn test_historical_storage_by_hashed_key_v2() {
use crate::BlockWriter;
use alloy_primitives::keccak256;
use reth_db_api::models::StorageSettings;
use reth_execution_types::ExecutionOutcome;
use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams};
use revm_database::BundleState;
use std::collections::HashMap;
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v2());
let slot = U256::from_be_bytes(*STORAGE);
let hashed_storage = keccak256(STORAGE);
let account: revm_state::AccountInfo =
Account { nonce: 1, balance: U256::from(1000), bytecode_hash: None }.into();
let mut rng = generators::rng();
let blocks = random_block_range(
&mut rng,
0..=5,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() },
);
let mut addr_storage = HashMap::default();
addr_storage.insert(slot, (U256::ZERO, U256::from(100)));
type Revert = Vec<(Address, Option<Option<revm_state::AccountInfo>>, Vec<(U256, U256)>)>;
let mut reverts: Vec<Revert> = vec![Vec::new(); 6];
reverts[3] = vec![(ADDRESS, Some(Some(account.clone())), vec![(slot, U256::ZERO)])];
reverts[5] = vec![(ADDRESS, Some(Some(account.clone())), vec![(slot, U256::from(50))])];
let bundle = BundleState::new([(ADDRESS, None, Some(account), addr_storage)], reverts, []);
let provider_rw = factory.provider_rw().unwrap();
provider_rw
.append_blocks_with_state(
blocks
.into_iter()
.map(|b| b.try_recover().expect("failed to seal block with senders"))
.collect(),
&ExecutionOutcome { bundle, first_block: 0, ..Default::default() },
Default::default(),
)
.unwrap();
let hashed_address = keccak256(ADDRESS);
provider_rw
.tx_ref()
.put::<tables::HashedStorages>(
hashed_address,
StorageEntry { key: hashed_storage, value: U256::from(100) },
)
.unwrap();
provider_rw
.tx_ref()
.put::<tables::HashedAccounts>(
hashed_address,
Account { nonce: 1, balance: U256::from(1000), bytecode_hash: None },
)
.unwrap();
provider_rw.commit().unwrap();
let db = factory.provider().unwrap();
assert!(matches!(
HistoricalStateProviderRef::new(&db, 0).storage_by_hashed_key(ADDRESS, hashed_storage),
Ok(None)
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 3).storage_by_hashed_key(ADDRESS, hashed_storage),
Ok(Some(U256::ZERO))
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 4).storage_by_hashed_key(ADDRESS, hashed_storage),
Ok(Some(v)) if v == U256::from(50)
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 4).storage_by_hashed_key(ADDRESS, STORAGE),
Ok(None | Some(U256::ZERO))
));
}
}

View File

@@ -4,7 +4,9 @@ use crate::{
use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256};
use reth_db_api::{cursor::DbDupCursorRO, tables, transaction::DbTx};
use reth_primitives_traits::{Account, Bytecode};
use reth_storage_api::{BytecodeReader, DBProvider, StateProofProvider, StorageRootProvider};
use reth_storage_api::{
BytecodeReader, DBProvider, StateProofProvider, StorageRootProvider, StorageSettingsCache,
};
use reth_storage_errors::provider::{ProviderError, ProviderResult};
use reth_trie::{
proof::{Proof, StorageProof},
@@ -33,12 +35,33 @@ impl<'b, Provider: DBProvider> LatestStateProviderRef<'b, Provider> {
fn tx(&self) -> &Provider::Tx {
self.0.tx_ref()
}
fn hashed_storage_lookup(
&self,
hashed_address: B256,
hashed_slot: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
let mut cursor = self.tx().cursor_dup_read::<tables::HashedStorages>()?;
Ok(cursor
.seek_by_key_subkey(hashed_address, hashed_slot)?
.filter(|e| e.key == hashed_slot)
.map(|e| e.value))
}
}
impl<Provider: DBProvider> AccountReader for LatestStateProviderRef<'_, Provider> {
impl<Provider: DBProvider + StorageSettingsCache> AccountReader
for LatestStateProviderRef<'_, Provider>
{
/// Get basic account information.
fn basic_account(&self, address: &Address) -> ProviderResult<Option<Account>> {
self.tx().get_by_encoded_key::<tables::PlainAccountState>(address).map_err(Into::into)
if self.0.cached_storage_settings().use_hashed_state {
let hashed_address = alloy_primitives::keccak256(address);
self.tx()
.get_by_encoded_key::<tables::HashedAccounts>(&hashed_address)
.map_err(Into::into)
} else {
self.tx().get_by_encoded_key::<tables::PlainAccountState>(address).map_err(Into::into)
}
}
}
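The account side routes the same way; an illustrative call, with `factory` and `address` as placeholders:

let db = factory.provider()?;
let latest = LatestStateProviderRef::new(&db);
// v2: reads HashedAccounts[keccak256(address)];
// v1: reads PlainAccountState[address].
let account = latest.basic_account(&address)?;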
@@ -148,22 +171,41 @@ impl<Provider: DBProvider> HashedPostStateProvider for LatestStateProviderRef<'_
}
}
impl<Provider: DBProvider + BlockHashReader> StateProvider
impl<Provider: DBProvider + BlockHashReader + StorageSettingsCache> StateProvider
for LatestStateProviderRef<'_, Provider>
{
/// Get storage.
/// Get storage by plain (unhashed) storage slot key.
fn storage(
&self,
account: Address,
storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
let mut cursor = self.tx().cursor_dup_read::<tables::PlainStorageState>()?;
if let Some(entry) = cursor.seek_by_key_subkey(account, storage_key)? &&
entry.key == storage_key
{
return Ok(Some(entry.value))
if self.0.cached_storage_settings().use_hashed_state {
self.hashed_storage_lookup(
alloy_primitives::keccak256(account),
alloy_primitives::keccak256(storage_key),
)
} else {
let mut cursor = self.tx().cursor_dup_read::<tables::PlainStorageState>()?;
if let Some(entry) = cursor.seek_by_key_subkey(account, storage_key)? &&
entry.key == storage_key
{
return Ok(Some(entry.value));
}
Ok(None)
}
}
fn storage_by_hashed_key(
&self,
address: Address,
hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
if self.0.cached_storage_settings().use_hashed_state {
self.hashed_storage_lookup(alloy_primitives::keccak256(address), hashed_storage_key)
} else {
Err(ProviderError::UnsupportedProvider)
}
Ok(None)
}
}
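Under v2 both entry points resolve through the same `HashedStorages` lookup, so for any slot the two reads agree (sketch; placeholders as above):

// `storage` hashes both the address and the slot itself, while
// `storage_by_hashed_key` expects the caller to pass an already-hashed slot.
assert_eq!(
    latest.storage(address, slot)?,
    latest.storage_by_hashed_key(address, keccak256(slot))?,
);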
@@ -194,15 +236,181 @@ impl<Provider: DBProvider> LatestStateProvider<Provider> {
}
// Delegates all provider impls to [LatestStateProviderRef]
reth_storage_api::macros::delegate_provider_impls!(LatestStateProvider<Provider> where [Provider: DBProvider + BlockHashReader ]);
reth_storage_api::macros::delegate_provider_impls!(LatestStateProvider<Provider> where [Provider: DBProvider + BlockHashReader + StorageSettingsCache]);
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::create_test_provider_factory;
use alloy_primitives::{address, b256, keccak256, U256};
use reth_db_api::{
models::StorageSettings,
tables,
transaction::{DbTx, DbTxMut},
};
use reth_primitives_traits::StorageEntry;
use reth_storage_api::StorageSettingsCache;
use reth_storage_errors::provider::ProviderError;
const fn assert_state_provider<T: StateProvider>() {}
#[expect(dead_code)]
const fn assert_latest_state_provider<T: DBProvider + BlockHashReader>() {
const fn assert_latest_state_provider<
T: DBProvider + BlockHashReader + StorageSettingsCache,
>() {
assert_state_provider::<LatestStateProvider<T>>();
}
#[test]
fn test_latest_storage_hashed_state() {
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v2());
let address = address!("0x0000000000000000000000000000000000000001");
let slot = b256!("0x0000000000000000000000000000000000000000000000000000000000000001");
let hashed_address = keccak256(address);
let hashed_slot = keccak256(slot);
let tx = factory.provider_rw().unwrap().into_tx();
tx.put::<tables::HashedStorages>(
hashed_address,
StorageEntry { key: hashed_slot, value: U256::from(42) },
)
.unwrap();
tx.commit().unwrap();
let db = factory.provider().unwrap();
let provider_ref = LatestStateProviderRef::new(&db);
assert_eq!(provider_ref.storage(address, slot).unwrap(), Some(U256::from(42)));
let other_address = address!("0x0000000000000000000000000000000000000099");
let other_slot =
b256!("0x0000000000000000000000000000000000000000000000000000000000000099");
assert_eq!(provider_ref.storage(other_address, other_slot).unwrap(), None);
let tx = factory.provider_rw().unwrap().into_tx();
let plain_address = address!("0x0000000000000000000000000000000000000002");
let plain_slot =
b256!("0x0000000000000000000000000000000000000000000000000000000000000002");
tx.put::<tables::PlainStorageState>(
plain_address,
StorageEntry { key: plain_slot, value: U256::from(99) },
)
.unwrap();
tx.commit().unwrap();
let db = factory.provider().unwrap();
let provider_ref = LatestStateProviderRef::new(&db);
assert_eq!(provider_ref.storage(plain_address, plain_slot).unwrap(), None);
}
#[test]
fn test_latest_storage_hashed_state_returns_none_for_missing() {
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v2());
let address = address!("0x0000000000000000000000000000000000000001");
let slot = b256!("0x0000000000000000000000000000000000000000000000000000000000000001");
let db = factory.provider().unwrap();
let provider_ref = LatestStateProviderRef::new(&db);
assert_eq!(provider_ref.storage(address, slot).unwrap(), None);
}
#[test]
fn test_latest_storage_legacy() {
let factory = create_test_provider_factory();
assert!(!factory.provider().unwrap().cached_storage_settings().use_hashed_state);
let address = address!("0x0000000000000000000000000000000000000001");
let slot = b256!("0x0000000000000000000000000000000000000000000000000000000000000005");
let tx = factory.provider_rw().unwrap().into_tx();
tx.put::<tables::PlainStorageState>(
address,
StorageEntry { key: slot, value: U256::from(42) },
)
.unwrap();
tx.commit().unwrap();
let db = factory.provider().unwrap();
let provider_ref = LatestStateProviderRef::new(&db);
assert_eq!(provider_ref.storage(address, slot).unwrap(), Some(U256::from(42)));
let other_slot =
b256!("0x0000000000000000000000000000000000000000000000000000000000000099");
assert_eq!(provider_ref.storage(address, other_slot).unwrap(), None);
}
#[test]
fn test_latest_storage_legacy_does_not_read_hashed() {
let factory = create_test_provider_factory();
assert!(!factory.provider().unwrap().cached_storage_settings().use_hashed_state);
let address = address!("0x0000000000000000000000000000000000000001");
let slot = b256!("0x0000000000000000000000000000000000000000000000000000000000000005");
let hashed_address = keccak256(address);
let hashed_slot = keccak256(slot);
let tx = factory.provider_rw().unwrap().into_tx();
tx.put::<tables::HashedStorages>(
hashed_address,
StorageEntry { key: hashed_slot, value: U256::from(42) },
)
.unwrap();
tx.commit().unwrap();
let db = factory.provider().unwrap();
let provider_ref = LatestStateProviderRef::new(&db);
assert_eq!(provider_ref.storage(address, slot).unwrap(), None);
}
#[test]
fn test_latest_storage_by_hashed_key_v2() {
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v2());
let address = address!("0x0000000000000000000000000000000000000001");
let slot = b256!("0x0000000000000000000000000000000000000000000000000000000000000001");
let hashed_address = keccak256(address);
let hashed_slot = keccak256(slot);
let tx = factory.provider_rw().unwrap().into_tx();
tx.put::<tables::HashedStorages>(
hashed_address,
StorageEntry { key: hashed_slot, value: U256::from(42) },
)
.unwrap();
tx.commit().unwrap();
let db = factory.provider().unwrap();
let provider_ref = LatestStateProviderRef::new(&db);
assert_eq!(
provider_ref.storage_by_hashed_key(address, hashed_slot).unwrap(),
Some(U256::from(42))
);
assert_eq!(provider_ref.storage_by_hashed_key(address, slot).unwrap(), None);
}
#[test]
fn test_latest_storage_by_hashed_key_unsupported_in_v1() {
let factory = create_test_provider_factory();
assert!(!factory.provider().unwrap().cached_storage_settings().use_hashed_state);
let address = address!("0x0000000000000000000000000000000000000001");
let slot = b256!("0x0000000000000000000000000000000000000000000000000000000000000001");
let db = factory.provider().unwrap();
let provider_ref = LatestStateProviderRef::new(&db);
assert!(matches!(
provider_ref.storage_by_hashed_key(address, slot),
Err(ProviderError::UnsupportedProvider)
));
}
}

View File

@@ -10,17 +10,15 @@ use reth_stages_types::StageId;
use reth_storage_api::{
BlockNumReader, ChangeSetReader, DBProvider, DatabaseProviderFactory,
DatabaseProviderROFactory, PruneCheckpointReader, StageCheckpointReader,
StorageChangeSetReader,
StorageChangeSetReader, StorageSettingsCache,
};
use reth_trie::{
hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory},
trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory},
updates::TrieUpdatesSorted,
HashedPostStateSorted, KeccakKeyHasher,
};
use reth_trie_db::{
ChangesetCache, DatabaseHashedCursorFactory, DatabaseHashedPostState, DatabaseTrieCursorFactory,
HashedPostStateSorted,
};
use reth_trie_db::{ChangesetCache, DatabaseHashedCursorFactory, DatabaseTrieCursorFactory};
use std::{
sync::Arc,
time::{Duration, Instant},
@@ -198,7 +196,8 @@ where
+ ChangeSetReader
+ StorageChangeSetReader
+ DBProvider
+ BlockNumReader,
+ BlockNumReader
+ StorageSettingsCache,
{
/// Resolves the effective overlay (trie updates, hashed state).
///
@@ -336,10 +335,7 @@ where
let _guard = debug_span!(target: "providers::state::overlay", "Retrieving hashed state reverts").entered();
let start = Instant::now();
let res = HashedPostStateSorted::from_reverts::<KeccakKeyHasher>(
provider,
from_block + 1..,
)?;
let res = reth_trie_db::from_reverts_auto(provider, from_block + 1..)?;
retrieve_hashed_state_reverts_duration = start.elapsed();
res
};
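Only the call shape of `reth_trie_db::from_reverts_auto` is visible in this hunk; a hypothetical sketch of such a mode-dispatching helper, where the legacy branch is the line removed above and the hashed branch is assumed:

fn from_reverts_auto<P>(
    provider: &P,
    range: std::ops::RangeFrom<u64>,
) -> ProviderResult<HashedPostStateSorted>
where
    P: DBProvider + ChangeSetReader + StorageChangeSetReader + StorageSettingsCache,
{
    if provider.cached_storage_settings().use_hashed_state {
        // v2: changeset keys on disk are already hashed, so reverts can be
        // collected without a re-hashing pass (details omitted in this sketch).
        todo!("collect reverts as-is")
    } else {
        // v1: hash plain keys while collecting, as the removed line did.
        HashedPostStateSorted::from_reverts::<KeccakKeyHasher>(provider, range)
    }
}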
@@ -450,7 +446,8 @@ where
+ PruneCheckpointReader
+ BlockNumReader
+ ChangeSetReader
+ StorageChangeSetReader,
+ StorageChangeSetReader
+ StorageSettingsCache,
{
type Provider = OverlayStateProvider<F::Provider>;

View File

@@ -34,7 +34,7 @@ use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION};
use reth_node_types::NodePrimitives;
use reth_primitives_traits::{
dashmap::DashMap, AlloyBlockHeader as _, BlockBody as _, RecoveredBlock, SealedHeader,
SignedTransaction, StorageEntry,
SignedTransaction, StorageSlotKey,
};
use reth_prune_types::PruneSegment;
use reth_stages_types::PipelineTarget;
@@ -43,7 +43,7 @@ use reth_static_file_types::{
SegmentRangeInclusive, StaticFileMap, StaticFileSegment, DEFAULT_BLOCKS_PER_STATIC_FILE,
};
use reth_storage_api::{
BlockBodyIndicesProvider, ChangeSetReader, DBProvider, PruneCheckpointReader,
BlockBodyIndicesProvider, ChangeSetReader, ChangesetEntry, DBProvider, PruneCheckpointReader,
StorageChangeSetReader, StorageSettingsCache,
};
use reth_storage_errors::provider::{ProviderError, ProviderResult, StaticFileWriterError};
@@ -643,7 +643,7 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
revert.storage_revert.into_iter().map(move |(key, revert_to_slot)| {
StorageBeforeTx {
address: revert.address,
key: B256::new(key.to_be_bytes()),
key: StorageSlotKey::from_u256(key).to_hashed(),
value: revert_to_slot.to_previous_value(),
}
})
@@ -2520,7 +2520,7 @@ impl<N: NodePrimitives> StorageChangeSetReader for StaticFileProvider<N> {
fn storage_changeset(
&self,
block_number: BlockNumber,
) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
) -> ProviderResult<Vec<(BlockNumberAddress, ChangesetEntry)>> {
let provider = match self.get_segment_provider_for_block(
StaticFileSegment::StorageChangeSets,
block_number,
@@ -2538,7 +2538,10 @@ impl<N: NodePrimitives> StorageChangeSetReader for StaticFileProvider<N> {
for i in offset.changeset_range() {
if let Some(change) = cursor.get_one::<StorageChangesetMask>(i.into())? {
let block_address = BlockNumberAddress((block_number, change.address));
let entry = StorageEntry { key: change.key, value: change.value };
let entry = ChangesetEntry {
key: StorageSlotKey::hashed(change.key),
value: change.value,
};
changeset.push((block_address, entry));
}
}
@@ -2553,7 +2556,7 @@ impl<N: NodePrimitives> StorageChangeSetReader for StaticFileProvider<N> {
block_number: BlockNumber,
address: Address,
storage_key: B256,
) -> ProviderResult<Option<StorageEntry>> {
) -> ProviderResult<Option<ChangesetEntry>> {
let provider = match self.get_segment_provider_for_block(
StaticFileSegment::StorageChangeSets,
block_number,
@@ -2602,7 +2605,10 @@ impl<N: NodePrimitives> StorageChangeSetReader for StaticFileProvider<N> {
.get_one::<StorageChangesetMask>(low.into())?
.filter(|change| change.address == address && change.key == storage_key)
{
return Ok(Some(StorageEntry { key: change.key, value: change.value }));
return Ok(Some(ChangesetEntry {
key: StorageSlotKey::hashed(change.key),
value: change.value,
}));
}
Ok(None)
@@ -2611,7 +2617,7 @@ impl<N: NodePrimitives> StorageChangeSetReader for StaticFileProvider<N> {
fn storage_changesets_range(
&self,
range: impl RangeBounds<BlockNumber>,
) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
) -> ProviderResult<Vec<(BlockNumberAddress, ChangesetEntry)>> {
let range = self.bound_range(range, StaticFileSegment::StorageChangeSets);
self.walk_storage_changeset_range(range).collect()
}

View File

@@ -1170,13 +1170,13 @@ mod tests {
let result = sf_rw.get_storage_before_block(0, test_address, test_key).unwrap();
assert!(result.is_some());
let entry = result.unwrap();
assert_eq!(entry.key, test_key);
assert_eq!(entry.key.as_b256(), test_key);
assert_eq!(entry.value, U256::ZERO);
let result = sf_rw.get_storage_before_block(2, test_address, test_key).unwrap();
assert!(result.is_some());
let entry = result.unwrap();
assert_eq!(entry.key, test_key);
assert_eq!(entry.key.as_b256(), test_key);
assert_eq!(entry.value, U256::from(9));
let result = sf_rw.get_storage_before_block(1, test_address, test_key).unwrap();
@@ -1188,7 +1188,7 @@ mod tests {
let result = sf_rw.get_storage_before_block(1, other_address, other_key).unwrap();
assert!(result.is_some());
let entry = result.unwrap();
assert_eq!(entry.key, other_key);
assert_eq!(entry.key.as_b256(), other_key);
}
}
@@ -1334,20 +1334,20 @@ mod tests {
let result = sf_rw.get_storage_before_block(block_num, address, keys[0]).unwrap();
assert!(result.is_some());
let entry = result.unwrap();
assert_eq!(entry.key, keys[0]);
assert_eq!(entry.key.as_b256(), keys[0]);
assert_eq!(entry.value, U256::from(0));
let result =
sf_rw.get_storage_before_block(block_num, address, keys[num_slots - 1]).unwrap();
assert!(result.is_some());
let entry = result.unwrap();
assert_eq!(entry.key, keys[num_slots - 1]);
assert_eq!(entry.key.as_b256(), keys[num_slots - 1]);
let mid = num_slots / 2;
let result = sf_rw.get_storage_before_block(block_num, address, keys[mid]).unwrap();
assert!(result.is_some());
let entry = result.unwrap();
assert_eq!(entry.key, keys[mid]);
assert_eq!(entry.key.as_b256(), keys[mid]);
let missing_key = B256::with_last_byte(255);
let result = sf_rw.get_storage_before_block(block_num, address, missing_key).unwrap();
@@ -1356,7 +1356,7 @@ mod tests {
for i in (0..num_slots).step_by(10) {
let result = sf_rw.get_storage_before_block(block_num, address, keys[i]).unwrap();
assert!(result.is_some());
assert_eq!(result.unwrap().key, keys[i]);
assert_eq!(result.unwrap().key.as_b256(), keys[i]);
}
}
}

View File

@@ -22,20 +22,20 @@ use reth_chainspec::{ChainInfo, EthChainSpec};
use reth_db::transaction::DbTx;
use reth_db_api::{
mock::{DatabaseMock, TxMock},
models::{AccountBeforeTx, StoredBlockBodyIndices},
models::{AccountBeforeTx, StorageSettings, StoredBlockBodyIndices},
};
use reth_ethereum_primitives::EthPrimitives;
use reth_execution_types::ExecutionOutcome;
use reth_primitives_traits::{
Account, Block, BlockBody, Bytecode, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader,
SignerRecoverable, StorageEntry,
SignerRecoverable,
};
use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment};
use reth_stages_types::{StageCheckpoint, StageId};
use reth_storage_api::{
BlockBodyIndicesProvider, BytecodeReader, DBProvider, DatabaseProviderFactory,
BlockBodyIndicesProvider, BytecodeReader, ChangesetEntry, DBProvider, DatabaseProviderFactory,
HashedPostStateProvider, NodePrimitivesProvider, StageCheckpointReader, StateProofProvider,
StorageChangeSetReader, StorageRootProvider,
StorageChangeSetReader, StorageRootProvider, StorageSettingsCache,
};
use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult};
use reth_trie::{
@@ -883,6 +883,14 @@ where
let lock = self.accounts.lock();
Ok(lock.get(&account).and_then(|account| account.storage.get(&storage_key)).copied())
}
fn storage_by_hashed_key(
&self,
_address: Address,
_hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
Ok(None)
}
}
impl<T, ChainSpec> BytecodeReader for MockEthProvider<T, ChainSpec>
@@ -903,6 +911,16 @@ where
}
}
impl<T: NodePrimitives, ChainSpec: Send + Sync> StorageSettingsCache
for MockEthProvider<T, ChainSpec>
{
fn cached_storage_settings(&self) -> StorageSettings {
StorageSettings::default()
}
fn set_storage_settings_cache(&self, _settings: StorageSettings) {}
}
impl<T: NodePrimitives, ChainSpec: EthChainSpec + Send + Sync + 'static> StateProviderFactory
for MockEthProvider<T, ChainSpec>
{
@@ -1011,7 +1029,7 @@ impl<T: NodePrimitives, ChainSpec: Send + Sync> StorageChangeSetReader
fn storage_changeset(
&self,
_block_number: BlockNumber,
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, StorageEntry)>> {
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, ChangesetEntry)>> {
Ok(Vec::default())
}
@@ -1020,14 +1038,14 @@ impl<T: NodePrimitives, ChainSpec: Send + Sync> StorageChangeSetReader
_block_number: BlockNumber,
_address: Address,
_storage_key: B256,
) -> ProviderResult<Option<StorageEntry>> {
) -> ProviderResult<Option<ChangesetEntry>> {
Ok(None)
}
fn storage_changesets_range(
&self,
_range: impl RangeBounds<BlockNumber>,
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, StorageEntry)>> {
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, ChangesetEntry)>> {
Ok(Vec::default())
}

View File

@@ -10,7 +10,7 @@ use reth_chain_state::{
CanonStateSubscriptions, ForkChoiceSubscriptions, PersistedBlockSubscriptions,
};
use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy};
use reth_storage_api::{NodePrimitivesProvider, StorageChangeSetReader};
use reth_storage_api::{NodePrimitivesProvider, StorageChangeSetReader, StorageSettingsCache};
use std::fmt::Debug;
/// Helper trait to unify all provider traits for simplicity.
@@ -21,7 +21,8 @@ pub trait FullProvider<N: NodeTypesWithDB>:
+ StageCheckpointReader
+ PruneCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader,
+ StorageChangeSetReader
+ StorageSettingsCache,
> + NodePrimitivesProvider<Primitives = N::Primitives>
+ StaticFileProviderFactory<Primitives = N::Primitives>
+ RocksDBProviderFactory
@@ -55,7 +56,8 @@ impl<T, N: NodeTypesWithDB> FullProvider<N> for T where
+ StageCheckpointReader
+ PruneCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader,
+ StorageChangeSetReader
+ StorageSettingsCache,
> + NodePrimitivesProvider<Primitives = N::Primitives>
+ StaticFileProviderFactory<Primitives = N::Primitives>
+ RocksDBProviderFactory

View File

@@ -1081,6 +1081,14 @@ where
})
}
fn storage_by_hashed_key(
&self,
_address: Address,
_hashed_storage_key: StorageKey,
) -> Result<Option<U256>, ProviderError> {
Err(ProviderError::UnsupportedProvider)
}
fn account_code(&self, addr: &Address) -> Result<Option<Bytecode>, ProviderError> {
self.block_on_async(async {
let code = self

View File

@@ -1,3 +1,4 @@
use crate::ChangesetEntry;
use alloc::collections::{BTreeMap, BTreeSet};
use alloy_primitives::{map::B256Map, Address, BlockNumber, B256};
use auto_impl::auto_impl;
@@ -47,7 +48,7 @@ pub trait HashingWriter: Send {
/// Mapping of hashed keys of updated accounts to their respective updated hashed slots.
fn unwind_storage_hashing(
&self,
changesets: impl Iterator<Item = (BlockNumberAddress, StorageEntry)>,
changesets: impl Iterator<Item = (BlockNumberAddress, ChangesetEntry)>,
) -> ProviderResult<B256Map<BTreeSet<B256>>>;
/// Unwind and clear storage hashing in a given block range.

View File

@@ -1,9 +1,9 @@
use crate::ChangesetEntry;
use alloy_primitives::{Address, BlockNumber, B256};
use auto_impl::auto_impl;
use core::ops::{RangeBounds, RangeInclusive};
use reth_db_api::models::BlockNumberAddress;
use reth_db_models::AccountBeforeTx;
use reth_primitives_traits::StorageEntry;
use reth_storage_errors::provider::ProviderResult;
/// History Writer
@@ -36,7 +36,7 @@ pub trait HistoryWriter: Send {
/// Returns number of changesets walked.
fn unwind_storage_history_indices(
&self,
changesets: impl Iterator<Item = (BlockNumberAddress, StorageEntry)>,
changesets: impl Iterator<Item = (BlockNumberAddress, ChangesetEntry)>,
) -> ProviderResult<usize>;
/// Unwind and clear storage history indices in a given block range.

View File

@@ -41,6 +41,7 @@ macro_rules! delegate_provider_impls {
}
StateProvider $(where [$($generics)*])? {
fn storage(&self, account: alloy_primitives::Address, storage_key: alloy_primitives::StorageKey) -> reth_storage_api::errors::provider::ProviderResult<Option<alloy_primitives::StorageValue>>;
fn storage_by_hashed_key(&self, address: alloy_primitives::Address, hashed_storage_key: alloy_primitives::StorageKey) -> reth_storage_api::errors::provider::ProviderResult<Option<alloy_primitives::StorageValue>>;
}
BytecodeReader $(where [$($generics)*])? {
fn bytecode_by_hash(&self, code_hash: &alloy_primitives::B256) -> reth_storage_api::errors::provider::ProviderResult<Option<reth_primitives_traits::Bytecode>>;

View File

@@ -10,7 +10,7 @@ use crate::{
};
#[cfg(feature = "db-api")]
use crate::{DBProvider, DatabaseProviderFactory, StorageChangeSetReader};
use crate::{DBProvider, DatabaseProviderFactory, StorageChangeSetReader, StorageSettingsCache};
use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec};
use alloy_consensus::transaction::TransactionMeta;
use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag};
@@ -413,9 +413,7 @@ impl<C: Send + Sync, N: NodePrimitives> StorageChangeSetReader for NoopProvider<
fn storage_changeset(
&self,
_block_number: BlockNumber,
) -> ProviderResult<
Vec<(reth_db_api::models::BlockNumberAddress, reth_primitives_traits::StorageEntry)>,
> {
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, crate::ChangesetEntry)>> {
Ok(Vec::default())
}
@@ -424,16 +422,14 @@ impl<C: Send + Sync, N: NodePrimitives> StorageChangeSetReader for NoopProvider<
_block_number: BlockNumber,
_address: Address,
_storage_key: B256,
) -> ProviderResult<Option<reth_primitives_traits::StorageEntry>> {
) -> ProviderResult<Option<crate::ChangesetEntry>> {
Ok(None)
}
fn storage_changesets_range(
&self,
_range: impl core::ops::RangeBounds<BlockNumber>,
) -> ProviderResult<
Vec<(reth_db_api::models::BlockNumberAddress, reth_primitives_traits::StorageEntry)>,
> {
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, crate::ChangesetEntry)>> {
Ok(Vec::default())
}
@@ -542,6 +538,14 @@ impl<C: Send + Sync, N: NodePrimitives> StateProvider for NoopProvider<C, N> {
) -> ProviderResult<Option<StorageValue>> {
Ok(None)
}
fn storage_by_hashed_key(
&self,
_account: Address,
_hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
Err(ProviderError::UnsupportedProvider)
}
}
impl<C: Send + Sync, N: NodePrimitives> BytecodeReader for NoopProvider<C, N> {
@@ -695,3 +699,12 @@ impl<ChainSpec: Send + Sync, N: NodePrimitives> DatabaseProviderFactory
Ok(self.clone())
}
}
#[cfg(feature = "db-api")]
impl<ChainSpec: Send + Sync, N: Send + Sync> StorageSettingsCache for NoopProvider<ChainSpec, N> {
fn cached_storage_settings(&self) -> reth_db_api::models::StorageSettings {
reth_db_api::models::StorageSettings::default()
}
fn set_storage_settings_cache(&self, _settings: reth_db_api::models::StorageSettings) {}
}

View File

@@ -41,12 +41,27 @@ pub trait StateProvider:
+ HashedPostStateProvider
{
/// Get storage of the given account.
///
/// When `use_hashed_state` is enabled, the `account` and `storage_key` are hashed internally
/// before lookup. Callers must pass **unhashed** (plain) values.
fn storage(
&self,
account: Address,
storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>>;
/// Get storage using a pre-hashed storage key.
///
/// Unlike [`Self::storage`], `hashed_storage_key` must already be keccak256-hashed.
/// The `address` remains unhashed (plain) since history indices are keyed by plain address.
/// This is used when changeset keys are pre-hashed (e.g., in `use_hashed_state` mode)
/// to avoid double-hashing.
fn storage_by_hashed_key(
&self,
address: Address,
hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>>;
/// Get account code by its address.
///
/// Returns `None` if the account doesn't exist or account is not a contract
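For intuition, here is a minimal sketch of how a caller might exercise both lookup paths. `read_slot_both_ways` is a hypothetical helper, and the two results agreeing assumes a provider operating in `use_hashed_state` mode, where `storage` hashes the plain key internally:
```rust
use alloy_primitives::{keccak256, Address, StorageKey, StorageValue};
use reth_storage_api::StateProvider;
use reth_storage_errors::provider::ProviderResult;

/// Hypothetical helper: reads the same slot through both trait methods.
fn read_slot_both_ways<P: StateProvider>(
    provider: &P,
    address: Address,
    plain_key: StorageKey,
) -> ProviderResult<(Option<StorageValue>, Option<StorageValue>)> {
    // Plain-key path: any hashing happens inside the provider.
    let via_plain = provider.storage(address, plain_key)?;
    // Pre-hashed path: the caller hashes the slot key once; the address
    // stays plain because history indices are keyed by plain address.
    let via_hashed = provider.storage_by_hashed_key(address, keccak256(plain_key))?;
    Ok((via_plain, via_hashed))
}
```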

View File

@@ -2,11 +2,38 @@ use alloc::{
collections::{BTreeMap, BTreeSet},
vec::Vec,
};
use alloy_primitives::{Address, BlockNumber, B256};
use alloy_primitives::{Address, BlockNumber, B256, U256};
use core::ops::{RangeBounds, RangeInclusive};
use reth_primitives_traits::StorageEntry;
use reth_primitives_traits::{StorageEntry, StorageSlotKey};
use reth_storage_errors::provider::ProviderResult;
/// A storage changeset entry whose key is tagged as [`StorageSlotKey::Plain`] or
/// [`StorageSlotKey::Hashed`] by the reader that produced it.
///
/// Unlike [`StorageEntry`] (the raw DB row type with an untagged `B256` key),
/// this type carries provenance so downstream code can call
/// [`StorageSlotKey::to_hashed`] without consulting `StorageSettings`.
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct ChangesetEntry {
/// Storage slot key, tagged with its hashing status.
pub key: StorageSlotKey,
/// Value at this storage slot before the change.
pub value: U256,
}
impl ChangesetEntry {
/// Convert to a raw [`StorageEntry`] (drops the tag).
pub const fn into_storage_entry(self) -> StorageEntry {
StorageEntry { key: self.key.as_b256(), value: self.value }
}
}
impl From<ChangesetEntry> for StorageEntry {
fn from(e: ChangesetEntry) -> Self {
e.into_storage_entry()
}
}
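As a rough illustration of the tagging semantics (assuming, as the reader code elsewhere in this change implies, that `to_hashed` applies keccak256 exactly once over a `Plain` key's big-endian representation and is a no-op for a `Hashed` key):
```rust
use alloy_primitives::{keccak256, B256, U256};
use reth_primitives_traits::{StorageEntry, StorageSlotKey};
use reth_storage_api::ChangesetEntry;

fn main() {
    let slot = B256::from(U256::from(11));

    // Entry as a v1 (plain-key) reader would produce it.
    let plain = ChangesetEntry {
        key: StorageSlotKey::from_u256(U256::from(11)),
        value: U256::from(100),
    };
    // Entry as a v2 (hashed-key) reader would produce it.
    let hashed = ChangesetEntry {
        key: StorageSlotKey::hashed(keccak256(slot)),
        value: U256::from(100),
    };

    // Both converge on the same hashed key without double-hashing,
    // and no `StorageSettings` lookup is needed.
    assert_eq!(plain.key.to_hashed(), keccak256(slot));
    assert_eq!(hashed.key.to_hashed(), plain.key.to_hashed());

    // Dropping the tag yields the raw DB row type.
    let raw: StorageEntry = plain.into_storage_entry();
    assert_eq!(raw.value, U256::from(100));
}
```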
/// Storage reader
#[auto_impl::auto_impl(&, Box)]
pub trait StorageReader: Send {
@@ -37,26 +64,35 @@ pub trait StorageReader: Send {
#[auto_impl::auto_impl(&, Box)]
pub trait StorageChangeSetReader: Send {
/// Iterate over storage changesets and return the storage state from before this block.
///
/// Returned entries have their keys tagged as [`StorageSlotKey::Plain`] or
/// [`StorageSlotKey::Hashed`] based on the current storage mode.
fn storage_changeset(
&self,
block_number: BlockNumber,
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, StorageEntry)>>;
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, ChangesetEntry)>>;
/// Search the block's changesets for the given address and storage key, and return the result.
///
/// The `storage_key` must match the key format used by the storage mode
/// (plain in v1, keccak256-hashed in v2).
///
/// Returns `None` if the storage slot was not changed in this block.
fn get_storage_before_block(
&self,
block_number: BlockNumber,
address: Address,
storage_key: B256,
) -> ProviderResult<Option<StorageEntry>>;
) -> ProviderResult<Option<ChangesetEntry>>;
/// Get all storage changesets in a range of blocks.
///
/// Returned entries have their keys tagged as [`StorageSlotKey::Plain`] or
/// [`StorageSlotKey::Hashed`] based on the current storage mode.
fn storage_changesets_range(
&self,
range: impl RangeBounds<BlockNumber>,
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, StorageEntry)>>;
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, ChangesetEntry)>>;
/// Get the total count of all storage changes.
fn storage_changeset_count(&self) -> ProviderResult<usize>;
@@ -73,7 +109,7 @@ pub trait StorageChangeSetReader: Send {
.into_iter()
.map(|(block_address, entry)| reth_db_models::StorageBeforeTx {
address: block_address.address(),
key: entry.key,
key: entry.key.as_b256(),
value: entry.value,
})
.collect()
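A sketch of a mode-aware lookup follows; `storage_before` is a hypothetical helper, and it assumes the active key format is discoverable via `StorageSettingsCache`, as the trait bounds elsewhere in this change suggest:
```rust
use alloy_primitives::{keccak256, Address, BlockNumber, B256, U256};
use reth_storage_api::{StorageChangeSetReader, StorageSettingsCache};
use reth_storage_errors::provider::ProviderResult;

/// Hypothetical helper: queries a pre-state value with the key format
/// matching the current storage mode (plain in v1, keccak256-hashed in v2).
fn storage_before<P>(
    provider: &P,
    block: BlockNumber,
    address: Address,
    plain_key: B256,
) -> ProviderResult<Option<U256>>
where
    P: StorageChangeSetReader + StorageSettingsCache,
{
    let key = if provider.cached_storage_settings().use_hashed_state {
        keccak256(plain_key)
    } else {
        plain_key
    };
    Ok(provider.get_storage_before_block(block, address, key)?.map(|entry| entry.value))
}
```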

View File

@@ -16,3 +16,20 @@ impl KeyHasher for KeccakKeyHasher {
keccak256(bytes)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_keccak_key_hasher_always_hashes_regardless_of_length() {
use alloy_primitives::Address;
let addr = Address::repeat_byte(0x42);
assert_eq!(KeccakKeyHasher::hash_key(addr), keccak256(addr));
let slot = B256::repeat_byte(0x42);
assert_eq!(KeccakKeyHasher::hash_key(slot), keccak256(slot));
assert_ne!(KeccakKeyHasher::hash_key(slot), slot);
}
}

View File

@@ -7,17 +7,18 @@
//! - **Reorg support**: Quickly access changesets to revert blocks during chain reorganizations
//! - **Memory efficiency**: Automatic eviction ensures bounded memory usage
use crate::{DatabaseHashedPostState, DatabaseStateRoot, DatabaseTrieCursorFactory};
use crate::{DatabaseStateRoot, DatabaseTrieCursorFactory};
use alloy_primitives::{map::B256Map, BlockNumber, B256};
use parking_lot::RwLock;
use reth_storage_api::{
BlockNumReader, ChangeSetReader, DBProvider, StageCheckpointReader, StorageChangeSetReader,
StorageSettingsCache,
};
use reth_storage_errors::provider::{ProviderError, ProviderResult};
use reth_trie::{
changesets::compute_trie_changesets,
trie_cursor::{InMemoryTrieCursorFactory, TrieCursor, TrieCursorFactory},
HashedPostStateSorted, KeccakKeyHasher, StateRoot, TrieInputSorted,
StateRoot, TrieInputSorted,
};
use reth_trie_common::updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted};
use std::{collections::BTreeMap, ops::RangeInclusive, sync::Arc, time::Instant};
@@ -66,7 +67,8 @@ where
+ StageCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader,
+ BlockNumReader
+ StorageSettingsCache,
{
debug!(
target: "trie::changeset_cache",
@@ -77,14 +79,11 @@ where
// Step 1: Collect/calculate state reverts
// This is just the changes from this specific block
let individual_state_revert = HashedPostStateSorted::from_reverts::<KeccakKeyHasher>(
provider,
block_number..=block_number,
)?;
let individual_state_revert =
crate::state::from_reverts_auto(provider, block_number..=block_number)?;
// This reverts all changes from db tip back to just after block was processed
let cumulative_state_revert =
HashedPostStateSorted::from_reverts::<KeccakKeyHasher>(provider, (block_number + 1)..)?;
let cumulative_state_revert = crate::state::from_reverts_auto(provider, (block_number + 1)..)?;
// This reverts all changes from db tip back to just after block-1 was processed
let mut cumulative_state_revert_prev = cumulative_state_revert.clone();
@@ -180,7 +179,8 @@ where
+ StageCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader,
+ BlockNumReader
+ StorageSettingsCache,
{
let tx = provider.tx_ref();
@@ -334,7 +334,8 @@ impl ChangesetCache {
+ StageCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader,
+ BlockNumReader
+ StorageSettingsCache,
{
// Try cache first (with read lock)
{
@@ -423,7 +424,8 @@ impl ChangesetCache {
+ StageCheckpointReader
+ ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader,
+ BlockNumReader
+ StorageSettingsCache,
{
// Get the database tip block number
let db_tip_block = provider

View File

@@ -17,7 +17,7 @@ pub use hashed_cursor::{
};
pub use prefix_set::load_prefix_sets_with_provider;
pub use proof::{DatabaseProof, DatabaseStorageProof};
pub use state::{DatabaseHashedPostState, DatabaseStateRoot};
pub use state::{from_reverts_auto, DatabaseHashedPostState, DatabaseStateRoot};
pub use storage::{hashed_storage_from_reverts_with_provider, DatabaseStorageRoot};
pub use trie_cursor::{
DatabaseAccountTrieCursor, DatabaseStorageTrieCursor, DatabaseTrieCursorFactory,

View File

@@ -1,4 +1,5 @@
use alloy_primitives::{
keccak256,
map::{HashMap, HashSet},
BlockNumber, B256,
};
@@ -9,23 +10,26 @@ use reth_db_api::{
tables,
transaction::DbTx,
};
use reth_primitives_traits::StorageEntry;
use reth_storage_api::{ChangeSetReader, DBProvider, StorageChangeSetReader};
use reth_storage_errors::provider::ProviderError;
use reth_trie::{
prefix_set::{PrefixSetMut, TriePrefixSets},
KeyHasher, Nibbles,
Nibbles,
};
/// Load prefix sets using a provider that implements [`ChangeSetReader`]. This function can read
/// changesets from both static files and the database.
pub fn load_prefix_sets_with_provider<Provider, KH>(
///
/// Storage keys from changesets are tagged as
/// [`Plain`](reth_primitives_traits::StorageSlotKey::Plain)
/// or [`Hashed`](reth_primitives_traits::StorageSlotKey::Hashed) by the reader, so callers need
/// not pass a `use_hashed_state` flag. Addresses are always hashed.
pub fn load_prefix_sets_with_provider<Provider>(
provider: &Provider,
range: RangeInclusive<BlockNumber>,
) -> Result<TriePrefixSets, ProviderError>
where
Provider: ChangeSetReader + StorageChangeSetReader + DBProvider,
KH: KeyHasher,
{
let tx = provider.tx_ref();
@@ -41,7 +45,7 @@ where
let mut account_hashed_state_cursor = tx.cursor_read::<tables::HashedAccounts>()?;
for (_, AccountBeforeTx { address, .. }) in account_changesets {
let hashed_address = KH::hash_key(address);
let hashed_address = keccak256(address);
account_prefix_set.insert(Nibbles::unpack(hashed_address));
if account_hashed_state_cursor.seek_exact(hashed_address)?.is_none() {
@@ -51,13 +55,13 @@ where
// Walk storage changesets using the provider (handles static files + database)
let storage_changesets = provider.storage_changesets_range(range)?;
for (BlockNumberAddress((_, address)), StorageEntry { key, .. }) in storage_changesets {
let hashed_address = KH::hash_key(address);
for (BlockNumberAddress((_, address)), storage_entry) in storage_changesets {
let hashed_address = keccak256(address);
account_prefix_set.insert(Nibbles::unpack(hashed_address));
storage_prefix_sets
.entry(hashed_address)
.or_default()
.insert(Nibbles::unpack(KH::hash_key(key)));
.insert(Nibbles::unpack(storage_entry.key.to_hashed()));
}
Ok(TriePrefixSets {

View File

@@ -1,18 +1,18 @@
use crate::{
load_prefix_sets_with_provider, DatabaseHashedCursorFactory, DatabaseTrieCursorFactory,
};
use alloy_primitives::{map::B256Map, BlockNumber, B256};
use crate::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory};
use alloy_primitives::{keccak256, map::B256Map, BlockNumber, B256};
use reth_db_api::{
models::{AccountBeforeTx, BlockNumberAddress},
transaction::DbTx,
};
use reth_execution_errors::StateRootError;
use reth_storage_api::{BlockNumReader, ChangeSetReader, DBProvider, StorageChangeSetReader};
use reth_storage_api::{
BlockNumReader, ChangeSetReader, DBProvider, StorageChangeSetReader, StorageSettingsCache,
};
use reth_storage_errors::provider::ProviderError;
use reth_trie::{
hashed_cursor::HashedPostStateCursorFactory, trie_cursor::InMemoryTrieCursorFactory,
updates::TrieUpdates, HashedPostStateSorted, HashedStorageSorted, KeccakKeyHasher, KeyHasher,
StateRoot, StateRootProgress, TrieInputSorted,
updates::TrieUpdates, HashedPostStateSorted, HashedStorageSorted, StateRoot, StateRootProgress,
TrieInputSorted,
};
use std::{
collections::HashSet,
@@ -32,7 +32,10 @@ pub trait DatabaseStateRoot<'a, TX>: Sized {
///
/// An instance of state root calculator with account and storage prefixes loaded.
fn incremental_root_calculator(
provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider<Tx = TX>),
provider: &'a (impl ChangeSetReader
+ StorageChangeSetReader
+ StorageSettingsCache
+ DBProvider<Tx = TX>),
range: RangeInclusive<BlockNumber>,
) -> Result<Self, StateRootError>;
@@ -43,7 +46,10 @@ pub trait DatabaseStateRoot<'a, TX>: Sized {
///
/// The updated state root.
fn incremental_root(
provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider<Tx = TX>),
provider: &'a (impl ChangeSetReader
+ StorageChangeSetReader
+ StorageSettingsCache
+ DBProvider<Tx = TX>),
range: RangeInclusive<BlockNumber>,
) -> Result<B256, StateRootError>;
@@ -56,7 +62,10 @@ pub trait DatabaseStateRoot<'a, TX>: Sized {
///
/// The updated state root and the trie updates.
fn incremental_root_with_updates(
provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider<Tx = TX>),
provider: &'a (impl ChangeSetReader
+ StorageChangeSetReader
+ StorageSettingsCache
+ DBProvider<Tx = TX>),
range: RangeInclusive<BlockNumber>,
) -> Result<(B256, TrieUpdates), StateRootError>;
@@ -67,7 +76,10 @@ pub trait DatabaseStateRoot<'a, TX>: Sized {
///
/// The intermediate progress of state root computation.
fn incremental_root_with_progress(
provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider<Tx = TX>),
provider: &'a (impl ChangeSetReader
+ StorageChangeSetReader
+ StorageSettingsCache
+ DBProvider<Tx = TX>),
range: RangeInclusive<BlockNumber>,
) -> Result<StateRootProgress, StateRootError>;
@@ -130,7 +142,12 @@ pub trait DatabaseStateRoot<'a, TX>: Sized {
pub trait DatabaseHashedPostState: Sized {
/// Initializes [`HashedPostStateSorted`] from reverts. Iterates over state reverts in the
/// specified range and aggregates them into sorted hashed state.
fn from_reverts<KH: KeyHasher>(
///
/// Storage keys from changesets are tagged as
/// [`Plain`](reth_primitives_traits::StorageSlotKey::Plain) or
/// [`Hashed`](reth_primitives_traits::StorageSlotKey::Hashed) by the reader, so no
/// `use_hashed_state` flag is needed. Addresses are always hashed.
fn from_reverts(
provider: &(impl ChangeSetReader + StorageChangeSetReader + BlockNumReader + DBProvider),
range: impl RangeBounds<BlockNumber>,
) -> Result<HashedPostStateSorted, ProviderError>;
@@ -144,16 +161,22 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX>
}
fn incremental_root_calculator(
provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider<Tx = TX>),
provider: &'a (impl ChangeSetReader
+ StorageChangeSetReader
+ StorageSettingsCache
+ DBProvider<Tx = TX>),
range: RangeInclusive<BlockNumber>,
) -> Result<Self, StateRootError> {
let loaded_prefix_sets =
load_prefix_sets_with_provider::<_, KeccakKeyHasher>(provider, range)?;
crate::prefix_set::load_prefix_sets_with_provider(provider, range)?;
Ok(Self::from_tx(provider.tx_ref()).with_prefix_sets(loaded_prefix_sets))
}
fn incremental_root(
provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider<Tx = TX>),
provider: &'a (impl ChangeSetReader
+ StorageChangeSetReader
+ StorageSettingsCache
+ DBProvider<Tx = TX>),
range: RangeInclusive<BlockNumber>,
) -> Result<B256, StateRootError> {
debug!(target: "trie::loader", ?range, "incremental state root");
@@ -161,7 +184,10 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX>
}
fn incremental_root_with_updates(
provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider<Tx = TX>),
provider: &'a (impl ChangeSetReader
+ StorageChangeSetReader
+ StorageSettingsCache
+ DBProvider<Tx = TX>),
range: RangeInclusive<BlockNumber>,
) -> Result<(B256, TrieUpdates), StateRootError> {
debug!(target: "trie::loader", ?range, "incremental state root");
@@ -169,7 +195,10 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX>
}
fn incremental_root_with_progress(
provider: &'a (impl ChangeSetReader + StorageChangeSetReader + DBProvider<Tx = TX>),
provider: &'a (impl ChangeSetReader
+ StorageChangeSetReader
+ StorageSettingsCache
+ DBProvider<Tx = TX>),
range: RangeInclusive<BlockNumber>,
) -> Result<StateRootProgress, StateRootError> {
debug!(target: "trie::loader", ?range, "incremental state root with progress");
@@ -236,6 +265,21 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX>
}
}
/// Calls [`HashedPostStateSorted::from_reverts`].
///
/// This is a convenience wrapper kept for backward compatibility. The storage
/// key tagging is now handled internally by the changeset reader.
pub fn from_reverts_auto(
provider: &(impl ChangeSetReader
+ StorageChangeSetReader
+ BlockNumReader
+ DBProvider
+ StorageSettingsCache),
range: impl RangeBounds<BlockNumber>,
) -> Result<HashedPostStateSorted, ProviderError> {
HashedPostStateSorted::from_reverts(provider, range)
}
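A minimal usage sketch mirroring the overlay call site above; `overlay_above` is a hypothetical wrapper and its trait bounds simply restate those of `from_reverts_auto`:
```rust
use alloy_primitives::BlockNumber;
use reth_storage_api::{
    BlockNumReader, ChangeSetReader, DBProvider, StorageChangeSetReader, StorageSettingsCache,
};
use reth_storage_errors::provider::ProviderError;
use reth_trie::HashedPostStateSorted;

/// Hypothetical wrapper: builds the hashed-state overlay that reverts every
/// block above `block`, independent of the active storage mode.
fn overlay_above<P>(
    provider: &P,
    block: BlockNumber,
) -> Result<HashedPostStateSorted, ProviderError>
where
    P: ChangeSetReader + StorageChangeSetReader + BlockNumReader + DBProvider + StorageSettingsCache,
{
    // The changeset reader tags keys as Plain or Hashed itself, so no
    // `use_hashed_state` flag (or `KeyHasher` type parameter) is threaded through.
    reth_trie_db::from_reverts_auto(provider, block + 1..)
}
```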
impl DatabaseHashedPostState for HashedPostStateSorted {
/// Builds a sorted hashed post-state from reverts.
///
@@ -243,9 +287,12 @@ impl DatabaseHashedPostState for HashedPostStateSorted {
/// This avoids intermediate `HashMap` allocations since MDBX data is already sorted.
///
/// - Reads the first occurrence of each changed account/storage slot in the range.
/// - Hashes keys and returns them already ordered for trie iteration.
/// - Addresses are always keccak256-hashed.
/// - Storage keys are tagged by the changeset reader and hashed via
/// [`StorageSlotKey::to_hashed`](reth_primitives_traits::StorageSlotKey::to_hashed).
/// - Returns keys already ordered for trie iteration.
#[instrument(target = "trie::db", skip(provider), fields(range))]
fn from_reverts<KH: KeyHasher>(
fn from_reverts(
provider: &(impl ChangeSetReader + StorageChangeSetReader + BlockNumReader + DBProvider),
range: impl RangeBounds<BlockNumber>,
) -> Result<Self, ProviderError> {
@@ -268,7 +315,7 @@ impl DatabaseHashedPostState for HashedPostStateSorted {
for entry in provider.account_changesets_range(start..end)? {
let (_, AccountBeforeTx { address, info }) = entry;
if seen_accounts.insert(address) {
accounts.push((KH::hash_key(address), info));
accounts.push((keccak256(address), info));
}
}
accounts.sort_unstable_by_key(|(hash, _)| *hash);
@@ -283,12 +330,12 @@ impl DatabaseHashedPostState for HashedPostStateSorted {
for (BlockNumberAddress((_, address)), storage) in
provider.storage_changesets_range(start..=end_inclusive)?
{
if seen_storage_keys.insert((address, storage.key)) {
let hashed_address = KH::hash_key(address);
if seen_storage_keys.insert((address, storage.key.as_b256())) {
let hashed_address = keccak256(address);
storages
.entry(hashed_address)
.or_default()
.push((KH::hash_key(storage.key), storage.value));
.push((storage.key.to_hashed(), storage.value));
}
}
}
@@ -309,7 +356,7 @@ impl DatabaseHashedPostState for HashedPostStateSorted {
#[cfg(test)]
mod tests {
use super::*;
use alloy_primitives::{hex, map::HashMap, Address, B256, U256};
use alloy_primitives::{hex, keccak256, map::HashMap, Address, B256, U256};
use reth_db::test_utils::create_test_rw_db;
use reth_db_api::{
database::Database,
@@ -438,12 +485,11 @@ mod tests {
)
.unwrap();
let sorted =
HashedPostStateSorted::from_reverts::<KeccakKeyHasher>(&*provider, 1..=3).unwrap();
let sorted = HashedPostStateSorted::from_reverts(&*provider, 1..=3).unwrap();
// Verify first occurrences were kept (nonce 1, not 2)
assert_eq!(sorted.accounts.len(), 2);
let hashed_addr1 = KeccakKeyHasher::hash_key(address1);
let hashed_addr1 = keccak256(address1);
let account1 = sorted.accounts.iter().find(|(addr, _)| *addr == hashed_addr1).unwrap();
assert_eq!(account1.1.unwrap().nonce, 1);
@@ -475,9 +521,225 @@ mod tests {
.unwrap();
// Query a range with no data
let sorted =
HashedPostStateSorted::from_reverts::<KeccakKeyHasher>(&*provider, 1..=10).unwrap();
let sorted = HashedPostStateSorted::from_reverts(&*provider, 1..=10).unwrap();
assert!(sorted.accounts.is_empty());
assert!(sorted.storages.is_empty());
}
#[test]
fn from_reverts_with_hashed_state() {
use reth_db_api::models::StorageBeforeTx;
use reth_provider::{StaticFileProviderFactory, StaticFileSegment, StaticFileWriter};
let factory = create_test_provider_factory();
let mut settings = factory.cached_storage_settings();
settings.use_hashed_state = true;
settings.storage_changesets_in_static_files = true;
factory.set_storage_settings_cache(settings);
let provider = factory.provider_rw().unwrap();
let address1 = Address::with_last_byte(1);
let address2 = Address::with_last_byte(2);
let plain_slot1 = B256::from(U256::from(11));
let plain_slot2 = B256::from(U256::from(22));
let hashed_slot1 = keccak256(plain_slot1);
let hashed_slot2 = keccak256(plain_slot2);
provider
.tx_ref()
.put::<tables::AccountChangeSets>(
1,
AccountBeforeTx {
address: address1,
info: Some(Account { nonce: 1, ..Default::default() }),
},
)
.unwrap();
provider
.tx_ref()
.put::<tables::AccountChangeSets>(
2,
AccountBeforeTx {
address: address1,
info: Some(Account { nonce: 2, ..Default::default() }),
},
)
.unwrap();
provider
.tx_ref()
.put::<tables::AccountChangeSets>(3, AccountBeforeTx { address: address2, info: None })
.unwrap();
{
let sf = factory.static_file_provider();
let mut writer = sf.latest_writer(StaticFileSegment::StorageChangeSets).unwrap();
writer.append_storage_changeset(vec![], 0).unwrap();
writer
.append_storage_changeset(
vec![StorageBeforeTx {
address: address1,
key: hashed_slot2,
value: U256::from(200),
}],
1,
)
.unwrap();
writer
.append_storage_changeset(
vec![StorageBeforeTx {
address: address1,
key: hashed_slot1,
value: U256::from(100),
}],
2,
)
.unwrap();
writer
.append_storage_changeset(
vec![StorageBeforeTx {
address: address1,
key: hashed_slot1,
value: U256::from(999),
}],
3,
)
.unwrap();
writer.commit().unwrap();
}
let sorted = HashedPostStateSorted::from_reverts(&*provider, 1..=3).unwrap();
assert_eq!(sorted.accounts.len(), 2);
let hashed_addr1 = keccak256(address1);
let hashed_addr2 = keccak256(address2);
let account1 = sorted.accounts.iter().find(|(addr, _)| *addr == hashed_addr1).unwrap();
assert_eq!(account1.1.unwrap().nonce, 1);
let account2 = sorted.accounts.iter().find(|(addr, _)| *addr == hashed_addr2).unwrap();
assert!(account2.1.is_none());
assert!(sorted.accounts.windows(2).all(|w| w[0].0 <= w[1].0));
let storage = sorted.storages.get(&hashed_addr1).expect("storage for address1");
assert_eq!(storage.storage_slots.len(), 2);
let found_slot1 = storage.storage_slots.iter().find(|(k, _)| *k == hashed_slot1).unwrap();
assert_eq!(found_slot1.1, U256::from(100));
let found_slot2 = storage.storage_slots.iter().find(|(k, _)| *k == hashed_slot2).unwrap();
assert_eq!(found_slot2.1, U256::from(200));
assert_ne!(hashed_slot1, plain_slot1);
assert_ne!(hashed_slot2, plain_slot2);
assert!(storage.storage_slots.windows(2).all(|w| w[0].0 <= w[1].0));
}
#[test]
fn from_reverts_legacy_keccak_hashes_all_keys() {
let factory = create_test_provider_factory();
let provider = factory.provider_rw().unwrap();
let address1 = Address::with_last_byte(1);
let address2 = Address::with_last_byte(2);
let plain_slot1 = B256::from(U256::from(11));
let plain_slot2 = B256::from(U256::from(22));
provider
.tx_ref()
.put::<tables::AccountChangeSets>(
1,
AccountBeforeTx {
address: address1,
info: Some(Account { nonce: 10, ..Default::default() }),
},
)
.unwrap();
provider
.tx_ref()
.put::<tables::AccountChangeSets>(
2,
AccountBeforeTx {
address: address2,
info: Some(Account { nonce: 20, ..Default::default() }),
},
)
.unwrap();
provider
.tx_ref()
.put::<tables::AccountChangeSets>(
3,
AccountBeforeTx {
address: address1,
info: Some(Account { nonce: 99, ..Default::default() }),
},
)
.unwrap();
provider
.tx_ref()
.put::<tables::StorageChangeSets>(
BlockNumberAddress((1, address1)),
StorageEntry { key: plain_slot1, value: U256::from(100) },
)
.unwrap();
provider
.tx_ref()
.put::<tables::StorageChangeSets>(
BlockNumberAddress((2, address1)),
StorageEntry { key: plain_slot2, value: U256::from(200) },
)
.unwrap();
provider
.tx_ref()
.put::<tables::StorageChangeSets>(
BlockNumberAddress((3, address2)),
StorageEntry { key: plain_slot1, value: U256::from(300) },
)
.unwrap();
let sorted = HashedPostStateSorted::from_reverts(&*provider, 1..=3).unwrap();
let expected_hashed_addr1 = keccak256(address1);
let expected_hashed_addr2 = keccak256(address2);
assert_eq!(sorted.accounts.len(), 2);
let account1 =
sorted.accounts.iter().find(|(addr, _)| *addr == expected_hashed_addr1).unwrap();
assert_eq!(account1.1.unwrap().nonce, 10);
let account2 =
sorted.accounts.iter().find(|(addr, _)| *addr == expected_hashed_addr2).unwrap();
assert_eq!(account2.1.unwrap().nonce, 20);
assert!(sorted.accounts.windows(2).all(|w| w[0].0 <= w[1].0));
let expected_hashed_slot1 = keccak256(plain_slot1);
let expected_hashed_slot2 = keccak256(plain_slot2);
assert_ne!(expected_hashed_slot1, plain_slot1);
assert_ne!(expected_hashed_slot2, plain_slot2);
let storage1 = sorted.storages.get(&expected_hashed_addr1).expect("storage for address1");
assert_eq!(storage1.storage_slots.len(), 2);
assert!(storage1
.storage_slots
.iter()
.any(|(k, v)| *k == expected_hashed_slot1 && *v == U256::from(100)));
assert!(storage1
.storage_slots
.iter()
.any(|(k, v)| *k == expected_hashed_slot2 && *v == U256::from(200)));
assert!(storage1.storage_slots.windows(2).all(|w| w[0].0 <= w[1].0));
let storage2 = sorted.storages.get(&expected_hashed_addr2).expect("storage for address2");
assert_eq!(storage2.storage_slots.len(), 1);
assert_eq!(storage2.storage_slots[0].0, expected_hashed_slot1);
assert_eq!(storage2.storage_slots[0].1, U256::from(300));
}
}

View File

@@ -47,7 +47,7 @@ where
provider.storage_changesets_range(from..=tip)?
{
if storage_address == address {
let hashed_slot = keccak256(storage_change.key);
let hashed_slot = storage_change.key.to_hashed();
if let hash_map::Entry::Vacant(entry) = storage.storage.entry(hashed_slot) {
entry.insert(storage_change.value);
}
@@ -101,3 +101,131 @@ impl<'a, TX: DbTx> DatabaseStorageRoot<'a, TX>
.root()
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_consensus::Header;
use alloy_primitives::U256;
use reth_db_api::{models::BlockNumberAddress, tables, transaction::DbTxMut};
use reth_primitives_traits::StorageEntry;
use reth_provider::{
test_utils::create_test_provider_factory, StaticFileProviderFactory, StaticFileSegment,
StaticFileWriter, StorageSettingsCache,
};
fn append_storage_changesets_to_static_files(
factory: &impl StaticFileProviderFactory<
Primitives: reth_primitives_traits::NodePrimitives<BlockHeader = Header>,
>,
changesets: Vec<(u64, Vec<reth_db_api::models::StorageBeforeTx>)>,
) {
let sf = factory.static_file_provider();
let mut writer = sf.latest_writer(StaticFileSegment::StorageChangeSets).unwrap();
for (block_number, changeset) in changesets {
writer.append_storage_changeset(changeset, block_number).unwrap();
}
writer.commit().unwrap();
}
fn append_headers_to_static_files(
factory: &impl StaticFileProviderFactory<
Primitives: reth_primitives_traits::NodePrimitives<BlockHeader = Header>,
>,
up_to_block: u64,
) {
let sf = factory.static_file_provider();
let mut writer = sf.latest_writer(StaticFileSegment::Headers).unwrap();
let mut header = Header::default();
for num in 0..=up_to_block {
header.number = num;
writer.append_header(&header, &B256::ZERO).unwrap();
}
writer.commit().unwrap();
}
#[test]
fn test_hashed_storage_from_reverts_legacy() {
let factory = create_test_provider_factory();
let provider = factory.provider_rw().unwrap();
assert!(!provider.cached_storage_settings().use_hashed_state);
let address = Address::with_last_byte(42);
let slot1 = B256::from(U256::from(100));
let slot2 = B256::from(U256::from(200));
append_headers_to_static_files(&factory, 5);
provider
.tx_ref()
.put::<tables::StorageChangeSets>(
BlockNumberAddress((1, address)),
StorageEntry { key: slot1, value: U256::from(10) },
)
.unwrap();
provider
.tx_ref()
.put::<tables::StorageChangeSets>(
BlockNumberAddress((2, address)),
StorageEntry { key: slot2, value: U256::from(20) },
)
.unwrap();
provider
.tx_ref()
.put::<tables::StorageChangeSets>(
BlockNumberAddress((3, address)),
StorageEntry { key: slot1, value: U256::from(999) },
)
.unwrap();
let result = hashed_storage_from_reverts_with_provider(&*provider, address, 1).unwrap();
let hashed_slot1 = keccak256(slot1);
let hashed_slot2 = keccak256(slot2);
assert_eq!(result.storage.len(), 2);
assert_eq!(result.storage.get(&hashed_slot1), Some(&U256::from(10)));
assert_eq!(result.storage.get(&hashed_slot2), Some(&U256::from(20)));
}
#[test]
fn test_hashed_storage_from_reverts_hashed_state() {
use reth_db_api::models::StorageBeforeTx;
let factory = create_test_provider_factory();
let mut settings = factory.cached_storage_settings();
settings.use_hashed_state = true;
settings.storage_changesets_in_static_files = true;
factory.set_storage_settings_cache(settings);
let provider = factory.provider_rw().unwrap();
assert!(provider.cached_storage_settings().use_hashed_state);
assert!(provider.cached_storage_settings().storage_changesets_in_static_files);
let address = Address::with_last_byte(42);
let plain_slot1 = B256::from(U256::from(100));
let plain_slot2 = B256::from(U256::from(200));
let hashed_slot1 = keccak256(plain_slot1);
let hashed_slot2 = keccak256(plain_slot2);
append_headers_to_static_files(&factory, 5);
append_storage_changesets_to_static_files(
&factory,
vec![
(0, vec![]),
(1, vec![StorageBeforeTx { address, key: hashed_slot1, value: U256::from(10) }]),
(2, vec![StorageBeforeTx { address, key: hashed_slot2, value: U256::from(20) }]),
(3, vec![StorageBeforeTx { address, key: hashed_slot1, value: U256::from(999) }]),
],
);
let result = hashed_storage_from_reverts_with_provider(&*provider, address, 1).unwrap();
assert_eq!(result.storage.len(), 2);
assert_eq!(result.storage.get(&hashed_slot1), Some(&U256::from(10)));
assert_eq!(result.storage.get(&hashed_slot2), Some(&U256::from(20)));
}
}

View File

@@ -0,0 +1,170 @@
# op-reth db settings set use_hashed_state
Use hashed state tables (HashedAccounts/HashedStorages) as canonical state
```bash
$ op-reth db settings set use_hashed_state --help
```
```txt
Usage: op-reth db settings set use_hashed_state [OPTIONS] <VALUE>
Arguments:
<VALUE>
[possible values: true, false]
Options:
-h, --help
Print help (see a summary with '-h')
Datadir:
--chain <CHAIN_OR_PATH>
The chain this node is running.
Possible values are either a built-in chain or the path to a chain specification file.
Built-in chains:
optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev
[default: optimism]
Logging:
--log.stdout.format <FORMAT>
The format to use for logs written to stdout
Possible values:
- json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- terminal: Represents terminal-friendly formatting for logs
[default: terminal]
--log.stdout.filter <FILTER>
The filter to use for logs written to stdout
[default: ]
--log.file.format <FORMAT>
The format to use for logs written to the log file
Possible values:
- json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- terminal: Represents terminal-friendly formatting for logs
[default: terminal]
--log.file.filter <FILTER>
The filter to use for logs written to the log file
[default: debug]
--log.file.directory <PATH>
The path to put log files in
[default: <CACHE_DIR>/logs]
--log.file.name <NAME>
The prefix name of the log files
[default: reth.log]
--log.file.max-size <SIZE>
The maximum size (in MB) of one log file
[default: 200]
--log.file.max-files <COUNT>
The maximum amount of log files that will be stored. If set to 0, background file logging is disabled
[default: 5]
--log.journald
Write logs to journald
--log.journald.filter <FILTER>
The filter to use for logs written to journald
[default: error]
--color <COLOR>
Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting
Possible values:
- always: Colors on
- auto: Auto-detect
- never: Colors off
[default: always]
--logs-otlp[=<URL>]
Enable `Opentelemetry` logs export to an OTLP endpoint.
If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317`
Example: --logs-otlp=http://collector:4318/v1/logs
[env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=]
--logs-otlp.filter <FILTER>
Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.
Example: --logs-otlp.filter=info,reth=debug
Defaults to INFO if not specified.
[default: info]
Display:
-v, --verbosity...
Set the minimum log level.
-v Errors
-vv Warnings
-vvv Info
-vvvv Debug
-vvvvv Traces (warning: very verbose!)
-q, --quiet
Silence all log output
Tracing:
--tracing-otlp[=<URL>]
Enable `Opentelemetry` tracing export to an OTLP endpoint.
If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317`
Example: --tracing-otlp=http://collector:4318/v1/traces
[env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=]
--tracing-otlp-protocol <PROTOCOL>
OTLP transport protocol to use for exporting traces and logs.
- `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path
Defaults to HTTP if not specified.
Possible values:
- http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path
- grpc: gRPC transport, port 4317
[env: OTEL_EXPORTER_OTLP_PROTOCOL=]
[default: http]
--tracing-otlp.filter <FILTER>
Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.
Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off
Defaults to TRACE if not specified.
[default: debug]
--tracing-otlp.sample-ratio <RATIO>
Trace sampling ratio to control the percentage of traces to export.
Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling
Example: --tracing-otlp.sample-ratio=0.0.
[env: OTEL_TRACES_SAMPLER_ARG=]
```
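For reference, toggling the setting on (the `<VALUE>` argument accepts `true` or `false`, per the usage line above) might look like:
```bash
op-reth db settings set use_hashed_state true --chain base
```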