feat(storage): slot preimage DB for plain changeset keys in v2 (#22379)

Co-authored-by: Amp <amp@ampcode.com>
Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com>
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
Georgios Konstantopoulos authored on 2026-02-23 10:01:44 -08:00, committed by GitHub
parent 80bf5532ac
commit 815037e27d
46 changed files with 1913 additions and 1189 deletions
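At a high level: v2 storage keeps slot keys hashed on disk (`HashedStorages`), while changesets now always carry plain (unhashed) EVM slot keys. Because keccak256 cannot be inverted, recovering a plain slot from a hashed one requires a stored preimage mapping; this commit adds a small auxiliary MDBX database of keccak256(slot) → slot entries, used only for pre-Cancun selfdestruct wipes. A minimal conceptual sketch of the idea (plain in-memory map, hypothetical names, not code from this diff):

use alloy_primitives::{keccak256, B256, U256};
use std::collections::HashMap;

fn preimage_sketch() {
    // Conceptual sketch only; the real store in this commit is a separate MDBX
    // environment (see slot_preimages.rs further down).
    let plain_slot = B256::from(U256::from(0x42));
    let hashed_slot = keccak256(plain_slot);

    // Record the mapping while the plain key is still known (at execution time).
    let mut preimages: HashMap<B256, B256> = HashMap::new();
    preimages.insert(hashed_slot, plain_slot);

    // Later, given only the hashed key (e.g. walked from HashedStorages), the plain
    // key can be recovered and used as the changeset key.
    assert_eq!(preimages.get(&hashed_slot), Some(&plain_slot));
}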

Cargo.lock (generated, 2 changes)

@@ -10131,6 +10131,7 @@ dependencies = [
"futures-util",
"itertools 0.14.0",
"num-traits",
"page_size",
"paste",
"rand 0.9.2",
"rayon",
@@ -10154,6 +10155,7 @@ dependencies = [
"reth-execution-types",
"reth-exex",
"reth-fs-util",
"reth-libmdbx",
"reth-network-p2p",
"reth-network-peers",
"reth-primitives-traits",


@@ -1061,14 +1061,6 @@ mod tests {
) -> ProviderResult<Option<StorageValue>> {
Ok(None)
}
fn storage_by_hashed_key(
&self,
_address: Address,
_hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
Ok(None)
}
}
impl BytecodeReader for MockStateProvider {


@@ -223,26 +223,6 @@ impl<N: NodePrimitives> StateProvider for MemoryOverlayStateProviderRef<'_, N> {
self.historical.storage(address, storage_key)
}
fn storage_by_hashed_key(
&self,
address: Address,
hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
let hashed_address = keccak256(address);
let state = &self.trie_input().state;
if let Some(hs) = state.storages.get(&hashed_address) {
if let Some(value) = hs.storage.get(&hashed_storage_key) {
return Ok(Some(*value));
}
if hs.wiped {
return Ok(Some(StorageValue::ZERO));
}
}
self.historical.storage_by_hashed_key(address, hashed_storage_key)
}
}
impl<N: NodePrimitives> BytecodeReader for MemoryOverlayStateProviderRef<'_, N> {


@@ -98,7 +98,7 @@ impl Command {
)?;
if let Some(entry) = entry {
let se: reth_primitives_traits::StorageEntry = entry.into();
let se: reth_primitives_traits::StorageEntry = entry;
println!("{}", serde_json::to_string_pretty(&se)?);
} else {
error!(target: "reth::cli", "No content for the given table key.");
@@ -110,7 +110,7 @@ impl Command {
let serializable: Vec<_> = changesets
.into_iter()
.map(|(addr, entry)| {
let se: reth_primitives_traits::StorageEntry = entry.into();
let se: reth_primitives_traits::StorageEntry = entry;
(addr, se)
})
.collect();


@@ -52,7 +52,7 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C>
Comp: CliNodeComponents<N>,
F: FnOnce(Arc<C::ChainSpec>) -> Comp,
{
let Environment { provider_factory, config, .. } =
let Environment { provider_factory, config, data_dir: _ } =
self.env.init::<N>(AccessRights::RW, runtime)?;
let target = self.command.unwind_target(provider_factory.clone())?;


@@ -351,14 +351,6 @@ impl<S: StateProvider, const PREWARM: bool> StateProvider for CachedStateProvide
self.state_provider.storage(account, storage_key)
}
}
fn storage_by_hashed_key(
&self,
address: Address,
hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
self.state_provider.storage_by_hashed_key(address, hashed_storage_key)
}
}
impl<S: BytecodeReader, const PREWARM: bool> BytecodeReader for CachedStateProvider<S, PREWARM> {


@@ -199,17 +199,6 @@ impl<S: StateProvider> StateProvider for InstrumentedStateProvider<S> {
self.record_storage_fetch(start.elapsed());
res
}
fn storage_by_hashed_key(
&self,
address: Address,
hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
let start = Instant::now();
let res = self.state_provider.storage_by_hashed_key(address, hashed_storage_key);
self.record_storage_fetch(start.elapsed());
res
}
}
impl<S: BytecodeReader> BytecodeReader for InstrumentedStateProvider<S> {


@@ -168,7 +168,7 @@ pub use alloy_primitives::{logs_bloom, Log, LogData};
pub mod proofs;
mod storage;
pub use storage::{StorageEntry, StorageSlotKey, ValueWithSubKey};
pub use storage::{StorageEntry, ValueWithSubKey};
pub mod sync;


@@ -1,4 +1,4 @@
use alloy_primitives::{keccak256, B256, U256};
use alloy_primitives::{B256, U256};
/// Trait for `DupSort` table values that contain a subkey.
///
@@ -12,117 +12,6 @@ pub trait ValueWithSubKey {
fn get_subkey(&self) -> Self::SubKey;
}
/// A storage slot key that tracks whether it holds a plain (unhashed) EVM slot
/// or a keccak256-hashed slot.
///
/// This enum replaces the `use_hashed_state: bool` parameter pattern by carrying
/// provenance with the key itself. Once tagged at a read/write boundary, downstream
/// code can call [`Self::to_hashed`] without risk of double-hashing — hashing a
/// [`StorageSlotKey::Hashed`] is a no-op.
///
/// The on-disk encoding is unchanged (raw 32-byte [`B256`]). The variant is set
/// by the code that knows the context (which table, which storage mode).
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum StorageSlotKey {
/// An unhashed EVM storage slot, as produced by REVM execution.
Plain(B256),
/// A keccak256-hashed storage slot, as stored in `HashedStorages` and
/// in v2-mode `StorageChangeSets`.
Hashed(B256),
}
impl Default for StorageSlotKey {
fn default() -> Self {
Self::Plain(B256::ZERO)
}
}
impl StorageSlotKey {
/// Create a plain slot key from a REVM [`U256`] storage index.
pub const fn from_u256(slot: U256) -> Self {
Self::Plain(B256::new(slot.to_be_bytes()))
}
/// Create a plain slot key from a raw [`B256`].
pub const fn plain(key: B256) -> Self {
Self::Plain(key)
}
/// Create a hashed slot key from a raw [`B256`].
pub const fn hashed(key: B256) -> Self {
Self::Hashed(key)
}
/// Tag a raw [`B256`] based on the storage mode.
///
/// When `use_hashed_state` is true the key is assumed already hashed.
/// When false it is assumed to be a plain slot.
pub const fn from_raw(key: B256, use_hashed_state: bool) -> Self {
if use_hashed_state {
Self::Hashed(key)
} else {
Self::Plain(key)
}
}
/// Returns the raw [`B256`] regardless of variant.
pub const fn as_b256(&self) -> B256 {
match *self {
Self::Plain(b) | Self::Hashed(b) => b,
}
}
/// Returns `true` if this key is already hashed.
pub const fn is_hashed(&self) -> bool {
matches!(self, Self::Hashed(_))
}
/// Returns `true` if this key is plain (unhashed).
pub const fn is_plain(&self) -> bool {
matches!(self, Self::Plain(_))
}
/// Produce the keccak256-hashed form of this slot key.
///
/// - If already [`Hashed`](Self::Hashed), returns the inner value as-is (no double-hash).
/// - If [`Plain`](Self::Plain), applies keccak256 and returns the result.
pub fn to_hashed(&self) -> B256 {
match *self {
Self::Hashed(b) => b,
Self::Plain(b) => keccak256(b),
}
}
/// Convert a plain slot to its changeset representation.
///
/// In v2 mode (`use_hashed_state = true`), the changeset stores hashed keys,
/// so the plain key is hashed. In v1 mode, the plain key is stored as-is.
///
/// Panics (debug) if called on an already-hashed key.
pub fn to_changeset_key(self, use_hashed_state: bool) -> B256 {
debug_assert!(self.is_plain(), "to_changeset_key called on already-hashed key");
if use_hashed_state {
self.to_hashed()
} else {
self.as_b256()
}
}
/// Like [`to_changeset_key`](Self::to_changeset_key) but returns a tagged
/// [`StorageSlotKey`] instead of a raw [`B256`].
///
/// Panics (debug) if called on an already-hashed key.
pub fn to_changeset(self, use_hashed_state: bool) -> Self {
Self::from_raw(self.to_changeset_key(use_hashed_state), use_hashed_state)
}
}
impl From<StorageSlotKey> for B256 {
fn from(key: StorageSlotKey) -> Self {
key.as_b256()
}
}
/// Account storage entry.
///
/// `key` is the subkey when used as a value in the `StorageChangeSets` table.
@@ -142,14 +31,6 @@ impl StorageEntry {
pub const fn new(key: B256, value: U256) -> Self {
Self { key, value }
}
/// Tag this entry's key as a [`StorageSlotKey`] based on the storage mode.
///
/// When `use_hashed_state` is true, the key is tagged as already-hashed.
/// When false, it is tagged as plain.
pub const fn slot_key(&self, use_hashed_state: bool) -> StorageSlotKey {
StorageSlotKey::from_raw(self.key, use_hashed_state)
}
}
impl ValueWithSubKey for StorageEntry {


@@ -135,7 +135,7 @@ impl StorageHistory {
let (block_address, entry) = result?;
let block_number = block_address.block_number();
let address = block_address.address();
highest_deleted_storages.insert((address, entry.key.as_b256()), block_number);
highest_deleted_storages.insert((address, entry.key), block_number);
last_changeset_pruned_block = Some(block_number);
pruned_changesets += 1;
limiter.increment_deleted_entries_count();
@@ -273,7 +273,7 @@ impl StorageHistory {
let (block_address, entry) = result?;
let block_number = block_address.block_number();
let address = block_address.address();
highest_deleted_storages.insert((address, entry.key.as_b256()), block_number);
highest_deleted_storages.insert((address, entry.key), block_number);
last_changeset_pruned_block = Some(block_number);
changesets_processed += 1;
limiter.increment_deleted_entries_count();


@@ -160,14 +160,6 @@ impl StateProvider for StateProviderTest {
) -> ProviderResult<Option<alloy_primitives::StorageValue>> {
Ok(self.accounts.get(&account).and_then(|(storage, _)| storage.get(&storage_key).copied()))
}
fn storage_by_hashed_key(
&self,
_address: Address,
_hashed_storage_key: StorageKey,
) -> ProviderResult<Option<alloy_primitives::StorageValue>> {
Ok(None)
}
}
impl BytecodeReader for StateProviderTest {


@@ -154,14 +154,6 @@ impl StateProvider for StateProviderTraitObjWrapper {
self.0.storage(account, storage_key)
}
fn storage_by_hashed_key(
&self,
address: Address,
hashed_storage_key: alloy_primitives::StorageKey,
) -> reth_errors::ProviderResult<Option<alloy_primitives::StorageValue>> {
self.0.storage_by_hashed_key(address, hashed_storage_key)
}
fn account_code(
&self,
addr: &Address,


@@ -13,13 +13,14 @@ workspace = true
[dependencies]
# reth
reth-chainspec = { workspace = true, optional = true }
reth-chainspec.workspace = true
reth-codecs.workspace = true
reth-config.workspace = true
reth-consensus.workspace = true
reth-db.workspace = true
reth-db-api.workspace = true
reth-etl.workspace = true
reth-libmdbx.workspace = true
reth-evm = { workspace = true, features = ["metrics"] }
reth-era-downloader.workspace = true
reth-era-utils.workspace = true
@@ -59,6 +60,7 @@ tracing.workspace = true
thiserror.workspace = true
itertools.workspace = true
rayon.workspace = true
page_size.workspace = true
num-traits.workspace = true
tempfile = { workspace = true, optional = true }
bincode.workspace = true
@@ -100,14 +102,13 @@ criterion = { workspace = true, features = ["async_tokio"] }
[features]
test-utils = [
"dep:reth-chainspec",
"reth-network-p2p/test-utils",
"reth-db/test-utils",
"reth-provider/test-utils",
"reth-stages-api/test-utils",
"dep:reth-testing-utils",
"dep:tempfile",
"reth-chainspec?/test-utils",
"reth-chainspec/test-utils",
"reth-consensus/test-utils",
"reth-evm/test-utils",
"reth-downloaders/test-utils",


@@ -2,6 +2,7 @@ use crate::stages::MERKLE_STAGE_DEFAULT_INCREMENTAL_THRESHOLD;
use alloy_consensus::BlockHeader;
use alloy_primitives::BlockNumber;
use num_traits::Zero;
use reth_chainspec::{ChainSpecProvider, EthereumHardforks};
use reth_config::config::ExecutionConfig;
use reth_consensus::FullConsensus;
use reth_db::{static_file::HeaderMask, tables};
@@ -13,7 +14,7 @@ use reth_provider::{
providers::{StaticFileProvider, StaticFileWriter},
BlockHashReader, BlockReader, DBProvider, EitherWriter, ExecutionOutcome, HeaderProvider,
LatestStateProviderRef, OriginalValuesKnown, ProviderError, StateWriteConfig, StateWriter,
StaticFileProviderFactory, StatsReader, StorageSettingsCache, TransactionVariant,
StaticFileProviderFactory, StatsReader, StoragePath, StorageSettingsCache, TransactionVariant,
};
use reth_revm::database::StateProviderDatabase;
use reth_stages_api::{
@@ -35,6 +36,8 @@ use tracing::*;
use super::missing_static_data_error;
mod slot_preimages;
/// The execution stage executes all transactions and
/// updates history indexes.
///
@@ -268,7 +271,9 @@ where
> + StatsReader
+ BlockHashReader
+ StateWriter<Receipt = <E::Primitives as NodePrimitives>::Receipt>
+ StorageSettingsCache,
+ StorageSettingsCache
+ StoragePath
+ ChainSpecProvider<ChainSpec: EthereumHardforks>,
{
/// Return the id of the stage
fn id(&self) -> StageId {
@@ -462,6 +467,26 @@ where
}
}
// When using hashed state (storage.v2), inject plain storage-slot keys into wipe
// reverts for self-destructed accounts. Without this, the changeset writer would only
// see hashed slot keys (from `HashedStorages`), which would leak hashed keys into
// changesets that the rest of the codebase expects to contain plain keys.
//
// SELFDESTRUCT no longer destroys storage post-Cancun, so this is only needed for
// pre-Cancun blocks. Post-Cancun we can remove the preimage db entirely.
if provider.cached_storage_settings().use_hashed_state() {
let start_header = provider
.header_by_number(start_block)?
.ok_or_else(|| ProviderError::HeaderNotFound(start_block.into()))?;
let path = provider.storage_path().join("preimage");
if !provider.chain_spec().is_cancun_active_at_timestamp(start_header.timestamp()) {
slot_preimages::inject_plain_wipe_slots(&path, provider, &mut state)?;
} else if path.exists() {
// Post-Cancun: no more self-destructs, preimage db is no longer needed.
let _ = std::fs::remove_dir_all(&path);
}
}
// Write output. When `use_hashed_state` is enabled, `write_state` skips writing to
// plain account/storage tables and only writes bytecodes and changesets. The hashed
// state is then written separately below.
@@ -517,6 +542,8 @@ where
})
}
reject_cancun_boundary_unwind(provider, input.checkpoint.block_number, unwind_to)?;
self.ensure_consistency(provider, input.checkpoint.block_number, Some(unwind_to))?;
// Unwind account and storage changesets, as well as receipts.
@@ -576,6 +603,40 @@ where
}
}
fn reject_cancun_boundary_unwind<Provider>(
provider: &Provider,
checkpoint_block: u64,
unwind_to: u64,
) -> Result<(), StageError>
where
Provider: HeaderProvider + ChainSpecProvider<ChainSpec: EthereumHardforks>,
{
let checkpoint_header = provider
.header_by_number(checkpoint_block)?
.ok_or_else(|| ProviderError::HeaderNotFound(checkpoint_block.into()))?;
let unwind_to_header = provider
.header_by_number(unwind_to)?
.ok_or_else(|| ProviderError::HeaderNotFound(unwind_to.into()))?;
let checkpoint_is_cancun =
provider.chain_spec().is_cancun_active_at_timestamp(checkpoint_header.timestamp());
let unwind_to_is_cancun =
provider.chain_spec().is_cancun_active_at_timestamp(unwind_to_header.timestamp());
if checkpoint_is_cancun && !unwind_to_is_cancun {
return Err(StageError::Fatal(
std::io::Error::other(format!(
"execution unwind across Cancun activation boundary is not allowed: checkpoint \
block #{checkpoint_block} (ts={}) is Cancun-active but unwind target \
#{unwind_to} (ts={}) is pre-Cancun",
checkpoint_header.timestamp(),
unwind_to_header.timestamp()
))
.into(),
))
}
Ok(())
}
fn execution_checkpoint<N>(
provider: &StaticFileProvider<N>,
start_block: BlockNumber,
@@ -687,7 +748,7 @@ mod tests {
use alloy_primitives::{address, hex_literal::hex, keccak256, Address, B256, U256};
use alloy_rlp::Decodable;
use assert_matches::assert_matches;
use reth_chainspec::ChainSpecBuilder;
use reth_chainspec::{ChainSpecBuilder, EthereumHardfork, ForkCondition};
use reth_db_api::{
models::{metadata::StorageSettings, AccountBeforeTx},
transaction::{DbTx, DbTxMut},
@@ -695,10 +756,11 @@ mod tests {
use reth_ethereum_consensus::EthBeaconConsensus;
use reth_ethereum_primitives::Block;
use reth_evm_ethereum::EthEvmConfig;
use reth_primitives_traits::{Account, Bytecode, SealedBlock, StorageEntry};
use reth_primitives_traits::{Account, Block as _, Bytecode, SealedBlock, StorageEntry};
use reth_provider::{
test_utils::create_test_provider_factory, AccountReader, BlockWriter,
DatabaseProviderFactory, ReceiptProvider, StaticFileProviderFactory,
test_utils::{create_test_provider_factory, create_test_provider_factory_with_chain_spec},
AccountReader, BlockWriter, DatabaseProviderFactory, ReceiptProvider,
StaticFileProviderFactory,
};
use reth_prune::PruneModes;
use reth_prune_types::{PruneMode, ReceiptsLogPruneConfig};
@@ -1118,6 +1180,75 @@ mod tests {
}
}
#[test]
fn unwind_from_cancun_to_pre_cancun_is_rejected() {
let chain_spec = Arc::new(
ChainSpecBuilder::mainnet()
.berlin_activated()
.with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(15))
.build(),
);
let factory = create_test_provider_factory_with_chain_spec(chain_spec);
let provider = factory.database_provider_rw().unwrap();
let mut rng = generators::rng();
let mut genesis = generators::random_block(
&mut rng,
0,
generators::BlockParams { tx_count: Some(0), ..Default::default() },
)
.unseal();
genesis.header.timestamp = 0;
let genesis = genesis.seal_slow();
let mut block_1 = generators::random_block(
&mut rng,
1,
generators::BlockParams {
parent: Some(genesis.hash()),
tx_count: Some(0),
..Default::default()
},
)
.unseal();
block_1.header.timestamp = 10;
let block_1 = block_1.seal_slow();
let mut block_2 = generators::random_block(
&mut rng,
2,
generators::BlockParams {
parent: Some(block_1.hash()),
tx_count: Some(0),
..Default::default()
},
)
.unseal();
block_2.header.timestamp = 20;
let block_2 = block_2.seal_slow();
provider.insert_block(&genesis.try_recover().unwrap()).unwrap();
provider.insert_block(&block_1.try_recover().unwrap()).unwrap();
provider.insert_block(&block_2.try_recover().unwrap()).unwrap();
provider
.static_file_provider()
.latest_writer(StaticFileSegment::Headers)
.unwrap()
.commit()
.unwrap();
let mut execution_stage = stage();
let err = execution_stage
.unwind(
&provider,
UnwindInput { checkpoint: StageCheckpoint::new(2), unwind_to: 1, bad_block: None },
)
.unwrap_err();
assert_matches!(err, StageError::Fatal(_));
assert!(err.to_string().contains("across Cancun activation boundary"));
}
#[tokio::test]
async fn test_selfdestruct() {
let test_db = TestStageDB::default();


@@ -0,0 +1,219 @@
use alloy_primitives::{keccak256, map::HashSet, B256};
use eyre::Context;
use rayon::slice::ParallelSliceMut;
use reth_db::tables;
use reth_db_api::{
cursor::{DbCursorRO, DbDupCursorRO},
transaction::DbTx,
};
use reth_libmdbx::{
DatabaseFlags, Environment, EnvironmentFlags, Geometry, Mode, SyncMode, WriteFlags, RO,
};
use reth_provider::{DBProvider, ExecutionOutcome};
use reth_revm::revm::database::states::RevertToSlot;
use reth_stages_api::StageError;
use std::path::Path;
use tracing::trace;
/// Separate MDBX environment for storing `keccak256(slot) → slot` preimage mappings.
///
/// Used only during [`super::ExecutionStage`] for pre-Cancun selfdestruct handling where
/// the original storage slot keys must be recovered from their hashed representation.
///
/// The database is append-only and not unwound — duplicate inserts are silently skipped.
/// After Cancun (where `SELFDESTRUCT` no longer destroys storage) the database can be pruned.
#[derive(Debug)]
struct SlotPreimages {
env: Environment,
}
impl SlotPreimages {
/// Opens (or creates) the slot-preimage MDBX environment at the given directory `path`.
///
/// Uses subdir mode (`no_sub_dir = false`), so MDBX creates `mdbx.dat` / `mdbx.lck`
/// under the directory (e.g. `db/preimage/mdbx.dat`).
fn open(path: &Path) -> eyre::Result<Self> {
const GIGABYTE: usize = 1024 * 1024 * 1024;
const TERABYTE: usize = GIGABYTE * 1024;
let mut builder = Environment::builder();
builder.set_max_dbs(1);
let os_page_size = page_size::get().clamp(4096, 0x10000);
builder.set_geometry(Geometry {
size: Some(0..(8 * TERABYTE)),
growth_step: Some(4 * GIGABYTE as isize),
shrink_threshold: Some(0),
page_size: Some(reth_libmdbx::PageSize::Set(os_page_size)),
});
builder.write_map();
builder.set_flags(EnvironmentFlags {
no_sub_dir: false,
mode: Mode::ReadWrite { sync_mode: SyncMode::Durable },
..Default::default()
});
let env = builder.open(path).wrap_err_with(|| {
format!("failed to open slot-preimage MDBX env at {}", path.display())
})?;
// Ensure the unnamed default DB exists.
{
let tx = env.begin_rw_txn()?;
let _db = tx.create_db(None, DatabaseFlags::empty())?;
tx.commit()?;
}
trace!(target: "stages::slot_preimages", ?path, "Opened slot-preimage store");
Ok(Self { env })
}
/// Batch-insert `hashed_slot → plain_slot` preimage entries.
///
/// Entries must be pre-sorted by key for optimal insert performance.
/// Existing keys are skipped after cursor lookup.
fn insert_preimages(&self, entries: &[(B256, B256)]) -> eyre::Result<()> {
let tx = self.env.begin_rw_txn()?;
let db = tx.open_db(None)?;
let mut cursor = tx.cursor(db.dbi())?;
for (hashed_slot, plain_slot) in entries {
if cursor.set_key::<[u8; 32], [u8; 32]>(hashed_slot.as_slice())?.is_some() {
continue;
}
cursor.put(hashed_slot.as_slice(), plain_slot.as_slice(), WriteFlags::empty())?;
}
tx.commit()?;
trace!(target: "stages::slot_preimages", count = entries.len(), "Inserted slot preimages");
Ok(())
}
/// Opens a read-only transaction for batch lookups.
///
/// Reuse the returned [`SlotPreimagesReader`] for multiple `get` calls to avoid
/// the overhead of opening a new RO transaction per lookup.
fn reader(&self) -> eyre::Result<SlotPreimagesReader> {
let tx = self.env.begin_ro_txn()?;
let dbi = tx.open_db(None)?.dbi();
Ok(SlotPreimagesReader { tx, dbi })
}
}
/// Read-only handle for batch slot-preimage lookups within a single MDBX transaction.
struct SlotPreimagesReader {
tx: reth_libmdbx::Transaction<RO>,
dbi: reth_libmdbx::ffi::MDBX_dbi,
}
impl SlotPreimagesReader {
/// Point-lookup of a slot preimage by its keccak256 hash.
fn get(&self, hashed_slot: &B256) -> eyre::Result<Option<B256>> {
let result: Option<[u8; 32]> = self.tx.get(self.dbi, hashed_slot.as_ref())?;
Ok(result.map(B256::from))
}
}
/// Collects `keccak256(slot) → slot` preimage entries from the bundle state and stores
/// them in the auxiliary preimage database, then rewrites wipe reverts for self-destructed
/// accounts to use plain slot keys instead of relying on the hashed-storage DB walk.
///
/// This eliminates the need for the changeset writer to read from `HashedStorages` during
/// storage wipes, keeping all changeset keys in plain format.
pub(super) fn inject_plain_wipe_slots<P: DBProvider, R>(
slot_preimages_path: &Path,
provider: &P,
state: &mut ExecutionOutcome<R>,
) -> Result<(), StageError> {
// Collect preimage entries from bundle state and reverts.
// StorageKey in revm is U256, representing a plain EVM slot index.
let mut preimage_entries = Vec::new();
let mut seen_hashes = HashSet::new();
for account in state.bundle.state().values() {
for &slot_key in account.storage.keys() {
let plain = B256::from(slot_key.to_be_bytes());
let hashed = keccak256(plain);
if seen_hashes.insert(hashed) {
preimage_entries.push((hashed, plain));
}
}
}
for block_reverts in state.bundle.reverts.iter() {
for (_, revert) in block_reverts {
for &slot_key in revert.storage.keys() {
let plain = B256::from(slot_key.to_be_bytes());
let hashed = keccak256(plain);
if seen_hashes.insert(hashed) {
preimage_entries.push((hashed, plain));
}
}
}
}
// Pre-sort entries by hash key for optimal MDBX insert performance.
preimage_entries.par_sort_unstable_by_key(|(hash, _)| *hash);
// Lazily open the preimage store and insert entries.
let preimages = SlotPreimages::open(slot_preimages_path).map_err(fatal)?;
if !preimage_entries.is_empty() {
preimages.insert_preimages(&preimage_entries).map_err(fatal)?;
}
// Find all wipe reverts (self-destructed accounts) and inject plain slot keys.
// Open a single RO transaction for all preimage lookups in this batch.
let reader = preimages.reader().map_err(fatal)?;
for block_reverts in state.bundle.reverts.iter_mut() {
for (address, revert) in block_reverts.iter_mut() {
if !revert.wipe_storage {
continue;
}
// Walk all hashed storage slots for this account in the DB and look up
// their plain-key preimages.
let addr = *address;
let hashed_address = keccak256(addr);
let mut cursor = provider.tx_ref().cursor_dup_read::<tables::HashedStorages>()?;
if let Some((_, entry)) = cursor.seek_exact(hashed_address)? {
inject_preimage_entry(&reader, revert, addr, entry.key, entry.value)?;
while let Some(entry) = cursor.next_dup_val()? {
inject_preimage_entry(&reader, revert, addr, entry.key, entry.value)?;
}
}
}
}
Ok(())
}
/// Looks up the plain-key preimage for a single hashed storage slot and inserts it
/// into the account revert if not already present.
fn inject_preimage_entry(
reader: &SlotPreimagesReader,
revert: &mut reth_revm::revm::database::AccountRevert,
address: alloy_primitives::Address,
hashed_slot: B256,
value: alloy_primitives::U256,
) -> Result<(), StageError> {
let plain_slot = reader.get(&hashed_slot).map_err(fatal)?.ok_or_else(|| {
fatal(eyre::eyre!("missing slot preimage for {hashed_slot:?} (addr={address:?})"))
})?;
// Convert B256 plain slot to U256 StorageKey for the revert map.
let plain_key = alloy_primitives::U256::from_be_bytes(plain_slot.0);
revert.storage.entry(plain_key).or_insert(RevertToSlot::Some(value));
Ok(())
}
#[inline]
fn fatal<E>(err: E) -> StageError
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
StageError::Fatal(err.into())
}
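A minimal usage sketch of the items defined above (hypothetical, illustration only; `dir` is assumed to be a writable directory for the MDBX environment):

fn usage_sketch(dir: &Path) -> eyre::Result<()> {
    // Hypothetical example; not part of the diff. Uses only the private items above.
    let plain = B256::from([0x42u8; 32]);
    let hashed = keccak256(plain);

    let store = SlotPreimages::open(dir)?;
    // `insert_preimages` expects entries pre-sorted by key; a single entry is trivially sorted.
    store.insert_preimages(&[(hashed, plain)])?;

    // Reuse a single read-only transaction for many point lookups.
    let reader = store.reader()?;
    assert_eq!(reader.get(&hashed)?, Some(plain));
    Ok(())
}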


@@ -208,10 +208,7 @@ where
for (idx, changeset_result) in walker.enumerate() {
let (BlockNumberAddress((block_number, address)), storage) = changeset_result?;
cache
.entry(AddressStorageKey((address, storage.key.as_b256())))
.or_default()
.push(block_number);
cache.entry(AddressStorageKey((address, storage.key))).or_default().push(block_number);
if idx > 0 && idx % interval == 0 && total_changesets > 1000 {
info!(target: "sync::stages::index_history", progress = %format!("{:.4}%", (idx as f64 / total_changesets as f64) * 100.0), "Collecting indices");


@@ -3,7 +3,7 @@
use alloy_consensus::{constants::ETH_TO_WEI, Header, TxEip1559, TxReceipt};
use alloy_eips::eip1559::INITIAL_BASE_FEE;
use alloy_genesis::{Genesis, GenesisAccount};
use alloy_primitives::{bytes, keccak256, Address, Bytes, TxKind, B256, U256};
use alloy_primitives::{bytes, Address, Bytes, TxKind, B256, U256};
use reth_chainspec::{ChainSpecBuilder, ChainSpecProvider, MAINNET};
use reth_config::config::StageConfig;
use reth_consensus::noop::NoopConsensus;
@@ -89,11 +89,6 @@ fn assert_changesets_queryable(
"storage changesets should be queryable from static files for blocks {:?}",
block_range
);
// Verify keys are in hashed format (v2 mode)
for (_, entry) in &storage_changesets {
assert!(entry.key.is_hashed(), "v2: storage changeset keys should be tagged as hashed");
}
} else {
let storage_changesets: Vec<_> = provider
.tx_ref()
@@ -105,16 +100,6 @@ fn assert_changesets_queryable(
"storage changesets should be queryable from MDBX for blocks {:?}",
block_range
);
// Verify keys are plain (not hashed) in v1 mode
for (_, entry) in &storage_changesets {
let key = entry.key;
assert_ne!(
key,
keccak256(key),
"v1: storage changeset key should be plain (not its own keccak256)"
);
}
}
// Verify account changesets

File diff suppressed because it is too large


@@ -3,7 +3,7 @@ use crate::{
transaction::{DbTx, DbTxMut},
DatabaseError,
};
use std::{fmt::Debug, sync::Arc};
use std::{fmt::Debug, path::PathBuf, sync::Arc};
/// Main Database trait that can open read-only and read-write transactions.
///
@@ -22,6 +22,9 @@ pub trait Database: Send + Sync + Debug {
#[track_caller]
fn tx_mut(&self) -> Result<Self::TXMut, DatabaseError>;
/// Returns the path to the database directory.
fn path(&self) -> PathBuf;
/// Takes a function and passes a read-only transaction into it, making sure it's closed in the
/// end of the execution.
fn view<T, F>(&self, f: F) -> Result<T, DatabaseError>
@@ -62,6 +65,10 @@ impl<DB: Database> Database for Arc<DB> {
fn tx_mut(&self) -> Result<Self::TXMut, DatabaseError> {
<DB as Database>::tx_mut(self)
}
fn path(&self) -> PathBuf {
<DB as Database>::path(self)
}
}
impl<DB: Database> Database for &DB {
@@ -75,4 +82,8 @@ impl<DB: Database> Database for &DB {
fn tx_mut(&self) -> Result<Self::TXMut, DatabaseError> {
<DB as Database>::tx_mut(self)
}
fn path(&self) -> PathBuf {
<DB as Database>::path(self)
}
}
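One way the new `path()` accessor is used, sketched below with a hypothetical helper (not part of this diff): deriving an auxiliary directory, such as the slot-preimage store, next to the main database. In this commit the execution stage reaches that directory via `provider.storage_path().join("preimage")`, and the provider factory plumbs `db.path()` into the database provider for that purpose.

fn preimage_dir<DB: Database>(db: &DB) -> PathBuf {
    // Hypothetical helper: a sibling directory under the main MDBX data dir,
    // e.g. `db/preimage` (where slot_preimages.rs creates `mdbx.dat` / `mdbx.lck`).
    db.path().join("preimage")
}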


@@ -16,7 +16,7 @@ use crate::{
DatabaseError,
};
use core::ops::Bound;
use std::{collections::BTreeMap, ops::RangeBounds};
use std::{collections::BTreeMap, ops::RangeBounds, path::PathBuf};
/// Mock database implementation for testing and development.
///
@@ -50,6 +50,10 @@ impl Database for DatabaseMock {
fn tx_mut(&self) -> Result<Self::TXMut, DatabaseError> {
Ok(TxMock::default())
}
fn path(&self) -> PathBuf {
PathBuf::default()
}
}
impl DatabaseMetrics for DatabaseMock {}


@@ -25,7 +25,7 @@ use reth_tracing::tracing::error;
use std::{
collections::HashMap,
ops::{Deref, Range},
path::Path,
path::{Path, PathBuf},
sync::Arc,
time::{SystemTime, UNIX_EPOCH},
};
@@ -244,6 +244,8 @@ impl DatabaseArguments {
pub struct DatabaseEnv {
/// Libmdbx-sys environment.
inner: Environment,
/// Path to the database directory.
path: PathBuf,
/// Opened DBIs for reuse.
/// Important: Do not manually close these DBIs, like via `mdbx_dbi_close`.
/// More generally, do not dynamically create, re-open, or drop tables at
@@ -277,6 +279,10 @@ impl Database for DatabaseEnv {
)
.map_err(|e| DatabaseError::InitTx(e.into()))
}
fn path(&self) -> PathBuf {
self.path.clone()
}
}
impl DatabaseMetrics for DatabaseEnv {
@@ -508,6 +514,7 @@ impl DatabaseEnv {
let env = Self {
inner: inner_env.open(path).map_err(|e| DatabaseError::Open(e.into()))?,
path: path.to_path_buf(),
dbis: Arc::default(),
metrics: None,
_lock_file,


@@ -140,6 +140,10 @@ pub mod test_utils {
fn tx_mut(&self) -> Result<Self::TXMut, DatabaseError> {
self.db().tx_mut()
}
fn path(&self) -> std::path::PathBuf {
self.db().path()
}
}
impl<DB: DatabaseMetrics> DatabaseMetrics for TempDatabase<DB> {
@@ -241,7 +245,7 @@ mod tests {
// Test that TempDatabase properly cleans up its directory when dropped
let temp_path = {
let db = crate::test_utils::create_test_rw_db();
let path = db.path().to_path_buf();
let path = db.path();
assert!(path.exists(), "Database directory should exist while TempDatabase is alive");
path
// TempDatabase dropped here


@@ -5,7 +5,8 @@ use crate::ProviderResult;
use alloy_primitives::BlockNumber;
use reth_db::models::AccountBeforeTx;
use reth_db_api::models::BlockNumberAddress;
use reth_storage_api::{ChangeSetReader, ChangesetEntry, StorageChangeSetReader};
use reth_primitives_traits::StorageEntry;
use reth_storage_api::{ChangeSetReader, StorageChangeSetReader};
use std::ops::{Bound, RangeBounds};
/// Iterator that walks account changesets from static files in a block range.
@@ -109,7 +110,7 @@ pub struct StaticFileStorageChangesetWalker<P> {
/// Current block being processed
current_block: BlockNumber,
/// Changesets for current block
current_changesets: Vec<(BlockNumberAddress, ChangesetEntry)>,
current_changesets: Vec<(BlockNumberAddress, StorageEntry)>,
/// Index within current block's changesets
changeset_index: usize,
}
@@ -143,7 +144,7 @@ impl<P> Iterator for StaticFileStorageChangesetWalker<P>
where
P: StorageChangeSetReader,
{
type Item = ProviderResult<(BlockNumberAddress, ChangesetEntry)>;
type Item = ProviderResult<(BlockNumberAddress, StorageEntry)>;
fn next(&mut self) -> Option<Self::Item> {
if let Some(changeset) = self.current_changesets.get(self.changeset_index).copied() {


@@ -23,13 +23,11 @@ use reth_chainspec::ChainInfo;
use reth_db_api::models::{AccountBeforeTx, BlockNumberAddress, StoredBlockBodyIndices};
use reth_execution_types::ExecutionOutcome;
use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy};
use reth_primitives_traits::{Account, RecoveredBlock, SealedHeader};
use reth_primitives_traits::{Account, RecoveredBlock, SealedHeader, StorageEntry};
use reth_prune_types::{PruneCheckpoint, PruneSegment};
use reth_stages_types::{StageCheckpoint, StageId};
use reth_static_file_types::StaticFileSegment;
use reth_storage_api::{
BlockBodyIndicesProvider, ChangesetEntry, NodePrimitivesProvider, StorageChangeSetReader,
};
use reth_storage_api::{BlockBodyIndicesProvider, NodePrimitivesProvider, StorageChangeSetReader};
use reth_storage_errors::provider::ProviderResult;
use reth_trie::{HashedPostState, KeccakKeyHasher};
use revm_database::BundleState;
@@ -715,7 +713,7 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for BlockchainProvider<N> {
fn storage_changeset(
&self,
block_number: BlockNumber,
) -> ProviderResult<Vec<(BlockNumberAddress, ChangesetEntry)>> {
) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
self.consistent_provider()?.storage_changeset(block_number)
}
@@ -724,14 +722,14 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for BlockchainProvider<N> {
block_number: BlockNumber,
address: Address,
storage_key: B256,
) -> ProviderResult<Option<ChangesetEntry>> {
) -> ProviderResult<Option<StorageEntry>> {
self.consistent_provider()?.get_storage_before_block(block_number, address, storage_key)
}
fn storage_changesets_range(
&self,
range: impl RangeBounds<BlockNumber>,
) -> ProviderResult<Vec<(BlockNumberAddress, ChangesetEntry)>> {
) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
self.consistent_provider()?.storage_changesets_range(range)
}


@@ -21,16 +21,13 @@ use reth_chainspec::ChainInfo;
use reth_db_api::models::{AccountBeforeTx, BlockNumberAddress, StoredBlockBodyIndices};
use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit};
use reth_node_types::{BlockTy, HeaderTy, ReceiptTy, TxTy};
use reth_primitives_traits::{
Account, BlockBody, RecoveredBlock, SealedHeader, StorageEntry, StorageSlotKey,
};
use reth_primitives_traits::{Account, BlockBody, RecoveredBlock, SealedHeader, StorageEntry};
use reth_prune_types::{PruneCheckpoint, PruneSegment};
use reth_stages_types::{StageCheckpoint, StageId};
use reth_static_file_types::StaticFileSegment;
use reth_storage_api::{
BlockBodyIndicesProvider, ChangesetEntry, DatabaseProviderFactory, NodePrimitivesProvider,
StateProvider, StateProviderBox, StorageChangeSetReader, StorageSettingsCache,
TryIntoHistoricalStateProvider,
BlockBodyIndicesProvider, DatabaseProviderFactory, NodePrimitivesProvider, StateProvider,
StateProviderBox, StorageChangeSetReader, TryIntoHistoricalStateProvider,
};
use reth_storage_errors::provider::ProviderResult;
use revm_database::states::PlainStorageRevert;
@@ -220,13 +217,13 @@ impl<N: ProviderNodeTypes> ConsistentProvider<N> {
/// Populate a [`BundleStateInit`] and [`RevertsInit`] based on the given storage and account
/// changesets.
///
/// When `use_hashed_state` is enabled, storage changeset keys are already hashed, so current
/// values are read directly from [`reth_db_api::tables::HashedStorages`]. Otherwise, values
/// are read via [`StateProvider::storage`] which queries plain state tables.
/// Storage changeset keys are always plain (unhashed). Current values are read via
/// [`StateProvider::storage`], which handles hashing internally when `use_hashed_state` is
/// enabled.
fn populate_bundle_state(
&self,
account_changeset: Vec<(u64, AccountBeforeTx)>,
storage_changeset: Vec<(BlockNumberAddress, ChangesetEntry)>,
storage_changeset: Vec<(BlockNumberAddress, StorageEntry)>,
block_range_end: BlockNumber,
) -> ProviderResult<(BundleStateInit, RevertsInit)> {
let mut state: BundleStateInit = HashMap::default();
@@ -263,16 +260,10 @@ impl<N: ProviderNodeTypes> ConsistentProvider<N> {
};
// match storage.
match account_state.2.entry(old_storage.key.as_b256()) {
match account_state.2.entry(old_storage.key) {
hash_map::Entry::Vacant(entry) => {
let new_storage_value = match old_storage.key {
StorageSlotKey::Hashed(_) => state_provider
.storage_by_hashed_key(address, old_storage.key.as_b256())?
.unwrap_or_default(),
StorageSlotKey::Plain(_) => state_provider
.storage(address, old_storage.key.as_b256())?
.unwrap_or_default(),
};
let new_storage_value =
state_provider.storage(address, old_storage.key)?.unwrap_or_default();
entry.insert((old_storage.value, new_storage_value));
}
hash_map::Entry::Occupied(mut entry) => {
@@ -286,7 +277,7 @@ impl<N: ProviderNodeTypes> ConsistentProvider<N> {
.entry(address)
.or_default()
.1
.push(StorageEntry::from(old_storage));
.push(old_storage);
}
Ok((state, reverts))
@@ -1312,8 +1303,7 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for ConsistentProvider<N> {
fn storage_changeset(
&self,
block_number: BlockNumber,
) -> ProviderResult<Vec<(BlockNumberAddress, ChangesetEntry)>> {
let use_hashed = self.storage_provider.cached_storage_settings().use_hashed_state();
) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
if let Some(state) =
self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into()))
{
@@ -1329,10 +1319,10 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for ConsistentProvider<N> {
.flatten()
.flat_map(|revert: PlainStorageRevert| {
revert.storage_revert.into_iter().map(move |(key, value)| {
let tagged_key = StorageSlotKey::from_u256(key).to_changeset(use_hashed);
let plain_key = B256::from(key.to_be_bytes());
(
BlockNumberAddress((block_number, revert.address)),
ChangesetEntry { key: tagged_key, value: value.to_previous_value() },
StorageEntry { key: plain_key, value: value.to_previous_value() },
)
})
})
@@ -1367,8 +1357,7 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for ConsistentProvider<N> {
block_number: BlockNumber,
address: Address,
storage_key: B256,
) -> ProviderResult<Option<ChangesetEntry>> {
let use_hashed = self.storage_provider.cached_storage_settings().use_hashed_state();
) -> ProviderResult<Option<StorageEntry>> {
if let Some(state) =
self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into()))
{
@@ -1387,9 +1376,9 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for ConsistentProvider<N> {
return None
}
revert.storage_revert.into_iter().find_map(|(key, value)| {
let tagged_key = StorageSlotKey::from_u256(key).to_changeset(use_hashed);
(tagged_key.as_b256() == storage_key).then(|| ChangesetEntry {
key: tagged_key,
let plain_key = B256::from(key.to_be_bytes());
(plain_key == storage_key).then(|| StorageEntry {
key: plain_key,
value: value.to_previous_value(),
})
})
@@ -1415,14 +1404,12 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for ConsistentProvider<N> {
fn storage_changesets_range(
&self,
range: impl RangeBounds<BlockNumber>,
) -> ProviderResult<Vec<(BlockNumberAddress, ChangesetEntry)>> {
) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
let range = to_range(range);
let mut changesets = Vec::new();
let database_start = range.start;
let mut database_end = range.end;
let use_hashed = self.storage_provider.cached_storage_settings().use_hashed_state();
if let Some(head_block) = &self.head_block {
database_end = head_block.anchor().number;
@@ -1440,14 +1427,10 @@ impl<N: ProviderNodeTypes> StorageChangeSetReader for ConsistentProvider<N> {
.flatten()
.flat_map(|revert: PlainStorageRevert| {
revert.storage_revert.into_iter().map(move |(key, value)| {
let tagged_key =
StorageSlotKey::from_u256(key).to_changeset(use_hashed);
let plain_key = B256::from(key.to_be_bytes());
(
BlockNumberAddress((state.number(), revert.address)),
ChangesetEntry {
key: tagged_key,
value: value.to_previous_value(),
},
StorageEntry { key: plain_key, value: value.to_previous_value() },
)
})
});
@@ -2084,178 +2067,6 @@ mod tests {
Ok(())
}
#[test]
fn test_get_state_storage_value_hashed_state() -> eyre::Result<()> {
use alloy_primitives::{keccak256, U256};
use reth_db_api::{models::StorageSettings, tables, transaction::DbTxMut};
use reth_primitives_traits::StorageEntry;
use reth_storage_api::StorageSettingsCache;
use std::collections::HashMap;
let address = alloy_primitives::Address::with_last_byte(1);
let account = reth_primitives_traits::Account {
nonce: 1,
balance: U256::from(1000),
bytecode_hash: None,
};
let slot = U256::from(0x42);
let slot_b256 = B256::from(slot);
let hashed_address = keccak256(address);
let hashed_slot = keccak256(slot_b256);
let mut rng = generators::rng();
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v2());
let blocks = random_block_range(
&mut rng,
0..=1,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() },
);
let provider_rw = factory.provider_rw()?;
provider_rw.append_blocks_with_state(
blocks
.into_iter()
.map(|b| b.try_recover().expect("failed to seal block with senders"))
.collect(),
&ExecutionOutcome {
bundle: BundleState::new(
[(address, None, Some(account.into()), {
let mut s = HashMap::default();
s.insert(slot, (U256::ZERO, U256::from(100)));
s
})],
[
Vec::new(),
vec![(address, Some(Some(account.into())), vec![(slot, U256::ZERO)])],
],
[],
),
first_block: 0,
..Default::default()
},
Default::default(),
)?;
provider_rw.tx_ref().put::<tables::HashedStorages>(
hashed_address,
StorageEntry { key: hashed_slot, value: U256::from(100) },
)?;
provider_rw.tx_ref().put::<tables::HashedAccounts>(hashed_address, account)?;
provider_rw.commit()?;
let provider = BlockchainProvider::new(factory)?;
let consistent_provider = provider.consistent_provider()?;
let outcome =
consistent_provider.get_state(1..=1)?.expect("should return execution outcome");
let state = &outcome.bundle.state;
let account_state = state.get(&address).expect("should have account in bundle state");
let storage = &account_state.storage;
let slot_as_u256 = U256::from_be_bytes(*hashed_slot);
let storage_slot = storage.get(&slot_as_u256).expect("should have the slot in storage");
assert_eq!(
storage_slot.present_value,
U256::from(100),
"present_value should be 100 (the actual value in HashedStorages)"
);
Ok(())
}
#[test]
#[cfg(all(unix, feature = "rocksdb"))]
fn test_get_state_storage_value_hashed_state_historical() -> eyre::Result<()> {
use alloy_primitives::{keccak256, U256};
use reth_db_api::{models::StorageSettings, tables, transaction::DbTxMut};
use reth_primitives_traits::StorageEntry;
use reth_storage_api::StorageSettingsCache;
use std::collections::HashMap;
let address = alloy_primitives::Address::with_last_byte(1);
let account = reth_primitives_traits::Account {
nonce: 1,
balance: U256::from(1000),
bytecode_hash: None,
};
let slot = U256::from(0x42);
let slot_b256 = B256::from(slot);
let hashed_address = keccak256(address);
let hashed_slot = keccak256(slot_b256);
let mut rng = generators::rng();
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v2());
let blocks = random_block_range(
&mut rng,
0..=3,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() },
);
let provider_rw = factory.provider_rw()?;
provider_rw.append_blocks_with_state(
blocks
.into_iter()
.map(|b| b.try_recover().expect("failed to seal block with senders"))
.collect(),
&ExecutionOutcome {
bundle: BundleState::new(
[(address, None, Some(account.into()), {
let mut s = HashMap::default();
s.insert(slot, (U256::ZERO, U256::from(300)));
s
})],
[
Vec::new(),
vec![(address, Some(Some(account.into())), vec![(slot, U256::ZERO)])],
vec![(address, Some(Some(account.into())), vec![(slot, U256::from(100))])],
vec![(address, Some(Some(account.into())), vec![(slot, U256::from(200))])],
],
[],
),
first_block: 0,
..Default::default()
},
Default::default(),
)?;
provider_rw.tx_ref().put::<tables::HashedStorages>(
hashed_address,
StorageEntry { key: hashed_slot, value: U256::from(300) },
)?;
provider_rw.tx_ref().put::<tables::HashedAccounts>(hashed_address, account)?;
provider_rw.commit()?;
let provider = BlockchainProvider::new(factory)?;
let consistent_provider = provider.consistent_provider()?;
let outcome =
consistent_provider.get_state(1..=2)?.expect("should return execution outcome");
let state = &outcome.bundle.state;
let account_state = state.get(&address).expect("should have account in bundle state");
let storage = &account_state.storage;
let slot_as_u256 = U256::from_be_bytes(*hashed_slot);
let storage_slot = storage.get(&slot_as_u256).expect("should have the slot in storage");
assert_eq!(
storage_slot.present_value,
U256::from(200),
"present_value should be 200 (the value at block 2, not 300 which is the latest)"
);
Ok(())
}
#[test]
fn test_get_state_storage_value_plain_state() -> eyre::Result<()> {
use alloy_primitives::U256;
@@ -2337,104 +2148,6 @@ mod tests {
Ok(())
}
#[test]
fn test_storage_changeset_consistent_keys_hashed_state() -> eyre::Result<()> {
use alloy_primitives::{keccak256, U256};
use reth_db_api::models::StorageSettings;
use reth_storage_api::{StorageChangeSetReader, StorageSettingsCache};
use std::collections::HashMap;
let mut rng = generators::rng();
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v2());
let (database_blocks, in_memory_blocks) = random_blocks(&mut rng, 1, 1, None, None, 0..1);
let address = alloy_primitives::Address::with_last_byte(1);
let account = reth_primitives_traits::Account {
nonce: 1,
balance: U256::from(1000),
bytecode_hash: None,
};
let slot = U256::from(0x42);
let provider_rw = factory.provider_rw()?;
provider_rw.append_blocks_with_state(
database_blocks
.into_iter()
.map(|b| b.try_recover().expect("failed to seal block with senders"))
.collect(),
&ExecutionOutcome {
bundle: BundleState::new(
[(address, None, Some(account.into()), {
let mut s = HashMap::default();
s.insert(slot, (U256::ZERO, U256::from(100)));
s
})],
[[(address, Some(Some(account.into())), vec![(slot, U256::ZERO)])]],
[],
),
first_block: 0,
..Default::default()
},
Default::default(),
)?;
provider_rw.commit()?;
let provider = BlockchainProvider::new(factory)?;
let in_mem_block = in_memory_blocks.first().unwrap();
let senders = in_mem_block.senders().expect("failed to recover senders");
let chain = NewCanonicalChain::Commit {
new: vec![ExecutedBlock {
recovered_block: Arc::new(RecoveredBlock::new_sealed(
in_mem_block.clone(),
senders,
)),
execution_output: Arc::new(BlockExecutionOutput {
state: BundleState::new(
[(address, None, Some(account.into()), {
let mut s = HashMap::default();
s.insert(slot, (U256::from(100), U256::from(200)));
s
})],
[[(address, Some(Some(account.into())), vec![(slot, U256::from(100))])]],
[],
),
result: BlockExecutionResult {
receipts: Default::default(),
requests: Default::default(),
gas_used: 0,
blob_gas_used: 0,
},
}),
..Default::default()
}],
};
provider.canonical_in_memory_state.update_chain(chain);
let consistent_provider = provider.consistent_provider()?;
let db_changeset = consistent_provider.storage_changeset(0)?;
let mem_changeset = consistent_provider.storage_changeset(1)?;
let slot_b256 = B256::from(slot);
let _hashed_slot_b256 = keccak256(slot_b256);
assert_eq!(db_changeset.len(), 1);
assert_eq!(mem_changeset.len(), 1);
let db_key = db_changeset[0].1.key;
let mem_key = mem_changeset[0].1.key;
assert_eq!(
db_key, mem_key,
"DB and in-memory changesets should return the same key format (hashed) for the same logical slot"
);
Ok(())
}
#[test]
fn test_storage_changeset_consistent_keys_plain_state() -> eyre::Result<()> {
use alloy_primitives::U256;
@@ -2521,8 +2234,8 @@ mod tests {
assert_eq!(db_changeset.len(), 1);
assert_eq!(mem_changeset.len(), 1);
let db_key = db_changeset[0].1.key.as_b256();
let mem_key = mem_changeset[0].1.key.as_b256();
let db_key = db_changeset[0].1.key;
let mem_key = mem_changeset[0].1.key;
assert_eq!(db_key, slot_b256, "DB changeset should use plain (unhashed) key");
assert_eq!(mem_key, slot_b256, "In-memory changeset should use plain (unhashed) key");
@@ -2534,101 +2247,6 @@ mod tests {
Ok(())
}
#[test]
fn test_storage_changesets_range_consistent_keys_hashed_state() -> eyre::Result<()> {
use alloy_primitives::U256;
use reth_db_api::models::StorageSettings;
use reth_storage_api::{StorageChangeSetReader, StorageSettingsCache};
use std::collections::HashMap;
let mut rng = generators::rng();
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v2());
let (database_blocks, in_memory_blocks) = random_blocks(&mut rng, 2, 1, None, None, 0..1);
let address = alloy_primitives::Address::with_last_byte(1);
let account = reth_primitives_traits::Account {
nonce: 1,
balance: U256::from(1000),
bytecode_hash: None,
};
let slot = U256::from(0x42);
let provider_rw = factory.provider_rw()?;
provider_rw.append_blocks_with_state(
database_blocks
.into_iter()
.map(|b| b.try_recover().expect("failed to seal block with senders"))
.collect(),
&ExecutionOutcome {
bundle: BundleState::new(
[(address, None, Some(account.into()), {
let mut s = HashMap::default();
s.insert(slot, (U256::ZERO, U256::from(100)));
s
})],
vec![
vec![(address, Some(Some(account.into())), vec![(slot, U256::ZERO)])],
vec![],
],
[],
),
first_block: 0,
..Default::default()
},
Default::default(),
)?;
provider_rw.commit()?;
let provider = BlockchainProvider::new(factory)?;
let in_mem_block = in_memory_blocks.first().unwrap();
let senders = in_mem_block.senders().expect("failed to recover senders");
let chain = NewCanonicalChain::Commit {
new: vec![ExecutedBlock {
recovered_block: Arc::new(RecoveredBlock::new_sealed(
in_mem_block.clone(),
senders,
)),
execution_output: Arc::new(BlockExecutionOutput {
state: BundleState::new(
[(address, None, Some(account.into()), {
let mut s = HashMap::default();
s.insert(slot, (U256::from(100), U256::from(200)));
s
})],
[[(address, Some(Some(account.into())), vec![(slot, U256::from(100))])]],
[],
),
result: BlockExecutionResult {
receipts: Default::default(),
requests: Default::default(),
gas_used: 0,
blob_gas_used: 0,
},
}),
..Default::default()
}],
};
provider.canonical_in_memory_state.update_chain(chain);
let consistent_provider = provider.consistent_provider()?;
let all_changesets = consistent_provider.storage_changesets_range(0..=2)?;
assert_eq!(all_changesets.len(), 2, "should have one changeset entry per block");
let keys: Vec<B256> = all_changesets.iter().map(|(_, entry)| entry.key.as_b256()).collect();
assert_eq!(
keys[0], keys[1],
"same logical slot should produce identical keys whether from DB or memory"
);
Ok(())
}
#[test]
fn test_storage_changesets_range_consistent_keys_plain_state() -> eyre::Result<()> {
use alloy_primitives::U256;
@@ -2715,7 +2333,7 @@ mod tests {
assert_eq!(all_changesets.len(), 2, "should have one changeset entry per block");
let slot_b256 = B256::from(slot);
let keys: Vec<B256> = all_changesets.iter().map(|(_, entry)| entry.key.as_b256()).collect();
let keys: Vec<B256> = all_changesets.iter().map(|(_, entry)| entry.key).collect();
assert_eq!(
keys[0], keys[1],


@@ -119,6 +119,7 @@ impl<N: ProviderNodeTypes> ProviderFactory<N> {
rocksdb_provider.clone(),
ChangesetCache::new(),
runtime.clone(),
db.path(),
)
.storage_settings()?
.unwrap_or(legacy_settings);
@@ -246,6 +247,7 @@ impl<N: ProviderNodeTypes> ProviderFactory<N> {
self.rocksdb_provider.clone(),
self.changeset_cache.clone(),
self.runtime.clone(),
self.db.path(),
))
}
@@ -265,6 +267,7 @@ impl<N: ProviderNodeTypes> ProviderFactory<N> {
self.rocksdb_provider.clone(),
self.changeset_cache.clone(),
self.runtime.clone(),
self.db.path(),
)))
}
@@ -285,6 +288,7 @@ impl<N: ProviderNodeTypes> ProviderFactory<N> {
self.rocksdb_provider.clone(),
self.changeset_cache.clone(),
self.runtime.clone(),
self.db.path(),
))
}


@@ -52,7 +52,7 @@ use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult, Chain, Ex
use reth_node_types::{BlockTy, BodyTy, HeaderTy, NodeTypes, ReceiptTy, TxTy};
use reth_primitives_traits::{
Account, Block as _, BlockBody as _, Bytecode, FastInstant as Instant, RecoveredBlock,
SealedHeader, StorageEntry, StorageSlotKey,
SealedHeader, StorageEntry,
};
use reth_prune_types::{
PruneCheckpoint, PruneMode, PruneModes, PruneSegment, MINIMUM_UNWIND_SAFE_DISTANCE,
@@ -60,8 +60,8 @@ use reth_prune_types::{
use reth_stages_types::{StageCheckpoint, StageId};
use reth_static_file_types::StaticFileSegment;
use reth_storage_api::{
BlockBodyIndicesProvider, BlockBodyReader, ChangesetEntry, MetadataProvider, MetadataWriter,
NodePrimitivesProvider, StateProvider, StateWriteConfig, StorageChangeSetReader,
BlockBodyIndicesProvider, BlockBodyReader, MetadataProvider, MetadataWriter,
NodePrimitivesProvider, StateProvider, StateWriteConfig, StorageChangeSetReader, StoragePath,
StorageSettingsCache, TryIntoHistoricalStateProvider, WriteStateInput,
};
use reth_storage_errors::provider::{ProviderResult, StaticFileWriterError};
@@ -78,6 +78,7 @@ use std::{
collections::{BTreeMap, BTreeSet},
fmt::Debug,
ops::{Deref, DerefMut, Range, RangeBounds, RangeInclusive},
path::PathBuf,
sync::Arc,
};
use tracing::{debug, instrument, trace};
@@ -201,6 +202,8 @@ pub struct DatabaseProvider<TX, N: NodeTypes> {
changeset_cache: ChangesetCache,
/// Task runtime for spawning parallel I/O work.
runtime: reth_tasks::Runtime,
/// Path to the database directory.
db_path: PathBuf,
/// Pending `RocksDB` batches to be committed at provider commit time.
#[cfg_attr(not(all(unix, feature = "rocksdb")), allow(dead_code))]
pending_rocksdb_batches: PendingRocksDBBatches,
@@ -358,6 +361,7 @@ impl<TX: DbTxMut, N: NodeTypes> DatabaseProvider<TX, N> {
rocksdb_provider: RocksDBProvider,
changeset_cache: ChangesetCache,
runtime: reth_tasks::Runtime,
db_path: PathBuf,
commit_order: CommitOrder,
) -> Self {
Self {
@@ -370,6 +374,7 @@ impl<TX: DbTxMut, N: NodeTypes> DatabaseProvider<TX, N> {
rocksdb_provider,
changeset_cache,
runtime,
db_path,
pending_rocksdb_batches: Default::default(),
commit_order,
minimum_pruning_distance: MINIMUM_UNWIND_SAFE_DISTANCE,
@@ -389,6 +394,7 @@ impl<TX: DbTxMut, N: NodeTypes> DatabaseProvider<TX, N> {
rocksdb_provider: RocksDBProvider,
changeset_cache: ChangesetCache,
runtime: reth_tasks::Runtime,
db_path: PathBuf,
) -> Self {
Self::new_rw_inner(
tx,
@@ -400,6 +406,7 @@ impl<TX: DbTxMut, N: NodeTypes> DatabaseProvider<TX, N> {
rocksdb_provider,
changeset_cache,
runtime,
db_path,
CommitOrder::Normal,
)
}
@@ -416,6 +423,7 @@ impl<TX: DbTxMut, N: NodeTypes> DatabaseProvider<TX, N> {
rocksdb_provider: RocksDBProvider,
changeset_cache: ChangesetCache,
runtime: reth_tasks::Runtime,
db_path: PathBuf,
) -> Self {
Self::new_rw_inner(
tx,
@@ -427,6 +435,7 @@ impl<TX: DbTxMut, N: NodeTypes> DatabaseProvider<TX, N> {
rocksdb_provider,
changeset_cache,
runtime,
db_path,
CommitOrder::Unwind,
)
}
@@ -984,6 +993,7 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> DatabaseProvider<TX, N> {
rocksdb_provider: RocksDBProvider,
changeset_cache: ChangesetCache,
runtime: reth_tasks::Runtime,
db_path: PathBuf,
) -> Self {
Self {
tx,
@@ -995,6 +1005,7 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> DatabaseProvider<TX, N> {
rocksdb_provider,
changeset_cache,
runtime,
db_path,
pending_rocksdb_batches: Default::default(),
commit_order: CommitOrder::Normal,
minimum_pruning_distance: MINIMUM_UNWIND_SAFE_DISTANCE,
@@ -1198,7 +1209,7 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> DatabaseProvider<TX, N> {
fn populate_bundle_state<A, S>(
&self,
account_changeset: Vec<(u64, AccountBeforeTx)>,
storage_changeset: Vec<(BlockNumberAddress, ChangesetEntry)>,
storage_changeset: Vec<(BlockNumberAddress, StorageEntry)>,
plain_accounts_cursor: &mut A,
plain_storage_cursor: &mut S,
) -> ProviderResult<(BundleStateInit, RevertsInit)>
@@ -1248,12 +1259,11 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> DatabaseProvider<TX, N> {
};
// match storage.
let storage_key = old_storage.key.as_b256();
match account_state.2.entry(storage_key) {
match account_state.2.entry(old_storage.key) {
hash_map::Entry::Vacant(entry) => {
let new_storage = plain_storage_cursor
.seek_by_key_subkey(address, storage_key)?
.filter(|storage| storage.key == storage_key)
.seek_by_key_subkey(address, old_storage.key)?
.filter(|storage| storage.key == old_storage.key)
.unwrap_or_default();
entry.insert((old_storage.value, new_storage.value));
}
@@ -1268,20 +1278,20 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> DatabaseProvider<TX, N> {
.entry(address)
.or_default()
.1
.push(old_storage.into_storage_entry());
.push(old_storage);
}
Ok((state, reverts))
}
/// Like [`populate_bundle_state`](Self::populate_bundle_state), but reads current values from
/// `HashedAccounts`/`HashedStorages`. Addresses are hashed via `keccak256` for DB lookups,
/// while storage keys from changesets are assumed to already be hashed and are used as-is.
/// The output `BundleStateInit`/`RevertsInit` structures remain keyed by plain address.
/// `HashedAccounts`/`HashedStorages`. Addresses and storage keys are hashed via `keccak256`
/// for DB lookups. The output `BundleStateInit`/`RevertsInit` structures remain keyed by
/// plain address and plain storage key.
fn populate_bundle_state_hashed(
&self,
account_changeset: Vec<(u64, AccountBeforeTx)>,
storage_changeset: Vec<(BlockNumberAddress, ChangesetEntry)>,
storage_changeset: Vec<(BlockNumberAddress, StorageEntry)>,
hashed_accounts_cursor: &mut impl DbCursorRO<tables::HashedAccounts>,
hashed_storage_cursor: &mut impl DbDupCursorRO<tables::HashedStorages>,
) -> ProviderResult<(BundleStateInit, RevertsInit)> {
@@ -1318,13 +1328,14 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> DatabaseProvider<TX, N> {
hash_map::Entry::Occupied(entry) => entry.into_mut(),
};
let storage_key = old_storage.key.as_b256();
match account_state.2.entry(storage_key) {
// Storage keys in changesets are plain; hash them for HashedStorages lookup.
let hashed_storage_key = keccak256(old_storage.key);
match account_state.2.entry(old_storage.key) {
hash_map::Entry::Vacant(entry) => {
let hashed_address = keccak256(address);
let new_storage = hashed_storage_cursor
.seek_by_key_subkey(hashed_address, storage_key)?
.filter(|storage| storage.key == storage_key)
.seek_by_key_subkey(hashed_address, hashed_storage_key)?
.filter(|storage| storage.key == hashed_storage_key)
.unwrap_or_default();
entry.insert((old_storage.value, new_storage.value));
}
@@ -1339,7 +1350,7 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> DatabaseProvider<TX, N> {
.entry(address)
.or_default()
.1
.push(old_storage.into_storage_entry());
.push(old_storage);
}
Ok((state, reverts))
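As a rough sketch of the key convention used by `populate_bundle_state_hashed` above, assuming only `alloy_primitives` and `std`: the bundle map stays keyed by the plain slot key, and `keccak256` keys are derived only for the `HashedStorages` lookup. The `read_hashed_storage` closure is a hypothetical stand-in for the dup-cursor read.
use alloy_primitives::{keccak256, Address, B256, U256};
use std::collections::HashMap;
/// Sketch: resolve the (old, new) value pair for one changeset row.
/// Output stays keyed by the plain slot; only the lookup keys are hashed.
fn old_and_new_value(
    address: Address,
    plain_slot: B256,
    old_value: U256,
    read_hashed_storage: impl Fn(B256, B256) -> Option<U256>,
    out: &mut HashMap<B256, (U256, U256)>,
) {
    let hashed_address = keccak256(address);
    let hashed_slot = keccak256(plain_slot);
    // Missing entries default to zero, mirroring `unwrap_or_default()` above.
    let new_value = read_hashed_storage(hashed_address, hashed_slot).unwrap_or_default();
    out.entry(plain_slot).or_insert((old_value, new_value));
}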
@@ -1499,7 +1510,7 @@ impl<TX: DbTx, N: NodeTypes> StorageChangeSetReader for DatabaseProvider<TX, N>
fn storage_changeset(
&self,
block_number: BlockNumber,
) -> ProviderResult<Vec<(BlockNumberAddress, ChangesetEntry)>> {
) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
if self.cached_storage_settings().storage_v2 {
self.static_file_provider.storage_changeset(block_number)
} else {
@@ -1510,13 +1521,7 @@ impl<TX: DbTx, N: NodeTypes> StorageChangeSetReader for DatabaseProvider<TX, N>
.walk_range(storage_range)?
.map(|r| {
let (bna, entry) = r?;
Ok((
bna,
ChangesetEntry {
key: StorageSlotKey::plain(entry.key),
value: entry.value,
},
))
Ok((bna, entry))
})
.collect()
}
@@ -1527,7 +1532,7 @@ impl<TX: DbTx, N: NodeTypes> StorageChangeSetReader for DatabaseProvider<TX, N>
block_number: BlockNumber,
address: Address,
storage_key: B256,
) -> ProviderResult<Option<ChangesetEntry>> {
) -> ProviderResult<Option<StorageEntry>> {
if self.cached_storage_settings().storage_v2 {
self.static_file_provider.get_storage_before_block(block_number, address, storage_key)
} else {
@@ -1535,18 +1540,14 @@ impl<TX: DbTx, N: NodeTypes> StorageChangeSetReader for DatabaseProvider<TX, N>
.tx
.cursor_dup_read::<tables::StorageChangeSets>()?
.seek_by_key_subkey(BlockNumberAddress((block_number, address)), storage_key)?
.filter(|entry| entry.key == storage_key)
.map(|entry| ChangesetEntry {
key: StorageSlotKey::plain(entry.key),
value: entry.value,
}))
.filter(|entry| entry.key == storage_key))
}
}
fn storage_changesets_range(
&self,
range: impl RangeBounds<BlockNumber>,
) -> ProviderResult<Vec<(BlockNumberAddress, ChangesetEntry)>> {
) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
if self.cached_storage_settings().storage_v2 {
self.static_file_provider.storage_changesets_range(range)
} else {
@@ -1555,13 +1556,7 @@ impl<TX: DbTx, N: NodeTypes> StorageChangeSetReader for DatabaseProvider<TX, N>
.walk_range(BlockNumberAddressRange::from(range))?
.map(|r| {
let (bna, entry) = r?;
Ok((
bna,
ChangesetEntry {
key: StorageSlotKey::plain(entry.key),
value: entry.value,
},
))
Ok((bna, entry))
})
.collect()
}
@@ -2306,7 +2301,7 @@ impl<TX: DbTx + 'static, N: NodeTypes> StorageReader for DatabaseProvider<TX, N>
BTreeMap::new(),
|mut accounts: BTreeMap<Address, BTreeSet<B256>>, entry| {
let (BlockNumberAddress((_, address)), storage_entry) = entry;
accounts.entry(address).or_default().insert(storage_entry.key.as_b256());
accounts.entry(address).or_default().insert(storage_entry.key);
Ok(accounts)
},
)
@@ -2336,7 +2331,7 @@ impl<TX: DbTx + 'static, N: NodeTypes> StorageReader for DatabaseProvider<TX, N>
BTreeMap::new(),
|mut storages: BTreeMap<(Address, B256), Vec<u64>>, (index, storage)| {
storages
.entry((index.address(), storage.key.as_b256()))
.entry((index.address(), storage.key))
.or_default()
.push(index.block_number());
Ok(storages)
@@ -2495,15 +2490,11 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
first_block: BlockNumber,
config: StateWriteConfig,
) -> ProviderResult<()> {
let use_hashed_state = self.cached_storage_settings().use_hashed_state();
// Write storage changes
if config.write_storage_changesets {
tracing::trace!("Writing storage changes");
let mut storages_cursor =
self.tx_ref().cursor_dup_write::<tables::PlainStorageState>()?;
let mut hashed_storages_cursor =
self.tx_ref().cursor_dup_write::<tables::HashedStorages>()?;
for (block_index, mut storage_changes) in reverts.storage.into_iter().enumerate() {
let block_number = first_block + block_index as BlockNumber;
@@ -2516,9 +2507,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
for PlainStorageRevert { address, wiped, storage_revert } in storage_changes {
let mut storage = storage_revert
.into_iter()
.map(|(k, v)| {
(StorageSlotKey::from_u256(k).to_changeset_key(use_hashed_state), v)
})
.map(|(k, v)| (B256::from(k.to_be_bytes()), v))
.collect::<Vec<_>>();
// sort storage slots by key.
storage.par_sort_unstable_by_key(|a| a.0);
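A minimal sketch of the changeset key written above, assuming only `alloy_primitives`: the plain slot index is serialized as big-endian bytes and stored as-is, with no `keccak256` applied at write time.
use alloy_primitives::{B256, U256};
/// Sketch: plain changeset key for a storage slot index.
fn plain_changeset_key(slot: U256) -> B256 {
    // e.g. slot 1 becomes 0x00..01, not keccak256(0x00..01).
    B256::from(slot.to_be_bytes())
}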
@@ -2527,29 +2516,13 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
// storage state has to be taken from the database and written to storage
// history. See [StorageWipe::Primary] for more details.
//
// When `use_hashed_state` is enabled, we read from `HashedStorages`
// instead of `PlainStorageState`. The hashed entries already have
// `keccak256(slot)` keys which is exactly the format needed for hashed
// changesets (static file changesets always use hashed keys when
// `use_hashed_state` is true).
//
// TODO(mediocregopher): This could be rewritten in a way which doesn't
// require collecting wiped entries into a Vec like this, see
// `write_storage_trie_changesets`.
let mut wiped_storage = Vec::new();
if wiped {
tracing::trace!(?address, "Wiping storage");
if use_hashed_state {
let hashed_address = keccak256(address);
if let Some((_, entry)) =
hashed_storages_cursor.seek_exact(hashed_address)?
{
wiped_storage.push((entry.key, entry.value));
while let Some(entry) = hashed_storages_cursor.next_dup_val()? {
wiped_storage.push((entry.key, entry.value))
}
}
} else if let Some((_, entry)) = storages_cursor.seek_exact(address)? {
if let Some((_, entry)) = storages_cursor.seek_exact(address)? {
wiped_storage.push((entry.key, entry.value));
while let Some(entry) = storages_cursor.next_dup_val()? {
wiped_storage.push((entry.key, entry.value))
@@ -2597,9 +2570,6 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
changes.storage.par_sort_by_key(|a| a.address);
changes.contracts.par_sort_by_key(|a| a.0);
// When use_hashed_state is enabled, skip plain state writes for accounts and storage.
// The hashed state is already written by the separate `write_hashed_state()` call.
// Bytecode writes remain unconditional since Bytecodes is not a plain/hashed table.
if !self.cached_storage_settings().use_hashed_state() {
// Write new account state
tracing::trace!(len = changes.accounts.len(), "Writing new account state");
@@ -2740,12 +2710,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
changeset_writer.prune_storage_changesets(block)?;
changesets
} else {
self.take::<tables::StorageChangeSets>(storage_range)?
.into_iter()
.map(|(k, v)| {
(k, ChangesetEntry { key: StorageSlotKey::plain(v.key), value: v.value })
})
.collect()
self.take::<tables::StorageChangeSets>(storage_range)?.into_iter().collect()
};
let account_changeset = if self.cached_storage_settings().storage_v2 {
let changesets = self.account_changesets_range(range)?;
@@ -2781,11 +2746,12 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
for (storage_key, (old_storage_value, _new_storage_value)) in storage {
let hashed_address = keccak256(address);
let hashed_storage_key = keccak256(storage_key);
let storage_entry =
StorageEntry { key: *storage_key, value: *old_storage_value };
StorageEntry { key: hashed_storage_key, value: *old_storage_value };
if hashed_storage_cursor
.seek_by_key_subkey(hashed_address, *storage_key)?
.filter(|s| s.key == *storage_key)
.seek_by_key_subkey(hashed_address, hashed_storage_key)?
.filter(|s| s.key == hashed_storage_key)
.is_some()
{
hashed_storage_cursor.delete_current()?
@@ -2898,12 +2864,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
changeset_writer.prune_storage_changesets(block)?;
changesets
} else {
self.take::<tables::StorageChangeSets>(storage_range)?
.into_iter()
.map(|(k, v)| {
(k, ChangesetEntry { key: StorageSlotKey::plain(v.key), value: v.value })
})
.collect()
self.take::<tables::StorageChangeSets>(storage_range)?.into_iter().collect()
};
// if there are static files for this segment, prune them.
@@ -2950,11 +2911,12 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
for (storage_key, (old_storage_value, _new_storage_value)) in storage {
let hashed_address = keccak256(address);
let hashed_storage_key = keccak256(storage_key);
let storage_entry =
StorageEntry { key: *storage_key, value: *old_storage_value };
StorageEntry { key: hashed_storage_key, value: *old_storage_value };
if hashed_storage_cursor
.seek_by_key_subkey(hashed_address, *storage_key)?
.filter(|s| s.key == *storage_key)
.seek_by_key_subkey(hashed_address, hashed_storage_key)?
.filter(|s| s.key == hashed_storage_key)
.is_some()
{
hashed_storage_cursor.delete_current()?
@@ -3212,13 +3174,13 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypes> HashingWriter for DatabaseProvi
fn unwind_storage_hashing(
&self,
changesets: impl Iterator<Item = (BlockNumberAddress, ChangesetEntry)>,
changesets: impl Iterator<Item = (BlockNumberAddress, StorageEntry)>,
) -> ProviderResult<B256Map<BTreeSet<B256>>> {
// Aggregate all block changesets and make list of accounts that have been changed.
let mut hashed_storages = changesets
.into_iter()
.map(|(BlockNumberAddress((_, address)), storage_entry)| {
let hashed_key = storage_entry.key.to_hashed();
let hashed_key = keccak256(storage_entry.key);
(keccak256(address), hashed_key, storage_entry.value)
})
.collect::<Vec<_>>();
@@ -3361,13 +3323,11 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypes> HistoryWriter for DatabaseProvi
fn unwind_storage_history_indices(
&self,
changesets: impl Iterator<Item = (BlockNumberAddress, ChangesetEntry)>,
changesets: impl Iterator<Item = (BlockNumberAddress, StorageEntry)>,
) -> ProviderResult<usize> {
let mut storage_changesets = changesets
.into_iter()
.map(|(BlockNumberAddress((bn, address)), storage)| {
(address, storage.key.as_b256(), bn)
})
.map(|(BlockNumberAddress((bn, address)), storage)| (address, storage.key, bn))
.collect::<Vec<_>>();
storage_changesets.sort_by_key(|(address, key, _)| (*address, *key));
@@ -3699,7 +3659,6 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> BlockWriter
// This is necessary because with edge storage, changesets are written to static files
// whose index isn't updated until commit, making them invisible to subsequent reads
// within the same transaction.
let use_hashed = self.cached_storage_settings().use_hashed_state();
let (account_transitions, storage_transitions) = {
let mut account_transitions: BTreeMap<Address, Vec<u64>> = BTreeMap::new();
let mut storage_transitions: BTreeMap<(Address, B256), Vec<u64>> = BTreeMap::new();
@@ -3708,8 +3667,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> BlockWriter
for (address, account_revert) in block_reverts {
account_transitions.entry(*address).or_default().push(block_number);
for storage_key in account_revert.storage.keys() {
let key =
StorageSlotKey::from_u256(*storage_key).to_changeset_key(use_hashed);
let key = B256::from(storage_key.to_be_bytes());
storage_transitions.entry((*address, key)).or_default().push(block_number);
}
}
@@ -3944,6 +3902,12 @@ impl<TX: Send, N: NodeTypes> StorageSettingsCache for DatabaseProvider<TX, N> {
}
}
impl<TX: Send, N: NodeTypes> StoragePath for DatabaseProvider<TX, N> {
fn storage_path(&self) -> PathBuf {
self.db_path.clone()
}
}
#[cfg(test)]
mod tests {
use super::*;
@@ -4616,7 +4580,8 @@ mod tests {
let address = Address::random();
let hashed_address = keccak256(address);
let slot_key_already_hashed = B256::random();
let plain_slot = B256::random();
let hashed_slot = keccak256(plain_slot);
let current_value = U256::from(100);
let old_value = U256::from(42);
@@ -4626,32 +4591,26 @@ mod tests {
.tx
.cursor_dup_write::<tables::HashedStorages>()
.unwrap()
.upsert(
hashed_address,
&StorageEntry { key: slot_key_already_hashed, value: current_value },
)
.upsert(hashed_address, &StorageEntry { key: hashed_slot, value: current_value })
.unwrap();
let changesets = vec![(
BlockNumberAddress((1, address)),
ChangesetEntry {
key: StorageSlotKey::Hashed(slot_key_already_hashed),
value: old_value,
},
StorageEntry { key: plain_slot, value: old_value },
)];
let result = provider_rw.unwind_storage_hashing(changesets.into_iter()).unwrap();
assert_eq!(result.len(), 1);
assert!(result.contains_key(&hashed_address));
assert!(result[&hashed_address].contains(&slot_key_already_hashed));
assert!(result[&hashed_address].contains(&hashed_slot));
let mut cursor = provider_rw.tx.cursor_dup_read::<tables::HashedStorages>().unwrap();
let entry = cursor
.seek_by_key_subkey(hashed_address, slot_key_already_hashed)
.seek_by_key_subkey(hashed_address, hashed_slot)
.unwrap()
.expect("entry should exist");
assert_eq!(entry.key, slot_key_already_hashed);
assert_eq!(entry.key, hashed_slot);
assert_eq!(entry.value, old_value);
}
@@ -4849,7 +4808,7 @@ mod tests {
let changesets = vec![(
BlockNumberAddress((1, address)),
ChangesetEntry { key: StorageSlotKey::Plain(plain_slot), value: old_value },
StorageEntry { key: plain_slot, value: old_value },
)];
let result = provider_rw.unwind_storage_hashing(changesets.into_iter()).unwrap();
@@ -4964,7 +4923,7 @@ mod tests {
let sf = factory.static_file_provider();
let storage_cs = sf.storage_changeset(1).unwrap();
assert!(!storage_cs.is_empty());
assert_eq!(storage_cs[0].1.key.as_b256(), hashed_slot);
assert_eq!(storage_cs[0].1.key, slot_key);
let account_cs = sf.account_block_changeset(1).unwrap();
assert!(!account_cs.is_empty());
@@ -5170,8 +5129,8 @@ mod tests {
for (_, entry) in &storage_cs {
assert!(
entry.key.is_hashed(),
"v2: static file storage changeset should have hashed slot keys"
entry.key != keccak256(entry.key),
"v2: static file storage changeset should have plain slot keys"
);
}
}
@@ -5192,10 +5151,7 @@ mod tests {
for s in 1..=slots_per_account as u64 {
let slot = U256::from(s + acct_idx as u64 * 100);
let slot_key = B256::from(slot);
let hashed_slot = keccak256(slot_key);
let shards =
rocksdb.storage_history_shards(address, hashed_slot).unwrap();
let shards = rocksdb.storage_history_shards(address, slot_key).unwrap();
assert!(
!shards.is_empty(),
"v2: RocksDB StoragesHistory missing for block {block_num} acct {acct_idx} slot {s}"
@@ -5377,11 +5333,7 @@ mod tests {
let sf = factory.static_file_provider();
let storage_cs = sf.storage_changeset(1).unwrap();
assert!(!storage_cs.is_empty(), "v2: storage changesets should be in static files");
assert_eq!(
storage_cs[0].1.key.as_b256(),
hashed_slot,
"v2: changeset key should be hashed"
);
assert_eq!(storage_cs[0].1.key, slot_key, "v2: changeset key should be plain");
provider_rw.remove_state_above(0).unwrap();
@@ -5414,78 +5366,6 @@ mod tests {
assert_eq!(mdbx_account_cs, 0, "v2: MDBX AccountChangeSets should remain empty");
}
#[test]
fn test_populate_bundle_state_hashed_with_hashed_keys() {
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v2());
let address = Address::with_last_byte(1);
let hashed_address = keccak256(address);
let slot_key = B256::from(U256::from(42));
let hashed_slot = keccak256(slot_key);
let current_value = U256::from(100);
let old_value = U256::from(50);
let provider_rw = factory.provider_rw().unwrap();
provider_rw
.tx
.cursor_write::<tables::HashedAccounts>()
.unwrap()
.upsert(hashed_address, &Account { nonce: 1, balance: U256::ZERO, bytecode_hash: None })
.unwrap();
provider_rw
.tx
.cursor_dup_write::<tables::HashedStorages>()
.unwrap()
.upsert(hashed_address, &StorageEntry { key: hashed_slot, value: current_value })
.unwrap();
let storage_changeset = vec![(
BlockNumberAddress((1, address)),
ChangesetEntry { key: StorageSlotKey::Hashed(hashed_slot), value: old_value },
)];
let account_changeset = vec![(
1u64,
AccountBeforeTx {
address,
info: Some(Account { nonce: 0, balance: U256::ZERO, bytecode_hash: None }),
},
)];
let mut hashed_accounts_cursor =
provider_rw.tx.cursor_read::<tables::HashedAccounts>().unwrap();
let mut hashed_storage_cursor =
provider_rw.tx.cursor_dup_read::<tables::HashedStorages>().unwrap();
let (state, reverts) = provider_rw
.populate_bundle_state_hashed(
account_changeset,
storage_changeset,
&mut hashed_accounts_cursor,
&mut hashed_storage_cursor,
)
.unwrap();
let (_, new_account, storage_map) =
state.get(&address).expect("address should be in state");
assert!(new_account.is_some());
assert_eq!(new_account.unwrap().nonce, 1);
let (old_val, new_val) =
storage_map.get(&hashed_slot).expect("hashed slot should be in storage map");
assert_eq!(*old_val, old_value);
assert_eq!(*new_val, current_value);
let block_reverts = reverts.get(&1).expect("block 1 should have reverts");
let (_, storage_reverts) =
block_reverts.get(&address).expect("address should have reverts");
assert_eq!(storage_reverts.len(), 1);
assert_eq!(storage_reverts[0].key, hashed_slot);
assert_eq!(storage_reverts[0].value, old_value);
}
#[test]
#[cfg(all(unix, feature = "rocksdb"))]
fn test_unwind_storage_history_indices_v2() {
@@ -5494,15 +5374,14 @@ mod tests {
let address = Address::with_last_byte(1);
let slot_key = B256::from(U256::from(42));
let hashed_slot = keccak256(slot_key);
{
let rocksdb = factory.rocksdb_provider();
let mut batch = rocksdb.batch();
batch.append_storage_history_shard(address, hashed_slot, vec![3u64, 7, 10]).unwrap();
batch.append_storage_history_shard(address, slot_key, vec![3u64, 7, 10]).unwrap();
batch.commit().unwrap();
let shards = rocksdb.storage_history_shards(address, hashed_slot).unwrap();
let shards = rocksdb.storage_history_shards(address, slot_key).unwrap();
assert!(!shards.is_empty(), "history should be written to rocksdb");
}
@@ -5511,11 +5390,11 @@ mod tests {
let changesets = vec![
(
BlockNumberAddress((7, address)),
ChangesetEntry { key: StorageSlotKey::Hashed(hashed_slot), value: U256::from(5) },
StorageEntry { key: slot_key, value: U256::from(5) },
),
(
BlockNumberAddress((10, address)),
ChangesetEntry { key: StorageSlotKey::Hashed(hashed_slot), value: U256::from(8) },
StorageEntry { key: slot_key, value: U256::from(8) },
),
];
@@ -5525,7 +5404,7 @@ mod tests {
provider_rw.commit().unwrap();
let rocksdb = factory.rocksdb_provider();
let shards = rocksdb.storage_history_shards(address, hashed_slot).unwrap();
let shards = rocksdb.storage_history_shards(address, slot_key).unwrap();
assert!(
!shards.is_empty(),

View File

@@ -317,10 +317,7 @@ impl RocksDBProvider {
let unique_keys: HashSet<_> = changesets
.into_iter()
.map(|(block_addr, entry)| {
// entry.key is a hashed storage key
(block_addr.address(), entry.key.as_b256(), checkpoint + 1)
})
.map(|(block_addr, entry)| (block_addr.address(), entry.key, checkpoint + 1))
.collect();
let indices: Vec<_> = unique_keys.into_iter().collect();
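A small sketch of the index keys built above, with the changeset rows reduced to hypothetical `(Address, B256)` pairs: one `(address, plain slot, checkpoint + 1)` tuple per unique slot, and the slot key is used as-is rather than hashed.
use alloy_primitives::{Address, B256};
use std::collections::HashSet;
/// Sketch: deduplicate changeset rows into history-index tuples.
fn history_index_keys(
    changesets: impl IntoIterator<Item = (Address, B256)>,
    checkpoint: u64,
) -> Vec<(Address, B256, u64)> {
    let unique: HashSet<_> = changesets
        .into_iter()
        .map(|(address, plain_slot)| (address, plain_slot, checkpoint + 1))
        .collect();
    unique.into_iter().collect()
}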

View File

@@ -2,7 +2,6 @@ use super::metrics::{RocksDBMetrics, RocksDBOperation, ROCKSDB_TABLES};
use crate::providers::{compute_history_rank, needs_prev_shard_check, HistoryInfo};
use alloy_consensus::transaction::TxHashRef;
use alloy_primitives::{
keccak256,
map::{AddressMap, HashMap},
Address, BlockNumber, TxNumber, B256,
};
@@ -1345,9 +1344,8 @@ impl RocksDBProvider {
for revert in storage_block_reverts {
for (slot, _) in revert.storage_revert {
let plain_key = B256::new(slot.to_be_bytes());
let key = keccak256(plain_key);
storage_history
.entry((revert.address, key))
.entry((revert.address, plain_key))
.or_default()
.push(block_number);
}

View File

@@ -11,7 +11,7 @@ use reth_db_api::{
transaction::DbTx,
BlockNumberList,
};
use reth_primitives_traits::{Account, Bytecode, StorageSlotKey};
use reth_primitives_traits::{Account, Bytecode};
use reth_storage_api::{
BlockNumReader, BytecodeReader, DBProvider, NodePrimitivesProvider, StateProofProvider,
StorageChangeSetReader, StorageRootProvider, StorageSettingsCache,
@@ -169,10 +169,12 @@ impl<'b, Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + Block
}
/// Lookup a storage key in the `StoragesHistory` table using `EitherReader`.
///
/// `lookup_key` is always a plain (unhashed) storage key.
pub fn storage_history_lookup(
&self,
address: Address,
storage_key: StorageSlotKey,
lookup_key: B256,
) -> ProviderResult<HistoryInfo>
where
Provider: StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider,
@@ -181,16 +183,6 @@ impl<'b, Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + Block
return Err(ProviderError::StateAtBlockPruned(self.block_number))
}
let lookup_key = if self.provider.cached_storage_settings().use_hashed_state() {
storage_key.to_hashed()
} else {
debug_assert!(
storage_key.is_plain(),
"expected plain storage key when use_hashed_state is false"
);
storage_key.as_b256()
};
self.provider.with_rocksdb_tx(|rocks_tx_ref| {
let mut reader = EitherReader::new_storages_history(self.provider, rocks_tx_ref)?;
reader.storage_history_info(
@@ -205,27 +197,16 @@ impl<'b, Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + Block
/// Resolves a storage value by looking up the given key in history, changesets, or
/// plain state.
///
/// Accepts a [`StorageSlotKey`]; the correct lookup key is derived internally
/// based on the storage mode.
/// `lookup_key` is always a plain (unhashed) storage key.
fn storage_by_lookup_key(
&self,
address: Address,
storage_key: StorageSlotKey,
lookup_key: B256,
) -> ProviderResult<Option<StorageValue>>
where
Provider: StorageSettingsCache + RocksDBProviderFactory + NodePrimitivesProvider,
{
let lookup_key = if self.provider.cached_storage_settings().use_hashed_state() {
storage_key.to_hashed()
} else {
debug_assert!(
storage_key.is_plain(),
"expected plain storage key when use_hashed_state is false"
);
storage_key.as_b256()
};
match self.storage_history_lookup(address, storage_key)? {
match self.storage_history_lookup(address, lookup_key)? {
HistoryInfo::NotYetWritten => Ok(None),
HistoryInfo::InChangeset(changeset_block_number) => self
.provider
@@ -240,11 +221,12 @@ impl<'b, Provider: DBProvider + ChangeSetReader + StorageChangeSetReader + Block
HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => {
if self.provider.cached_storage_settings().use_hashed_state() {
let hashed_address = alloy_primitives::keccak256(address);
let hashed_slot = alloy_primitives::keccak256(lookup_key);
Ok(self
.tx()
.cursor_dup_read::<tables::HashedStorages>()?
.seek_by_key_subkey(hashed_address, lookup_key)?
.filter(|entry| entry.key == lookup_key)
.seek_by_key_subkey(hashed_address, hashed_slot)?
.filter(|entry| entry.key == hashed_slot)
.map(|entry| entry.value)
.or(Some(StorageValue::ZERO)))
} else {
@@ -572,18 +554,7 @@ impl<
address: Address,
storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
self.storage_by_lookup_key(address, StorageSlotKey::plain(storage_key))
}
fn storage_by_hashed_key(
&self,
address: Address,
hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
if !self.provider.cached_storage_settings().use_hashed_state() {
return Err(ProviderError::UnsupportedProvider)
}
self.storage_by_lookup_key(address, StorageSlotKey::hashed(hashed_storage_key))
self.storage_by_lookup_key(address, storage_key)
}
}
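A rough sketch of the resolution order in `storage_by_lookup_key` above, assuming hashed state (v2); `SlotHistory` and the three closures are hypothetical stand-ins for `HistoryInfo` and the real readers. The caller always supplies a plain slot key; hashing happens only in the plain-state fallback.
use alloy_primitives::{keccak256, Address, B256, U256};
/// Hypothetical stand-in for the history lookup result.
enum SlotHistory {
    NotYetWritten,
    InChangeset(u64),
    InPlainState,
}
/// Sketch: history first, then the changeset at the found block,
/// otherwise the hashed-state table with freshly derived hashed keys.
fn resolve_storage(
    address: Address,
    plain_slot: B256,
    history: impl Fn(Address, B256) -> SlotHistory,
    changeset_value: impl Fn(u64, Address, B256) -> Option<U256>,
    hashed_storage: impl Fn(B256, B256) -> Option<U256>,
) -> Option<U256> {
    match history(address, plain_slot) {
        SlotHistory::NotYetWritten => None,
        SlotHistory::InChangeset(block) => changeset_value(block, address, plain_slot),
        SlotHistory::InPlainState => {
            let hashed_address = keccak256(address);
            let hashed_slot = keccak256(plain_slot);
            // Absent entries read as zero, mirroring `.or(Some(StorageValue::ZERO))`.
            Some(hashed_storage(hashed_address, hashed_slot).unwrap_or(U256::ZERO))
        }
    }
}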
@@ -776,7 +747,7 @@ mod tests {
transaction::{DbTx, DbTxMut},
BlockNumberList,
};
use reth_primitives_traits::{Account, StorageEntry, StorageSlotKey};
use reth_primitives_traits::{Account, StorageEntry};
use reth_storage_api::{
BlockHashReader, BlockNumReader, ChangeSetReader, DBProvider, DatabaseProviderFactory,
NodePrimitivesProvider, StorageChangeSetReader, StorageSettingsCache,
@@ -1031,7 +1002,7 @@ mod tests {
Err(ProviderError::StateAtBlockPruned(number)) if number == provider.block_number
));
assert!(matches!(
provider.storage_history_lookup(ADDRESS, StorageSlotKey::plain(STORAGE)),
provider.storage_history_lookup(ADDRESS, STORAGE),
Err(ProviderError::StateAtBlockPruned(number)) if number == provider.block_number
));
@@ -1050,7 +1021,7 @@ mod tests {
Ok(HistoryInfo::MaybeInPlainState)
));
assert!(matches!(
provider.storage_history_lookup(ADDRESS, StorageSlotKey::plain(STORAGE)),
provider.storage_history_lookup(ADDRESS, STORAGE),
Ok(HistoryInfo::MaybeInPlainState)
));
@@ -1069,7 +1040,7 @@ mod tests {
Ok(HistoryInfo::MaybeInPlainState)
));
assert!(matches!(
provider.storage_history_lookup(ADDRESS, StorageSlotKey::plain(STORAGE)),
provider.storage_history_lookup(ADDRESS, STORAGE),
Ok(HistoryInfo::MaybeInPlainState)
));
}
@@ -1333,105 +1304,4 @@ mod tests {
assert!(!needs_prev_shard_check(0, Some(5), 5)); // found_block == block_number
assert!(!needs_prev_shard_check(1, Some(10), 5)); // rank > 0
}
#[test]
fn test_historical_storage_by_hashed_key_unsupported_in_v1() {
let factory = create_test_provider_factory();
assert!(!factory.provider().unwrap().cached_storage_settings().use_hashed_state());
let db = factory.provider().unwrap();
let provider = HistoricalStateProviderRef::new(&db, 1);
assert!(matches!(
provider.storage_by_hashed_key(ADDRESS, STORAGE),
Err(ProviderError::UnsupportedProvider)
));
}
#[test]
#[cfg(all(unix, feature = "rocksdb"))]
fn test_historical_storage_by_hashed_key_v2() {
use crate::BlockWriter;
use alloy_primitives::keccak256;
use reth_db_api::models::StorageSettings;
use reth_execution_types::ExecutionOutcome;
use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams};
use revm_database::BundleState;
use std::collections::HashMap;
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v2());
let slot = U256::from_be_bytes(*STORAGE);
let hashed_storage = keccak256(STORAGE);
let account: revm_state::AccountInfo =
Account { nonce: 1, balance: U256::from(1000), bytecode_hash: None }.into();
let mut rng = generators::rng();
let blocks = random_block_range(
&mut rng,
0..=5,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() },
);
let mut addr_storage = HashMap::default();
addr_storage.insert(slot, (U256::ZERO, U256::from(100)));
type Revert = Vec<(Address, Option<Option<revm_state::AccountInfo>>, Vec<(U256, U256)>)>;
let mut reverts: Vec<Revert> = vec![Vec::new(); 6];
reverts[3] = vec![(ADDRESS, Some(Some(account.clone())), vec![(slot, U256::ZERO)])];
reverts[5] = vec![(ADDRESS, Some(Some(account.clone())), vec![(slot, U256::from(50))])];
let bundle = BundleState::new([(ADDRESS, None, Some(account), addr_storage)], reverts, []);
let provider_rw = factory.provider_rw().unwrap();
provider_rw
.append_blocks_with_state(
blocks
.into_iter()
.map(|b| b.try_recover().expect("failed to seal block with senders"))
.collect(),
&ExecutionOutcome { bundle, first_block: 0, ..Default::default() },
Default::default(),
)
.unwrap();
let hashed_address = keccak256(ADDRESS);
provider_rw
.tx_ref()
.put::<tables::HashedStorages>(
hashed_address,
StorageEntry { key: hashed_storage, value: U256::from(100) },
)
.unwrap();
provider_rw
.tx_ref()
.put::<tables::HashedAccounts>(
hashed_address,
Account { nonce: 1, balance: U256::from(1000), bytecode_hash: None },
)
.unwrap();
provider_rw.commit().unwrap();
let db = factory.provider().unwrap();
assert!(matches!(
HistoricalStateProviderRef::new(&db, 0).storage_by_hashed_key(ADDRESS, hashed_storage),
Ok(None)
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 3).storage_by_hashed_key(ADDRESS, hashed_storage),
Ok(Some(U256::ZERO))
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 4).storage_by_hashed_key(ADDRESS, hashed_storage),
Ok(Some(v)) if v == U256::from(50)
));
assert!(matches!(
HistoricalStateProviderRef::new(&db, 4).storage_by_hashed_key(ADDRESS, STORAGE),
Ok(None | Some(U256::ZERO))
));
}
}

View File

@@ -263,18 +263,6 @@ impl<Provider: DBProvider + BlockHashReader + StorageSettingsCache> StateProvide
Ok(None)
}
}
fn storage_by_hashed_key(
&self,
address: Address,
hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
if self.0.cached_storage_settings().use_hashed_state() {
self.hashed_storage_lookup(alloy_primitives::keccak256(address), hashed_storage_key)
} else {
Err(ProviderError::UnsupportedProvider)
}
}
}
impl<Provider: DBProvider + BlockHashReader> BytecodeReader
@@ -318,7 +306,6 @@ mod tests {
};
use reth_primitives_traits::StorageEntry;
use reth_storage_api::StorageSettingsCache;
use reth_storage_errors::provider::ProviderError;
const fn assert_state_provider<T: StateProvider>() {}
#[expect(dead_code)]
@@ -434,51 +421,4 @@ mod tests {
let provider_ref = LatestStateProviderRef::new(&db);
assert_eq!(provider_ref.storage(address, slot).unwrap(), None);
}
#[test]
fn test_latest_storage_by_hashed_key_v2() {
let factory = create_test_provider_factory();
factory.set_storage_settings_cache(StorageSettings::v2());
let address = address!("0x0000000000000000000000000000000000000001");
let slot = b256!("0x0000000000000000000000000000000000000000000000000000000000000001");
let hashed_address = keccak256(address);
let hashed_slot = keccak256(slot);
let tx = factory.provider_rw().unwrap().into_tx();
tx.put::<tables::HashedStorages>(
hashed_address,
StorageEntry { key: hashed_slot, value: U256::from(42) },
)
.unwrap();
tx.commit().unwrap();
let db = factory.provider().unwrap();
let provider_ref = LatestStateProviderRef::new(&db);
assert_eq!(
provider_ref.storage_by_hashed_key(address, hashed_slot).unwrap(),
Some(U256::from(42))
);
assert_eq!(provider_ref.storage_by_hashed_key(address, slot).unwrap(), None);
}
#[test]
fn test_latest_storage_by_hashed_key_unsupported_in_v1() {
let factory = create_test_provider_factory();
assert!(!factory.provider().unwrap().cached_storage_settings().use_hashed_state());
let address = address!("0x0000000000000000000000000000000000000001");
let slot = b256!("0x0000000000000000000000000000000000000000000000000000000000000001");
let db = factory.provider().unwrap();
let provider_ref = LatestStateProviderRef::new(&db);
assert!(matches!(
provider_ref.storage_by_hashed_key(address, slot),
Err(ProviderError::UnsupportedProvider)
));
}
}

View File

@@ -34,7 +34,7 @@ use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION};
use reth_node_types::NodePrimitives;
use reth_primitives_traits::{
dashmap::DashMap, AlloyBlockHeader as _, BlockBody as _, RecoveredBlock, SealedHeader,
SignedTransaction, StorageSlotKey,
SignedTransaction, StorageEntry,
};
use reth_prune_types::PruneSegment;
use reth_stages_types::PipelineTarget;
@@ -43,7 +43,7 @@ use reth_static_file_types::{
SegmentRangeInclusive, StaticFileMap, StaticFileSegment, DEFAULT_BLOCKS_PER_STATIC_FILE,
};
use reth_storage_api::{
BlockBodyIndicesProvider, ChangeSetReader, ChangesetEntry, DBProvider, PruneCheckpointReader,
BlockBodyIndicesProvider, ChangeSetReader, DBProvider, PruneCheckpointReader,
StorageChangeSetReader, StorageSettingsCache,
};
use reth_storage_errors::provider::{ProviderError, ProviderResult, StaticFileWriterError};
@@ -643,7 +643,7 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
revert.storage_revert.into_iter().map(move |(key, revert_to_slot)| {
StorageBeforeTx {
address: revert.address,
key: StorageSlotKey::from_u256(key).to_hashed(),
key: B256::from(key.to_be_bytes()),
value: revert_to_slot.to_previous_value(),
}
})
@@ -2520,7 +2520,7 @@ impl<N: NodePrimitives> StorageChangeSetReader for StaticFileProvider<N> {
fn storage_changeset(
&self,
block_number: BlockNumber,
) -> ProviderResult<Vec<(BlockNumberAddress, ChangesetEntry)>> {
) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
let provider = match self.get_segment_provider_for_block(
StaticFileSegment::StorageChangeSets,
block_number,
@@ -2538,10 +2538,7 @@ impl<N: NodePrimitives> StorageChangeSetReader for StaticFileProvider<N> {
for i in offset.changeset_range() {
if let Some(change) = cursor.get_one::<StorageChangesetMask>(i.into())? {
let block_address = BlockNumberAddress((block_number, change.address));
let entry = ChangesetEntry {
key: StorageSlotKey::hashed(change.key),
value: change.value,
};
let entry = StorageEntry { key: change.key, value: change.value };
changeset.push((block_address, entry));
}
}
@@ -2556,7 +2553,7 @@ impl<N: NodePrimitives> StorageChangeSetReader for StaticFileProvider<N> {
block_number: BlockNumber,
address: Address,
storage_key: B256,
) -> ProviderResult<Option<ChangesetEntry>> {
) -> ProviderResult<Option<StorageEntry>> {
let provider = match self.get_segment_provider_for_block(
StaticFileSegment::StorageChangeSets,
block_number,
@@ -2605,10 +2602,7 @@ impl<N: NodePrimitives> StorageChangeSetReader for StaticFileProvider<N> {
.get_one::<StorageChangesetMask>(low.into())?
.filter(|change| change.address == address && change.key == storage_key)
{
return Ok(Some(ChangesetEntry {
key: StorageSlotKey::hashed(change.key),
value: change.value,
}));
return Ok(Some(StorageEntry { key: change.key, value: change.value }));
}
Ok(None)
@@ -2617,7 +2611,7 @@ impl<N: NodePrimitives> StorageChangeSetReader for StaticFileProvider<N> {
fn storage_changesets_range(
&self,
range: impl RangeBounds<BlockNumber>,
) -> ProviderResult<Vec<(BlockNumberAddress, ChangesetEntry)>> {
) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
let range = self.bound_range(range, StaticFileSegment::StorageChangeSets);
self.walk_storage_changeset_range(range).collect()
}

View File

@@ -1170,13 +1170,13 @@ mod tests {
let result = sf_rw.get_storage_before_block(0, test_address, test_key).unwrap();
assert!(result.is_some());
let entry = result.unwrap();
assert_eq!(entry.key.as_b256(), test_key);
assert_eq!(entry.key, test_key);
assert_eq!(entry.value, U256::ZERO);
let result = sf_rw.get_storage_before_block(2, test_address, test_key).unwrap();
assert!(result.is_some());
let entry = result.unwrap();
assert_eq!(entry.key.as_b256(), test_key);
assert_eq!(entry.key, test_key);
assert_eq!(entry.value, U256::from(9));
let result = sf_rw.get_storage_before_block(1, test_address, test_key).unwrap();
@@ -1188,7 +1188,7 @@ mod tests {
let result = sf_rw.get_storage_before_block(1, other_address, other_key).unwrap();
assert!(result.is_some());
let entry = result.unwrap();
assert_eq!(entry.key.as_b256(), other_key);
assert_eq!(entry.key, other_key);
}
}
@@ -1334,20 +1334,20 @@ mod tests {
let result = sf_rw.get_storage_before_block(block_num, address, keys[0]).unwrap();
assert!(result.is_some());
let entry = result.unwrap();
assert_eq!(entry.key.as_b256(), keys[0]);
assert_eq!(entry.key, keys[0]);
assert_eq!(entry.value, U256::from(0));
let result =
sf_rw.get_storage_before_block(block_num, address, keys[num_slots - 1]).unwrap();
assert!(result.is_some());
let entry = result.unwrap();
assert_eq!(entry.key.as_b256(), keys[num_slots - 1]);
assert_eq!(entry.key, keys[num_slots - 1]);
let mid = num_slots / 2;
let result = sf_rw.get_storage_before_block(block_num, address, keys[mid]).unwrap();
assert!(result.is_some());
let entry = result.unwrap();
assert_eq!(entry.key.as_b256(), keys[mid]);
assert_eq!(entry.key, keys[mid]);
let missing_key = B256::with_last_byte(255);
let result = sf_rw.get_storage_before_block(block_num, address, missing_key).unwrap();
@@ -1356,7 +1356,7 @@ mod tests {
for i in (0..num_slots).step_by(10) {
let result = sf_rw.get_storage_before_block(block_num, address, keys[i]).unwrap();
assert!(result.is_some());
assert_eq!(result.unwrap().key.as_b256(), keys[i]);
assert_eq!(result.unwrap().key, keys[i]);
}
}
}

View File

@@ -28,12 +28,12 @@ use reth_ethereum_primitives::EthPrimitives;
use reth_execution_types::ExecutionOutcome;
use reth_primitives_traits::{
Account, Block, BlockBody, Bytecode, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader,
SignerRecoverable,
SignerRecoverable, StorageEntry,
};
use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment};
use reth_stages_types::{StageCheckpoint, StageId};
use reth_storage_api::{
BlockBodyIndicesProvider, BytecodeReader, ChangesetEntry, DBProvider, DatabaseProviderFactory,
BlockBodyIndicesProvider, BytecodeReader, DBProvider, DatabaseProviderFactory,
HashedPostStateProvider, NodePrimitivesProvider, StageCheckpointReader, StateProofProvider,
StorageChangeSetReader, StorageRootProvider, StorageSettingsCache,
};
@@ -883,14 +883,6 @@ where
let lock = self.accounts.lock();
Ok(lock.get(&account).and_then(|account| account.storage.get(&storage_key)).copied())
}
fn storage_by_hashed_key(
&self,
_address: Address,
_hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
Ok(None)
}
}
impl<T, ChainSpec> BytecodeReader for MockEthProvider<T, ChainSpec>
@@ -1029,7 +1021,7 @@ impl<T: NodePrimitives, ChainSpec: Send + Sync> StorageChangeSetReader
fn storage_changeset(
&self,
_block_number: BlockNumber,
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, ChangesetEntry)>> {
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, StorageEntry)>> {
Ok(Vec::default())
}
@@ -1038,14 +1030,14 @@ impl<T: NodePrimitives, ChainSpec: Send + Sync> StorageChangeSetReader
_block_number: BlockNumber,
_address: Address,
_storage_key: B256,
) -> ProviderResult<Option<ChangesetEntry>> {
) -> ProviderResult<Option<StorageEntry>> {
Ok(None)
}
fn storage_changesets_range(
&self,
_range: impl RangeBounds<BlockNumber>,
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, ChangesetEntry)>> {
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, StorageEntry)>> {
Ok(Vec::default())
}

View File

@@ -1091,14 +1091,6 @@ where
})
}
fn storage_by_hashed_key(
&self,
_address: Address,
_hashed_storage_key: StorageKey,
) -> Result<Option<U256>, ProviderError> {
Err(ProviderError::UnsupportedProvider)
}
fn account_code(&self, addr: &Address) -> Result<Option<Bytecode>, ProviderError> {
self.block_on_async(async {
let code = self

View File

@@ -1,4 +1,3 @@
use crate::ChangesetEntry;
use alloc::collections::{BTreeMap, BTreeSet};
use alloy_primitives::{map::B256Map, Address, BlockNumber, B256};
use auto_impl::auto_impl;
@@ -48,7 +47,7 @@ pub trait HashingWriter: Send {
/// Mapping of hashed keys of updated accounts to their respective updated hashed slots.
fn unwind_storage_hashing(
&self,
changesets: impl Iterator<Item = (BlockNumberAddress, ChangesetEntry)>,
changesets: impl Iterator<Item = (BlockNumberAddress, StorageEntry)>,
) -> ProviderResult<B256Map<BTreeSet<B256>>>;
/// Unwind and clear storage hashing in a given block range.

View File

@@ -1,9 +1,9 @@
use crate::ChangesetEntry;
use alloy_primitives::{Address, BlockNumber, B256};
use auto_impl::auto_impl;
use core::ops::{RangeBounds, RangeInclusive};
use reth_db_api::models::BlockNumberAddress;
use reth_db_models::AccountBeforeTx;
use reth_primitives_traits::StorageEntry;
use reth_storage_errors::provider::ProviderResult;
/// History Writer
@@ -36,7 +36,7 @@ pub trait HistoryWriter: Send {
/// Returns number of changesets walked.
fn unwind_storage_history_indices(
&self,
changesets: impl Iterator<Item = (BlockNumberAddress, ChangesetEntry)>,
changesets: impl Iterator<Item = (BlockNumberAddress, StorageEntry)>,
) -> ProviderResult<usize>;
/// Unwind and clear storage history indices in a given block range.

View File

@@ -98,6 +98,8 @@ pub use header_sync_gap::HeaderSyncGapProvider;
#[cfg(feature = "db-api")]
pub mod metadata;
#[cfg(all(feature = "db-api", feature = "std"))]
pub use metadata::StoragePath;
#[cfg(feature = "db-api")]
pub use metadata::{MetadataProvider, MetadataWriter, StorageSettingsCache};
#[cfg(feature = "db-api")]

View File

@@ -41,7 +41,6 @@ macro_rules! delegate_provider_impls {
}
StateProvider $(where [$($generics)*])? {
fn storage(&self, account: alloy_primitives::Address, storage_key: alloy_primitives::StorageKey) -> reth_storage_api::errors::provider::ProviderResult<Option<alloy_primitives::StorageValue>>;
fn storage_by_hashed_key(&self, address: alloy_primitives::Address, hashed_storage_key: alloy_primitives::StorageKey) -> reth_storage_api::errors::provider::ProviderResult<Option<alloy_primitives::StorageValue>>;
}
BytecodeReader $(where [$($generics)*])? {
fn bytecode_by_hash(&self, code_hash: &alloy_primitives::B256) -> reth_storage_api::errors::provider::ProviderResult<Option<reth_primitives_traits::Bytecode>>;

View File

@@ -56,3 +56,10 @@ pub trait StorageSettingsCache: Send {
/// [`MetadataWriter::write_storage_settings`]
fn set_storage_settings_cache(&self, settings: StorageSettings);
}
/// Trait for accessing the database directory path.
#[cfg(feature = "std")]
pub trait StoragePath: Send {
/// Returns the path to the database directory (e.g. `<datadir>/db`).
fn storage_path(&self) -> std::path::PathBuf;
}
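A minimal usage sketch for the trait added above; the trait is restated so the snippet stands alone, and the subdirectory name is purely illustrative rather than taken from this change.
use std::path::PathBuf;
/// Restated mirror of `StoragePath` so the sketch is self-contained.
trait StoragePath: Send {
    fn storage_path(&self) -> PathBuf;
}
/// Hypothetical consumer: derive a path that lives next to the database
/// directory returned by the provider.
fn sibling_dir(provider: &impl StoragePath, name: &str) -> PathBuf {
    provider.storage_path().join(name)
}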

View File

@@ -413,7 +413,9 @@ impl<C: Send + Sync, N: NodePrimitives> StorageChangeSetReader for NoopProvider<
fn storage_changeset(
&self,
_block_number: BlockNumber,
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, crate::ChangesetEntry)>> {
) -> ProviderResult<
Vec<(reth_db_api::models::BlockNumberAddress, reth_primitives_traits::StorageEntry)>,
> {
Ok(Vec::default())
}
@@ -422,14 +424,16 @@ impl<C: Send + Sync, N: NodePrimitives> StorageChangeSetReader for NoopProvider<
_block_number: BlockNumber,
_address: Address,
_storage_key: B256,
) -> ProviderResult<Option<crate::ChangesetEntry>> {
) -> ProviderResult<Option<reth_primitives_traits::StorageEntry>> {
Ok(None)
}
fn storage_changesets_range(
&self,
_range: impl core::ops::RangeBounds<BlockNumber>,
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, crate::ChangesetEntry)>> {
) -> ProviderResult<
Vec<(reth_db_api::models::BlockNumberAddress, reth_primitives_traits::StorageEntry)>,
> {
Ok(Vec::default())
}
@@ -538,14 +542,6 @@ impl<C: Send + Sync, N: NodePrimitives> StateProvider for NoopProvider<C, N> {
) -> ProviderResult<Option<StorageValue>> {
Ok(None)
}
fn storage_by_hashed_key(
&self,
_account: Address,
_hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>> {
Err(ProviderError::UnsupportedProvider)
}
}
impl<C: Send + Sync, N: NodePrimitives> BytecodeReader for NoopProvider<C, N> {

View File

@@ -41,27 +41,12 @@ pub trait StateProvider:
+ HashedPostStateProvider
{
/// Get storage of given account.
///
/// When `use_hashed_state` is enabled, the `account` and `storage_key` are hashed internally
/// before lookup. Callers must pass **unhashed** (plain) values.
fn storage(
&self,
account: Address,
storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>>;
/// Get storage using a pre-hashed storage key.
///
/// Unlike [`Self::storage`], `hashed_storage_key` must already be keccak256-hashed.
/// The `address` remains unhashed (plain) since history indices are keyed by plain address.
/// This is used when changeset keys are pre-hashed (e.g., `use_hashed_state` mode)
/// to avoid double-hashing.
fn storage_by_hashed_key(
&self,
address: Address,
hashed_storage_key: StorageKey,
) -> ProviderResult<Option<StorageValue>>;
/// Get account code by its address.
///
/// Returns `None` if the account doesn't exist or account is not a contract

View File

@@ -2,38 +2,11 @@ use alloc::{
collections::{BTreeMap, BTreeSet},
vec::Vec,
};
use alloy_primitives::{Address, BlockNumber, B256, U256};
use alloy_primitives::{Address, BlockNumber, B256};
use core::ops::RangeInclusive;
use reth_primitives_traits::{StorageEntry, StorageSlotKey};
use reth_primitives_traits::StorageEntry;
use reth_storage_errors::provider::ProviderResult;
/// A storage changeset entry whose key is tagged as [`StorageSlotKey::Plain`] or
/// [`StorageSlotKey::Hashed`] by the reader that produced it.
///
/// Unlike [`StorageEntry`] (the raw DB row type with an untagged `B256` key),
/// this type carries provenance so downstream code can call
/// [`StorageSlotKey::to_hashed`] without consulting `StorageSettings`.
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct ChangesetEntry {
/// Storage slot key, tagged with its hashing status.
pub key: StorageSlotKey,
/// Value at this storage slot before the change.
pub value: U256,
}
impl ChangesetEntry {
/// Convert to a raw [`StorageEntry`] (drops the tag).
pub const fn into_storage_entry(self) -> StorageEntry {
StorageEntry { key: self.key.as_b256(), value: self.value }
}
}
impl From<ChangesetEntry> for StorageEntry {
fn from(e: ChangesetEntry) -> Self {
e.into_storage_entry()
}
}
/// Storage reader
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait StorageReader: Send {
@@ -64,35 +37,26 @@ pub trait StorageReader: Send {
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait StorageChangeSetReader: Send {
/// Iterate over storage changesets and return the storage state from before this block.
///
/// Returned entries have their keys tagged as [`StorageSlotKey::Plain`] or
/// [`StorageSlotKey::Hashed`] based on the current storage mode.
fn storage_changeset(
&self,
block_number: BlockNumber,
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, ChangesetEntry)>>;
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, StorageEntry)>>;
/// Search the block's changesets for the given address and storage key, and return the result.
///
/// The `storage_key` must match the key format used by the storage mode
/// (plain in v1, keccak256-hashed in v2).
///
/// Returns `None` if the storage slot was not changed in this block.
fn get_storage_before_block(
&self,
block_number: BlockNumber,
address: Address,
storage_key: B256,
) -> ProviderResult<Option<ChangesetEntry>>;
) -> ProviderResult<Option<StorageEntry>>;
/// Get all storage changesets in a range of blocks.
///
/// Returned entries have their keys tagged as [`StorageSlotKey::Plain`] or
/// [`StorageSlotKey::Hashed`] based on the current storage mode.
fn storage_changesets_range(
&self,
range: impl core::ops::RangeBounds<BlockNumber>,
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, ChangesetEntry)>>;
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, StorageEntry)>>;
/// Get the total count of all storage changes.
fn storage_changeset_count(&self) -> ProviderResult<usize>;
@@ -109,7 +73,7 @@ pub trait StorageChangeSetReader: Send {
.into_iter()
.map(|(block_address, entry)| reth_db_models::StorageBeforeTx {
address: block_address.address(),
key: entry.key.as_b256(),
key: entry.key,
value: entry.value,
})
.collect()

View File

@@ -19,11 +19,6 @@ use reth_trie::{
/// Load prefix sets using a provider that implements [`ChangeSetReader`]. This function can read
/// changesets from both static files and database.
///
/// Storage keys from changesets are tagged as
/// [`Plain`](reth_primitives_traits::StorageSlotKey::Plain)
/// or [`Hashed`](reth_primitives_traits::StorageSlotKey::Hashed) by the reader, so callers need
/// not pass a `use_hashed_state` flag. Addresses are always hashed.
pub fn load_prefix_sets_with_provider<Provider>(
provider: &Provider,
range: RangeInclusive<BlockNumber>,
@@ -61,7 +56,7 @@ where
storage_prefix_sets
.entry(hashed_address)
.or_default()
.insert(Nibbles::unpack(storage_entry.key.to_hashed()));
.insert(Nibbles::unpack(keccak256(storage_entry.key)));
}
Ok(TriePrefixSets {

View File

@@ -145,11 +145,6 @@ pub trait DatabaseStateRoot<'a, TX>: Sized {
pub trait DatabaseHashedPostState: Sized {
/// Initializes [`HashedPostStateSorted`] from reverts. Iterates over state reverts in the
/// specified range and aggregates them into sorted hashed state.
///
/// Storage keys from changesets are tagged as
/// [`Plain`](reth_primitives_traits::StorageSlotKey::Plain) or
/// [`Hashed`](reth_primitives_traits::StorageSlotKey::Hashed) by the reader, so no
/// `use_hashed_state` flag is needed. Addresses are always hashed.
fn from_reverts(
provider: &(impl ChangeSetReader + StorageChangeSetReader + BlockNumReader + DBProvider),
range: impl RangeBounds<BlockNumber>,
@@ -269,9 +264,6 @@ impl<'a, TX: DbTx, A: crate::TrieTableAdapter> DatabaseStateRoot<'a, TX>
}
/// Calls [`HashedPostStateSorted::from_reverts`].
///
/// This is a convenience wrapper kept for backward compatibility. The storage
/// key tagging is now handled internally by the changeset reader.
pub fn from_reverts_auto(
provider: &(impl ChangeSetReader
+ StorageChangeSetReader
@@ -291,8 +283,7 @@ impl DatabaseHashedPostState for HashedPostStateSorted {
///
/// - Reads the first occurrence of each changed account/storage slot in the range.
/// - Addresses are always keccak256-hashed.
/// - Storage keys are tagged by the changeset reader and hashed via
/// [`StorageSlotKey::to_hashed`](reth_primitives_traits::StorageSlotKey::to_hashed).
/// - Storage keys are always plain and are hashed via `keccak256`.
/// - Returns keys already ordered for trie iteration.
#[instrument(target = "trie::db", skip(provider), fields(range))]
fn from_reverts(
@@ -333,12 +324,12 @@ impl DatabaseHashedPostState for HashedPostStateSorted {
for (BlockNumberAddress((_, address)), storage) in
provider.storage_changesets_range(start..=end_inclusive)?
{
if seen_storage_keys.insert((address, storage.key.as_b256())) {
if seen_storage_keys.insert((address, storage.key)) {
let hashed_address = keccak256(address);
storages
.entry(hashed_address)
.or_default()
.push((storage.key.to_hashed(), storage.value));
.push((keccak256(storage.key), storage.value));
}
}
}
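A compact sketch of the revert aggregation above, assuming only `alloy_primitives` and `std`: the first occurrence of each plain `(address, slot)` pair wins, and `keccak256` is applied only when producing the hashed output keys.
use alloy_primitives::{keccak256, map::B256Map, Address, B256, U256};
use std::collections::HashSet;
/// Sketch: aggregate plain changeset rows into hashed post-state storages.
fn aggregate_reverts(
    changesets: impl IntoIterator<Item = (Address, B256, U256)>,
) -> B256Map<Vec<(B256, U256)>> {
    let mut seen: HashSet<(Address, B256)> = HashSet::new();
    let mut storages: B256Map<Vec<(B256, U256)>> = B256Map::default();
    for (address, plain_slot, old_value) in changesets {
        if seen.insert((address, plain_slot)) {
            storages
                .entry(keccak256(address))
                .or_default()
                .push((keccak256(plain_slot), old_value));
        }
    }
    storages
}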
@@ -598,7 +589,7 @@ mod tests {
.append_storage_changeset(
vec![StorageBeforeTx {
address: address1,
key: hashed_slot2,
key: plain_slot2,
value: U256::from(200),
}],
1,
@@ -608,7 +599,7 @@ mod tests {
.append_storage_changeset(
vec![StorageBeforeTx {
address: address1,
key: hashed_slot1,
key: plain_slot1,
value: U256::from(100),
}],
2,
@@ -618,7 +609,7 @@ mod tests {
.append_storage_changeset(
vec![StorageBeforeTx {
address: address1,
key: hashed_slot1,
key: plain_slot1,
value: U256::from(999),
}],
3,

View File

@@ -47,7 +47,7 @@ where
provider.storage_changesets_range(from..=tip)?
{
if storage_address == address {
let hashed_slot = storage_change.key.to_hashed();
let hashed_slot = keccak256(storage_change.key);
if let hash_map::Entry::Vacant(entry) = storage.storage.entry(hashed_slot) {
entry.insert(storage_change.value);
}
@@ -213,9 +213,9 @@ mod tests {
&factory,
vec![
(0, vec![]),
(1, vec![StorageBeforeTx { address, key: hashed_slot1, value: U256::from(10) }]),
(2, vec![StorageBeforeTx { address, key: hashed_slot2, value: U256::from(20) }]),
(3, vec![StorageBeforeTx { address, key: hashed_slot1, value: U256::from(999) }]),
(1, vec![StorageBeforeTx { address, key: plain_slot1, value: U256::from(10) }]),
(2, vec![StorageBeforeTx { address, key: plain_slot2, value: U256::from(20) }]),
(3, vec![StorageBeforeTx { address, key: plain_slot1, value: U256::from(999) }]),
],
);