{
+ let initial_balance = U256::from(ETH_TO_WEI) * U256::from(1000);
+
+ Arc::new(
+ ChainSpecBuilder::default()
+ .chain(MAINNET.chain)
+ .genesis(Genesis {
+ alloc: [
+ (
+ signer_address,
+ GenesisAccount { balance: initial_balance, ..Default::default() },
+ ),
+ (
+ selfdestruct_contract,
+ GenesisAccount {
+ code: Some(WRITE_OR_SELFDESTRUCT_RUNTIME_CODE),
+ ..Default::default()
+ },
+ ),
+ ]
+ .into(),
+ ..MAINNET.genesis.clone()
+ })
+ .shanghai_activated()
+ .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(30))
+ .build(),
+ )
+}
+
+fn build_execution_header(parent_hash: B256, number: u64, timestamp: u64) -> Header {
+ Header {
+ parent_hash,
+ number,
+ gas_limit: 30_000_000,
+ base_fee_per_gas: Some(INITIAL_BASE_FEE),
+ timestamp,
+ parent_beacon_block_root: (timestamp >= 30).then_some(B256::ZERO),
+ blob_gas_used: (timestamp >= 30).then_some(0),
+ excess_blob_gas: (timestamp >= 30).then_some(0),
+ ..Default::default()
+ }
+}
+
+const fn expected_destroyed_slots() -> [B256; 2] {
+ [B256::with_last_byte(0x01), B256::with_last_byte(0x02)]
+}
+
+fn assert_destroyed_changeset_entries<P>(
+ provider: &P,
+ selfdestruct_contract: Address,
+) -> eyre::Result<()>
+where
+ P: StorageChangeSetReader,
+{
+ let expected = [
+ (B256::with_last_byte(0x01), U256::from(0x2a)),
+ (B256::with_last_byte(0x02), U256::from(0x99)),
+ ];
+ assert_destroyed_changeset_entries_in_block(provider, 2, selfdestruct_contract, &expected)
+}
+
+fn assert_destroyed_changeset_entries_in_block<P>(
+ provider: &P,
+ block: u64,
+ selfdestruct_contract: Address,
+ expected: &[(B256, U256)],
+) -> eyre::Result<()>
+where
+ P: StorageChangeSetReader,
+{
+ let storage_changesets = provider.storage_changesets_range(block..=block)?;
+ let destroyed_entries: Vec<_> = storage_changesets
+ .into_iter()
+ .filter_map(|(key, entry)| {
+ (key.address() == selfdestruct_contract).then_some((entry.key, entry.value))
+ })
+ .collect();
+
+ assert_eq!(
+ destroyed_entries.len(),
+ expected.len(),
+ "expected exactly {} storage changeset entries for destroyed account at block {}",
+ expected.len(),
+ block
+ );
+
+ for (slot, _) in &destroyed_entries {
+ assert_ne!(*slot, keccak256(*slot), "storage changeset key should be plain (not hashed)");
+ }
+
+ for pair in expected {
+ assert!(
+ destroyed_entries.contains(pair),
+ "missing expected storage changeset entry for destroyed account: {:?}",
+ pair
+ );
+ }
+
+ Ok(())
+}
+
+fn create_file_client_from_blocks(blocks: Vec>) -> Arc> {
+ Arc::new(FileClient::from_blocks(blocks))
+}
+
+fn build_pipeline_without_history(
+ provider_factory: TestProviderFactory,
+ header_downloader: H,
+ body_downloader: B,
+ max_block: u64,
+ tip: B256,
+) -> Pipeline
+where
+ H: HeaderDownloader + 'static,
+ B: BodyDownloader + 'static,
+{
+ let consensus = NoopConsensus::arc();
+ let stages_config = StageConfig::default();
+ let evm_config = EthEvmConfig::new(provider_factory.chain_spec());
+
+ let (tip_tx, tip_rx) = watch::channel(B256::ZERO);
+ let static_file_producer =
+ StaticFileProducer::new(provider_factory.clone(), PruneModes::default());
+
+ let stages = OnlineStages::new(
+ provider_factory.clone(),
+ tip_rx,
+ header_downloader,
+ body_downloader,
+ stages_config.clone(),
+ None,
+ )
+ .builder()
+ .add_set(ExecutionStages::new(
+ evm_config,
+ consensus,
+ stages_config,
+ PruneModes::default().sender_recovery,
+ ))
+ .add_set(HashingStages::default())
+ .add_stage(FinishStage::default());
+
+ let pipeline = Pipeline::builder()
+ .with_tip_sender(tip_tx)
+ .with_max_block(max_block)
+ .with_fail_on_unwind(true)
+ .add_stages(stages)
+ .build(provider_factory, static_file_producer);
+ pipeline.set_tip(tip);
+ pipeline
+}
+
+async fn run_pipeline_range(
+ provider_factory: TestProviderFactory,
+ file_client: Arc>,
+ local_head: reth_primitives_traits::SealedHeader,
+ download_range: std::ops::RangeInclusive<u64>,
+ max_block: u64,
+) -> eyre::Result<()> {
+ // Run a narrow range intentionally so the test can assert per-phase behavior.
+ let tip = file_client.tip().expect("tip");
+ let consensus = NoopConsensus::arc();
+ let stages_config = StageConfig::default();
+ let runtime = reth_tasks::Runtime::test();
+
+ let mut header_downloader = ReverseHeadersDownloaderBuilder::new(stages_config.headers)
+ .build(file_client.clone(), consensus.clone())
+ .into_task_with(&runtime);
+ header_downloader.update_local_head(local_head);
+ header_downloader.update_sync_target(SyncTarget::Tip(tip));
+
+ let mut body_downloader = BodiesDownloaderBuilder::new(stages_config.bodies)
+ .build(file_client, consensus, provider_factory.clone())
+ .into_task_with(&runtime);
+ body_downloader.set_download_range(download_range).expect("set download range");
+
+ let pipeline = build_pipeline_without_history(
+ provider_factory,
+ header_downloader,
+ body_downloader,
+ max_block,
+ tip,
+ );
+ let (_pipeline, result) = pipeline.run_as_fut(None).await;
+ result?;
+ Ok(())
+}
+
+/// Builds tiny runtime bytecode that branches on calldata:
+/// - empty calldata: writes two known slots and stops
+/// - non-empty calldata: selfdestructs to `beneficiary` and stops
+///
+/// The known slot/value pairs are used for deterministic assertions in changesets and preimages.
+const WRITE_OR_SELFDESTRUCT_RUNTIME_CODE: Bytes = bytes!(
+ "3615601c57" // CALLDATASIZE; ISZERO; PUSH1 0x1c; JUMPI
+ "737777777777777777777777777777777777777777" // PUSH20 beneficiary
+ "ff00" // SELFDESTRUCT; STOP
+ "5b" // JUMPDEST (0x1c)
+ "602a600155" // SSTORE(1, 0x2a)
+ "6099600255" // SSTORE(2, 0x99)
+ "00" // STOP
+);
+
+/// Builds tiny runtime bytecode with three callvalue-based paths:
+/// - `msg.value == 0`: write slot-set A (`(1, 0x2a)`, `(2, 0x99)`)
+/// - `msg.value == 2`: write slot-set B (`(4, 0xab)`, `(5, 0xcd)`)
+/// - `msg.value == 1`: selfdestruct to `beneficiary`
+///
+/// Used by the recreate-and-write-in-same-block scenario to ensure first and second wipes
+/// restore different slot sets for the same address.
+const WRITE_TWO_SLOT_SETS_OR_SELFDESTRUCT_RUNTIME_CODE: Bytes = bytes!(
+ "34600114602557" // if callvalue == 1 jump selfdestruct
+ "34600214601957" // if callvalue == 2 jump write slot-set B
+ "602a600155" // write slot-set A: SSTORE(1, 0x2a)
+ "6099600255" // write slot-set A: SSTORE(2, 0x99)
+ "00" // STOP
+ "5b" // JUMPDEST (0x19)
+ "60ab600455" // write slot-set B: SSTORE(4, 0xab)
+ "60cd600555" // write slot-set B: SSTORE(5, 0xcd)
+ "00" // STOP
+ "5b" // JUMPDEST (0x25)
+ "737777777777777777777777777777777777777777" // PUSH20 beneficiary
+ "ff00" // SELFDESTRUCT; STOP
+);
+
+/// Builds tiny runtime bytecode with three value-based paths:
+/// - `msg.value == 0`: SSTORE(3, 0x2b)
+/// - `msg.value == 1`: SSTORE(3, 0x07)
+/// - `msg.value == 2`: SELFDESTRUCT to `beneficiary`
+const WRITE_RESTORE_OR_SELFDESTRUCT_RUNTIME_CODE: Bytes = bytes!(
+ "34600214601b57" // if callvalue == 2 jump selfdestruct
+ "34600114601457" // if callvalue == 1 jump restore
+ "602b60035500" // default: SSTORE(3, 0x2b); STOP
+ "5b" // JUMPDEST (0x14)
+ "600760035500" // restore: SSTORE(3, 0x07); STOP
+ "5b" // JUMPDEST (0x1b)
+ "737777777777777777777777777777777777777777" // PUSH20 beneficiary
+ "ff00" // SELFDESTRUCT; STOP
+);
+
+/// Builds tiny runtime bytecode that performs all actions in one call:
+/// SSTORE(3, 0x2b) -> SSTORE(3, 0x07) -> SELFDESTRUCT.
+const WRITE_RESTORE_THEN_SELFDESTRUCT_RUNTIME_CODE: Bytes = bytes!(
+ "602b600355" // SSTORE(3, 0x2b)
+ "6007600355" // SSTORE(3, 0x07)
+ "737777777777777777777777777777777777777777" // PUSH20 beneficiary
+ "ff00" // SELFDESTRUCT; STOP
+);
+
+/// Converts contract runtime bytecode into init code that returns the runtime.
+fn init_code_for_runtime(runtime: &Bytes) -> Bytes {
+ let len = u8::try_from(runtime.len()).expect("runtime too large for PUSH1 init-code helper");
+ let mut init = Vec::with_capacity(12 + runtime.len());
+ init.extend_from_slice(&[
+ 0x60, len, // PUSH1 runtime_len
+ 0x60, 0x0c, // PUSH1 runtime_offset
+ 0x60, 0x00, // PUSH1 mem_offset
+ 0x39, // CODECOPY
+ 0x60, len, // PUSH1 runtime_len
+ 0x60, 0x00, // PUSH1 mem_offset
+ 0xf3, // RETURN
+ ]);
+ init.extend_from_slice(runtime.as_ref());
+ init.into()
+}
+
+/// Runtime bytecode for a minimal CREATE2 factory:
+/// - calldata is treated as init code
+/// - deploys with fixed `salt`
+const CREATE2_FACTORY_RUNTIME_CODE: Bytes = bytes!(
+ "366000600037" // CALLDATACOPY(0, 0, calldatasize)
+ "7f0000000000000000000000000000000000000000000000000000000000000042" // PUSH32 salt
+ "3660006000f500" // CREATE2(0, 0, calldatasize, salt); STOP
+);
+
+fn create2_address(factory: Address, salt: B256, init_code: &Bytes) -> Address {
+ let init_hash = keccak256(init_code.as_ref());
+ let mut preimage = [0_u8; 85];
+ preimage[0] = 0xff;
+ preimage[1..21].copy_from_slice(factory.as_slice());
+ preimage[21..53].copy_from_slice(salt.as_slice());
+ preimage[53..85].copy_from_slice(init_hash.as_slice());
+
+ let hash = keccak256(preimage);
+ Address::from_slice(&hash.as_slice()[12..])
+}
+
+fn assert_preimage_rows(preimage_path: &Path, slots: &[B256]) -> eyre::Result<()> {
+ let mut builder = Environment::builder();
+ builder.set_max_dbs(1);
+ builder.set_flags(EnvironmentFlags {
+ no_sub_dir: false,
+ mode: Mode::ReadOnly,
+ ..Default::default()
+ });
+
+ let env = builder.open(preimage_path)?;
+ let tx = env.begin_ro_txn()?;
+ let db = tx.open_db(None)?;
+
+ for slot in slots {
+ let hashed = keccak256(*slot);
+ let found: Option<[u8; 32]> = tx.get(db.dbi(), hashed.as_slice())?;
+ assert_eq!(
+ found.map(B256::from),
+ Some(*slot),
+ "missing/invalid preimage row for slot {:?}",
+ slot
+ );
+ }
+
+ Ok(())
+}
+
+fn assert_preimage_rows_for_provider<P>(provider: &P, slots: &[B256]) -> eyre::Result<()>
+where
+ P: StoragePath,
+{
+ let preimage_path = provider.storage_path().join("preimage");
+ assert!(
+ preimage_path.exists(),
+ "preimage dir should exist for pre-Cancun execution at {}",
+ preimage_path.display()
+ );
+ assert_preimage_rows(&preimage_path, slots)
+}
diff --git a/crates/storage/db-api/src/database.rs b/crates/storage/db-api/src/database.rs
index 1f8e3e125a..71403505a1 100644
--- a/crates/storage/db-api/src/database.rs
+++ b/crates/storage/db-api/src/database.rs
@@ -3,7 +3,7 @@ use crate::{
transaction::{DbTx, DbTxMut},
DatabaseError,
};
-use std::{fmt::Debug, sync::Arc};
+use std::{fmt::Debug, path::PathBuf, sync::Arc};
/// Main Database trait that can open read-only and read-write transactions.
///
@@ -22,6 +22,9 @@ pub trait Database: Send + Sync + Debug {
#[track_caller]
fn tx_mut(&self) -> Result<Self::TXMut, DatabaseError>;
+ /// Returns the path to the database directory.
+ fn path(&self) -> PathBuf;
+
/// Takes a function and passes a read-only transaction into it, making sure it's closed in the
/// end of the execution.
fn view<T, F>(&self, f: F) -> Result<T, DatabaseError>
@@ -62,6 +65,10 @@ impl<DB: Database> Database for Arc<DB> {
fn tx_mut(&self) -> Result<Self::TXMut, DatabaseError> {
<DB as Database>::tx_mut(self)
}
+
+ fn path(&self) -> PathBuf {
+ <DB as Database>::path(self)
+ }
}
impl<DB: Database> Database for &DB {
@@ -75,4 +82,8 @@ impl Database for &DB {
fn tx_mut(&self) -> Result<Self::TXMut, DatabaseError> {
<DB as Database>::tx_mut(self)
}
+
+ fn path(&self) -> PathBuf {
+ <DB as Database>::path(self)
+ }
}
diff --git a/crates/storage/db-api/src/mock.rs b/crates/storage/db-api/src/mock.rs
index 78a2aec1e1..324f3cddac 100644
--- a/crates/storage/db-api/src/mock.rs
+++ b/crates/storage/db-api/src/mock.rs
@@ -16,7 +16,7 @@ use crate::{
DatabaseError,
};
use core::ops::Bound;
-use std::{collections::BTreeMap, ops::RangeBounds};
+use std::{collections::BTreeMap, ops::RangeBounds, path::PathBuf};
/// Mock database implementation for testing and development.
///
@@ -50,6 +50,10 @@ impl Database for DatabaseMock {
fn tx_mut(&self) -> Result<Self::TXMut, DatabaseError> {
Ok(TxMock::default())
}
+
+ fn path(&self) -> PathBuf {
+ PathBuf::default()
+ }
}
impl DatabaseMetrics for DatabaseMock {}
diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs
index 484b99e517..325a7918de 100644
--- a/crates/storage/db/src/implementation/mdbx/mod.rs
+++ b/crates/storage/db/src/implementation/mdbx/mod.rs
@@ -25,7 +25,7 @@ use reth_tracing::tracing::error;
use std::{
collections::HashMap,
ops::{Deref, Range},
- path::Path,
+ path::{Path, PathBuf},
sync::Arc,
time::{SystemTime, UNIX_EPOCH},
};
@@ -244,6 +244,8 @@ impl DatabaseArguments {
pub struct DatabaseEnv {
/// Libmdbx-sys environment.
inner: Environment,
+ /// Path to the database directory.
+ path: PathBuf,
/// Opened DBIs for reuse.
/// Important: Do not manually close these DBIs, like via `mdbx_dbi_close`.
/// More generally, do not dynamically create, re-open, or drop tables at
@@ -277,6 +279,10 @@ impl Database for DatabaseEnv {
)
.map_err(|e| DatabaseError::InitTx(e.into()))
}
+
+ fn path(&self) -> PathBuf {
+ self.path.clone()
+ }
}
impl DatabaseMetrics for DatabaseEnv {
@@ -508,6 +514,7 @@ impl DatabaseEnv {
let env = Self {
inner: inner_env.open(path).map_err(|e| DatabaseError::Open(e.into()))?,
+ path: path.to_path_buf(),
dbis: Arc::default(),
metrics: None,
_lock_file,
diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs
index 160dc7b08d..97a33b138c 100644
--- a/crates/storage/db/src/lib.rs
+++ b/crates/storage/db/src/lib.rs
@@ -140,6 +140,10 @@ pub mod test_utils {
fn tx_mut(&self) -> Result<Self::TXMut, DatabaseError> {
self.db().tx_mut()
}
+
+ fn path(&self) -> std::path::PathBuf {
+ self.db().path()
+ }
}
impl DatabaseMetrics for TempDatabase {
@@ -241,7 +245,7 @@ mod tests {
// Test that TempDatabase properly cleans up its directory when dropped
let temp_path = {
let db = crate::test_utils::create_test_rw_db();
- let path = db.path().to_path_buf();
+ let path = db.path();
assert!(path.exists(), "Database directory should exist while TempDatabase is alive");
path
// TempDatabase dropped here
diff --git a/crates/storage/provider/src/changeset_walker.rs b/crates/storage/provider/src/changeset_walker.rs
index ba4fe4811c..5eb521e3a7 100644
--- a/crates/storage/provider/src/changeset_walker.rs
+++ b/crates/storage/provider/src/changeset_walker.rs
@@ -5,7 +5,8 @@ use crate::ProviderResult;
use alloy_primitives::BlockNumber;
use reth_db::models::AccountBeforeTx;
use reth_db_api::models::BlockNumberAddress;
-use reth_storage_api::{ChangeSetReader, ChangesetEntry, StorageChangeSetReader};
+use reth_primitives_traits::StorageEntry;
+use reth_storage_api::{ChangeSetReader, StorageChangeSetReader};
use std::ops::{Bound, RangeBounds};
/// Iterator that walks account changesets from static files in a block range.
@@ -109,7 +110,7 @@ pub struct StaticFileStorageChangesetWalker {
/// Current block being processed
current_block: BlockNumber,
/// Changesets for current block
- current_changesets: Vec<(BlockNumberAddress, ChangesetEntry)>,
+ current_changesets: Vec<(BlockNumberAddress, StorageEntry)>,
/// Index within current block's changesets
changeset_index: usize,
}
@@ -143,7 +144,7 @@ impl
Iterator for StaticFileStorageChangesetWalker
where
P: StorageChangeSetReader,
{
- type Item = ProviderResult<(BlockNumberAddress, ChangesetEntry)>;
+ type Item = ProviderResult<(BlockNumberAddress, StorageEntry)>;
fn next(&mut self) -> Option<Self::Item> {
if let Some(changeset) = self.current_changesets.get(self.changeset_index).copied() {
diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs
index 63de5fd845..a9cf4c38f4 100644
--- a/crates/storage/provider/src/providers/blockchain_provider.rs
+++ b/crates/storage/provider/src/providers/blockchain_provider.rs
@@ -23,13 +23,11 @@ use reth_chainspec::ChainInfo;
use reth_db_api::models::{AccountBeforeTx, BlockNumberAddress, StoredBlockBodyIndices};
use reth_execution_types::ExecutionOutcome;
use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy};
-use reth_primitives_traits::{Account, RecoveredBlock, SealedHeader};
+use reth_primitives_traits::{Account, RecoveredBlock, SealedHeader, StorageEntry};
use reth_prune_types::{PruneCheckpoint, PruneSegment};
use reth_stages_types::{StageCheckpoint, StageId};
use reth_static_file_types::StaticFileSegment;
-use reth_storage_api::{
- BlockBodyIndicesProvider, ChangesetEntry, NodePrimitivesProvider, StorageChangeSetReader,
-};
+use reth_storage_api::{BlockBodyIndicesProvider, NodePrimitivesProvider, StorageChangeSetReader};
use reth_storage_errors::provider::ProviderResult;
use reth_trie::{HashedPostState, KeccakKeyHasher};
use revm_database::BundleState;
@@ -715,7 +713,7 @@ impl StorageChangeSetReader for BlockchainProvider {
fn storage_changeset(
&self,
block_number: BlockNumber,
+ ) -> ProviderResult<Vec<(BlockNumberAddress, StorageEntry)>> {
+ ) -> ProviderResult> {
self.consistent_provider()?.storage_changeset(block_number)
}
@@ -724,14 +722,14 @@ impl StorageChangeSetReader for BlockchainProvider {
block_number: BlockNumber,
address: Address,
storage_key: B256,
- ) -> ProviderResult