Mirror of https://github.com/paradigmxyz/reth.git
chore(trie): Cleanup unused trie changesets code (#21323)
@@ -1,119 +0,0 @@
use crate::{
    db_ext::DbTxPruneExt,
    segments::{PruneInput, Segment},
    PrunerError,
};
use alloy_primitives::B256;
use reth_db_api::{models::BlockNumberHashedAddress, table::Value, tables, transaction::DbTxMut};
use reth_primitives_traits::NodePrimitives;
use reth_provider::{
    errors::provider::ProviderResult, BlockReader, ChainStateBlockReader, DBProvider,
    NodePrimitivesProvider, PruneCheckpointWriter, TransactionsProvider,
};
use reth_prune_types::{
    PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint,
};
use reth_stages_types::StageId;
use tracing::{instrument, trace};

#[derive(Debug)]
pub struct MerkleChangeSets {
    mode: PruneMode,
}

impl MerkleChangeSets {
    pub const fn new(mode: PruneMode) -> Self {
        Self { mode }
    }
}

impl<Provider> Segment<Provider> for MerkleChangeSets
where
    Provider: DBProvider<Tx: DbTxMut>
        + PruneCheckpointWriter
        + TransactionsProvider
        + BlockReader
        + ChainStateBlockReader
        + NodePrimitivesProvider<Primitives: NodePrimitives<Receipt: Value>>,
{
    fn segment(&self) -> PruneSegment {
        PruneSegment::MerkleChangeSets
    }

    fn mode(&self) -> Option<PruneMode> {
        Some(self.mode)
    }

    fn purpose(&self) -> PrunePurpose {
        PrunePurpose::User
    }

    fn required_stage(&self) -> Option<StageId> {
        Some(StageId::MerkleChangeSets)
    }

    #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)]
    fn prune(&self, provider: &Provider, input: PruneInput) -> Result<SegmentOutput, PrunerError> {
        let Some(block_range) = input.get_next_block_range() else {
            trace!(target: "pruner", "No change sets to prune");
            return Ok(SegmentOutput::done())
        };

        let block_range_end = *block_range.end();
        let mut limiter = input.limiter;

        // Create range for StoragesTrieChangeSets which uses BlockNumberHashedAddress as key
        let storage_range_start: BlockNumberHashedAddress =
            (*block_range.start(), B256::ZERO).into();
        let storage_range_end: BlockNumberHashedAddress =
            (*block_range.end() + 1, B256::ZERO).into();
        let storage_range = storage_range_start..storage_range_end;

        let mut last_storages_pruned_block = None;
        let (storages_pruned, done) =
            provider.tx_ref().prune_dupsort_table_with_range::<tables::StoragesTrieChangeSets>(
                storage_range,
                &mut limiter,
                |(BlockNumberHashedAddress((block_number, _)), _)| {
                    last_storages_pruned_block = Some(block_number);
                },
            )?;

        trace!(target: "pruner", %storages_pruned, %done, "Pruned storages change sets");

        let mut last_accounts_pruned_block = block_range_end;
        let last_storages_pruned_block = last_storages_pruned_block
            // If there are more storage changesets to prune, set the checkpoint block number to
            // the previous block, so we can finish pruning its storage changesets on the next run.
            .map(|block_number| if done { block_number } else { block_number.saturating_sub(1) })
            .unwrap_or(block_range_end);

        let (accounts_pruned, done) =
            provider.tx_ref().prune_dupsort_table_with_range::<tables::AccountsTrieChangeSets>(
                block_range,
                &mut limiter,
                |row| last_accounts_pruned_block = row.0,
            )?;

        trace!(target: "pruner", %accounts_pruned, %done, "Pruned accounts change sets");

        let progress = limiter.progress(done);

        Ok(SegmentOutput {
            progress,
            pruned: accounts_pruned + storages_pruned,
            checkpoint: Some(SegmentOutputCheckpoint {
                block_number: Some(last_storages_pruned_block.min(last_accounts_pruned_block)),
                tx_number: None,
            }),
        })
    }

    fn save_checkpoint(
        &self,
        provider: &Provider,
        checkpoint: PruneCheckpoint,
    ) -> ProviderResult<()> {
        provider.save_prune_checkpoint(PruneSegment::MerkleChangeSets, checkpoint)
    }
}
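The exclusive upper bound `(*block_range.end() + 1, B256::ZERO)` is what lets the dupsort walk above cover every hashed address belonging to the last block of the range. A minimal standalone sketch of that ordering argument, modelling the key as a plain tuple rather than the real `BlockNumberHashedAddress` type:

```rust
// Standalone model (not reth code): the key orders first by block number,
// then by hashed address, so `(start, ZERO)..(end + 1, ZERO)` covers every
// hashed address for blocks `start..=end`.
type Key = (u64, [u8; 32]);

fn in_prune_range(key: Key, start: u64, end: u64) -> bool {
    let lower: Key = (start, [0u8; 32]);
    let upper: Key = (end + 1, [0u8; 32]); // exclusive upper bound
    (lower..upper).contains(&key)
}

fn main() {
    // Any address at the last block is still inside the range...
    assert!(in_prune_range((10, [0xff; 32]), 5, 10));
    // ...while the first entry of the next block is excluded.
    assert!(!in_prune_range((11, [0x00; 32]), 5, 10));
}
```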
@@ -1,449 +0,0 @@
use crate::stages::merkle::INVALID_STATE_ROOT_ERROR_MESSAGE;
use alloy_consensus::BlockHeader;
use alloy_primitives::BlockNumber;
use reth_consensus::ConsensusError;
use reth_primitives_traits::{GotExpected, SealedHeader};
use reth_provider::{
    BlockNumReader, ChainStateBlockReader, ChangeSetReader, DBProvider, HeaderProvider,
    ProviderError, PruneCheckpointReader, PruneCheckpointWriter, StageCheckpointReader,
    StageCheckpointWriter, StorageChangeSetReader, TrieWriter,
};
use reth_prune_types::{
    PruneCheckpoint, PruneMode, PruneSegment, MERKLE_CHANGESETS_RETENTION_BLOCKS,
};
use reth_stages_api::{
    BlockErrorKind, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId,
    UnwindInput, UnwindOutput,
};
use reth_trie::{
    updates::TrieUpdates, HashedPostStateSorted, KeccakKeyHasher, StateRoot, TrieInputSorted,
};
use reth_trie_db::{DatabaseHashedPostState, DatabaseStateRoot};
use std::{ops::Range, sync::Arc};
use tracing::{debug, error};

/// The `MerkleChangeSets` stage.
///
/// This stage processes and maintains trie changesets from the finalized block to the latest block.
#[derive(Debug, Clone)]
pub struct MerkleChangeSets {
    /// The number of blocks to retain changesets for, used as a fallback when the finalized block
    /// is not found. Defaults to [`MERKLE_CHANGESETS_RETENTION_BLOCKS`] (2 epochs in the beacon
    /// chain).
    retention_blocks: u64,
}

impl MerkleChangeSets {
    /// Creates a new `MerkleChangeSets` stage with the default retention blocks.
    pub const fn new() -> Self {
        Self { retention_blocks: MERKLE_CHANGESETS_RETENTION_BLOCKS }
    }

    /// Creates a new `MerkleChangeSets` stage with a custom number of retention blocks.
    pub const fn with_retention_blocks(retention_blocks: u64) -> Self {
        Self { retention_blocks }
    }

    /// Returns the range of blocks which have already been computed. Returns an empty range if
    /// none have been computed.
    fn computed_range<Provider>(
        provider: &Provider,
        checkpoint: Option<StageCheckpoint>,
    ) -> Result<Range<BlockNumber>, StageError>
    where
        Provider: PruneCheckpointReader,
    {
        let to = checkpoint.map(|chk| chk.block_number).unwrap_or_default();

        // Get the prune checkpoint for MerkleChangeSets to use as the lower bound. If there's no
        // prune checkpoint, or if the pruned block number is None, return an empty range.
        let Some(from) = provider
            .get_prune_checkpoint(PruneSegment::MerkleChangeSets)?
            .and_then(|chk| chk.block_number)
            // The prune checkpoint indicates the last block pruned, so the block after it is the
            // start of the computed data.
            .map(|block_number| block_number + 1)
        else {
            return Ok(0..0)
        };

        Ok(from..to + 1)
    }

    /// Determines the target range for changeset computation based on the checkpoint and provider
    /// state.
    ///
    /// Returns the target range (exclusive end) to compute changesets for.
    fn determine_target_range<Provider>(
        &self,
        provider: &Provider,
    ) -> Result<Range<BlockNumber>, StageError>
    where
        Provider: StageCheckpointReader + ChainStateBlockReader,
    {
        // Get the merkle checkpoint, which represents our target end block
        let merkle_checkpoint = provider
            .get_stage_checkpoint(StageId::MerkleExecute)?
            .map(|checkpoint| checkpoint.block_number)
            .unwrap_or(0);

        let target_end = merkle_checkpoint + 1; // exclusive

        // Calculate the target range based on the finalized block and the target block.
        // We maintain changesets from the finalized block to the latest block.
        let finalized_block = provider.last_finalized_block_number()?;

        // Calculate the fallback start position based on retention blocks
        let retention_based_start = merkle_checkpoint.saturating_sub(self.retention_blocks);

        // If the finalized block was way in the past then we don't want to generate changesets
        // for all of those past blocks; we only care about the recent history.
        //
        // Use the maximum of finalized_block and retention_based_start if finalized_block exists,
        // otherwise just use retention_based_start.
        let mut target_start = finalized_block
            .map(|finalized| finalized.saturating_add(1).max(retention_based_start))
            .unwrap_or(retention_based_start);

        // We cannot revert the genesis block; target_start must be > 0
        target_start = target_start.max(1);

        Ok(target_start..target_end)
    }

    /// Calculates the trie updates given a [`TrieInputSorted`], asserting that the resulting state
    /// root matches the expected one for the block.
    fn calculate_block_trie_updates<Provider: DBProvider + HeaderProvider>(
        provider: &Provider,
        block_number: BlockNumber,
        input: TrieInputSorted,
    ) -> Result<TrieUpdates, StageError> {
        let (root, trie_updates) =
            StateRoot::overlay_root_from_nodes_with_updates(provider.tx_ref(), input).map_err(
                |e| {
                    error!(
                        target: "sync::stages::merkle_changesets",
                        %e,
                        ?block_number,
                        "Incremental state root failed! {INVALID_STATE_ROOT_ERROR_MESSAGE}");
                    StageError::Fatal(Box::new(e))
                },
            )?;

        let block = provider
            .header_by_number(block_number)?
            .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?;

        let (got, expected) = (root, block.state_root());
        if got != expected {
            // Only seal the header when we need it for the error
            let header = SealedHeader::seal_slow(block);
            error!(
                target: "sync::stages::merkle_changesets",
                ?block_number,
                ?got,
                ?expected,
                "Failed to verify block state root! {INVALID_STATE_ROOT_ERROR_MESSAGE}",
            );
            return Err(StageError::Block {
                error: BlockErrorKind::Validation(ConsensusError::BodyStateRootDiff(
                    GotExpected { got, expected }.into(),
                )),
                block: Box::new(header.block_with_parent()),
            })
        }

        Ok(trie_updates)
    }

    fn populate_range<Provider>(
        provider: &Provider,
        target_range: Range<BlockNumber>,
    ) -> Result<(), StageError>
    where
        Provider: StageCheckpointReader
            + TrieWriter
            + DBProvider
            + HeaderProvider
            + ChainStateBlockReader
            + BlockNumReader
            + ChangeSetReader
            + StorageChangeSetReader,
    {
        let target_start = target_range.start;
        let target_end = target_range.end;
        debug!(
            target: "sync::stages::merkle_changesets",
            ?target_range,
            "Starting trie changeset computation",
        );

        // We need to distinguish a cumulative revert from a per-block revert. A cumulative revert
        // reverts changes starting at the db tip all the way to a block. A per-block revert only
        // reverts a single block's changes.
        //
        // We need to calculate the cumulative HashedPostState reverts for every block in the
        // target range. The cumulative HashedPostState revert for block N can be calculated as:
        //
        // ```
        // // where `extend` overwrites any shared keys
        // cumulative_state_revert(N) = cumulative_state_revert(N + 1).extend(get_block_state_revert(N))
        // ```
        //
        // We need per-block reverts to calculate the prefix set for each individual block. By
        // using the per-block reverts to calculate cumulative reverts on-the-fly we can save a
        // bunch of memory.
        debug!(
            target: "sync::stages::merkle_changesets",
            ?target_range,
            "Computing per-block state reverts",
        );
        let range_len = target_end - target_start;
        let mut per_block_state_reverts = Vec::with_capacity(range_len as usize);
        for block_number in target_range.clone() {
            per_block_state_reverts.push(HashedPostStateSorted::from_reverts::<KeccakKeyHasher>(
                provider,
                block_number..=block_number,
            )?);
        }

        // Helper to retrieve state revert data for a specific block from the pre-computed array
        let get_block_state_revert = |block_number: BlockNumber| -> &HashedPostStateSorted {
            let index = (block_number - target_start) as usize;
            &per_block_state_reverts[index]
        };

        // Helper to accumulate state reverts from a given block to the target end
        let compute_cumulative_state_revert = |block_number: BlockNumber| -> HashedPostStateSorted {
            let mut cumulative_revert = HashedPostStateSorted::default();
            for n in (block_number..target_end).rev() {
                cumulative_revert.extend_ref_and_sort(get_block_state_revert(n))
            }
            cumulative_revert
        };

        // To calculate the changeset for a block, we first need the TrieUpdates which are
        // generated as a result of processing the block. To get these we need:
        // 1) The TrieUpdates which revert the db's trie to _prior_ to the block
        // 2) The HashedPostStateSorted to revert the db's state to _after_ the block
        //
        // To get (1) for `target_start` we need to do a big state root calculation which takes
        // into account all changes between that block and the db tip. For each block after
        // `target_start` we can update (1) using the TrieUpdates which were output by the previous
        // block, only targeting the state changes of that block.
        debug!(
            target: "sync::stages::merkle_changesets",
            ?target_start,
            "Computing trie state at starting block",
        );
        let initial_state = compute_cumulative_state_revert(target_start);
        let initial_prefix_sets = initial_state.construct_prefix_sets();
        let initial_input =
            TrieInputSorted::new(Arc::default(), Arc::new(initial_state), initial_prefix_sets);
        // target_start will be >= 1, see `determine_target_range`.
        let mut nodes = Arc::new(
            Self::calculate_block_trie_updates(provider, target_start - 1, initial_input)?
                .into_sorted(),
        );

        for block_number in target_range {
            debug!(
                target: "sync::stages::merkle_changesets",
                ?block_number,
                "Computing trie updates for block",
            );
            // Revert the state so that this block has just been processed, meaning we take the
            // cumulative revert of the subsequent block.
            let state = Arc::new(compute_cumulative_state_revert(block_number + 1));

            // Construct prefix sets from only this block's `HashedPostStateSorted`, because we only
            // care about trie updates which occurred as a result of this block being processed.
            let prefix_sets = get_block_state_revert(block_number).construct_prefix_sets();

            let input = TrieInputSorted::new(Arc::clone(&nodes), state, prefix_sets);

            // Calculate the trie updates for this block, then apply those updates to the reverts.
            // We calculate the overlay which will be passed into the next step using the trie
            // reverts prior to them being updated.
            let this_trie_updates =
                Self::calculate_block_trie_updates(provider, block_number, input)?.into_sorted();

            let trie_overlay = Arc::clone(&nodes);
            let mut nodes_mut = Arc::unwrap_or_clone(nodes);
            nodes_mut.extend_ref_and_sort(&this_trie_updates);
            nodes = Arc::new(nodes_mut);

            // Write the changesets to the DB using the trie updates produced by the block, and the
            // trie reverts as the overlay.
            debug!(
                target: "sync::stages::merkle_changesets",
                ?block_number,
                "Writing trie changesets for block",
            );
            provider.write_trie_changesets(
                block_number,
                &this_trie_updates,
                Some(&trie_overlay),
            )?;
        }

        Ok(())
    }
}

impl Default for MerkleChangeSets {
    fn default() -> Self {
        Self::new()
    }
}

impl<Provider> Stage<Provider> for MerkleChangeSets
where
    Provider: StageCheckpointReader
        + TrieWriter
        + DBProvider
        + HeaderProvider
        + ChainStateBlockReader
        + StageCheckpointWriter
        + PruneCheckpointReader
        + PruneCheckpointWriter
        + ChangeSetReader
        + StorageChangeSetReader
        + BlockNumReader,
{
    fn id(&self) -> StageId {
        StageId::MerkleChangeSets
    }

    fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result<ExecOutput, StageError> {
        // Get the merkle checkpoint and assert that the target is the same.
        let merkle_checkpoint = provider
            .get_stage_checkpoint(StageId::MerkleExecute)?
            .map(|checkpoint| checkpoint.block_number)
            .unwrap_or(0);

        if input.target.is_none_or(|target| merkle_checkpoint != target) {
            return Err(StageError::Fatal(eyre::eyre!("Cannot sync stage to block {:?} when MerkleExecute is at block {merkle_checkpoint:?}", input.target).into()))
        }

        let mut target_range = self.determine_target_range(provider)?;

        // Get the previously computed range. This will be updated to reflect the populating of the
        // target range.
        let mut computed_range = Self::computed_range(provider, input.checkpoint)?;
        debug!(
            target: "sync::stages::merkle_changesets",
            ?computed_range,
            ?target_range,
            "Got computed and target ranges",
        );

        // We want the target range to not include any data already computed previously, if
        // possible, so we start the target range from the end of the computed range if that is
        // greater.
        //
        // ------------------------------> Block #
        //    |------computed-----|
        //             |-----target-----|
        //                        |--actual--|
        //
        // However, if the target start is less than the previously computed start, we don't want to
        // do this, as it would leave a gap of data at `target_range.start..=computed_range.start`.
        //
        // ------------------------------> Block #
        //          |---computed---|
        //    |-------target-------|
        //    |-------actual-------|
        //
        if target_range.start >= computed_range.start {
            target_range.start = target_range.start.max(computed_range.end);
        }

        // If the target range is empty (target_start >= target_end), the stage has already been
        // successfully executed.
        if target_range.start >= target_range.end {
            return Ok(ExecOutput::done(StageCheckpoint::new(target_range.end.saturating_sub(1))));
        }

        // If our target range is a continuation of the already computed range then we can keep the
        // already computed data.
        if target_range.start == computed_range.end {
            // Clear from target_start onwards to ensure no stale data exists
            provider.clear_trie_changesets_from(target_range.start)?;
            computed_range.end = target_range.end;
        } else {
            // If our target range is not a continuation of the already computed range then we
            // simply clear the computed data, to make sure there are no gaps or conflicts.
            provider.clear_trie_changesets()?;
            computed_range = target_range.clone();
        }

        // Populate the target range with changesets
        Self::populate_range(provider, target_range)?;

        // Update the prune checkpoint to reflect that all data before `computed_range.start`
        // is not available.
        provider.save_prune_checkpoint(
            PruneSegment::MerkleChangeSets,
            PruneCheckpoint {
                block_number: Some(computed_range.start.saturating_sub(1)),
                tx_number: None,
                prune_mode: PruneMode::Before(computed_range.start),
            },
        )?;

        // `computed_range.end` is exclusive.
        let checkpoint = StageCheckpoint::new(computed_range.end.saturating_sub(1));

        Ok(ExecOutput::done(checkpoint))
    }

    fn unwind(
        &mut self,
        provider: &Provider,
        input: UnwindInput,
    ) -> Result<UnwindOutput, StageError> {
        // Unwinding is trivial; just clear everything after the target block.
        provider.clear_trie_changesets_from(input.unwind_to + 1)?;

        let mut computed_range = Self::computed_range(provider, Some(input.checkpoint))?;
        computed_range.end = input.unwind_to + 1;
        if computed_range.start > computed_range.end {
            computed_range.start = computed_range.end;
        }

        // If we've unwound so far that there are no longer enough trie changesets available then
        // simply clear them and the checkpoints, so that on the next pipeline startup they will be
        // regenerated.
        //
        // We don't do this check if the target block is not greater than the retention threshold
        // (which happens near genesis), as in that case we could still have all possible
        // changesets even if the total count doesn't meet the threshold.
        debug!(
            target: "sync::stages::merkle_changesets",
            ?computed_range,
            retention_blocks=?self.retention_blocks,
            "Checking if computed range is over retention threshold",
        );
        if input.unwind_to > self.retention_blocks &&
            computed_range.end - computed_range.start < self.retention_blocks
        {
            debug!(
                target: "sync::stages::merkle_changesets",
                ?computed_range,
                retention_blocks=?self.retention_blocks,
                "Clearing checkpoints completely",
            );
            provider.clear_trie_changesets()?;
            provider
                .save_stage_checkpoint(StageId::MerkleChangeSets, StageCheckpoint::default())?;
            return Ok(UnwindOutput { checkpoint: StageCheckpoint::default() })
        }

        // `computed_range.end` is exclusive
        let checkpoint = StageCheckpoint::new(computed_range.end.saturating_sub(1));

        Ok(UnwindOutput { checkpoint })
    }
}
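The recurrence in `populate_range`'s comment is the core of the removed stage: reverts are accumulated from the tip backwards, with earlier blocks overwriting later ones for shared keys. A toy sketch of that accumulation, assuming a simplified map-based stand-in for `HashedPostStateSorted`:

```rust
use std::collections::HashMap;

// Toy model (assumed simplification): a "state revert" is just a key -> value
// map, and `extend` overwrites shared keys, as the comment describes.
type Revert = HashMap<&'static str, u64>;

/// cumulative(N) = cumulative(N + 1).extend(per_block(N))
fn cumulative(per_block: &[Revert], n: usize) -> Revert {
    let mut acc = Revert::new();
    // Walk from the tip back to block `n`; the last extend is block `n`
    // itself, so its (earliest) values win for shared keys.
    for revert in per_block[n..].iter().rev() {
        acc.extend(revert.iter().map(|(k, v)| (*k, *v)));
    }
    acc
}

fn main() {
    // Block 0's revert restores a=1; block 1's revert restores a=2 and b=3.
    let per_block = vec![
        Revert::from([("a", 1)]),
        Revert::from([("a", 2), ("b", 3)]),
    ];
    // Reverting from the tip to before block 0 surfaces block 0's value of `a`.
    assert_eq!(cumulative(&per_block, 0).get("a"), Some(&1));
    // Reverting to before block 1 surfaces block 1's value instead.
    assert_eq!(cumulative(&per_block, 1).get("a"), Some(&2));
}
```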
@@ -12,6 +12,10 @@ pub enum StageId {
         note = "Static Files are generated outside of the pipeline and do not require a separate stage"
     )]
     StaticFile,
+    #[deprecated(
+        note = "MerkleChangeSets stage has been removed; kept for DB checkpoint compatibility"
+    )]
+    MerkleChangeSets,
     Era,
     Headers,
     Bodies,

@@ -75,6 +79,8 @@ impl StageId {
         match self {
             #[expect(deprecated)]
             Self::StaticFile => "StaticFile",
+            #[expect(deprecated)]
+            Self::MerkleChangeSets => "MerkleChangeSets",
             Self::Era => "Era",
             Self::Headers => "Headers",
             Self::Bodies => "Bodies",
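The hunks above keep the removed variant decodable while linting against accidental new uses. A self-contained sketch of the same `#[deprecated]` / `#[expect(deprecated)]` pattern, using a hypothetical two-variant enum (not the real `StageId`):

```rust
// A removed enum variant is kept behind `#[deprecated]` so previously written
// database checkpoints still decode, and each intentional use opts out of the
// lint with `#[expect(deprecated)]`.
#[allow(dead_code)]
enum StageId {
    #[deprecated(note = "removed; kept for DB checkpoint compatibility")]
    MerkleChangeSets,
    Headers,
}

impl StageId {
    fn as_str(&self) -> &'static str {
        match self {
            // Matching the deprecated variant fulfills the expectation below;
            // any *other* use of it would still warn.
            #[expect(deprecated)]
            Self::MerkleChangeSets => "MerkleChangeSets",
            Self::Headers => "Headers",
        }
    }
}

fn main() {
    assert_eq!(StageId::Headers.as_str(), "Headers");
}
```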
@@ -5,7 +5,7 @@ use crate::{
     table::{Decode, Encode},
     DatabaseError,
 };
-use alloy_primitives::{Address, BlockNumber, StorageKey, B256};
+use alloy_primitives::{Address, BlockNumber, StorageKey};
 use serde::{Deserialize, Serialize};
 use std::ops::{Bound, Range, RangeBounds, RangeInclusive};
@@ -108,43 +108,6 @@ impl<R: RangeBounds<BlockNumber>> From<R> for BlockNumberAddressRange {
    }
}

/// [`BlockNumber`] concatenated with [`B256`] (hashed address).
///
/// Since it's used as a key, it isn't compressed when encoding it.
#[derive(
    Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Ord, PartialOrd, Hash,
)]
pub struct BlockNumberHashedAddress(pub (BlockNumber, B256));

impl From<(BlockNumber, B256)> for BlockNumberHashedAddress {
    fn from(tpl: (BlockNumber, B256)) -> Self {
        Self(tpl)
    }
}

impl Encode for BlockNumberHashedAddress {
    type Encoded = [u8; 40];

    fn encode(self) -> Self::Encoded {
        let block_number = self.0 .0;
        let hashed_address = self.0 .1;

        let mut buf = [0u8; 40];

        buf[..8].copy_from_slice(&block_number.to_be_bytes());
        buf[8..].copy_from_slice(hashed_address.as_slice());
        buf
    }
}

impl Decode for BlockNumberHashedAddress {
    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
        let num = u64::from_be_bytes(value[..8].try_into().map_err(|_| DatabaseError::Decode)?);
        let hash = B256::from_slice(&value[8..]);
        Ok(Self((num, hash)))
    }
}

/// [`Address`] concatenated with [`StorageKey`]. Used by `reth_etl` and history stages.
///
/// Since it's used as a key, it isn't compressed when encoding it.
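The removed `Encode` impl stores the block number big-endian ahead of the 32-byte hashed address. A standalone sketch (plain arrays, not the reth types) of why that matters for a database key: byte-wise lexicographic order must agree with numeric block order:

```rust
// Standalone check (not reth code): with a big-endian block number prefix,
// comparing the raw 40-byte keys matches comparing (block_number, address).
fn encode(block_number: u64, hashed_address: [u8; 32]) -> [u8; 40] {
    let mut buf = [0u8; 40];
    buf[..8].copy_from_slice(&block_number.to_be_bytes());
    buf[8..].copy_from_slice(&hashed_address);
    buf
}

fn main() {
    let a = encode(255, [0xff; 32]);
    let b = encode(256, [0x00; 32]);
    // Big-endian: block 255 sorts before block 256 even though its low byte
    // (0xff) is larger. A little-endian encoding would invert this order.
    assert!(a < b);
}
```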
@@ -176,11 +139,7 @@ impl Decode for AddressStorageKey {
     }
 }

-impl_fixed_arbitrary!(
-    (BlockNumberAddress, 28),
-    (BlockNumberHashedAddress, 40),
-    (AddressStorageKey, 52)
-);
+impl_fixed_arbitrary!((BlockNumberAddress, 28), (AddressStorageKey, 52));

 #[cfg(test)]
 mod tests {
@@ -213,31 +172,6 @@ mod tests {
        assert_eq!(bytes, Encode::encode(key));
    }

    #[test]
    fn test_block_number_hashed_address() {
        let num = 1u64;
        let hash = B256::from_slice(&[0xba; 32]);
        let key = BlockNumberHashedAddress((num, hash));

        let mut bytes = [0u8; 40];
        bytes[..8].copy_from_slice(&num.to_be_bytes());
        bytes[8..].copy_from_slice(hash.as_slice());

        let encoded = Encode::encode(key);
        assert_eq!(encoded, bytes);

        let decoded: BlockNumberHashedAddress = Decode::decode(&encoded).unwrap();
        assert_eq!(decoded, key);
    }

    #[test]
    fn test_block_number_hashed_address_rand() {
        let mut bytes = [0u8; 40];
        rng().fill(bytes.as_mut_slice());
        let key = BlockNumberHashedAddress::arbitrary(&mut Unstructured::new(&bytes)).unwrap();
        assert_eq!(bytes, Encode::encode(key));
    }

    #[test]
    fn test_address_storage_key() {
        let storage_key = StorageKey::random();
@@ -12,9 +12,7 @@ use reth_ethereum_primitives::{Receipt, TransactionSigned, TxType};
 use reth_primitives_traits::{Account, Bytecode, StorageEntry};
 use reth_prune_types::{PruneCheckpoint, PruneSegment};
 use reth_stages_types::StageCheckpoint;
-use reth_trie_common::{
-    StorageTrieEntry, StoredNibbles, StoredNibblesSubKey, TrieChangeSetsEntry, *,
-};
+use reth_trie_common::{StorageTrieEntry, StoredNibbles, StoredNibblesSubKey, *};
 use serde::{Deserialize, Serialize};

 pub mod accounts;
@@ -220,7 +218,6 @@ impl_compression_for_compact!(
     TxType,
     StorageEntry,
     BranchNodeCompact,
-    TrieChangeSetsEntry,
     StoredNibbles,
     StoredNibblesSubKey,
     StorageTrieEntry,
@@ -21,8 +21,8 @@ use crate::{
         accounts::BlockNumberAddress,
         blocks::{HeaderHash, StoredBlockOmmers},
         storage_sharded_key::StorageShardedKey,
-        AccountBeforeTx, BlockNumberHashedAddress, ClientVersion, CompactU256, IntegerList,
-        ShardedKey, StoredBlockBodyIndices, StoredBlockWithdrawals,
+        AccountBeforeTx, ClientVersion, CompactU256, IntegerList, ShardedKey,
+        StoredBlockBodyIndices, StoredBlockWithdrawals,
     },
     table::{Decode, DupSort, Encode, Table, TableInfo},
 };
@@ -32,9 +32,7 @@ use reth_ethereum_primitives::{Receipt, TransactionSigned};
 use reth_primitives_traits::{Account, Bytecode, StorageEntry};
 use reth_prune_types::{PruneCheckpoint, PruneSegment};
 use reth_stages_types::StageCheckpoint;
-use reth_trie_common::{
-    BranchNodeCompact, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey, TrieChangeSetsEntry,
-};
+use reth_trie_common::{BranchNodeCompact, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey};
 use serde::{Deserialize, Serialize};
 use std::fmt;
@@ -492,20 +490,6 @@ tables! {
        type SubKey = StoredNibblesSubKey;
    }

    /// Stores the state of a node in the accounts trie prior to a particular block being executed.
    table AccountsTrieChangeSets {
        type Key = BlockNumber;
        type Value = TrieChangeSetsEntry;
        type SubKey = StoredNibblesSubKey;
    }

    /// Stores the state of a node in a storage trie prior to a particular block being executed.
    table StoragesTrieChangeSets {
        type Key = BlockNumberHashedAddress;
        type Value = TrieChangeSetsEntry;
        type SubKey = StoredNibblesSubKey;
    }

    /// Stores the transaction sender for each canonical transaction.
    /// It is needed to speed up execution stage and allows fetching signer without doing
    /// transaction signed recovery
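Both removed tables are dupsort tables: one `Key` with multiple `SubKey`-ordered entries beneath it. A rough `BTreeMap`-based model of that layout, with simplified byte-vector types as an assumption; this is not how MDBX stores data, only how the key/subkey grouping behaves:

```rust
use std::collections::BTreeMap;

// Rough model of a dupsort table (assumed simplification, not MDBX itself):
// each key maps to multiple entries ordered by subkey, so all trie-node
// changes for one block (or block + hashed address) are grouped together.
type Key = u64; // BlockNumber, as in AccountsTrieChangeSets
type SubKey = Vec<u8>; // stands in for StoredNibblesSubKey
type Value = Option<Vec<u8>>; // the node's previous state, None if absent

fn main() {
    let mut table: BTreeMap<Key, BTreeMap<SubKey, Value>> = BTreeMap::new();
    table.entry(100).or_default().insert(vec![0x1, 0x2], Some(vec![0xaa]));
    table.entry(100).or_default().insert(vec![0x5, 0x6], None);

    // Pruning block 100 drops every duplicate entry under that key at once.
    assert_eq!(table.remove(&100).map(|dups| dups.len()), Some(2));
}
```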
@@ -560,6 +560,35 @@ impl DatabaseEnv {
        Ok(handles)
    }

    /// Drops an orphaned table by name.
    ///
    /// This is used to clean up tables that are no longer defined in the schema but may still
    /// exist on disk from previous versions.
    ///
    /// Returns `Ok(true)` if the table existed and was dropped, `Ok(false)` if the table was not
    /// found.
    ///
    /// # Safety
    /// This permanently deletes the table and all its data. Only use for tables that are
    /// confirmed to be obsolete.
    pub fn drop_orphan_table(&self, name: &str) -> Result<bool, DatabaseError> {
        let tx = self.inner.begin_rw_txn().map_err(|e| DatabaseError::InitTx(e.into()))?;

        match tx.open_db(Some(name)) {
            Ok(db) => {
                // SAFETY: We just opened the db handle and will commit immediately after dropping.
                // No other cursors or handles exist for this table.
                unsafe {
                    tx.drop_db(db.dbi()).map_err(|e| DatabaseError::Delete(e.into()))?;
                }
                tx.commit().map_err(|e| DatabaseError::Commit(e.into()))?;
                Ok(true)
            }
            Err(reth_libmdbx::Error::NotFound) => Ok(false),
            Err(e) => Err(DatabaseError::Open(e.into())),
        }
    }

    /// Records version that accesses the database with write privileges.
    pub fn record_client_version(&self, version: ClientVersion) -> Result<(), DatabaseError> {
        if version.is_empty() {
@@ -646,6 +675,46 @@ mod tests {
        create_test_db(DatabaseEnvKind::RW);
    }

    #[test]
    fn db_drop_orphan_table() {
        let path = tempfile::TempDir::new().expect(ERROR_TEMPDIR).keep();
        let db = create_test_db_with_path(DatabaseEnvKind::RW, &path);

        // Create an orphan table manually
        let orphan_table_name = "OrphanTestTable";
        {
            let tx = db.inner.begin_rw_txn().expect(ERROR_INIT_TX);
            tx.create_db(Some(orphan_table_name), DatabaseFlags::empty())
                .expect("Failed to create orphan table");
            tx.commit().expect(ERROR_COMMIT);
        }

        // Verify the table exists by opening it
        {
            let tx = db.inner.begin_ro_txn().expect(ERROR_INIT_TX);
            assert!(tx.open_db(Some(orphan_table_name)).is_ok(), "Orphan table should exist");
        }

        // Drop the orphan table
        let result = db.drop_orphan_table(orphan_table_name);
        assert!(result.is_ok(), "drop_orphan_table should succeed");
        assert!(result.unwrap(), "drop_orphan_table should return true for existing table");

        // Verify the table no longer exists
        {
            let tx = db.inner.begin_ro_txn().expect(ERROR_INIT_TX);
            assert!(
                tx.open_db(Some(orphan_table_name)).is_err(),
                "Orphan table should no longer exist"
            );
        }

        // Dropping a non-existent table should return Ok(false)
        let result = db.drop_orphan_table("NonExistentTable");
        assert!(result.is_ok(), "drop_orphan_table should succeed for non-existent table");
        assert!(!result.unwrap(), "drop_orphan_table should return false for non-existent table");
    }

    #[test]
    fn db_manual_put_get() {
        let env = create_test_db(DatabaseEnvKind::RW);
@@ -2,11 +2,16 @@

 use crate::{is_database_empty, TableSet, Tables};
 use eyre::Context;
+use reth_tracing::tracing::info;
 use std::path::Path;

 pub use crate::implementation::mdbx::*;
 pub use reth_libmdbx::*;

+/// Tables that have been removed from the schema but may still exist on disk from previous
+/// versions. These will be dropped during database initialization.
+const ORPHAN_TABLES: &[&str] = &["AccountsTrieChangeSets", "StoragesTrieChangeSets"];

 /// Creates a new database at the specified path if it doesn't exist. Does NOT create tables. Check
 /// [`init_db`].
 pub fn create_db<P: AsRef<Path>>(path: P, args: DatabaseArguments) -> eyre::Result<DatabaseEnv> {

@@ -44,9 +49,30 @@ pub fn init_db_for<P: AsRef<Path>, TS: TableSet>(
     let mut db = create_db(path, args)?;
     db.create_and_track_tables_for::<TS>()?;
     db.record_client_version(client_version)?;
+    drop_orphan_tables(&db);
     Ok(db)
 }

+/// Drops orphaned tables that are no longer part of the schema.
+fn drop_orphan_tables(db: &DatabaseEnv) {
+    for table_name in ORPHAN_TABLES {
+        match db.drop_orphan_table(table_name) {
+            Ok(true) => {
+                info!(target: "reth::db", table = %table_name, "Dropped orphaned database table");
+            }
+            Ok(false) => {}
+            Err(e) => {
+                reth_tracing::tracing::warn!(
+                    target: "reth::db",
+                    table = %table_name,
+                    %e,
+                    "Failed to drop orphaned database table"
+                );
+            }
+        }
+    }
+}

 /// Opens up an existing database. Read only mode. It doesn't create it or create tables if missing.
 pub fn open_db_read_only(
     path: impl AsRef<Path>,
@@ -40,8 +40,7 @@ use reth_db_api::{
     database::Database,
     models::{
         sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress,
-        BlockNumberHashedAddress, ShardedKey, StorageBeforeTx, StorageSettings,
-        StoredBlockBodyIndices,
+        ShardedKey, StorageBeforeTx, StorageSettings, StoredBlockBodyIndices,
     },
     table::Table,
     tables,
@@ -65,12 +64,10 @@ use reth_storage_api::{
 };
 use reth_storage_errors::provider::{ProviderResult, StaticFileWriterError};
 use reth_trie::{
     changesets::storage_trie_wiped_changeset_iter,
     trie_cursor::{InMemoryTrieCursor, TrieCursor, TrieCursorIter, TrieStorageCursor},
     updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted},
-    HashedPostStateSorted, StoredNibbles, StoredNibblesSubKey, TrieChangeSetsEntry,
+    HashedPostStateSorted, StoredNibbles,
 };
-use reth_trie_db::{ChangesetCache, DatabaseAccountTrieCursor, DatabaseStorageTrieCursor};
+use reth_trie_db::{ChangesetCache, DatabaseStorageTrieCursor};
 use revm_database::states::{
     PlainStateReverts, PlainStorageChangeset, PlainStorageRevert, StateChangeset,
 };
@@ -78,7 +75,7 @@ use std::{
     cmp::Ordering,
     collections::{BTreeMap, BTreeSet},
     fmt::Debug,
-    ops::{Deref, DerefMut, Range, RangeBounds, RangeFrom, RangeInclusive},
+    ops::{Deref, DerefMut, Range, RangeBounds, RangeInclusive},
     sync::Arc,
     thread,
     time::Instant,
@@ -792,9 +789,6 @@ impl<TX: DbTx + DbTxMut + 'static, N: NodeTypesForProvider> DatabaseProvider<TX, N>
         let trie_revert = self.changeset_cache.get_or_compute_range(self, from..=db_tip_block)?;
         self.write_trie_updates_sorted(&trie_revert)?;

-        // Clear trie changesets which have been unwound.
-        self.clear_trie_changesets_from(from)?;
-
         Ok(())
     }
@@ -2783,90 +2777,6 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypes> TrieWriter for DatabaseProvider<TX, N> {

        Ok(num_entries)
    }

    /// Records the current values of all trie nodes which will be updated using the `TrieUpdates`
    /// into the trie changesets tables.
    ///
    /// The intended usage of this method is to call it _prior_ to calling `write_trie_updates` with
    /// the same `TrieUpdates`.
    ///
    /// Returns the number of keys written.
    #[instrument(level = "debug", target = "providers::db", skip_all)]
    fn write_trie_changesets(
        &self,
        block_number: BlockNumber,
        trie_updates: &TrieUpdatesSorted,
        updates_overlay: Option<&TrieUpdatesSorted>,
    ) -> ProviderResult<usize> {
        let mut num_entries = 0;

        let mut changeset_cursor =
            self.tx_ref().cursor_dup_write::<tables::AccountsTrieChangeSets>()?;
        let curr_values_cursor = self.tx_ref().cursor_read::<tables::AccountsTrie>()?;

        // Wrap the cursor in DatabaseAccountTrieCursor
        let mut db_account_cursor = DatabaseAccountTrieCursor::new(curr_values_cursor);

        // Create empty TrieUpdatesSorted for when updates_overlay is None
        let empty_updates = TrieUpdatesSorted::default();
        let overlay = updates_overlay.unwrap_or(&empty_updates);

        // Wrap the cursor in InMemoryTrieCursor with the overlay
        let mut in_memory_account_cursor =
            InMemoryTrieCursor::new_account(&mut db_account_cursor, overlay);

        for (path, _) in trie_updates.account_nodes_ref() {
            num_entries += 1;
            let node = in_memory_account_cursor.seek_exact(*path)?.map(|(_, node)| node);
            changeset_cursor.append_dup(
                block_number,
                TrieChangeSetsEntry { nibbles: StoredNibblesSubKey(*path), node },
            )?;
        }

        let mut storage_updates = trie_updates.storage_tries_ref().iter().collect::<Vec<_>>();
        storage_updates.sort_unstable_by(|a, b| a.0.cmp(b.0));

        num_entries += self.write_storage_trie_changesets(
            block_number,
            storage_updates.into_iter(),
            updates_overlay,
        )?;

        Ok(num_entries)
    }

    fn clear_trie_changesets(&self) -> ProviderResult<()> {
        let tx = self.tx_ref();
        tx.clear::<tables::AccountsTrieChangeSets>()?;
        tx.clear::<tables::StoragesTrieChangeSets>()?;
        Ok(())
    }

    fn clear_trie_changesets_from(&self, from: BlockNumber) -> ProviderResult<()> {
        let tx = self.tx_ref();
        {
            let range = from..;
            let mut cursor = tx.cursor_dup_write::<tables::AccountsTrieChangeSets>()?;
            let mut walker = cursor.walk_range(range)?;

            while walker.next().transpose()?.is_some() {
                walker.delete_current()?;
            }
        }

        {
            let range: RangeFrom<BlockNumberHashedAddress> = (from, B256::ZERO).into()..;
            let mut cursor = tx.cursor_dup_write::<tables::StoragesTrieChangeSets>()?;
            let mut walker = cursor.walk_range(range)?;

            while walker.next().transpose()?.is_some() {
                walker.delete_current()?;
            }
        }

        Ok(())
    }
}

impl<TX: DbTxMut + DbTx + 'static, N: NodeTypes> StorageTrieWriter for DatabaseProvider<TX, N> {
@@ -2893,75 +2803,6 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypes> StorageTrieWriter for DatabaseProvider<TX, N> {

        Ok(num_entries)
    }

    /// Records the current values of all trie nodes which will be updated using the
    /// `StorageTrieUpdates` into the storage trie changesets table.
    ///
    /// The intended usage of this method is to call it _prior_ to calling
    /// `write_storage_trie_updates` with the same set of `StorageTrieUpdates`.
    ///
    /// Returns the number of keys written.
    fn write_storage_trie_changesets<'a>(
        &self,
        block_number: BlockNumber,
        storage_tries: impl Iterator<Item = (&'a B256, &'a StorageTrieUpdatesSorted)>,
        updates_overlay: Option<&TrieUpdatesSorted>,
    ) -> ProviderResult<usize> {
        let mut num_written = 0;

        let mut changeset_cursor =
            self.tx_ref().cursor_dup_write::<tables::StoragesTrieChangeSets>()?;
        let curr_values_cursor = self.tx_ref().cursor_dup_read::<tables::StoragesTrie>()?;

        // Wrap the cursor in DatabaseStorageTrieCursor
        let mut db_storage_cursor = DatabaseStorageTrieCursor::new(
            curr_values_cursor,
            B256::default(), // Will be set per iteration
        );

        // Create empty TrieUpdatesSorted for when updates_overlay is None
        let empty_updates = TrieUpdatesSorted::default();

        for (hashed_address, storage_trie_updates) in storage_tries {
            let changeset_key = BlockNumberHashedAddress((block_number, *hashed_address));

            // Update the hashed address for the cursor
            db_storage_cursor.set_hashed_address(*hashed_address);

            // Get the overlay updates, or use empty updates
            let overlay = updates_overlay.unwrap_or(&empty_updates);

            // Wrap the cursor in InMemoryTrieCursor with the overlay
            let mut in_memory_storage_cursor =
                InMemoryTrieCursor::new_storage(&mut db_storage_cursor, overlay, *hashed_address);

            let changed_paths = storage_trie_updates.storage_nodes.iter().map(|e| e.0);

            if storage_trie_updates.is_deleted() {
                let all_nodes = TrieCursorIter::new(&mut in_memory_storage_cursor);

                for wiped in storage_trie_wiped_changeset_iter(changed_paths, all_nodes)? {
                    let (path, node) = wiped?;
                    num_written += 1;
                    changeset_cursor.append_dup(
                        changeset_key,
                        TrieChangeSetsEntry { nibbles: StoredNibblesSubKey(path), node },
                    )?;
                }
            } else {
                for path in changed_paths {
                    let node = in_memory_storage_cursor.seek_exact(path)?.map(|(_, node)| node);
                    num_written += 1;
                    changeset_cursor.append_dup(
                        changeset_key,
                        TrieChangeSetsEntry { nibbles: StoredNibblesSubKey(path), node },
                    )?;
                }
            }
        }

        Ok(num_written)
    }
}

impl<TX: DbTxMut + DbTx + 'static, N: NodeTypes> HashingWriter for DatabaseProvider<TX, N> {
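The docs on the two removed writers pin down an ordering contract: a changeset records the value a node had *before* the matching update is applied, so the changeset write must come first. A hedged sketch of that contract, using a hypothetical `Provider` trait (a stand-in, not a real reth trait); only the ordering is the point:

```rust
// Hypothetical stand-in trait: simplified signatures, not reth's API.
trait Provider {
    fn write_trie_changesets(&self, block: u64) -> usize;
    fn write_trie_updates(&self, block: u64) -> usize;
}

fn commit_block<P: Provider>(provider: &P, block: u64) {
    // 1) Snapshot the current values of every node the updates will touch...
    provider.write_trie_changesets(block);
    // 2) ...then apply the updates. Reversing the order would record the
    //    post-update values and make the changesets useless for reverts.
    provider.write_trie_updates(block);
}
```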
@@ -3732,7 +3573,7 @@ mod tests {
     use alloy_primitives::map::B256Map;
     use reth_ethereum_primitives::Receipt;
     use reth_testing_utils::generators::{self, random_block, BlockParams};
-    use reth_trie::Nibbles;
+    use reth_trie::{Nibbles, StoredNibblesSubKey};

     #[test]
     fn test_receipts_by_block_range_empty_range() {
@@ -3976,781 +3817,6 @@ mod tests {
|
||||
assert_eq!(range_result, individual_results);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_trie_changesets() {
|
||||
use reth_db_api::models::BlockNumberHashedAddress;
|
||||
use reth_trie::{BranchNodeCompact, StorageTrieEntry};
|
||||
|
||||
let factory = create_test_provider_factory();
|
||||
let provider_rw = factory.provider_rw().unwrap();
|
||||
|
||||
let block_number = 1u64;
|
||||
|
||||
// Create some test nibbles and nodes
|
||||
let account_nibbles1 = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]);
|
||||
let account_nibbles2 = Nibbles::from_nibbles([0x5, 0x6, 0x7, 0x8]);
|
||||
|
||||
let node1 = BranchNodeCompact::new(
|
||||
0b1111_1111_1111_1111, // state_mask
|
||||
0b0000_0000_0000_0000, // tree_mask
|
||||
0b0000_0000_0000_0000, // hash_mask
|
||||
vec![], // hashes
|
||||
None, // root hash
|
||||
);
|
||||
|
||||
// Pre-populate AccountsTrie with a node that will be updated (for account_nibbles1)
|
||||
{
|
||||
let mut cursor = provider_rw.tx_ref().cursor_write::<tables::AccountsTrie>().unwrap();
|
||||
cursor.insert(StoredNibbles(account_nibbles1), &node1).unwrap();
|
||||
}
|
||||
|
||||
// Create account trie updates: one Some (update) and one None (removal)
|
||||
let account_nodes = vec![
|
||||
(account_nibbles1, Some(node1.clone())), // This will update existing node
|
||||
(account_nibbles2, None), // This will be a removal (no existing node)
|
||||
];
|
||||
|
||||
// Create storage trie updates
|
||||
let storage_address1 = B256::from([1u8; 32]); // Normal storage trie
|
||||
let storage_address2 = B256::from([2u8; 32]); // Wiped storage trie
|
||||
|
||||
let storage_nibbles1 = Nibbles::from_nibbles([0xa, 0xb]);
|
||||
let storage_nibbles2 = Nibbles::from_nibbles([0xc, 0xd]);
|
||||
let storage_nibbles3 = Nibbles::from_nibbles([0xe, 0xf]);
|
||||
|
||||
let storage_node1 = BranchNodeCompact::new(
|
||||
0b1111_0000_0000_0000,
|
||||
0b0000_0000_0000_0000,
|
||||
0b0000_0000_0000_0000,
|
||||
vec![],
|
||||
None,
|
||||
);
|
||||
|
||||
let storage_node2 = BranchNodeCompact::new(
|
||||
0b0000_1111_0000_0000,
|
||||
0b0000_0000_0000_0000,
|
||||
0b0000_0000_0000_0000,
|
||||
vec![],
|
||||
None,
|
||||
);
|
||||
|
||||
// Create an old version of storage_node1 to prepopulate
|
||||
let storage_node1_old = BranchNodeCompact::new(
|
||||
0b1010_0000_0000_0000, // Different mask to show it's an old value
|
||||
0b0000_0000_0000_0000,
|
||||
0b0000_0000_0000_0000,
|
||||
vec![],
|
||||
None,
|
||||
);
|
||||
|
||||
// Pre-populate StoragesTrie for normal storage (storage_address1)
|
||||
{
|
||||
let mut cursor =
|
||||
provider_rw.tx_ref().cursor_dup_write::<tables::StoragesTrie>().unwrap();
|
||||
// Add node that will be updated (storage_nibbles1) with old value
|
||||
let entry = StorageTrieEntry {
|
||||
nibbles: StoredNibblesSubKey(storage_nibbles1),
|
||||
node: storage_node1_old.clone(),
|
||||
};
|
||||
cursor.upsert(storage_address1, &entry).unwrap();
|
||||
}
|
||||
|
||||
// Pre-populate StoragesTrie for wiped storage (storage_address2)
|
||||
{
|
||||
let mut cursor =
|
||||
provider_rw.tx_ref().cursor_dup_write::<tables::StoragesTrie>().unwrap();
|
||||
// Add node that will be updated (storage_nibbles1)
|
||||
let entry1 = StorageTrieEntry {
|
||||
nibbles: StoredNibblesSubKey(storage_nibbles1),
|
||||
node: storage_node1.clone(),
|
||||
};
|
||||
cursor.upsert(storage_address2, &entry1).unwrap();
|
||||
// Add node that won't be updated but exists (storage_nibbles3)
|
||||
let entry3 = StorageTrieEntry {
|
||||
nibbles: StoredNibblesSubKey(storage_nibbles3),
|
||||
node: storage_node2.clone(),
|
||||
};
|
||||
cursor.upsert(storage_address2, &entry3).unwrap();
|
||||
}
|
||||
|
||||
// Normal storage trie: one Some (update) and one None (new)
|
||||
let storage_trie1 = StorageTrieUpdatesSorted {
|
||||
is_deleted: false,
|
||||
storage_nodes: vec![
|
||||
(storage_nibbles1, Some(storage_node1.clone())), // This will update existing node
|
||||
(storage_nibbles2, None), // This is a new node
|
||||
],
|
||||
};
|
||||
|
||||
// Wiped storage trie
|
||||
let storage_trie2 = StorageTrieUpdatesSorted {
|
||||
is_deleted: true,
|
||||
storage_nodes: vec![
|
||||
(storage_nibbles1, Some(storage_node1.clone())), // Updated node already in db
|
||||
(storage_nibbles2, Some(storage_node2.clone())), /* Updated node not in db
|
||||
* storage_nibbles3 is in db
|
||||
* but not updated */
|
||||
],
|
||||
};
|
||||
|
||||
let mut storage_tries = B256Map::default();
|
||||
storage_tries.insert(storage_address1, storage_trie1);
|
||||
storage_tries.insert(storage_address2, storage_trie2);
|
||||
|
||||
let trie_updates = TrieUpdatesSorted::new(account_nodes, storage_tries);
|
||||
|
||||
// Write the changesets
|
||||
let num_written =
|
||||
provider_rw.write_trie_changesets(block_number, &trie_updates, None).unwrap();
|
||||
|
||||
// Verify number of entries written
|
||||
// Account changesets: 2 (one update, one removal)
|
||||
// Storage changesets:
|
||||
// - Normal storage: 2 (one update, one removal)
|
||||
// - Wiped storage: 3 (two updated, one existing not updated)
|
||||
// Total: 2 + 2 + 3 = 7
|
||||
assert_eq!(num_written, 7);
|
||||
|
||||
// Verify account changesets were written correctly
|
||||
{
|
||||
let mut cursor =
|
||||
provider_rw.tx_ref().cursor_dup_read::<tables::AccountsTrieChangeSets>().unwrap();
|
||||
|
||||
// Get all entries for this block to see what was written
|
||||
let all_entries = cursor
|
||||
.walk_dup(Some(block_number), None)
|
||||
.unwrap()
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.unwrap();
|
||||
|
||||
// Assert the full value of all_entries in a single assert_eq
|
||||
assert_eq!(
|
||||
all_entries,
|
||||
vec![
|
||||
(
|
||||
block_number,
|
||||
TrieChangeSetsEntry {
|
||||
nibbles: StoredNibblesSubKey(account_nibbles1),
|
||||
node: Some(node1),
|
||||
}
|
||||
),
|
||||
(
|
||||
block_number,
|
||||
TrieChangeSetsEntry {
|
||||
nibbles: StoredNibblesSubKey(account_nibbles2),
|
||||
node: None,
|
||||
}
|
||||
),
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
// Verify storage changesets were written correctly
|
||||
{
|
||||
let mut cursor =
|
||||
provider_rw.tx_ref().cursor_dup_read::<tables::StoragesTrieChangeSets>().unwrap();
|
||||
|
||||
// Check normal storage trie changesets
|
||||
let key1 = BlockNumberHashedAddress((block_number, storage_address1));
|
||||
let entries1 =
|
||||
cursor.walk_dup(Some(key1), None).unwrap().collect::<Result<Vec<_>, _>>().unwrap();
|
||||
|
||||
assert_eq!(
|
||||
entries1,
|
||||
vec![
|
||||
(
|
||||
key1,
|
||||
TrieChangeSetsEntry {
|
||||
nibbles: StoredNibblesSubKey(storage_nibbles1),
|
||||
node: Some(storage_node1_old), // Old value that was prepopulated
|
||||
}
|
||||
),
|
||||
(
|
||||
key1,
|
||||
TrieChangeSetsEntry {
|
||||
nibbles: StoredNibblesSubKey(storage_nibbles2),
|
||||
node: None, // New node, no previous value
|
||||
}
|
||||
),
|
||||
]
|
||||
);
|
||||
|
||||
// Check wiped storage trie changesets
|
||||
let key2 = BlockNumberHashedAddress((block_number, storage_address2));
|
||||
let entries2 =
|
||||
cursor.walk_dup(Some(key2), None).unwrap().collect::<Result<Vec<_>, _>>().unwrap();
|
||||
|
||||
assert_eq!(
|
||||
entries2,
|
||||
vec![
|
||||
(
|
||||
key2,
|
||||
TrieChangeSetsEntry {
|
||||
nibbles: StoredNibblesSubKey(storage_nibbles1),
|
||||
node: Some(storage_node1), // Was in db, so has old value
|
||||
}
|
||||
),
|
||||
(
|
||||
key2,
|
||||
TrieChangeSetsEntry {
|
||||
nibbles: StoredNibblesSubKey(storage_nibbles2),
|
||||
node: None, // Was not in db
|
||||
}
|
||||
),
|
||||
(
|
||||
key2,
|
||||
TrieChangeSetsEntry {
|
||||
nibbles: StoredNibblesSubKey(storage_nibbles3),
|
||||
node: Some(storage_node2), // Existing node in wiped storage
|
||||
}
|
||||
),
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
provider_rw.commit().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_trie_changesets_with_overlay() {
|
||||
use reth_db_api::models::BlockNumberHashedAddress;
|
||||
use reth_trie::BranchNodeCompact;
|
||||
|
||||
let factory = create_test_provider_factory();
|
||||
let provider_rw = factory.provider_rw().unwrap();
|
||||
|
||||
let block_number = 1u64;
|
||||
|
||||
// Create some test nibbles and nodes
|
||||
let account_nibbles1 = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]);
|
||||
let account_nibbles2 = Nibbles::from_nibbles([0x5, 0x6, 0x7, 0x8]);
|
||||
|
||||
let node1 = BranchNodeCompact::new(
|
||||
0b1111_1111_1111_1111, // state_mask
|
||||
0b0000_0000_0000_0000, // tree_mask
|
||||
0b0000_0000_0000_0000, // hash_mask
|
||||
vec![], // hashes
|
||||
None, // root hash
|
||||
);
|
||||
|
||||
// NOTE: Unlike the previous test, we're NOT pre-populating the database
|
||||
// All node values will come from the overlay
|
||||
|
||||
// Create the overlay with existing values that would normally be in the DB
|
||||
let node1_old = BranchNodeCompact::new(
|
||||
0b1010_1010_1010_1010, // Different mask to show it's the overlay "existing" value
|
||||
0b0000_0000_0000_0000,
|
||||
0b0000_0000_0000_0000,
|
||||
vec![],
|
||||
None,
|
||||
);
|
||||
|
||||
// Create overlay account nodes
|
||||
let overlay_account_nodes = vec![
|
||||
(account_nibbles1, Some(node1_old.clone())), // This simulates existing node in overlay
|
||||
];
|
||||
|
||||
// Create account trie updates: one Some (update) and one None (removal)
|
||||
let account_nodes = vec![
|
||||
(account_nibbles1, Some(node1)), // This will update overlay node
|
||||
(account_nibbles2, None), // This will be a removal (no existing node)
|
||||
];
|
||||
|
||||
    // Create storage trie updates
    let storage_address1 = B256::from([1u8; 32]); // Normal storage trie
    let storage_address2 = B256::from([2u8; 32]); // Wiped storage trie

    let storage_nibbles1 = Nibbles::from_nibbles([0xa, 0xb]);
    let storage_nibbles2 = Nibbles::from_nibbles([0xc, 0xd]);
    let storage_nibbles3 = Nibbles::from_nibbles([0xe, 0xf]);

    let storage_node1 = BranchNodeCompact::new(
        0b1111_0000_0000_0000,
        0b0000_0000_0000_0000,
        0b0000_0000_0000_0000,
        vec![],
        None,
    );

    let storage_node2 = BranchNodeCompact::new(
        0b0000_1111_0000_0000,
        0b0000_0000_0000_0000,
        0b0000_0000_0000_0000,
        vec![],
        None,
    );

    // Create old versions for overlay
    let storage_node1_old = BranchNodeCompact::new(
        0b1010_0000_0000_0000, // Different mask to show it's an old value
        0b0000_0000_0000_0000,
        0b0000_0000_0000_0000,
        vec![],
        None,
    );

    // Create overlay storage nodes
    let mut overlay_storage_tries = B256Map::default();

    // Overlay for normal storage (storage_address1)
    let overlay_storage_trie1 = StorageTrieUpdatesSorted {
        is_deleted: false,
        storage_nodes: vec![
            (storage_nibbles1, Some(storage_node1_old.clone())), // Simulates existing in overlay
        ],
    };

    // Overlay for wiped storage (storage_address2)
    let overlay_storage_trie2 = StorageTrieUpdatesSorted {
        is_deleted: false,
        storage_nodes: vec![
            (storage_nibbles1, Some(storage_node1.clone())), // Existing in overlay
            (storage_nibbles3, Some(storage_node2.clone())), // Also existing in overlay
        ],
    };

    overlay_storage_tries.insert(storage_address1, overlay_storage_trie1);
    overlay_storage_tries.insert(storage_address2, overlay_storage_trie2);

    let overlay = TrieUpdatesSorted::new(overlay_account_nodes, overlay_storage_tries);

    // Normal storage trie: one Some (update) and one None (new)
    let storage_trie1 = StorageTrieUpdatesSorted {
        is_deleted: false,
        storage_nodes: vec![
            (storage_nibbles1, Some(storage_node1.clone())), // This will update overlay node
            (storage_nibbles2, None),                        // This is a new node
        ],
    };

    // Wiped storage trie
    let storage_trie2 = StorageTrieUpdatesSorted {
        is_deleted: true,
        storage_nodes: vec![
            (storage_nibbles1, Some(storage_node1.clone())), // Updated node from overlay
            // Updated node not in overlay; storage_nibbles3 is in the overlay but not updated
            (storage_nibbles2, Some(storage_node2.clone())),
        ],
    };

    let mut storage_tries = B256Map::default();
    storage_tries.insert(storage_address1, storage_trie1);
    storage_tries.insert(storage_address2, storage_trie2);

    let trie_updates = TrieUpdatesSorted::new(account_nodes, storage_tries);

    // Write the changesets WITH OVERLAY
    let num_written =
        provider_rw.write_trie_changesets(block_number, &trie_updates, Some(&overlay)).unwrap();

    // Verify number of entries written:
    // Account changesets: 2 (one update from overlay, one removal)
    // Storage changesets:
    // - Normal storage: 2 (one update from overlay, one new)
    // - Wiped storage: 3 (two updated, one existing from overlay not updated)
    // Total: 2 + 2 + 3 = 7
    assert_eq!(num_written, 7);

    // Verify account changesets were written correctly
    {
        let mut cursor =
            provider_rw.tx_ref().cursor_dup_read::<tables::AccountsTrieChangeSets>().unwrap();

        // Get all entries for this block to see what was written
        let all_entries = cursor
            .walk_dup(Some(block_number), None)
            .unwrap()
            .collect::<Result<Vec<_>, _>>()
            .unwrap();

        // Assert the full value of all_entries in a single assert_eq
        assert_eq!(
            all_entries,
            vec![
                (
                    block_number,
                    TrieChangeSetsEntry {
                        nibbles: StoredNibblesSubKey(account_nibbles1),
                        node: Some(node1_old), // Value from overlay, not DB
                    }
                ),
                (
                    block_number,
                    TrieChangeSetsEntry {
                        nibbles: StoredNibblesSubKey(account_nibbles2),
                        node: None,
                    }
                ),
            ]
        );
    }

    // Verify storage changesets were written correctly
    {
        let mut cursor =
            provider_rw.tx_ref().cursor_dup_read::<tables::StoragesTrieChangeSets>().unwrap();

        // Check normal storage trie changesets
        let key1 = BlockNumberHashedAddress((block_number, storage_address1));
        let entries1 =
            cursor.walk_dup(Some(key1), None).unwrap().collect::<Result<Vec<_>, _>>().unwrap();

        assert_eq!(
            entries1,
            vec![
                (
                    key1,
                    TrieChangeSetsEntry {
                        nibbles: StoredNibblesSubKey(storage_nibbles1),
                        node: Some(storage_node1_old), // Old value from overlay
                    }
                ),
                (
                    key1,
                    TrieChangeSetsEntry {
                        nibbles: StoredNibblesSubKey(storage_nibbles2),
                        node: None, // New node, no previous value
                    }
                ),
            ]
        );

        // Check wiped storage trie changesets
        let key2 = BlockNumberHashedAddress((block_number, storage_address2));
        let entries2 =
            cursor.walk_dup(Some(key2), None).unwrap().collect::<Result<Vec<_>, _>>().unwrap();

        assert_eq!(
            entries2,
            vec![
                (
                    key2,
                    TrieChangeSetsEntry {
                        nibbles: StoredNibblesSubKey(storage_nibbles1),
                        node: Some(storage_node1), // Value from overlay
                    }
                ),
                (
                    key2,
                    TrieChangeSetsEntry {
                        nibbles: StoredNibblesSubKey(storage_nibbles2),
                        node: None, // Was not in overlay
                    }
                ),
                (
                    key2,
                    TrieChangeSetsEntry {
                        nibbles: StoredNibblesSubKey(storage_nibbles3),
                        node: Some(storage_node2), // Existing node from overlay in wiped storage
                    }
                ),
            ]
        );
    }

    provider_rw.commit().unwrap();
}

#[test]
fn test_clear_trie_changesets_from() {
    use alloy_primitives::hex_literal::hex;
    use reth_db_api::models::BlockNumberHashedAddress;
    use reth_trie::{BranchNodeCompact, StoredNibblesSubKey, TrieChangeSetsEntry};

    let factory = create_test_provider_factory();

    // Create some test data for different block numbers
    let block1 = 100u64;
    let block2 = 101u64;
    let block3 = 102u64;
    let block4 = 103u64;
    let block5 = 104u64;

    // Create test addresses for storage changesets
    let storage_address1 =
        B256::from(hex!("1111111111111111111111111111111111111111111111111111111111111111"));
    let storage_address2 =
        B256::from(hex!("2222222222222222222222222222222222222222222222222222222222222222"));

    // Create test nibbles
    let nibbles1 = StoredNibblesSubKey(Nibbles::from_nibbles([0x1, 0x2, 0x3]));
    let nibbles2 = StoredNibblesSubKey(Nibbles::from_nibbles([0x4, 0x5, 0x6]));
    let nibbles3 = StoredNibblesSubKey(Nibbles::from_nibbles([0x7, 0x8, 0x9]));

    // Create test nodes
    let node1 = BranchNodeCompact::new(
        0b1111_1111_1111_1111,
        0b1111_1111_1111_1111,
        0b0000_0000_0000_0001,
        vec![B256::from(hex!(
            "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"
        ))],
        None,
    );
    let node2 = BranchNodeCompact::new(
        0b1111_1111_1111_1110,
        0b1111_1111_1111_1110,
        0b0000_0000_0000_0010,
        vec![B256::from(hex!(
            "abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"
        ))],
        Some(B256::from(hex!(
            "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"
        ))),
    );

    // Populate AccountsTrieChangeSets with data across multiple blocks
    {
        let provider_rw = factory.provider_rw().unwrap();
        let mut cursor =
            provider_rw.tx_ref().cursor_dup_write::<tables::AccountsTrieChangeSets>().unwrap();

        // Block 100: 2 entries (will be kept - before start block)
        cursor
            .upsert(
                block1,
                &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node1.clone()) },
            )
            .unwrap();
        cursor
            .upsert(block1, &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: None })
            .unwrap();

        // Block 101: 3 entries with duplicates (will be deleted - from this block onwards)
        cursor
            .upsert(
                block2,
                &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node2.clone()) },
            )
            .unwrap();
        cursor
            .upsert(
                block2,
                &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node1.clone()) },
            )
            .unwrap(); // duplicate key
        cursor
            .upsert(block2, &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: None })
            .unwrap();

        // Block 102: 2 entries (will be deleted - after start block)
        cursor
            .upsert(
                block3,
                &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: Some(node1.clone()) },
            )
            .unwrap();
        cursor
            .upsert(
                block3,
                &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: Some(node2.clone()) },
            )
            .unwrap();

        // Block 103: 1 entry (will be deleted - after start block)
        cursor
            .upsert(block4, &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: None })
            .unwrap();

        // Block 104: 2 entries (will be deleted - after start block)
        cursor
            .upsert(
                block5,
                &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: Some(node2.clone()) },
            )
            .unwrap();
        cursor
            .upsert(block5, &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: None })
            .unwrap();

        provider_rw.commit().unwrap();
    }

    // Populate StoragesTrieChangeSets with data across multiple blocks
    {
        let provider_rw = factory.provider_rw().unwrap();
        let mut cursor =
            provider_rw.tx_ref().cursor_dup_write::<tables::StoragesTrieChangeSets>().unwrap();

        // Block 100, address1: 2 entries (will be kept - before start block)
        let key1_block1 = BlockNumberHashedAddress((block1, storage_address1));
        cursor
            .upsert(
                key1_block1,
                &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node1.clone()) },
            )
            .unwrap();
        cursor
            .upsert(key1_block1, &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: None })
            .unwrap();

        // Block 101, address1: 3 entries with duplicates (will be deleted - from this block
        // onwards)
        let key1_block2 = BlockNumberHashedAddress((block2, storage_address1));
        cursor
            .upsert(
                key1_block2,
                &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node2.clone()) },
            )
            .unwrap();
        cursor
            .upsert(key1_block2, &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: None })
            .unwrap(); // duplicate key
        cursor
            .upsert(
                key1_block2,
                &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: Some(node1.clone()) },
            )
            .unwrap();

        // Block 102, address2: 2 entries (will be deleted - after start block)
        let key2_block3 = BlockNumberHashedAddress((block3, storage_address2));
        cursor
            .upsert(
                key2_block3,
                &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: Some(node2.clone()) },
            )
            .unwrap();
        cursor
            .upsert(key2_block3, &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: None })
            .unwrap();

        // Block 103, address1: 2 entries with duplicate (will be deleted - after start block)
        let key1_block4 = BlockNumberHashedAddress((block4, storage_address1));
        cursor
            .upsert(
                key1_block4,
                &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: Some(node1) },
            )
            .unwrap();
        cursor
            .upsert(
                key1_block4,
                &TrieChangeSetsEntry { nibbles: nibbles3, node: Some(node2.clone()) },
            )
            .unwrap(); // duplicate key

        // Block 104, address2: 2 entries (will be deleted - after start block)
        let key2_block5 = BlockNumberHashedAddress((block5, storage_address2));
        cursor
            .upsert(key2_block5, &TrieChangeSetsEntry { nibbles: nibbles1, node: None })
            .unwrap();
        cursor
            .upsert(key2_block5, &TrieChangeSetsEntry { nibbles: nibbles2, node: Some(node2) })
            .unwrap();

        provider_rw.commit().unwrap();
    }

    // Clear all changesets from block 101 onwards
    {
        let provider_rw = factory.provider_rw().unwrap();
        provider_rw.clear_trie_changesets_from(block2).unwrap();
        provider_rw.commit().unwrap();
    }

    // Verify AccountsTrieChangeSets after clearing
    {
        let provider = factory.provider().unwrap();
        let mut cursor =
            provider.tx_ref().cursor_dup_read::<tables::AccountsTrieChangeSets>().unwrap();

        // Block 100 should still exist (before range)
        let block1_entries = cursor
            .walk_dup(Some(block1), None)
            .unwrap()
            .collect::<Result<Vec<_>, _>>()
            .unwrap();
        assert_eq!(block1_entries.len(), 2, "Block 100 entries should be preserved");
        assert_eq!(block1_entries[0].0, block1);
        assert_eq!(block1_entries[1].0, block1);

        // Blocks 101-104 should be deleted
        let block2_entries = cursor
            .walk_dup(Some(block2), None)
            .unwrap()
            .collect::<Result<Vec<_>, _>>()
            .unwrap();
        assert!(block2_entries.is_empty(), "Block 101 entries should be deleted");

        let block3_entries = cursor
            .walk_dup(Some(block3), None)
            .unwrap()
            .collect::<Result<Vec<_>, _>>()
            .unwrap();
        assert!(block3_entries.is_empty(), "Block 102 entries should be deleted");

        let block4_entries = cursor
            .walk_dup(Some(block4), None)
            .unwrap()
            .collect::<Result<Vec<_>, _>>()
            .unwrap();
        assert!(block4_entries.is_empty(), "Block 103 entries should be deleted");

        // Block 104 should also be deleted
        let block5_entries = cursor
            .walk_dup(Some(block5), None)
            .unwrap()
            .collect::<Result<Vec<_>, _>>()
            .unwrap();
        assert!(block5_entries.is_empty(), "Block 104 entries should be deleted");
    }

    // Verify StoragesTrieChangeSets after clearing
    {
        let provider = factory.provider().unwrap();
        let mut cursor =
            provider.tx_ref().cursor_dup_read::<tables::StoragesTrieChangeSets>().unwrap();

        // Block 100 entries should still exist (before range)
        let key1_block1 = BlockNumberHashedAddress((block1, storage_address1));
        let block1_entries = cursor
            .walk_dup(Some(key1_block1), None)
            .unwrap()
            .collect::<Result<Vec<_>, _>>()
            .unwrap();
        assert_eq!(block1_entries.len(), 2, "Block 100 storage entries should be preserved");

        // Blocks 101-104 entries should be deleted
        let key1_block2 = BlockNumberHashedAddress((block2, storage_address1));
        let block2_entries = cursor
            .walk_dup(Some(key1_block2), None)
            .unwrap()
            .collect::<Result<Vec<_>, _>>()
            .unwrap();
        assert!(block2_entries.is_empty(), "Block 101 storage entries should be deleted");

        let key2_block3 = BlockNumberHashedAddress((block3, storage_address2));
        let block3_entries = cursor
            .walk_dup(Some(key2_block3), None)
            .unwrap()
            .collect::<Result<Vec<_>, _>>()
            .unwrap();
        assert!(block3_entries.is_empty(), "Block 102 storage entries should be deleted");

        let key1_block4 = BlockNumberHashedAddress((block4, storage_address1));
        let block4_entries = cursor
            .walk_dup(Some(key1_block4), None)
            .unwrap()
            .collect::<Result<Vec<_>, _>>()
            .unwrap();
        assert!(block4_entries.is_empty(), "Block 103 storage entries should be deleted");

        // Block 104 entries should also be deleted
        let key2_block5 = BlockNumberHashedAddress((block5, storage_address2));
        let block5_entries = cursor
            .walk_dup(Some(key2_block5), None)
            .unwrap()
            .collect::<Result<Vec<_>, _>>()
            .unwrap();
        assert!(block5_entries.is_empty(), "Block 104 storage entries should be deleted");
    }
}

#[test]
fn test_write_trie_updates_sorted() {
    use reth_trie::{

@@ -1,5 +1,5 @@
use alloc::vec::Vec;
use alloy_primitives::{Address, BlockNumber, Bytes, B256};
use alloy_primitives::{Address, Bytes, B256};
use reth_storage_errors::provider::ProviderResult;
use reth_trie_common::{
    updates::{StorageTrieUpdatesSorted, TrieUpdates, TrieUpdatesSorted},
@@ -103,32 +103,6 @@ pub trait TrieWriter: Send {
    ///
    /// Returns the number of entries modified.
    fn write_trie_updates_sorted(&self, trie_updates: &TrieUpdatesSorted) -> ProviderResult<usize>;

    /// Records the current values of all trie nodes which will be updated using the [`TrieUpdates`]
    /// into the trie changesets tables.
    ///
    /// The intended usage of this method is to call it _prior_ to calling `write_trie_updates` with
    /// the same [`TrieUpdates`].
    ///
    /// The `updates_overlay` parameter allows providing additional in-memory trie updates that
    /// should be considered when looking up current node values. When provided, these overlay
    /// updates are applied on top of the database state, allowing the method to see a view that
    /// includes both committed database values and pending in-memory changes. This is useful
    /// when writing changesets for updates that depend on previous uncommitted trie changes.
    ///
    /// Returns the number of keys written.
    fn write_trie_changesets(
        &self,
        block_number: BlockNumber,
        trie_updates: &TrieUpdatesSorted,
        updates_overlay: Option<&TrieUpdatesSorted>,
    ) -> ProviderResult<usize>;

    /// Clears contents of trie changesets completely
    fn clear_trie_changesets(&self) -> ProviderResult<()>;

    /// Clears contents of trie changesets starting from the given block number (inclusive) onwards.
    fn clear_trie_changesets_from(&self, from: BlockNumber) -> ProviderResult<()>;
}

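A minimal usage sketch of the call order described in the doc comment above (illustrative only, not part of the diff; `provider` is assumed to implement `TrieWriter`, with `trie_updates` a `TrieUpdatesSorted` prepared for `block_number`):

    // Record the pre-state of every node the updates will touch. `None` means
    // current values are resolved from the database alone; `Some(&overlay)`
    // would consult pending in-memory updates first.
    let keys_written = provider.write_trie_changesets(block_number, &trie_updates, None)?;
    // Then apply the updates themselves.
    let entries_modified = provider.write_trie_updates_sorted(&trie_updates)?;
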
/// Storage Trie Writer
@@ -143,25 +117,4 @@ pub trait StorageTrieWriter: Send {
        &self,
        storage_tries: impl Iterator<Item = (&'a B256, &'a StorageTrieUpdatesSorted)>,
    ) -> ProviderResult<usize>;

    /// Records the current values of all trie nodes which will be updated using the
    /// [`StorageTrieUpdatesSorted`] into the storage trie changesets table.
    ///
    /// The intended usage of this method is to call it _prior_ to calling
    /// `write_storage_trie_updates` with the same set of [`StorageTrieUpdatesSorted`].
    ///
    /// The `updates_overlay` parameter allows providing additional in-memory trie updates that
    /// should be considered when looking up current node values. When provided, these overlay
    /// updates are applied on top of the database state for each storage trie, allowing the
    /// method to see a view that includes both committed database values and pending in-memory
    /// changes. This is useful when writing changesets for storage updates that depend on
    /// previous uncommitted trie changes.
    ///
    /// Returns the number of keys written.
    fn write_storage_trie_changesets<'a>(
        &self,
        block_number: BlockNumber,
        storage_tries: impl Iterator<Item = (&'a B256, &'a StorageTrieUpdatesSorted)>,
        updates_overlay: Option<&TrieUpdatesSorted>,
    ) -> ProviderResult<usize>;
}

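The storage variant follows the same record-then-write pattern per hashed address. A sketch under the same caveats, additionally assuming `storage_tries` is a `B256Map<StorageTrieUpdatesSorted>` and `overlay` holds earlier uncommitted trie updates:

    // Record per-address pre-state, resolving current node values against the
    // overlay first and the committed database state second.
    let keys_written = provider.write_storage_trie_changesets(
        block_number,
        storage_tries.iter(),
        Some(&overlay),
    )?;
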
@@ -40,7 +40,7 @@ mod nibbles;
pub use nibbles::{Nibbles, StoredNibbles, StoredNibblesSubKey};

mod storage;
pub use storage::{StorageTrieEntry, TrieChangeSetsEntry};
pub use storage::StorageTrieEntry;

mod subnode;
pub use subnode::StoredSubNode;

@@ -42,181 +42,3 @@ impl reth_codecs::Compact for StorageTrieEntry {
        (this, buf)
    }
}

/// Trie changeset entry representing the state of a trie node before a block.
///
/// `nibbles` is the subkey when used as a value in the changeset tables.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
pub struct TrieChangeSetsEntry {
    /// The nibbles of the intermediate node
    pub nibbles: StoredNibblesSubKey,
    /// Node value prior to the block being processed, None indicating it didn't exist.
    pub node: Option<BranchNodeCompact>,
}

impl ValueWithSubKey for TrieChangeSetsEntry {
    type SubKey = StoredNibblesSubKey;

    fn get_subkey(&self) -> Self::SubKey {
        self.nibbles.clone()
    }
}

#[cfg(any(test, feature = "reth-codec"))]
impl reth_codecs::Compact for TrieChangeSetsEntry {
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        let nibbles_len = self.nibbles.to_compact(buf);
        let node_len = self.node.as_ref().map(|node| node.to_compact(buf)).unwrap_or(0);
        nibbles_len + node_len
    }

    fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
        if len == 0 {
            // Return an empty entry without trying to parse anything
            return (
                Self { nibbles: StoredNibblesSubKey::from(super::Nibbles::default()), node: None },
                buf,
            )
        }

        // Nibbles are always encoded as a fixed 65 bytes: 64 padded nibble bytes
        // plus one length byte (see the tests below).
        let (nibbles, buf) = StoredNibblesSubKey::from_compact(buf, 65);

        if len <= 65 {
            return (Self { nibbles, node: None }, buf)
        }

        // Any bytes beyond the fixed nibbles prefix encode the branch node.
        let (node, buf) = BranchNodeCompact::from_compact(buf, len - 65);
        (Self { nibbles, node: Some(node) }, buf)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use bytes::BytesMut;
    use reth_codecs::Compact;

    #[test]
    fn test_trie_changesets_entry_full_empty() {
        // Test a fully empty entry (empty nibbles, None node)
        let entry = TrieChangeSetsEntry { nibbles: StoredNibblesSubKey::from(vec![]), node: None };

        let mut buf = BytesMut::new();
        let len = entry.to_compact(&mut buf);

        // Empty nibbles takes 65 bytes (64 for padding + 1 for length)
        // None node adds 0 bytes
        assert_eq!(len, 65);
        assert_eq!(buf.len(), 65);

        // Deserialize and verify
        let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, len);
        assert_eq!(decoded.nibbles.0.to_vec(), Vec::<u8>::new());
        assert_eq!(decoded.node, None);
        assert_eq!(remaining.len(), 0);
    }

    #[test]
    fn test_trie_changesets_entry_none_node() {
        // Test non-empty nibbles with None node
        let nibbles_data = vec![0x01, 0x02, 0x03, 0x04];
        let entry = TrieChangeSetsEntry {
            nibbles: StoredNibblesSubKey::from(nibbles_data.clone()),
            node: None,
        };

        let mut buf = BytesMut::new();
        let len = entry.to_compact(&mut buf);

        // Nibbles takes 65 bytes regardless of content
        assert_eq!(len, 65);

        // Deserialize and verify
        let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, len);
        assert_eq!(decoded.nibbles.0.to_vec(), nibbles_data);
        assert_eq!(decoded.node, None);
        assert_eq!(remaining.len(), 0);
    }

    #[test]
    fn test_trie_changesets_entry_empty_path_with_node() {
        // Test empty path with Some node
        // Using the same signature as in the codebase: (state_mask, hash_mask, tree_mask,
        // hashes, value)
        let test_node = BranchNodeCompact::new(
            0b1111_1111_1111_1111, // state_mask: all children present
            0b1111_1111_1111_1111, // hash_mask: all have hashes
            0b0000_0000_0000_0000, // tree_mask: no embedded trees
            vec![],                // hashes
            None,                  // value
        );

        let entry = TrieChangeSetsEntry {
            nibbles: StoredNibblesSubKey::from(vec![]),
            node: Some(test_node.clone()),
        };

        let mut buf = BytesMut::new();
        let len = entry.to_compact(&mut buf);

        // Calculate expected length
        let mut temp_buf = BytesMut::new();
        let node_len = test_node.to_compact(&mut temp_buf);
        assert_eq!(len, 65 + node_len);

        // Deserialize and verify
        let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, len);
        assert_eq!(decoded.nibbles.0.to_vec(), Vec::<u8>::new());
        assert_eq!(decoded.node, Some(test_node));
        assert_eq!(remaining.len(), 0);
    }

    #[test]
    fn test_trie_changesets_entry_normal() {
        // Test normal case: non-empty path with Some node
        let nibbles_data = vec![0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f];
        // Using the same signature as in the codebase
        let test_node = BranchNodeCompact::new(
            0b0000_0000_1111_0000, // state_mask: some children present
            0b0000_0000_0011_0000, // hash_mask: some have hashes
            0b0000_0000_0000_0000, // tree_mask: no embedded trees
            vec![],                // hashes (empty for this test)
            None,                  // value
        );

        let entry = TrieChangeSetsEntry {
            nibbles: StoredNibblesSubKey::from(nibbles_data.clone()),
            node: Some(test_node.clone()),
        };

        let mut buf = BytesMut::new();
        let len = entry.to_compact(&mut buf);

        // Verify serialization length
        let mut temp_buf = BytesMut::new();
        let node_len = test_node.to_compact(&mut temp_buf);
        assert_eq!(len, 65 + node_len);

        // Deserialize and verify
        let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, len);
        assert_eq!(decoded.nibbles.0.to_vec(), nibbles_data);
        assert_eq!(decoded.node, Some(test_node));
        assert_eq!(remaining.len(), 0);
    }

    #[test]
    fn test_trie_changesets_entry_from_compact_zero_len() {
        // Test from_compact with zero length
        let buf = vec![0x01, 0x02, 0x03];
        let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, 0);

        // Should return empty nibbles and None node
        assert_eq!(decoded.nibbles.0.to_vec(), Vec::<u8>::new());
        assert_eq!(decoded.node, None);
        assert_eq!(remaining, &buf[..]); // Buffer should be unchanged
    }
}

@@ -58,8 +58,6 @@ There are many tables within the node, all used to store different types of data
- HashedStorages
- AccountsTrie
- StoragesTrie
- AccountsTrieChangeSets
- StoragesTrieChangeSets
- TransactionSenders
- StageCheckpoints
- StageCheckpointProgresses

@@ -12,7 +12,6 @@ The `stages` lib plays a central role in syncing the node, maintaining state, up
- AccountHashingStage
- StorageHashingStage
- MerkleStage (execute)
- MerkleChangeSets
- TransactionLookupStage
- IndexStorageHistoryStage
- IndexAccountHistoryStage

@@ -114,12 +113,6 @@ The `StorageHashingStage` is responsible for computing hashes of contract storag

<br>

## MerkleChangeSets

The `MerkleChangeSets` stage consolidates and finalizes Merkle-related change sets after the `MerkleStage` execution mode has run, ensuring consistent trie updates and checkpoints.

<br>

## TransactionLookupStage

The `TransactionLookupStage` builds and maintains transaction lookup indices. These indices enable efficient querying of transactions by hash or block position. This stage is crucial for RPC functionality, allowing users to quickly retrieve transaction information without scanning the entire blockchain.

@@ -434,11 +434,6 @@ storage_history = { distance = 100_000 } # Prune all historical storage states b

# Bodies History pruning configuration
bodies_history = { distance = 100_000 } # Prune all historical block bodies before the block `head-100000`

# Merkle Changesets pruning configuration
# Controls pruning of AccountsTrieChangeSets and StoragesTrieChangeSets.
# Default: { distance = 128 } - keeps the last 128 blocks of merkle changesets
merkle_changesets = { distance = 128 }
```

We can also prune receipts more granularly, using log filtering:
