feat(trie): add trie-debug feature for recording sparse trie mutations (#22234)

Co-authored-by: Amp <amp@ampcode.com>
Authored by Brian Picciano on 2026-02-17 12:59:11 +01:00, committed by GitHub
Parent: 117b212e2e
Commit: 8db352dfd2
14 changed files with 388 additions and 7 deletions

View File

@@ -0,0 +1,8 @@
---
reth: patch
reth-engine-tree: patch
reth-node-builder: patch
reth-trie-sparse: minor
---
Added a `trie-debug` feature for recording sparse trie mutations to aid in debugging state root mismatches.

Cargo.lock (generated, 2 lines changed)
View File

@@ -10631,6 +10631,8 @@ dependencies = [
"reth-trie",
"reth-trie-common",
"reth-trie-db",
"serde",
"serde_json",
"smallvec",
"tracing",
]

View File

@@ -190,6 +190,7 @@ min-trace-logs = [
"reth-node-core/min-trace-logs",
]
trie-debug = ["reth-node-builder/trie-debug", "reth-node-core/trie-debug"]
rocksdb = ["reth-ethereum-cli/rocksdb", "reth-node-core/rocksdb"]
edge = ["rocksdb"]

View File

@@ -73,6 +73,7 @@ reth-prune-types = { workspace = true, optional = true }
reth-stages = { workspace = true, optional = true }
reth-static-file = { workspace = true, optional = true }
reth-tracing = { workspace = true, optional = true }
serde_json = { workspace = true, optional = true }
[dev-dependencies]
# reth
@@ -143,6 +144,7 @@ test-utils = [
"reth-evm-ethereum/test-utils",
"reth-tasks/test-utils",
]
trie-debug = ["reth-trie-sparse/trie-debug", "dep:serde_json"]
rocksdb = [
"reth-provider/rocksdb",
"reth-prune/rocksdb",

View File

@@ -25,6 +25,8 @@ use reth_trie_parallel::{
root::ParallelStateRootError,
targets_v2::MultiProofTargetsV2,
};
#[cfg(feature = "trie-debug")]
use reth_trie_sparse::debug_recorder::TrieDebugRecorder;
use reth_trie_sparse::{
errors::{SparseStateTrieResult, SparseTrieErrorKind, SparseTrieResult},
provider::{TrieNodeProvider, TrieNodeProviderFactory},
@@ -183,11 +185,19 @@ where
ParallelStateRootError::Other(format!("could not calculate state root: {e:?}"))
})?;
#[cfg(feature = "trie-debug")]
let debug_recorders = self.trie.take_debug_recorders();
let end = Instant::now();
self.metrics.sparse_trie_final_update_duration_histogram.record(end.duration_since(start));
self.metrics.sparse_trie_total_duration_histogram.record(end.duration_since(now));
Ok(StateRootComputeOutcome { state_root, trie_updates })
Ok(StateRootComputeOutcome {
state_root,
trie_updates,
#[cfg(feature = "trie-debug")]
debug_recorders,
})
}
/// Clears and shrinks the trie, discarding all state.
@@ -475,11 +485,19 @@ where
ParallelStateRootError::Other(format!("could not calculate state root: {e:?}"))
})?;
#[cfg(feature = "trie-debug")]
let debug_recorders = self.trie.take_debug_recorders();
let end = Instant::now();
self.metrics.sparse_trie_final_update_duration_histogram.record(end.duration_since(start));
self.metrics.sparse_trie_total_duration_histogram.record(end.duration_since(now));
Ok(StateRootComputeOutcome { state_root, trie_updates })
Ok(StateRootComputeOutcome {
state_root,
trie_updates,
#[cfg(feature = "trie-debug")]
debug_recorders,
})
}
/// Processes a [`SparseTrieTaskMessage`] from the hashing task.
@@ -891,6 +909,10 @@ pub struct StateRootComputeOutcome {
pub state_root: B256,
/// The trie updates.
pub trie_updates: TrieUpdates,
/// Debug recorders taken from the sparse tries, keyed by `None` for the account trie
/// and `Some(address)` for storage tries.
#[cfg(feature = "trie-debug")]
pub debug_recorders: Vec<(Option<B256>, TrieDebugRecorder)>,
}
/// Updates the sparse trie with the given proofs and state, and returns the elapsed time.
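A hypothetical consumer sketch (not part of this diff) of the new `debug_recorders` field; the `outcome` binding is assumed to be a `StateRootComputeOutcome` obtained with the `trie-debug` feature enabled:

```rust
#[cfg(feature = "trie-debug")]
for (key, recorder) in &outcome.debug_recorders {
    match key {
        // `None` identifies the account trie recorder.
        None => println!("account trie: {} recorded ops", recorder.ops().len()),
        // `Some(address)` identifies the storage trie of that hashed account.
        Some(address) => println!("storage trie {address}: {} recorded ops", recorder.ops().len()),
    }
}
```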

View File

@@ -15,6 +15,8 @@ use alloy_eip7928::BlockAccessList;
use alloy_eips::{eip1898::BlockWithParent, eip4895::Withdrawal, NumHash};
use alloy_evm::Evm;
use alloy_primitives::B256;
#[cfg(feature = "trie-debug")]
use reth_trie_sparse::debug_recorder::TrieDebugRecorder;
use crate::tree::payload_processor::receipt_root_task::{IndexedReceipt, ReceiptRootTaskHandle};
use reth_chain_state::{CanonicalInMemoryState, DeferredTrieData, ExecutedBlock, LazyOverlay};
@@ -532,6 +534,8 @@ where
let root_time = Instant::now();
let mut maybe_state_root = None;
let mut state_root_task_failed = false;
#[cfg(feature = "trie-debug")]
let mut trie_debug_recorders = Vec::new();
match strategy {
StateRootStrategy::StateRootTask => {
@@ -547,17 +551,34 @@ where
);
match task_result {
Ok(StateRootComputeOutcome { state_root, trie_updates }) => {
Ok(StateRootComputeOutcome {
state_root,
trie_updates,
#[cfg(feature = "trie-debug")]
debug_recorders,
}) => {
let elapsed = root_time.elapsed();
info!(target: "engine::tree::payload_validator", ?state_root, ?elapsed, "State root task finished");
#[cfg(feature = "trie-debug")]
{
trie_debug_recorders = debug_recorders;
}
// Compare trie updates with serial computation if configured
if self.config.always_compare_trie_updates() {
self.compare_trie_updates_with_serial(
let _has_diff = self.compare_trie_updates_with_serial(
overlay_factory.clone(),
&hashed_state,
trie_updates.clone(),
);
#[cfg(feature = "trie-debug")]
if _has_diff {
Self::write_trie_debug_recorders(
block.header().number(),
&trie_debug_recorders,
);
}
}
// we double check the state root here for good measure
@@ -570,6 +591,11 @@ where
block_state_root = ?block.header().state_root(),
"State root task returned incorrect state root"
);
#[cfg(feature = "trie-debug")]
Self::write_trie_debug_recorders(
block.header().number(),
&trie_debug_recorders,
);
state_root_task_failed = true;
}
}
@@ -635,6 +661,9 @@ where
// ensure state root matches
if state_root != block.header().state_root() {
#[cfg(feature = "trie-debug")]
Self::write_trie_debug_recorders(block.header().number(), &trie_debug_recorders);
// call post-block hook
self.on_invalid_block(
&parent_block,
@@ -1007,7 +1036,12 @@ where
))
})?;
let (state_root, trie_updates) = result?;
return Ok(Ok(StateRootComputeOutcome { state_root, trie_updates }));
return Ok(Ok(StateRootComputeOutcome {
state_root,
trie_updates,
#[cfg(feature = "trie-debug")]
debug_recorders: Vec::new(),
}));
}
Err(RecvTimeoutError::Timeout) => {}
}
@@ -1019,7 +1053,12 @@ where
"State root timeout race won"
);
let (state_root, trie_updates) = result?;
return Ok(Ok(StateRootComputeOutcome { state_root, trie_updates }));
return Ok(Ok(StateRootComputeOutcome {
state_root,
trie_updates,
#[cfg(feature = "trie-debug")]
debug_recorders: Vec::new(),
}));
}
}
}
@@ -1037,7 +1076,7 @@ where
overlay_factory: OverlayStateProviderFactory<P>,
hashed_state: &HashedPostState,
task_trie_updates: TrieUpdates,
) {
) -> bool {
debug!(target: "engine::tree::payload_validator", "Comparing trie updates with serial computation");
match Self::compute_state_root_serial(overlay_factory.clone(), hashed_state) {
@@ -1061,6 +1100,7 @@ where
%err,
"Error comparing trie updates"
);
return true;
}
}
Err(err) => {
@@ -1080,6 +1120,45 @@ where
);
}
}
false
}
/// Writes trie debug recorders to a JSON file for the given block number.
///
/// The file is written to the current working directory as
/// `trie_debug_block_{block_number}.json`.
#[cfg(feature = "trie-debug")]
fn write_trie_debug_recorders(
block_number: u64,
recorders: &[(Option<B256>, TrieDebugRecorder)],
) {
let path = format!("trie_debug_block_{block_number}.json");
match serde_json::to_string_pretty(recorders) {
Ok(json) => match std::fs::write(&path, json) {
Ok(()) => {
warn!(
target: "engine::tree::payload_validator",
%path,
"Wrote trie debug recorders to file"
);
}
Err(err) => {
warn!(
target: "engine::tree::payload_validator",
%err,
%path,
"Failed to write trie debug recorders"
);
}
},
Err(err) => {
warn!(
target: "engine::tree::payload_validator",
%err,
"Failed to serialize trie debug recorders"
);
}
}
}
/// Validates the block after execution.
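The recorder types only implement `Serialize`, so a separate inspection tool would typically read the dump back as untyped JSON. A minimal sketch, assuming a file produced by the code above for a placeholder block number:

```rust
use serde_json::Value;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // File name follows the `trie_debug_block_{block_number}.json` pattern above;
    // the block number here is a placeholder.
    let raw = std::fs::read_to_string("trie_debug_block_12345.json")?;
    // The top level is an array of `[trie_key, recorder]` pairs: `null` keys the
    // account trie, a hex-encoded hash keys a storage trie.
    let entries: Vec<(Value, Value)> = serde_json::from_str(&raw)?;
    for (trie_key, recorder) in entries {
        let ops = recorder["ops"].as_array().map(|ops| ops.len()).unwrap_or(0);
        println!("{trie_key}: {ops} recorded ops");
    }
    Ok(())
}
```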

View File

@@ -121,6 +121,7 @@ test-utils = [
"reth-primitives-traits/test-utils",
"reth-tasks/test-utils",
]
trie-debug = ["reth-engine-tree/trie-debug"]
op = [
"reth-db/op",
"reth-db-api/op",

View File

@@ -90,6 +90,9 @@ min-info-logs = ["tracing/release_max_level_info"]
min-debug-logs = ["tracing/release_max_level_debug"]
min-trace-logs = ["tracing/release_max_level_trace"]
# Debug recording for sparse trie mutations
trie-debug = []
# Route supported tables to RocksDB instead of MDBX
rocksdb = ["reth-storage-api/rocksdb"]

View File

@@ -26,6 +26,8 @@ alloy-rlp.workspace = true
# misc
auto_impl.workspace = true
rayon = { workspace = true, optional = true }
serde = { workspace = true, features = ["derive"], optional = true }
serde_json = { workspace = true, optional = true }
smallvec.workspace = true
# metrics
@@ -63,8 +65,11 @@ std = [
"reth-storage-api/std",
"reth-trie-common/std",
"tracing/std",
"serde?/std",
"serde_json?/std",
]
metrics = ["dep:reth-metrics", "dep:metrics", "std"]
trie-debug = ["std", "dep:serde", "dep:serde_json", "alloy-primitives/serde", "alloy-trie/serde"]
test-utils = [
"std",
"reth-primitives-traits/test-utils",

View File

@@ -0,0 +1,173 @@
//! Debug recorder for tracking mutating operations on sparse tries.
//!
//! This module is only available with the `trie-debug` feature and provides
//! infrastructure for recording all mutations to a [`crate::ParallelSparseTrie`]
//! for post-hoc debugging of state root mismatches.
use alloc::{string::String, vec::Vec};
use alloy_primitives::{hex, Bytes, B256};
use alloy_trie::nodes::TrieNode;
use reth_trie_common::Nibbles;
use serde::Serialize;
/// Records mutating operations performed on a sparse trie in the order they occurred.
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize)]
pub struct TrieDebugRecorder {
ops: Vec<RecordedOp>,
}
impl TrieDebugRecorder {
/// Creates a new empty recorder.
pub fn new() -> Self {
Self::default()
}
/// Clears all recorded operations.
pub fn reset(&mut self) {
self.ops.clear();
}
/// Records a single operation.
pub fn record(&mut self, op: RecordedOp) {
self.ops.push(op);
}
/// Returns a reference to the recorded operations.
pub fn ops(&self) -> &[RecordedOp] {
&self.ops
}
/// Takes and returns the recorded operations, leaving the recorder empty.
pub fn take_ops(&mut self) -> Vec<RecordedOp> {
core::mem::take(&mut self.ops)
}
/// Returns `true` if no operations have been recorded.
pub const fn is_empty(&self) -> bool {
self.ops.is_empty()
}
}
/// A mutating operation recorded from a sparse trie.
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub enum RecordedOp {
/// Records a `reveal_nodes` call with the nodes that were revealed.
RevealNodes {
/// The proof trie nodes that were revealed.
nodes: Vec<ProofTrieNodeRecord>,
},
/// Records an `update_leaves` call with the leaf updates.
UpdateLeaves {
/// The leaf updates that were applied.
updates: Vec<(B256, LeafUpdateRecord)>,
/// Keys remaining in the updates map after the call (i.e. those that could not be applied
/// due to blinded nodes).
remaining_keys: Vec<B256>,
/// Proof targets returned via the callback as `(key, min_len)` pairs.
proof_targets: Vec<(B256, u8)>,
},
/// Records an `update_subtrie_hashes` call.
UpdateSubtrieHashes,
/// Records a `root()` call.
Root,
}
/// A serializable record of a proof trie node.
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct ProofTrieNodeRecord {
/// The nibble path of the node.
pub path: Nibbles,
/// The trie node.
pub node: TrieNodeRecord,
/// The branch node masks `(hash_mask, tree_mask)` stored as raw `u16` values, if present.
pub masks: Option<(u16, u16)>,
}
impl ProofTrieNodeRecord {
/// Creates a record from a [`reth_trie_common::ProofTrieNode`].
pub fn from_proof_trie_node(node: &reth_trie_common::ProofTrieNode) -> Self {
Self {
path: node.path,
node: TrieNodeRecord(node.node.clone()),
masks: node.masks.map(|masks| (masks.hash_mask.get(), masks.tree_mask.get())),
}
}
/// Creates a record from a [`reth_trie_common::ProofTrieNodeV2`].
pub fn from_proof_trie_node_v2(node: &reth_trie_common::ProofTrieNodeV2) -> Self {
use reth_trie_common::TrieNodeV2;
let trie_node = match &node.node {
TrieNodeV2::EmptyRoot => TrieNode::EmptyRoot,
TrieNodeV2::Leaf(leaf) => TrieNode::Leaf(leaf.clone()),
TrieNodeV2::Extension(ext) => TrieNode::Extension(ext.clone()),
TrieNodeV2::Branch(branch) => TrieNode::Branch(alloy_trie::nodes::BranchNode::new(
branch.stack.clone(),
branch.state_mask,
)),
};
Self {
path: node.path,
node: TrieNodeRecord(trie_node),
masks: node.masks.map(|masks| (masks.hash_mask.get(), masks.tree_mask.get())),
}
}
}
/// A newtype wrapper around [`TrieNode`] with custom serialization that hex-encodes byte fields
/// (leaf values, branch stack entries, extension child pointers) instead of serializing them as
/// raw integer arrays.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TrieNodeRecord(pub TrieNode);
impl From<TrieNode> for TrieNodeRecord {
fn from(node: TrieNode) -> Self {
Self(node)
}
}
impl Serialize for TrieNodeRecord {
fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
use serde::ser::SerializeStructVariant;
match &self.0 {
TrieNode::EmptyRoot => serializer.serialize_unit_variant("TrieNode", 0, "EmptyRoot"),
TrieNode::Branch(branch) => {
let stack_hex: Vec<String> =
branch.stack.iter().map(|n| hex::encode(n.as_ref())).collect();
let mut sv = serializer.serialize_struct_variant("TrieNode", 1, "Branch", 2)?;
sv.serialize_field("stack", &stack_hex)?;
sv.serialize_field("state_mask", &branch.state_mask.get())?;
sv.end()
}
TrieNode::Extension(ext) => {
let mut sv = serializer.serialize_struct_variant("TrieNode", 2, "Extension", 2)?;
sv.serialize_field("key", &ext.key)?;
sv.serialize_field("child", &hex::encode(ext.child.as_ref()))?;
sv.end()
}
TrieNode::Leaf(leaf) => {
let mut sv = serializer.serialize_struct_variant("TrieNode", 3, "Leaf", 2)?;
sv.serialize_field("key", &leaf.key)?;
sv.serialize_field("value", &hex::encode(&leaf.value))?;
sv.end()
}
}
}
}
/// A serializable record of a leaf update, mirroring [`crate::LeafUpdate`].
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub enum LeafUpdateRecord {
/// The leaf value was changed to the given RLP-encoded value.
Changed(Bytes),
/// The leaf value was touched but the new value is not yet known.
Touched,
}
impl From<&crate::LeafUpdate> for LeafUpdateRecord {
fn from(update: &crate::LeafUpdate) -> Self {
match update {
crate::LeafUpdate::Changed(value) => Self::Changed(value.clone().into()),
crate::LeafUpdate::Touched => Self::Touched,
}
}
}
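A minimal, self-contained sketch of the recorder API and its JSON output, assuming the `trie-debug` feature is enabled and that alloy-trie exposes a `LeafNode::new(key, value)` constructor:

```rust
use alloy_trie::nodes::{LeafNode, TrieNode};
use reth_trie_common::Nibbles;
use reth_trie_sparse::debug_recorder::{
    ProofTrieNodeRecord, RecordedOp, TrieDebugRecorder, TrieNodeRecord,
};

fn main() {
    let mut recorder = TrieDebugRecorder::new();
    recorder.record(RecordedOp::RevealNodes {
        nodes: vec![ProofTrieNodeRecord {
            path: Nibbles::default(),
            node: TrieNodeRecord::from(TrieNode::Leaf(LeafNode::new(
                Nibbles::from_nibbles([0x0, 0x1]),
                vec![0xde, 0xad, 0xbe, 0xef],
            ))),
            masks: None,
        }],
    });
    recorder.record(RecordedOp::Root);
    // The leaf value serializes as the hex string "deadbeef" rather than a raw
    // integer array, via the custom `TrieNodeRecord` serialization above.
    println!("{}", serde_json::to_string_pretty(&recorder).unwrap());
}
```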

View File

@@ -24,6 +24,11 @@ pub mod provider;
#[cfg(feature = "metrics")]
mod metrics;
#[cfg(feature = "trie-debug")]
pub mod debug_recorder;
#[cfg(feature = "trie-debug")]
use serde_json as _;
/// Re-export sparse trie error types.
pub mod errors {
pub use reth_execution_errors::{

View File

@@ -1,3 +1,5 @@
#[cfg(feature = "trie-debug")]
use crate::debug_recorder::{LeafUpdateRecord, ProofTrieNodeRecord, RecordedOp, TrieDebugRecorder};
use crate::{
lower::LowerSparseSubtrie,
provider::{RevealedNode, TrieNodeProvider},
@@ -131,6 +133,9 @@ pub struct ParallelSparseTrie {
/// Metrics for the parallel sparse trie.
#[cfg(feature = "metrics")]
metrics: crate::metrics::ParallelSparseTrieMetrics,
/// Debug recorder for tracking mutating operations.
#[cfg(feature = "trie-debug")]
debug_recorder: TrieDebugRecorder,
}
impl Default for ParallelSparseTrie {
@@ -151,6 +156,8 @@ impl Default for ParallelSparseTrie {
subtrie_heat: SubtrieModifications::default(),
#[cfg(feature = "metrics")]
metrics: Default::default(),
#[cfg(feature = "trie-debug")]
debug_recorder: Default::default(),
}
}
}
@@ -182,6 +189,11 @@ impl SparseTrie for ParallelSparseTrie {
return Ok(())
}
#[cfg(feature = "trie-debug")]
self.debug_recorder.record(RecordedOp::RevealNodes {
nodes: nodes.iter().map(ProofTrieNodeRecord::from_proof_trie_node_v2).collect(),
});
// Sort nodes first by their subtrie, and secondarily by their path. This allows for
// grouping nodes by their subtrie using `chunk_by`.
nodes.sort_unstable_by(
@@ -812,6 +824,9 @@ impl SparseTrie for ParallelSparseTrie {
fn root(&mut self) -> B256 {
trace!(target: "trie::parallel_sparse", "Calculating trie root hash");
#[cfg(feature = "trie-debug")]
self.debug_recorder.record(RecordedOp::Root);
if self.prefix_set.is_empty() &&
let Some(hash) =
self.upper_subtrie.nodes.get(&Nibbles::default()).and_then(|node| node.hash())
@@ -843,6 +858,9 @@ impl SparseTrie for ParallelSparseTrie {
fn update_subtrie_hashes(&mut self) {
trace!(target: "trie::parallel_sparse", "Updating subtrie hashes");
#[cfg(feature = "trie-debug")]
self.debug_recorder.record(RecordedOp::UpdateSubtrieHashes);
// Take changed subtries according to the prefix set
let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze();
let num_changed_keys = prefix_set.len();
@@ -959,6 +977,8 @@ impl SparseTrie for ParallelSparseTrie {
self.updates = None;
self.branch_node_masks.clear();
self.subtrie_heat.clear();
#[cfg(feature = "trie-debug")]
self.debug_recorder.reset();
// `update_actions_buffers` doesn't need to be cleared; we want to reuse the Vecs it has
// buffered, and all of those are already inherently cleared when they get used.
}
@@ -1073,6 +1093,9 @@ impl SparseTrie for ParallelSparseTrie {
}
fn prune(&mut self, max_depth: usize) -> usize {
#[cfg(feature = "trie-debug")]
self.debug_recorder.reset();
// Decay heat for subtries not modified this cycle
self.subtrie_heat.decay_and_reset();
@@ -1243,6 +1266,12 @@ impl SparseTrie for ParallelSparseTrie {
) -> SparseTrieResult<()> {
use crate::{provider::NoRevealProvider, LeafUpdate};
#[cfg(feature = "trie-debug")]
let recorded_updates: Vec<_> =
updates.iter().map(|(k, v)| (*k, LeafUpdateRecord::from(v))).collect();
#[cfg(feature = "trie-debug")]
let mut recorded_proof_targets: Vec<(B256, u8)> = Vec::new();
// Drain updates to avoid cloning keys while preserving the map's allocation.
// On success, entries remain removed; on blinded node failure, they're re-inserted.
let drained: Vec<_> = updates.drain().collect();
@@ -1262,6 +1291,8 @@ impl SparseTrie for ParallelSparseTrie {
let (target_key, min_len) =
Self::proof_target_for_path(key, &full_path, &path);
proof_required_fn(target_key, min_len);
#[cfg(feature = "trie-debug")]
recorded_proof_targets.push((target_key, min_len));
updates.insert(key, LeafUpdate::Changed(value));
} else {
return Err(e);
@@ -1276,6 +1307,8 @@ impl SparseTrie for ParallelSparseTrie {
let (target_key, min_len) =
Self::proof_target_for_path(key, &full_path, &path);
proof_required_fn(target_key, min_len);
#[cfg(feature = "trie-debug")]
recorded_proof_targets.push((target_key, min_len));
updates.insert(key, LeafUpdate::Changed(value));
} else {
return Err(e);
@@ -1290,6 +1323,8 @@ impl SparseTrie for ParallelSparseTrie {
let (target_key, min_len) =
Self::proof_target_for_path(key, &full_path, &path);
proof_required_fn(target_key, min_len);
#[cfg(feature = "trie-debug")]
recorded_proof_targets.push((target_key, min_len));
updates.insert(key, LeafUpdate::Touched);
}
// Path is fully revealed (exists or proven non-existent), no action needed.
@@ -1299,8 +1334,20 @@ impl SparseTrie for ParallelSparseTrie {
}
}
#[cfg(feature = "trie-debug")]
self.debug_recorder.record(RecordedOp::UpdateLeaves {
updates: recorded_updates,
remaining_keys: updates.keys().copied().collect(),
proof_targets: recorded_proof_targets,
});
Ok(())
}
#[cfg(feature = "trie-debug")]
fn take_debug_recorder(&mut self) -> TrieDebugRecorder {
core::mem::take(&mut self.debug_recorder)
}
}
impl ParallelSparseTrie {

View File

@@ -1,3 +1,5 @@
#[cfg(feature = "trie-debug")]
use crate::debug_recorder::TrieDebugRecorder;
use crate::{
provider::{TrieNodeProvider, TrieNodeProviderFactory},
traits::SparseTrie as SparseTrieTrait,
@@ -149,6 +151,26 @@ impl SparseStateTrie {
}
}
impl<A: SparseTrieTrait, S: SparseTrieTrait> SparseStateTrie<A, S> {
/// Takes all debug recorders from the account trie and all revealed storage tries.
///
/// Returns a vec of `(Option<B256>, TrieDebugRecorder)` pairs, where the key is `None` for
/// the account trie and `Some(address)` for each storage trie.
#[cfg(feature = "trie-debug")]
pub fn take_debug_recorders(&mut self) -> alloc::vec::Vec<(Option<B256>, TrieDebugRecorder)> {
let mut recorders = alloc::vec::Vec::new();
if let Some(trie) = self.state.as_revealed_mut() {
recorders.push((None, trie.take_debug_recorder()));
}
for (address, trie) in &mut self.storage.tries {
if let Some(trie) = trie.as_revealed_mut() {
recorders.push((Some(*address), trie.take_debug_recorder()));
}
}
recorders
}
}
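A small hypothetical follow-up, assuming `state_trie` is a mutable `SparseStateTrie` built with the `trie-debug` feature: tries that recorded nothing can be dropped before the dump is serialized.

```rust
// Keep only recorders that actually captured mutations.
let mut recorders = state_trie.take_debug_recorders();
recorders.retain(|(_, recorder)| !recorder.is_empty());
```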
impl<A, S> SparseStateTrie<A, S>
where
A: SparseTrieTrait + Default,

View File

@@ -11,6 +11,8 @@ use alloy_trie::BranchNodeCompact;
use reth_execution_errors::SparseTrieResult;
use reth_trie_common::{BranchNodeMasks, Nibbles, ProofTrieNodeV2, TrieNodeV2};
#[cfg(feature = "trie-debug")]
use crate::debug_recorder::TrieDebugRecorder;
use crate::provider::TrieNodeProvider;
/// Describes an update to a leaf in the sparse trie.
@@ -306,6 +308,15 @@ pub trait SparseTrie: Sized + Debug + Send + Sync {
/// The number of nodes converted to hash stubs.
fn prune(&mut self, max_depth: usize) -> usize;
/// Takes the debug recorder out of this trie, replacing it with an empty one.
///
/// Returns the recorder containing all recorded mutations since the last reset.
/// The default implementation returns an empty recorder.
#[cfg(feature = "trie-debug")]
fn take_debug_recorder(&mut self) -> TrieDebugRecorder {
TrieDebugRecorder::default()
}
/// Applies leaf updates to the sparse trie.
///
/// When a [`LeafUpdate::Changed`] is successfully applied, it is removed from the