chore: rename extend_ref methods on sorted data structures (#21043)

Author: Dan Cline
Date: 2026-01-19 13:04:57 +00:00
Committed by: GitHub
Parent: f7460e219c
Commit: 1d55abeef3
9 changed files with 32 additions and 26 deletions
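The rename makes the post-extend re-sorting explicit in the method names. Per the new doc comments in this commit, extend_ref_and_sort appends the other structure's entries and then re-sorts, with `other` winning on duplicate keys. Below is a minimal sketch of that behavior on a plain key-sorted Vec<(K, V)>; it is illustrative only and is not reth's extend_sorted_vec helper (the generic signature and the reverse/dedup trick are assumptions made for the example).

// Illustrative sketch only; not reth's `extend_sorted_vec`. Assumes a
// key-sorted Vec<(K, V)> with unique keys on each side.
fn extend_ref_and_sort_sketch<K: Ord + Clone, V: Clone>(dst: &mut Vec<(K, V)>, other: &[(K, V)]) {
    // Append `other`'s entries after the existing ones.
    dst.extend_from_slice(other);
    // Stable sort by key: for a duplicate key, the existing entry still precedes `other`'s.
    dst.sort_by(|a, b| a.0.cmp(&b.0));
    // Keep the last entry of each equal-key run so `other` takes precedence.
    dst.reverse();
    dst.dedup_by(|a, b| a.0 == b.0);
    dst.reverse();
}

Each renamed call site below performs this kind of merge for its sorted accounts, storage slots, or trie nodes.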

View File

@@ -192,10 +192,10 @@ impl DeferredTrieData {
);
// Only trigger COW clone if there's actually data to add.
if !sorted_hashed_state.is_empty() {
- Arc::make_mut(&mut overlay.state).extend_ref(&sorted_hashed_state);
+ Arc::make_mut(&mut overlay.state).extend_ref_and_sort(&sorted_hashed_state);
}
if !sorted_trie_updates.is_empty() {
- Arc::make_mut(&mut overlay.nodes).extend_ref(&sorted_trie_updates);
+ Arc::make_mut(&mut overlay.nodes).extend_ref_and_sort(&sorted_trie_updates);
}
overlay
}
@@ -242,13 +242,13 @@ impl DeferredTrieData {
for ancestor in ancestors {
let ancestor_data = ancestor.wait_cloned();
- state_mut.extend_ref(ancestor_data.hashed_state.as_ref());
- nodes_mut.extend_ref(ancestor_data.trie_updates.as_ref());
+ state_mut.extend_ref_and_sort(ancestor_data.hashed_state.as_ref());
+ nodes_mut.extend_ref_and_sort(ancestor_data.trie_updates.as_ref());
}
// Extend with current block's sorted data last (takes precedence)
- state_mut.extend_ref(sorted_hashed_state);
- nodes_mut.extend_ref(sorted_trie_updates);
+ state_mut.extend_ref_and_sort(sorted_hashed_state);
+ nodes_mut.extend_ref_and_sort(sorted_trie_updates);
overlay
}
@@ -521,7 +521,7 @@ mod tests {
let hashed_state = Arc::new(HashedPostStateSorted::new(accounts, B256Map::default()));
let trie_updates = Arc::default();
let mut overlay = TrieInputSorted::default();
- Arc::make_mut(&mut overlay.state).extend_ref(hashed_state.as_ref());
+ Arc::make_mut(&mut overlay.state).extend_ref_and_sort(hashed_state.as_ref());
DeferredTrieData::ready(ComputedTrieData {
hashed_state,

View File

@@ -155,8 +155,8 @@ impl LazyOverlay {
for block in blocks_iter {
let block_data = block.wait_cloned();
- Arc::make_mut(&mut state).extend_ref(block_data.hashed_state.as_ref());
- Arc::make_mut(&mut nodes).extend_ref(block_data.trie_updates.as_ref());
+ Arc::make_mut(&mut state).extend_ref_and_sort(block_data.hashed_state.as_ref());
+ Arc::make_mut(&mut nodes).extend_ref_and_sort(block_data.trie_updates.as_ref());
}
TrieInputSorted { state, nodes, prefix_sets: Default::default() }

View File

@@ -217,7 +217,7 @@ impl MerkleChangeSets {
let compute_cumulative_state_revert = |block_number: BlockNumber| -> HashedPostStateSorted {
let mut cumulative_revert = HashedPostStateSorted::default();
for n in (block_number..target_end).rev() {
- cumulative_revert.extend_ref(get_block_state_revert(n))
+ cumulative_revert.extend_ref_and_sort(get_block_state_revert(n))
}
cumulative_revert
};
@@ -270,7 +270,7 @@ impl MerkleChangeSets {
let trie_overlay = Arc::clone(&nodes);
let mut nodes_mut = Arc::unwrap_or_clone(nodes);
- nodes_mut.extend_ref(&this_trie_updates);
+ nodes_mut.extend_ref_and_sort(&this_trie_updates);
nodes = Arc::new(nodes_mut);
// Write the changesets to the DB using the trie updates produced by the block, and the

View File

@@ -582,7 +582,8 @@ impl<TX: DbTx + DbTxMut + 'static, N: NodeTypesForProvider> DatabaseProvider<TX,
let mut result = blocks_iter.next().expect("non-empty").trie_updates();
for block in blocks_iter {
- Arc::make_mut(&mut result).extend_ref(block.trie_updates().as_ref());
+ Arc::make_mut(&mut result)
+     .extend_ref_and_sort(block.trie_updates().as_ref());
}
match Arc::try_unwrap(result) {

View File

@@ -291,7 +291,7 @@ impl<Provider: DBProvider + ChangeSetReader + BlockNumReader> StateRootProvider
fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult<B256> {
let mut revert_state = self.revert_state()?;
let hashed_state_sorted = hashed_state.into_sorted();
- revert_state.extend_ref(&hashed_state_sorted);
+ revert_state.extend_ref_and_sort(&hashed_state_sorted);
Ok(StateRoot::overlay_root(self.tx(), &revert_state)?)
}
@@ -306,7 +306,7 @@ impl<Provider: DBProvider + ChangeSetReader + BlockNumReader> StateRootProvider
) -> ProviderResult<(B256, TrieUpdates)> {
let mut revert_state = self.revert_state()?;
let hashed_state_sorted = hashed_state.into_sorted();
- revert_state.extend_ref(&hashed_state_sorted);
+ revert_state.extend_ref_and_sort(&hashed_state_sorted);
Ok(StateRoot::overlay_root_with_updates(self.tx(), &revert_state)?)
}

View File

@@ -163,12 +163,12 @@ impl<F> OverlayStateProviderFactory<F> {
pub fn with_extended_hashed_state_overlay(mut self, other: HashedPostStateSorted) -> Self {
match &mut self.overlay_source {
Some(OverlaySource::Immediate { state, .. }) => {
- Arc::make_mut(state).extend_ref(&other);
+ Arc::make_mut(state).extend_ref_and_sort(&other);
}
Some(OverlaySource::Lazy(lazy)) => {
// Resolve lazy overlay and convert to immediate with extension
let (trie, mut state) = lazy.as_overlay();
- Arc::make_mut(&mut state).extend_ref(&other);
+ Arc::make_mut(&mut state).extend_ref_and_sort(&other);
self.overlay_source = Some(OverlaySource::Immediate { trie, state });
}
None => {
@@ -342,7 +342,7 @@ where
let trie_updates = if trie_reverts.is_empty() {
overlay_trie
} else if !overlay_trie.is_empty() {
- trie_reverts.extend_ref(&overlay_trie);
+ trie_reverts.extend_ref_and_sort(&overlay_trie);
Arc::new(trie_reverts)
} else {
Arc::new(trie_reverts)
@@ -351,7 +351,7 @@ where
let hashed_state_updates = if hashed_state_reverts.is_empty() {
overlay_state
} else if !overlay_state.is_empty() {
- hashed_state_reverts.extend_ref(&overlay_state);
+ hashed_state_reverts.extend_ref_and_sort(&overlay_state);
Arc::new(hashed_state_reverts)
} else {
Arc::new(hashed_state_reverts)

View File

@@ -621,7 +621,9 @@ impl HashedPostStateSorted {
/// Extends this state with contents of another sorted state.
/// Entries in `other` take precedence for duplicate keys.
- pub fn extend_ref(&mut self, other: &Self) {
+ ///
+ /// Sorts the accounts after extending. Sorts the storage after extending, for each account.
+ pub fn extend_ref_and_sort(&mut self, other: &Self) {
// Extend accounts
extend_sorted_vec(&mut self.accounts, &other.accounts);
@@ -1416,7 +1418,7 @@ mod tests {
storages: B256Map::default(),
};
- state1.extend_ref(&state2);
+ state1.extend_ref_and_sort(&state2);
// Check accounts are merged and sorted
assert_eq!(state1.accounts.len(), 6);

View File

@@ -605,7 +605,10 @@ impl TrieUpdatesSorted {
/// This merges the account nodes and storage tries from `other` into `self`.
/// Account nodes are merged and re-sorted, with `other`'s values taking precedence
/// for duplicate keys.
- pub fn extend_ref(&mut self, other: &Self) {
+ ///
+ /// Sorts the account nodes after extending. Sorts the storage tries after extending, for each
+ /// storage trie.
+ pub fn extend_ref_and_sort(&mut self, other: &Self) {
// Extend account nodes
extend_sorted_vec(&mut self.account_nodes, &other.account_nodes);
@@ -834,7 +837,7 @@ mod tests {
// Test extending with empty updates
let mut updates1 = TrieUpdatesSorted::default();
let updates2 = TrieUpdatesSorted::default();
- updates1.extend_ref(&updates2);
+ updates1.extend_ref_and_sort(&updates2);
assert_eq!(updates1.account_nodes.len(), 0);
assert_eq!(updates1.storage_tries.len(), 0);
@@ -853,7 +856,7 @@ mod tests {
],
storage_tries: B256Map::default(),
};
- updates1.extend_ref(&updates2);
+ updates1.extend_ref_and_sort(&updates2);
assert_eq!(updates1.account_nodes.len(), 3);
// Should be sorted: 0x01, 0x02, 0x03
assert_eq!(updates1.account_nodes[0].0, Nibbles::from_nibbles_unchecked([0x01]));
@@ -889,7 +892,7 @@ mod tests {
(hashed_address2, storage_trie1),
]),
};
- updates1.extend_ref(&updates2);
+ updates1.extend_ref_and_sort(&updates2);
assert_eq!(updates1.storage_tries.len(), 2);
assert!(updates1.storage_tries.contains_key(&hashed_address1));
assert!(updates1.storage_tries.contains_key(&hashed_address2));
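The "for each storage trie" part of the doc comment means the merge is two-level: the outer per-account map is merged, and each inner sorted collection is extended and re-sorted individually. A rough sketch with plain std types, assuming a map-of-sorted-vecs shape (not reth's actual B256Map/StorageTrieUpdatesSorted layout):

use std::collections::HashMap;

// Hypothetical shapes for illustration only.
type HashedAddress = [u8; 32];
type StorageNodes = Vec<(u64, u64)>; // key-sorted storage trie nodes

fn extend_storage_tries(
    dst: &mut HashMap<HashedAddress, StorageNodes>,
    other: &HashMap<HashedAddress, StorageNodes>,
) {
    for (address, nodes) in other {
        // Merge into the existing per-account trie, or start a new one.
        let entry = dst.entry(*address).or_default();
        entry.extend_from_slice(nodes);
        // Re-sort this account's storage nodes after extending.
        entry.sort_by_key(|(k, _)| *k);
    }
}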

View File

@@ -87,7 +87,7 @@ where
// This reverts all changes from db tip back to just after block-1 was processed
let mut cumulative_state_revert_prev = cumulative_state_revert.clone();
- cumulative_state_revert_prev.extend_ref(&individual_state_revert);
+ cumulative_state_revert_prev.extend_ref_and_sort(&individual_state_revert);
// Step 2: Calculate cumulative trie updates revert for block-1
// This gives us the trie state as it was after block-1 was processed
@@ -469,7 +469,7 @@ impl ChangesetCache {
// Since we iterate newest to oldest, older values are added last
// and overwrite any conflicting newer values (oldest changeset values take
// precedence).
- accumulated_reverts.extend_ref(&changesets);
+ accumulated_reverts.extend_ref_and_sort(&changesets);
}
let elapsed = timer.elapsed();
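The comment in this final hunk states the ordering invariant the rename preserves: since the argument of extend_ref_and_sort wins on duplicate keys, walking changesets from newest to oldest leaves the oldest (pre-state) value in the accumulated revert. A self-contained sketch of that accumulation, using a BTreeMap as a stand-in for the sorted structure (names are illustrative, not reth's):

use std::collections::BTreeMap;

// Changesets ordered newest -> oldest; each later insert overwrites earlier
// ones, so the oldest (pre-state) value for a key is what survives.
fn accumulate_reverts(changesets_newest_first: &[Vec<(u64, u64)>]) -> BTreeMap<u64, u64> {
    let mut accumulated = BTreeMap::new();
    for changeset in changesets_newest_first {
        for (key, value) in changeset {
            // Mirrors extend_ref_and_sort's "argument wins on duplicates" rule.
            accumulated.insert(*key, *value);
        }
    }
    accumulated
}

For example, if block 10's revert records key A as 3 (its value just before block 10) and block 9's revert records A as 2, iterating newest to oldest leaves A = 2, the value from before both blocks.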