chore(trie): Implement OverlayStateProviderFactory (#18854)

This commit is contained in:
Brian Picciano
2025-10-06 17:02:07 +02:00
committed by GitHub
parent 636f053c26
commit 3706961379
9 changed files with 570 additions and 8 deletions

View File

@@ -137,6 +137,14 @@ pub enum ProviderError {
/// Missing trie updates.
#[error("missing trie updates for block {0}")]
MissingTrieUpdates(B256),
/// Insufficient changesets to revert to the requested block.
#[error("insufficient changesets to revert to block #{requested}. Available changeset range: {available:?}")]
InsufficientChangesets {
/// The block number requested for reversion
requested: BlockNumber,
/// The available range of blocks with changesets
available: core::ops::RangeInclusive<BlockNumber>,
},
/// Any other error type wrapped into a cloneable [`AnyError`].
#[error(transparent)]
Other(#[from] AnyError),

View File

@@ -18,7 +18,7 @@ use crate::{
OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RevertsInit,
StageCheckpointReader, StateProviderBox, StateWriter, StaticFileProviderFactory, StatsReader,
StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider,
TransactionsProviderExt, TrieWriter,
TransactionsProviderExt, TrieReader, TrieWriter,
};
use alloy_consensus::{
transaction::{SignerRecoverable, TransactionMeta, TxHashRef},
@@ -2446,6 +2446,55 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypes> TrieWriter for DatabaseProvider
}
}
impl<TX: DbTxMut + DbTx + 'static, N: NodeTypes> TrieReader for DatabaseProvider<TX, N> {
    /// Collects the trie changes required to revert the trie database to its state just before
    /// block `from` was processed.
    ///
    /// For every trie node touched in blocks `from..`, the changeset tables record the node's
    /// prior value per block. The oldest entry per key (the first one seen while walking
    /// ascending block numbers) is the value the node had before `from`, so that is the one
    /// kept.
    fn trie_reverts(&self, from: BlockNumber) -> ProviderResult<TrieUpdatesSorted> {
        let tx = self.tx_ref();

        // Walk account trie changesets from `from` onwards. Entries are ordered by
        // (block number, nibbles), so the first occurrence of a key is its oldest version.
        let mut account_nodes = Vec::new();
        let mut seen_account_keys = HashSet::new();
        let mut accounts_cursor = tx.cursor_dup_read::<tables::AccountsTrieChangeSets>()?;
        for entry in accounts_cursor.walk_range(from..)? {
            let (_, TrieChangeSetsEntry { nibbles, node }) = entry?;
            // Only keep the first (oldest) version of each node
            if seen_account_keys.insert(nibbles.0) {
                account_nodes.push((nibbles.0, node));
            }
        }

        // Entries are only sorted by nibbles *within* each block; when the range spans multiple
        // blocks the collected nodes are not globally sorted, which the `TrieUpdatesSorted`
        // invariant requires. Restore global ordering here.
        account_nodes.sort_unstable_by(|a, b| a.0.cmp(&b.0));

        // Walk storage trie changesets, keyed by (block, hashed address) with nibbles as the
        // dup-sort subkey. Track seen (address, nibbles) pairs to keep only the oldest version
        // of each node per account.
        let mut storage_tries = B256Map::<Vec<_>>::default();
        let mut seen_storage_keys = HashSet::new();
        let mut storages_cursor = tx.cursor_dup_read::<tables::StoragesTrieChangeSets>()?;
        let storage_range = BlockNumberHashedAddress((from, B256::ZERO))..;
        for entry in storages_cursor.walk_range(storage_range)? {
            let (
                BlockNumberHashedAddress((_, hashed_address)),
                TrieChangeSetsEntry { nibbles, node },
            ) = entry?;
            // Only keep the first (oldest) version of each node for this address
            if seen_storage_keys.insert((hashed_address, nibbles.0)) {
                storage_tries.entry(hashed_address).or_default().push((nibbles.0, node));
            }
        }

        // Convert to `StorageTrieUpdatesSorted`, sorting each address's node list for the same
        // reason as the account nodes above. Reverts never remove a storage trie wholesale,
        // hence `is_deleted: false`.
        let storage_tries = storage_tries
            .into_iter()
            .map(|(address, mut nodes)| {
                nodes.sort_unstable_by(|a, b| a.0.cmp(&b.0));
                (address, StorageTrieUpdatesSorted { storage_nodes: nodes, is_deleted: false })
            })
            .collect();

        Ok(TrieUpdatesSorted { account_nodes, storage_tries })
    }
}
impl<TX: DbTxMut + DbTx + 'static, N: NodeTypes> StorageTrieWriter for DatabaseProvider<TX, N> {
/// Writes storage trie updates from the given storage trie map.
///

View File

@@ -17,7 +17,7 @@ mod state;
pub use state::{
historical::{HistoricalStateProvider, HistoricalStateProviderRef, LowestAvailableBlocks},
latest::{LatestStateProvider, LatestStateProviderRef},
overlay::OverlayStateProvider,
overlay::{OverlayStateProvider, OverlayStateProviderFactory},
};
mod consistent_view;

View File

@@ -1,15 +1,141 @@
use alloy_primitives::B256;
use alloy_primitives::{BlockNumber, B256};
use reth_db_api::DatabaseError;
use reth_storage_api::DBProvider;
use reth_errors::ProviderError;
use reth_stages_types::StageId;
use reth_storage_api::{DBProvider, DatabaseProviderFactory, StageCheckpointReader, TrieReader};
use reth_trie::{
hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory},
trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory},
updates::TrieUpdatesSorted,
HashedPostStateSorted,
HashedPostState, HashedPostStateSorted, KeccakKeyHasher,
};
use reth_trie_db::{
DatabaseHashedCursorFactory, DatabaseHashedPostState, DatabaseTrieCursorFactory,
};
use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory};
use std::sync::Arc;
/// Factory for creating overlay state providers with optional reverts and overlays.
///
/// This factory allows building an `OverlayStateProvider` whose DB state has been reverted to a
/// particular block, and/or with additional overlay information added on top.
///
/// All configuration is optional: with no block number and no overlays, the resulting provider
/// simply reflects the latest database state.
#[derive(Debug, Clone)]
pub struct OverlayStateProviderFactory<F> {
    /// The underlying database provider factory
    factory: F,
    /// Optional block number for collecting reverts. When set, providers present the state as
    /// it was just before this block was processed.
    block_number: Option<BlockNumber>,
    /// Optional trie overlay, applied on top of any collected reverts
    trie_overlay: Option<Arc<TrieUpdatesSorted>>,
    /// Optional hashed state overlay, applied on top of any collected reverts
    hashed_state_overlay: Option<Arc<HashedPostStateSorted>>,
}
impl<F> OverlayStateProviderFactory<F>
where
    F: DatabaseProviderFactory,
    F::Provider: Clone + TrieReader + StageCheckpointReader,
{
    /// Create a new overlay state provider factory with no reverts and no overlays configured.
    pub const fn new(factory: F) -> Self {
        Self { factory, block_number: None, trie_overlay: None, hashed_state_overlay: None }
    }

    /// Set the block number for collecting reverts.
    ///
    /// Providers built afterwards will present the state as it was just before this block was
    /// processed. Passing `None` disables reverts.
    pub const fn with_block_number(mut self, block_number: Option<BlockNumber>) -> Self {
        self.block_number = block_number;
        self
    }

    /// Set the trie overlay, applied on top of any collected reverts.
    pub fn with_trie_overlay(mut self, trie_overlay: Option<Arc<TrieUpdatesSorted>>) -> Self {
        self.trie_overlay = trie_overlay;
        self
    }

    /// Set the hashed state overlay, applied on top of any collected reverts.
    pub fn with_hashed_state_overlay(
        mut self,
        hashed_state_overlay: Option<Arc<HashedPostStateSorted>>,
    ) -> Self {
        self.hashed_state_overlay = hashed_state_overlay;
        self
    }

    /// Validates that there are sufficient changesets to revert to the requested block number.
    ///
    /// Returns an `InsufficientChangesets` error if the `MerkleChangeSets` stage checkpoint
    /// doesn't cover the requested block.
    fn validate_changesets_availability(
        &self,
        provider: &F::Provider,
        requested_block: BlockNumber,
    ) -> Result<(), ProviderError> {
        // Get the MerkleChangeSets stage checkpoint - let errors propagate as-is
        let checkpoint = provider.get_stage_checkpoint(StageId::MerkleChangeSets)?;

        // If there's no checkpoint at all or block range details are missing, we can't revert.
        // The `0..=0` range serves as a sentinel meaning "no changesets available".
        let checkpoint = checkpoint
            .and_then(|checkpoint| checkpoint.merkle_changesets_stage_checkpoint())
            .ok_or_else(|| ProviderError::InsufficientChangesets {
                requested: requested_block,
                available: 0..=0,
            })?;

        // Check if the requested block is within the available range
        let available_range = checkpoint.block_range.from..=checkpoint.block_range.to;
        if !available_range.contains(&requested_block) {
            return Err(ProviderError::InsufficientChangesets {
                requested: requested_block,
                available: available_range,
            });
        }

        Ok(())
    }

    /// Create a read-only [`OverlayStateProvider`].
    ///
    /// When a revert block number is configured, the trie and hashed-state reverts are first
    /// collected from the database, and any configured overlays are applied on top of them
    /// (overlay entries take precedence). Otherwise the overlays (or empty defaults) are used
    /// directly.
    pub fn provider_ro(&self) -> Result<OverlayStateProvider<F::Provider>, ProviderError> {
        // Get a read-only provider
        let provider = self.factory.database_provider_ro()?;

        // If block_number is provided, collect reverts
        let (trie_updates, hashed_state) = if let Some(from_block) = self.block_number {
            // Validate that we have sufficient changesets for the requested block before
            // reading anything, so we fail fast with a descriptive error
            self.validate_changesets_availability(&provider, from_block)?;

            // Collect trie reverts
            let mut trie_updates_mut = provider.trie_reverts(from_block)?;

            // Collect state reverts using HashedPostState::from_reverts
            let reverted_state =
                HashedPostState::from_reverts::<KeccakKeyHasher>(provider.tx_ref(), from_block..)?;
            let mut hashed_state_mut = reverted_state.into_sorted();

            // Extend with overlays if provided; overlay entries override reverted ones
            if let Some(trie_overlay) = &self.trie_overlay {
                trie_updates_mut.extend_ref(trie_overlay);
            }
            if let Some(hashed_state_overlay) = &self.hashed_state_overlay {
                hashed_state_mut.extend_ref(hashed_state_overlay);
            }

            (Arc::new(trie_updates_mut), Arc::new(hashed_state_mut))
        } else {
            // If no block_number, use overlays directly or defaults
            let trie_updates =
                self.trie_overlay.clone().unwrap_or_else(|| Arc::new(TrieUpdatesSorted::default()));
            let hashed_state = self
                .hashed_state_overlay
                .clone()
                .unwrap_or_else(|| Arc::new(HashedPostStateSorted::default()));
            (trie_updates, hashed_state)
        };

        Ok(OverlayStateProvider::new(provider, trie_updates, hashed_state))
    }
}
/// State provider with in-memory overlay from trie updates and hashed post state.
///
/// This provider uses in-memory trie updates and hashed post state as an overlay

View File

@@ -89,6 +89,14 @@ pub trait StateProofProvider: Send + Sync {
fn witness(&self, input: TrieInput, target: HashedPostState) -> ProviderResult<Vec<Bytes>>;
}
/// Trie Reader
///
/// Provides read access to stored trie changesets, enabling reconstruction of historical trie
/// state.
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait TrieReader: Send + Sync {
    /// Returns the [`TrieUpdatesSorted`] for reverting the trie database to its state prior to the
    /// given block having been processed.
    fn trie_reverts(&self, from: BlockNumber) -> ProviderResult<TrieUpdatesSorted>;
}
/// Trie Writer
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait TrieWriter: Send + Sync {

View File

@@ -3,6 +3,7 @@ use core::ops::Not;
use crate::{
added_removed_keys::MultiAddedRemovedKeys,
prefix_set::{PrefixSetMut, TriePrefixSetsMut},
utils::extend_sorted_vec,
KeyHasher, MultiProofTargets, Nibbles,
};
use alloc::{borrow::Cow, vec::Vec};
@@ -484,6 +485,21 @@ impl HashedPostStateSorted {
pub const fn account_storages(&self) -> &B256Map<HashedStorageSorted> {
&self.storages
}
/// Merges another sorted state into this one.
///
/// For keys present on both sides, the values from `other` take precedence.
pub fn extend_ref(&mut self, other: &Self) {
    // Merge the account collections.
    self.accounts.extend_ref(&other.accounts);

    // Merge per-address storages, combining entries that exist on both sides.
    for (address, other_storage) in &other.storages {
        match self.storages.get_mut(address) {
            Some(existing) => existing.extend_ref(other_storage),
            None => {
                self.storages.insert(*address, other_storage.clone());
            }
        }
    }
}
}
impl AsRef<Self> for HashedPostStateSorted {
@@ -510,6 +526,20 @@ impl HashedAccountsSorted {
.chain(self.destroyed_accounts.iter().map(|address| (*address, None)))
.sorted_by_key(|entry| *entry.0)
}
/// Extends this collection with contents of another sorted collection.
/// Entries in `other` take precedence for duplicate keys.
///
/// Updates take precedence over removals in both directions: removals in `other` drop matching
/// updates held in `self`, and updates in `other` resurrect accounts previously destroyed in
/// `self`.
pub fn extend_ref(&mut self, other: &Self) {
    // Updates take precedence over removals, so we want removals from `other` to only apply to
    // the previous accounts.
    self.accounts.retain(|(addr, _)| !other.destroyed_accounts.contains(addr));

    // Symmetrically, an account updated in `other` must no longer be considered destroyed,
    // otherwise it would appear as both updated and removed after the merge.
    // `other.accounts` is sorted by address, so a binary search suffices.
    self.destroyed_accounts
        .retain(|addr| other.accounts.binary_search_by_key(addr, |(a, _)| *a).is_err());

    // Extend the sorted accounts vector
    extend_sorted_vec(&mut self.accounts, &other.accounts);

    // Merge destroyed accounts sets
    self.destroyed_accounts.extend(&other.destroyed_accounts);
}
}
/// Sorted hashed storage optimized for iterating during state trie calculation.
@@ -537,6 +567,28 @@ impl HashedStorageSorted {
.chain(self.zero_valued_slots.iter().map(|hashed_slot| (*hashed_slot, U256::ZERO)))
.sorted_by_key(|entry| *entry.0)
}
/// Extends this storage with contents of another sorted storage.
/// Entries in `other` take precedence for duplicate keys.
///
/// If `other` is wiped, the previous contents of `self` are discarded entirely.
pub fn extend_ref(&mut self, other: &Self) {
    if other.wiped {
        // If other is wiped, clear everything and copy from other
        self.wiped = true;
        self.non_zero_valued_slots.clear();
        self.zero_valued_slots.clear();
        self.non_zero_valued_slots.extend_from_slice(&other.non_zero_valued_slots);
        self.zero_valued_slots.extend(&other.zero_valued_slots);
        return;
    }

    // Updates take precedence over removals: drop slots that `other` zeroes out...
    self.non_zero_valued_slots.retain(|(slot, _)| !other.zero_valued_slots.contains(slot));

    // ...and, symmetrically, slots re-assigned a non-zero value in `other` must no longer be
    // tracked as zeroed, otherwise they would appear in both sets after the merge.
    // `other.non_zero_valued_slots` is sorted by slot, so a binary search suffices.
    self.zero_valued_slots.retain(|slot| {
        other.non_zero_valued_slots.binary_search_by_key(slot, |(s, _)| *s).is_err()
    });

    // Extend the sorted non-zero valued slots
    extend_sorted_vec(&mut self.non_zero_valued_slots, &other.non_zero_valued_slots);

    // Merge zero valued slots sets
    self.zero_valued_slots.extend(&other.zero_valued_slots);
}
}
/// An iterator that yields chunks of the state updates of at most `size` account and storage
@@ -1072,4 +1124,102 @@ mod tests {
);
assert_eq!(chunks.next(), None);
}
#[test]
fn test_hashed_post_state_sorted_extend_ref() {
    // Base state: accounts 0x01.. and 0x03.. updated, account 0x05.. destroyed.
    let mut state1 = HashedPostStateSorted {
        accounts: HashedAccountsSorted {
            accounts: vec![
                (B256::from([1; 32]), Account::default()),
                (B256::from([3; 32]), Account::default()),
            ],
            destroyed_accounts: B256Set::from_iter([B256::from([5; 32])]),
        },
        storages: B256Map::default(),
    };

    // Extension: disjoint accounts 0x02../0x04.., plus an override for 0x03.. with nonce 1.
    let state2 = HashedPostStateSorted {
        accounts: HashedAccountsSorted {
            accounts: vec![
                (B256::from([2; 32]), Account::default()),
                (B256::from([3; 32]), Account { nonce: 1, ..Default::default() }), // Override
                (B256::from([4; 32]), Account::default()),
            ],
            destroyed_accounts: B256Set::from_iter([B256::from([6; 32])]),
        },
        storages: B256Map::default(),
    };

    state1.extend_ref(&state2);

    // Check accounts are merged and sorted
    assert_eq!(state1.accounts.accounts.len(), 4);
    assert_eq!(state1.accounts.accounts[0].0, B256::from([1; 32]));
    assert_eq!(state1.accounts.accounts[1].0, B256::from([2; 32]));
    assert_eq!(state1.accounts.accounts[2].0, B256::from([3; 32]));
    assert_eq!(state1.accounts.accounts[2].1.nonce, 1); // Should have state2's value
    assert_eq!(state1.accounts.accounts[3].0, B256::from([4; 32]));

    // Check destroyed accounts are merged (the destroyed sets are disjoint from the updates
    // here, so both survive the merge)
    assert!(state1.accounts.destroyed_accounts.contains(&B256::from([5; 32])));
    assert!(state1.accounts.destroyed_accounts.contains(&B256::from([6; 32])));
}
#[test]
fn test_hashed_storage_sorted_extend_ref() {
    // Test normal (non-wiped) extension: slots merge sorted, `other` wins on duplicates.
    let mut storage1 = HashedStorageSorted {
        non_zero_valued_slots: vec![
            (B256::from([1; 32]), U256::from(10)),
            (B256::from([3; 32]), U256::from(30)),
        ],
        zero_valued_slots: B256Set::from_iter([B256::from([5; 32])]),
        wiped: false,
    };

    let storage2 = HashedStorageSorted {
        non_zero_valued_slots: vec![
            (B256::from([2; 32]), U256::from(20)),
            (B256::from([3; 32]), U256::from(300)), // Override
            (B256::from([4; 32]), U256::from(40)),
        ],
        zero_valued_slots: B256Set::from_iter([B256::from([6; 32])]),
        wiped: false,
    };

    storage1.extend_ref(&storage2);

    // Non-zero slots are merged and sorted, with storage2's value winning for slot 0x03..
    assert_eq!(storage1.non_zero_valued_slots.len(), 4);
    assert_eq!(storage1.non_zero_valued_slots[0].0, B256::from([1; 32]));
    assert_eq!(storage1.non_zero_valued_slots[1].0, B256::from([2; 32]));
    assert_eq!(storage1.non_zero_valued_slots[2].0, B256::from([3; 32]));
    assert_eq!(storage1.non_zero_valued_slots[2].1, U256::from(300)); // Should have storage2's value
    assert_eq!(storage1.non_zero_valued_slots[3].0, B256::from([4; 32]));
    assert!(storage1.zero_valued_slots.contains(&B256::from([5; 32])));
    assert!(storage1.zero_valued_slots.contains(&B256::from([6; 32])));
    assert!(!storage1.wiped);

    // Test wiped storage: a wiped `other` discards all previous contents of `self`.
    let mut storage3 = HashedStorageSorted {
        non_zero_valued_slots: vec![(B256::from([1; 32]), U256::from(10))],
        zero_valued_slots: B256Set::from_iter([B256::from([2; 32])]),
        wiped: false,
    };

    let storage4 = HashedStorageSorted {
        non_zero_valued_slots: vec![(B256::from([3; 32]), U256::from(30))],
        zero_valued_slots: B256Set::from_iter([B256::from([4; 32])]),
        wiped: true,
    };

    storage3.extend_ref(&storage4);
    assert!(storage3.wiped);

    // When wiped, should only have storage4's values
    assert_eq!(storage3.non_zero_valued_slots.len(), 1);
    assert_eq!(storage3.non_zero_valued_slots[0].0, B256::from([3; 32]));
    assert_eq!(storage3.zero_valued_slots.len(), 1);
    assert!(storage3.zero_valued_slots.contains(&B256::from([4; 32])));
}
}

View File

@@ -57,6 +57,9 @@ pub mod updates;
pub mod added_removed_keys;
/// Utilities used by other modules in this crate.
mod utils;
/// Bincode-compatible serde implementations for trie types.
///
/// `bincode` crate allows for more efficient serialization of trie types, because it allows

View File

@@ -1,4 +1,4 @@
use crate::{BranchNodeCompact, HashBuilder, Nibbles};
use crate::{utils::extend_sorted_vec, BranchNodeCompact, HashBuilder, Nibbles};
use alloc::{
collections::{btree_map::BTreeMap, btree_set::BTreeSet},
vec::Vec,
@@ -447,6 +447,24 @@ impl TrieUpdatesSorted {
pub const fn storage_tries_ref(&self) -> &B256Map<StorageTrieUpdatesSorted> {
&self.storage_tries
}
/// Merges another set of sorted trie updates into this one.
///
/// Account nodes from both sets are combined (re-sorting as needed), with `other`'s
/// entries winning for duplicate paths. Storage tries are merged per hashed address.
pub fn extend_ref(&mut self, other: &Self) {
    // Merge the account node lists.
    extend_sorted_vec(&mut self.account_nodes, &other.account_nodes);

    // Merge the per-address storage trie updates.
    for (address, other_trie) in &other.storage_tries {
        match self.storage_tries.get_mut(address) {
            Some(existing) => existing.extend_ref(other_trie),
            None => {
                self.storage_tries.insert(*address, other_trie.clone());
            }
        }
    }
}
}
impl AsRef<Self> for TrieUpdatesSorted {
@@ -488,6 +506,23 @@ impl StorageTrieUpdatesSorted {
pub fn storage_nodes_ref(&self) -> &[(Nibbles, Option<BranchNodeCompact>)] {
&self.storage_nodes
}
/// Extends the storage trie updates with another set of sorted updates.
///
/// If `other` is marked as deleted, this will be marked as deleted and its nodes replaced
/// wholesale by `other`'s. Otherwise, nodes are merged with `other`'s values taking precedence
/// for duplicates.
pub fn extend_ref(&mut self, other: &Self) {
    if other.is_deleted {
        // The trie was deleted in `other`: discard our nodes and take `other`'s, reusing our
        // existing allocation where possible.
        self.is_deleted = true;
        self.storage_nodes.clone_from(&other.storage_nodes);
        return;
    }

    // Extend the sorted node list; `other`'s entries win for duplicate paths. `other.is_deleted`
    // is necessarily `false` on this path, so `self.is_deleted` is left unchanged (the original
    // trailing `self.is_deleted = self.is_deleted || other.is_deleted` was a no-op).
    extend_sorted_vec(&mut self.storage_nodes, &other.storage_nodes);
}
}
/// Excludes empty nibbles from the given iterator.
@@ -502,6 +537,136 @@ fn exclude_empty_from_pair<V>(
iter.into_iter().filter(|(n, _)| !n.is_empty())
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_primitives::B256;

    #[test]
    fn test_trie_updates_sorted_extend_ref() {
        // Test extending with empty updates: a no-op.
        let mut updates1 = TrieUpdatesSorted::default();
        let updates2 = TrieUpdatesSorted::default();
        updates1.extend_ref(&updates2);
        assert_eq!(updates1.account_nodes.len(), 0);
        assert_eq!(updates1.storage_tries.len(), 0);

        // Test extending account nodes: merged sorted, `other` wins on duplicate paths.
        let mut updates1 = TrieUpdatesSorted {
            account_nodes: vec![
                (Nibbles::from_nibbles_unchecked([0x01]), Some(BranchNodeCompact::default())),
                (Nibbles::from_nibbles_unchecked([0x03]), None),
            ],
            storage_tries: B256Map::default(),
        };

        let updates2 = TrieUpdatesSorted {
            account_nodes: vec![
                (Nibbles::from_nibbles_unchecked([0x02]), Some(BranchNodeCompact::default())),
                (Nibbles::from_nibbles_unchecked([0x03]), Some(BranchNodeCompact::default())), /* Override */
            ],
            storage_tries: B256Map::default(),
        };

        updates1.extend_ref(&updates2);
        assert_eq!(updates1.account_nodes.len(), 3);

        // Should be sorted: 0x01, 0x02, 0x03
        assert_eq!(updates1.account_nodes[0].0, Nibbles::from_nibbles_unchecked([0x01]));
        assert_eq!(updates1.account_nodes[1].0, Nibbles::from_nibbles_unchecked([0x02]));
        assert_eq!(updates1.account_nodes[2].0, Nibbles::from_nibbles_unchecked([0x03]));

        // 0x03 should have Some value from updates2 (override)
        assert!(updates1.account_nodes[2].1.is_some());

        // Test extending storage tries: existing addresses are merged, new ones inserted.
        let storage_trie1 = StorageTrieUpdatesSorted {
            is_deleted: false,
            storage_nodes: vec![(
                Nibbles::from_nibbles_unchecked([0x0a]),
                Some(BranchNodeCompact::default()),
            )],
        };

        let storage_trie2 = StorageTrieUpdatesSorted {
            is_deleted: false,
            storage_nodes: vec![(Nibbles::from_nibbles_unchecked([0x0b]), None)],
        };

        let hashed_address1 = B256::from([1; 32]);
        let hashed_address2 = B256::from([2; 32]);

        let mut updates1 = TrieUpdatesSorted {
            account_nodes: vec![],
            storage_tries: B256Map::from_iter([(hashed_address1, storage_trie1.clone())]),
        };

        let updates2 = TrieUpdatesSorted {
            account_nodes: vec![],
            storage_tries: B256Map::from_iter([
                (hashed_address1, storage_trie2),
                (hashed_address2, storage_trie1),
            ]),
        };

        updates1.extend_ref(&updates2);
        assert_eq!(updates1.storage_tries.len(), 2);
        assert!(updates1.storage_tries.contains_key(&hashed_address1));
        assert!(updates1.storage_tries.contains_key(&hashed_address2));

        // Check that storage trie for hashed_address1 was extended (0x0a and 0x0b)
        let merged_storage = &updates1.storage_tries[&hashed_address1];
        assert_eq!(merged_storage.storage_nodes.len(), 2);
    }

    #[test]
    fn test_storage_trie_updates_sorted_extend_ref_deleted() {
        // Test case 1: Extending with a deleted storage trie that has nodes — the deletion
        // discards the previous nodes and takes `other`'s wholesale.
        let mut storage1 = StorageTrieUpdatesSorted {
            is_deleted: false,
            storage_nodes: vec![
                (Nibbles::from_nibbles_unchecked([0x01]), Some(BranchNodeCompact::default())),
                (Nibbles::from_nibbles_unchecked([0x02]), None),
            ],
        };

        let storage2 = StorageTrieUpdatesSorted {
            is_deleted: true,
            storage_nodes: vec![
                (Nibbles::from_nibbles_unchecked([0x03]), Some(BranchNodeCompact::default())),
                (Nibbles::from_nibbles_unchecked([0x04]), None),
            ],
        };

        storage1.extend_ref(&storage2);

        // Should be marked as deleted
        assert!(storage1.is_deleted);

        // Original nodes should be cleared, but other's nodes should be added
        assert_eq!(storage1.storage_nodes.len(), 2);
        assert_eq!(storage1.storage_nodes[0].0, Nibbles::from_nibbles_unchecked([0x03]));
        assert_eq!(storage1.storage_nodes[1].0, Nibbles::from_nibbles_unchecked([0x04]));

        // Test case 2: Extending a deleted storage trie with another deleted one.
        let mut storage3 = StorageTrieUpdatesSorted {
            is_deleted: true,
            storage_nodes: vec![(
                Nibbles::from_nibbles_unchecked([0x05]),
                Some(BranchNodeCompact::default()),
            )],
        };

        let storage4 = StorageTrieUpdatesSorted {
            is_deleted: true,
            storage_nodes: vec![
                (Nibbles::from_nibbles_unchecked([0x06]), Some(BranchNodeCompact::default())),
                (Nibbles::from_nibbles_unchecked([0x07]), None),
            ],
        };

        storage3.extend_ref(&storage4);

        // Should remain deleted
        assert!(storage3.is_deleted);

        // Should have nodes from other (original cleared then extended)
        assert_eq!(storage3.storage_nodes.len(), 2);
        assert_eq!(storage3.storage_nodes[0].0, Nibbles::from_nibbles_unchecked([0x06]));
        assert_eq!(storage3.storage_nodes[1].0, Nibbles::from_nibbles_unchecked([0x07]));
    }
}
/// Bincode-compatible trie updates type serde implementations.
#[cfg(feature = "serde-bincode-compat")]
pub mod serde_bincode_compat {
@@ -717,7 +882,7 @@ pub mod serde_bincode_compat {
}
#[cfg(all(test, feature = "serde"))]
mod tests {
mod serde_tests {
use super::*;
#[test]

View File

@@ -0,0 +1,53 @@
use alloc::vec::Vec;
/// Extends a sorted vector with the contents of another sorted vector.
/// Values from `other` take precedence for duplicate keys.
///
/// Both inputs are expected to be sorted by key with unique keys; the result is again sorted
/// with unique keys. The merge works mostly in place:
/// 1. Walk `target` mutably while peeking at `other`
/// 2. A key in `other` equal to the current `target` key overwrites the value in place
/// 3. Keys from `other` missing in `target` are collected for insertion
/// 4. Collected and leftover items are appended and the vector re-sorted only when needed
pub(crate) fn extend_sorted_vec<K, V>(target: &mut Vec<(K, V)>, other: &[(K, V)])
where
    K: Clone + Ord,
    V: Clone,
{
    use core::cmp::Ordering;

    if other.is_empty() {
        return;
    }

    let mut other_iter = other.iter().peekable();
    let mut to_insert = Vec::new();

    // Walk `target` in order, consuming every item of `other` that sorts at or before the
    // current target item.
    for target_item in target.iter_mut() {
        while let Some(other_item) = other_iter.peek() {
            match other_item.0.cmp(&target_item.0) {
                Ordering::Less => {
                    // New key that sorts before the current target item; queue for insertion
                    to_insert.push(other_iter.next().unwrap().clone());
                }
                Ordering::Equal => {
                    // Duplicate key: `other`'s value wins, updated in place
                    target_item.1 = other_iter.next().unwrap().1.clone();
                    break;
                }
                Ordering::Greater => {
                    // The remaining `other` items belong after this target item
                    break;
                }
            }
        }
    }

    // Append queued insertions plus any leftover `other` items (which all sort after the last
    // target item), then restore global ordering. Skipped entirely when nothing new was added.
    if !to_insert.is_empty() || other_iter.peek().is_some() {
        target.extend(to_insert);
        target.extend(other_iter.cloned());
        target.sort_unstable_by(|a, b| a.0.cmp(&b.0));
    }
}