refactor: use alloy_primitives::map for all HashMap/HashSet types (#21686)

Co-authored-by: Amp <amp@ampcode.com>
This commit is contained in:
Georgios Konstantopoulos
2026-02-04 04:08:39 -08:00
committed by GitHub
parent 98313a0bea
commit f53f90d714
56 changed files with 290 additions and 281 deletions

1
Cargo.lock generated
View File

@@ -3800,6 +3800,7 @@ dependencies = [
name = "example-full-contract-state"
version = "1.10.2"
dependencies = [
"alloy-primitives",
"eyre",
"reth-ethereum",
]

View File

@@ -6,7 +6,7 @@ use crate::{
};
use alloy_consensus::{transaction::TransactionMeta, BlockHeader};
use alloy_eips::{BlockHashOrNumber, BlockNumHash};
use alloy_primitives::{map::HashMap, BlockNumber, TxHash, B256};
use alloy_primitives::{map::B256Map, BlockNumber, TxHash, B256};
use parking_lot::RwLock;
use reth_chainspec::ChainInfo;
use reth_ethereum_primitives::EthPrimitives;
@@ -57,7 +57,7 @@ pub(crate) struct InMemoryStateMetrics {
#[derive(Debug, Default)]
pub(crate) struct InMemoryState<N: NodePrimitives = EthPrimitives> {
/// All canonical blocks that are not on disk yet.
blocks: RwLock<HashMap<B256, Arc<BlockState<N>>>>,
blocks: RwLock<B256Map<Arc<BlockState<N>>>>,
/// Mapping of block numbers to block hashes.
numbers: RwLock<BTreeMap<u64, B256>>,
/// The pending block that has not yet been made canonical.
@@ -68,7 +68,7 @@ pub(crate) struct InMemoryState<N: NodePrimitives = EthPrimitives> {
impl<N: NodePrimitives> InMemoryState<N> {
pub(crate) fn new(
blocks: HashMap<B256, Arc<BlockState<N>>>,
blocks: B256Map<Arc<BlockState<N>>>,
numbers: BTreeMap<u64, B256>,
pending: Option<BlockState<N>>,
) -> Self {
@@ -184,7 +184,7 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> {
/// Create a new in-memory state with the given blocks, numbers, pending state, and optional
/// finalized header.
pub fn new(
blocks: HashMap<B256, Arc<BlockState<N>>>,
blocks: B256Map<Arc<BlockState<N>>>,
numbers: BTreeMap<u64, B256>,
pending: Option<BlockState<N>>,
finalized: Option<SealedHeader<N::BlockHeader>>,
@@ -209,7 +209,7 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> {
/// Create an empty state.
pub fn empty() -> Self {
Self::new(HashMap::default(), BTreeMap::new(), None, None, None)
Self::new(B256Map::default(), BTreeMap::new(), None, None, None)
}
/// Create a new in memory state with the given local head and finalized header
@@ -1176,7 +1176,7 @@ mod tests {
#[test]
fn test_in_memory_state_impl_state_by_hash() {
let mut state_by_hash = HashMap::default();
let mut state_by_hash = B256Map::default();
let number = rand::rng().random::<u64>();
let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default();
let state = Arc::new(create_mock_state(&mut test_block_builder, number, B256::random()));
@@ -1190,7 +1190,7 @@ mod tests {
#[test]
fn test_in_memory_state_impl_state_by_number() {
let mut state_by_hash = HashMap::default();
let mut state_by_hash = B256Map::default();
let mut hash_by_number = BTreeMap::new();
let number = rand::rng().random::<u64>();
@@ -1209,7 +1209,7 @@ mod tests {
#[test]
fn test_in_memory_state_impl_head_state() {
let mut state_by_hash = HashMap::default();
let mut state_by_hash = B256Map::default();
let mut hash_by_number = BTreeMap::new();
let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default();
let state1 = Arc::new(create_mock_state(&mut test_block_builder, 1, B256::random()));
@@ -1237,7 +1237,7 @@ mod tests {
let pending_hash = pending_state.hash();
let in_memory_state =
InMemoryState::new(HashMap::default(), BTreeMap::new(), Some(pending_state));
InMemoryState::new(B256Map::default(), BTreeMap::new(), Some(pending_state));
let result = in_memory_state.pending_state();
assert!(result.is_some());
@@ -1249,7 +1249,7 @@ mod tests {
#[test]
fn test_in_memory_state_impl_no_pending_state() {
let in_memory_state: InMemoryState =
InMemoryState::new(HashMap::default(), BTreeMap::new(), None);
InMemoryState::new(B256Map::default(), BTreeMap::new(), None);
assert_eq!(in_memory_state.pending_state(), None);
}
@@ -1380,7 +1380,7 @@ mod tests {
let state2 = Arc::new(BlockState::with_parent(block2.clone(), Some(state1.clone())));
let state3 = Arc::new(BlockState::with_parent(block3.clone(), Some(state2.clone())));
let mut blocks = HashMap::default();
let mut blocks = B256Map::default();
blocks.insert(block1.recovered_block().hash(), state1);
blocks.insert(block2.recovered_block().hash(), state2);
blocks.insert(block3.recovered_block().hash(), state3);
@@ -1427,7 +1427,7 @@ mod tests {
fn test_canonical_in_memory_state_canonical_chain_single_block() {
let block = TestBlockBuilder::eth().get_executed_block_with_number(1, B256::random());
let hash = block.recovered_block().hash();
let mut blocks = HashMap::default();
let mut blocks = B256Map::default();
blocks.insert(hash, Arc::new(BlockState::new(block)));
let mut numbers = BTreeMap::new();
numbers.insert(1, hash);

View File

@@ -2,7 +2,7 @@
use crate::{engine::DownloadRequest, metrics::BlockDownloaderMetrics};
use alloy_consensus::BlockHeader;
use alloy_primitives::B256;
use alloy_primitives::{map::B256Set, B256};
use futures::FutureExt;
use reth_consensus::Consensus;
use reth_network_p2p::{
@@ -12,7 +12,7 @@ use reth_network_p2p::{
use reth_primitives_traits::{Block, SealedBlock};
use std::{
cmp::{Ordering, Reverse},
collections::{binary_heap::PeekMut, BinaryHeap, HashSet, VecDeque},
collections::{binary_heap::PeekMut, BinaryHeap, VecDeque},
fmt::Debug,
sync::Arc,
task::{Context, Poll},
@@ -109,7 +109,7 @@ where
}
/// Processes a block set download request.
fn download_block_set(&mut self, hashes: HashSet<B256>) {
fn download_block_set(&mut self, hashes: B256Set) {
for hash in hashes {
self.download_full_block(hash);
}
@@ -397,7 +397,7 @@ mod tests {
// send block set download request
block_downloader.on_action(DownloadAction::Download(DownloadRequest::BlockSet(
HashSet::from([tip.hash(), tip.parent_hash]),
B256Set::from_iter([tip.hash(), tip.parent_hash]),
)));
// ensure we have TOTAL_BLOCKS in flight full block request
@@ -440,7 +440,7 @@ mod tests {
)));
// send block set download request
let download_set = HashSet::from([tip.hash(), tip.parent_hash]);
let download_set = B256Set::from_iter([tip.hash(), tip.parent_hash]);
block_downloader
.on_action(DownloadAction::Download(DownloadRequest::BlockSet(download_set.clone())));

View File

@@ -5,7 +5,7 @@ use crate::{
chain::{ChainHandler, FromOrchestrator, HandlerEvent},
download::{BlockDownloader, DownloadAction, DownloadOutcome},
};
use alloy_primitives::B256;
use alloy_primitives::{map::B256Set, B256};
use crossbeam_channel::Sender;
use futures::{Stream, StreamExt};
use reth_chain_state::ExecutedBlock;
@@ -14,7 +14,6 @@ use reth_ethereum_primitives::EthPrimitives;
use reth_payload_primitives::PayloadTypes;
use reth_primitives_traits::{Block, NodePrimitives, SealedBlock};
use std::{
collections::HashSet,
fmt::Display,
task::{ready, Context, Poll},
};
@@ -341,7 +340,7 @@ pub enum RequestHandlerEvent<T> {
#[derive(Debug)]
pub enum DownloadRequest {
/// Download the given set of blocks.
BlockSet(HashSet<B256>),
BlockSet(B256Set),
/// Download the given range of blocks.
BlockRange(B256, u64),
}
@@ -349,6 +348,6 @@ pub enum DownloadRequest {
impl DownloadRequest {
/// Returns a [`DownloadRequest`] for a single block.
pub fn single_block(hash: B256) -> Self {
Self::BlockSet(HashSet::from([hash]))
Self::BlockSet(B256Set::from_iter([hash]))
}
}

View File

@@ -3,7 +3,7 @@
use crate::engine::EngineApiKind;
use alloy_eips::BlockNumHash;
use alloy_primitives::{
map::{HashMap, HashSet},
map::{B256Map, B256Set},
BlockNumber, B256,
};
use reth_chain_state::{DeferredTrieData, EthPrimitives, ExecutedBlock, LazyOverlay};
@@ -25,7 +25,7 @@ pub struct TreeState<N: NodePrimitives = EthPrimitives> {
/// __All__ unique executed blocks by block hash that are connected to the canonical chain.
///
/// This includes blocks of all forks.
pub(crate) blocks_by_hash: HashMap<B256, ExecutedBlock<N>>,
pub(crate) blocks_by_hash: B256Map<ExecutedBlock<N>>,
/// Executed blocks grouped by their respective block number.
///
/// This maps unique block number to all known blocks for that height.
@@ -33,7 +33,7 @@ pub struct TreeState<N: NodePrimitives = EthPrimitives> {
/// Note: there can be multiple blocks at the same height due to forks.
pub(crate) blocks_by_number: BTreeMap<BlockNumber, Vec<ExecutedBlock<N>>>,
/// Map of any parent block hash to its children.
pub(crate) parent_to_child: HashMap<B256, HashSet<B256>>,
pub(crate) parent_to_child: B256Map<B256Set>,
/// Currently tracked canonical head of the chain.
pub(crate) current_canonical_head: BlockNumHash,
/// The engine API variant of this handler
@@ -50,10 +50,10 @@ impl<N: NodePrimitives> TreeState<N> {
/// Returns a new, empty tree state that points to the given canonical head.
pub fn new(current_canonical_head: BlockNumHash, engine_kind: EngineApiKind) -> Self {
Self {
blocks_by_hash: HashMap::default(),
blocks_by_hash: B256Map::default(),
blocks_by_number: BTreeMap::new(),
current_canonical_head,
parent_to_child: HashMap::default(),
parent_to_child: B256Map::default(),
engine_kind,
cached_canonical_overlay: None,
}
@@ -178,7 +178,7 @@ impl<N: NodePrimitives> TreeState<N> {
/// ## Returns
///
/// The removed block and the block hashes of its children.
fn remove_by_hash(&mut self, hash: B256) -> Option<(ExecutedBlock<N>, HashSet<B256>)> {
fn remove_by_hash(&mut self, hash: B256) -> Option<(ExecutedBlock<N>, B256Set)> {
let executed = self.blocks_by_hash.remove(&hash)?;
// Remove this block from collection of children of its parent block.
@@ -489,7 +489,7 @@ mod tests {
assert_eq!(
tree_state.parent_to_child.get(&blocks[0].recovered_block().hash()),
Some(&HashSet::from_iter([blocks[1].recovered_block().hash()]))
Some(&B256Set::from_iter([blocks[1].recovered_block().hash()]))
);
assert!(!tree_state.parent_to_child.contains_key(&blocks[1].recovered_block().hash()));
@@ -498,7 +498,7 @@ mod tests {
assert_eq!(
tree_state.parent_to_child.get(&blocks[1].recovered_block().hash()),
Some(&HashSet::from_iter([blocks[2].recovered_block().hash()]))
Some(&B256Set::from_iter([blocks[2].recovered_block().hash()]))
);
assert!(tree_state.parent_to_child.contains_key(&blocks[1].recovered_block().hash()));
@@ -586,11 +586,11 @@ mod tests {
assert_eq!(
tree_state.parent_to_child.get(&blocks[2].recovered_block().hash()),
Some(&HashSet::from_iter([blocks[3].recovered_block().hash()]))
Some(&B256Set::from_iter([blocks[3].recovered_block().hash()]))
);
assert_eq!(
tree_state.parent_to_child.get(&blocks[3].recovered_block().hash()),
Some(&HashSet::from_iter([blocks[4].recovered_block().hash()]))
Some(&B256Set::from_iter([blocks[4].recovered_block().hash()]))
);
}
@@ -636,11 +636,11 @@ mod tests {
assert_eq!(
tree_state.parent_to_child.get(&blocks[2].recovered_block().hash()),
Some(&HashSet::from_iter([blocks[3].recovered_block().hash()]))
Some(&B256Set::from_iter([blocks[3].recovered_block().hash()]))
);
assert_eq!(
tree_state.parent_to_child.get(&blocks[3].recovered_block().hash()),
Some(&HashSet::from_iter([blocks[4].recovered_block().hash()]))
Some(&B256Set::from_iter([blocks[4].recovered_block().hash()]))
);
}
@@ -686,11 +686,11 @@ mod tests {
assert_eq!(
tree_state.parent_to_child.get(&blocks[2].recovered_block().hash()),
Some(&HashSet::from_iter([blocks[3].recovered_block().hash()]))
Some(&B256Set::from_iter([blocks[3].recovered_block().hash()]))
);
assert_eq!(
tree_state.parent_to_child.get(&blocks[3].recovered_block().hash()),
Some(&HashSet::from_iter([blocks[4].recovered_block().hash()]))
Some(&B256Set::from_iter([blocks[4].recovered_block().hash()]))
);
}
}

View File

@@ -11,7 +11,7 @@ use reth_trie_db::ChangesetCache;
use alloy_eips::eip1898::BlockWithParent;
use alloy_primitives::{
map::{HashMap, HashSet},
map::{B256Map, B256Set},
Bytes, B256,
};
use alloy_rlp::Decodable;
@@ -235,11 +235,11 @@ impl TestHarness {
}
fn with_blocks(mut self, blocks: Vec<ExecutedBlock>) -> Self {
let mut blocks_by_hash = HashMap::default();
let mut blocks_by_hash = B256Map::default();
let mut blocks_by_number = BTreeMap::new();
let mut state_by_hash = HashMap::default();
let mut state_by_hash = B256Map::default();
let mut hash_by_number = BTreeMap::new();
let mut parent_to_child: HashMap<B256, HashSet<B256>> = HashMap::default();
let mut parent_to_child: B256Map<B256Set> = B256Map::default();
let mut parent_hash = B256::ZERO;
for block in &blocks {
@@ -957,7 +957,7 @@ async fn test_engine_tree_fcu_missing_head() {
let event = test_harness.from_tree_rx.recv().await.unwrap();
match event {
EngineApiEvent::Download(DownloadRequest::BlockSet(actual_block_set)) => {
let expected_block_set = HashSet::from_iter([missing_block.hash()]);
let expected_block_set = B256Set::from_iter([missing_block.hash()]);
assert_eq!(actual_block_set, expected_block_set);
}
_ => panic!("Unexpected event: {event:#?}"),
@@ -1002,7 +1002,7 @@ async fn test_engine_tree_live_sync_transition_required_blocks_requested() {
let event = test_harness.from_tree_rx.recv().await.unwrap();
match event {
EngineApiEvent::Download(DownloadRequest::BlockSet(hash_set)) => {
assert_eq!(hash_set, HashSet::from_iter([main_chain_last_hash]));
assert_eq!(hash_set, B256Set::from_iter([main_chain_last_hash]));
}
_ => panic!("Unexpected event: {event:#?}"),
}
@@ -1011,7 +1011,7 @@ async fn test_engine_tree_live_sync_transition_required_blocks_requested() {
let event = test_harness.from_tree_rx.recv().await.unwrap();
match event {
EngineApiEvent::Download(DownloadRequest::BlockSet(hash_set)) => {
assert_eq!(hash_set, HashSet::from_iter([main_chain_last_hash]));
assert_eq!(hash_set, B256Set::from_iter([main_chain_last_hash]));
}
_ => panic!("Unexpected event: {event:#?}"),
}

View File

@@ -1,4 +1,7 @@
use alloy_primitives::{map::HashMap, B256};
use alloy_primitives::{
map::{B256Map, HashMap},
B256,
};
use reth_db::DatabaseError;
use reth_trie::{
trie_cursor::{TrieCursor, TrieCursorFactory},
@@ -19,7 +22,7 @@ struct EntryDiff<T> {
struct TrieUpdatesDiff {
account_nodes: HashMap<Nibbles, EntryDiff<Option<BranchNodeCompact>>>,
removed_nodes: HashMap<Nibbles, EntryDiff<bool>>,
storage_tries: HashMap<B256, StorageTrieUpdatesDiff>,
storage_tries: B256Map<StorageTrieUpdatesDiff>,
}
impl TrieUpdatesDiff {

View File

@@ -1,7 +1,11 @@
use crate::{BlockExecutionOutput, BlockExecutionResult};
use alloc::{vec, vec::Vec};
use alloy_eips::eip7685::Requests;
use alloy_primitives::{logs_bloom, map::HashMap, Address, BlockNumber, Bloom, Log, B256, U256};
use alloy_primitives::{
logs_bloom,
map::{AddressMap, B256Map, HashMap},
Address, BlockNumber, Bloom, Log, B256, U256,
};
use reth_primitives_traits::{Account, Bytecode, Receipt, StorageEntry};
use reth_trie_common::{HashedPostState, KeyHasher};
use revm::{
@@ -10,14 +14,13 @@ use revm::{
};
/// Type used to initialize revms bundle state.
pub type BundleStateInit =
HashMap<Address, (Option<Account>, Option<Account>, HashMap<B256, (U256, U256)>)>;
pub type BundleStateInit = AddressMap<(Option<Account>, Option<Account>, B256Map<(U256, U256)>)>;
/// Types used inside `RevertsInit` to initialize revms reverts.
pub type AccountRevertInit = (Option<Option<Account>>, Vec<StorageEntry>);
/// Type used to initialize revms reverts.
pub type RevertsInit = HashMap<BlockNumber, HashMap<Address, AccountRevertInit>>;
pub type RevertsInit = HashMap<BlockNumber, AddressMap<AccountRevertInit>>;
/// Represents a changed account
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
@@ -614,12 +617,12 @@ mod tests {
);
// Create a BundleStateInit object and insert initial data
let mut state_init: BundleStateInit = HashMap::default();
let mut state_init: BundleStateInit = AddressMap::default();
state_init
.insert(Address::new([2; 20]), (None, Some(Account::default()), HashMap::default()));
.insert(Address::new([2; 20]), (None, Some(Account::default()), B256Map::default()));
// Create a HashMap for account reverts and insert initial data
let mut revert_inner: HashMap<Address, AccountRevertInit> = HashMap::default();
// Create an AddressMap for account reverts and insert initial data
let mut revert_inner: AddressMap<AccountRevertInit> = AddressMap::default();
revert_inner.insert(Address::new([2; 20]), (None, vec![]));
// Create a RevertsInit object and insert the revert_inner data

View File

@@ -621,12 +621,11 @@ mod tests {
bodies::test_utils::{insert_headers, zip_blocks},
test_utils::{generate_bodies, TestBodiesClient},
};
use alloy_primitives::B256;
use alloy_primitives::{map::B256Map, B256};
use assert_matches::assert_matches;
use reth_consensus::test_utils::TestConsensus;
use reth_provider::test_utils::create_test_provider_factory;
use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams};
use std::collections::HashMap;
// Check that the blocks are emitted in order of block number, not in order of
// first-downloaded
@@ -674,7 +673,7 @@ mod tests {
let bodies = blocks
.into_iter()
.map(|block| (block.hash(), block.into_body()))
.collect::<HashMap<_, _>>();
.collect::<B256Map<_>>();
insert_headers(&factory, &headers);

View File

@@ -3,7 +3,7 @@
#![allow(dead_code)]
use alloy_consensus::BlockHeader;
use alloy_primitives::B256;
use alloy_primitives::map::B256Map;
use reth_ethereum_primitives::BlockBody;
use reth_network_p2p::bodies::response::BlockResponse;
use reth_primitives_traits::{Block, SealedBlock, SealedHeader};
@@ -11,11 +11,10 @@ use reth_provider::{
test_utils::MockNodeTypesWithDB, ProviderFactory, StaticFileProviderFactory, StaticFileSegment,
StaticFileWriter,
};
use std::collections::HashMap;
pub(crate) fn zip_blocks<'a, B: Block>(
headers: impl Iterator<Item = &'a SealedHeader<B::Header>>,
bodies: &mut HashMap<B256, B::Body>,
bodies: &mut B256Map<B::Body>,
) -> Vec<BlockResponse<B>> {
headers
.into_iter()
@@ -32,7 +31,7 @@ pub(crate) fn zip_blocks<'a, B: Block>(
pub(crate) fn create_raw_bodies(
headers: impl IntoIterator<Item = SealedHeader>,
bodies: &mut HashMap<B256, BlockBody>,
bodies: &mut B256Map<BlockBody>,
) -> Vec<reth_ethereum_primitives::Block> {
headers
.into_iter()

View File

@@ -704,7 +704,7 @@ mod tests {
FileClient::from_file(file.into(), NoopConsensus::arc())
.await
.unwrap()
.with_bodies(bodies.clone()),
.with_bodies(bodies.clone().into_iter().collect()),
);
let mut downloader = BodiesDownloaderBuilder::default().build::<Block, _, _>(
client.clone(),

View File

@@ -1,4 +1,4 @@
use alloy_primitives::B256;
use alloy_primitives::{map::B256Map, B256};
use reth_ethereum_primitives::BlockBody;
use reth_network_p2p::{
bodies::client::{BodiesClient, BodiesFut},
@@ -7,7 +7,6 @@ use reth_network_p2p::{
};
use reth_network_peers::PeerId;
use std::{
collections::HashMap,
fmt::Debug,
ops::RangeInclusive,
sync::{
@@ -21,7 +20,7 @@ use tokio::sync::Mutex;
/// A [`BodiesClient`] for testing.
#[derive(Debug, Default)]
pub struct TestBodiesClient {
bodies: Arc<Mutex<HashMap<B256, BlockBody>>>,
bodies: Arc<Mutex<B256Map<BlockBody>>>,
should_delay: bool,
max_batch_size: Option<usize>,
times_requested: AtomicU64,
@@ -29,7 +28,7 @@ pub struct TestBodiesClient {
}
impl TestBodiesClient {
pub(crate) fn with_bodies(mut self, bodies: HashMap<B256, BlockBody>) -> Self {
pub(crate) fn with_bodies(mut self, bodies: B256Map<BlockBody>) -> Self {
self.bodies = Arc::new(Mutex::new(bodies));
self
}

View File

@@ -4,10 +4,10 @@
#[cfg(any(test, feature = "file-client"))]
use crate::{bodies::test_utils::create_raw_bodies, file_codec::BlockFileCodec};
use alloy_primitives::B256;
use alloy_primitives::{map::B256Map, B256};
use reth_ethereum_primitives::BlockBody;
use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams};
use std::{collections::HashMap, ops::RangeInclusive};
use std::ops::RangeInclusive;
mod bodies_client;
pub use bodies_client::TestBodiesClient;
@@ -19,7 +19,7 @@ pub(crate) const TEST_SCOPE: &str = "downloaders.test";
/// Generate a set of bodies and their corresponding block hashes
pub(crate) fn generate_bodies(
range: RangeInclusive<u64>,
) -> (Vec<SealedHeader>, HashMap<B256, BlockBody>) {
) -> (Vec<SealedHeader>, B256Map<BlockBody>) {
let mut rng = generators::rng();
let blocks = random_block_range(
&mut rng,
@@ -38,7 +38,7 @@ pub(crate) fn generate_bodies(
#[cfg(any(test, feature = "file-client"))]
pub(crate) async fn generate_bodies_file(
range: RangeInclusive<u64>,
) -> (tokio::fs::File, Vec<SealedHeader>, HashMap<B256, BlockBody>) {
) -> (tokio::fs::File, Vec<SealedHeader>, B256Map<BlockBody>) {
use futures::SinkExt;
use std::io::SeekFrom;
use tokio::{fs::File, io::AsyncSeekExt};

View File

@@ -8,13 +8,13 @@ use crate::{
};
use alloy_consensus::Header;
use alloy_eips::{BlockHashOrNumber, BlockNumHash};
use alloy_primitives::B256;
use alloy_primitives::{map::B256Map, B256};
use parking_lot::Mutex;
use reth_eth_wire_types::HeadersDirection;
use reth_ethereum_primitives::{Block, BlockBody};
use reth_network_peers::{PeerId, WithPeerId};
use reth_primitives_traits::{SealedBlock, SealedHeader};
use std::{collections::HashMap, ops::RangeInclusive, sync::Arc};
use std::{ops::RangeInclusive, sync::Arc};
/// A headers+bodies client that stores the headers and bodies in memory, with an artificial soft
/// bodies response limit that is set to 20 by default.
@@ -22,8 +22,8 @@ use std::{collections::HashMap, ops::RangeInclusive, sync::Arc};
/// This full block client can be [Clone]d and shared between multiple tasks.
#[derive(Clone, Debug)]
pub struct TestFullBlockClient {
headers: Arc<Mutex<HashMap<B256, Header>>>,
bodies: Arc<Mutex<HashMap<B256, BlockBody>>>,
headers: Arc<Mutex<B256Map<Header>>>,
bodies: Arc<Mutex<B256Map<BlockBody>>>,
// soft response limit, max number of bodies to respond with
soft_limit: usize,
}
@@ -31,8 +31,8 @@ pub struct TestFullBlockClient {
impl Default for TestFullBlockClient {
fn default() -> Self {
Self {
headers: Arc::new(Mutex::new(HashMap::default())),
bodies: Arc::new(Mutex::new(HashMap::default())),
headers: Arc::new(Mutex::new(B256Map::default())),
bodies: Arc::new(Mutex::new(B256Map::default())),
soft_limit: 20,
}
}

View File

@@ -1,7 +1,7 @@
//! Pool component for the node builder.
use crate::{BuilderContext, FullNodeTypes};
use alloy_primitives::Address;
use alloy_primitives::map::AddressSet;
use reth_chain_state::CanonStateSubscriptions;
use reth_chainspec::EthereumHardforks;
use reth_node_api::{BlockTy, NodeTypes, TxTy};
@@ -9,7 +9,7 @@ use reth_transaction_pool::{
blobstore::DiskFileBlobStore, BlobStore, CoinbaseTipOrdering, PoolConfig, PoolTransaction,
SubPoolLimit, TransactionPool, TransactionValidationTaskExecutor, TransactionValidator,
};
use std::{collections::HashSet, future::Future};
use std::future::Future;
/// A type that knows how to build the transaction pool.
pub trait PoolBuilder<Node: FullNodeTypes, Evm>: Send {
@@ -62,7 +62,7 @@ pub struct PoolBuilderConfigOverrides {
/// Minimum base fee required by the protocol.
pub minimal_protocol_basefee: Option<u64>,
/// Addresses that will be considered as local. Above exemptions apply.
pub local_addresses: HashSet<Address>,
pub local_addresses: AddressSet,
/// Additional tasks to validate new transactions.
pub additional_validation_tasks: Option<usize>,
}

View File

@@ -4,7 +4,7 @@ use crate::args::{
types::{MaxU32, ZeroAsNoneU64},
GasPriceOracleArgs, RpcStateCacheArgs,
};
use alloy_primitives::Address;
use alloy_primitives::map::AddressSet;
use alloy_rpc_types_engine::JwtSecret;
use clap::{
builder::{PossibleValue, RangedU64ValueParser, Resettable, TypedValueParser},
@@ -15,7 +15,6 @@ use reth_cli_util::{parse_duration_from_secs_or_ms, parse_ether_value};
use reth_rpc_eth_types::builder::config::PendingBlockKind;
use reth_rpc_server_types::{constants, RethRpcModule, RpcModuleSelection};
use std::{
collections::HashSet,
ffi::OsStr,
net::{IpAddr, Ipv4Addr},
path::PathBuf,
@@ -89,7 +88,7 @@ pub struct DefaultRpcServerArgs {
rpc_proof_permits: usize,
rpc_pending_block: PendingBlockKind,
rpc_forwarder: Option<Url>,
builder_disallow: Option<HashSet<Address>>,
builder_disallow: Option<AddressSet>,
rpc_state_cache: RpcStateCacheArgs,
gas_price_oracle: GasPriceOracleArgs,
rpc_send_raw_transaction_sync_timeout: Duration,
@@ -335,7 +334,7 @@ impl DefaultRpcServerArgs {
}
/// Set the default builder disallow addresses
pub fn with_builder_disallow(mut self, v: Option<HashSet<Address>>) -> Self {
pub fn with_builder_disallow(mut self, v: Option<AddressSet>) -> Self {
self.builder_disallow = v;
self
}
@@ -621,8 +620,8 @@ pub struct RpcServerArgs {
/// Path to file containing disallowed addresses, json-encoded list of strings. Block
/// validation API will reject blocks containing transactions from these addresses.
#[arg(long = "builder.disallow", value_name = "PATH", value_parser = reth_cli_util::parsers::read_json_from_file::<HashSet<Address>>, default_value = Resettable::from(DefaultRpcServerArgs::get_global().builder_disallow.as_ref().map(|v| format!("{:?}", v).into())))]
pub builder_disallow: Option<HashSet<Address>>,
#[arg(long = "builder.disallow", value_name = "PATH", value_parser = reth_cli_util::parsers::read_json_from_file::<AddressSet>, default_value = Resettable::from(DefaultRpcServerArgs::get_global().builder_disallow.as_ref().map(|v| format!("{:?}", v).into())))]
pub builder_disallow: Option<AddressSet>,
/// State cache configuration.
#[command(flatten)]

View File

@@ -293,7 +293,11 @@ mod tests {
use alloy_consensus::{Header, Receipt};
use alloy_eips::eip7685::Requests;
use alloy_genesis::Genesis;
use alloy_primitives::{bytes, map::HashMap, Address, LogData, B256};
use alloy_primitives::{
bytes,
map::{AddressMap, B256Map, HashMap},
Address, LogData, B256,
};
use op_revm::OpSpecId;
use reth_chainspec::ChainSpec;
use reth_evm::execute::ProviderError;
@@ -588,12 +592,12 @@ mod tests {
);
// Create a BundleStateInit object and insert initial data
let mut state_init: BundleStateInit = HashMap::default();
let mut state_init: BundleStateInit = AddressMap::default();
state_init
.insert(Address::new([2; 20]), (None, Some(Account::default()), HashMap::default()));
.insert(Address::new([2; 20]), (None, Some(Account::default()), B256Map::default()));
// Create a HashMap for account reverts and insert initial data
let mut revert_inner: HashMap<Address, AccountRevertInit> = HashMap::default();
// Create an AddressMap for account reverts and insert initial data
let mut revert_inner: AddressMap<AccountRevertInit> = AddressMap::default();
revert_inner.insert(Address::new([2; 20]), (None, vec![]));
// Create a RevertsInit object and insert the revert_inner data

View File

@@ -1,6 +1,6 @@
use std::sync::Arc;
use alloy_primitives::{map::HashSet, Address};
use alloy_primitives::{map::AddressSet, Address};
use reth_transaction_pool::{PoolTransaction, ValidPoolTransaction};
/// Iterator that returns transactions for the block building process in the order they should be
@@ -58,7 +58,7 @@ where
T: PoolTransaction,
I: Iterator<Item = Arc<ValidPoolTransaction<T>>>,
{
invalid: HashSet<Address>,
invalid: AddressSet,
best: I,
}
@@ -103,7 +103,7 @@ mod tests {
BestPayloadTransactions, PayloadTransactions, PayloadTransactionsChain,
PayloadTransactionsFixed,
};
use alloy_primitives::{map::HashSet, Address};
use alloy_primitives::{map::AddressSet, Address};
use reth_transaction_pool::{
pool::{BestTransactionsWithPrioritizedSenders, PendingPool},
test_utils::{MockOrdering, MockTransaction, MockTransactionFactory},
@@ -169,10 +169,10 @@ mod tests {
BestPayloadTransactions::new(priority_pool.best()),
Some(100),
BestPayloadTransactions::new(BestTransactionsWithPrioritizedSenders::new(
HashSet::from([address_a]),
AddressSet::from_iter([address_a]),
200,
BestTransactionsWithPrioritizedSenders::new(
HashSet::from([address_b]),
AddressSet::from_iter([address_b]),
200,
pool.best(),
),

View File

@@ -2,7 +2,7 @@
use crate::RessProtocolProvider;
use alloy_consensus::Header;
use alloy_primitives::{map::B256HashMap, Bytes, B256};
use alloy_primitives::{map::B256Map, Bytes, B256};
use reth_ethereum_primitives::BlockBody;
use reth_storage_errors::provider::ProviderResult;
use std::{
@@ -35,10 +35,10 @@ impl RessProtocolProvider for NoopRessProtocolProvider {
/// Mock implementation of [`RessProtocolProvider`].
#[derive(Clone, Default, Debug)]
pub struct MockRessProtocolProvider {
headers: Arc<Mutex<B256HashMap<Header>>>,
block_bodies: Arc<Mutex<B256HashMap<BlockBody>>>,
bytecodes: Arc<Mutex<B256HashMap<Bytes>>>,
witnesses: Arc<Mutex<B256HashMap<Vec<Bytes>>>>,
headers: Arc<Mutex<B256Map<Header>>>,
block_bodies: Arc<Mutex<B256Map<BlockBody>>>,
bytecodes: Arc<Mutex<B256Map<Bytes>>>,
witnesses: Arc<Mutex<B256Map<Vec<Bytes>>>>,
witness_delay: Option<Duration>,
}

View File

@@ -1,6 +1,6 @@
use alloy_consensus::BlockHeader as _;
use alloy_primitives::{
map::{B256HashSet, B256Map},
map::{B256Map, B256Set},
BlockNumber, B256,
};
use futures::StreamExt;
@@ -22,7 +22,7 @@ pub struct PendingState<N: NodePrimitives>(Arc<RwLock<PendingStateInner<N>>>);
struct PendingStateInner<N: NodePrimitives> {
blocks_by_hash: B256Map<ExecutedBlock<N>>,
invalid_blocks_by_hash: B256Map<Arc<RecoveredBlock<N::Block>>>,
block_hashes_by_number: BTreeMap<BlockNumber, B256HashSet>,
block_hashes_by_number: BTreeMap<BlockNumber, B256Set>,
}
impl<N: NodePrimitives> PendingState<N> {

View File

@@ -1,6 +1,6 @@
//! Database adapters for payload building.
use alloy_primitives::{
map::{Entry, HashMap},
map::{AddressMap, B256Map, Entry, HashMap, U256Map},
Address, B256, U256,
};
use core::cell::RefCell;
@@ -31,9 +31,9 @@ use revm::{bytecode::Bytecode, state::AccountInfo, Database, DatabaseRef};
#[derive(Debug, Clone, Default)]
pub struct CachedReads {
/// Block state account with storage.
pub accounts: HashMap<Address, CachedAccount>,
pub accounts: AddressMap<CachedAccount>,
/// Created contracts.
pub contracts: HashMap<B256, Bytecode>,
pub contracts: B256Map<Bytecode>,
/// Block hash mapped to the block number.
pub block_hashes: HashMap<u64, B256>,
}
@@ -52,12 +52,7 @@ impl CachedReads {
}
/// Inserts an account info into the cache.
pub fn insert_account(
&mut self,
address: Address,
info: AccountInfo,
storage: HashMap<U256, U256>,
) {
pub fn insert_account(&mut self, address: Address, info: AccountInfo, storage: U256Map<U256>) {
self.accounts.insert(address, CachedAccount { info: Some(info), storage });
}
@@ -201,12 +196,12 @@ pub struct CachedAccount {
/// Account state.
pub info: Option<AccountInfo>,
/// Account's storage.
pub storage: HashMap<U256, U256>,
pub storage: U256Map<U256>,
}
impl CachedAccount {
fn new(info: Option<AccountInfo>) -> Self {
Self { info, storage: HashMap::default() }
Self { info, storage: U256Map::default() }
}
}

View File

@@ -1,6 +1,8 @@
use alloc::vec::Vec;
use alloy_primitives::{
keccak256, map::HashMap, Address, BlockNumber, Bytes, StorageKey, B256, U256,
keccak256,
map::{AddressMap, B256Map, HashMap},
Address, BlockNumber, Bytes, StorageKey, B256, U256,
};
use reth_primitives_traits::{Account, Bytecode};
use reth_storage_api::{
@@ -16,8 +18,8 @@ use reth_trie::{
/// Mock state for testing
#[derive(Debug, Default, Clone, Eq, PartialEq)]
pub struct StateProviderTest {
accounts: HashMap<Address, (HashMap<StorageKey, U256>, Account)>,
contracts: HashMap<B256, Bytecode>,
accounts: AddressMap<(HashMap<StorageKey, U256>, Account)>,
contracts: B256Map<Bytecode>,
block_hash: HashMap<u64, B256>,
}

View File

@@ -1,7 +1,6 @@
use alloy_eips::BlockId;
use alloy_primitives::{Address, U256};
use alloy_primitives::{map::AddressMap, U256};
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
use std::collections::HashMap;
// Required for the subscription attribute below
use reth_chain_state as _;
@@ -15,7 +14,7 @@ pub trait RethApi {
async fn reth_get_balance_changes_in_block(
&self,
block_id: BlockId,
) -> RpcResult<HashMap<Address, U256>>;
) -> RpcResult<AddressMap<U256>>;
/// Subscribe to json `ChainNotifications`
#[subscription(

View File

@@ -2,19 +2,18 @@
use alloy_dyn_abi::TypedData;
use alloy_eips::eip2718::Decodable2718;
use alloy_primitives::{eip191_hash_message, Address, Signature, B256};
use alloy_primitives::{eip191_hash_message, map::AddressMap, Address, Signature, B256};
use alloy_signer::SignerSync;
use alloy_signer_local::{coins_bip39::English, MnemonicBuilder, PrivateKeySigner};
use reth_rpc_convert::SignableTxRequest;
use reth_rpc_eth_api::helpers::{signer::Result, EthSigner};
use reth_rpc_eth_types::SignError;
use std::collections::HashMap;
/// Holds developer keys
#[derive(Debug, Clone)]
pub struct DevSigner {
addresses: Vec<Address>,
accounts: HashMap<Address, PrivateKeySigner>,
accounts: AddressMap<PrivateKeySigner>,
}
impl DevSigner {
@@ -30,7 +29,7 @@ impl DevSigner {
let address = sk.address();
let addresses = vec![address];
let accounts = HashMap::from([(address, sk)]);
let accounts = AddressMap::from_iter([(address, sk)]);
signers.push(Box::new(Self { addresses, accounts }) as Box<dyn EthSigner<T, TxReq>>);
}
signers
@@ -54,7 +53,7 @@ impl DevSigner {
let address = sk.address();
let addresses = vec![address];
let accounts = HashMap::from([(address, sk)]);
let accounts = AddressMap::from_iter([(address, sk)]);
signers.push(Box::new(Self { addresses, accounts }) as Box<dyn EthSigner<T, TxReq>>);
}
@@ -121,7 +120,7 @@ mod tests {
let signer: PrivateKeySigner =
"4646464646464646464646464646464646464646464646464646464646464646".parse().unwrap();
let address = signer.address();
let accounts = HashMap::from([(address, signer)]);
let accounts = AddressMap::from_iter([(address, signer)]);
let addresses = vec![address];
DevSigner { addresses, accounts }
}

View File

@@ -32,7 +32,10 @@ mod tests {
use crate::eth::helpers::types::EthRpcConverter;
use super::*;
use alloy_primitives::{Address, StorageKey, StorageValue, U256};
use alloy_primitives::{
map::{AddressMap, B256Map},
Address, StorageKey, StorageValue, U256,
};
use reth_chainspec::ChainSpec;
use reth_evm_ethereum::EthEvmConfig;
use reth_network_api::noop::NoopNetwork;
@@ -42,7 +45,6 @@ mod tests {
};
use reth_rpc_eth_api::{helpers::EthState, node::RpcNodeCoreAdapter};
use reth_transaction_pool::test_utils::{testing_pool, TestPool};
use std::collections::HashMap;
fn noop_eth_api() -> EthApi<
RpcNodeCoreAdapter<NoopProvider, TestPool, NoopNetwork, EthEvmConfig>,
@@ -56,7 +58,7 @@ mod tests {
}
fn mock_eth_api(
accounts: HashMap<Address, ExtendedAccount>,
accounts: AddressMap<ExtendedAccount>,
) -> EthApi<
RpcNodeCoreAdapter<MockEthProvider, TestPool, NoopNetwork, EthEvmConfig>,
EthRpcConverter<ChainSpec>,
@@ -81,10 +83,12 @@ mod tests {
// === Mock ===
let storage_value = StorageValue::from(1337);
let storage_key = StorageKey::random();
let storage = HashMap::from([(storage_key, storage_value)]);
let storage: B256Map<_> = core::iter::once((storage_key, storage_value)).collect();
let accounts =
HashMap::from([(address, ExtendedAccount::new(0, U256::ZERO).extend_storage(storage))]);
let accounts = AddressMap::from_iter([(
address,
ExtendedAccount::new(0, U256::ZERO).extend_storage(storage),
)]);
let eth_api = mock_eth_api(accounts);
let storage_key: U256 = storage_key.into();

View File

@@ -137,7 +137,7 @@ mod tests {
use alloy_consensus::{
BlobTransactionSidecar, Block, Header, SidecarBuilder, SimpleCoder, Transaction,
};
use alloy_primitives::{Address, U256};
use alloy_primitives::{map::AddressMap, Address, U256};
use alloy_rpc_types_eth::request::TransactionRequest;
use reth_chainspec::{ChainSpec, ChainSpecBuilder};
use reth_evm_ethereum::EthEvmConfig;
@@ -149,10 +149,9 @@ mod tests {
use reth_rpc_eth_api::node::RpcNodeCoreAdapter;
use reth_transaction_pool::test_utils::{testing_pool, TestPool};
use revm_primitives::Bytes;
use std::collections::HashMap;
fn mock_eth_api(
accounts: HashMap<Address, ExtendedAccount>,
accounts: AddressMap<ExtendedAccount>,
) -> EthApi<
RpcNodeCoreAdapter<MockEthProvider, TestPool, NoopNetwork, EthEvmConfig>,
EthRpcConverter<ChainSpec>,
@@ -218,7 +217,7 @@ mod tests {
#[tokio::test]
async fn test_fill_transaction_fills_chain_id() {
let address = Address::random();
let accounts = HashMap::from([(
let accounts = AddressMap::from_iter([(
address,
ExtendedAccount::new(0, U256::from(10_000_000_000_000_000_000u64)), // 10 ETH
)]);
@@ -244,7 +243,7 @@ mod tests {
let address = Address::random();
let nonce = 42u64;
let accounts = HashMap::from([(
let accounts = AddressMap::from_iter([(
address,
ExtendedAccount::new(nonce, U256::from(1_000_000_000_000_000_000u64)), // 1 ETH
)]);
@@ -271,7 +270,7 @@ mod tests {
let provided_nonce = 100u64;
let provided_gas_limit = 50_000u64;
let accounts = HashMap::from([(
let accounts = AddressMap::from_iter([(
address,
ExtendedAccount::new(42, U256::from(10_000_000_000_000_000_000u64)),
)]);
@@ -300,7 +299,7 @@ mod tests {
let address = Address::random();
let balance = U256::from(100u128) * U256::from(1_000_000_000_000_000_000u128);
let accounts = HashMap::from([(address, ExtendedAccount::new(5, balance))]);
let accounts = AddressMap::from_iter([(address, ExtendedAccount::new(5, balance))]);
let eth_api = mock_eth_api(accounts);
@@ -320,7 +319,7 @@ mod tests {
#[tokio::test]
async fn test_fill_transaction_eip4844_blob_fee() {
let address = Address::random();
let accounts = HashMap::from([(
let accounts = AddressMap::from_iter([(
address,
ExtendedAccount::new(0, U256::from(10_000_000_000_000_000_000u64)),
)]);
@@ -357,7 +356,7 @@ mod tests {
#[tokio::test]
async fn test_fill_transaction_eip4844_preserves_blob_fee() {
let address = Address::random();
let accounts = HashMap::from([(
let accounts = AddressMap::from_iter([(
address,
ExtendedAccount::new(0, U256::from(10_000_000_000_000_000_000u64)),
)]);
@@ -395,7 +394,7 @@ mod tests {
#[tokio::test]
async fn test_fill_transaction_non_blob_tx_no_blob_fee() {
let address = Address::random();
let accounts = HashMap::from([(
let accounts = AddressMap::from_iter([(
address,
ExtendedAccount::new(0, U256::from(10_000_000_000_000_000_000u64)),
)]);

View File

@@ -1,7 +1,7 @@
use std::{collections::HashMap, future::Future, sync::Arc};
use std::{future::Future, sync::Arc};
use alloy_eips::BlockId;
use alloy_primitives::{Address, U256};
use alloy_primitives::{map::AddressMap, U256};
use async_trait::async_trait;
use futures::{Stream, StreamExt};
use jsonrpsee::{core::RpcResult, PendingSubscriptionSink, SubscriptionMessage, SubscriptionSink};
@@ -58,15 +58,12 @@ where
}
/// Returns a map of addresses to changed account balances for a particular block.
pub async fn balance_changes_in_block(
&self,
block_id: BlockId,
) -> EthResult<HashMap<Address, U256>> {
pub async fn balance_changes_in_block(&self, block_id: BlockId) -> EthResult<AddressMap<U256>> {
self.on_blocking_task(|this| async move { this.try_balance_changes_in_block(block_id) })
.await
}
fn try_balance_changes_in_block(&self, block_id: BlockId) -> EthResult<HashMap<Address, U256>> {
fn try_balance_changes_in_block(&self, block_id: BlockId) -> EthResult<AddressMap<U256>> {
let Some(block_number) = self.provider().block_number_for_id(block_id)? else {
return Err(EthApiError::HeaderNotFound(block_id))
};
@@ -74,7 +71,7 @@ where
let state = self.provider().state_by_block_id(block_id)?;
let accounts_before = self.provider().account_block_changeset(block_number)?;
let hash_map = accounts_before.iter().try_fold(
HashMap::default(),
AddressMap::default(),
|mut hash_map, account_before| -> RethResult<_> {
let current_balance = state.account_balance(&account_before.address)?;
let prev_balance = account_before.info.map(|info| info.balance);
@@ -102,7 +99,7 @@ where
async fn reth_get_balance_changes_in_block(
&self,
block_id: BlockId,
) -> RpcResult<HashMap<Address, U256>> {
) -> RpcResult<AddressMap<U256>> {
Ok(Self::balance_changes_in_block(self, block_id).await?)
}

View File

@@ -2,6 +2,7 @@ use alloy_consensus::{
BlobTransactionValidationError, BlockHeader, EnvKzgSettings, Transaction, TxReceipt,
};
use alloy_eips::{eip4844::kzg_to_versioned_hash, eip7685::RequestsOrHash};
use alloy_primitives::map::AddressSet;
use alloy_rpc_types_beacon::relay::{
BidTrace, BuilderBlockValidationRequest, BuilderBlockValidationRequestV2,
BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4,
@@ -40,7 +41,7 @@ use reth_tasks::TaskSpawner;
use revm_primitives::{Address, B256, U256};
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use std::{collections::HashSet, sync::Arc};
use std::sync::Arc;
use tokio::sync::{oneshot, RwLock};
use tracing::warn;
@@ -568,7 +569,7 @@ pub struct ValidationApiInner<Provider, E: ConfigureEvm, T: PayloadTypes> {
/// Block executor factory.
evm_config: E,
/// Set of disallowed addresses
disallow: HashSet<Address>,
disallow: AddressSet,
/// The maximum block distance - parent to latest - allowed for validation
validation_window: u64,
/// Cached state reads to avoid redundant disk I/O across multiple validation attempts
@@ -586,7 +587,7 @@ pub struct ValidationApiInner<Provider, E: ConfigureEvm, T: PayloadTypes> {
///
/// This function sorts addresses to ensure deterministic output regardless of
/// insertion order, then computes a SHA256 hash of the concatenated addresses.
fn hash_disallow_list(disallow: &HashSet<Address>) -> String {
fn hash_disallow_list(disallow: &AddressSet) -> String {
let mut sorted: Vec<_> = disallow.iter().collect();
sorted.sort(); // sort for deterministic hashing
@@ -608,7 +609,7 @@ impl<Provider, E: ConfigureEvm, T: PayloadTypes> fmt::Debug for ValidationApiInn
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct ValidationApiConfig {
/// Disallowed addresses.
pub disallow: HashSet<Address>,
pub disallow: AddressSet,
/// The maximum block distance - parent to latest - allowed for validation
pub validation_window: u64,
}
@@ -700,13 +701,12 @@ pub(crate) struct ValidationMetrics {
#[cfg(test)]
mod tests {
use super::hash_disallow_list;
use super::{hash_disallow_list, AddressSet};
use revm_primitives::Address;
use std::collections::HashSet;
#[test]
fn test_hash_disallow_list_deterministic() {
let mut addresses = HashSet::new();
let mut addresses = AddressSet::default();
addresses.insert(Address::from([1u8; 20]));
addresses.insert(Address::from([2u8; 20]));
@@ -718,10 +718,10 @@ mod tests {
#[test]
fn test_hash_disallow_list_different_content() {
let mut addresses1 = HashSet::new();
let mut addresses1 = AddressSet::default();
addresses1.insert(Address::from([1u8; 20]));
let mut addresses2 = HashSet::new();
let mut addresses2 = AddressSet::default();
addresses2.insert(Address::from([2u8; 20]));
let hash1 = hash_disallow_list(&addresses1);
@@ -732,11 +732,11 @@ mod tests {
#[test]
fn test_hash_disallow_list_order_independent() {
let mut addresses1 = HashSet::new();
let mut addresses1 = AddressSet::default();
addresses1.insert(Address::from([1u8; 20]));
addresses1.insert(Address::from([2u8; 20]));
let mut addresses2 = HashSet::new();
let mut addresses2 = AddressSet::default();
addresses2.insert(Address::from([2u8; 20])); // Different insertion order
addresses2.insert(Address::from([1u8; 20]));
@@ -751,7 +751,7 @@ mod tests {
fn test_disallow_list_hash_rbuilder_parity() {
let json = r#"["0x05E0b5B40B7b66098C2161A5EE11C5740A3A7C45","0x01e2919679362dFBC9ee1644Ba9C6da6D6245BB1","0x03893a7c7463AE47D46bc7f091665f1893656003","0x04DBA1194ee10112fE6C3207C0687DEf0e78baCf"]"#;
let blocklist: Vec<Address> = serde_json::from_str(json).unwrap();
let blocklist: HashSet<Address> = blocklist.into_iter().collect();
let blocklist: AddressSet = blocklist.into_iter().collect();
let expected_hash = "ee14e9d115e182f61871a5a385ab2f32ecf434f3b17bdbacc71044810d89e608";
let hash = hash_disallow_list(&blocklist);
assert_eq!(expected_hash, hash);

View File

@@ -476,7 +476,7 @@ mod tests {
},
};
use alloy_consensus::{BlockHeader, Header};
use alloy_primitives::{BlockNumber, TxNumber, B256};
use alloy_primitives::{map::B256Map, BlockNumber, TxNumber, B256};
use futures_util::Stream;
use reth_db::{static_file::HeaderWithHashMask, tables};
use reth_db_api::{
@@ -503,7 +503,7 @@ mod tests {
self, random_block_range, random_signed_tx, BlockRangeParams,
};
use std::{
collections::{HashMap, VecDeque},
collections::VecDeque,
ops::RangeInclusive,
pin::Pin,
task::{Context, Poll},
@@ -519,14 +519,14 @@ mod tests {
/// A helper struct for running the [`BodyStage`].
pub(crate) struct BodyTestRunner {
responses: HashMap<B256, BlockBody>,
responses: B256Map<BlockBody>,
db: TestStageDB,
batch_size: u64,
}
impl Default for BodyTestRunner {
fn default() -> Self {
Self { responses: HashMap::default(), db: TestStageDB::default(), batch_size: 1000 }
Self { responses: B256Map::default(), db: TestStageDB::default(), batch_size: 1000 }
}
}
@@ -535,7 +535,7 @@ mod tests {
self.batch_size = batch_size;
}
pub(crate) fn set_responses(&mut self, responses: HashMap<B256, BlockBody>) {
pub(crate) fn set_responses(&mut self, responses: B256Map<BlockBody>) {
self.responses = responses;
}
}
@@ -736,11 +736,11 @@ mod tests {
}
}
/// A [`BodyDownloader`] that is backed by an internal [`HashMap`] for testing.
/// A [`BodyDownloader`] that is backed by an internal [`B256Map`] for testing.
#[derive(Debug)]
pub(crate) struct TestBodyDownloader {
provider_factory: ProviderFactory<MockNodeTypesWithDB>,
responses: HashMap<B256, BlockBody>,
responses: B256Map<BlockBody>,
headers: VecDeque<SealedHeader>,
batch_size: u64,
}
@@ -748,7 +748,7 @@ mod tests {
impl TestBodyDownloader {
pub(crate) fn new(
provider_factory: ProviderFactory<MockNodeTypesWithDB>,
responses: HashMap<B256, BlockBody>,
responses: B256Map<BlockBody>,
batch_size: u64,
) -> Self {
Self { provider_factory, responses, headers: VecDeque::default(), batch_size }

View File

@@ -1,5 +1,5 @@
//! Utils for `stages`.
use alloy_primitives::{Address, BlockNumber, TxNumber, B256};
use alloy_primitives::{map::AddressMap, Address, BlockNumber, TxNumber, B256};
use reth_config::config::EtlConfig;
use reth_db_api::{
cursor::{DbCursorRO, DbCursorRW},
@@ -125,7 +125,7 @@ where
Provider: DBProvider + ChangeSetReader + StaticFileProviderFactory,
{
let mut collector = Collector::new(etl_config.file_size, etl_config.dir.clone());
let mut cache: HashMap<Address, Vec<u64>> = HashMap::default();
let mut cache: AddressMap<Vec<u64>> = AddressMap::default();
let mut insert_fn = |address: Address, indices: Vec<u64>| {
let last = indices.last().expect("indices is non-empty");

View File

@@ -2,7 +2,11 @@
use alloy_consensus::BlockHeader;
use alloy_genesis::GenesisAccount;
use alloy_primitives::{keccak256, map::HashMap, Address, B256, U256};
use alloy_primitives::{
keccak256,
map::{AddressMap, B256Map, HashMap},
Address, B256, U256,
};
use reth_chainspec::EthChainSpec;
use reth_codecs::Compact;
use reth_config::config::EtlConfig;
@@ -308,10 +312,11 @@ where
{
let capacity = alloc.size_hint().1.unwrap_or(0);
let mut state_init: BundleStateInit =
HashMap::with_capacity_and_hasher(capacity, Default::default());
let mut reverts_init = HashMap::with_capacity_and_hasher(capacity, Default::default());
let mut contracts: HashMap<B256, Bytecode> =
HashMap::with_capacity_and_hasher(capacity, Default::default());
AddressMap::with_capacity_and_hasher(capacity, Default::default());
let mut reverts_init: AddressMap<_> =
AddressMap::with_capacity_and_hasher(capacity, Default::default());
let mut contracts: B256Map<Bytecode> =
B256Map::with_capacity_and_hasher(capacity, Default::default());
for (address, account) in alloc {
let bytecode_hash = if let Some(code) = &account.code {
@@ -340,7 +345,7 @@ where
let value = U256::from_be_bytes(value.0);
(*key, (U256::ZERO, value))
})
.collect::<HashMap<_, _>>()
.collect::<B256Map<_>>()
})
.unwrap_or_default();

View File

@@ -27,7 +27,7 @@ use alloy_consensus::{
use alloy_eips::BlockHashOrNumber;
use alloy_primitives::{
keccak256,
map::{hash_map, HashMap, HashSet},
map::{hash_map, AddressSet, B256Map, HashMap},
Address, BlockHash, BlockNumber, TxHash, TxNumber, B256,
};
use itertools::Itertools;
@@ -2238,7 +2238,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
PruneMode::Distance(self.minimum_pruning_distance).should_prune(first_block, tip);
// Prepare set of addresses whose logs should not be pruned.
let mut allowed_addresses: HashSet<Address, _> = HashSet::new();
let mut allowed_addresses: AddressSet = AddressSet::default();
for (_, addresses) in contract_log_pruner.range(..first_block) {
allowed_addresses.extend(addresses.iter().copied());
}
@@ -2866,7 +2866,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypes> HashingWriter for DatabaseProvi
fn unwind_storage_hashing(
&self,
changesets: impl Iterator<Item = (BlockNumberAddress, StorageEntry)>,
) -> ProviderResult<HashMap<B256, BTreeSet<B256>>> {
) -> ProviderResult<B256Map<BTreeSet<B256>>> {
// Aggregate all block changesets and make list of accounts that have been changed.
let mut hashed_storages = changesets
.into_iter()
@@ -2877,8 +2877,8 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypes> HashingWriter for DatabaseProvi
hashed_storages.sort_by_key(|(ha, hk, _)| (*ha, *hk));
// Apply values to HashedState, and remove the account if it's None.
let mut hashed_storage_keys: HashMap<B256, BTreeSet<B256>> =
HashMap::with_capacity_and_hasher(hashed_storages.len(), Default::default());
let mut hashed_storage_keys: B256Map<BTreeSet<B256>> =
B256Map::with_capacity_and_hasher(hashed_storages.len(), Default::default());
let mut hashed_storage = self.tx.cursor_dup_write::<tables::HashedStorages>()?;
for (hashed_address, key, value) in hashed_storages.into_iter().rev() {
hashed_storage_keys.entry(hashed_address).or_default().insert(key);
@@ -2901,7 +2901,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypes> HashingWriter for DatabaseProvi
fn unwind_storage_hashing_range(
&self,
range: impl RangeBounds<BlockNumber>,
) -> ProviderResult<HashMap<B256, BTreeSet<B256>>> {
) -> ProviderResult<B256Map<BTreeSet<B256>>> {
let changesets = self.storage_changesets_range(range)?;
self.unwind_storage_hashing(changesets.into_iter())
}
@@ -2909,7 +2909,7 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypes> HashingWriter for DatabaseProvi
fn insert_storage_for_hashing(
&self,
storages: impl IntoIterator<Item = (Address, impl IntoIterator<Item = StorageEntry>)>,
) -> ProviderResult<HashMap<B256, BTreeSet<B256>>> {
) -> ProviderResult<B256Map<BTreeSet<B256>>> {
// hash values
let hashed_storages =
storages.into_iter().fold(BTreeMap::new(), |mut map, (address, storage)| {

View File

@@ -1,7 +1,10 @@
use super::metrics::{RocksDBMetrics, RocksDBOperation, ROCKSDB_TABLES};
use crate::providers::{compute_history_rank, needs_prev_shard_check, HistoryInfo};
use alloy_consensus::transaction::TxHashRef;
use alloy_primitives::{Address, BlockNumber, TxNumber, B256};
use alloy_primitives::{
map::{AddressMap, HashMap},
Address, BlockNumber, TxNumber, B256,
};
use itertools::Itertools;
use metrics::Label;
use parking_lot::Mutex;
@@ -29,7 +32,7 @@ use rocksdb::{
DB,
};
use std::{
collections::{BTreeMap, HashMap},
collections::BTreeMap,
fmt,
path::{Path, PathBuf},
sync::Arc,
@@ -1089,8 +1092,8 @@ impl RocksDBProvider {
&self,
last_indices: &[(Address, BlockNumber)],
) -> ProviderResult<WriteBatchWithTransaction<true>> {
let mut address_min_block: HashMap<Address, BlockNumber> =
HashMap::with_capacity_and_hasher(last_indices.len(), Default::default());
let mut address_min_block: AddressMap<BlockNumber> =
AddressMap::with_capacity_and_hasher(last_indices.len(), Default::default());
for &(address, block_number) in last_indices {
address_min_block
.entry(address)

View File

@@ -12,8 +12,9 @@ use alloy_consensus::{
};
use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag};
use alloy_primitives::{
keccak256, map::HashMap, Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue,
TxHash, TxNumber, B256, U256,
keccak256,
map::{AddressMap, B256Map, HashMap},
Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256,
};
use parking_lot::Mutex;
use reth_chain_state::{CanonStateNotifications, CanonStateSubscriptions};
@@ -54,13 +55,13 @@ use tokio::sync::broadcast;
pub struct MockEthProvider<T: NodePrimitives = EthPrimitives, ChainSpec = reth_chainspec::ChainSpec>
{
///local block store
pub blocks: Arc<Mutex<HashMap<B256, T::Block>>>,
pub blocks: Arc<Mutex<B256Map<T::Block>>>,
/// Local header store
pub headers: Arc<Mutex<HashMap<B256, <T::Block as Block>::Header>>>,
pub headers: Arc<Mutex<B256Map<<T::Block as Block>::Header>>>,
/// Local receipt store indexed by block number
pub receipts: Arc<Mutex<HashMap<BlockNumber, Vec<T::Receipt>>>>,
/// Local account store
pub accounts: Arc<Mutex<HashMap<Address, ExtendedAccount>>>,
pub accounts: Arc<Mutex<AddressMap<ExtendedAccount>>>,
/// Local chain spec
pub chain_spec: Arc<ChainSpec>,
/// Local state roots

View File

@@ -1,5 +1,5 @@
use alloc::collections::{BTreeMap, BTreeSet};
use alloy_primitives::{map::HashMap, Address, BlockNumber, B256};
use alloy_primitives::{map::B256Map, Address, BlockNumber, B256};
use auto_impl::auto_impl;
use core::ops::RangeBounds;
use reth_db_api::models::BlockNumberAddress;
@@ -48,7 +48,7 @@ pub trait HashingWriter: Send {
fn unwind_storage_hashing(
&self,
changesets: impl Iterator<Item = (BlockNumberAddress, StorageEntry)>,
) -> ProviderResult<HashMap<B256, BTreeSet<B256>>>;
) -> ProviderResult<B256Map<BTreeSet<B256>>>;
/// Unwind and clear storage hashing in a given block range.
///
@@ -58,7 +58,7 @@ pub trait HashingWriter: Send {
fn unwind_storage_hashing_range(
&self,
range: impl RangeBounds<BlockNumber>,
) -> ProviderResult<HashMap<B256, BTreeSet<B256>>>;
) -> ProviderResult<B256Map<BTreeSet<B256>>>;
/// Iterates over storages and inserts them to hashing table.
///
@@ -68,5 +68,5 @@ pub trait HashingWriter: Send {
fn insert_storage_for_hashing(
&self,
storages: impl IntoIterator<Item = (Address, impl IntoIterator<Item = StorageEntry>)>,
) -> ProviderResult<HashMap<B256, BTreeSet<B256>>>;
) -> ProviderResult<B256Map<BTreeSet<B256>>>;
}

View File

@@ -1,6 +1,6 @@
#![allow(missing_docs)]
use alloy_consensus::Transaction;
use alloy_primitives::{Address, B256, U256};
use alloy_primitives::{map::AddressMap, Address, B256, U256};
use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner};
use rand::prelude::SliceRandom;
@@ -12,7 +12,7 @@ use reth_transaction_pool::{
BlockInfo, CanonicalStateUpdate, PoolConfig, PoolTransaction, PoolUpdateKind, SubPoolLimit,
TransactionOrigin, TransactionPool, TransactionPoolExt,
};
use std::{collections::HashMap, time::Duration};
use std::time::Duration;
/// Generates a set of transactions for multiple senders
fn generate_transactions(num_senders: usize, txs_per_sender: usize) -> Vec<MockTransaction> {
let mut runner = TestRunner::deterministic();
@@ -47,8 +47,8 @@ fn generate_transactions(num_senders: usize, txs_per_sender: usize) -> Vec<MockT
}
/// Fill the pool with transactions
async fn fill_pool(pool: &TestPoolBuilder, txs: Vec<MockTransaction>) -> HashMap<Address, u64> {
let mut sender_nonces = HashMap::new();
async fn fill_pool(pool: &TestPoolBuilder, txs: Vec<MockTransaction>) -> AddressMap<u64> {
let mut sender_nonces = AddressMap::default();
// Add transactions one by one
for tx in txs {

View File

@@ -7,10 +7,10 @@ use alloy_eips::{
eip7840::BlobParams,
merge::EPOCH_SLOTS,
};
use alloy_primitives::{TxHash, B256};
use alloy_primitives::{map::B256Set, TxHash, B256};
use parking_lot::{Mutex, RwLock};
use schnellru::{ByLength, LruMap};
use std::{collections::HashSet, fmt, fs, io, path::PathBuf, sync::Arc};
use std::{fmt, fs, io, path::PathBuf, sync::Arc};
use tracing::{debug, trace};
/// How many [`BlobTransactionSidecarVariant`] to cache in memory.
@@ -310,7 +310,7 @@ struct DiskFileBlobStoreInner {
blob_cache: Mutex<LruMap<TxHash, Arc<BlobTransactionSidecarVariant>, ByLength>>,
size_tracker: BlobStoreSize,
file_lock: RwLock<()>,
txs_to_delete: RwLock<HashSet<B256>>,
txs_to_delete: RwLock<B256Set>,
/// Tracks of known versioned hashes and a transaction they exist in
///
/// Note: It is possible that one blob can appear in multiple transactions but this only tracks

View File

@@ -3,9 +3,9 @@ use alloy_eips::{
eip4844::{BlobAndProofV1, BlobAndProofV2},
eip7594::BlobTransactionSidecarVariant,
};
use alloy_primitives::B256;
use alloy_primitives::{map::B256Map, B256};
use parking_lot::RwLock;
use std::{collections::HashMap, sync::Arc};
use std::sync::Arc;
/// An in-memory blob store.
#[derive(Clone, Debug, Default, PartialEq)]
@@ -50,7 +50,7 @@ impl InMemoryBlobStore {
#[derive(Debug, Default)]
struct InMemoryBlobStoreInner {
/// Storage for all blob data.
store: RwLock<HashMap<B256, Arc<BlobTransactionSidecarVariant>>>,
store: RwLock<B256Map<Arc<BlobTransactionSidecarVariant>>>,
size_tracker: BlobStoreSize,
}
@@ -194,7 +194,7 @@ impl BlobStore for InMemoryBlobStore {
/// Removes the given blob from the store and returns the size of the blob that was removed.
#[inline]
fn remove_size(store: &mut HashMap<B256, Arc<BlobTransactionSidecarVariant>>, tx: &B256) -> usize {
fn remove_size(store: &mut B256Map<Arc<BlobTransactionSidecarVariant>>, tx: &B256) -> usize {
store.remove(tx).map(|rem| rem.size()).unwrap_or_default()
}
@@ -203,7 +203,7 @@ fn remove_size(store: &mut HashMap<B256, Arc<BlobTransactionSidecarVariant>>, tx
/// We don't need to handle the size updates for replacements because transactions are unique.
#[inline]
fn insert_size(
store: &mut HashMap<B256, Arc<BlobTransactionSidecarVariant>>,
store: &mut B256Map<Arc<BlobTransactionSidecarVariant>>,
tx: B256,
blob: BlobTransactionSidecarVariant,
) -> usize {

View File

@@ -5,8 +5,8 @@ use crate::{
};
use alloy_consensus::constants::EIP4844_TX_TYPE_ID;
use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT_30M, MIN_PROTOCOL_BASE_FEE};
use alloy_primitives::Address;
use std::{collections::HashSet, ops::Mul, time::Duration};
use alloy_primitives::{map::AddressSet, Address};
use std::{ops::Mul, time::Duration};
/// Guarantees max transactions for one sender, compatible with geth/erigon
pub const TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER: usize = 16;
@@ -225,7 +225,7 @@ pub struct LocalTransactionConfig {
/// - no eviction exemptions
pub no_exemptions: bool,
/// Addresses that will be considered as local. Above exemptions apply.
pub local_addresses: HashSet<Address>,
pub local_addresses: AddressSet,
/// Flag indicating whether local transactions should be propagated.
pub propagate_local_transactions: bool,
}
@@ -234,7 +234,7 @@ impl Default for LocalTransactionConfig {
fn default() -> Self {
Self {
no_exemptions: false,
local_addresses: HashSet::default(),
local_addresses: AddressSet::default(),
propagate_local_transactions: true,
}
}
@@ -333,7 +333,7 @@ mod tests {
#[test]
fn test_contains_local_address() {
let address = Address::new([1; 20]);
let mut local_addresses = HashSet::default();
let mut local_addresses = AddressSet::default();
local_addresses.insert(address);
let config = LocalTransactionConfig { local_addresses, ..Default::default() };
@@ -350,7 +350,7 @@ mod tests {
let address = Address::new([1; 20]);
let config = LocalTransactionConfig {
no_exemptions: true,
local_addresses: HashSet::default(),
local_addresses: AddressSet::default(),
..Default::default()
};
@@ -361,7 +361,7 @@ mod tests {
#[test]
fn test_is_local_without_no_exemptions() {
let address = Address::new([1; 20]);
let mut local_addresses = HashSet::default();
let mut local_addresses = AddressSet::default();
local_addresses.insert(address);
let config =

View File

@@ -1,5 +1,5 @@
//! Identifier types for transactions and senders.
use alloy_primitives::{map::HashMap, Address};
use alloy_primitives::{map::AddressMap, Address};
use rustc_hash::FxHashMap;
/// An internal mapping of addresses.
@@ -11,7 +11,7 @@ pub struct SenderIdentifiers {
/// The identifier to use next.
id: u64,
/// Assigned [`SenderId`] for an [`Address`].
address_to_id: HashMap<Address, SenderId>,
address_to_id: AddressMap<SenderId>,
/// Reverse mapping of [`SenderId`] to [`Address`].
sender_to_address: FxHashMap<SenderId, Address>,
}

View File

@@ -307,7 +307,7 @@ use alloy_eips::{
eip4844::{BlobAndProofV1, BlobAndProofV2},
eip7594::BlobTransactionSidecarVariant,
};
use alloy_primitives::{Address, TxHash, B256, U256};
use alloy_primitives::{map::AddressSet, Address, TxHash, B256, U256};
use aquamarine as _;
use reth_chainspec::{ChainSpecProvider, EthereumHardforks};
use reth_eth_wire_types::HandleMempoolData;
@@ -316,7 +316,7 @@ use reth_evm_ethereum::EthEvmConfig;
use reth_execution_types::ChangedAccount;
use reth_primitives_traits::{HeaderTy, Recovered};
use reth_storage_api::{BlockReaderIdExt, StateProviderFactory};
use std::{collections::HashSet, sync::Arc};
use std::sync::Arc;
use tokio::sync::mpsc::Receiver;
use tracing::{instrument, trace};
@@ -752,7 +752,7 @@ where
self.pool.get_pending_transactions_by_origin(origin)
}
fn unique_senders(&self) -> HashSet<Address> {
fn unique_senders(&self) -> AddressSet {
self.pool.unique_senders()
}

View File

@@ -10,7 +10,10 @@ use crate::{
};
use alloy_consensus::{transaction::TxHashRef, BlockHeader, Typed2718};
use alloy_eips::{BlockNumberOrTag, Decodable2718, Encodable2718};
use alloy_primitives::{Address, BlockHash, BlockNumber, Bytes};
use alloy_primitives::{
map::{AddressSet, HashSet},
Address, BlockHash, BlockNumber, Bytes,
};
use alloy_rlp::Encodable;
use futures_util::{
future::{BoxFuture, Fuse, FusedFuture},
@@ -28,7 +31,6 @@ use reth_tasks::TaskSpawner;
use serde::{Deserialize, Serialize};
use std::{
borrow::Borrow,
collections::HashSet,
hash::{Hash, Hasher},
path::{Path, PathBuf},
sync::Arc,
@@ -670,7 +672,7 @@ fn load_accounts<Client, I>(
client: Client,
at: BlockHash,
addresses: I,
) -> Result<LoadedAccounts, Box<(HashSet<Address>, ProviderError)>>
) -> Result<LoadedAccounts, Box<(AddressSet, ProviderError)>>
where
I: IntoIterator<Item = Address>,
Client: StateProviderFactory,

View File

@@ -19,10 +19,10 @@ use alloy_eips::{
eip4844::{BlobAndProofV1, BlobAndProofV2},
eip7594::BlobTransactionSidecarVariant,
};
use alloy_primitives::{Address, TxHash, B256, U256};
use alloy_primitives::{map::AddressSet, Address, TxHash, B256, U256};
use reth_eth_wire_types::HandleMempoolData;
use reth_primitives_traits::Recovered;
use std::{collections::HashSet, marker::PhantomData, sync::Arc};
use std::{marker::PhantomData, sync::Arc};
use tokio::sync::{mpsc, mpsc::Receiver};
/// A [`TransactionPool`] implementation that does nothing.
@@ -312,7 +312,7 @@ impl<T: EthPoolTransaction> TransactionPool for NoopTransactionPool<T> {
vec![]
}
fn unique_senders(&self) -> HashSet<Address> {
fn unique_senders(&self) -> AddressSet {
Default::default()
}

View File

@@ -6,7 +6,7 @@ use crate::{
};
use alloy_consensus::Transaction;
use alloy_eips::Typed2718;
use alloy_primitives::Address;
use alloy_primitives::map::AddressSet;
use core::fmt;
use reth_primitives_traits::transaction::error::InvalidTransactionError;
use std::{
@@ -364,7 +364,7 @@ pub struct BestTransactionsWithPrioritizedSenders<I: Iterator> {
/// Inner iterator
inner: I,
/// A set of senders which transactions should be prioritized
prioritized_senders: HashSet<Address>,
prioritized_senders: AddressSet,
/// Maximum total gas limit of prioritized transactions
max_prioritized_gas: u64,
/// Buffer with transactions that are not being prioritized. Those will be the first to be
@@ -377,7 +377,7 @@ pub struct BestTransactionsWithPrioritizedSenders<I: Iterator> {
impl<I: Iterator> BestTransactionsWithPrioritizedSenders<I> {
/// Constructs a new [`BestTransactionsWithPrioritizedSenders`].
pub fn new(prioritized_senders: HashSet<Address>, max_prioritized_gas: u64, inner: I) -> Self {
pub fn new(prioritized_senders: AddressSet, max_prioritized_gas: u64, inner: I) -> Self {
Self {
inner,
prioritized_senders,
@@ -857,7 +857,7 @@ mod tests {
pool.add_transaction(Arc::new(valid_prioritized_tx2), 0);
let prioritized_senders =
HashSet::from([prioritized_tx.sender(), prioritized_tx2.sender()]);
AddressSet::from_iter([prioritized_tx.sender(), prioritized_tx2.sender()]);
let best =
BestTransactionsWithPrioritizedSenders::new(prioritized_senders, 200, pool.best());

View File

@@ -88,7 +88,10 @@ use crate::{
TransactionValidator,
};
use alloy_primitives::{Address, TxHash, B256};
use alloy_primitives::{
map::{AddressSet, HashSet},
Address, TxHash, B256,
};
use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
use reth_eth_wire_types::HandleMempoolData;
use reth_execution_types::ChangedAccount;
@@ -97,7 +100,6 @@ use alloy_eips::{eip7594::BlobTransactionSidecarVariant, Typed2718};
use reth_primitives_traits::Recovered;
use rustc_hash::FxHashMap;
use std::{
collections::HashSet,
fmt,
sync::{
atomic::{AtomicBool, Ordering},
@@ -218,7 +220,7 @@ where
}
/// Returns all senders in the pool
pub fn unique_senders(&self) -> HashSet<Address> {
pub fn unique_senders(&self) -> AddressSet {
self.get_pool_data().unique_senders()
}

View File

@@ -30,7 +30,9 @@ use alloy_eips::{
eip4844::BLOB_TX_MIN_BLOB_GASPRICE,
Typed2718,
};
use alloy_primitives::{Address, TxHash, B256};
#[cfg(test)]
use alloy_primitives::Address;
use alloy_primitives::{map::AddressSet, TxHash, B256};
use rustc_hash::FxHashMap;
use smallvec::SmallVec;
use std::{
@@ -184,7 +186,7 @@ impl<T: TransactionOrdering> TxPool<T> {
}
/// Returns all senders in the pool
pub(crate) fn unique_senders(&self) -> HashSet<Address> {
pub(crate) fn unique_senders(&self) -> AddressSet {
self.all_transactions.txs.values().map(|tx| tx.transaction.sender()).collect()
}

View File

@@ -8,12 +8,9 @@ use crate::{
test_utils::{MockOrdering, MockTransactionDistribution, MockTransactionFactory},
TransactionOrdering,
};
use alloy_primitives::{Address, U256};
use alloy_primitives::{map::AddressMap, Address, U256};
use rand::Rng;
use std::{
collections::HashMap,
ops::{Deref, DerefMut},
};
use std::ops::{Deref, DerefMut};
/// A wrapped `TxPool` with additional helpers for testing
pub(crate) struct MockPool<T: TransactionOrdering = MockOrdering> {
@@ -64,19 +61,19 @@ pub(crate) struct MockTransactionSimulator<R: Rng> {
/// Generator for transactions
tx_generator: MockTransactionDistribution,
/// represents the on chain balance of a sender.
balances: HashMap<Address, U256>,
balances: AddressMap<U256>,
/// represents the on chain nonce of a sender.
nonces: HashMap<Address, u64>,
nonces: AddressMap<u64>,
/// A set of addresses to use as senders.
senders: Vec<Address>,
/// What scenarios to execute.
scenarios: Vec<ScenarioType>,
/// All previous scenarios executed by a sender.
executed: HashMap<Address, ExecutedScenarios>,
executed: AddressMap<ExecutedScenarios>,
/// "Validates" generated transactions.
validator: MockTransactionFactory,
/// Represents the gaps in nonces for each sender.
nonce_gaps: HashMap<Address, u64>,
nonce_gaps: AddressMap<u64>,
/// The rng instance used to select senders and scenarios.
rng: R,
}

View File

@@ -70,7 +70,7 @@ use alloy_eips::{
eip7594::BlobTransactionSidecarVariant,
eip7702::SignedAuthorization,
};
use alloy_primitives::{Address, Bytes, TxHash, TxKind, B256, U256};
use alloy_primitives::{map::AddressSet, Address, Bytes, TxHash, TxKind, B256, U256};
use futures_util::{ready, Stream};
use reth_eth_wire_types::HandleMempoolData;
use reth_ethereum_primitives::{PooledTransactionVariant, TransactionSigned};
@@ -78,7 +78,7 @@ use reth_execution_types::ChangedAccount;
use reth_primitives_traits::{Block, InMemorySize, Recovered, SealedBlock, SignedTransaction};
use serde::{Deserialize, Serialize};
use std::{
collections::{HashMap, HashSet},
collections::HashMap,
fmt,
fmt::Debug,
future::Future,
@@ -622,7 +622,7 @@ pub trait TransactionPool: Clone + Debug + Send + Sync {
}
/// Returns a set of all senders of transactions in the pool
fn unique_senders(&self) -> HashSet<Address>;
fn unique_senders(&self) -> AddressSet;
/// Returns the [`BlobTransactionSidecarVariant`] for the given transaction hash if it exists in
/// the blob store.

View File

@@ -20,12 +20,7 @@ use reth_trie::{
HashedPostStateSorted, KeccakKeyHasher, StateRoot, TrieInputSorted,
};
use reth_trie_common::updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted};
use std::{
collections::{BTreeMap, HashMap},
ops::RangeInclusive,
sync::Arc,
time::Instant,
};
use std::{collections::BTreeMap, ops::RangeInclusive, sync::Arc, time::Instant};
use tracing::debug;
#[cfg(feature = "metrics")]
@@ -535,7 +530,7 @@ impl ChangesetCache {
#[derive(Debug)]
struct ChangesetCacheInner {
/// Cache entries: block hash -> (block number, changesets)
entries: HashMap<B256, (u64, Arc<TrieUpdatesSorted>)>,
entries: B256Map<(u64, Arc<TrieUpdatesSorted>)>,
/// Block number to hashes mapping for eviction
block_numbers: BTreeMap<u64, Vec<B256>>,
@@ -579,7 +574,7 @@ impl ChangesetCacheInner {
/// via the `evict()` method to manage memory usage.
fn new() -> Self {
Self {
entries: HashMap::new(),
entries: B256Map::default(),
block_numbers: BTreeMap::new(),
#[cfg(feature = "metrics")]
metrics: Default::default(),
@@ -683,7 +678,7 @@ impl ChangesetCacheInner {
#[cfg(test)]
mod tests {
use super::*;
use alloy_primitives::map::B256Map;
use alloy_primitives::map::{B256Map, HashMap};
// Helper function to create empty TrieUpdatesSorted for testing
fn create_test_changesets() -> Arc<TrieUpdatesSorted> {
@@ -760,7 +755,7 @@ mod tests {
let mut cache = ChangesetCacheInner::new();
// Insert blocks 100-165
let mut hashes = std::collections::HashMap::new();
let mut hashes = HashMap::new();
for i in 100..=165 {
let hash = B256::random();
cache.insert(hash, i, create_test_changesets());

View File

@@ -5,7 +5,7 @@ use crate::{
};
use alloc::{collections::VecDeque, vec::Vec};
use alloy_primitives::{
map::{B256Map, HashSet},
map::{B256Map, B256Set, HashSet},
Bytes, B256,
};
use alloy_rlp::{Decodable, Encodable};
@@ -1324,7 +1324,7 @@ struct StorageTrieModifications {
/// Access frequency and prune state per storage trie address.
state: B256Map<TrieModificationState>,
/// Tracks which tries were accessed in the current cycle (between prune calls).
accessed_this_cycle: HashSet<B256>,
accessed_this_cycle: B256Set,
}
#[allow(dead_code)]

View File

@@ -2248,7 +2248,7 @@ mod find_leaf_tests {
let blinded_hash = B256::repeat_byte(0xBB);
let leaf_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]);
let mut nodes = alloy_primitives::map::HashMap::default();
let mut nodes = HashMap::default();
// Create path to the blinded node
nodes.insert(
Nibbles::default(),

View File

@@ -1,5 +1,5 @@
#![allow(missing_docs, unreachable_pub)]
use alloy_primitives::{keccak256, map::HashMap, Address, B256, U256};
use alloy_primitives::{keccak256, map::AddressMap, Address, B256, U256};
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner};
use reth_trie::{HashedPostState, HashedStorage, KeccakKeyHasher};
@@ -30,7 +30,7 @@ pub fn hash_post_state(c: &mut Criterion) {
}
}
fn from_bundle_state_seq(state: &HashMap<Address, BundleAccount>) -> HashedPostState {
fn from_bundle_state_seq(state: &AddressMap<BundleAccount>) -> HashedPostState {
let mut this = HashedPostState::default();
for (address, account) in state {
@@ -49,7 +49,7 @@ fn from_bundle_state_seq(state: &HashMap<Address, BundleAccount>) -> HashedPostS
this
}
fn generate_test_data(size: usize) -> HashMap<Address, BundleAccount> {
fn generate_test_data(size: usize) -> AddressMap<BundleAccount> {
let storage_size = 1_000;
let mut runner = TestRunner::deterministic();
@@ -78,7 +78,7 @@ fn generate_test_data(size: usize) -> HashMap<Address, BundleAccount> {
let bundle_state = bundle_builder.build();
bundle_state.state
bundle_state.state.into_iter().collect()
}
criterion_group!(post_state, hash_post_state);

View File

@@ -1,6 +1,6 @@
//! An ExEx example that installs a new RPC subscription endpoint that emits storage changes for a
//! requested address.
use alloy_primitives::{Address, U256};
use alloy_primitives::{map::AddressMap, Address, U256};
use futures::TryStreamExt;
use jsonrpsee::{
core::SubscriptionResult, proc_macros::rpc, PendingSubscriptionSink, SubscriptionMessage,
@@ -9,7 +9,6 @@ use reth_ethereum::{
exex::{ExExContext, ExExEvent, ExExNotification},
node::{api::FullNodeComponents, builder::NodeHandleFor, EthereumNode},
};
use std::collections::HashMap;
use tokio::sync::{mpsc, oneshot};
use tracing::{error, info};
@@ -97,8 +96,8 @@ async fn my_exex<Node: FullNodeComponents>(
mut ctx: ExExContext<Node>,
mut subscription_requests: mpsc::UnboundedReceiver<SubscriptionRequest>,
) -> eyre::Result<()> {
let mut subscriptions: HashMap<Address, Vec<mpsc::UnboundedSender<StorageDiff>>> =
HashMap::new();
let mut subscriptions: AddressMap<Vec<mpsc::UnboundedSender<StorageDiff>>> =
AddressMap::default();
loop {
tokio::select! {

View File

@@ -9,6 +9,7 @@ repository.workspace = true
exclude.workspace = true
[dependencies]
alloy-primitives.workspace = true
reth-ethereum = { workspace = true, features = ["node"] }
eyre.workspace = true

View File

@@ -7,9 +7,10 @@
//! 3. Get contract bytecode
//! 4. Iterate through all storage slots for the contract
use alloy_primitives::map::B256Map;
use reth_ethereum::{
chainspec::ChainSpecBuilder,
evm::revm::primitives::{Address, B256, U256},
evm::revm::primitives::{Address, U256},
node::EthereumNode,
primitives::{Account, Bytecode},
provider::{
@@ -23,7 +24,7 @@ use reth_ethereum::{
},
storage::{DBProvider, StateProvider},
};
use std::{collections::HashMap, str::FromStr};
use std::str::FromStr;
/// Represents the complete state of a contract including account info, bytecode, and storage
#[derive(Debug, Clone)]
@@ -35,7 +36,7 @@ pub struct ContractState {
/// Contract bytecode (None if not a contract or doesn't exist)
pub bytecode: Option<Bytecode>,
/// All storage slots for the contract
pub storage: HashMap<B256, U256>,
pub storage: B256Map<U256>,
}
/// Extract the full state of a specific contract
@@ -52,7 +53,7 @@ pub fn extract_contract_state<P: DBProvider>(
let bytecode = state_provider.account_code(&contract_address)?;
let mut storage_cursor = provider.tx_ref().cursor_dup_read::<tables::PlainStorageState>()?;
let mut storage = HashMap::new();
let mut storage = B256Map::default();
if let Some((_, first_entry)) = storage_cursor.seek_exact(contract_address)? {
storage.insert(first_entry.key, first_entry.value);

View File

@@ -2,16 +2,16 @@
//! signers to the genesis block.
use alloy_genesis::GenesisAccount;
use alloy_primitives::{Address, Bytes, B256, U256};
use alloy_primitives::{
map::{AddressMap, Entry},
Address, Bytes, B256, U256,
};
use reth_primitives_traits::crypto::secp256k1::public_key_to_address;
use secp256k1::{
rand::{thread_rng, RngCore},
Keypair, Secp256k1,
};
use std::{
collections::{hash_map::Entry, BTreeMap, HashMap},
fmt,
};
use std::{collections::BTreeMap, fmt};
/// This helps create a custom genesis alloc by making it easy to add funded accounts with known
/// signers to the genesis block.
@@ -43,7 +43,7 @@ use std::{
/// ```
pub struct GenesisAllocator<'a> {
/// The genesis alloc to be built.
alloc: HashMap<Address, GenesisAccount>,
alloc: AddressMap<GenesisAccount>,
/// The rng to use for generating key pairs.
rng: Box<dyn RngCore + 'a>,
}
@@ -54,7 +54,7 @@ impl<'a> GenesisAllocator<'a> {
where
R: RngCore,
{
Self { alloc: HashMap::default(), rng: Box::new(rng) }
Self { alloc: AddressMap::default(), rng: Box::new(rng) }
}
/// Use the provided rng for generating key pairs.
@@ -189,14 +189,14 @@ impl<'a> GenesisAllocator<'a> {
}
/// Build the genesis alloc.
pub fn build(self) -> HashMap<Address, GenesisAccount> {
pub fn build(self) -> AddressMap<GenesisAccount> {
self.alloc
}
}
impl Default for GenesisAllocator<'_> {
fn default() -> Self {
Self { alloc: HashMap::default(), rng: Box::new(thread_rng()) }
Self { alloc: AddressMap::default(), rng: Box::new(thread_rng()) }
}
}