Files
reth/crates/transaction-pool/src/pool/txpool.rs
2026-02-03 14:13:52 +00:00

4548 lines
185 KiB
Rust

//! The internal transaction pool implementation.
use crate::{
config::{LocalTransactionConfig, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER},
error::{
Eip4844PoolTransactionError, Eip7702PoolTransactionError, InvalidPoolTransactionError,
PoolError, PoolErrorKind,
},
identifier::{SenderId, TransactionId},
metrics::{AllTransactionsMetrics, TxPoolMetrics},
pool::{
best::BestTransactions,
blob::BlobTransactions,
parked::{BasefeeOrd, ParkedPool, QueuedOrd},
pending::PendingPool,
state::{SubPool, TxState},
update::{Destination, PoolUpdate, UpdateOutcome},
AddedPendingTransaction, AddedTransaction, OnNewCanonicalStateOutcome,
},
traits::{BestTransactionsAttributes, BlockInfo, PoolSize},
PoolConfig, PoolResult, PoolTransaction, PoolUpdateKind, PriceBumpConfig, TransactionOrdering,
ValidPoolTransaction, U256,
};
use alloy_consensus::constants::{
EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, KECCAK_EMPTY,
LEGACY_TX_TYPE_ID,
};
use alloy_eips::{
eip1559::{ETHEREUM_BLOCK_GAS_LIMIT_30M, MIN_PROTOCOL_BASE_FEE},
eip4844::BLOB_TX_MIN_BLOB_GASPRICE,
Typed2718,
};
use alloy_primitives::{Address, TxHash, B256};
use rustc_hash::FxHashMap;
use smallvec::SmallVec;
use std::{
cmp::Ordering,
collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet},
fmt,
ops::Bound::{Excluded, Unbounded},
sync::Arc,
};
use tracing::{trace, warn};
#[cfg_attr(doc, aquamarine::aquamarine)]
// TODO: Inlined diagram due to a bug in aquamarine library, should become an include when it's
// fixed. See https://github.com/mersinvald/aquamarine/issues/50
// include_mmd!("docs/mermaid/txpool.mmd")
/// A pool that manages transactions.
///
/// This pool maintains the state of all transactions and stores them accordingly.
///
/// ```mermaid
/// graph TB
/// subgraph TxPool
/// direction TB
/// pool[(All Transactions)]
/// subgraph Subpools
/// direction TB
/// B3[(Queued)]
/// B1[(Pending)]
/// B2[(Basefee)]
/// B4[(Blob)]
/// end
/// end
/// discard([discard])
/// production([Block Production])
/// new([New Block])
/// A[Incoming Tx] --> B[Validation] --> |if ok| pool
/// pool --> |if ready + blobfee too low| B4
/// pool --> |if ready| B1
/// pool --> |if ready + basefee too low| B2
/// pool --> |nonce gap or lack of funds| B3
/// pool --> |update| pool
/// B1 --> |best| production
/// B2 --> |worst| discard
/// B3 --> |worst| discard
/// B4 --> |worst| discard
/// B1 --> |increased blob fee| B4
/// B4 --> |decreased blob fee| B1
/// B1 --> |increased base fee| B2
/// B2 --> |decreased base fee| B1
/// B3 --> |promote| B1
/// B3 --> |promote| B2
/// new --> |apply state changes| pool
/// ```
pub struct TxPool<T: TransactionOrdering> {
    /// pending subpool
    ///
    /// Holds transactions that are ready to be executed on the current state.
    pending_pool: PendingPool<T>,
    /// Pool settings to enforce limits etc.
    config: PoolConfig,
    /// queued subpool
    ///
    /// Holds all parked transactions that depend on external changes from the sender:
    ///
    /// - blocked by missing ancestor transaction (has nonce gaps)
    /// - sender lacks funds to pay for this transaction.
    queued_pool: ParkedPool<QueuedOrd<T::Transaction>>,
    /// base fee subpool
    ///
    /// Holds all parked transactions that currently violate the dynamic fee requirement but could
    /// be moved to pending if the base fee changes in their favor (decreases) in future blocks.
    basefee_pool: ParkedPool<BasefeeOrd<T::Transaction>>,
    /// Blob transactions in the pool that are __not pending__.
    ///
    /// This means they either do not satisfy the dynamic fee requirement or the blob fee
    /// requirement. These transactions can be moved to pending if the base fee or blob fee changes
    /// in their favor (decreases) in future blocks. The transaction may need both the base fee and
    /// blob fee to decrease to become executable.
    blob_pool: BlobTransactions<T::Transaction>,
    /// All transactions in the pool.
    ///
    /// This is the identity map every transaction is tracked in, regardless of sub-pool.
    all_transactions: AllTransactions<T::Transaction>,
    /// Transaction pool metrics
    metrics: TxPoolMetrics,
}
// === impl TxPool ===
impl<T: TransactionOrdering> TxPool<T> {
/// Creates a new pool instance with the given ordering and configuration.
pub fn new(ordering: T, config: PoolConfig) -> Self {
    // the pending pool needs the ordering and its notification buffer size up front
    let pending_pool =
        PendingPool::with_buffer(ordering, config.max_new_pending_txs_notifications);
    // the identity map borrows the config before the config is moved into the pool
    let all_transactions = AllTransactions::new(&config);
    Self {
        pending_pool,
        queued_pool: Default::default(),
        basefee_pool: Default::default(),
        blob_pool: Default::default(),
        all_transactions,
        config,
        metrics: Default::default(),
    }
}
/// Retrieves the highest nonce for a specific sender from the transaction pool.
pub fn get_highest_nonce_by_sender(&self, sender: SenderId) -> Option<u64> {
    // walk the sender's transactions in iteration order and keep the last nonce seen
    let mut highest = None;
    for (_, tx) in self.all().txs_iter(sender) {
        highest = Some(tx.transaction.nonce());
    }
    highest
}
/// Retrieves the highest transaction (wrapped in an `Arc`) for a specific sender from the
/// transaction pool.
pub fn get_highest_transaction_by_sender(
    &self,
    sender: SenderId,
) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
    // keep the last transaction yielded by the sender's iterator
    let mut newest = None;
    for (_, tx) in self.all().txs_iter(sender) {
        newest = Some(&tx.transaction);
    }
    newest.map(Arc::clone)
}
/// Returns the transaction with the highest nonce that is executable given the on chain nonce.
///
/// If the pool already tracks a higher nonce for the given sender, then this nonce is used
/// instead.
///
/// Note: The next pending pooled transaction must have the on chain nonce.
pub(crate) fn get_highest_consecutive_transaction_by_sender(
    &self,
    mut on_chain: TransactionId,
) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
    // start from the most recent nonce the pool tracks for this sender, if any
    if let Some(info) = self.all_transactions.sender_info.get(&on_chain.sender) {
        on_chain.nonce = on_chain.nonce.max(info.state_nonce);
    }
    // walk descendants while the nonces are strictly consecutive and keep the last one
    let mut expected_nonce = on_chain.nonce;
    self.all()
        .descendant_txs_inclusive(&on_chain)
        .take_while(|(id, _)| {
            if id.nonce == expected_nonce {
                expected_nonce = id.next_nonce();
                true
            } else {
                // first gap terminates the consecutive chain
                false
            }
        })
        .last()
        .map(|(_, tx)| Arc::clone(&tx.transaction))
}
/// Returns access to the [`AllTransactions`] container.
///
/// This is the identity map of every transaction currently tracked by the pool,
/// regardless of which sub-pool it currently resides in.
pub(crate) const fn all(&self) -> &AllTransactions<T::Transaction> {
    &self.all_transactions
}
/// Returns all senders in the pool
pub(crate) fn unique_senders(&self) -> HashSet<Address> {
    // insert into a set to deduplicate senders with multiple transactions
    let mut senders = HashSet::new();
    for tx in self.all_transactions.txs.values() {
        senders.insert(tx.transaction.sender());
    }
    senders
}
/// Returns stats about the size of pool.
pub fn size(&self) -> PoolSize {
    PoolSize {
        pending: self.pending_pool.len(),
        pending_size: self.pending_pool.size(),
        basefee: self.basefee_pool.len(),
        basefee_size: self.basefee_pool.size(),
        queued: self.queued_pool.len(),
        queued_size: self.queued_pool.size(),
        blob: self.blob_pool.len(),
        blob_size: self.blob_pool.size(),
        // `total` is taken from the identity map of all tracked transactions,
        // not derived from the sub-pool counts above
        total: self.all_transactions.len(),
    }
}
/// Returns the currently tracked block values
pub const fn block_info(&self) -> BlockInfo {
    BlockInfo {
        block_gas_limit: self.all_transactions.block_gas_limit,
        last_seen_block_hash: self.all_transactions.last_seen_block_hash,
        last_seen_block_number: self.all_transactions.last_seen_block_number,
        pending_basefee: self.all_transactions.pending_fees.base_fee,
        // the pool always tracks a blob fee internally, hence always `Some`
        pending_blob_fee: Some(self.all_transactions.pending_fees.blob_fee),
    }
}
/// Updates the tracked blob fee
///
/// `pending_blob_fee` is the blob fee of the next block, `base_fee_update` is the direction
/// the base fee moved (see [`Self::update_basefee`]), and `on_promoted` is invoked for every
/// transaction that becomes pending as a result of this update.
fn update_blob_fee<F>(
    &mut self,
    mut pending_blob_fee: u128,
    base_fee_update: Ordering,
    mut on_promoted: F,
) where
    F: FnMut(&Arc<ValidPoolTransaction<T::Transaction>>),
{
    // after the swap, `pending_blob_fee` holds the _previous_ tracked value
    std::mem::swap(&mut self.all_transactions.pending_fees.blob_fee, &mut pending_blob_fee);
    match (self.all_transactions.pending_fees.blob_fee.cmp(&pending_blob_fee), base_fee_update)
    {
        (Ordering::Equal, Ordering::Equal | Ordering::Greater) => {
            // fee unchanged, nothing to update
        }
        (Ordering::Greater, Ordering::Equal | Ordering::Greater) => {
            // increased blob fee: recheck pending pool and remove all that are no longer valid
            let removed =
                self.pending_pool.update_blob_fee(self.all_transactions.pending_fees.blob_fee);
            for tx in removed {
                let to = {
                    let tx =
                        self.all_transactions.txs.get_mut(tx.id()).expect("tx exists in set");
                    // the blob fee is too high now, unset the blob fee cap block flag
                    tx.state.remove(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK);
                    tx.subpool = tx.state.into();
                    tx.subpool
                };
                self.add_transaction_to_subpool(to, tx);
            }
        }
        (Ordering::Less, _) | (_, Ordering::Less) => {
            // decreased blob/base fee: recheck blob pool and promote all that are now valid
            // NOTE: a base fee decrease alone matters here because the blob pool holds
            // transactions that fail either the base fee or the blob fee requirement
            let removed =
                self.blob_pool.enforce_pending_fees(&self.all_transactions.pending_fees);
            for tx in removed {
                let subpool = {
                    let tx_meta =
                        self.all_transactions.txs.get_mut(tx.id()).expect("tx exists in set");
                    tx_meta.state.insert(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK);
                    tx_meta.state.insert(TxState::ENOUGH_FEE_CAP_BLOCK);
                    tx_meta.subpool = tx_meta.state.into();
                    tx_meta.subpool
                };
                if subpool == SubPool::Pending {
                    on_promoted(&tx);
                }
                self.add_transaction_to_subpool(subpool, tx);
            }
        }
    }
}
/// Updates the tracked basefee
///
/// Depending on the change in direction of the basefee, this will promote or demote
/// transactions from the basefee pool.
///
/// Returns the ordering of the new base fee relative to the previously tracked value; callers
/// (e.g. [`Self::set_block_info`]) feed this into [`Self::update_blob_fee`].
fn update_basefee<F>(&mut self, mut pending_basefee: u64, mut on_promoted: F) -> Ordering
where
    F: FnMut(&Arc<ValidPoolTransaction<T::Transaction>>),
{
    // after the swap, `pending_basefee` holds the _previous_ tracked value
    std::mem::swap(&mut self.all_transactions.pending_fees.base_fee, &mut pending_basefee);
    match self.all_transactions.pending_fees.base_fee.cmp(&pending_basefee) {
        Ordering::Equal => {
            // fee unchanged, nothing to update
            Ordering::Equal
        }
        Ordering::Greater => {
            // increased base fee: recheck pending pool and remove all that are no longer valid
            let removed =
                self.pending_pool.update_base_fee(self.all_transactions.pending_fees.base_fee);
            for tx in removed {
                let to = {
                    let tx =
                        self.all_transactions.txs.get_mut(tx.id()).expect("tx exists in set");
                    // the base fee is too high now, unset the fee cap block flag and let the
                    // resulting state decide the destination sub-pool
                    tx.state.remove(TxState::ENOUGH_FEE_CAP_BLOCK);
                    tx.subpool = tx.state.into();
                    tx.subpool
                };
                self.add_transaction_to_subpool(to, tx);
            }
            Ordering::Greater
        }
        Ordering::Less => {
            // Base fee decreased: recheck BaseFee and promote.
            // Invariants:
            // - BaseFee contains only non-blob txs (blob txs live in Blob) and they already
            //   have ENOUGH_BLOB_FEE_CAP_BLOCK.
            // - PENDING_POOL_BITS = BASE_FEE_POOL_BITS | ENOUGH_FEE_CAP_BLOCK |
            //   ENOUGH_BLOB_FEE_CAP_BLOCK.
            // With the lower base fee they gain ENOUGH_FEE_CAP_BLOCK, so we can set the bit and
            // insert directly into Pending (skip generic routing).
            let current_base_fee = self.all_transactions.pending_fees.base_fee;
            self.basefee_pool.enforce_basefee_with(current_base_fee, |tx| {
                // Update transaction state — guaranteed Pending by the invariants above
                let subpool = {
                    let meta =
                        self.all_transactions.txs.get_mut(tx.id()).expect("tx exists in set");
                    meta.state.insert(TxState::ENOUGH_FEE_CAP_BLOCK);
                    meta.subpool = meta.state.into();
                    meta.subpool
                };
                if subpool == SubPool::Pending {
                    on_promoted(&tx);
                }
                trace!(target: "txpool", hash=%tx.transaction.hash(), pool=?subpool, "Adding transaction to a subpool");
                // defensive routing: handle all destinations even though the invariants say
                // the only reachable one is Pending
                match subpool {
                    SubPool::Queued => self.queued_pool.add_transaction(tx),
                    SubPool::Pending => {
                        self.pending_pool.add_transaction(tx, current_base_fee);
                    }
                    SubPool::Blob => {
                        self.blob_pool.add_transaction(tx);
                    }
                    SubPool::BaseFee => {
                        // This should be unreachable as transactions from BaseFee pool with
                        // decreased basefee are guaranteed to become Pending
                        warn!(target: "txpool", "BaseFee transactions should become Pending after basefee decrease");
                    }
                }
            });
            Ordering::Less
        }
    }
}
/// Sets the current block info for the pool.
///
/// This will also apply updates to the pool based on the new base fee and blob fee
pub fn set_block_info(&mut self, info: BlockInfo) {
    // first update the subpools based on the new values;
    // the base fee is updated first so its movement direction can inform the blob fee update
    let basefee_ordering = self.update_basefee(info.pending_basefee, |_| {});
    if let Some(blob_fee) = info.pending_blob_fee {
        self.update_blob_fee(blob_fee, basefee_ordering, |_| {})
    }
    // then update tracked values
    self.all_transactions.set_block_info(info);
}
/// Returns an iterator that yields transactions that are ready to be included in the block with
/// the tracked fees.
///
/// This operates solely on the pending sub-pool.
pub(crate) fn best_transactions(&self) -> BestTransactions<T> {
    self.pending_pool.best()
}
/// Returns an iterator that yields transactions that are ready to be included in the block with
/// the given base fee and optional blob fee.
///
/// If the provided attributes differ from the currently tracked fees, this will also include
/// transactions that are unlocked by the new fees, or exclude transactions that are no longer
/// valid with the new fees.
pub(crate) fn best_transactions_with_attributes(
    &self,
    best_transactions_attributes: BestTransactionsAttributes,
) -> Box<dyn crate::traits::BestTransactions<Item = Arc<ValidPoolTransaction<T::Transaction>>>>
{
    // First we need to check if the given base fee is different than what's currently being
    // tracked
    match best_transactions_attributes.basefee.cmp(&self.all_transactions.pending_fees.base_fee)
    {
        Ordering::Equal => {
            // for EIP-4844 transactions we also need to check if the blob fee is now lower than
            // what's currently being tracked, if so we need to include transactions from the
            // blob pool that are valid with the lower blob fee
            let new_blob_fee = best_transactions_attributes.blob_fee.unwrap_or_default();
            match new_blob_fee.cmp(&(self.all_transactions.pending_fees.blob_fee as u64)) {
                Ordering::Less => {
                    // it's possible that this swing unlocked more blob transactions
                    let unlocked =
                        self.blob_pool.satisfy_attributes(best_transactions_attributes);
                    Box::new(self.pending_pool.best_with_unlocked_and_attributes(
                        unlocked,
                        best_transactions_attributes.basefee,
                        new_blob_fee,
                    ))
                }
                // both fees match the tracked state, so the plain best iterator is accurate
                Ordering::Equal => Box::new(self.pending_pool.best()),
                Ordering::Greater => {
                    // no additional transactions unlocked
                    Box::new(self.pending_pool.best_with_basefee_and_blobfee(
                        best_transactions_attributes.basefee,
                        best_transactions_attributes.blob_fee.unwrap_or_default(),
                    ))
                }
            }
        }
        Ordering::Greater => {
            // base fee increased, we need to check how the blob fee moved
            let new_blob_fee = best_transactions_attributes.blob_fee.unwrap_or_default();
            match new_blob_fee.cmp(&(self.all_transactions.pending_fees.blob_fee as u64)) {
                Ordering::Less => {
                    // it's possible that this swing unlocked more blob transactions
                    let unlocked =
                        self.blob_pool.satisfy_attributes(best_transactions_attributes);
                    Box::new(self.pending_pool.best_with_unlocked_and_attributes(
                        unlocked,
                        best_transactions_attributes.basefee,
                        new_blob_fee,
                    ))
                }
                Ordering::Equal | Ordering::Greater => {
                    // no additional transactions unlocked
                    Box::new(self.pending_pool.best_with_basefee_and_blobfee(
                        best_transactions_attributes.basefee,
                        new_blob_fee,
                    ))
                }
            }
        }
        Ordering::Less => {
            // base fee decreased, we need to move transactions from the basefee + blob pool to
            // the pending pool that might be unlocked by the lower base fee
            let mut unlocked = self
                .basefee_pool
                .satisfy_base_fee_transactions(best_transactions_attributes.basefee);
            // also include blob pool transactions that are now unlocked
            unlocked.extend(self.blob_pool.satisfy_attributes(best_transactions_attributes));
            Box::new(self.pending_pool.best_with_unlocked_and_attributes(
                unlocked,
                best_transactions_attributes.basefee,
                best_transactions_attributes.blob_fee.unwrap_or_default(),
            ))
        }
    }
}
/// Returns all transactions from the pending sub-pool
pub(crate) fn pending_transactions(&self) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
    // materialize the pending iterator
    self.pending_transactions_iter().collect()
}
/// Returns an iterator over all transactions from the pending sub-pool
///
/// The iterator borrows the pool for its lifetime.
pub(crate) fn pending_transactions_iter(
    &self,
) -> impl Iterator<Item = Arc<ValidPoolTransaction<T::Transaction>>> + '_ {
    self.pending_pool.all()
}
/// Returns the number of transactions from the pending sub-pool
pub(crate) fn pending_transactions_count(&self) -> usize {
    self.pending_pool.len()
}
/// Returns all pending transactions filtered by predicate
pub(crate) fn pending_transactions_with_predicate(
    &self,
    mut predicate: impl FnMut(&ValidPoolTransaction<T::Transaction>) -> bool,
) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
    // collect only the pending transactions accepted by the predicate
    let mut matching = Vec::new();
    for tx in self.pending_transactions_iter() {
        if predicate(&tx) {
            matching.push(tx);
        }
    }
    matching
}
/// Returns all pending transactions for the specified sender
pub(crate) fn pending_txs_by_sender(
    &self,
    sender: SenderId,
) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
    let mut txs = Vec::new();
    for tx in self.pending_transactions_iter() {
        if tx.sender_id() == sender {
            txs.push(tx);
        }
    }
    txs
}
/// Returns all transactions from parked pools
pub(crate) fn queued_transactions(&self) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
    // base fee pool entries first, then the queued pool entries
    let mut txs: Vec<_> = self.basefee_pool.all().collect();
    txs.extend(self.queued_pool.all());
    txs
}
/// Returns an iterator over all transactions from parked pools
///
/// Base fee pool transactions are yielded before queued pool transactions.
pub(crate) fn queued_transactions_iter(
    &self,
) -> impl Iterator<Item = Arc<ValidPoolTransaction<T::Transaction>>> + '_ {
    self.basefee_pool.all().chain(self.queued_pool.all())
}
/// Returns the number of transactions in parked pools
///
/// This is the combined count of the base fee and queued sub-pools.
pub(crate) fn queued_transactions_count(&self) -> usize {
    self.basefee_pool.len() + self.queued_pool.len()
}
/// Returns queued and pending transactions for the specified sender
pub fn queued_and_pending_txs_by_sender(
    &self,
    sender: SenderId,
) -> (SmallVec<[TransactionId; TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER]>, Vec<TransactionId>) {
    // query both sub-pools independently and pair the results
    let queued = self.queued_pool.get_txs_by_sender(sender);
    let pending = self.pending_pool.get_txs_by_sender(sender);
    (queued, pending)
}
/// Returns all queued transactions for the specified sender
pub(crate) fn queued_txs_by_sender(
    &self,
    sender: SenderId,
) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
    let mut txs = Vec::new();
    for tx in self.queued_transactions_iter() {
        if tx.sender_id() == sender {
            txs.push(tx);
        }
    }
    txs
}
/// Returns `true` if the transaction with the given hash is already included in this pool.
pub(crate) fn contains(&self, tx_hash: &TxHash) -> bool {
    self.all_transactions.contains(tx_hash)
}
/// Returns `true` if the transaction with the given id is already included in the given subpool
///
/// Test-only helper for asserting a transaction's sub-pool placement.
#[cfg(test)]
pub(crate) fn subpool_contains(&self, subpool: SubPool, id: &TransactionId) -> bool {
    match subpool {
        SubPool::Queued => self.queued_pool.contains(id),
        SubPool::Pending => self.pending_pool.contains(id),
        SubPool::BaseFee => self.basefee_pool.contains(id),
        SubPool::Blob => self.blob_pool.contains(id),
    }
}
/// Returns `true` if the pool is over its configured limits.
#[inline]
pub(crate) fn is_exceeded(&self) -> bool {
    self.config.is_exceeded(self.size())
}
/// Returns the transaction for the given hash.
pub(crate) fn get(
    &self,
    tx_hash: &TxHash,
) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
    // Arc::clone makes the cheap refcount bump explicit
    self.all_transactions.by_hash.get(tx_hash).map(Arc::clone)
}
/// Returns transactions for the multiple given hashes, if they exist.
///
/// Unknown hashes are silently skipped.
pub(crate) fn get_all(
    &self,
    txs: Vec<TxHash>,
) -> impl Iterator<Item = Arc<ValidPoolTransaction<T::Transaction>>> + '_ {
    txs.into_iter().filter_map(move |hash| self.get(&hash))
}
/// Returns all transactions sent from the given sender.
pub(crate) fn get_transactions_by_sender(
    &self,
    sender: SenderId,
) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
    let mut txs = Vec::new();
    for (_, tx) in self.all_transactions.txs_iter(sender) {
        txs.push(Arc::clone(&tx.transaction));
    }
    txs
}
/// Returns a pending transaction sent by the given sender with the given nonce.
pub(crate) fn get_pending_transaction_by_sender_and_nonce(
    &self,
    sender: SenderId,
    nonce: u64,
) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
    // return the first transaction matching nonce that currently resides in Pending
    for (id, tx) in self.all_transactions.txs_iter(sender) {
        if id.nonce == nonce && tx.subpool == SubPool::Pending {
            return Some(Arc::clone(&tx.transaction))
        }
    }
    None
}
/// Updates only the pending fees without triggering subpool updates.
/// Returns the previous base fee and blob fee values.
///
/// If `new_blob_fee` is `None` the tracked blob fee is left untouched and the returned
/// "previous" blob fee is simply the currently tracked one.
const fn update_pending_fees_only(
    &mut self,
    mut new_base_fee: u64,
    new_blob_fee: Option<u128>,
) -> (u64, u128) {
    // after the swap, the parameter holds the previous value
    std::mem::swap(&mut self.all_transactions.pending_fees.base_fee, &mut new_base_fee);
    let prev_blob_fee = if let Some(mut blob_fee) = new_blob_fee {
        std::mem::swap(&mut self.all_transactions.pending_fees.blob_fee, &mut blob_fee);
        blob_fee
    } else {
        self.all_transactions.pending_fees.blob_fee
    };
    (new_base_fee, prev_blob_fee)
}
/// Applies fee-based promotion updates based on the previous fees.
///
/// Records promoted transactions based on fee swings.
///
/// Caution: This expects that the fees were previously already updated via
/// [`Self::update_pending_fees_only`].
fn apply_fee_updates(
    &mut self,
    prev_base_fee: u64,
    prev_blob_fee: u128,
    outcome: &mut UpdateOutcome<T::Transaction>,
) {
    let new_base_fee = self.all_transactions.pending_fees.base_fee;
    let new_blob_fee = self.all_transactions.pending_fees.blob_fee;
    if new_base_fee == prev_base_fee && new_blob_fee == prev_blob_fee {
        // nothing to update
        return;
    }
    // IMPORTANT:
    // Restore previous fees so that the update fee functions correctly handle fee swings
    // (they compare the new value against the currently tracked one and swap internally)
    self.all_transactions.pending_fees.base_fee = prev_base_fee;
    self.all_transactions.pending_fees.blob_fee = prev_blob_fee;
    // base fee first: its movement direction feeds into the blob fee update
    let base_fee_ordering = self.update_basefee(new_base_fee, |tx| {
        outcome.promoted.push(tx.clone());
    });
    self.update_blob_fee(new_blob_fee, base_fee_ordering, |tx| {
        outcome.promoted.push(tx.clone());
    });
}
/// Updates the transactions for the changed senders.
pub(crate) fn update_accounts(
    &mut self,
    changed_senders: FxHashMap<SenderId, SenderInfo>,
) -> UpdateOutcome<T::Transaction> {
    // Re-evaluate all transactions against the new sender states; this yields the set of
    // sub-pool moves/discards that need to be applied.
    let pool_updates = self.all_transactions.update(&changed_senders);
    // Persist the new sender states.
    self.all_transactions.sender_info.extend(changed_senders);
    // Apply the moves/discards to the sub-pools.
    let outcome = self.process_updates(pool_updates);
    // Refresh size metrics to reflect the new pool composition.
    self.update_size_metrics();
    outcome
}
/// Updates the entire pool after a new block was mined.
///
/// This removes all mined transactions, updates according to the new base fee and blob fee and
/// rechecks sender allowance based on the given changed sender infos.
pub(crate) fn on_canonical_state_change(
    &mut self,
    block_info: BlockInfo,
    mined_transactions: Vec<TxHash>,
    changed_senders: FxHashMap<SenderId, SenderInfo>,
    _update_kind: PoolUpdateKind,
) -> OnNewCanonicalStateOutcome<T::Transaction> {
    // update block info
    let block_hash = block_info.last_seen_block_hash;
    // Remove all transaction that were included in the block
    let mut removed_txs_count = 0;
    for tx_hash in &mined_transactions {
        if self.prune_transaction_by_hash(tx_hash).is_some() {
            removed_txs_count += 1;
        }
    }
    // Update removed transactions metric
    self.metrics.removed_transactions.increment(removed_txs_count);
    // Update fees internally first without triggering subpool updates based on fee movements.
    // This must happen before we update the changed senders so that all account updates use the
    // new fee values, this way all changed accounts remain unaffected by the fee updates that
    // are performed in the next step and we don't collect promotions twice
    let (prev_base_fee, prev_blob_fee) =
        self.update_pending_fees_only(block_info.pending_basefee, block_info.pending_blob_fee);
    // Now update accounts with the new fees already set
    let mut outcome = self.update_accounts(changed_senders);
    // Apply subpool updates based on fee changes
    // This will record any additional promotions based on fee movements
    self.apply_fee_updates(prev_base_fee, prev_blob_fee, &mut outcome);
    // Update the rest of block info (without triggering fee updates again)
    self.all_transactions.set_block_info(block_info);
    self.update_transaction_type_metrics();
    self.metrics.performed_state_updates.increment(1);
    OnNewCanonicalStateOutcome {
        block_hash,
        mined: mined_transactions,
        promoted: outcome.promoted,
        discarded: outcome.discarded,
    }
}
/// Update sub-pools size metrics.
pub(crate) fn update_size_metrics(&self) {
let stats = self.size();
self.metrics.pending_pool_transactions.set(stats.pending as f64);
self.metrics.pending_pool_size_bytes.set(stats.pending_size as f64);
self.metrics.basefee_pool_transactions.set(stats.basefee as f64);
self.metrics.basefee_pool_size_bytes.set(stats.basefee_size as f64);
self.metrics.queued_pool_transactions.set(stats.queued as f64);
self.metrics.queued_pool_size_bytes.set(stats.queued_size as f64);
self.metrics.blob_pool_transactions.set(stats.blob as f64);
self.metrics.blob_pool_size_bytes.set(stats.blob_size as f64);
self.metrics.total_transactions.set(stats.total as f64);
}
/// Updates transaction type metrics for the entire pool.
pub(crate) fn update_transaction_type_metrics(&self) {
    // counter slots: [legacy, eip2930, eip1559, eip4844, eip7702, other]
    let mut counts = [0u64; 6];
    for tx in self.all_transactions.transactions_iter() {
        let slot = match tx.transaction.ty() {
            LEGACY_TX_TYPE_ID => 0,
            EIP2930_TX_TYPE_ID => 1,
            EIP1559_TX_TYPE_ID => 2,
            EIP4844_TX_TYPE_ID => 3,
            EIP7702_TX_TYPE_ID => 4,
            _ => 5,
        };
        counts[slot] += 1;
    }
    let [legacy, eip2930, eip1559, eip4844, eip7702, other] = counts;
    self.metrics.total_legacy_transactions.set(legacy as f64);
    self.metrics.total_eip2930_transactions.set(eip2930 as f64);
    self.metrics.total_eip1559_transactions.set(eip1559 as f64);
    self.metrics.total_eip4844_transactions.set(eip4844 as f64);
    self.metrics.total_eip7702_transactions.set(eip7702 as f64);
    self.metrics.total_other_transactions.set(other as f64);
}
/// Adds a validated transaction into the pool.
///
/// Inserts the transaction into `all_transactions`, routes it to the matching sub-pool and
/// applies any resulting sub-pool moves (promotions/discards).
///
/// Returns the insertion outcome, or an error if the transaction is already tracked, violates
/// the EIP-7702 delegation rules, or fails one of the insertion checks (see the `InsertErr`
/// mapping below).
pub(crate) fn add_transaction(
    &mut self,
    tx: ValidPoolTransaction<T::Transaction>,
    on_chain_balance: U256,
    on_chain_nonce: u64,
    on_chain_code_hash: Option<B256>,
) -> PoolResult<AddedTransaction<T::Transaction>> {
    if self.contains(tx.hash()) {
        return Err(PoolError::new(*tx.hash(), PoolErrorKind::AlreadyImported))
    }
    // enforce EIP-7702 delegation/authority restrictions before insertion
    self.validate_auth(&tx, on_chain_nonce, on_chain_code_hash)?;
    // Update sender info with balance and nonce
    self.all_transactions
        .sender_info
        .entry(tx.sender_id())
        .or_default()
        .update(on_chain_nonce, on_chain_balance);
    match self.all_transactions.insert_tx(tx, on_chain_balance, on_chain_nonce) {
        Ok(InsertOk { transaction, move_to, replaced_tx, updates, state }) => {
            // replace the new tx and remove the replaced in the subpool(s)
            self.add_new_transaction(transaction.clone(), replaced_tx.clone(), move_to);
            // Update inserted transactions metric
            self.metrics.inserted_transactions.increment(1);
            let UpdateOutcome { promoted, discarded } = self.process_updates(updates);
            let replaced = replaced_tx.map(|(tx, _)| tx);
            // This transaction was moved to the pending pool.
            let res = if move_to.is_pending() {
                AddedTransaction::Pending(AddedPendingTransaction {
                    transaction,
                    promoted,
                    discarded,
                    replaced,
                })
            } else {
                // Determine the specific queued reason based on the transaction state
                let queued_reason = state.determine_queued_reason(move_to);
                AddedTransaction::Parked {
                    transaction,
                    subpool: move_to,
                    replaced,
                    queued_reason,
                }
            };
            // Update size metrics after adding and potentially moving transactions.
            self.update_size_metrics();
            Ok(res)
        }
        Err(err) => {
            // Update invalid transactions metric
            self.metrics.invalid_transactions.increment(1);
            // map the internal insertion error onto the public `PoolError` kinds
            match err {
                InsertErr::Underpriced { existing: _, transaction } => Err(PoolError::new(
                    *transaction.hash(),
                    PoolErrorKind::ReplacementUnderpriced,
                )),
                InsertErr::FeeCapBelowMinimumProtocolFeeCap { transaction, fee_cap } => {
                    Err(PoolError::new(
                        *transaction.hash(),
                        PoolErrorKind::FeeCapBelowMinimumProtocolFeeCap(fee_cap),
                    ))
                }
                InsertErr::ExceededSenderTransactionsCapacity { transaction } => {
                    Err(PoolError::new(
                        *transaction.hash(),
                        PoolErrorKind::SpammerExceededCapacity(transaction.sender()),
                    ))
                }
                InsertErr::TxGasLimitMoreThanAvailableBlockGas {
                    transaction,
                    block_gas_limit,
                    tx_gas_limit,
                } => Err(PoolError::new(
                    *transaction.hash(),
                    PoolErrorKind::InvalidTransaction(
                        InvalidPoolTransactionError::ExceedsGasLimit(
                            tx_gas_limit,
                            block_gas_limit,
                        ),
                    ),
                )),
                InsertErr::BlobTxHasNonceGap { transaction } => Err(PoolError::new(
                    *transaction.hash(),
                    PoolErrorKind::InvalidTransaction(
                        Eip4844PoolTransactionError::Eip4844NonceGap.into(),
                    ),
                )),
                InsertErr::Overdraft { transaction } => Err(PoolError::new(
                    *transaction.hash(),
                    PoolErrorKind::InvalidTransaction(InvalidPoolTransactionError::Overdraft {
                        cost: *transaction.cost(),
                        balance: on_chain_balance,
                    }),
                )),
                InsertErr::TxTypeConflict { transaction } => Err(PoolError::new(
                    *transaction.hash(),
                    PoolErrorKind::ExistingConflictingTransactionType(
                        transaction.sender(),
                        transaction.tx_type(),
                    ),
                )),
            }
        }
    }
}
/// Determines if the tx sender is delegated or has a pending delegation, and if so, ensures
/// they have at most the configured number of in-flight **executable** transactions (default at
/// most one), e.g. disallow stacked and nonce-gapped transactions from the account.
fn check_delegation_limit(
    &self,
    transaction: &ValidPoolTransaction<T::Transaction>,
    on_chain_nonce: u64,
    on_chain_code_hash: Option<B256>,
) -> Result<(), PoolError> {
    // The restriction only applies to accounts that are delegated (non-empty code hash) or
    // that have an in-flight authorization tracked by the pool.
    let is_delegated = on_chain_code_hash.is_some_and(|hash| hash != KECCAK_EMPTY);
    let has_pending_auth =
        self.all_transactions.auths.contains_key(&transaction.sender_id());
    if !is_delegated && !has_pending_auth {
        return Ok(())
    }
    let mut pending_ids = self.pending_pool.iter_txs_by_sender(transaction.sender_id());
    let Some(first) = pending_ids.next() else {
        // No executable transactions tracked for the sender: nonce-gapped transactions are
        // not supported for delegated accounts, but transactions can arrive out of order if
        // more slots are allowed. With the default slot limit of 1 this rejects any
        // transaction whose nonce is above the on-chain nonce.
        let nonce_gap_distance = transaction.nonce().saturating_sub(on_chain_nonce);
        if nonce_gap_distance >= self.config.max_inflight_delegated_slot_limit as u64 {
            return Err(PoolError::new(
                *transaction.hash(),
                PoolErrorKind::InvalidTransaction(InvalidPoolTransactionError::Eip7702(
                    Eip7702PoolTransactionError::OutOfOrderTxFromDelegated,
                )),
            ))
        }
        return Ok(())
    };
    // Count the tracked executable transactions, allowing replacement of an existing one.
    let mut in_flight = 0;
    for id in std::iter::once(first).chain(pending_ids) {
        if id == &transaction.transaction_id {
            // replacing an already tracked transaction is always supported
            return Ok(())
        }
        in_flight += 1;
    }
    if in_flight < self.config.max_inflight_delegated_slot_limit {
        // account still has an available slot
        return Ok(())
    }
    Err(PoolError::new(
        *transaction.hash(),
        PoolErrorKind::InvalidTransaction(InvalidPoolTransactionError::Eip7702(
            Eip7702PoolTransactionError::InflightTxLimitReached,
        )),
    ))
}
/// This verifies that the transaction complies with code authorization
/// restrictions brought by EIP-7702 transaction type:
/// 1. Any account with a deployed delegation or an in-flight authorization to deploy a
///    delegation will only be allowed a certain amount of transaction slots (default 1) instead
///    of the standard limit. This is due to the possibility of the account being sweeped by an
///    unrelated account.
/// 2. In case the pool is tracking a pending / queued transaction from a specific account, at
///    most one in-flight transaction is allowed; any additional delegated transactions from
///    that account will be rejected.
fn validate_auth(
    &self,
    transaction: &ValidPoolTransaction<T::Transaction>,
    on_chain_nonce: u64,
    on_chain_code_hash: Option<B256>,
) -> Result<(), PoolError> {
    // Ensure in-flight limit for delegated accounts or those with a pending authorization.
    self.check_delegation_limit(transaction, on_chain_nonce, on_chain_code_hash)?;
    // Every authority named by this transaction may have at most one transaction in flight.
    for sender_id in transaction.authority_ids.iter().flatten() {
        if self.all_transactions.txs_iter(*sender_id).take(2).count() > 1 {
            return Err(PoolError::new(
                *transaction.hash(),
                PoolErrorKind::InvalidTransaction(InvalidPoolTransactionError::Eip7702(
                    Eip7702PoolTransactionError::AuthorityReserved,
                )),
            ))
        }
    }
    Ok(())
}
/// Maintenance task to apply a series of updates.
///
/// This will move/discard the given transaction according to the `PoolUpdate`
fn process_updates(&mut self, updates: Vec<PoolUpdate>) -> UpdateOutcome<T::Transaction> {
    let mut outcome = UpdateOutcome::default();
    let mut removed_count = 0;
    for PoolUpdate { id, current, destination } in updates {
        match destination {
            Destination::Discard => {
                // drop the transaction from the pool and its subpool entirely
                if let Some(tx) = self.prune_transaction_by_id(&id) {
                    outcome.discarded.push(tx);
                }
                removed_count += 1;
            }
            Destination::Pool(dest) => {
                debug_assert_ne!(&dest, &current, "destination must be different");
                let moved = self.move_transaction(current, dest, &id);
                if dest.is_pending() {
                    if let Some(tx) = moved {
                        trace!(target: "txpool", hash=%tx.transaction.hash(), "Promoted transaction to pending");
                        outcome.promoted.push(tx);
                    }
                }
            }
        }
    }
    if removed_count > 0 {
        self.metrics.removed_transactions.increment(removed_count);
    }
    outcome
}
/// Moves a transaction from one sub pool to another.
///
/// This will remove the given transaction from one sub-pool and insert it into the other
/// sub-pool. Returns `None` if the transaction was not present in the `from` sub-pool.
fn move_transaction(
    &mut self,
    from: SubPool,
    to: SubPool,
    id: &TransactionId,
) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
    self.remove_from_subpool(from, id).map(|tx| {
        self.add_transaction_to_subpool(to, Arc::clone(&tx));
        tx
    })
}
/// Removes and returns all matching transactions from the pool.
///
/// Note: this does not advance any descendants of the removed transactions and does not apply
/// any additional updates.
pub(crate) fn remove_transactions(
    &mut self,
    hashes: Vec<TxHash>,
) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
    // hashes not currently in the pool are silently skipped
    let mut removed = Vec::with_capacity(hashes.len());
    for hash in hashes {
        if let Some(tx) = self.remove_transaction_by_hash(&hash) {
            removed.push(tx);
        }
    }
    self.update_size_metrics();
    removed
}
/// Removes and returns all matching transactions and their descendants from the pool.
///
/// For every hash that is found, the transaction itself and all of the sender's
/// higher-nonce transactions are removed and returned.
pub(crate) fn remove_transactions_and_descendants(
    &mut self,
    hashes: Vec<TxHash>,
) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
    let mut removed = Vec::new();
    for hash in hashes {
        if let Some(tx) = self.remove_transaction_by_hash(&hash) {
            // Copy the id before moving the Arc into the result vec; this avoids the
            // previous redundant `Arc` clone per removed transaction.
            let id = *tx.id();
            removed.push(tx);
            self.remove_descendants(&id, &mut removed);
        }
    }
    self.update_size_metrics();
    removed
}
/// Removes all transactions from the given sender.
pub(crate) fn remove_transactions_by_sender(
    &mut self,
    sender_id: SenderId,
) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
    // collect the sender's transactions first, then remove them one by one
    let removed: Vec<_> = self
        .get_transactions_by_sender(sender_id)
        .into_iter()
        .filter_map(|tx| self.remove_transaction(tx.id()))
        .collect();
    self.update_size_metrics();
    removed
}
/// Remove the transaction from the __entire__ pool.
///
/// This includes the total set of transactions and the subpool it currently resides in.
fn remove_transaction(
    &mut self,
    id: &TransactionId,
) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
    // first drop it from the total set, then from whatever subpool it was tracked in
    self.all_transactions
        .remove_transaction(id)
        .and_then(|(tx, pool)| self.remove_from_subpool(pool, tx.id()))
}
/// Remove the transaction from the entire pool via its hash. This includes the total set of
/// transactions and the subpool it currently resides in.
///
/// This treats the descendants as if this transaction is discarded and removing the transaction
/// reduces a nonce gap.
fn remove_transaction_by_hash(
    &mut self,
    tx_hash: &B256,
) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
    let (tx, pool) = self.all_transactions.remove_transaction_by_hash(tx_hash)?;
    // Removing (not mining) a transaction opens a nonce gap, so all descendants of the
    // removed transaction must be parked.
    let park_updates = self.all_transactions.park_descendant_transactions(tx.id());
    self.process_updates(park_updates);
    self.remove_from_subpool(pool, tx.id())
}
/// This removes the transaction from the pool and advances any descendant state inside the
/// subpool.
///
/// This is intended to be used when a transaction is included in a block,
/// [`Self::on_canonical_state_change`]. So its descendants will not change from pending to
/// parked, just like what we do in `remove_transaction_by_hash`.
fn prune_transaction_by_hash(
    &mut self,
    tx_hash: &B256,
) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
    // no parking of descendants here: pruning a mined transaction closes, not opens, a gap
    self.all_transactions
        .remove_transaction_by_hash(tx_hash)
        .and_then(|(tx, pool)| self.remove_from_subpool(pool, tx.id()))
}
/// This removes the transaction from the pool and advances any descendant state inside the
/// subpool.
///
/// This is intended to be used when we call [`Self::process_updates`].
fn prune_transaction_by_id(
    &mut self,
    tx_id: &TransactionId,
) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
    self.all_transactions
        .remove_transaction_by_id(tx_id)
        .and_then(|(tx, pool)| self.remove_from_subpool(pool, tx.id()))
}
/// Removes the transaction from the given pool.
///
/// Caution: this only removes the tx from the sub-pool and not from the pool itself
fn remove_from_subpool(
    &mut self,
    pool: SubPool,
    tx: &TransactionId,
) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
    // dispatch removal to the concrete sub-pool the transaction currently resides in
    let removed = match pool {
        SubPool::Queued => self.queued_pool.remove_transaction(tx),
        SubPool::Pending => self.pending_pool.remove_transaction(tx),
        SubPool::BaseFee => self.basefee_pool.remove_transaction(tx),
        SubPool::Blob => self.blob_pool.remove_transaction(tx),
    };

    if let Some(tx) = &removed {
        // We trace here instead of in subpool structs directly, because the `ParkedPool` type
        // is generic and it would not be possible to distinguish whether a transaction is
        // being removed from the `BaseFee` pool, or the `Queued` pool.
        trace!(target: "txpool", hash=%tx.transaction.hash(), ?pool, "Removed transaction from a subpool");
    }

    removed
}
/// Removes _only_ the descendants of the given transaction from the __entire__ pool.
///
/// All removed transactions are added to the `removed` vec.
fn remove_descendants(
    &mut self,
    tx: &TransactionId,
    removed: &mut Vec<Arc<ValidPoolTransaction<T::Transaction>>>,
) {
    let mut current = *tx;
    // Repeatedly pop the sender's next (lowest-nonce) descendant until none remain.
    while let Some(descendant_id) =
        self.all_transactions.descendant_txs_exclusive(&current).next().map(|(id, _)| *id)
    {
        if let Some(tx) = self.remove_transaction(&descendant_id) {
            removed.push(tx);
        }
        current = descendant_id;
    }
}
/// Inserts the transaction into the given sub-pool.
fn add_transaction_to_subpool(
    &mut self,
    pool: SubPool,
    tx: Arc<ValidPoolTransaction<T::Transaction>>,
) {
    // We trace here instead of in structs directly, because the `ParkedPool` type is
    // generic and it would not be possible to distinguish whether a transaction is being
    // added to the `BaseFee` pool, or the `Queued` pool.
    trace!(target: "txpool", hash=%tx.transaction.hash(), ?pool, "Adding transaction to a subpool");
    match pool {
        // the pending pool additionally needs the current pending base fee for its ordering
        SubPool::Pending => {
            self.pending_pool.add_transaction(tx, self.all_transactions.pending_fees.base_fee)
        }
        SubPool::Queued => self.queued_pool.add_transaction(tx),
        SubPool::BaseFee => self.basefee_pool.add_transaction(tx),
        SubPool::Blob => self.blob_pool.add_transaction(tx),
    }
}
/// Inserts the transaction into the given sub-pool.
/// Optionally, removes the replacement transaction.
fn add_new_transaction(
    &mut self,
    transaction: Arc<ValidPoolTransaction<T::Transaction>>,
    replaced: Option<(Arc<ValidPoolTransaction<T::Transaction>>, SubPool)>,
    pool: SubPool,
) {
    // evict the transaction this one replaces, if any, from its old sub-pool
    if let Some((prev_tx, prev_pool)) = replaced {
        self.remove_from_subpool(prev_pool, prev_tx.id());
    }
    self.add_transaction_to_subpool(pool, transaction)
}
/// Ensures that the transactions in the sub-pools are within the given bounds.
///
/// If the current size exceeds the given bounds, the worst transactions are evicted from the
/// pool and returned.
///
/// This returns all transactions that were removed from the entire pool.
pub(crate) fn discard_worst(&mut self) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
    let mut removed = Vec::new();

    // Helper macro that discards the worst transactions for the pools.
    // For each `limit => (pool, metric)` pair it repeatedly truncates the sub-pool until it
    // is within its configured limit, removes the evicted transactions from the total set,
    // and also drops their descendants (which would otherwise be nonce-gapped).
    //
    // NOTE: `$removed` and the literal `removed` below refer to the same local, since the
    // macro is defined in this function's scope.
    macro_rules! discard_worst {
        ($this:ident, $removed:ident, [$($limit:ident => ($pool:ident, $metric:ident)),* $(,)*]) => {
            $(
                while $this.$pool.exceeds(&$this.config.$limit)
                {
                    trace!(
                        target: "txpool",
                        "discarding transactions from {}, limit: {:?}, curr size: {}, curr len: {}",
                        stringify!($pool),
                        $this.config.$limit,
                        $this.$pool.size(),
                        $this.$pool.len(),
                    );

                    // 1. first remove the worst transaction from the subpool
                    let removed_from_subpool = $this.$pool.truncate_pool($this.config.$limit.clone());

                    trace!(
                        target: "txpool",
                        "removed {} transactions from {}, limit: {:?}, curr size: {}, curr len: {}",
                        removed_from_subpool.len(),
                        stringify!($pool),
                        $this.config.$limit,
                        $this.$pool.size(),
                        $this.$pool.len()
                    );
                    $this.metrics.$metric.increment(removed_from_subpool.len() as u64);

                    // 2. remove all transactions from the total set
                    for tx in removed_from_subpool {
                        $this.all_transactions.remove_transaction(tx.id());

                        let id = *tx.id();

                        // keep track of removed transaction
                        removed.push(tx);

                        // 3. remove all its descendants from the entire pool
                        $this.remove_descendants(&id, &mut $removed);
                    }
                }
            )*
        };
    }

    // eviction order: pending, basefee, blob, then queued
    discard_worst!(
        self, removed, [
            pending_limit => (pending_pool, pending_transactions_evicted),
            basefee_limit => (basefee_pool, basefee_transactions_evicted),
            blob_limit => (blob_pool, blob_transactions_evicted),
            queued_limit => (queued_pool, queued_transactions_evicted),
        ]
    );

    removed
}
/// Number of transactions in the entire pool, i.e. the size of the total set
/// (independent of which sub-pool each transaction currently resides in).
pub(crate) fn len(&self) -> usize {
    self.all_transactions.len()
}
/// Whether the pool is empty, i.e. the total set contains no transactions.
pub(crate) fn is_empty(&self) -> bool {
    self.all_transactions.is_empty()
}
/// Asserts all invariants of the pool's:
///
/// - All maps are bijections (`by_id`, `by_hash`)
/// - Total size is equal to the sum of all sub-pools
///
/// # Panics
/// if any invariant is violated
#[cfg(any(test, feature = "test-utils"))]
pub fn assert_invariants(&self) {
    // the total set must account for exactly the transactions held by the four sub-pools
    let size = self.size();
    let subpool_sum = size.pending + size.basefee + size.queued + size.blob;
    assert_eq!(
        size.total, subpool_sum,
        "total size must be equal to the sum of all sub-pools, basefee:{}, pending:{}, queued:{}, blob:{}",
        size.basefee, size.pending, size.queued, size.blob
    );

    // delegate to the per-structure invariant checks
    self.all_transactions.assert_invariants();
    self.pending_pool.assert_invariants();
    self.basefee_pool.assert_invariants();
    self.queued_pool.assert_invariants();
    self.blob_pool.assert_invariants();
}
}
#[cfg(any(test, feature = "test-utils"))]
impl TxPool<crate::test_utils::MockOrdering> {
    /// Creates a mock instance for testing, backed by the default mock ordering and the
    /// default pool configuration.
    pub fn mock() -> Self {
        let ordering = crate::test_utils::MockOrdering::default();
        Self::new(ordering, PoolConfig::default())
    }
}
// In test builds, verify all pool invariants whenever a pool is dropped so that any
// test that corrupted internal state fails loudly at teardown.
#[cfg(test)]
impl<T: TransactionOrdering> Drop for TxPool<T> {
    fn drop(&mut self) {
        self.assert_invariants();
    }
}
impl<T: TransactionOrdering> TxPool<T> {
    /// Returns a reference to the pending sub-pool (transactions ready for inclusion).
    pub const fn pending(&self) -> &PendingPool<T> {
        &self.pending_pool
    }

    /// Returns a reference to the base-fee sub-pool (parked until the base fee drops).
    pub const fn base_fee(&self) -> &ParkedPool<BasefeeOrd<T::Transaction>> {
        &self.basefee_pool
    }

    /// Returns a reference to the queued sub-pool (parked, e.g. due to nonce gaps).
    pub const fn queued(&self) -> &ParkedPool<QueuedOrd<T::Transaction>> {
        &self.queued_pool
    }
}
// Debug output intentionally only shows the configuration; the transaction sets are
// elided via `finish_non_exhaustive`.
impl<T: TransactionOrdering> fmt::Debug for TxPool<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("TxPool").field("config", &self.config).finish_non_exhaustive()
    }
}
/// Container for _all_ transactions in the pool.
///
/// This is the sole entrypoint that's guarding all sub-pools, all sub-pool actions are always
/// derived from this set. Updates returned from this type must be applied to the sub-pools.
pub(crate) struct AllTransactions<T: PoolTransaction> {
    /// Minimum base fee required by the protocol.
    ///
    /// Transactions with a lower base fee will never be included by the chain
    minimal_protocol_basefee: u64,
    /// The max gas limit of the block
    block_gas_limit: u64,
    /// Max number of executable transaction slots guaranteed per account
    max_account_slots: usize,
    /// _All_ transactions identified by their hash.
    by_hash: HashMap<TxHash, Arc<ValidPoolTransaction<T>>>,
    /// _All_ transactions in the pool sorted by their sender and nonce pair.
    txs: BTreeMap<TransactionId, PoolInternalTransaction<T>>,
    /// Contains the currently known information about the senders.
    sender_info: FxHashMap<SenderId, SenderInfo>,
    /// Tracks the number of transactions by sender that are currently in the pool.
    tx_counter: FxHashMap<SenderId, usize>,
    /// The current block number the pool keeps track of.
    last_seen_block_number: u64,
    /// The current block hash the pool keeps track of.
    last_seen_block_hash: B256,
    /// Expected blob and base fee for the pending block.
    pending_fees: PendingFees,
    /// Configured price bump settings for replacements
    price_bumps: PriceBumpConfig,
    /// How to handle [`TransactionOrigin::Local`](crate::TransactionOrigin) transactions.
    local_transactions_config: LocalTransactionConfig,
    /// All accounts with a pooled authorization, mapping each authority to the hashes of the
    /// EIP-7702 transactions that carry an authorization for it.
    auths: FxHashMap<SenderId, HashSet<TxHash>>,
    /// All Transactions metrics
    metrics: AllTransactionsMetrics,
}
impl<T: PoolTransaction> AllTransactions<T> {
/// Create a new instance from the given pool configuration.
///
/// All fields not covered by the config (maps, counters, block tracking) start at their
/// default (empty/zero) values.
fn new(config: &PoolConfig) -> Self {
    Self {
        minimal_protocol_basefee: config.minimal_protocol_basefee,
        block_gas_limit: config.gas_limit,
        max_account_slots: config.max_account_slots,
        price_bumps: config.price_bumps,
        local_transactions_config: config.local_transactions_config.clone(),
        ..Default::default()
    }
}
/// Returns an iterator over all _unique_ hashes in the pool (copied out of the hash map's
/// key set).
#[expect(dead_code)]
pub(crate) fn hashes_iter(&self) -> impl Iterator<Item = TxHash> + '_ {
    self.by_hash.keys().copied()
}
/// Returns an iterator over all transactions in the pool, in unspecified (hash map) order.
pub(crate) fn transactions_iter(
    &self,
) -> impl Iterator<Item = &Arc<ValidPoolTransaction<T>>> + '_ {
    self.by_hash.values()
}
/// Returns `true` if a transaction with the given hash is already included in this pool.
pub(crate) fn contains(&self, tx_hash: &TxHash) -> bool {
    self.by_hash.contains_key(tx_hash)
}
/// Returns the internal transaction with additional metadata (state flags, sub-pool,
/// cumulative cost), or `None` if the id is unknown.
pub(crate) fn get(&self, id: &TransactionId) -> Option<&PoolInternalTransaction<T>> {
    self.txs.get(id)
}
/// Increments the transaction counter for the sender and the matching metric.
pub(crate) fn tx_inc(&mut self, sender: SenderId) {
    // start at 0 for a previously unseen sender, then bump
    *self.tx_counter.entry(sender).or_default() += 1;
    self.metrics.all_transactions_by_all_senders.increment(1.0);
}
/// Decrements the transaction counter for the sender.
///
/// When the counter reaches zero the counter entry and the cached sender info are dropped.
/// A sender without a counter entry is a no-op.
pub(crate) fn tx_decr(&mut self, sender: SenderId) {
    if let hash_map::Entry::Occupied(mut entry) = self.tx_counter.entry(sender) {
        if *entry.get() <= 1 {
            // last transaction of this sender: drop all per-sender bookkeeping
            entry.remove();
            self.sender_info.remove(&sender);
        } else {
            *entry.get_mut() -= 1;
        }
        self.metrics.all_transactions_by_all_senders.decrement(1.0);
    }
}
/// Updates the block specific info: last seen block, gas limit and the expected pending
/// base/blob fees (including their gauges).
fn set_block_info(&mut self, block_info: BlockInfo) {
    let BlockInfo {
        block_gas_limit,
        last_seen_block_hash,
        last_seen_block_number,
        pending_basefee,
        pending_blob_fee,
    } = block_info;
    self.block_gas_limit = block_gas_limit;
    self.last_seen_block_number = last_seen_block_number;
    self.last_seen_block_hash = last_seen_block_hash;

    self.pending_fees.base_fee = pending_basefee;
    self.metrics.base_fee.set(pending_basefee as f64);

    // the blob fee is only tracked once it is known (post-cancun)
    if let Some(blob_fee) = pending_blob_fee {
        self.pending_fees.blob_fee = blob_fee;
        self.metrics.blob_base_fee.set(blob_fee as f64);
    }
}
/// Updates the size metrics (gauges) for both internal maps of the total set.
pub(crate) fn update_size_metrics(&self) {
    self.metrics.all_transactions_by_hash.set(self.by_hash.len() as f64);
    self.metrics.all_transactions_by_id.set(self.txs.len() as f64);
}
/// Rechecks all transactions in the pool against the changes.
///
/// Possible changes are:
///
/// For all transactions:
/// - decreased basefee: promotes from `basefee` to `pending` sub-pool.
/// - increased basefee: demotes from `pending` to `basefee` sub-pool.
///
/// Individually:
/// - decreased sender allowance: demote from (`basefee`|`pending`) to `queued`.
/// - increased sender allowance: promote from `queued` to
///   - `pending` if basefee condition is met.
///   - `basefee` if basefee condition is _not_ met.
///
/// Additionally, this will also update the `cumulative_gas_used` for transactions of a sender
/// that got transaction included in the block.
pub(crate) fn update(
    &mut self,
    changed_accounts: &FxHashMap<SenderId, SenderInfo>,
) -> Vec<PoolUpdate> {
    // pre-allocate a few updates
    let mut updates = Vec::with_capacity(64);

    let mut iter = self.txs.iter_mut().peekable();

    // Loop over all individual senders and update all affected transactions.
    // One sender may have up to `max_account_slots` transactions here, which means, worst case
    // `max_accounts_slots` need to be updated, for example if the first transaction is blocked
    // due to too low base fee.
    // However, we don't have to necessarily check every transaction of a sender. If no updates
    // are possible (nonce gap) then we can skip to the next sender.

    // The `unique_sender` loop will process the first transaction of all senders, update its
    // state and internally update all consecutive transactions
    'transactions: while let Some((id, tx)) = iter.next() {
        // Skips the rest of the current sender's transactions; the `txs` BTreeMap is ordered
        // by (sender, nonce), so all of a sender's transactions are contiguous.
        macro_rules! next_sender {
            ($iter:ident) => {
                'this: while let Some((peek, _)) = iter.peek() {
                    if peek.sender != id.sender {
                        break 'this
                    }
                    iter.next();
                }
            };
        }
        // track the balance if the sender was changed in the block
        // check if this is a changed account
        let changed_balance = if let Some(info) = changed_accounts.get(&id.sender) {
            // discard all transactions with a nonce lower than the current state nonce
            if id.nonce < info.state_nonce {
                updates.push(PoolUpdate {
                    id: *tx.transaction.id(),
                    current: tx.subpool,
                    destination: Destination::Discard,
                });
                continue 'transactions
            }

            let ancestor = TransactionId::ancestor(id.nonce, info.state_nonce, id.sender);
            // If there's no ancestor then this is the next transaction.
            if ancestor.is_none() {
                tx.state.insert(TxState::NO_NONCE_GAPS);
                tx.state.insert(TxState::NO_PARKED_ANCESTORS);
                tx.cumulative_cost = U256::ZERO;
                if tx.transaction.cost() > &info.balance {
                    // sender lacks sufficient funds to pay for this transaction
                    tx.state.remove(TxState::ENOUGH_BALANCE);
                } else {
                    tx.state.insert(TxState::ENOUGH_BALANCE);
                }
            }

            Some(&info.balance)
        } else {
            None
        };

        // If there's a nonce gap, we can shortcircuit, because there's nothing to update yet.
        if tx.state.has_nonce_gap() {
            next_sender!(iter);
            continue 'transactions
        }

        // Since this is the first transaction of the sender, it has no parked ancestors
        tx.state.insert(TxState::NO_PARKED_ANCESTORS);

        // Update the first transaction of this sender.
        Self::update_tx_base_fee(self.pending_fees.base_fee, tx);
        // Track if the transaction's sub-pool changed.
        Self::record_subpool_update(&mut updates, tx);

        // Track blocking transactions.
        let mut has_parked_ancestor = !tx.state.is_pending();

        let mut cumulative_cost = tx.next_cumulative_cost();

        // the next expected nonce after this transaction: nonce + 1
        let mut next_nonce_in_line = tx.transaction.nonce().saturating_add(1);

        // Update all consecutive transaction of this sender
        while let Some((peek, tx)) = iter.peek_mut() {
            if peek.sender != id.sender {
                // Found the next sender we need to check
                continue 'transactions
            }

            if tx.transaction.nonce() == next_nonce_in_line {
                // no longer nonce gapped
                tx.state.insert(TxState::NO_NONCE_GAPS);
            } else {
                // can short circuit if there's still a nonce gap
                next_sender!(iter);
                continue 'transactions
            }

            // update for next iteration of this sender's loop
            next_nonce_in_line = next_nonce_in_line.saturating_add(1);

            // update cumulative cost
            tx.cumulative_cost = cumulative_cost;
            // Update for next transaction
            cumulative_cost = tx.next_cumulative_cost();

            // If the account changed in the block, check the balance.
            if let Some(changed_balance) = changed_balance {
                if &cumulative_cost > changed_balance {
                    // sender lacks sufficient funds to pay for this transaction
                    tx.state.remove(TxState::ENOUGH_BALANCE);
                } else {
                    tx.state.insert(TxState::ENOUGH_BALANCE);
                }
            }

            // Update ancestor condition.
            if has_parked_ancestor {
                tx.state.remove(TxState::NO_PARKED_ANCESTORS);
            } else {
                tx.state.insert(TxState::NO_PARKED_ANCESTORS);
            }
            has_parked_ancestor = !tx.state.is_pending();

            // Update and record sub-pool changes.
            Self::update_tx_base_fee(self.pending_fees.base_fee, tx);
            Self::record_subpool_update(&mut updates, tx);

            // Advance iterator
            iter.next();
        }
    }

    updates
}
/// This will update the transaction's `subpool` based on its state.
///
/// If the sub-pool derived from the state differs from the current pool, it will record a
/// `PoolUpdate` for this transaction to move it to the new sub-pool.
fn record_subpool_update(updates: &mut Vec<PoolUpdate>, tx: &mut PoolInternalTransaction<T>) {
    let previous_pool = tx.subpool;
    // derive the target sub-pool from the transaction's current state flags
    tx.subpool = tx.state.into();
    if previous_pool == tx.subpool {
        // nothing changed, no move necessary
        return
    }
    updates.push(PoolUpdate {
        id: *tx.transaction.id(),
        current: previous_pool,
        destination: tx.subpool.into(),
    })
}
/// Rechecks the transaction's dynamic fee condition against the given pending base fee and
/// sets or clears the `ENOUGH_FEE_CAP_BLOCK` state flag accordingly.
fn update_tx_base_fee(pending_block_base_fee: u64, tx: &mut PoolInternalTransaction<T>) {
    // the fee cap must cover the expected base fee of the pending block
    if tx.transaction.max_fee_per_gas() >= pending_block_base_fee as u128 {
        tx.state.insert(TxState::ENOUGH_FEE_CAP_BLOCK);
    } else {
        tx.state.remove(TxState::ENOUGH_FEE_CAP_BLOCK);
    }
}
/// Returns an iterator over all transactions for the given sender, starting with the lowest
/// nonce.
///
/// Relies on `txs` being ordered by (sender, nonce): the range starts at the sender's first
/// possible id and `take_while` stops at the first entry of a different sender.
pub(crate) fn txs_iter(
    &self,
    sender: SenderId,
) -> impl Iterator<Item = (&TransactionId, &PoolInternalTransaction<T>)> + '_ {
    self.txs
        .range((sender.start_bound(), Unbounded))
        .take_while(move |(other, _)| sender == other.sender)
}
/// Returns a mutable iterator over all transactions for the given sender, starting with the
/// lowest nonce. Mutable counterpart of [`Self::txs_iter`].
#[cfg(test)]
#[expect(dead_code)]
pub(crate) fn txs_iter_mut(
    &mut self,
    sender: SenderId,
) -> impl Iterator<Item = (&TransactionId, &mut PoolInternalTransaction<T>)> + '_ {
    self.txs
        .range_mut((sender.start_bound(), Unbounded))
        .take_while(move |(other, _)| sender == other.sender)
}
/// Returns all transactions that _follow_ after the given id and have the same sender.
///
/// NOTE: The range is _exclusive_: the transaction belonging to `id` itself is not yielded.
pub(crate) fn descendant_txs_exclusive<'a, 'b: 'a>(
    &'a self,
    id: &'b TransactionId,
) -> impl Iterator<Item = (&'a TransactionId, &'a PoolInternalTransaction<T>)> + 'a {
    self.txs.range((Excluded(id), Unbounded)).take_while(|(other, _)| id.sender == other.sender)
}
/// Returns all transactions that _follow_ after the given id but have the same sender.
///
/// NOTE: The range is _inclusive_: if a transaction with the given `id` exists in the pool,
/// it will be the first value.
pub(crate) fn descendant_txs_inclusive<'a, 'b: 'a>(
    &'a self,
    id: &'b TransactionId,
) -> impl Iterator<Item = (&'a TransactionId, &'a PoolInternalTransaction<T>)> + 'a {
    self.txs.range(id..).take_while(|(other, _)| id.sender == other.sender)
}
/// Returns all mutable transactions that _follow_ after the given id but have the same sender.
///
/// NOTE: The range is _inclusive_: if a transaction with the given `id` exists in the pool,
/// it will be the first value.
pub(crate) fn descendant_txs_mut<'a, 'b: 'a>(
    &'a mut self,
    id: &'b TransactionId,
) -> impl Iterator<Item = (&'a TransactionId, &'a mut PoolInternalTransaction<T>)> + 'a {
    self.txs.range_mut(id..).take_while(|(other, _)| id.sender == other.sender)
}
/// Removes a transaction from the set using its hash.
///
/// Removes the entry from both maps (`by_hash` first, then `txs`), clears any pooled
/// authorizations the transaction carried and decrements the sender's counter. Returns the
/// removed transaction together with the sub-pool it was tracked in.
pub(crate) fn remove_transaction_by_hash(
    &mut self,
    tx_hash: &B256,
) -> Option<(Arc<ValidPoolTransaction<T>>, SubPool)> {
    let tx = self.by_hash.remove(tx_hash)?;
    let internal = self.txs.remove(&tx.transaction_id)?;
    self.remove_auths(&internal);
    // decrement the counter for the sender.
    self.tx_decr(tx.sender_id());
    Some((tx, internal.subpool))
}
/// Removes a transaction from the set using its id.
///
/// This is intended for processing updates after state changes. Mirrors
/// [`Self::remove_transaction_by_hash`] but keyed by (sender, nonce) id: removes from `txs`
/// first, then `by_hash`, clears pooled authorizations and decrements the sender counter.
pub(crate) fn remove_transaction_by_id(
    &mut self,
    tx_id: &TransactionId,
) -> Option<(Arc<ValidPoolTransaction<T>>, SubPool)> {
    let internal = self.txs.remove(tx_id)?;
    let tx = self.by_hash.remove(internal.transaction.hash())?;
    self.remove_auths(&internal);
    // decrement the counter for the sender.
    self.tx_decr(tx.sender_id());
    Some((tx, internal.subpool))
}
/// If a tx is removed (_not_ mined), all descendants are set to parked due to the nonce gap.
///
/// Returns the `PoolUpdate`s for every descendant whose sub-pool assignment changed.
pub(crate) fn park_descendant_transactions(
    &mut self,
    tx_id: &TransactionId,
) -> Vec<PoolUpdate> {
    let mut updates = Vec::new();
    for (id, tx) in self.descendant_txs_mut(tx_id) {
        let previous_pool = tx.subpool;
        // the removed ancestor leaves a nonce gap for every descendant
        tx.state.remove(TxState::NO_NONCE_GAPS);
        // re-derive the sub-pool from the new state
        tx.subpool = tx.state.into();
        // only emit an update if the assignment actually changed
        if tx.subpool != previous_pool {
            updates.push(PoolUpdate {
                id: *id,
                current: previous_pool,
                destination: tx.subpool.into(),
            })
        }
    }
    updates
}
/// Removes a transaction from the set.
///
/// This will _not_ trigger additional updates, because descendants without nonce gaps are
/// already in the pending pool, and this transaction will be the first transaction of the
/// sender in this pool.
pub(crate) fn remove_transaction(
    &mut self,
    id: &TransactionId,
) -> Option<(Arc<ValidPoolTransaction<T>>, SubPool)> {
    let internal = self.txs.remove(id)?;
    // per-sender bookkeeping: counter and any pooled authorizations
    self.tx_decr(internal.transaction.sender_id());
    self.remove_auths(&internal);
    // also drop the hash-indexed entry and pair the tx with its last known sub-pool
    self.by_hash.remove(internal.transaction.hash()).map(|tx| (tx, internal.subpool))
}
/// Removes any pending auths for the given transaction.
///
/// This is a noop for non EIP-7702 transactions.
fn remove_auths(&mut self, tx: &PoolInternalTransaction<T>) {
let Some(auths) = &tx.transaction.authority_ids else { return };
let tx_hash = tx.transaction.hash();
for auth in auths {
if let Some(list) = self.auths.get_mut(auth) {
list.remove(tx_hash);
if list.is_empty() {
self.auths.remove(auth);
}
}
}
}
/// Checks if the given transaction's type conflicts with an existing transaction.
///
/// See also [`ValidPoolTransaction::tx_type_conflicts_with`].
///
/// Caution: This assumes that mutually exclusive invariant is always true for the same sender.
#[inline]
fn contains_conflicting_transaction(&self, tx: &ValidPoolTransaction<T>) -> bool {
    // checking the sender's first pooled transaction suffices under the exclusivity invariant
    match self.txs_iter(tx.transaction_id.sender).next() {
        Some((_, existing)) => tx.tx_type_conflicts_with(&existing.transaction),
        None => false,
    }
}
/// Additional checks for a new transaction.
///
/// This will enforce all additional rules in the context of this pool, such as:
/// - Spam protection: reject new non-local transaction from a sender that exhausted its slot
///   capacity.
/// - Gas limit: reject transactions if they exceed a block's maximum gas.
/// - Ensures transaction types are not conflicting for the sender: blob vs normal transactions
///   are mutually exclusive for the same sender.
fn ensure_valid(
    &self,
    transaction: ValidPoolTransaction<T>,
    on_chain_nonce: u64,
) -> Result<ValidPoolTransaction<T>, InsertErr<T>> {
    // Spam protection only applies to non-local transactions.
    let is_local =
        self.local_transactions_config.is_local(transaction.origin, transaction.sender_ref());
    if !is_local {
        let pooled_by_sender =
            self.tx_counter.get(&transaction.sender_id()).copied().unwrap_or_default();
        // Reject when the sender's slot capacity is exhausted, unless the transaction's
        // nonce matches the on-chain nonce (always allow the next executable tx through).
        if pooled_by_sender >= self.max_account_slots && transaction.nonce() > on_chain_nonce {
            return Err(InsertErr::ExceededSenderTransactionsCapacity {
                transaction: Arc::new(transaction),
            })
        }
    }

    // A transaction that can never fit in a block is useless.
    let tx_gas_limit = transaction.gas_limit();
    if tx_gas_limit > self.block_gas_limit {
        return Err(InsertErr::TxGasLimitMoreThanAvailableBlockGas {
            block_gas_limit: self.block_gas_limit,
            tx_gas_limit,
            transaction: Arc::new(transaction),
        })
    }

    // blob vs non-blob transactions are mutually exclusive for the same sender
    if self.contains_conflicting_transaction(&transaction) {
        return Err(InsertErr::TxTypeConflict { transaction: Arc::new(transaction) })
    }

    Ok(transaction)
}
/// Enforces additional constraints for blob transactions before attempting to insert:
/// - new blob transactions must not have any nonce gaps
/// - blob transactions cannot go into overdraft
/// - replacement blob transaction with a higher fee must not shift an already propagated
///   descending blob transaction into overdraft
fn ensure_valid_blob_transaction(
    &self,
    new_blob_tx: ValidPoolTransaction<T>,
    on_chain_balance: U256,
    ancestor: Option<TransactionId>,
) -> Result<ValidPoolTransaction<T>, InsertErr<T>> {
    if let Some(ancestor) = ancestor {
        let Some(ancestor_tx) = self.txs.get(&ancestor) else {
            // ancestor tx is missing, so we can't insert the new blob
            self.metrics.blob_transactions_nonce_gaps.increment(1);
            return Err(InsertErr::BlobTxHasNonceGap { transaction: Arc::new(new_blob_tx) })
        };
        if ancestor_tx.state.has_nonce_gap() {
            // the ancestor transaction already has a nonce gap, so we can't insert the new
            // blob
            self.metrics.blob_transactions_nonce_gaps.increment(1);
            return Err(InsertErr::BlobTxHasNonceGap { transaction: Arc::new(new_blob_tx) })
        }

        // the max cost executing this transaction requires: cost of all lower-nonce txs of
        // this sender plus the new blob tx itself
        let mut cumulative_cost = ancestor_tx.next_cumulative_cost() + new_blob_tx.cost();

        // check if the new blob would go into overdraft
        if cumulative_cost > on_chain_balance {
            // the transaction would go into overdraft
            return Err(InsertErr::Overdraft { transaction: Arc::new(new_blob_tx) })
        }

        // ensure that a replacement would not shift already propagated blob transactions into
        // overdraft
        let id = new_blob_tx.transaction_id;
        let mut descendants = self.descendant_txs_inclusive(&id).peekable();
        // the inclusive range yields the tx with the same id first iff this is a replacement
        if let Some((maybe_replacement, _)) = descendants.peek() &&
            **maybe_replacement == new_blob_tx.transaction_id
        {
            // replacement transaction
            descendants.next();

            // check if any of descendant blob transactions should be shifted into overdraft
            for (_, tx) in descendants {
                cumulative_cost += tx.transaction.cost();
                if tx.transaction.is_eip4844() && cumulative_cost > on_chain_balance {
                    // the transaction would shift
                    return Err(InsertErr::Overdraft { transaction: Arc::new(new_blob_tx) })
                }
            }
        }
    } else if new_blob_tx.cost() > &on_chain_balance {
        // no ancestor means this is the sender's next executable tx; its own cost alone
        // must not exceed the balance
        return Err(InsertErr::Overdraft { transaction: Arc::new(new_blob_tx) })
    }

    Ok(new_blob_tx)
}
/// Inserts a new _valid_ transaction into the pool.
///
/// If the transaction already exists, it will be replaced if not underpriced.
/// Returns info to which sub-pool the transaction should be moved.
/// Also returns a set of pool updates triggered by this insert, that need to be handled by the
/// caller.
///
/// These can include:
/// - closing nonce gaps of descendant transactions
/// - enough balance updates
///
/// Note: For EIP-4844 blob transactions additional constraints are enforced:
/// - new blob transactions must not have any nonce gaps
/// - blob transactions cannot go into overdraft
///
/// ## Transaction type Exclusivity
///
/// The pool enforces exclusivity of eip-4844 blob vs non-blob transactions on a per sender
/// basis:
/// - If the pool already includes a blob transaction from the `transaction`'s sender, then the
/// `transaction` must also be a blob transaction
/// - If the pool already includes a non-blob transaction from the `transaction`'s sender, then
/// the `transaction` must _not_ be a blob transaction.
///
/// In other words, the presence of blob transactions exclude non-blob transactions and vice
/// versa.
///
/// ## Replacements
///
/// The replacement candidate must satisfy given price bump constraints: replacement candidate
/// must not be underpriced
pub(crate) fn insert_tx(
    &mut self,
    transaction: ValidPoolTransaction<T>,
    on_chain_balance: U256,
    on_chain_nonce: u64,
) -> InsertResult<T> {
    // Transactions with a nonce below the sender's on-chain nonce must have been
    // filtered out before reaching this point; violating this is an internal bug.
    assert!(on_chain_nonce <= transaction.nonce(), "Invalid transaction");
    // Run pre-insertion validity checks; may reject the transaction outright.
    let mut transaction = self.ensure_valid(transaction, on_chain_nonce)?;
    let inserted_tx_id = *transaction.id();
    let mut state = TxState::default();
    // Running total of the sender's tx costs, threaded through the descendant scan below.
    let mut cumulative_cost = U256::ZERO;
    // Subpool moves for *other* transactions caused by this insertion.
    let mut updates = Vec::new();
    // Current tx does not exceed block gas limit after ensure_valid check
    state.insert(TxState::NOT_TOO_MUCH_GAS);
    // identifier of the ancestor transaction, will be None if the transaction is the next tx of
    // the sender
    let ancestor = TransactionId::ancestor(
        transaction.transaction.nonce(),
        on_chain_nonce,
        inserted_tx_id.sender,
    );
    // before attempting to insert a blob transaction, we need to ensure that additional
    // constraints are met that only apply to blob transactions
    if transaction.is_eip4844() {
        state.insert(TxState::BLOB_TRANSACTION);
        // blob txs must be gapless and must not overdraft the sender's balance
        transaction =
            self.ensure_valid_blob_transaction(transaction, on_chain_balance, ancestor)?;
        let blob_fee_cap = transaction.transaction.max_fee_per_blob_gas().unwrap_or_default();
        if blob_fee_cap >= self.pending_fees.blob_fee {
            state.insert(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK);
        }
    } else {
        // Non-EIP4844 transaction always satisfy the blob fee cap condition
        state.insert(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK);
    }
    let transaction = Arc::new(transaction);
    // If there's no ancestor tx then this is the next transaction.
    if ancestor.is_none() {
        state.insert(TxState::NO_NONCE_GAPS);
        state.insert(TxState::NO_PARKED_ANCESTORS);
    }
    // Check dynamic fee: the fee cap must clear the protocol minimum, and clearing the
    // pending base fee determines pending-subpool eligibility.
    let fee_cap = transaction.max_fee_per_gas();
    if fee_cap < self.minimal_protocol_basefee as u128 {
        return Err(InsertErr::FeeCapBelowMinimumProtocolFeeCap { transaction, fee_cap })
    }
    if fee_cap >= self.pending_fees.base_fee as u128 {
        state.insert(TxState::ENOUGH_FEE_CAP_BLOCK);
    }
    // placeholder for the replaced transaction, if any
    let mut replaced_tx = None;
    let pool_tx = PoolInternalTransaction {
        transaction: Arc::clone(&transaction),
        subpool: state.into(),
        state,
        cumulative_cost,
    };
    // try to insert the transaction
    match self.txs.entry(*transaction.id()) {
        Entry::Vacant(entry) => {
            // Insert the transaction in both maps
            self.by_hash.insert(*pool_tx.transaction.hash(), pool_tx.transaction.clone());
            entry.insert(pool_tx);
        }
        Entry::Occupied(mut entry) => {
            // Transaction with the same nonce already exists: replacement candidate
            let existing_transaction = entry.get().transaction.as_ref();
            let maybe_replacement = transaction.as_ref();
            // Ensure the new transaction is not underpriced
            if existing_transaction.is_underpriced(maybe_replacement, &self.price_bumps) {
                return Err(InsertErr::Underpriced {
                    transaction: pool_tx.transaction,
                    existing: *entry.get().transaction.hash(),
                })
            }
            // swap in the new transaction and fix up the secondary indexes
            // (hash map and authority tracking) for the evicted entry
            let new_hash = *pool_tx.transaction.hash();
            let new_transaction = pool_tx.transaction.clone();
            let replaced = entry.insert(pool_tx);
            self.by_hash.remove(replaced.transaction.hash());
            self.by_hash.insert(new_hash, new_transaction);
            self.remove_auths(&replaced);
            // keep the evicted transaction so the caller can report the replacement
            replaced_tx = Some((replaced.transaction, replaced.subpool));
        }
    }
    // Index this transaction's hash under each of its authority ids, if any.
    if let Some(auths) = &transaction.authority_ids {
        let tx_hash = transaction.hash();
        for auth in auths {
            self.auths.entry(*auth).or_default().insert(*tx_hash);
        }
    }
    // The next transaction of this sender
    let on_chain_id = TransactionId::new(transaction.sender_id(), on_chain_nonce);
    {
        // Tracks the next nonce we expect if the transactions are gapless
        let mut next_nonce = on_chain_id.nonce;
        // We need to find out if the next transaction of the sender is considered pending
        // The direct descendant has _no_ parked ancestors because the `on_chain_nonce` is
        // pending, so we can set this to `false`
        let mut has_parked_ancestor = false;
        // Traverse all future transactions of the sender starting with the on chain nonce, and
        // update existing transactions: `[on_chain_nonce,..]`
        for (id, tx) in self.descendant_txs_mut(&on_chain_id) {
            let current_pool = tx.subpool;
            // If there's a nonce gap, we can shortcircuit
            if next_nonce != id.nonce {
                break
            }
            // close the nonce gap
            tx.state.insert(TxState::NO_NONCE_GAPS);
            // set cumulative cost
            tx.cumulative_cost = cumulative_cost;
            // Update for next transaction
            cumulative_cost = tx.next_cumulative_cost();
            if cumulative_cost > on_chain_balance {
                // sender lacks sufficient funds to pay for this transaction
                tx.state.remove(TxState::ENOUGH_BALANCE);
            } else {
                tx.state.insert(TxState::ENOUGH_BALANCE);
            }
            // Update ancestor condition.
            if has_parked_ancestor {
                tx.state.remove(TxState::NO_PARKED_ANCESTORS);
            } else {
                tx.state.insert(TxState::NO_PARKED_ANCESTORS);
            }
            has_parked_ancestor = !tx.state.is_pending();
            // update the pool based on the state
            tx.subpool = tx.state.into();
            if inserted_tx_id.eq(id) {
                // if it is the new transaction, track its updated state
                state = tx.state;
            } else {
                // check if anything changed
                if current_pool != tx.subpool {
                    updates.push(PoolUpdate {
                        id: *id,
                        current: current_pool,
                        destination: tx.subpool.into(),
                    })
                }
            }
            // increment for next iteration
            next_nonce = id.next_nonce();
        }
    }
    // If this wasn't a replacement transaction we need to update the counter.
    if replaced_tx.is_none() {
        self.tx_inc(inserted_tx_id.sender);
    }
    self.update_size_metrics();
    Ok(InsertOk { transaction, move_to: state.into(), state, replaced_tx, updates })
}
/// Number of transactions in the entire pool, i.e. the number of unique
/// `(sender, nonce)` entries tracked across all subpools.
pub(crate) fn len(&self) -> usize {
    self.txs.len()
}
/// Returns `true` if the pool currently tracks no transactions at all.
pub(crate) fn is_empty(&self) -> bool {
    self.len() == 0
}
/// Asserts that the bijection between `by_hash` and `txs` is valid.
///
/// Also checks that the authority index never tracks more authority entries than
/// there are transactions in the pool.
#[cfg(any(test, feature = "test-utils"))]
pub(crate) fn assert_invariants(&self) {
    assert_eq!(self.by_hash.len(), self.txs.len(), "by_hash.len() != txs.len()");
    assert!(self.auths.len() <= self.txs.len(), "auths.len() > txs.len()");
}
}
#[cfg(test)]
impl<T: PoolTransaction> AllTransactions<T> {
    /// Returns how many transactions the pool currently tracks for the given sender.
    ///
    /// Senders without any tracked transactions yield `0`.
    pub(crate) fn tx_count(&self, sender: SenderId) -> usize {
        match self.tx_counter.get(&sender) {
            Some(count) => *count,
            None => 0,
        }
    }
}
impl<T: PoolTransaction> Default for AllTransactions<T> {
    fn default() -> Self {
        Self {
            // protocol / configuration limits
            max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER,
            minimal_protocol_basefee: MIN_PROTOCOL_BASE_FEE,
            block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M,
            price_bumps: Default::default(),
            local_transactions_config: Default::default(),
            // primary and secondary transaction indexes
            by_hash: Default::default(),
            txs: Default::default(),
            auths: Default::default(),
            // per-sender bookkeeping
            sender_info: Default::default(),
            tx_counter: Default::default(),
            // chain-tracking state and metrics
            last_seen_block_number: Default::default(),
            last_seen_block_hash: Default::default(),
            pending_fees: Default::default(),
            metrics: Default::default(),
        }
    }
}
/// Represents updated fees for the pending block.
#[derive(Debug, Clone)]
pub(crate) struct PendingFees {
    /// The pending base fee.
    pub(crate) base_fee: u64,
    /// The pending blob fee (EIP-4844).
    pub(crate) blob_fee: u128,
}
impl Default for PendingFees {
    fn default() -> Self {
        // The blob fee starts at the protocol minimum; the base fee starts at zero.
        Self { base_fee: 0, blob_fee: BLOB_TX_MIN_BLOB_GASPRICE }
    }
}
/// Result type for inserting a transaction
pub(crate) type InsertResult<T> = Result<InsertOk<T>, InsertErr<T>>;
/// Err variant of `InsertResult`
#[derive(Debug)]
pub(crate) enum InsertErr<T: PoolTransaction> {
    /// Attempted to replace existing transaction, but was underpriced
    Underpriced {
        /// The rejected replacement candidate.
        transaction: Arc<ValidPoolTransaction<T>>,
        /// Hash of the transaction that remains in the pool.
        #[expect(dead_code)]
        existing: TxHash,
    },
    /// Attempted to insert a blob transaction with a nonce gap
    BlobTxHasNonceGap { transaction: Arc<ValidPoolTransaction<T>> },
    /// Attempted to insert a transaction that would overdraft the sender's balance at the time of
    /// insertion.
    Overdraft { transaction: Arc<ValidPoolTransaction<T>> },
    /// The transactions feeCap is lower than the chain's minimum fee requirement.
    ///
    /// See also [`MIN_PROTOCOL_BASE_FEE`]
    FeeCapBelowMinimumProtocolFeeCap { transaction: Arc<ValidPoolTransaction<T>>, fee_cap: u128 },
    /// Sender currently exceeds the configured limit for max account slots.
    ///
    /// The sender can be considered a spammer at this point.
    ExceededSenderTransactionsCapacity { transaction: Arc<ValidPoolTransaction<T>> },
    /// Transaction gas limit exceeds block's gas limit
    TxGasLimitMoreThanAvailableBlockGas {
        transaction: Arc<ValidPoolTransaction<T>>,
        /// The block gas limit the transaction was checked against.
        block_gas_limit: u64,
        /// The offending transaction's own gas limit.
        tx_gas_limit: u64,
    },
    /// Thrown if the mutual exclusivity constraint (blob vs normal transaction) is violated.
    TxTypeConflict { transaction: Arc<ValidPoolTransaction<T>> },
}
/// Transaction was successfully inserted into the pool
#[derive(Debug)]
pub(crate) struct InsertOk<T: PoolTransaction> {
    /// Ref to the inserted transaction.
    transaction: Arc<ValidPoolTransaction<T>>,
    /// Where to move the transaction to, derived from its final `state`.
    move_to: SubPool,
    /// Current state of the inserted tx.
    state: TxState,
    /// The transaction that was replaced by this, together with the subpool the
    /// replaced transaction resided in.
    replaced_tx: Option<(Arc<ValidPoolTransaction<T>>, SubPool)>,
    /// Additional updates to transactions affected by this change.
    updates: Vec<PoolUpdate>,
}
/// The internal transaction type used by `AllTransactions` which also carries additional info
/// used for determining the current state of the transaction.
#[derive(Debug)]
pub(crate) struct PoolInternalTransaction<T: PoolTransaction> {
    /// The actual transaction object.
    pub(crate) transaction: Arc<ValidPoolTransaction<T>>,
    /// The `SubPool` that currently contains this transaction.
    pub(crate) subpool: SubPool,
    /// Keeps track of the current state of the transaction and therefore in which subpool it
    /// should reside
    pub(crate) state: TxState,
    /// The total cost of all transactions before this transaction.
    ///
    /// This is the combined `cost` of all transactions from the same sender that currently
    /// come before this transaction.
    pub(crate) cumulative_cost: U256,
}
// === impl PoolInternalTransaction ===
impl<T: PoolTransaction> PoolInternalTransaction<T> {
    /// Returns the cumulative cost including this transaction's own cost, i.e. the
    /// value the sender's next (higher-nonce) transaction should record as its
    /// `cumulative_cost`.
    fn next_cumulative_cost(&self) -> U256 {
        self.cumulative_cost + self.transaction.cost()
    }
}
/// Stores relevant context about a sender.
#[derive(Debug, Clone, Default)]
pub(crate) struct SenderInfo {
    /// Current nonce of the sender.
    pub(crate) state_nonce: u64,
    /// Balance of the sender at the current point.
    pub(crate) balance: U256,
}
// === impl SenderInfo ===
impl SenderInfo {
    /// Overwrites the tracked nonce and balance with the given values.
    const fn update(&mut self, state_nonce: u64, balance: U256) {
        self.state_nonce = state_nonce;
        self.balance = balance;
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
test_utils::{MockOrdering, MockTransaction, MockTransactionFactory, MockTransactionSet},
traits::TransactionOrigin,
SubPoolLimit,
};
use alloy_consensus::{Transaction, TxType};
use alloy_primitives::address;
#[test]
fn test_insert_blob() {
    // A fully funded, gapless EIP-4844 transaction must land directly in the
    // pending subpool with all relevant state bits set.
    let balance = U256::MAX;
    let nonce = 0;
    let mut factory = MockTransactionFactory::default();
    let mut pool = AllTransactions::default();
    let blob_tx = factory.validated(MockTransaction::eip4844().inc_price().inc_limit());
    let InsertOk { updates, replaced_tx, move_to, state, .. } =
        pool.insert_tx(blob_tx.clone(), balance, nonce).unwrap();
    assert!(updates.is_empty());
    assert!(replaced_tx.is_none());
    assert!(state.contains(TxState::NO_NONCE_GAPS));
    assert!(state.contains(TxState::ENOUGH_BALANCE));
    assert!(state.contains(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK));
    assert_eq!(move_to, SubPool::Pending);
    let stored = pool.txs.get(&blob_tx.transaction_id).unwrap();
    assert_eq!(stored.subpool, SubPool::Pending);
}
#[test]
fn test_insert_blob_not_enough_blob_fee() {
    // A blob tx whose blob fee cap is below the pending blob fee must still be
    // accepted into the pool, but without the `ENOUGH_BLOB_FEE_CAP_BLOCK` bit.
    let on_chain_balance = U256::MAX;
    let on_chain_nonce = 0;
    let mut f = MockTransactionFactory::default();
    // Note: previously the pool was constructed with a hard-coded placeholder
    // blob fee that was immediately overwritten below; plain default suffices.
    let mut pool = AllTransactions::default();
    let tx = MockTransaction::eip4844().inc_price().inc_limit();
    // Make the pending blob fee exceed the tx's blob fee cap by one.
    pool.pending_fees.blob_fee = tx.max_fee_per_blob_gas().unwrap() + 1;
    let valid_tx = f.validated(tx);
    let InsertOk { state, .. } =
        pool.insert_tx(valid_tx.clone(), on_chain_balance, on_chain_nonce).unwrap();
    assert!(state.contains(TxState::NO_NONCE_GAPS));
    assert!(!state.contains(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK));
    // the tx is still tracked by the pool
    let _ = pool.txs.get(&valid_tx.transaction_id).unwrap();
}
#[test]
fn test_valid_tx_with_decreasing_blob_fee() {
    // Insert a blob tx while the pending blob fee is too high, remove it, lower
    // the blob fee, then re-insert: the second insert must satisfy the blob fee cap.
    let on_chain_balance = U256::MAX;
    let on_chain_nonce = 0;
    let mut f = MockTransactionFactory::default();
    // Note: previously the pool was constructed with a hard-coded placeholder
    // blob fee that was immediately overwritten below; plain default suffices.
    let mut pool = AllTransactions::default();
    let tx = MockTransaction::eip4844().inc_price().inc_limit();
    // Pending blob fee just above the tx's cap -> blob fee cap not satisfied.
    pool.pending_fees.blob_fee = tx.max_fee_per_blob_gas().unwrap() + 1;
    let valid_tx = f.validated(tx.clone());
    let InsertOk { state, .. } =
        pool.insert_tx(valid_tx.clone(), on_chain_balance, on_chain_nonce).unwrap();
    assert!(state.contains(TxState::NO_NONCE_GAPS));
    assert!(!state.contains(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK));
    let _ = pool.txs.get(&valid_tx.transaction_id).unwrap();
    pool.remove_transaction(&valid_tx.transaction_id);
    // Blob fee drops to exactly the tx's cap -> now sufficient.
    pool.pending_fees.blob_fee = tx.max_fee_per_blob_gas().unwrap();
    let InsertOk { state, .. } =
        pool.insert_tx(valid_tx.clone(), on_chain_balance, on_chain_nonce).unwrap();
    assert!(state.contains(TxState::NO_NONCE_GAPS));
    assert!(state.contains(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK));
}
#[test]
fn test_demote_valid_tx_with_increasing_blob_fee() {
    let on_chain_balance = U256::MAX;
    let on_chain_nonce = 0;
    let mut f = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    let tx = MockTransaction::eip4844().inc_price().inc_limit();
    // set the pending blob fee to exactly the tx's cap so the tx initially
    // satisfies the blob fee condition and starts out pending
    let mut block_info = pool.block_info();
    block_info.pending_blob_fee = Some(tx.max_fee_per_blob_gas().unwrap());
    pool.set_block_info(block_info);
    let validated = f.validated(tx.clone());
    let id = *validated.id();
    pool.add_transaction(validated, on_chain_balance, on_chain_nonce, None).unwrap();
    // assert pool lengths
    assert!(pool.blob_pool.is_empty());
    assert_eq!(pool.pending_pool.len(), 1);
    // check tx state and derived subpool
    let internal_tx = pool.all_transactions.txs.get(&id).unwrap();
    assert!(internal_tx.state.contains(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK));
    assert_eq!(internal_tx.subpool, SubPool::Pending);
    // raise the pending blob fee above the tx's cap so the pools are updated
    block_info.pending_blob_fee = Some(tx.max_fee_per_blob_gas().unwrap() + 1);
    pool.set_block_info(block_info);
    // check that the tx was demoted
    let internal_tx = pool.all_transactions.txs.get(&id).unwrap();
    assert!(!internal_tx.state.contains(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK));
    assert_eq!(internal_tx.subpool, SubPool::Blob);
    // make sure the blob transaction was demoted into the (parked) blob pool
    assert_eq!(pool.blob_pool.len(), 1);
    assert!(pool.pending_pool.is_empty());
}
#[test]
fn test_promote_valid_tx_with_decreasing_blob_fee() {
    let on_chain_balance = U256::MAX;
    let on_chain_nonce = 0;
    let mut f = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    let tx = MockTransaction::eip4844().inc_price().inc_limit();
    // set block info so the tx is initially underpriced w.r.t. blob fee
    let mut block_info = pool.block_info();
    block_info.pending_blob_fee = Some(tx.max_fee_per_blob_gas().unwrap() + 1);
    pool.set_block_info(block_info);
    let validated = f.validated(tx.clone());
    let id = *validated.id();
    pool.add_transaction(validated, on_chain_balance, on_chain_nonce, None).unwrap();
    // assert pool lengths: the underpriced blob tx starts out parked in the blob pool
    assert!(pool.pending_pool.is_empty());
    assert_eq!(pool.blob_pool.len(), 1);
    // check tx state and derived subpool
    let internal_tx = pool.all_transactions.txs.get(&id).unwrap();
    assert!(!internal_tx.state.contains(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK));
    assert_eq!(internal_tx.subpool, SubPool::Blob);
    // lower the pending blob fee to exactly the tx's cap so the pools are updated
    block_info.pending_blob_fee = Some(tx.max_fee_per_blob_gas().unwrap());
    pool.set_block_info(block_info);
    // check that the tx is promoted
    let internal_tx = pool.all_transactions.txs.get(&id).unwrap();
    assert!(internal_tx.state.contains(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK));
    assert_eq!(internal_tx.subpool, SubPool::Pending);
    // make sure the blob transaction was promoted into the pending pool
    assert_eq!(pool.pending_pool.len(), 1);
    assert!(pool.blob_pool.is_empty());
}
/// A struct representing a txpool promotion test instance.
///
/// Describes a single-transaction scenario: the fees (and therefore the subpool)
/// the transaction starts in, and the updated fees with the expected subpool after
/// the update.
#[derive(Debug, PartialEq, Eq, Clone, Hash)]
struct PromotionTest {
    /// The basefee at the start of the test
    basefee: u64,
    /// The blobfee at the start of the test
    blobfee: u128,
    /// The subpool at the start of the test
    subpool: SubPool,
    /// The basefee update
    basefee_update: u64,
    /// The blobfee update
    blobfee_update: u128,
    /// The subpool after the update
    new_subpool: SubPool,
}
impl PromotionTest {
    /// Returns the test case for the opposite update, i.e. swaps starting and
    /// ending fees/subpools so each promotion scenario doubles as a demotion one.
    const fn opposite(&self) -> Self {
        Self {
            basefee: self.basefee_update,
            blobfee: self.blobfee_update,
            subpool: self.new_subpool,
            blobfee_update: self.blobfee,
            basefee_update: self.basefee,
            new_subpool: self.subpool,
        }
    }
    /// Asserts that exactly one transaction sits in `check_subpool` and that all
    /// other subpools are empty, attaching `failure_message` to every assertion.
    fn assert_subpool_lengths<T: TransactionOrdering>(
        &self,
        pool: &TxPool<T>,
        failure_message: String,
        check_subpool: SubPool,
    ) {
        match check_subpool {
            SubPool::Blob => {
                assert_eq!(pool.blob_pool.len(), 1, "{failure_message}");
                assert!(pool.pending_pool.is_empty(), "{failure_message}");
                assert!(pool.basefee_pool.is_empty(), "{failure_message}");
                assert!(pool.queued_pool.is_empty(), "{failure_message}");
            }
            SubPool::Pending => {
                assert!(pool.blob_pool.is_empty(), "{failure_message}");
                assert_eq!(pool.pending_pool.len(), 1, "{failure_message}");
                assert!(pool.basefee_pool.is_empty(), "{failure_message}");
                assert!(pool.queued_pool.is_empty(), "{failure_message}");
            }
            SubPool::BaseFee => {
                assert!(pool.blob_pool.is_empty(), "{failure_message}");
                assert!(pool.pending_pool.is_empty(), "{failure_message}");
                assert_eq!(pool.basefee_pool.len(), 1, "{failure_message}");
                assert!(pool.queued_pool.is_empty(), "{failure_message}");
            }
            SubPool::Queued => {
                assert!(pool.blob_pool.is_empty(), "{failure_message}");
                assert!(pool.pending_pool.is_empty(), "{failure_message}");
                assert!(pool.basefee_pool.is_empty(), "{failure_message}");
                assert_eq!(pool.queued_pool.len(), 1, "{failure_message}");
            }
        }
    }
    /// Runs an assertion on the provided pool, ensuring that the transaction is in the correct
    /// subpool based on the starting condition of the test, assuming the pool contains only a
    /// single transaction.
    fn assert_single_tx_starting_subpool<T: TransactionOrdering>(&self, pool: &TxPool<T>) {
        self.assert_subpool_lengths(
            pool,
            format!("pool length check failed at start of test: {self:?}"),
            self.subpool,
        );
    }
    /// Runs an assertion on the provided pool, ensuring that the transaction is in the correct
    /// subpool based on the ending condition of the test, assuming the pool contains only a
    /// single transaction.
    fn assert_single_tx_ending_subpool<T: TransactionOrdering>(&self, pool: &TxPool<T>) {
        self.assert_subpool_lengths(
            pool,
            format!("pool length check failed at end of test: {self:?}"),
            self.new_subpool,
        );
    }
}
#[test]
fn test_promote_blob_tx_with_both_pending_fee_updates() {
    // this exhaustively tests all possible promotion scenarios for a single transaction moving
    // between the blob and pending pool
    let on_chain_balance = U256::MAX;
    let on_chain_nonce = 0;
    let mut f = MockTransactionFactory::default();
    let tx = MockTransaction::eip4844().inc_price().inc_limit();
    let max_fee_per_blob_gas = tx.max_fee_per_blob_gas().unwrap();
    let max_fee_per_gas = tx.max_fee_per_gas() as u64;
    // These are all _promotion_ tests or idempotent tests.
    let mut expected_promotions = vec![
        // both fees too high before and after: stays parked in the blob pool
        PromotionTest {
            blobfee: max_fee_per_blob_gas + 1,
            basefee: max_fee_per_gas + 1,
            subpool: SubPool::Blob,
            blobfee_update: max_fee_per_blob_gas + 1,
            basefee_update: max_fee_per_gas + 1,
            new_subpool: SubPool::Blob,
        },
        // only the blob fee becomes sufficient: still parked (base fee too high)
        PromotionTest {
            blobfee: max_fee_per_blob_gas + 1,
            basefee: max_fee_per_gas + 1,
            subpool: SubPool::Blob,
            blobfee_update: max_fee_per_blob_gas,
            basefee_update: max_fee_per_gas + 1,
            new_subpool: SubPool::Blob,
        },
        // only the base fee becomes sufficient: still parked (blob fee too high)
        PromotionTest {
            blobfee: max_fee_per_blob_gas + 1,
            basefee: max_fee_per_gas + 1,
            subpool: SubPool::Blob,
            blobfee_update: max_fee_per_blob_gas + 1,
            basefee_update: max_fee_per_gas,
            new_subpool: SubPool::Blob,
        },
        // both fees become sufficient: promoted into pending
        PromotionTest {
            blobfee: max_fee_per_blob_gas + 1,
            basefee: max_fee_per_gas + 1,
            subpool: SubPool::Blob,
            blobfee_update: max_fee_per_blob_gas,
            basefee_update: max_fee_per_gas,
            new_subpool: SubPool::Pending,
        },
        // base fee becomes sufficient (blob fee already was): promoted
        PromotionTest {
            blobfee: max_fee_per_blob_gas,
            basefee: max_fee_per_gas + 1,
            subpool: SubPool::Blob,
            blobfee_update: max_fee_per_blob_gas,
            basefee_update: max_fee_per_gas,
            new_subpool: SubPool::Pending,
        },
        // blob fee becomes sufficient (base fee already was): promoted
        PromotionTest {
            blobfee: max_fee_per_blob_gas + 1,
            basefee: max_fee_per_gas,
            subpool: SubPool::Blob,
            blobfee_update: max_fee_per_blob_gas,
            basefee_update: max_fee_per_gas,
            new_subpool: SubPool::Pending,
        },
        // both fees sufficient before and after: stays pending
        PromotionTest {
            blobfee: max_fee_per_blob_gas,
            basefee: max_fee_per_gas,
            subpool: SubPool::Pending,
            blobfee_update: max_fee_per_blob_gas,
            basefee_update: max_fee_per_gas,
            new_subpool: SubPool::Pending,
        },
    ];
    // extend the test cases with reversed updates - this will add all _demotion_ tests
    let reversed = expected_promotions.iter().map(|test| test.opposite()).collect::<Vec<_>>();
    expected_promotions.extend(reversed);
    // dedup the test cases
    let expected_promotions = expected_promotions.into_iter().collect::<HashSet<_>>();
    for promotion_test in &expected_promotions {
        let mut pool = TxPool::new(MockOrdering::default(), Default::default());
        // set the starting fees for this scenario
        let mut block_info = pool.block_info();
        block_info.pending_blob_fee = Some(promotion_test.blobfee);
        block_info.pending_basefee = promotion_test.basefee;
        pool.set_block_info(block_info);
        let validated = f.validated(tx.clone());
        let id = *validated.id();
        pool.add_transaction(validated, on_chain_balance, on_chain_nonce, None).unwrap();
        // assert pool lengths
        promotion_test.assert_single_tx_starting_subpool(&pool);
        // check that the tx starts out in the expected subpool
        let internal_tx = pool.all_transactions.txs.get(&id).unwrap();
        assert_eq!(
            internal_tx.subpool, promotion_test.subpool,
            "Subpools do not match at start of test: {promotion_test:?}"
        );
        // apply the fee update (both base fee and blob fee)
        block_info.pending_basefee = promotion_test.basefee_update;
        block_info.pending_blob_fee = Some(promotion_test.blobfee_update);
        pool.set_block_info(block_info);
        // check that the tx ended up in the expected subpool after the update
        let internal_tx = pool.all_transactions.txs.get(&id).unwrap();
        assert_eq!(
            internal_tx.subpool, promotion_test.new_subpool,
            "Subpools do not match at end of test: {promotion_test:?}"
        );
        // assert new pool lengths
        promotion_test.assert_single_tx_ending_subpool(&pool);
    }
}
#[test]
fn test_insert_pending() {
    // A fully funded, gapless EIP-1559 transaction must be placed into the
    // pending subpool.
    let balance = U256::MAX;
    let nonce = 0;
    let mut factory = MockTransactionFactory::default();
    let mut pool = AllTransactions::default();
    let valid_tx = factory.validated(MockTransaction::eip1559().inc_price().inc_limit());
    let InsertOk { updates, replaced_tx, move_to, state, .. } =
        pool.insert_tx(valid_tx.clone(), balance, nonce).unwrap();
    assert!(updates.is_empty());
    assert!(replaced_tx.is_none());
    assert!(state.contains(TxState::NO_NONCE_GAPS));
    assert!(state.contains(TxState::ENOUGH_BALANCE));
    assert_eq!(move_to, SubPool::Pending);
    let stored = pool.txs.get(&valid_tx.transaction_id).unwrap();
    assert_eq!(stored.subpool, SubPool::Pending);
}
#[test]
fn test_simple_insert() {
    // The sender has zero balance, so inserted txs are valid but unfunded: they
    // must land in the queued subpool while still tracking fee-cap/nonce state.
    let on_chain_balance = U256::ZERO;
    let on_chain_nonce = 0;
    let mut f = MockTransactionFactory::default();
    let mut pool = AllTransactions::default();
    let mut tx = MockTransaction::eip1559().inc_price().inc_limit();
    tx.set_priority_fee(100);
    tx.set_max_fee(100);
    let valid_tx = f.validated(tx.clone());
    let InsertOk { updates, replaced_tx, move_to, state, .. } =
        pool.insert_tx(valid_tx.clone(), on_chain_balance, on_chain_nonce).unwrap();
    assert!(updates.is_empty());
    assert!(replaced_tx.is_none());
    assert!(state.contains(TxState::NO_NONCE_GAPS));
    assert!(!state.contains(TxState::ENOUGH_BALANCE));
    assert_eq!(move_to, SubPool::Queued);
    assert_eq!(pool.len(), 1);
    assert!(pool.contains(valid_tx.hash()));
    let expected_state = TxState::ENOUGH_FEE_CAP_BLOCK | TxState::NO_NONCE_GAPS;
    let inserted = pool.get(valid_tx.id()).unwrap();
    // `contains` (not `intersects`) so that *both* expected bits are verified;
    // `intersects` would pass even if only one of the flags were set.
    assert!(inserted.state.contains(expected_state));
    // insert the same tx again
    let res = pool.insert_tx(valid_tx, on_chain_balance, on_chain_nonce);
    res.unwrap_err();
    assert_eq!(pool.len(), 1);
    // insert the sender's next (nonce + 1) transaction
    let valid_tx = f.validated(tx.next());
    let InsertOk { updates, replaced_tx, move_to, state, .. } =
        pool.insert_tx(valid_tx.clone(), on_chain_balance, on_chain_nonce).unwrap();
    assert!(updates.is_empty());
    assert!(replaced_tx.is_none());
    assert!(state.contains(TxState::NO_NONCE_GAPS));
    assert!(!state.contains(TxState::ENOUGH_BALANCE));
    assert_eq!(move_to, SubPool::Queued);
    assert!(pool.contains(valid_tx.hash()));
    assert_eq!(pool.len(), 2);
    let inserted = pool.get(valid_tx.id()).unwrap();
    assert!(inserted.state.contains(expected_state));
}
#[test]
// Test that on_canonical_state_change doesn't double-process transactions
// when both fee and account updates would affect the same transaction
fn test_on_canonical_state_change_no_double_processing() {
    let mut tx_factory = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    // Setup: Create a sender with a transaction in basefee pool
    let tx = MockTransaction::eip1559().with_gas_price(50).with_gas_limit(30_000);
    let sender = tx.sender();
    // Set high base fee initially (100 > gas price 50) so the tx parks in basefee
    let mut block_info = pool.block_info();
    block_info.pending_basefee = 100;
    pool.set_block_info(block_info);
    let validated = tx_factory.validated(tx);
    pool.add_transaction(validated, U256::from(10_000_000), 0, None).unwrap();
    // Get sender_id after the transaction has been added
    let sender_id = tx_factory.ids.sender_id(&sender).unwrap();
    assert_eq!(pool.basefee_pool.len(), 1);
    assert_eq!(pool.pending_pool.len(), 0);
    // Now simulate a canonical state change with:
    // 1. Lower base fee (would promote tx)
    // 2. Account balance update (would also evaluate tx)
    block_info.pending_basefee = 40;
    let mut changed_senders = FxHashMap::default();
    changed_senders.insert(
        sender_id,
        SenderInfo {
            state_nonce: 0,
            balance: U256::from(20_000_000), // Increased balance
        },
    );
    let outcome = pool.on_canonical_state_change(
        block_info,
        vec![], // no mined transactions
        changed_senders,
        PoolUpdateKind::Commit,
    );
    // Transaction should be promoted exactly once
    assert_eq!(pool.pending_pool.len(), 1, "Transaction should be in pending pool");
    assert_eq!(pool.basefee_pool.len(), 0, "Transaction should not be in basefee pool");
    assert_eq!(outcome.promoted.len(), 1, "Should report exactly one promotion");
}
#[test]
// Regression test: ensure we don't double-count promotions when base fee
// decreases and account is updated. This test would fail before the fix.
fn test_canonical_state_change_with_basefee_update_regression() {
    let mut tx_factory = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    // Create transactions from different senders to test independently
    let sender_balance = U256::from(100_000_000);
    // Sender 1: tx will be promoted (gas price 60 > new base fee 50)
    let tx1 =
        MockTransaction::eip1559().with_gas_price(60).with_gas_limit(21_000).with_nonce(0);
    let sender1 = tx1.sender();
    // Sender 2: tx will be promoted (gas price 55 > new base fee 50)
    let tx2 =
        MockTransaction::eip1559().with_gas_price(55).with_gas_limit(21_000).with_nonce(0);
    let sender2 = tx2.sender();
    // Sender 3: tx will NOT be promoted (gas price 45 < new base fee 50)
    let tx3 =
        MockTransaction::eip1559().with_gas_price(45).with_gas_limit(21_000).with_nonce(0);
    let sender3 = tx3.sender();
    // Set high initial base fee (all txs will go to basefee pool)
    let mut block_info = pool.block_info();
    block_info.pending_basefee = 70;
    pool.set_block_info(block_info);
    // Add all transactions
    let validated1 = tx_factory.validated(tx1);
    let validated2 = tx_factory.validated(tx2);
    let validated3 = tx_factory.validated(tx3);
    pool.add_transaction(validated1, sender_balance, 0, None).unwrap();
    pool.add_transaction(validated2, sender_balance, 0, None).unwrap();
    pool.add_transaction(validated3, sender_balance, 0, None).unwrap();
    // Resolve sender ids (only known after the txs were added)
    let sender1_id = tx_factory.ids.sender_id(&sender1).unwrap();
    let sender2_id = tx_factory.ids.sender_id(&sender2).unwrap();
    let sender3_id = tx_factory.ids.sender_id(&sender3).unwrap();
    // All should be in basefee pool initially
    assert_eq!(pool.basefee_pool.len(), 3, "All txs should be in basefee pool");
    assert_eq!(pool.pending_pool.len(), 0, "No txs should be in pending pool");
    // Now decrease base fee to 50 - this should promote tx1 and tx2 (prices 60 and 55)
    // but not tx3 (price 45)
    block_info.pending_basefee = 50;
    // Update all senders' balances (simulating account state changes)
    let mut changed_senders = FxHashMap::default();
    changed_senders.insert(
        sender1_id,
        SenderInfo { state_nonce: 0, balance: sender_balance + U256::from(1000) },
    );
    changed_senders.insert(
        sender2_id,
        SenderInfo { state_nonce: 0, balance: sender_balance + U256::from(1000) },
    );
    changed_senders.insert(
        sender3_id,
        SenderInfo { state_nonce: 0, balance: sender_balance + U256::from(1000) },
    );
    let outcome = pool.on_canonical_state_change(
        block_info,
        vec![],
        changed_senders,
        PoolUpdateKind::Commit,
    );
    // Check final state
    assert_eq!(pool.pending_pool.len(), 2, "tx1 and tx2 should be promoted");
    assert_eq!(pool.basefee_pool.len(), 1, "tx3 should remain in basefee");
    // CRITICAL: Should report exactly 2 promotions, not 4 (which would happen with
    // double-processing)
    assert_eq!(
        outcome.promoted.len(),
        2,
        "Should report exactly 2 promotions, not double-counted"
    );
    // Verify the correct transactions were promoted
    let promoted_prices: Vec<u128> =
        outcome.promoted.iter().map(|tx| tx.max_fee_per_gas()).collect();
    assert!(promoted_prices.contains(&60));
    assert!(promoted_prices.contains(&55));
}
#[test]
fn test_basefee_decrease_with_empty_senders() {
    // Test that fee promotions still occur when basefee decreases
    // even with no changed_senders
    let mut tx_factory = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    // Create transaction that will be promoted when fee drops (gas price 60)
    let tx = MockTransaction::eip1559().with_gas_price(60).with_gas_limit(21_000);
    // Set high initial base fee (100 > 60, so the tx is parked)
    let mut block_info = pool.block_info();
    block_info.pending_basefee = 100;
    pool.set_block_info(block_info);
    // Add transaction - should go to basefee pool
    let validated = tx_factory.validated(tx);
    pool.add_transaction(validated, U256::from(10_000_000), 0, None).unwrap();
    assert_eq!(pool.basefee_pool.len(), 1);
    assert_eq!(pool.pending_pool.len(), 0);
    // Decrease base fee with NO changed senders
    block_info.pending_basefee = 50;
    let outcome = pool.on_canonical_state_change(
        block_info,
        vec![],
        FxHashMap::default(), // Empty changed_senders!
        PoolUpdateKind::Commit,
    );
    // Transaction should still be promoted by fee-driven logic
    assert_eq!(pool.pending_pool.len(), 1, "Fee decrease should promote tx");
    assert_eq!(pool.basefee_pool.len(), 0);
    assert_eq!(outcome.promoted.len(), 1, "Should report promotion from fee update");
}
#[test]
fn test_basefee_decrease_account_makes_unfundable() {
    // Test that when basefee decreases but account update makes tx unfundable,
    // we don't get transient promote-then-discard double counting
    let mut tx_factory = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    let tx = MockTransaction::eip1559().with_gas_price(60).with_gas_limit(21_000);
    let sender = tx.sender();
    // High initial base fee (100 > 60, so the tx starts parked in basefee)
    let mut block_info = pool.block_info();
    block_info.pending_basefee = 100;
    pool.set_block_info(block_info);
    let validated = tx_factory.validated(tx);
    pool.add_transaction(validated, U256::from(10_000_000), 0, None).unwrap();
    let sender_id = tx_factory.ids.sender_id(&sender).unwrap();
    assert_eq!(pool.basefee_pool.len(), 1);
    // Decrease base fee (would normally promote) but also drain account
    block_info.pending_basefee = 50;
    let mut changed_senders = FxHashMap::default();
    changed_senders.insert(
        sender_id,
        SenderInfo {
            state_nonce: 0,
            balance: U256::from(100), // Too low to pay for gas!
        },
    );
    let outcome = pool.on_canonical_state_change(
        block_info,
        vec![],
        changed_senders,
        PoolUpdateKind::Commit,
    );
    // With insufficient balance, transaction goes to queued pool
    assert_eq!(pool.pending_pool.len(), 0, "Unfunded tx should not be in pending");
    assert_eq!(pool.basefee_pool.len(), 0, "Tx no longer in basefee pool");
    assert_eq!(pool.queued_pool.len(), 1, "Unfunded tx should be in queued pool");
    // Transaction is not removed, just moved to queued
    let tx_count = pool.all_transactions.txs.len();
    assert_eq!(tx_count, 1, "Transaction should still be in pool (in queued)");
    assert_eq!(outcome.promoted.len(), 0, "Should not report promotion");
    assert_eq!(outcome.discarded.len(), 0, "Queued tx is not reported as discarded");
}
#[test]
fn insert_already_imported() {
    // Re-adding a transaction that the pool already tracks must be rejected
    // with `AlreadyImported`.
    let balance = U256::ZERO;
    let nonce = 0;
    let mut factory = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    let tx = factory.validated(MockTransaction::eip1559().inc_price().inc_limit());
    pool.add_transaction(tx.clone(), balance, nonce, None).unwrap();
    let err = pool.add_transaction(tx, balance, nonce, None).unwrap_err();
    assert!(matches!(err.kind, PoolErrorKind::AlreadyImported));
}
#[test]
fn insert_replace() {
    // A higher-priced transaction with the same sender/nonce replaces the
    // existing one; the replaced entry must be fully evicted from the pool.
    let balance = U256::ZERO;
    let nonce = 0;
    let mut factory = MockTransactionFactory::default();
    let mut pool = AllTransactions::default();
    let base = MockTransaction::eip1559().inc_price().inc_limit();
    let original = factory.validated(base.clone());
    let _ = pool.insert_tx(original.clone(), balance, nonce).unwrap();
    let replacement = factory.validated(base.rng_hash().inc_price());
    let InsertOk { updates, replaced_tx, .. } =
        pool.insert_tx(replacement.clone(), balance, nonce).unwrap();
    assert!(updates.is_empty());
    // The returned replaced entry is the original transaction.
    let replaced = replaced_tx.unwrap();
    assert_eq!(replaced.0.hash(), original.hash());
    // The original is fully removed; only the replacement remains.
    assert!(!pool.contains(original.hash()));
    assert!(pool.contains(replacement.hash()));
    assert_eq!(pool.len(), 1);
}
#[test]
fn insert_replace_txpool() {
    // Same replacement scenario as `insert_replace`, but exercised through the
    // full `TxPool` API instead of `AllTransactions` directly.
    let on_chain_balance = U256::ZERO;
    let on_chain_nonce = 0;
    let mut f = MockTransactionFactory::default();
    let mut pool = TxPool::mock();
    let tx = MockTransaction::eip1559().inc_price().inc_limit();
    let first = f.validated(tx.clone());
    let first_added =
        pool.add_transaction(first, on_chain_balance, on_chain_nonce, None).unwrap();
    let replacement = f.validated(tx.rng_hash().inc_price());
    let replacement_added = pool
        .add_transaction(replacement.clone(), on_chain_balance, on_chain_nonce, None)
        .unwrap();
    // ensure the replaced tx is removed
    assert!(!pool.contains(first_added.hash()));
    // but the replacement is still there
    assert!(pool.subpool_contains(replacement_added.subpool(), replacement_added.id()));
    assert!(pool.contains(replacement.hash()));
    let size = pool.size();
    assert_eq!(size.total, 1);
    size.assert_invariants();
}
#[test]
fn insert_replace_underpriced() {
    // A replacement priced *below* the existing transaction must be rejected
    // as underpriced.
    let balance = U256::ZERO;
    let nonce = 0;
    let mut factory = MockTransactionFactory::default();
    let mut pool = AllTransactions::default();
    let base = MockTransaction::eip1559().inc_price().inc_limit();
    let _res = pool.insert_tx(factory.validated(base.clone()), balance, nonce);
    // same sender/nonce, but with a *lower* price than the existing entry
    let mut replacement = factory.validated(base.rng_hash());
    replacement.transaction = replacement.transaction.decr_price();
    let err = pool.insert_tx(replacement, balance, nonce).unwrap_err();
    assert!(matches!(err, InsertErr::Underpriced { .. }));
}
#[test]
fn insert_replace_underpriced_not_enough_bump() {
    // A replacement must bump BOTH the priority fee and the max fee by at least
    // the configured minimum (default 10%); bumping only one of them — or both
    // by too little — is rejected as underpriced and leaves the original intact.
    let on_chain_balance = U256::ZERO;
    let on_chain_nonce = 0;
    let mut f = MockTransactionFactory::default();
    let mut pool = AllTransactions::default();
    let mut tx = MockTransaction::eip1559().inc_price().inc_limit();
    tx.set_priority_fee(100);
    tx.set_max_fee(100);
    let first = f.validated(tx.clone());
    let _ = pool.insert_tx(first.clone(), on_chain_balance, on_chain_nonce).unwrap();
    let mut replacement = f.validated(tx.rng_hash().inc_price());
    // a price bump of 9% is not enough for a default min price bump of 10%
    replacement.transaction.set_priority_fee(109);
    replacement.transaction.set_max_fee(109);
    let err =
        pool.insert_tx(replacement.clone(), on_chain_balance, on_chain_nonce).unwrap_err();
    assert!(matches!(err, InsertErr::Underpriced { .. }));
    // ensure the first tx is not removed
    assert!(pool.contains(first.hash()));
    assert_eq!(pool.len(), 1);
    // should also fail if the bump in max fee is not enough
    replacement.transaction.set_priority_fee(110);
    replacement.transaction.set_max_fee(109);
    let err =
        pool.insert_tx(replacement.clone(), on_chain_balance, on_chain_nonce).unwrap_err();
    assert!(matches!(err, InsertErr::Underpriced { .. }));
    assert!(pool.contains(first.hash()));
    assert_eq!(pool.len(), 1);
    // should also fail if the bump in priority fee is not enough
    replacement.transaction.set_priority_fee(109);
    replacement.transaction.set_max_fee(110);
    let err = pool.insert_tx(replacement, on_chain_balance, on_chain_nonce).unwrap_err();
    assert!(matches!(err, InsertErr::Underpriced { .. }));
    assert!(pool.contains(first.hash()));
    assert_eq!(pool.len(), 1);
}
#[test]
fn insert_conflicting_type_normal_to_blob() {
    // A sender with a non-blob (EIP-1559) tx in the pool may not add a blob
    // (EIP-4844) tx: the two types are mutually exclusive per sender.
    let balance = U256::from(10_000);
    let nonce = 0;
    let mut factory = MockTransactionFactory::default();
    let mut pool = AllTransactions::default();
    let normal = MockTransaction::eip1559().inc_price().inc_limit();
    pool.insert_tx(factory.validated(normal.clone()), balance, nonce).unwrap();
    // blob tx from the same sender must conflict
    let blob = factory.validated(
        MockTransaction::eip4844().set_sender(normal.sender()).inc_price_by(100).inc_limit(),
    );
    let err = pool.insert_tx(blob, balance, nonce).unwrap_err();
    assert!(matches!(err, InsertErr::TxTypeConflict { .. }), "{err:?}");
}
#[test]
fn insert_conflicting_type_blob_to_normal() {
    // Mirror of `insert_conflicting_type_normal_to_blob`: a sender with a blob
    // (EIP-4844) tx in the pool may not add a non-blob (EIP-1559) tx.
    let balance = U256::from(10_000);
    let nonce = 0;
    let mut factory = MockTransactionFactory::default();
    let mut pool = AllTransactions::default();
    let blob = MockTransaction::eip4844().inc_price().inc_limit();
    pool.insert_tx(factory.validated(blob.clone()), balance, nonce).unwrap();
    // non-blob tx from the same sender must conflict
    let normal = factory.validated(
        MockTransaction::eip1559().set_sender(blob.sender()).inc_price_by(100).inc_limit(),
    );
    let err = pool.insert_tx(normal, balance, nonce).unwrap_err();
    assert!(matches!(err, InsertErr::TxTypeConflict { .. }), "{err:?}");
}
// insert nonce then nonce - 1
#[test]
fn insert_previous() {
    // Inserting a tx with a nonce gap parks it in the queued pool; inserting the
    // missing predecessor (nonce - 1) closes the gap. With zero balance both txs
    // still remain queued.
    let on_chain_balance = U256::ZERO;
    let on_chain_nonce = 0;
    let mut f = MockTransactionFactory::default();
    let mut pool = AllTransactions::default();
    let tx = MockTransaction::eip1559().inc_nonce().inc_price().inc_limit();
    let first = f.validated(tx.clone());
    let _res = pool.insert_tx(first.clone(), on_chain_balance, on_chain_nonce);
    let first_in_pool = pool.get(first.id()).unwrap();
    // has nonce gap
    assert!(!first_in_pool.state.contains(TxState::NO_NONCE_GAPS));
    let prev = f.validated(tx.prev());
    let InsertOk { updates, replaced_tx, state, move_to, .. } =
        pool.insert_tx(prev, on_chain_balance, on_chain_nonce).unwrap();
    // no updates since still in queued pool
    assert!(updates.is_empty());
    assert!(replaced_tx.is_none());
    assert!(state.contains(TxState::NO_NONCE_GAPS));
    assert_eq!(move_to, SubPool::Queued);
    let first_in_pool = pool.get(first.id()).unwrap();
    // no nonce gap anymore
    assert!(first_in_pool.state.contains(TxState::NO_NONCE_GAPS));
}
// insert nonce then nonce - 1
#[test]
fn insert_with_updates() {
    // Like `insert_previous`, but with enough balance: closing the nonce gap
    // promotes both transactions to the pending pool and yields an update for
    // the previously parked tx.
    let on_chain_balance = U256::from(10_000);
    let on_chain_nonce = 0;
    let mut f = MockTransactionFactory::default();
    let mut pool = AllTransactions::default();
    let tx = MockTransaction::eip1559().inc_nonce().set_gas_price(100).inc_limit();
    let first = f.validated(tx.clone());
    let _res = pool.insert_tx(first.clone(), on_chain_balance, on_chain_nonce).unwrap();
    let first_in_pool = pool.get(first.id()).unwrap();
    // has nonce gap
    assert!(!first_in_pool.state.contains(TxState::NO_NONCE_GAPS));
    assert_eq!(SubPool::Queued, first_in_pool.subpool);
    let prev = f.validated(tx.prev());
    let InsertOk { updates, replaced_tx, state, move_to, .. } =
        pool.insert_tx(prev, on_chain_balance, on_chain_nonce).unwrap();
    // updated previous tx
    assert_eq!(updates.len(), 1);
    assert!(replaced_tx.is_none());
    assert!(state.contains(TxState::NO_NONCE_GAPS));
    assert_eq!(move_to, SubPool::Pending);
    let first_in_pool = pool.get(first.id()).unwrap();
    // no nonce gap anymore
    assert!(first_in_pool.state.contains(TxState::NO_NONCE_GAPS));
    assert_eq!(SubPool::Pending, first_in_pool.subpool);
}
#[test]
fn insert_previous_blocking() {
    // Closing a nonce gap while the tx's fee cap is below the current base fee
    // moves the chain to the `BaseFee` subpool rather than `Pending`.
    let on_chain_balance = U256::from(1_000);
    let on_chain_nonce = 0;
    let mut f = MockTransactionFactory::default();
    let mut pool = AllTransactions::default();
    // raise the pool's base fee just above the protocol minimum
    pool.pending_fees.base_fee = pool.minimal_protocol_basefee.checked_add(1).unwrap();
    let tx = MockTransaction::eip1559().inc_nonce().inc_limit();
    let first = f.validated(tx.clone());
    let _res = pool.insert_tx(first.clone(), on_chain_balance, on_chain_nonce);
    let first_in_pool = pool.get(first.id()).unwrap();
    // the tx's gas price is below the pool's base fee
    assert!(tx.get_gas_price() < pool.pending_fees.base_fee as u128);
    // has nonce gap
    assert!(!first_in_pool.state.contains(TxState::NO_NONCE_GAPS));
    let prev = f.validated(tx.prev());
    let InsertOk { updates, replaced_tx, state, move_to, .. } =
        pool.insert_tx(prev, on_chain_balance, on_chain_nonce).unwrap();
    // fee cap does not cover the current block base fee
    assert!(!state.contains(TxState::ENOUGH_FEE_CAP_BLOCK));
    // no updates since still in queued pool
    assert!(updates.is_empty());
    assert!(replaced_tx.is_none());
    assert!(state.contains(TxState::NO_NONCE_GAPS));
    assert_eq!(move_to, SubPool::BaseFee);
    let first_in_pool = pool.get(first.id()).unwrap();
    // no nonce gap anymore
    assert!(first_in_pool.state.contains(TxState::NO_NONCE_GAPS));
}
#[test]
fn rejects_spammer() {
    // A non-local sender may only occupy `max_account_slots`; the next tx
    // extending the chain is rejected with `ExceededSenderTransactionsCapacity`.
    let on_chain_balance = U256::from(1_000);
    let on_chain_nonce = 0;
    let mut f = MockTransactionFactory::default();
    let mut pool = AllTransactions::default();
    let mut tx = MockTransaction::eip1559();
    // keep a copy of the starting (lowest-nonce) tx for the final insert below
    let unblocked_tx = tx.clone();
    // fill all account slots with successive-nonce txs
    for _ in 0..pool.max_account_slots {
        tx = tx.next();
        pool.insert_tx(f.validated(tx.clone()), on_chain_balance, on_chain_nonce).unwrap();
    }
    assert_eq!(
        pool.max_account_slots,
        pool.tx_count(f.ids.sender_id(tx.get_sender()).unwrap())
    );
    // one more tx extending the chain is rejected
    let err =
        pool.insert_tx(f.validated(tx.next()), on_chain_balance, on_chain_nonce).unwrap_err();
    assert!(matches!(err, InsertErr::ExceededSenderTransactionsCapacity { .. }));
    // the starting tx is still accepted — NOTE(review): presumably because it
    // closes the nonce gap and unblocks the queued chain; confirm against the
    // capacity-check logic
    assert!(pool
        .insert_tx(f.validated(unblocked_tx), on_chain_balance, on_chain_nonce)
        .is_ok());
}
#[test]
fn allow_local_spamming() {
    // Local transactions are exempt from the `max_account_slots` cap: a local
    // sender can exceed the slot limit without being rejected.
    let on_chain_balance = U256::from(1_000);
    let on_chain_nonce = 0;
    let mut f = MockTransactionFactory::default();
    let mut pool = AllTransactions::default();
    let mut tx = MockTransaction::eip1559();
    // fill all account slots with local txs
    for _ in 0..pool.max_account_slots {
        tx = tx.next();
        pool.insert_tx(
            f.validated_with_origin(TransactionOrigin::Local, tx.clone()),
            on_chain_balance,
            on_chain_nonce,
        )
        .unwrap();
    }
    assert_eq!(
        pool.max_account_slots,
        pool.tx_count(f.ids.sender_id(tx.get_sender()).unwrap())
    );
    // one more local tx beyond the cap still succeeds
    pool.insert_tx(
        f.validated_with_origin(TransactionOrigin::Local, tx.next()),
        on_chain_balance,
        on_chain_nonce,
    )
    .unwrap();
}
#[test]
fn reject_tx_over_gas_limit() {
    // A tx whose gas limit exceeds the available block gas is rejected outright.
    let balance = U256::from(1_000);
    let nonce = 0;
    let mut factory = MockTransactionFactory::default();
    let mut pool = AllTransactions::default();
    // one unit over the 30M block gas limit
    let oversized = factory.validated(MockTransaction::eip1559().with_gas_limit(30_000_001));
    let result = pool.insert_tx(oversized, balance, nonce);
    assert!(matches!(result, Err(InsertErr::TxGasLimitMoreThanAvailableBlockGas { .. })));
}
#[test]
fn test_tx_equal_gas_limit() {
    // A tx whose gas limit exactly equals the 30M block gas limit is accepted
    // and flagged as not consuming too much gas.
    let balance = U256::from(1_000);
    let nonce = 0;
    let mut factory = MockTransactionFactory::default();
    let mut pool = AllTransactions::default();
    let at_limit = factory.validated(MockTransaction::eip1559().with_gas_limit(30_000_000));
    let InsertOk { state, .. } = pool.insert_tx(at_limit, balance, nonce).unwrap();
    assert!(state.contains(TxState::NOT_TOO_MUCH_GAS));
}
#[test]
fn update_basefee_subpools() {
    // Raising the base fee above a pending tx's fee cap demotes it from the
    // pending pool into the basefee pool.
    let mut factory = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    let tx = MockTransaction::eip1559().inc_price_by(10);
    let validated = factory.validated(tx.clone());
    let id = *validated.id();
    pool.add_transaction(validated, U256::from(1_000), 0, None).unwrap();
    assert_eq!(pool.pending_pool.len(), 1);
    // bump the base fee just above the tx's max fee per gas
    pool.update_basefee((tx.max_fee_per_gas() + 1) as u64, |_| {});
    assert!(pool.pending_pool.is_empty());
    assert_eq!(pool.basefee_pool.len(), 1);
    assert_eq!(pool.all_transactions.txs.get(&id).unwrap().subpool, SubPool::BaseFee)
}
#[test]
fn update_basefee_subpools_setting_block_info() {
    // Same demotion as `update_basefee_subpools`, but the base-fee change is
    // driven through `set_block_info` instead of calling `update_basefee`.
    let mut f = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    let tx = MockTransaction::eip1559().inc_price_by(10);
    let validated = f.validated(tx.clone());
    let id = *validated.id();
    pool.add_transaction(validated, U256::from(1_000), 0, None).unwrap();
    assert_eq!(pool.pending_pool.len(), 1);
    // use set_block_info for the basefee update
    let mut block_info = pool.block_info();
    block_info.pending_basefee = (tx.max_fee_per_gas() + 1) as u64;
    pool.set_block_info(block_info);
    assert!(pool.pending_pool.is_empty());
    assert_eq!(pool.basefee_pool.len(), 1);
    assert_eq!(pool.all_transactions.txs.get(&id).unwrap().subpool, SubPool::BaseFee)
}
#[test]
fn basefee_decrease_promotes_affordable_and_keeps_unaffordable() {
    use alloy_primitives::address;
    // When the base fee drops, parked txs whose fee cap now covers the new base
    // fee are promoted to pending, while txs still below it stay parked.
    // (Leftover debug `println!`s were removed — assertions below cover the
    // same information.)
    let mut f = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    // Create transactions that will be in the basefee pool (can't afford the
    // initial high fee). Use different senders to avoid nonce gap issues.
    let sender_a = address!("0x000000000000000000000000000000000000000a");
    let sender_b = address!("0x000000000000000000000000000000000000000b");
    let sender_c = address!("0x000000000000000000000000000000000000000c");
    let tx1 = MockTransaction::eip1559()
        .set_sender(sender_a)
        .set_nonce(0)
        .set_max_fee(500)
        .inc_limit();
    let tx2 = MockTransaction::eip1559()
        .set_sender(sender_b)
        .set_nonce(0)
        .set_max_fee(600)
        .inc_limit();
    let tx3 = MockTransaction::eip1559()
        .set_sender(sender_c)
        .set_nonce(0)
        .set_max_fee(400)
        .inc_limit();
    // Set a high initial basefee so all three transactions park in the basefee pool.
    let mut block_info = pool.block_info();
    block_info.pending_basefee = 700;
    pool.set_block_info(block_info);
    let validated1 = f.validated(tx1);
    let validated2 = f.validated(tx2);
    let validated3 = f.validated(tx3);
    let id1 = *validated1.id();
    let id2 = *validated2.id();
    let id3 = *validated3.id();
    // All transactions have nonce 0 from different senders, so on_chain_nonce is 0
    // for every insertion.
    pool.add_transaction(validated1, U256::from(10_000), 0, None).unwrap();
    pool.add_transaction(validated2, U256::from(10_000), 0, None).unwrap();
    pool.add_transaction(validated3, U256::from(10_000), 0, None).unwrap();
    // Verify they're all parked in the basefee pool.
    assert_eq!(pool.basefee_pool.len(), 3);
    assert_eq!(pool.pending_pool.len(), 0);
    assert_eq!(pool.all_transactions.txs.get(&id1).unwrap().subpool, SubPool::BaseFee);
    assert_eq!(pool.all_transactions.txs.get(&id2).unwrap().subpool, SubPool::BaseFee);
    assert_eq!(pool.all_transactions.txs.get(&id3).unwrap().subpool, SubPool::BaseFee);
    // Now decrease the basefee: tx1 (500) and tx2 (600) can now afford it,
    // tx3 (400) cannot.
    let mut block_info = pool.block_info();
    block_info.pending_basefee = 450;
    pool.set_block_info(block_info);
    // tx1 and tx2 should be promoted to pending; tx3 remains parked.
    assert_eq!(pool.basefee_pool.len(), 1);
    assert_eq!(pool.pending_pool.len(), 2);
    // tx3 should still be in the basefee pool (fee 400 < basefee 450)
    assert_eq!(pool.all_transactions.txs.get(&id3).unwrap().subpool, SubPool::BaseFee);
    // tx1 and tx2 should be in the pending pool with the fee-cap state bit set
    let tx1_meta = pool.all_transactions.txs.get(&id1).unwrap();
    let tx2_meta = pool.all_transactions.txs.get(&id2).unwrap();
    assert_eq!(tx1_meta.subpool, SubPool::Pending);
    assert_eq!(tx2_meta.subpool, SubPool::Pending);
    assert!(tx1_meta.state.contains(TxState::ENOUGH_FEE_CAP_BLOCK));
    assert!(tx2_meta.state.contains(TxState::ENOUGH_FEE_CAP_BLOCK));
    // Verify that best_transactions only yields the promoted transactions.
    let best: Vec<_> = pool.best_transactions().take(3).collect();
    assert_eq!(best.len(), 2); // Only tx1 and tx2 should be returned
    assert!(best.iter().any(|tx| tx.id() == &id1));
    assert!(best.iter().any(|tx| tx.id() == &id2));
}
#[test]
fn apply_fee_updates_records_promotions_after_basefee_drop() {
    // A tx parked in the BaseFee pool must be promoted to pending — and the
    // promotion recorded in the outcome — when `apply_fee_updates` sees a
    // lower pending base fee than the previous one.
    let mut f = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    let tx = MockTransaction::eip1559()
        .with_gas_limit(21_000)
        .with_max_fee(500)
        .with_priority_fee(1);
    let validated = f.validated(tx);
    let id = *validated.id();
    pool.add_transaction(validated, U256::from(1_000_000), 0, None).unwrap();
    assert_eq!(pool.pending_pool.len(), 1);
    // Raise base fee beyond the transaction's cap so it gets parked in BaseFee pool.
    pool.update_basefee(600, |_| {});
    assert!(pool.pending_pool.is_empty());
    assert_eq!(pool.basefee_pool.len(), 1);
    let prev_base_fee = 600;
    let prev_blob_fee = pool.all_transactions.pending_fees.blob_fee;
    // Simulate the canonical state path updating pending fees before applying promotions.
    pool.all_transactions.pending_fees.base_fee = 400;
    let mut outcome = UpdateOutcome::default();
    pool.apply_fee_updates(prev_base_fee, prev_blob_fee, &mut outcome);
    // promoted back to pending and reported in the outcome
    assert_eq!(pool.pending_pool.len(), 1);
    assert!(pool.basefee_pool.is_empty());
    assert_eq!(outcome.promoted.len(), 1);
    assert_eq!(outcome.promoted[0].id(), &id);
    assert_eq!(pool.all_transactions.pending_fees.base_fee, 400);
    assert_eq!(pool.all_transactions.pending_fees.blob_fee, prev_blob_fee);
    let tx_meta = pool.all_transactions.txs.get(&id).unwrap();
    assert_eq!(tx_meta.subpool, SubPool::Pending);
    assert!(tx_meta.state.contains(TxState::ENOUGH_FEE_CAP_BLOCK));
}
#[test]
fn apply_fee_updates_records_promotions_after_blob_fee_drop() {
    // Blob counterpart of the base-fee test: a blob tx parked in the Blob pool
    // is promoted when `apply_fee_updates` sees a lower pending blob fee.
    let mut f = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    let initial_blob_fee = pool.all_transactions.pending_fees.blob_fee;
    let tx = MockTransaction::eip4844().with_blob_fee(initial_blob_fee + 100);
    let validated = f.validated(tx.clone());
    let id = *validated.id();
    pool.add_transaction(validated, U256::from(1_000_000), 0, None).unwrap();
    assert_eq!(pool.pending_pool.len(), 1);
    // Raise blob fee beyond the transaction's cap so it gets parked in Blob pool.
    let increased_blob_fee = tx.max_fee_per_blob_gas().unwrap() + 200;
    pool.update_blob_fee(increased_blob_fee, Ordering::Equal, |_| {});
    assert!(pool.pending_pool.is_empty());
    assert_eq!(pool.blob_pool.len(), 1);
    let prev_base_fee = pool.all_transactions.pending_fees.base_fee;
    let prev_blob_fee = pool.all_transactions.pending_fees.blob_fee;
    // Simulate the canonical state path updating pending fees before applying promotions.
    pool.all_transactions.pending_fees.blob_fee = tx.max_fee_per_blob_gas().unwrap();
    let mut outcome = UpdateOutcome::default();
    pool.apply_fee_updates(prev_base_fee, prev_blob_fee, &mut outcome);
    // promoted back to pending and reported in the outcome
    assert_eq!(pool.pending_pool.len(), 1);
    assert!(pool.blob_pool.is_empty());
    assert_eq!(outcome.promoted.len(), 1);
    assert_eq!(outcome.promoted[0].id(), &id);
    assert_eq!(pool.all_transactions.pending_fees.base_fee, prev_base_fee);
    assert_eq!(pool.all_transactions.pending_fees.blob_fee, tx.max_fee_per_blob_gas().unwrap());
    let tx_meta = pool.all_transactions.txs.get(&id).unwrap();
    assert_eq!(tx_meta.subpool, SubPool::Pending);
    assert!(tx_meta.state.contains(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK));
    assert!(tx_meta.state.contains(TxState::ENOUGH_FEE_CAP_BLOCK));
}
#[test]
fn apply_fee_updates_promotes_blob_after_basefee_drop() {
    // A blob tx parked because of a high *base* fee (not blob fee) is promoted
    // when the base fee drops, with the blob fee left untouched.
    let mut f = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    let initial_blob_fee = pool.all_transactions.pending_fees.blob_fee;
    let tx = MockTransaction::eip4844()
        .with_max_fee(500)
        .with_priority_fee(1)
        .with_blob_fee(initial_blob_fee + 100);
    let validated = f.validated(tx);
    let id = *validated.id();
    pool.add_transaction(validated, U256::from(1_000_000), 0, None).unwrap();
    assert_eq!(pool.pending_pool.len(), 1);
    // Raise base fee beyond the transaction's cap so it gets parked in Blob pool.
    let high_base_fee = 600;
    pool.update_basefee(high_base_fee, |_| {});
    assert!(pool.pending_pool.is_empty());
    assert_eq!(pool.blob_pool.len(), 1);
    let prev_base_fee = high_base_fee;
    let prev_blob_fee = pool.all_transactions.pending_fees.blob_fee;
    // Simulate applying a lower base fee while keeping blob fee unchanged.
    pool.all_transactions.pending_fees.base_fee = 400;
    let mut outcome = UpdateOutcome::default();
    pool.apply_fee_updates(prev_base_fee, prev_blob_fee, &mut outcome);
    // promoted back to pending and reported in the outcome
    assert_eq!(pool.pending_pool.len(), 1);
    assert!(pool.blob_pool.is_empty());
    assert_eq!(outcome.promoted.len(), 1);
    assert_eq!(outcome.promoted[0].id(), &id);
    assert_eq!(pool.all_transactions.pending_fees.base_fee, 400);
    assert_eq!(pool.all_transactions.pending_fees.blob_fee, prev_blob_fee);
    let tx_meta = pool.all_transactions.txs.get(&id).unwrap();
    assert_eq!(tx_meta.subpool, SubPool::Pending);
    assert!(tx_meta.state.contains(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK));
    assert!(tx_meta.state.contains(TxState::ENOUGH_FEE_CAP_BLOCK));
}
#[test]
fn apply_fee_updates_demotes_after_basefee_rise() {
    // The inverse direction: when the pending base fee rises above a pending
    // tx's fee cap, `apply_fee_updates` demotes it to the BaseFee pool and
    // records no promotions.
    let mut f = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    let tx = MockTransaction::eip1559()
        .with_gas_limit(21_000)
        .with_max_fee(400)
        .with_priority_fee(1);
    let validated = f.validated(tx);
    let id = *validated.id();
    pool.add_transaction(validated, U256::from(1_000_000), 0, None).unwrap();
    assert_eq!(pool.pending_pool.len(), 1);
    let prev_base_fee = pool.all_transactions.pending_fees.base_fee;
    let prev_blob_fee = pool.all_transactions.pending_fees.blob_fee;
    // Simulate canonical path raising the base fee beyond the transaction's cap.
    let new_base_fee = prev_base_fee + 1_000;
    pool.all_transactions.pending_fees.base_fee = new_base_fee;
    let mut outcome = UpdateOutcome::default();
    pool.apply_fee_updates(prev_base_fee, prev_blob_fee, &mut outcome);
    // demoted to the BaseFee pool, no promotions reported
    assert!(pool.pending_pool.is_empty());
    assert_eq!(pool.basefee_pool.len(), 1);
    assert!(outcome.promoted.is_empty());
    assert_eq!(pool.all_transactions.pending_fees.base_fee, new_base_fee);
    assert_eq!(pool.all_transactions.pending_fees.blob_fee, prev_blob_fee);
    let tx_meta = pool.all_transactions.txs.get(&id).unwrap();
    assert_eq!(tx_meta.subpool, SubPool::BaseFee);
    assert!(!tx_meta.state.contains(TxState::ENOUGH_FEE_CAP_BLOCK));
}
#[test]
fn get_highest_transaction_by_sender_and_nonce() {
    // `get_highest_nonce_by_sender` / `get_highest_transaction_by_sender` must
    // reflect the sender's tx with the largest nonce.
    // Set up a mock transaction factory and a new transaction pool.
    let mut f = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    // Create a mock transaction and add it to the pool.
    let tx = MockTransaction::eip1559();
    pool.add_transaction(f.validated(tx.clone()), U256::from(1_000), 0, None).unwrap();
    // Create another mock transaction (next nonce) with an incremented price.
    let tx1 = tx.inc_price().next();
    // Validate the second mock transaction and add it to the pool.
    let tx1_validated = f.validated(tx1.clone());
    pool.add_transaction(tx1_validated, U256::from(1_000), 0, None).unwrap();
    // Ensure that the calculated highest nonce for the sender matches the expected value.
    assert_eq!(
        pool.get_highest_nonce_by_sender(f.ids.sender_id(&tx.sender()).unwrap()),
        Some(1)
    );
    // Retrieve the highest transaction by sender.
    let highest_tx = pool
        .get_highest_transaction_by_sender(f.ids.sender_id(&tx.sender()).unwrap())
        .expect("Failed to retrieve highest transaction");
    // Validate that the retrieved highest transaction matches the expected transaction.
    assert_eq!(highest_tx.as_ref().transaction, tx1);
}
#[test]
fn get_highest_consecutive_transaction_by_sender() {
    // `get_highest_consecutive_transaction_by_sender` must return the last tx of
    // the gapless run starting at the given on-chain nonce.
    // Set up a mock transaction factory and a new transaction pool.
    let mut pool = TxPool::new(MockOrdering::default(), PoolConfig::default());
    let mut f = MockTransactionFactory::default();
    // Create transactions with nonces 0, 1, 2, 4, 5, 8, 9 (gaps at 3 and 6-7).
    let sender = Address::random();
    let txs: Vec<_> = vec![0, 1, 2, 4, 5, 8, 9];
    for nonce in txs {
        let mut mock_tx = MockTransaction::eip1559();
        mock_tx.set_sender(sender);
        mock_tx.set_nonce(nonce);
        let validated_tx = f.validated(mock_tx);
        pool.add_transaction(validated_tx, U256::from(1000), 0, None).unwrap();
    }
    // Get the last consecutive transaction for various starting nonces.
    let sender_id = f.ids.sender_id(&sender).unwrap();
    let next_tx =
        pool.get_highest_consecutive_transaction_by_sender(sender_id.into_transaction_id(0));
    assert_eq!(next_tx.map(|tx| tx.nonce()), Some(2), "Expected nonce 2 for on-chain nonce 0");
    let next_tx =
        pool.get_highest_consecutive_transaction_by_sender(sender_id.into_transaction_id(4));
    assert_eq!(next_tx.map(|tx| tx.nonce()), Some(5), "Expected nonce 5 for on-chain nonce 4");
    let next_tx =
        pool.get_highest_consecutive_transaction_by_sender(sender_id.into_transaction_id(5));
    assert_eq!(next_tx.map(|tx| tx.nonce()), Some(5), "Expected nonce 5 for on-chain nonce 5");
    // update the tracked nonce so the run starting at 8 is considered
    let mut info = SenderInfo::default();
    info.update(8, U256::ZERO);
    pool.all_transactions.sender_info.insert(sender_id, info);
    let next_tx =
        pool.get_highest_consecutive_transaction_by_sender(sender_id.into_transaction_id(5));
    assert_eq!(next_tx.map(|tx| tx.nonce()), Some(9), "Expected nonce 9 for on-chain nonce 8");
}
#[test]
fn discard_nonce_too_low() {
    // Advancing the sender's on-chain nonce past the first tx's nonce discards
    // that (now invalid) tx and keeps the remaining one pending.
    let mut f = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    let tx = MockTransaction::eip1559().inc_price_by(10);
    let validated = f.validated(tx.clone());
    let id = *validated.id();
    pool.add_transaction(validated, U256::from(1_000), 0, None).unwrap();
    let next = tx.next();
    let validated = f.validated(next.clone());
    pool.add_transaction(validated, U256::from(1_000), 0, None).unwrap();
    assert_eq!(pool.pending_pool.len(), 2);
    // report the sender's state nonce as the second tx's nonce
    let mut changed_senders = HashMap::default();
    changed_senders.insert(
        id.sender,
        SenderInfo { state_nonce: next.nonce(), balance: U256::from(1_000) },
    );
    let outcome = pool.update_accounts(changed_senders);
    // the first tx (nonce too low) is discarded, the second remains pending
    assert_eq!(outcome.discarded.len(), 1);
    assert_eq!(pool.pending_pool.len(), 1);
}
#[test]
fn discard_with_large_blob_txs() {
    // Overfilling the blob pool by *size* (not tx count) with two oversized
    // parked blob txs; `discard_worst` must evict at least one.
    // init tracing
    reth_tracing::init_test_tracing();
    // this test adds large txs to the parked pool, then attempts to discard the worst
    let mut f = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    let default_limits = pool.config.blob_limit;
    // create a chain of transactions by sender A
    // make sure they are all one over half the limit
    let a_sender = address!("0x000000000000000000000000000000000000000a");
    // set the base fee of the pool
    let mut block_info = pool.block_info();
    block_info.pending_blob_fee = Some(100);
    block_info.pending_basefee = 100;
    // update
    pool.set_block_info(block_info);
    // 2 txs, that should put the pool over the size limit but not max txs
    let a_txs = MockTransactionSet::dependent(a_sender, 0, 2, TxType::Eip4844)
        .into_iter()
        .map(|mut tx| {
            tx.set_size(default_limits.max_size / 2 + 1);
            // max fee below the base fee keeps the txs parked
            tx.set_max_fee((block_info.pending_basefee - 1).into());
            tx
        })
        .collect::<Vec<_>>();
    // add all the transactions to the parked pool
    for tx in a_txs {
        pool.add_transaction(f.validated(tx), U256::from(1_000), 0, None).unwrap();
    }
    // truncate the pool, it should remove at least one transaction
    let removed = pool.discard_worst();
    assert_eq!(removed.len(), 1);
}
#[test]
fn discard_with_parked_large_txs() {
    // Same size-based overfill as `discard_with_large_blob_txs`, but for the
    // queued (parked EIP-1559) pool.
    // init tracing
    reth_tracing::init_test_tracing();
    // this test adds large txs to the parked pool, then attempts to discard the worst
    let mut f = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    let default_limits = pool.config.queued_limit;
    // create a chain of transactions by sender A
    // make sure they are all one over half the limit
    let a_sender = address!("0x000000000000000000000000000000000000000a");
    // set the base fee of the pool
    let pool_base_fee = 100;
    pool.update_basefee(pool_base_fee, |_| {});
    // 3 txs, that should put the pool over the size limit but not max txs
    let a_txs = MockTransactionSet::dependent(a_sender, 0, 3, TxType::Eip1559)
        .into_iter()
        .map(|mut tx| {
            tx.set_size(default_limits.max_size / 2 + 1);
            // max fee below the base fee keeps the txs parked
            tx.set_max_fee((pool_base_fee - 1).into());
            tx
        })
        .collect::<Vec<_>>();
    // add all the transactions to the parked pool
    for tx in a_txs {
        pool.add_transaction(f.validated(tx), U256::from(1_000), 0, None).unwrap();
    }
    // truncate the pool, it should remove at least one transaction
    let removed = pool.discard_worst();
    assert_eq!(removed.len(), 1);
}
#[test]
fn discard_at_capacity() {
    // Repeatedly overfilling the queued pool and calling `discard_worst` must
    // keep the pool within its configured tx limit while preserving invariants.
    let mut f = MockTransactionFactory::default();
    let queued_limit = SubPoolLimit::new(1000, usize::MAX);
    let mut pool =
        TxPool::new(MockOrdering::default(), PoolConfig { queued_limit, ..Default::default() });
    // insert a bunch of transactions into the queued pool
    for _ in 0..queued_limit.max_txs {
        let tx = MockTransaction::eip1559().inc_price_by(10).inc_nonce();
        let validated = f.validated(tx.clone());
        let _id = *validated.id();
        pool.add_transaction(validated, U256::from(1_000), 0, None).unwrap();
    }
    let size = pool.size();
    assert_eq!(size.queued, queued_limit.max_txs);
    // each additional insertion followed by discard must keep the pool at/below the cap
    for _ in 0..queued_limit.max_txs {
        let tx = MockTransaction::eip1559().inc_price_by(10).inc_nonce();
        let validated = f.validated(tx.clone());
        let _id = *validated.id();
        pool.add_transaction(validated, U256::from(1_000), 0, None).unwrap();
        pool.discard_worst();
        pool.assert_invariants();
        assert!(pool.size().queued <= queued_limit.max_txs);
    }
}
#[test]
fn discard_blobs_at_capacity() {
    // Blob-pool counterpart of `discard_at_capacity`: overfill with blob txs and
    // check `discard_worst` keeps the pool within the blob tx limit.
    let mut f = MockTransactionFactory::default();
    let blob_limit = SubPoolLimit::new(1000, usize::MAX);
    let mut pool =
        TxPool::new(MockOrdering::default(), PoolConfig { blob_limit, ..Default::default() });
    // high pending blob fee keeps the blob txs parked in the blob pool
    pool.all_transactions.pending_fees.blob_fee = 10000;
    // insert a bunch of transactions into the blob pool
    for _ in 0..blob_limit.max_txs {
        let tx = MockTransaction::eip4844().inc_price_by(100).with_blob_fee(100);
        let validated = f.validated(tx.clone());
        let _id = *validated.id();
        pool.add_transaction(validated, U256::from(1_000), 0, None).unwrap();
    }
    let size = pool.size();
    assert_eq!(size.blob, blob_limit.max_txs);
    // each additional insertion followed by discard must keep the pool at/below the cap
    for _ in 0..blob_limit.max_txs {
        let tx = MockTransaction::eip4844().inc_price_by(100).with_blob_fee(100);
        let validated = f.validated(tx.clone());
        let _id = *validated.id();
        pool.add_transaction(validated, U256::from(1_000), 0, None).unwrap();
        pool.discard_worst();
        pool.assert_invariants();
        assert!(pool.size().blob <= blob_limit.max_txs);
    }
}
#[test]
fn account_updates_sender_balance() {
    // Balance changes reported via `update_accounts` move a sender's txs between
    // pending and queued: a balance increase promotes, a decrease demotes.
    let mut on_chain_balance = U256::from(100);
    let on_chain_nonce = 0;
    let mut f = MockTransactionFactory::default();
    let mut pool = TxPool::new(MockOrdering::default(), Default::default());
    let tx_0 = MockTransaction::eip1559().set_gas_price(100).inc_limit();
    let tx_1 = tx_0.next();
    let tx_2 = tx_1.next();
    // Create 3 transactions
    let v0 = f.validated(tx_0);
    let v1 = f.validated(tx_1);
    let v2 = f.validated(tx_2);
    let _res =
        pool.add_transaction(v0.clone(), on_chain_balance, on_chain_nonce, None).unwrap();
    let _res = pool.add_transaction(v1, on_chain_balance, on_chain_nonce, None).unwrap();
    let _res = pool.add_transaction(v2, on_chain_balance, on_chain_nonce, None).unwrap();
    // The sender does not have enough balance to put all txs into pending.
    assert_eq!(1, pool.pending_transactions().len());
    assert_eq!(2, pool.queued_transactions().len());
    // Simulate new block arrival - and chain balance increase.
    let mut updated_accounts = HashMap::default();
    on_chain_balance = U256::from(300);
    updated_accounts.insert(
        v0.sender_id(),
        SenderInfo { state_nonce: on_chain_nonce, balance: on_chain_balance },
    );
    pool.update_accounts(updated_accounts.clone());
    // with enough balance, all three txs are now pending
    assert_eq!(3, pool.pending_transactions().len());
    assert!(pool.queued_transactions().is_empty());
    // Simulate new block arrival - and chain balance decrease.
    updated_accounts.entry(v0.sender_id()).and_modify(|v| v.balance = U256::from(1));
    pool.update_accounts(updated_accounts);
    // with the balance drained, all three txs are demoted to queued
    assert!(pool.pending_transactions().is_empty());
    assert_eq!(3, pool.queued_transactions().len());
}
#[test]
fn account_updates_nonce_gap() {
let on_chain_balance = U256::from(10_000);
let mut on_chain_nonce = 0;
let mut f = MockTransactionFactory::default();
let mut pool = TxPool::new(MockOrdering::default(), Default::default());
let tx_0 = MockTransaction::eip1559().set_gas_price(100).inc_limit();
let tx_1 = tx_0.next();
let tx_2 = tx_1.next();
// Create 3 transactions
let v0 = f.validated(tx_0);
let v1 = f.validated(tx_1);
let v2 = f.validated(tx_2);
// Add first 2 to the pool
let _res =
pool.add_transaction(v0.clone(), on_chain_balance, on_chain_nonce, None).unwrap();
let _res = pool.add_transaction(v1, on_chain_balance, on_chain_nonce, None).unwrap();
assert!(pool.queued_transactions().is_empty());
assert_eq!(2, pool.pending_transactions().len());
// Remove first (nonce 0)
pool.remove_transaction_by_hash(v0.hash());
// Now add transaction with nonce 2
let _res = pool.add_transaction(v2, on_chain_balance, on_chain_nonce, None).unwrap();
// v1 and v2 should both be in the queue now.
assert_eq!(2, pool.queued_transactions().len());
assert!(pool.pending_transactions().is_empty());
// Simulate new block arrival - and chain nonce increasing.
let mut updated_accounts = HashMap::default();
on_chain_nonce += 1;
updated_accounts.insert(
v0.sender_id(),
SenderInfo { state_nonce: on_chain_nonce, balance: on_chain_balance },
);
pool.update_accounts(updated_accounts);
// 'pending' now).
assert!(pool.queued_transactions().is_empty());
assert_eq!(2, pool.pending_transactions().len());
}
#[test]
fn test_transaction_removal() {
let on_chain_balance = U256::from(10_000);
let on_chain_nonce = 0;
let mut f = MockTransactionFactory::default();
let mut pool = TxPool::new(MockOrdering::default(), Default::default());
let tx_0 = MockTransaction::eip1559().set_gas_price(100).inc_limit();
let tx_1 = tx_0.next();
// Create 2 transactions
let v0 = f.validated(tx_0);
let v1 = f.validated(tx_1);
// Add them to the pool
let _res =
pool.add_transaction(v0.clone(), on_chain_balance, on_chain_nonce, None).unwrap();
let _res =
pool.add_transaction(v1.clone(), on_chain_balance, on_chain_nonce, None).unwrap();
assert_eq!(0, pool.queued_transactions().len());
assert_eq!(2, pool.pending_transactions().len());
// Remove first (nonce 0) - simulating that it was taken to be a part of the block.
pool.remove_transaction(v0.id());
// assert the second transaction is really at the top of the queue
let pool_txs = pool.best_transactions().map(|x| x.id().nonce).collect::<Vec<_>>();
assert_eq!(vec![v1.nonce()], pool_txs);
}
#[test]
fn test_remove_transactions() {
let on_chain_balance = U256::from(10_000);
let on_chain_nonce = 0;
let mut f = MockTransactionFactory::default();
let mut pool = TxPool::new(MockOrdering::default(), Default::default());
let tx_0 = MockTransaction::eip1559().set_gas_price(100).inc_limit();
let tx_1 = tx_0.next();
let tx_2 = MockTransaction::eip1559().set_gas_price(100).inc_limit();
let tx_3 = tx_2.next();
// Create 4 transactions
let v0 = f.validated(tx_0);
let v1 = f.validated(tx_1);
let v2 = f.validated(tx_2);
let v3 = f.validated(tx_3);
// Add them to the pool
let _res =
pool.add_transaction(v0.clone(), on_chain_balance, on_chain_nonce, None).unwrap();
let _res =
pool.add_transaction(v1.clone(), on_chain_balance, on_chain_nonce, None).unwrap();
let _res =
pool.add_transaction(v2.clone(), on_chain_balance, on_chain_nonce, None).unwrap();
let _res =
pool.add_transaction(v3.clone(), on_chain_balance, on_chain_nonce, None).unwrap();
assert_eq!(0, pool.queued_transactions().len());
assert_eq!(4, pool.pending_transactions().len());
pool.remove_transactions(vec![*v0.hash(), *v2.hash()]);
assert_eq!(2, pool.queued_transactions().len());
assert!(pool.pending_transactions().is_empty());
assert!(pool.contains(v1.hash()));
assert!(pool.contains(v3.hash()));
}
#[test]
fn test_remove_transactions_middle_pending_hash() {
let on_chain_balance = U256::from(10_000);
let on_chain_nonce = 0;
let mut f = MockTransactionFactory::default();
let mut pool = TxPool::new(MockOrdering::default(), Default::default());
let tx_0 = MockTransaction::eip1559().set_gas_price(100).inc_limit();
let tx_1 = tx_0.next();
let tx_2 = tx_1.next();
let tx_3 = tx_2.next();
// Create 4 transactions
let v0 = f.validated(tx_0);
let v1 = f.validated(tx_1);
let v2 = f.validated(tx_2);
let v3 = f.validated(tx_3);
// Add them to the pool
let _res = pool.add_transaction(v0, on_chain_balance, on_chain_nonce, None).unwrap();
let _res =
pool.add_transaction(v1.clone(), on_chain_balance, on_chain_nonce, None).unwrap();
let _res = pool.add_transaction(v2, on_chain_balance, on_chain_nonce, None).unwrap();
let _res = pool.add_transaction(v3, on_chain_balance, on_chain_nonce, None).unwrap();
assert_eq!(0, pool.queued_transactions().len());
assert_eq!(4, pool.pending_transactions().len());
let mut removed_txs = pool.remove_transactions(vec![*v1.hash()]);
assert_eq!(1, removed_txs.len());
assert_eq!(2, pool.queued_transactions().len());
assert_eq!(1, pool.pending_transactions().len());
// reinsert
let removed_tx = removed_txs.pop().unwrap();
let v1 = f.validated(removed_tx.transaction.clone());
let _res = pool.add_transaction(v1, on_chain_balance, on_chain_nonce, None).unwrap();
assert_eq!(0, pool.queued_transactions().len());
assert_eq!(4, pool.pending_transactions().len());
}
#[test]
fn test_remove_transactions_and_descendants() {
let on_chain_balance = U256::from(10_000);
let on_chain_nonce = 0;
let mut f = MockTransactionFactory::default();
let mut pool = TxPool::new(MockOrdering::default(), Default::default());
let tx_0 = MockTransaction::eip1559().set_gas_price(100).inc_limit();
let tx_1 = tx_0.next();
let tx_2 = MockTransaction::eip1559().set_gas_price(100).inc_limit();
let tx_3 = tx_2.next();
let tx_4 = tx_3.next();
// Create 5 transactions
let v0 = f.validated(tx_0);
let v1 = f.validated(tx_1);
let v2 = f.validated(tx_2);
let v3 = f.validated(tx_3);
let v4 = f.validated(tx_4);
// Add them to the pool
let _res =
pool.add_transaction(v0.clone(), on_chain_balance, on_chain_nonce, None).unwrap();
let _res = pool.add_transaction(v1, on_chain_balance, on_chain_nonce, None).unwrap();
let _res =
pool.add_transaction(v2.clone(), on_chain_balance, on_chain_nonce, None).unwrap();
let _res = pool.add_transaction(v3, on_chain_balance, on_chain_nonce, None).unwrap();
let _res = pool.add_transaction(v4, on_chain_balance, on_chain_nonce, None).unwrap();
assert_eq!(0, pool.queued_transactions().len());
assert_eq!(5, pool.pending_transactions().len());
pool.remove_transactions_and_descendants(vec![*v0.hash(), *v2.hash()]);
assert_eq!(0, pool.queued_transactions().len());
assert_eq!(0, pool.pending_transactions().len());
}
#[test]
fn test_remove_descendants() {
let on_chain_balance = U256::from(10_000);
let on_chain_nonce = 0;
let mut f = MockTransactionFactory::default();
let mut pool = TxPool::new(MockOrdering::default(), Default::default());
let tx_0 = MockTransaction::eip1559().set_gas_price(100).inc_limit();
let tx_1 = tx_0.next();
let tx_2 = tx_1.next();
let tx_3 = tx_2.next();
// Create 4 transactions
let v0 = f.validated(tx_0);
let v1 = f.validated(tx_1);
let v2 = f.validated(tx_2);
let v3 = f.validated(tx_3);
// Add them to the pool
let _res =
pool.add_transaction(v0.clone(), on_chain_balance, on_chain_nonce, None).unwrap();
let _res = pool.add_transaction(v1, on_chain_balance, on_chain_nonce, None).unwrap();
let _res = pool.add_transaction(v2, on_chain_balance, on_chain_nonce, None).unwrap();
let _res = pool.add_transaction(v3, on_chain_balance, on_chain_nonce, None).unwrap();
assert_eq!(0, pool.queued_transactions().len());
assert_eq!(4, pool.pending_transactions().len());
let mut removed = Vec::new();
pool.remove_transaction(v0.id());
pool.remove_descendants(v0.id(), &mut removed);
assert_eq!(0, pool.queued_transactions().len());
assert_eq!(0, pool.pending_transactions().len());
assert_eq!(3, removed.len());
}
#[test]
fn test_remove_transactions_by_sender() {
let on_chain_balance = U256::from(10_000);
let on_chain_nonce = 0;
let mut f = MockTransactionFactory::default();
let mut pool = TxPool::new(MockOrdering::default(), Default::default());
let tx_0 = MockTransaction::eip1559().set_gas_price(100).inc_limit();
let tx_1 = tx_0.next();
let tx_2 = MockTransaction::eip1559().set_gas_price(100).inc_limit();
let tx_3 = tx_2.next();
let tx_4 = tx_3.next();
// Create 5 transactions
let v0 = f.validated(tx_0);
let v1 = f.validated(tx_1);
let v2 = f.validated(tx_2);
let v3 = f.validated(tx_3);
let v4 = f.validated(tx_4);
// Add them to the pool
let _res =
pool.add_transaction(v0.clone(), on_chain_balance, on_chain_nonce, None).unwrap();
let _res =
pool.add_transaction(v1.clone(), on_chain_balance, on_chain_nonce, None).unwrap();
let _res =
pool.add_transaction(v2.clone(), on_chain_balance, on_chain_nonce, None).unwrap();
let _res = pool.add_transaction(v3, on_chain_balance, on_chain_nonce, None).unwrap();
let _res = pool.add_transaction(v4, on_chain_balance, on_chain_nonce, None).unwrap();
assert_eq!(0, pool.queued_transactions().len());
assert_eq!(5, pool.pending_transactions().len());
pool.remove_transactions_by_sender(v2.sender_id());
assert_eq!(0, pool.queued_transactions().len());
assert_eq!(2, pool.pending_transactions().len());
assert!(pool.contains(v0.hash()));
assert!(pool.contains(v1.hash()));
}
#[test]
fn wrong_best_order_of_transactions() {
let on_chain_balance = U256::from(10_000);
let mut on_chain_nonce = 0;
let mut f = MockTransactionFactory::default();
let mut pool = TxPool::new(MockOrdering::default(), Default::default());
let tx_0 = MockTransaction::eip1559().set_gas_price(100).inc_limit();
let tx_1 = tx_0.next();
let tx_2 = tx_1.next();
let tx_3 = tx_2.next();
// Create 4 transactions
let v0 = f.validated(tx_0);
let v1 = f.validated(tx_1);
let v2 = f.validated(tx_2);
let v3 = f.validated(tx_3);
// Add first 2 to the pool
let _res =
pool.add_transaction(v0.clone(), on_chain_balance, on_chain_nonce, None).unwrap();
let _res = pool.add_transaction(v1, on_chain_balance, on_chain_nonce, None).unwrap();
assert_eq!(0, pool.queued_transactions().len());
assert_eq!(2, pool.pending_transactions().len());
// Remove first (nonce 0) - simulating that it was taken to be a part of the block.
pool.remove_transaction(v0.id());
// Now add transaction with nonce 2
let _res = pool.add_transaction(v2, on_chain_balance, on_chain_nonce, None).unwrap();
// v2 is in the queue now. v1 is still in 'pending'.
assert_eq!(1, pool.queued_transactions().len());
assert_eq!(1, pool.pending_transactions().len());
// Simulate new block arrival - and chain nonce increasing.
let mut updated_accounts = HashMap::default();
on_chain_nonce += 1;
updated_accounts.insert(
v0.sender_id(),
SenderInfo { state_nonce: on_chain_nonce, balance: on_chain_balance },
);
pool.update_accounts(updated_accounts);
// Transactions are not changed (IMHO - this is a bug, as transaction v2 should be in the
// 'pending' now).
assert_eq!(0, pool.queued_transactions().len());
assert_eq!(2, pool.pending_transactions().len());
// Add transaction v3 - it 'unclogs' everything.
let _res = pool.add_transaction(v3, on_chain_balance, on_chain_nonce, None).unwrap();
assert_eq!(0, pool.queued_transactions().len());
assert_eq!(3, pool.pending_transactions().len());
// It should have returned transactions in order (v1, v2, v3 - as there is nothing blocking
// them).
assert_eq!(
pool.best_transactions().map(|x| x.id().nonce).collect::<Vec<_>>(),
vec![1, 2, 3]
);
}
#[test]
fn test_best_with_attributes() {
let on_chain_balance = U256::MAX;
let on_chain_nonce = 0;
let mut f = MockTransactionFactory::default();
let mut pool = TxPool::new(MockOrdering::default(), Default::default());
let base_fee: u128 = 100;
let blob_fee: u128 = 100;
// set base fee and blob fee.
let mut block_info = pool.block_info();
block_info.pending_basefee = base_fee as u64;
block_info.pending_blob_fee = Some(blob_fee);
pool.set_block_info(block_info);
// Insert transactions with varying max_fee_per_gas and max_fee_per_blob_gas.
let tx1 = MockTransaction::eip4844()
.with_sender(Address::with_last_byte(1))
.with_max_fee(base_fee + 10)
.with_blob_fee(blob_fee + 10);
let tx2 = MockTransaction::eip4844()
.with_sender(Address::with_last_byte(2))
.with_max_fee(base_fee + 10)
.with_blob_fee(blob_fee);
let tx3 = MockTransaction::eip4844()
.with_sender(Address::with_last_byte(3))
.with_max_fee(base_fee)
.with_blob_fee(blob_fee + 10);
let tx4 = MockTransaction::eip4844()
.with_sender(Address::with_last_byte(4))
.with_max_fee(base_fee)
.with_blob_fee(blob_fee);
let tx5 = MockTransaction::eip4844()
.with_sender(Address::with_last_byte(5))
.with_max_fee(base_fee)
.with_blob_fee(blob_fee - 10);
let tx6 = MockTransaction::eip4844()
.with_sender(Address::with_last_byte(6))
.with_max_fee(base_fee - 10)
.with_blob_fee(blob_fee);
let tx7 = MockTransaction::eip4844()
.with_sender(Address::with_last_byte(7))
.with_max_fee(base_fee - 10)
.with_blob_fee(blob_fee - 10);
for tx in vec![
tx1.clone(),
tx2.clone(),
tx3.clone(),
tx4.clone(),
tx5.clone(),
tx6.clone(),
tx7.clone(),
] {
pool.add_transaction(f.validated(tx.clone()), on_chain_balance, on_chain_nonce, None)
.unwrap();
}
let base_fee = base_fee as u64;
let blob_fee = blob_fee as u64;
let cases = vec![
// 1. Base fee increase, blob fee increase
(BestTransactionsAttributes::new(base_fee + 5, Some(blob_fee + 5)), vec![tx1.clone()]),
// 2. Base fee increase, blob fee not change
(
BestTransactionsAttributes::new(base_fee + 5, Some(blob_fee)),
vec![tx1.clone(), tx2.clone()],
),
// 3. Base fee increase, blob fee decrease
(
BestTransactionsAttributes::new(base_fee + 5, Some(blob_fee - 5)),
vec![tx1.clone(), tx2.clone()],
),
// 4. Base fee not change, blob fee increase
(
BestTransactionsAttributes::new(base_fee, Some(blob_fee + 5)),
vec![tx1.clone(), tx3.clone()],
),
// 5. Base fee not change, blob fee not change
(
BestTransactionsAttributes::new(base_fee, Some(blob_fee)),
vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()],
),
// 6. Base fee not change, blob fee decrease
(
BestTransactionsAttributes::new(base_fee, Some(blob_fee - 10)),
vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone(), tx5.clone()],
),
// 7. Base fee decrease, blob fee increase
(
BestTransactionsAttributes::new(base_fee - 5, Some(blob_fee + 5)),
vec![tx1.clone(), tx3.clone()],
),
// 8. Base fee decrease, blob fee not change
(
BestTransactionsAttributes::new(base_fee - 10, Some(blob_fee)),
vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone(), tx6.clone()],
),
// 9. Base fee decrease, blob fee decrease
(
BestTransactionsAttributes::new(base_fee - 10, Some(blob_fee - 10)),
vec![tx1, tx2, tx5, tx3, tx4, tx6, tx7],
),
];
for (idx, (attribute, expected)) in cases.into_iter().enumerate() {
let mut best = pool.best_transactions_with_attributes(attribute);
for (tx_idx, expected_tx) in expected.into_iter().enumerate() {
let tx = best.next().expect("Transaction should be returned");
assert_eq!(
tx.transaction,
expected_tx,
"Failed tx {} in case {}",
tx_idx + 1,
idx + 1
);
}
// No more transactions should be returned
assert!(best.next().is_none());
}
}
#[test]
fn test_pending_ordering() {
let mut f = MockTransactionFactory::default();
let mut pool = TxPool::new(MockOrdering::default(), Default::default());
let tx_0 = MockTransaction::eip1559().with_nonce(1).set_gas_price(100).inc_limit();
let tx_1 = tx_0.next();
let v0 = f.validated(tx_0);
let v1 = f.validated(tx_1);
// nonce gap, tx should be queued
pool.add_transaction(v0.clone(), U256::MAX, 0, None).unwrap();
assert_eq!(1, pool.queued_transactions().len());
// nonce gap is closed on-chain, both transactions should be moved to pending
pool.add_transaction(v1, U256::MAX, 1, None).unwrap();
assert_eq!(2, pool.pending_transactions().len());
assert_eq!(0, pool.queued_transactions().len());
assert_eq!(
pool.pending_pool.independent().get(&v0.sender_id()).unwrap().transaction.nonce(),
v0.nonce()
);
}
// <https://github.com/paradigmxyz/reth/issues/12286>
#[test]
fn one_sender_one_independent_transaction() {
let mut on_chain_balance = U256::from(4_999); // only enough for 4 txs
let mut on_chain_nonce = 40;
let mut f = MockTransactionFactory::default();
let mut pool = TxPool::mock();
let mut submitted_txs = Vec::new();
// We use a "template" because we want all txs to have the same sender.
let template =
MockTransaction::eip1559().inc_price().inc_limit().with_value(U256::from(1_001));
// Add 8 txs. Because the balance is only sufficient for 4, so the last 4 will be
// Queued.
for tx_nonce in 40..48 {
let tx = f.validated(template.clone().with_nonce(tx_nonce).rng_hash());
submitted_txs.push(*tx.id());
pool.add_transaction(tx, on_chain_balance, on_chain_nonce, None).unwrap();
}
// A block is mined with two txs (so nonce is changed from 40 to 42).
// Now the balance gets so high that it's enough to execute alltxs.
on_chain_balance = U256::from(999_999);
on_chain_nonce = 42;
pool.remove_transaction(&submitted_txs[0]);
pool.remove_transaction(&submitted_txs[1]);
// Add 4 txs.
for tx_nonce in 48..52 {
pool.add_transaction(
f.validated(template.clone().with_nonce(tx_nonce).rng_hash()),
on_chain_balance,
on_chain_nonce,
None,
)
.unwrap();
}
let best_txs: Vec<_> = pool.pending().best().map(|tx| *tx.id()).collect();
assert_eq!(best_txs.len(), 10); // 8 - 2 + 4 = 10
assert_eq!(pool.pending_pool.independent().len(), 1);
}
#[test]
fn test_insertion_disorder() {
let mut f = MockTransactionFactory::default();
let mut pool = TxPool::new(MockOrdering::default(), Default::default());
let sender = address!("0x1234567890123456789012345678901234567890");
let tx0 = f.validated_arc(
MockTransaction::legacy().with_sender(sender).with_nonce(0).with_gas_price(10),
);
let tx1 = f.validated_arc(
MockTransaction::eip1559()
.with_sender(sender)
.with_nonce(1)
.with_gas_limit(1000)
.with_gas_price(10),
);
let tx2 = f.validated_arc(
MockTransaction::legacy().with_sender(sender).with_nonce(2).with_gas_price(10),
);
let tx3 = f.validated_arc(
MockTransaction::legacy().with_sender(sender).with_nonce(3).with_gas_price(10),
);
// tx0 should be put in the pending subpool
pool.add_transaction((*tx0).clone(), U256::from(1000), 0, None).unwrap();
let mut best = pool.best_transactions();
let t0 = best.next().expect("tx0 should be put in the pending subpool");
assert_eq!(t0.id(), tx0.id());
// tx1 should be put in the queued subpool due to insufficient sender balance
pool.add_transaction((*tx1).clone(), U256::from(1000), 0, None).unwrap();
let mut best = pool.best_transactions();
let t0 = best.next().expect("tx0 should be put in the pending subpool");
assert_eq!(t0.id(), tx0.id());
assert!(best.next().is_none());
// tx2 should be put in the pending subpool, and tx1 should be promoted to pending
pool.add_transaction((*tx2).clone(), U256::MAX, 0, None).unwrap();
let mut best = pool.best_transactions();
let t0 = best.next().expect("tx0 should be put in the pending subpool");
let t1 = best.next().expect("tx1 should be put in the pending subpool");
let t2 = best.next().expect("tx2 should be put in the pending subpool");
assert_eq!(t0.id(), tx0.id());
assert_eq!(t1.id(), tx1.id());
assert_eq!(t2.id(), tx2.id());
// tx3 should be put in the pending subpool,
pool.add_transaction((*tx3).clone(), U256::MAX, 0, None).unwrap();
let mut best = pool.best_transactions();
let t0 = best.next().expect("tx0 should be put in the pending subpool");
let t1 = best.next().expect("tx1 should be put in the pending subpool");
let t2 = best.next().expect("tx2 should be put in the pending subpool");
let t3 = best.next().expect("tx3 should be put in the pending subpool");
assert_eq!(t0.id(), tx0.id());
assert_eq!(t1.id(), tx1.id());
assert_eq!(t2.id(), tx2.id());
assert_eq!(t3.id(), tx3.id());
}
#[test]
fn test_non_4844_blob_fee_bit_invariant() {
let mut f = MockTransactionFactory::default();
let mut pool = TxPool::new(MockOrdering::default(), Default::default());
let non_4844_tx = MockTransaction::eip1559().set_max_fee(200).inc_limit();
let validated = f.validated(non_4844_tx.clone());
assert!(!non_4844_tx.is_eip4844());
pool.add_transaction(validated.clone(), U256::from(10_000), 0, None).unwrap();
// Core invariant: Non-4844 transactions must ALWAYS have ENOUGH_BLOB_FEE_CAP_BLOCK bit
let tx_meta = pool.all_transactions.txs.get(validated.id()).unwrap();
assert!(tx_meta.state.contains(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK));
assert_eq!(tx_meta.subpool, SubPool::Pending);
}
#[test]
fn test_blob_fee_enforcement_only_applies_to_eip4844() {
let mut f = MockTransactionFactory::default();
let mut pool = TxPool::new(MockOrdering::default(), Default::default());
// Set blob fee higher than EIP-4844 tx can afford
let mut block_info = pool.block_info();
block_info.pending_blob_fee = Some(160);
block_info.pending_basefee = 100;
pool.set_block_info(block_info);
let eip4844_tx = MockTransaction::eip4844()
.with_sender(address!("0x000000000000000000000000000000000000000a"))
.with_max_fee(200)
.with_blob_fee(150) // Less than block blob fee (160)
.inc_limit();
let non_4844_tx = MockTransaction::eip1559()
.with_sender(address!("0x000000000000000000000000000000000000000b"))
.set_max_fee(200)
.inc_limit();
let validated_4844 = f.validated(eip4844_tx);
let validated_non_4844 = f.validated(non_4844_tx);
pool.add_transaction(validated_4844.clone(), U256::from(10_000), 0, None).unwrap();
pool.add_transaction(validated_non_4844.clone(), U256::from(10_000), 0, None).unwrap();
let tx_4844_meta = pool.all_transactions.txs.get(validated_4844.id()).unwrap();
let tx_non_4844_meta = pool.all_transactions.txs.get(validated_non_4844.id()).unwrap();
// EIP-4844: blob fee enforcement applies - insufficient blob fee removes bit
assert!(!tx_4844_meta.state.contains(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK));
assert_eq!(tx_4844_meta.subpool, SubPool::Blob);
// Non-4844: blob fee enforcement does NOT apply - bit always remains true
assert!(tx_non_4844_meta.state.contains(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK));
assert_eq!(tx_non_4844_meta.subpool, SubPool::Pending);
}
#[test]
fn test_basefee_decrease_preserves_non_4844_blob_fee_bit() {
let mut f = MockTransactionFactory::default();
let mut pool = TxPool::new(MockOrdering::default(), Default::default());
// Create non-4844 transaction with fee that initially can't afford high basefee
let non_4844_tx = MockTransaction::eip1559()
.with_sender(address!("0x000000000000000000000000000000000000000a"))
.set_max_fee(500) // Can't afford basefee of 600
.inc_limit();
// Set high basefee so transaction goes to BaseFee pool initially
pool.update_basefee(600, |_| {});
let validated = f.validated(non_4844_tx);
let tx_id = *validated.id();
pool.add_transaction(validated, U256::from(10_000), 0, None).unwrap();
// Initially should be in BaseFee pool but STILL have blob fee bit (critical invariant)
let tx_meta = pool.all_transactions.txs.get(&tx_id).unwrap();
assert_eq!(tx_meta.subpool, SubPool::BaseFee);
assert!(
tx_meta.state.contains(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK),
"Non-4844 tx in BaseFee pool must retain ENOUGH_BLOB_FEE_CAP_BLOCK bit"
);
// Decrease basefee - transaction should be promoted to Pending
// This is where PR #18215 bug would manifest: blob fee bit incorrectly removed
pool.update_basefee(400, |_| {});
// After basefee decrease: should be promoted to Pending with blob fee bit preserved
let tx_meta = pool.all_transactions.txs.get(&tx_id).unwrap();
assert_eq!(
tx_meta.subpool,
SubPool::Pending,
"Non-4844 tx should be promoted from BaseFee to Pending after basefee decrease"
);
assert!(
tx_meta.state.contains(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK),
"Non-4844 tx must NEVER lose ENOUGH_BLOB_FEE_CAP_BLOCK bit during basefee promotion"
);
assert!(
tx_meta.state.contains(TxState::ENOUGH_FEE_CAP_BLOCK),
"Non-4844 tx should gain ENOUGH_FEE_CAP_BLOCK bit after basefee decrease"
);
}
}