Mirror of https://github.com/paradigmxyz/reth.git (synced 2026-01-31 10:08:13 -05:00)

Merge branch 'main' of github.com:foundry-rs/reth into rkrasiuk/stage-test-suite
.gitignore (vendored, 2 changes)
@@ -1,2 +1,2 @@
 .idea
-/target
+target
Cargo.lock (generated, 40 changes)
@@ -2155,6 +2155,15 @@ version = "0.5.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"

+[[package]]
+name = "linked_hash_set"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588"
+dependencies = [
+ "linked-hash-map",
+]
+
 [[package]]
 name = "lock_api"
 version = "0.4.9"
@@ -3043,6 +3052,21 @@ dependencies = [
 "walkdir",
 ]

+[[package]]
+name = "reth-bodies-downloaders"
+version = "0.1.0"
+dependencies = [
+ "assert_matches",
+ "futures-util",
+ "once_cell",
+ "rand 0.8.5",
+ "reth-eth-wire",
+ "reth-interfaces",
+ "reth-primitives",
+ "serial_test",
+ "tokio",
+]
+
 [[package]]
 name = "reth-codecs"
 version = "0.1.0"
@@ -3057,16 +3081,11 @@ dependencies = [
 "async-trait",
 "auto_impl",
 "eyre",
-"hash-db",
-"plain_hasher",
 "reth-interfaces",
 "reth-primitives",
 "reth-rlp",
-"rlp",
-"sha3",
 "thiserror",
 "tokio",
-"triehash",
 ]

 [[package]]
@@ -3225,6 +3244,7 @@ dependencies = [
 "rand 0.8.5",
 "reth-codecs",
 "reth-db",
+"reth-eth-wire",
 "reth-primitives",
 "reth-rpc-types",
 "serde",
@@ -3291,6 +3311,7 @@ dependencies = [
 "either",
 "fnv",
 "futures",
+"linked_hash_set",
 "parking_lot 0.12.1",
 "pin-project",
 "rand 0.8.5",
@@ -3332,10 +3353,12 @@ dependencies = [
 "crc",
 "derive_more",
 "ethers-core",
+"hash-db",
 "hex",
 "hex-literal",
 "maplit",
 "parity-scale-codec",
+"plain_hasher",
 "reth-codecs",
 "reth-rlp",
 "secp256k1",
@@ -3344,6 +3367,7 @@ dependencies = [
 "sucds",
 "thiserror",
 "tiny-keccak",
+"triehash",
 ]

 [[package]]
@@ -3416,9 +3440,12 @@ dependencies = [
"aquamarine",
"assert_matches",
"async-trait",
"futures-util",
"metrics",
"rand 0.8.5",
"reth-bodies-downloaders",
"reth-db",
"reth-eth-wire",
"reth-headers-downloaders",
"reth-interfaces",
"reth-primitives",
@@ -3438,7 +3465,7 @@ dependencies = [
"async-trait",
"bitflags",
"fnv",
"futures",
"futures-util",
"linked-hash-map",
"parking_lot 0.12.1",
"paste",
@@ -3446,6 +3473,7 @@ dependencies = [
"reth-primitives",
"serde",
"thiserror",
"tokio",
"tracing",
]
@@ -18,6 +18,7 @@ members = [
     "crates/net/rpc-api",
     "crates/net/rpc-types",
     "crates/net/headers-downloaders",
+    "crates/net/bodies-downloaders",
     "crates/primitives",
     "crates/stages",
     "crates/transaction-pool",
crates/.gitignore (vendored, deleted)
@@ -1 +0,0 @@
-target/
@@ -17,14 +17,4 @@ async-trait = "0.1.57"
 thiserror = "1.0.37"
 eyre = "0.6.8"
 auto_impl = "1.0"
 tokio = { version = "1.21.2", features = ["sync"] }
-
-# proof related
-triehash = "0.8"
-# See to replace hashers to simplify libraries
-plain_hasher = "0.2"
-hash-db = "0.15"
-# todo replace with faster rlp impl
-rlp = { version = "0.5", default-features = false }
-# replace with tiny-keccak (it is faster hasher)
-sha3 = { version = "0.10", default-features = false }
@@ -2,7 +2,7 @@

 use crate::{verification, Config};
 use reth_interfaces::consensus::{Consensus, Error, ForkchoiceState};
-use reth_primitives::{SealedHeader, H256};
+use reth_primitives::{BlockLocked, SealedHeader, H256};
 use tokio::sync::watch;

 /// Ethereum consensus
@@ -40,4 +40,8 @@ impl Consensus for EthConsensus {
         // * mix_hash & nonce PoW stuf
         // * extra_data
     }
+
+    fn pre_validate_block(&self, block: &BlockLocked) -> Result<(), Error> {
+        verification::validate_block_standalone(block, false)
+    }
 }
@@ -10,9 +10,6 @@ pub mod config;
 pub mod consensus;
 pub mod verification;

-/// Helper function for calculating Merkle proofs and hashes
-pub mod proofs;
-
 pub use config::Config;
 pub use consensus::EthConsensus;
 pub use reth_interfaces::consensus::Error;
@@ -115,10 +115,20 @@ pub fn validate_transaction_regarding_state<AP: AccountProvider>(
     Ok(())
 }

-/// Validate block standalone
-pub fn validate_block_standalone(block: &BlockLocked) -> Result<(), Error> {
-    // check ommers hash
-    let ommers_hash = crate::proofs::calculate_ommers_root(block.ommers.iter().map(|h| h.as_ref()));
+/// Validate a block without regard for state:
+///
+/// - Compares the ommer hash in the block header to the block body
+/// - Compares the transactions root in the block header to the block body
+/// - Pre-execution transaction validation
+/// - (Optionally) Compares the receipts root in the block header to the block body
+pub fn validate_block_standalone(
+    block: &BlockLocked,
+    validate_receipts: bool,
+) -> Result<(), Error> {
+    // Check ommers hash
+    // TODO(onbjerg): This should probably be accessible directly on [Block]
+    let ommers_hash =
+        reth_primitives::proofs::calculate_ommers_root(block.ommers.iter().map(|h| h.as_ref()));
     if block.header.ommers_hash != ommers_hash {
         return Err(Error::BodyOmmersHashDiff {
             got: ommers_hash,
@@ -126,8 +136,9 @@ pub fn validate_block_standalone(block: &BlockLocked) -> Result<(), Error> {
         })
     }

-    // check transaction root
-    let transaction_root = crate::proofs::calculate_transaction_root(block.body.iter());
+    // Check transaction root
+    // TODO(onbjerg): This should probably be accessible directly on [Block]
+    let transaction_root = reth_primitives::proofs::calculate_transaction_root(block.body.iter());
     if block.header.transactions_root != transaction_root {
         return Err(Error::BodyTransactionRootDiff {
             got: transaction_root,
@@ -135,18 +146,27 @@ pub fn validate_block_standalone(block: &BlockLocked) -> Result<(), Error> {
         })
     }

-    // TODO transaction verification, Maybe make it configurable as in check only
+    // TODO: transaction verification, maybe make it configurable as in check only
     // signatures/limits/types
+    // Things to probably check:
+    // - Chain ID
+    // - Base fee per gas (if applicable)
+    // - Max priority fee per gas (if applicable)

-    // check if all transactions limit does not goes over block limit
+    // TODO: Check if all transaction gas total does not go over block limit

-    // check receipts root
-    let receipts_root = crate::proofs::calculate_receipt_root(block.receipts.iter());
-    if block.header.receipts_root != receipts_root {
-        return Err(Error::BodyReceiptsRootDiff {
-            got: receipts_root,
-            expected: block.header.receipts_root,
-        })
+    // Check receipts root
+    // TODO(onbjerg): This should probably be accessible directly on [Block]
+    // NOTE(onbjerg): Pre-validation does not validate the receipts root since we do not have the
+    // receipts yet (this validation is before execution). Maybe this should not be in here?
+    if validate_receipts {
+        let receipts_root = reth_primitives::proofs::calculate_receipt_root(block.receipts.iter());
+        if block.header.receipts_root != receipts_root {
+            return Err(Error::BodyReceiptsRootDiff {
+                got: receipts_root,
+                expected: block.header.receipts_root,
+            })
+        }
     }

     Ok(())
@@ -284,7 +304,7 @@ pub fn full_validation<PROV: HeaderProvider>(
     config: &Config,
 ) -> RethResult<()> {
     validate_header_standalone(&block.header, config)?;
-    validate_block_standalone(block)?;
+    validate_block_standalone(block, true)?;
     let parent = validate_block_regarding_chain(block, &provider)?;
     validate_header_regarding_parent(&parent, &block.header, config)?;
     Ok(())
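Sketch (not part of this commit): how the new `validate_receipts` flag is meant to split validation around execution, using only names from the diff above; the driver function itself is hypothetical.

    // Hypothetical driver for the two-phase use of validate_block_standalone.
    fn check_block(block: &BlockLocked, config: &Config) -> Result<(), Error> {
        validate_header_standalone(&block.header, config)?;
        // Pre-execution: receipts are not available yet, so skip the receipts root.
        validate_block_standalone(block, false)?;
        // ... execute the block ...
        // Once receipts are known (as in full_validation), check everything.
        validate_block_standalone(block, true)?;
        Ok(())
    }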
@@ -50,7 +50,7 @@ impl<E: EnvironmentKind> Database for Env<E> {
 impl<E: EnvironmentKind> Env<E> {
     /// Opens the database at the specified path with the given `EnvKind`.
     ///
-    /// It does not create the tables, for that call [`create_tables`].
+    /// It does not create the tables, for that call [`Env::create_tables`].
     pub fn open(path: &Path, kind: EnvKind) -> Result<Env<E>, Error> {
         let mode = match kind {
             EnvKind::RO => Mode::ReadOnly,
@@ -16,6 +16,9 @@ auto_impl = "1.0"
 tokio = { version = "1.21.2", features = ["sync"] }
 bytes = "1.2"

+# TODO(onbjerg): We only need this for [BlockBody]
+reth-eth-wire = { path = "../net/eth-wire" }
+
 # codecs
 serde = { version = "1.0.*", default-features = false }
 postcard = { version = "1.0.2", features = ["alloc"] }
@@ -1,20 +1,30 @@
 use async_trait::async_trait;
-use reth_primitives::{BlockHash, BlockNumber, SealedHeader, H256};
+use reth_primitives::{BlockHash, BlockLocked, BlockNumber, SealedHeader, H256};
 use tokio::sync::watch::Receiver;

 /// Re-export forkchoice state
 pub use reth_rpc_types::engine::ForkchoiceState;

 /// Consensus is a protocol that chooses canonical chain.
 /// We are checking validity of block header here.
 #[async_trait]
 #[auto_impl::auto_impl(&, Arc)]
 pub trait Consensus: Send + Sync {
     /// Get a receiver for the fork choice state
     fn fork_choice_state(&self) -> Receiver<ForkchoiceState>;

-    /// Validate if header is correct and follows consensus specification
+    /// Validate if header is correct and follows consensus specification.
+    ///
+    /// **This should not be called for the genesis block**.
     fn validate_header(&self, header: &SealedHeader, parent: &SealedHeader) -> Result<(), Error>;
+
+    /// Validate a block disregarding world state, i.e. things that can be checked before sender
+    /// recovery and execution.
+    ///
+    /// See the Yellow Paper sections 4.3.2 "Holistic Validity", 4.3.4 "Block Header Validity", and
+    /// 11.1 "Ommer Validation".
+    ///
+    /// **This should not be called for the genesis block**.
+    fn pre_validate_block(&self, block: &BlockLocked) -> Result<(), Error>;
 }

 /// Consensus Errors
@@ -1,4 +1,7 @@
-use crate::db::{models::accounts::AccountBeforeTx, Compress, Decompress, Error};
+use crate::db::{
+    models::{accounts::AccountBeforeTx, StoredBlockBody},
+    Compress, Decompress, Error,
+};
 use parity_scale_codec::decode_from_bytes;
 use reth_primitives::*;

@@ -53,7 +56,16 @@ impl ScaleValue for Vec<u8> {}
 impl sealed::Sealed for Vec<u8> {}

 impl_scale!(U256, H256, H160);
-impl_scale!(Header, Account, Log, Receipt, TxType, StorageEntry, TransactionSigned);
+impl_scale!(
+    Header,
+    Account,
+    Log,
+    Receipt,
+    TxType,
+    StorageEntry,
+    TransactionSigned,
+    StoredBlockBody
+);
 impl_scale!(AccountBeforeTx);

 impl_scale_value!(u8, u32, u16, u64);
@@ -74,9 +74,9 @@ pub trait Database: for<'a> DatabaseGAT<'a> {
 /// Sealed trait which cannot be implemented by 3rd parties, exposed only for implementers
 pub trait DbTxGAT<'a, __ImplicitBounds: Sealed = Bounds<&'a Self>>: Send + Sync {
     /// Cursor GAT
-    type Cursor<T: Table>: DbCursorRO<'a, T>;
+    type Cursor<T: Table>: DbCursorRO<'a, T> + Send + Sync;
     /// DupCursor GAT
-    type DupCursor<T: DupSort>: DbDupCursorRO<'a, T> + DbCursorRO<'a, T>;
+    type DupCursor<T: DupSort>: DbDupCursorRO<'a, T> + DbCursorRO<'a, T> + Send + Sync;
 }

 /// Implements the GAT method from:
@@ -85,12 +85,14 @@ pub trait DbTxGAT<'a, __ImplicitBounds: Sealed = Bounds<&'a Self>>: Send + Sync
 /// Sealed trait which cannot be implemented by 3rd parties, exposed only for implementers
 pub trait DbTxMutGAT<'a, __ImplicitBounds: Sealed = Bounds<&'a Self>>: Send + Sync {
     /// Cursor GAT
-    type CursorMut<T: Table>: DbCursorRW<'a, T> + DbCursorRO<'a, T>;
+    type CursorMut<T: Table>: DbCursorRW<'a, T> + DbCursorRO<'a, T> + Send + Sync;
     /// DupCursor GAT
     type DupCursorMut<T: DupSort>: DbDupCursorRW<'a, T>
         + DbCursorRW<'a, T>
         + DbDupCursorRO<'a, T>
-        + DbCursorRO<'a, T>;
+        + DbCursorRO<'a, T>
+        + Send
+        + Sync;
 }

 /// Read only transaction
@@ -190,7 +192,9 @@ pub trait DbCursorRW<'tx, T: Table> {
     /// exists in a table, and insert a new row if the specified value doesn't already exist
     fn upsert(&mut self, key: T::Key, value: T::Value) -> Result<(), Error>;

-    /// Append value to next cursor item
+    /// Append value to next cursor item.
+    ///
+    /// This is efficient for pre-sorted data. If the data is not pre-sorted, use [`insert`].
     fn append(&mut self, key: T::Key, value: T::Value) -> Result<(), Error>;

     /// Delete current value that cursor points to
@@ -201,7 +205,10 @@ pub trait DbCursorRW<'tx, T: Table> {
 pub trait DbDupCursorRW<'tx, T: DupSort> {
     /// Append value to next cursor item
     fn delete_current_duplicates(&mut self) -> Result<(), Error>;

-    /// Append duplicate value
+    /// Append duplicate value.
+    ///
+    /// This is efficient for pre-sorted data. If the data is not pre-sorted, use [`insert`].
     fn append_dup(&mut self, key: T::Key, value: T::Value) -> Result<(), Error>;
 }
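Sketch (not part of this commit): the bulk-load pattern the new `append` docs describe. The trait names come from the diff above; the helper itself is illustrative.

    // Bulk-load rows that are already sorted by key; `append` avoids per-row seeks.
    fn load_sorted<'tx, T: Table>(
        cursor: &mut impl DbCursorRW<'tx, T>,
        rows: Vec<(T::Key, T::Value)>, // must be pre-sorted by key
    ) -> Result<(), Error> {
        for (key, value) in rows {
            cursor.append(key, value)?;
        }
        Ok(())
    }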
@@ -8,14 +8,30 @@ use crate::{
     impl_fixed_arbitrary,
 };
 use bytes::Bytes;
-use reth_primitives::{BlockHash, BlockNumber, H256};
+use reth_codecs::main_codec;
+use reth_primitives::{BlockHash, BlockNumber, Header, TxNumber, H256};
 use serde::{Deserialize, Serialize};

 /// Total chain number of transactions. Key for [`CumulativeTxCount`].
 pub type NumTransactions = u64;

-/// Number of transactions in the block. Value for [`BlockBodies`].
-pub type NumTxesInBlock = u16;
+/// The storage representation of a block body.
+///
+/// A block body is stored as a pointer to the first transaction in the block (`base_tx_id`), a
+/// count of how many transactions are in the block, and the headers of the block's uncles.
+///
+/// The [TxNumber]s for all the transactions in the block are `base_tx_id..(base_tx_id +
+/// tx_amount)`.
+#[derive(Debug)]
+#[main_codec]
+pub struct StoredBlockBody {
+    /// The ID of the first transaction in the block.
+    pub base_tx_id: TxNumber,
+    /// The number of transactions in the block.
+    pub tx_amount: u64,
+    /// The block headers of this block's uncles.
+    pub ommers: Vec<Header>,
+}

 /// Hash of the block header. Value for [`CanonicalHeaders`]
 pub type HeaderHash = H256;
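Sketch (not part of this commit): the transaction ID arithmetic documented on `StoredBlockBody`, as an illustrative helper.

    // All TxNumbers belonging to this body, per the invariant documented above.
    fn tx_id_range(body: &StoredBlockBody) -> std::ops::Range<TxNumber> {
        body.base_tx_id..(body.base_tx_id + body.tx_amount)
    }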
@@ -3,7 +3,7 @@
 use crate::db::{
     models::{
         accounts::{AccountBeforeTx, TxNumberAddress},
-        blocks::{BlockNumHash, HeaderHash, NumTransactions, NumTxesInBlock},
+        blocks::{BlockNumHash, HeaderHash, NumTransactions, StoredBlockBody},
         ShardedKey,
     },
     DupSort,
@@ -13,7 +13,7 @@ use reth_primitives::{
     TransactionSigned, TxNumber, H256,
 };

-/// Enum for the type of table present in libmdbx.
+/// Enum for the types of tables present in libmdbx.
 #[derive(Debug)]
 pub enum TableType {
     /// key value table
@@ -119,8 +119,10 @@ table!(
     Headers => BlockNumHash => Header);

 table!(
-    /// Stores the number of transactions of a block.
-    BlockBodies => BlockNumHash => NumTxesInBlock);
+    /// Stores a pointer to the first transaction in the block, the number of transactions in the block, and the uncles/ommers of the block.
+    ///
+    /// The transaction IDs point to the [`Transactions`] table.
+    BlockBodies => BlockNumHash => StoredBlockBody);

 table!(
     /// Stores the maximum [`TxNumber`] from which this particular block starts.
@@ -131,19 +133,19 @@ table!(
     NonCanonicalTransactions => BlockNumHashTxNumber => TransactionSigned);

 table!(
-    /// Stores the transaction body from canonical transactions. Canonical only
+    /// (Canonical only) Stores the transaction body for canonical transactions.
     Transactions => TxNumber => TransactionSigned);

 table!(
-    /// Stores transaction receipts. Canonical only
+    /// (Canonical only) Stores transaction receipts.
     Receipts => TxNumber => Receipt);

 table!(
-    /// Stores transaction logs. Canonical only
+    /// (Canonical only) Stores transaction logs.
     Logs => TxNumber => Receipt);

 table!(
-    /// Stores the current state of an Account.
+    /// Stores the current state of an [`Account`].
     PlainAccountState => Address => Account);

 table!(
@@ -200,27 +202,27 @@ table!(
     AccountHistory => ShardedKey<Address> => TxNumberList);

 table!(
-    /// Stores the transaction numbers that changed each storage key.
+    /// Stores pointers to transactions that changed each storage key.
     StorageHistory => AddressStorageKey => TxNumberList);

 dupsort!(
-    /// Stores state of an account before a certain transaction changed it.
+    /// Stores the state of an account before a certain transaction changed it.
     AccountChangeSet => TxNumber => [Address] AccountBeforeTx);

 dupsort!(
-    /// Stores state of a storage key before a certain transaction changed it.
+    /// Stores the state of a storage key before a certain transaction changed it.
     StorageChangeSet => TxNumberAddress => [H256] StorageEntry);

 table!(
-    /// Stores the transaction sender from each transaction.
+    /// Stores the transaction sender for each transaction.
     TxSenders => TxNumber => Address); // Is it necessary? if so, inverted index index so we dont repeat addresses?

 table!(
-    /// Config.
+    /// Configuration values.
     Config => ConfigKey => ConfigValue);

 table!(
-    /// Stores the block number of each stage id.
+    /// Stores the highest synced block number of each stage.
     SyncStage => StageId => BlockNumber);

 ///
crates/interfaces/src/p2p/bodies/client.rs (new file, 14 lines)
@@ -0,0 +1,14 @@
+use reth_eth_wire::BlockBody;
+use reth_primitives::H256;
+
+use crate::p2p::bodies::error::BodiesClientError;
+use async_trait::async_trait;
+use std::fmt::Debug;
+
+/// A client capable of downloading block bodies.
+#[async_trait]
+#[auto_impl::auto_impl(&, Arc, Box)]
+pub trait BodiesClient: Send + Sync + Debug {
+    /// Fetches the block body for the requested block.
+    async fn get_block_body(&self, hash: H256) -> Result<BlockBody, BodiesClientError>;
+}
crates/interfaces/src/p2p/bodies/downloader.rs (new file, 44 lines)
@@ -0,0 +1,44 @@
+use super::client::BodiesClient;
+use crate::p2p::bodies::error::DownloadError;
+use reth_eth_wire::BlockBody;
+use reth_primitives::{BlockNumber, H256};
+use std::{pin::Pin, time::Duration};
+use tokio_stream::Stream;
+
+/// A downloader capable of fetching block bodies from header hashes.
+///
+/// A downloader represents a distinct strategy for submitting requests to download block bodies,
+/// while a [BodiesClient] represents a client capable of fulfilling these requests.
+pub trait BodyDownloader: Sync + Send {
+    /// The [BodiesClient] used to fetch the block bodies
+    type Client: BodiesClient;
+
+    /// The request timeout duration
+    fn timeout(&self) -> Duration;
+
+    /// The block bodies client
+    fn client(&self) -> &Self::Client;
+
+    /// Download the bodies from `starting_block` (inclusive) up until `target_block` (inclusive).
+    ///
+    /// The returned stream will always emit bodies in the order they were requested, but multiple
+    /// requests may be in flight at the same time.
+    ///
+    /// The stream may exit early in some cases. Thus, a downloader can only at a minimum guarantee:
+    ///
+    /// - All emitted bodies map onto a request
+    /// - The emitted bodies are emitted in order: i.e. the body for the first block is emitted
+    ///   first, even if it was not fetched first.
+    ///
+    /// It is *not* guaranteed that all the requested bodies are fetched: the downloader may close
+    /// the stream before the entire range has been fetched for any reason
+    fn bodies_stream<'a, 'b, I>(&'a self, headers: I) -> BodiesStream<'a>
+    where
+        I: IntoIterator<Item = &'b (BlockNumber, H256)>,
+        <I as IntoIterator>::IntoIter: Send + 'b,
+        'b: 'a;
+}
+
+/// A stream of block bodies.
+pub type BodiesStream<'a> =
+    Pin<Box<dyn Stream<Item = Result<(BlockNumber, H256, BlockBody), DownloadError>> + Send + 'a>>;
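Sketch (not part of this commit): draining a `BodiesStream` under the guarantees listed above. Assumes `futures_util::TryStreamExt` for `try_next`; the collector is illustrative.

    use futures_util::TryStreamExt;

    // Bodies arrive in request order, but the stream may end before the whole
    // range has been fetched, so the result can be shorter than `headers`.
    async fn collect_bodies<D: BodyDownloader>(
        downloader: &D,
        headers: &[(BlockNumber, H256)],
    ) -> Result<Vec<(BlockNumber, H256, BlockBody)>, DownloadError> {
        let mut stream = downloader.bodies_stream(headers);
        let mut bodies = Vec::new();
        while let Some(body) = stream.try_next().await? {
            bodies.push(body);
        }
        Ok(bodies)
    }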
crates/interfaces/src/p2p/bodies/error.rs (new file, 51 lines)
@@ -0,0 +1,51 @@
+use crate::p2p::error::RequestError;
+use reth_primitives::H256;
+use thiserror::Error;
+
+/// Body client errors.
+#[derive(Error, Debug, Clone)]
+pub enum BodiesClientError {
+    /// Timed out while waiting for a response.
+    #[error("Timed out while getting bodies for block {header_hash}.")]
+    Timeout {
+        /// The header hash of the block that timed out.
+        header_hash: H256,
+    },
+    /// The client encountered an internal error.
+    #[error(transparent)]
+    Internal(#[from] RequestError),
+}
+
+/// Body downloader errors.
+#[derive(Error, Debug, Clone)]
+pub enum DownloadError {
+    /// Timed out while waiting for a response.
+    #[error("Timed out while getting bodies for block {header_hash}.")]
+    Timeout {
+        /// The header hash of the block that timed out.
+        header_hash: H256,
+    },
+    /// The [BodiesClient] used by the downloader experienced an error.
+    #[error("The downloader client encountered an error.")]
+    Client {
+        /// The underlying client error.
+        #[source]
+        source: BodiesClientError,
+    },
+}
+
+impl From<BodiesClientError> for DownloadError {
+    fn from(error: BodiesClientError) -> Self {
+        match error {
+            BodiesClientError::Timeout { header_hash } => DownloadError::Timeout { header_hash },
+            _ => DownloadError::Client { source: error },
+        }
+    }
+}
+
+impl DownloadError {
+    /// Indicates whether this error is retryable or fatal.
+    pub fn is_retryable(&self) -> bool {
+        matches!(self, DownloadError::Timeout { .. })
+    }
+}
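Sketch (not part of this commit): the retry policy `is_retryable` enables. The loop is illustrative; only timeouts are retried, everything else is treated as fatal.

    async fn body_with_retries<C: BodiesClient>(
        client: &C,
        hash: H256,
        max_retries: usize,
    ) -> Result<BlockBody, DownloadError> {
        let mut attempts = 0;
        loop {
            match client.get_block_body(hash).await.map_err(DownloadError::from) {
                Ok(body) => return Ok(body),
                Err(err) if err.is_retryable() && attempts < max_retries => attempts += 1,
                Err(err) => return Err(err),
            }
        }
    }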
crates/interfaces/src/p2p/bodies/mod.rs (new file, 8 lines)
@@ -0,0 +1,8 @@
+/// Traits and types for block body clients.
+pub mod client;
+
+/// Block body downloaders.
+pub mod downloader;
+
+/// Error types.
+pub mod error;
@@ -4,13 +4,15 @@ use tokio::sync::{mpsc, oneshot};
 pub type RequestResult<T> = Result<T, RequestError>;

 /// Error variants that can happen when sending requests to a session.
-#[derive(Debug, thiserror::Error)]
+#[derive(Debug, thiserror::Error, Clone)]
 #[allow(missing_docs)]
 pub enum RequestError {
     #[error("Closed channel to the peer.")]
     ChannelClosed,
+    #[error("Not connected to the peer.")]
+    NotConnected,
     #[error("Connection to a peer dropped while handling the request.")]
     ConnectionDropped,
     #[error("Capability Message is not supported by remote peer.")]
     UnsupportedCapability,
     #[error("Request timed out while awaiting response.")]
@@ -1,8 +1,9 @@
 use crate::p2p::MessageStream;

-use reth_primitives::{rpc::BlockId, Header, H256, H512};
+use reth_primitives::{Header, H256, H512};

 use async_trait::async_trait;
+use reth_primitives::BlockHashOrNumber;
 use std::{collections::HashSet, fmt::Debug};

 /// Each peer returns a list of headers and the request id corresponding
@@ -31,7 +32,7 @@ impl From<(u64, Vec<Header>)> for HeadersResponse {
 #[derive(Clone, Debug)]
 pub struct HeadersRequest {
     /// The starting block
-    pub start: BlockId,
+    pub start: BlockHashOrNumber,
     /// The response max size
     pub limit: u64,
     /// Flag indicating whether the blocks should
@@ -1,60 +1,18 @@
 use super::client::{HeadersClient, HeadersRequest, HeadersStream};
-use crate::consensus::Consensus;
-
+use crate::{consensus::Consensus, p2p::headers::error::DownloadError};
 use async_trait::async_trait;
-use reth_primitives::{
-    rpc::{BlockId, BlockNumber},
-    Header, SealedHeader, H256,
-};
+use reth_primitives::{BlockHashOrNumber, Header, SealedHeader};
 use reth_rpc_types::engine::ForkchoiceState;
-use std::{fmt::Debug, time::Duration};
-use thiserror::Error;
+use std::time::Duration;
 use tokio_stream::StreamExt;

-/// The downloader error type
-#[derive(Error, Debug, Clone)]
-pub enum DownloadError {
-    /// Header validation failed
-    #[error("Failed to validate header {hash}. Details: {details}.")]
-    HeaderValidation {
-        /// Hash of header failing validation
-        hash: H256,
-        /// The details of validation failure
-        details: String,
-    },
-    /// Timed out while waiting for request id response.
-    #[error("Timed out while getting headers for request {request_id}.")]
-    Timeout {
-        /// The request id that timed out
-        request_id: u64,
-    },
-    /// Error when checking that the current [`Header`] has the parent's hash as the parent_hash
-    /// field, and that they have sequential block numbers.
-    #[error("Headers did not match, current number: {header_number} / current hash: {header_hash}, parent number: {parent_number} / parent_hash: {parent_hash}")]
-    MismatchedHeaders {
-        /// The header number being evaluated
-        header_number: BlockNumber,
-        /// The header hash being evaluated
-        header_hash: H256,
-        /// The parent number being evaluated
-        parent_number: BlockNumber,
-        /// The parent hash being evaluated
-        parent_hash: H256,
-    },
-}
-
-impl DownloadError {
-    /// Returns bool indicating whether this error is retryable or fatal, in the cases
-    /// where the peer responds with no headers, or times out.
-    pub fn is_retryable(&self) -> bool {
-        matches!(self, DownloadError::Timeout { .. })
-    }
-}
-
-/// The header downloading strategy
+/// A downloader capable of fetching block headers.
+///
+/// A downloader represents a distinct strategy for submitting requests to download block headers,
+/// while a [HeadersClient] represents a client capable of fulfilling these requests.
 #[async_trait]
 #[auto_impl::auto_impl(&, Arc, Box)]
-pub trait Downloader: Sync + Send {
+pub trait HeaderDownloader: Sync + Send {
     /// The Consensus used to verify block validity when
     /// downloading
     type Consensus: Consensus;
@@ -86,7 +44,7 @@ pub trait Downloader: Sync + Send {
     async fn download_headers(
         &self,
         stream: &mut HeadersStream,
-        start: BlockId,
+        start: BlockHashOrNumber,
         limit: u64,
     ) -> Result<Vec<Header>, DownloadError> {
         let request_id = rand::random();
@@ -118,9 +76,9 @@ pub trait Downloader: Sync + Send {
             })
         }

-        self.consensus().validate_header(header, parent).map_err(|e| {
-            DownloadError::HeaderValidation { hash: parent.hash(), details: e.to_string() }
-        })?;
+        self.consensus()
+            .validate_header(header, parent)
+            .map_err(|error| DownloadError::HeaderValidation { hash: parent.hash(), error })?;
         Ok(())
     }
 }
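Sketch (not part of this commit): the parent/child validation step above applied to a whole batch; the walker function is illustrative, while the error mapping mirrors the diff.

    // Validate a batch of headers oldest-to-newest, each against its parent.
    fn validate_batch<C: Consensus>(
        consensus: &C,
        headers: &[SealedHeader],
    ) -> Result<(), DownloadError> {
        for window in headers.windows(2) {
            let (parent, header) = (&window[0], &window[1]);
            consensus
                .validate_header(header, parent)
                .map_err(|error| DownloadError::HeaderValidation { hash: parent.hash(), error })?;
        }
        Ok(())
    }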
crates/interfaces/src/p2p/headers/error.rs (new file, 44 lines)
@@ -0,0 +1,44 @@
+use crate::consensus;
+use reth_primitives::{rpc::BlockNumber, H256};
+use thiserror::Error;
+
+/// The downloader error type
+#[derive(Error, Debug, Clone)]
+pub enum DownloadError {
+    /// Header validation failed
+    #[error("Failed to validate header {hash}. Details: {error}.")]
+    HeaderValidation {
+        /// Hash of header failing validation
+        hash: H256,
+        /// The details of validation failure
+        #[source]
+        error: consensus::Error,
+    },
+    /// Timed out while waiting for request id response.
+    #[error("Timed out while getting headers for request {request_id}.")]
+    Timeout {
+        /// The request id that timed out
+        request_id: u64,
+    },
+    /// Error when checking that the current [`Header`] has the parent's hash as the parent_hash
+    /// field, and that they have sequential block numbers.
+    #[error("Headers did not match, current number: {header_number} / current hash: {header_hash}, parent number: {parent_number} / parent_hash: {parent_hash}")]
+    MismatchedHeaders {
+        /// The header number being evaluated
+        header_number: BlockNumber,
+        /// The header hash being evaluated
+        header_hash: H256,
+        /// The parent number being evaluated
+        parent_number: BlockNumber,
+        /// The parent hash being evaluated
+        parent_hash: H256,
+    },
+}
+
+impl DownloadError {
+    /// Returns bool indicating whether this error is retryable or fatal, in the cases
+    /// where the peer responds with no headers, or times out.
+    pub fn is_retryable(&self) -> bool {
+        matches!(self, DownloadError::Timeout { .. })
+    }
+}
@@ -9,3 +9,6 @@ pub mod client;
 /// [`Consensus`]: crate::consensus::Consensus
 /// [`HeadersClient`]: client::HeadersClient
 pub mod downloader;
+
+/// Error types.
+pub mod error;
@@ -1,3 +1,6 @@
+/// Traits for implementing P2P block body clients.
+pub mod bodies;
+
 /// Traits for implementing P2P Header Clients. Also includes implementations
 /// of a Linear and a Parallel downloader generic over the [`Consensus`] and
 /// [`HeadersClient`].
crates/interfaces/src/test_utils/bodies.rs (new file, 33 lines)
@@ -0,0 +1,33 @@
+use crate::p2p::bodies::{client::BodiesClient, error::BodiesClientError};
+use async_trait::async_trait;
+use reth_eth_wire::BlockBody;
+use reth_primitives::H256;
+use std::fmt::{Debug, Formatter};
+
+/// A test client for fetching bodies
+pub struct TestBodiesClient<F>
+where
+    F: Fn(H256) -> Result<BlockBody, BodiesClientError>,
+{
+    /// The function that is called on each body request.
+    pub responder: F,
+}
+
+impl<F> Debug for TestBodiesClient<F>
+where
+    F: Fn(H256) -> Result<BlockBody, BodiesClientError>,
+{
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("TestBodiesClient").finish()
+    }
+}
+
+#[async_trait]
+impl<F> BodiesClient for TestBodiesClient<F>
+where
+    F: Fn(H256) -> Result<BlockBody, BodiesClientError> + Send + Sync,
+{
+    async fn get_block_body(&self, hash: H256) -> Result<BlockBody, BodiesClientError> {
+        (self.responder)(hash)
+    }
+}
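Sketch (not part of this commit): because the responder is just a closure, failure modes are one-liners, e.g. a client that always times out for exercising retry paths.

    let client = TestBodiesClient {
        responder: |hash| Err(BodiesClientError::Timeout { header_hash: hash }),
    };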
crates/interfaces/src/test_utils/generators.rs (new file, 142 lines)
@@ -0,0 +1,142 @@
+use rand::{thread_rng, Rng};
+use reth_primitives::{
+    proofs, Address, BlockLocked, Bytes, Header, SealedHeader, Signature, Transaction,
+    TransactionKind, TransactionSigned, H256, U256,
+};
+
+// TODO(onbjerg): Maybe we should split this off to its own crate, or move the helpers to the
+// relevant crates?
+
+/// Generates a range of random [SealedHeader]s.
+///
+/// The parent hash of the first header
+/// in the result will be equal to `head`.
+///
+/// The headers are assumed to not be correct if validated.
+pub fn random_header_range(rng: std::ops::Range<u64>, head: H256) -> Vec<SealedHeader> {
+    let mut headers = Vec::with_capacity(rng.end.saturating_sub(rng.start) as usize);
+    for idx in rng {
+        headers.push(random_header(
+            idx,
+            Some(headers.last().map(|h: &SealedHeader| h.hash()).unwrap_or(head)),
+        ));
+    }
+    headers
+}
+
+/// Generate a random [SealedHeader].
+///
+/// The header is assumed to not be correct if validated.
+pub fn random_header(number: u64, parent: Option<H256>) -> SealedHeader {
+    let header = reth_primitives::Header {
+        number,
+        nonce: rand::random(),
+        difficulty: U256::from(rand::random::<u32>()),
+        parent_hash: parent.unwrap_or_default(),
+        ..Default::default()
+    };
+    header.seal()
+}
+
+/// Generates a random legacy [Transaction].
+///
+/// Every field is random, except:
+///
+/// - The chain ID, which is always 1
+/// - The input, which is always nothing
+pub fn random_tx() -> Transaction {
+    Transaction::Legacy {
+        chain_id: Some(1),
+        nonce: rand::random::<u16>().into(),
+        gas_price: rand::random::<u16>().into(),
+        gas_limit: rand::random::<u16>().into(),
+        to: TransactionKind::Call(Address::random()),
+        value: rand::random::<u16>().into(),
+        input: Bytes::default(),
+    }
+}
+
+/// Generates a random legacy [Transaction] that is signed.
+///
+/// On top of the considerations of [gen_random_tx], these apply as well:
+///
+/// - There is no guarantee that the nonce is not used twice for the same account
+pub fn random_signed_tx() -> TransactionSigned {
+    let tx = random_tx();
+    let hash = tx.signature_hash();
+    TransactionSigned {
+        transaction: tx,
+        hash,
+        signature: Signature {
+            // TODO
+            r: Default::default(),
+            s: Default::default(),
+            odd_y_parity: false,
+        },
+    }
+}
+
+/// Generate a random block filled with a random number of signed transactions (generated using
+/// [random_signed_tx]).
+///
+/// All fields use the default values (and are assumed to be invalid) except for:
+///
+/// - `parent_hash`
+/// - `transactions_root`
+/// - `ommers_hash`
+///
+/// Additionally, `gas_used` and `gas_limit` always exactly match the total `gas_limit` of all
+/// transactions in the block.
+///
+/// The ommer headers are not assumed to be valid.
+pub fn random_block(number: u64, parent: Option<H256>) -> BlockLocked {
+    let mut rng = thread_rng();
+
+    // Generate transactions
+    let transactions: Vec<TransactionSigned> =
+        (0..rand::random::<u8>()).into_iter().map(|_| random_signed_tx()).collect();
+    let total_gas = transactions.iter().fold(0, |sum, tx| sum + tx.transaction.gas_limit());
+
+    // Generate ommers
+    let mut ommers = Vec::new();
+    for _ in 0..rng.gen_range(0..2) {
+        ommers.push(random_header(number, parent).unseal());
+    }
+
+    // Calculate roots
+    let transactions_root = proofs::calculate_transaction_root(transactions.iter());
+    let ommers_hash = proofs::calculate_ommers_root(ommers.iter());
+
+    BlockLocked {
+        header: Header {
+            parent_hash: parent.unwrap_or_default(),
+            number,
+            gas_used: total_gas,
+            gas_limit: total_gas,
+            transactions_root,
+            ommers_hash,
+            ..Default::default()
+        }
+        .seal(),
+        body: transactions,
+        ommers: ommers.into_iter().map(|ommer| ommer.seal()).collect(),
+        ..Default::default()
+    }
+}
+
+/// Generate a range of random blocks.
+///
+/// The parent hash of the first block
+/// in the result will be equal to `head`.
+///
+/// See [random_block] for considerations when validating the generated blocks.
+pub fn random_block_range(rng: std::ops::Range<u64>, head: H256) -> Vec<BlockLocked> {
+    let mut blocks = Vec::with_capacity(rng.end.saturating_sub(rng.start) as usize);
+    for idx in rng {
+        blocks.push(random_block(
+            idx,
+            Some(blocks.last().map(|block: &BlockLocked| block.header.hash()).unwrap_or(head)),
+        ));
+    }
+    blocks
+}
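Sketch (not part of this commit): illustrative usage of the generators, following the invariants documented above.

    // Ten random blocks chained onto a zero genesis hash.
    let head = H256::zero();
    let blocks = random_block_range(0..10, head);
    assert_eq!(blocks.len(), 10);
    assert_eq!(blocks[0].header.parent_hash, head);
    // Each block links to the previous one in the range.
    assert_eq!(blocks[1].header.parent_hash, blocks[0].header.hash());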
@@ -1,32 +1,33 @@
 //! Testing support for headers related interfaces.
 use crate::{
-    consensus::{self, Consensus},
+    consensus::{self, Consensus, Error},
     p2p::headers::{
         client::{HeadersClient, HeadersRequest, HeadersResponse, HeadersStream},
-        downloader::{DownloadError, Downloader},
+        downloader::HeaderDownloader,
+        error::DownloadError,
     },
 };
-use reth_primitives::{Header, SealedHeader, H256, H512, U256};
+use reth_primitives::{BlockLocked, Header, SealedHeader, H256, H512};
 use reth_rpc_types::engine::ForkchoiceState;
 use std::{collections::HashSet, sync::Arc, time::Duration};
 use tokio::sync::{broadcast, mpsc, watch};
 use tokio_stream::{wrappers::BroadcastStream, StreamExt};

-#[derive(Debug)]
 /// A test downloader which just returns the values that have been pushed to it.
-pub struct TestDownloader {
-    result: Result<Vec<SealedHeader>, DownloadError>,
+#[derive(Debug)]
+pub struct TestHeaderDownloader {
+    client: Arc<TestHeadersClient>,
 }

-impl TestDownloader {
+impl TestHeaderDownloader {
     /// Instantiates the downloader with the mock responses
-    pub fn new(result: Result<Vec<SealedHeader>, DownloadError>) -> Self {
-        Self { result }
+    pub fn new(client: Arc<TestHeadersClient>) -> Self {
+        Self { client }
     }
 }

 #[async_trait::async_trait]
-impl Downloader for TestDownloader {
+impl HeaderDownloader for TestHeaderDownloader {
     type Consensus = TestConsensus;
     type Client = TestHeadersClient;

@@ -39,7 +40,7 @@ impl Downloader for TestDownloader {
     }

     fn client(&self) -> &Self::Client {
-        unimplemented!()
+        &self.client
     }

     async fn download(
@@ -47,12 +48,26 @@ impl Downloader for TestDownloader {
         _: &SealedHeader,
         _: &ForkchoiceState,
     ) -> Result<Vec<SealedHeader>, DownloadError> {
-        self.result.clone()
+        let stream = self.client.stream_headers().await;
+        let stream = stream.timeout(Duration::from_secs(1));
+
+        match Box::pin(stream).try_next().await {
+            Ok(Some(res)) => {
+                let mut headers = res.headers.iter().map(|h| h.clone().seal()).collect::<Vec<_>>();
+                if !headers.is_empty() {
+                    headers.sort_unstable_by_key(|h| h.number);
+                    headers.remove(0); // remove head from response
+                    headers.reverse();
+                }
+                Ok(headers)
+            }
+            _ => Err(DownloadError::Timeout { request_id: 0 }),
+        }
     }
 }

-#[derive(Debug)]
 /// A test client for fetching headers
+#[derive(Debug)]
 pub struct TestHeadersClient {
     req_tx: mpsc::Sender<(u64, HeadersRequest)>,
     req_rx: Arc<tokio::sync::Mutex<mpsc::Receiver<(u64, HeadersRequest)>>>,
@@ -118,7 +133,7 @@ impl HeadersClient for TestHeadersClient {
     }
 }

-/// Consensus client impl for testing
+/// Consensus engine implementation for testing
 #[derive(Debug)]
 pub struct TestConsensus {
     /// Watcher over the forkchoice state
@@ -141,14 +156,14 @@ impl Default for TestConsensus {
 }

 impl TestConsensus {
-    /// Update the forkchoice state
+    /// Update the fork choice state
     pub fn update_tip(&self, tip: H256) {
         let state = ForkchoiceState {
             head_block_hash: tip,
             finalized_block_hash: H256::zero(),
             safe_block_hash: H256::zero(),
         };
-        self.channel.0.send(state).expect("updating forkchoice state failed");
+        self.channel.0.send(state).expect("updating fork choice state failed");
     }

     /// Update the validation flag
@@ -174,29 +189,12 @@ impl Consensus for TestConsensus {
             Ok(())
         }
     }
-}

-/// Generate a range of random header. The parent hash of the first header
-/// in the result will be equal to head
-pub fn gen_random_header_range(rng: std::ops::Range<u64>, head: H256) -> Vec<SealedHeader> {
-    let mut headers = Vec::with_capacity(rng.end.saturating_sub(rng.start) as usize);
-    for idx in rng {
-        headers.push(gen_random_header(
-            idx,
-            Some(headers.last().map(|h: &SealedHeader| h.hash()).unwrap_or(head)),
-        ));
+    fn pre_validate_block(&self, _block: &BlockLocked) -> Result<(), Error> {
+        if self.fail_validation {
+            Err(consensus::Error::BaseFeeMissing)
+        } else {
+            Ok(())
+        }
     }
-    headers
 }
-
-/// Generate a random header
-pub fn gen_random_header(number: u64, parent: Option<H256>) -> SealedHeader {
-    let header = reth_primitives::Header {
-        number,
-        nonce: rand::random(),
-        difficulty: U256::from(rand::random::<u32>()),
-        parent_hash: parent.unwrap_or_default(),
-        ..Default::default()
-    };
-    header.seal()
-}
@@ -1,5 +1,10 @@
 mod api;
+mod bodies;
 mod headers;

+/// Generators for different data structures like block headers, block bodies and ranges of those.
+pub mod generators;
+
 pub use api::TestApi;
+pub use bodies::*;
 pub use headers::*;
crates/net/bodies-downloaders/Cargo.toml (new file, 21 lines)
@@ -0,0 +1,21 @@
+[package]
+name = "reth-bodies-downloaders"
+version = "0.1.0"
+edition = "2021"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/foundry-rs/reth"
+readme = "README.md"
+description = "Implementations of various block body downloaders"
+
+[dependencies]
+futures-util = "0.3.25"
+reth-interfaces = { path = "../../interfaces" }
+reth-primitives = { path = "../../primitives" }
+reth-eth-wire = { path= "../eth-wire" }
+[dev-dependencies]
+assert_matches = "1.5.0"
+once_cell = "1.15.0"
+rand = "0.8.5"
+reth-interfaces = { path = "../../interfaces", features = ["test-utils"] }
+tokio = { version = "1.21.2", features = ["full"] }
+serial_test = "0.9.0"
crates/net/bodies-downloaders/src/concurrent.rs (new file, 132 lines)
@@ -0,0 +1,132 @@
+use futures_util::{stream, StreamExt, TryFutureExt};
+use reth_interfaces::p2p::bodies::{
+    client::BodiesClient,
+    downloader::{BodiesStream, BodyDownloader},
+    error::{BodiesClientError, DownloadError},
+};
+use reth_primitives::{BlockNumber, H256};
+use std::{sync::Arc, time::Duration};
+
+/// Downloads bodies in batches.
+///
+/// All blocks in a batch are fetched at the same time.
+#[derive(Debug)]
+pub struct ConcurrentDownloader<C> {
+    /// The bodies client
+    client: Arc<C>,
+    /// The batch size per one request
+    pub batch_size: usize,
+    /// A single request timeout
+    pub request_timeout: Duration,
+    /// The number of retries for downloading
+    pub request_retries: usize,
+}
+
+impl<C: BodiesClient> BodyDownloader for ConcurrentDownloader<C> {
+    type Client = C;
+
+    /// The request timeout duration
+    fn timeout(&self) -> Duration {
+        self.request_timeout
+    }
+
+    /// The block bodies client
+    fn client(&self) -> &Self::Client {
+        &self.client
+    }
+
+    fn bodies_stream<'a, 'b, I>(&'a self, headers: I) -> BodiesStream<'a>
+    where
+        I: IntoIterator<Item = &'b (BlockNumber, H256)>,
+        <I as IntoIterator>::IntoIter: Send + 'b,
+        'b: 'a,
+    {
+        // TODO: Retry
+        Box::pin(
+            stream::iter(headers.into_iter().map(|(block_number, header_hash)| {
+                {
+                    self.client
+                        .get_block_body(*header_hash)
+                        .map_ok(move |body| (*block_number, *header_hash, body))
+                        .map_err(|err| match err {
+                            BodiesClientError::Timeout { header_hash } => {
+                                DownloadError::Timeout { header_hash }
+                            }
+                            err => DownloadError::Client { source: err },
+                        })
+                }
+            }))
+            .buffered(self.batch_size),
+        )
+    }
+}
+
+/// A [ConcurrentDownloader] builder.
+#[derive(Debug)]
+pub struct ConcurrentDownloaderBuilder {
+    /// The batch size per one request
+    batch_size: usize,
+    /// A single request timeout
+    request_timeout: Duration,
+    /// The number of retries for downloading
+    request_retries: usize,
+}
+
+impl Default for ConcurrentDownloaderBuilder {
+    fn default() -> Self {
+        Self { batch_size: 100, request_timeout: Duration::from_millis(100), request_retries: 5 }
+    }
+}
+
+impl ConcurrentDownloaderBuilder {
+    /// Set the request batch size
+    pub fn batch_size(mut self, size: usize) -> Self {
+        self.batch_size = size;
+        self
+    }
+
+    /// Set the request timeout
+    pub fn timeout(mut self, timeout: Duration) -> Self {
+        self.request_timeout = timeout;
+        self
+    }
+
+    /// Set the number of retries per request
+    pub fn retries(mut self, retries: usize) -> Self {
+        self.request_retries = retries;
+        self
+    }
+
+    /// Build [ConcurrentDownloader] with the provided client
+    pub fn build<C: BodiesClient>(self, client: Arc<C>) -> ConcurrentDownloader<C> {
+        ConcurrentDownloader {
+            client,
+            batch_size: self.batch_size,
+            request_timeout: self.request_timeout,
+            request_retries: self.request_retries,
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    #[tokio::test]
+    #[ignore]
+    async fn emits_bodies_in_order() {}
+
+    #[tokio::test]
+    #[ignore]
+    async fn header_iter_failure() {}
+
+    #[tokio::test]
+    #[ignore]
+    async fn client_failure() {}
+
+    #[tokio::test]
+    #[ignore]
+    async fn retries_requests() {}
+
+    #[tokio::test]
+    #[ignore]
+    async fn timeout() {}
+}
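Sketch (not part of this commit): constructing the downloader via its builder; the values are arbitrary and `client` is any `BodiesClient`.

    let downloader = ConcurrentDownloaderBuilder::default()
        .batch_size(64)
        .timeout(Duration::from_millis(500))
        .retries(3)
        .build(Arc::new(client));

Note that `request_retries` is stored but, per the `// TODO: Retry` in `bodies_stream`, not yet acted on.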
crates/net/bodies-downloaders/src/lib.rs (new file, 11 lines)
@@ -0,0 +1,11 @@
+#![warn(missing_docs, unreachable_pub)]
+#![deny(unused_must_use, rust_2018_idioms)]
+#![doc(test(
+    no_crate_inject,
+    attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
+))]
+
+//! Implements body downloader algorithms.
+
+/// A naive concurrent downloader.
+pub mod concurrent;
@@ -24,7 +24,7 @@ pub struct Discv4Config {
     pub find_node_timeout: Duration,
     /// The duration we set for neighbours responses
     pub neighbours_timeout: Duration,
-    /// A set of lists that permit or ban IP's or NodeIds from the server. See
+    /// A set of lists that permit or ban IP's or PeerIds from the server. See
     /// `crate::PermitBanList`.
     pub permit_ban_list: PermitBanList,
     /// Set the default duration for which nodes are banned for. This timeouts are checked every 5
@@ -110,7 +110,7 @@ impl Discv4ConfigBuilder {
         self
     }

-    /// A set of lists that permit or ban IP's or NodeIds from the server. See
+    /// A set of lists that permit or ban IP's or PeerIds from the server. See
    /// `crate::PermitBanList`.
     pub fn permit_ban_list(&mut self, list: PermitBanList) -> &mut Self {
         self.config.permit_ban_list = list;
@@ -30,7 +30,7 @@ use discv5::{
|
||||
},
|
||||
ConnectionDirection, ConnectionState,
|
||||
};
|
||||
use reth_primitives::{H256, H512};
|
||||
use reth_primitives::{PeerId, H256};
|
||||
use secp256k1::SecretKey;
|
||||
use std::{
|
||||
cell::RefCell,
|
||||
@@ -67,9 +67,6 @@ pub mod mock;
|
||||
/// reexport to get public ip.
|
||||
pub use public_ip;
|
||||
|
||||
/// Identifier for nodes.
|
||||
pub type NodeId = H512;
|
||||
|
||||
/// The default port for discv4 via UDP
|
||||
///
|
||||
/// Note: the default TCP port is the same.
|
||||
@@ -140,12 +137,13 @@ impl Discv4 {
|
||||
/// use std::str::FromStr;
|
||||
/// use rand::thread_rng;
|
||||
/// use secp256k1::SECP256K1;
|
||||
/// use reth_discv4::{Discv4, Discv4Config, NodeId, NodeRecord};
|
||||
/// use reth_primitives::PeerId;
|
||||
/// use reth_discv4::{Discv4, Discv4Config, NodeRecord};
|
||||
/// # async fn t() -> io::Result<()> {
|
||||
/// // generate a (random) keypair
|
||||
/// let mut rng = thread_rng();
|
||||
/// let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng);
|
||||
/// let id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]);
|
||||
/// let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);
|
||||
///
|
||||
/// let socket = SocketAddr::from_str("0.0.0.0:0").unwrap();
|
||||
/// let local_enr = NodeRecord {
|
||||
@@ -212,11 +210,11 @@ impl Discv4 {
|
||||
}
|
||||
|
||||
/// Looks up the given node id
|
||||
pub async fn lookup(&self, node_id: NodeId) -> Result<Vec<NodeRecord>, Discv4Error> {
|
||||
pub async fn lookup(&self, node_id: PeerId) -> Result<Vec<NodeRecord>, Discv4Error> {
|
||||
self.lookup_node(Some(node_id)).await
|
||||
}
|
||||
|
||||
async fn lookup_node(&self, node_id: Option<NodeId>) -> Result<Vec<NodeRecord>, Discv4Error> {
|
||||
async fn lookup_node(&self, node_id: Option<PeerId>) -> Result<Vec<NodeRecord>, Discv4Error> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let cmd = Discv4Command::Lookup { node_id, tx: Some(tx) };
|
||||
self.to_service.send(cmd).await?;
|
||||
@@ -269,9 +267,9 @@ pub struct Discv4Service {
|
||||
/// followup `FindNode` requests.... Buffering them effectively prevents high `Ping` peaks.
|
||||
queued_pings: VecDeque<(NodeRecord, PingReason)>,
|
||||
/// Currently active pings to specific nodes.
|
||||
pending_pings: HashMap<NodeId, PingRequest>,
|
||||
pending_pings: HashMap<PeerId, PingRequest>,
|
||||
/// Currently active FindNode requests
|
||||
pending_find_nodes: HashMap<NodeId, FindNodeRequest>,
|
||||
pending_find_nodes: HashMap<PeerId, FindNodeRequest>,
|
||||
/// Commands listener
|
||||
commands_rx: Option<mpsc::Receiver<Discv4Command>>,
/// All subscribers for table updates
@@ -377,8 +375,8 @@ impl Discv4Service {
&mut self.local_enr
}

/// Returns true if the given NodeId is currently in the bucket
pub fn contains_node(&self, id: NodeId) -> bool {
/// Returns true if the given PeerId is currently in the bucket
pub fn contains_node(&self, id: PeerId) -> bool {
let key = kad_key(id);
self.kbuckets.get_index(&key).is_some()
}
@@ -431,7 +429,7 @@ impl Discv4Service {
//
// To guard against traffic amplification attacks, Neighbors replies should only be sent if the
// sender of FindNode has been verified by the endpoint proof procedure.
pub fn lookup(&mut self, target: NodeId) {
pub fn lookup(&mut self, target: PeerId) {
self.lookup_with(target, None)
}

@@ -445,7 +443,7 @@ impl Discv4Service {
/// This takes an optional Sender through which all successfully discovered nodes are sent once
/// the request has finished.
#[instrument(skip_all, fields(?target), target = "net::discv4")]
fn lookup_with(&mut self, target: NodeId, tx: Option<NodeRecordSender>) {
fn lookup_with(&mut self, target: PeerId, tx: Option<NodeRecordSender>) {
trace!("Starting lookup");
let key = kad_key(target);

@@ -499,7 +497,7 @@ impl Discv4Service {
///
/// This allows applications, for whatever reason, to remove nodes from the local routing
/// table. Returns `true` if the node was in the table and `false` otherwise.
pub fn remove_node(&mut self, node_id: NodeId) -> bool {
pub fn remove_node(&mut self, node_id: PeerId) -> bool {
let key = kad_key(node_id);
let removed = self.kbuckets.remove(&key);
if removed {
@@ -559,7 +557,7 @@ impl Discv4Service {
}

/// Message handler for an incoming `Ping`
fn on_ping(&mut self, ping: Ping, remote_addr: SocketAddr, remote_id: NodeId, hash: H256) {
fn on_ping(&mut self, ping: Ping, remote_addr: SocketAddr, remote_id: PeerId, hash: H256) {
// update the record
let record = NodeRecord {
address: ping.from.address,
@@ -611,7 +609,7 @@ impl Discv4Service {
}

/// Message handler for an incoming `Pong`.
fn on_pong(&mut self, pong: Pong, remote_addr: SocketAddr, remote_id: NodeId) {
fn on_pong(&mut self, pong: Pong, remote_addr: SocketAddr, remote_id: PeerId) {
if self.is_expired(pong.expire) {
return
}
@@ -654,7 +652,7 @@ impl Discv4Service {
}

/// Handler for incoming `FindNode` message
fn on_find_node(&mut self, msg: FindNode, remote_addr: SocketAddr, node_id: NodeId) {
fn on_find_node(&mut self, msg: FindNode, remote_addr: SocketAddr, node_id: PeerId) {
match self.node_status(node_id, remote_addr) {
NodeEntryStatus::IsLocal => {
// received address from self
@@ -675,7 +673,7 @@ impl Discv4Service {

/// Handler for incoming `Neighbours` messages that are handled if they're responses to
/// `FindNode` requests
fn on_neighbours(&mut self, msg: Neighbours, remote_addr: SocketAddr, node_id: NodeId) {
fn on_neighbours(&mut self, msg: Neighbours, remote_addr: SocketAddr, node_id: PeerId) {
// check if this request was expected
let ctx = match self.pending_find_nodes.entry(node_id) {
Entry::Occupied(mut entry) => {
@@ -732,7 +730,7 @@ impl Discv4Service {
}

/// Sends a Neighbours packet for `target` to the given addr
fn respond_closest(&mut self, target: NodeId, to: SocketAddr) {
fn respond_closest(&mut self, target: PeerId, to: SocketAddr) {
let key = kad_key(target);
let expire = self.send_neighbours_timeout();
let all_nodes = self.kbuckets.closest_values(&key).collect::<Vec<_>>();
@@ -746,7 +744,7 @@ impl Discv4Service {
}

/// Returns the current status of the node
fn node_status(&mut self, node: NodeId, addr: SocketAddr) -> NodeEntryStatus {
fn node_status(&mut self, node: PeerId, addr: SocketAddr) -> NodeEntryStatus {
if node == self.local_enr.id {
debug!(?node, target = "net::disc", "Got an incoming discovery request from self");
return NodeEntryStatus::IsLocal
@@ -807,7 +805,7 @@ impl Discv4Service {
}

/// Removes the node from the table
fn expire_node_request(&mut self, node_id: NodeId) {
fn expire_node_request(&mut self, node_id: PeerId) {
let key = kad_key(node_id);
self.kbuckets.remove(&key);
}
@@ -976,7 +974,7 @@ pub(crate) async fn send_loop(udp: Arc<UdpSocket>, rx: EgressReceiver) {
}

/// Continuously awaits new incoming messages and sends them back through the channel.
pub(crate) async fn receive_loop(udp: Arc<UdpSocket>, tx: IngressSender, local_id: NodeId) {
pub(crate) async fn receive_loop(udp: Arc<UdpSocket>, tx: IngressSender, local_id: PeerId) {
loop {
let mut buf = [0; MAX_PACKET_SIZE];
let res = udp.recv_from(&mut buf).await;
@@ -1010,7 +1008,7 @@ pub(crate) async fn receive_loop(udp: Arc<UdpSocket>, tx: IngressSender, local_i

/// The commands sent from the frontend to the service
enum Discv4Command {
Lookup { node_id: Option<NodeId>, tx: Option<NodeRecordSender> },
Lookup { node_id: Option<PeerId>, tx: Option<NodeRecordSender> },
Updates(OneshotSender<ReceiverStream<TableUpdate>>),
}

@@ -1036,7 +1034,7 @@ struct PingRequest {
reason: PingReason,
}

/// Rotates the NodeId that is periodically looked up.
/// Rotates the PeerId that is periodically looked up.
///
/// By selecting different targets, the lookups will be seeded with different ALPHA seed nodes.
#[derive(Debug)]
@@ -1066,13 +1064,13 @@ impl Default for LookupTargetRotator {

impl LookupTargetRotator {
/// this will return the next node id to lookup
fn next(&mut self, local: &NodeId) -> NodeId {
fn next(&mut self, local: &PeerId) -> PeerId {
self.counter += 1;
self.counter %= self.interval;
if self.counter == 0 {
return *local
}
NodeId::random()
PeerId::random()
}
}
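Note on the rotator above: every `interval`-th lookup targets the local id (which refreshes the buckets closest to self), while all other lookups use random targets to spread over the keyspace. A minimal standalone sketch of the same counter scheme, with u64 standing in for PeerId and an assumed `interval` of 4:

// Standalone sketch of the rotation scheme above; not the reth type itself.
struct Rotator {
    counter: usize,
    interval: usize,
}

impl Rotator {
    // Every `interval`-th call yields the local id; all others are random.
    fn next(&mut self, local: u64) -> u64 {
        self.counter += 1;
        self.counter %= self.interval;
        if self.counter == 0 {
            return local;
        }
        rand::random::<u64>() // stands in for PeerId::random()
    }
}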

@@ -1087,7 +1085,7 @@ struct LookupContext {
impl LookupContext {
/// Create new context for a recursive lookup
fn new(
target: NodeId,
target: PeerId,
nearest_nodes: impl IntoIterator<Item = (Distance, NodeRecord)>,
listener: Option<NodeRecordSender>,
) -> Self {
@@ -1107,7 +1105,7 @@ impl LookupContext {
}

/// Returns the target of this lookup
fn target(&self) -> NodeId {
fn target(&self) -> PeerId {
self.inner.target
}

@@ -1132,7 +1130,7 @@ impl LookupContext {
}

/// Marks the node as queried
fn mark_queried(&self, id: NodeId) {
fn mark_queried(&self, id: PeerId) {
if let Some((_, node)) =
self.inner.closest_nodes.borrow_mut().iter_mut().find(|(_, node)| node.record.id == id)
{
@@ -1141,7 +1139,7 @@ impl LookupContext {
}

/// Marks the node as responded
fn mark_responded(&self, id: NodeId) {
fn mark_responded(&self, id: PeerId) {
if let Some((_, node)) =
self.inner.closest_nodes.borrow_mut().iter_mut().find(|(_, node)| node.record.id == id)
{
@@ -1159,7 +1157,7 @@ impl LookupContext {
unsafe impl Send for LookupContext {}

struct LookupContextInner {
target: NodeId,
target: PeerId,
/// The closest nodes
closest_nodes: RefCell<BTreeMap<Distance, QueryNode>>,
/// A listener for all the nodes retrieved in this lookup
@@ -1249,7 +1247,7 @@ enum PingReason {
///
/// Once the expected PONG is received, the endpoint proof is complete and the find node can be
/// answered.
FindNode(NodeId, NodeEntryStatus),
FindNode(PeerId, NodeEntryStatus),
/// Part of a lookup to ensure endpoint is proven.
Lookup(NodeRecord, LookupContext),
}
@@ -1260,7 +1258,7 @@ pub enum TableUpdate {
/// A new node was inserted to the table.
Added(NodeRecord),
/// Node that was removed from the table
Removed(NodeId),
Removed(PeerId),
/// A series of updates
Batch(Vec<TableUpdate>),
}
@@ -1276,7 +1274,7 @@ mod tests {

#[test]
fn test_local_rotator() {
let id = NodeId::random();
let id = PeerId::random();
let mut rotator = LookupTargetRotator::local_only();
assert_eq!(rotator.next(&id), id);
assert_eq!(rotator.next(&id), id);
@@ -1284,7 +1282,7 @@ mod tests {

#[test]
fn test_rotator() {
let id = NodeId::random();
let id = PeerId::random();
let mut rotator = LookupTargetRotator::default();
assert_eq!(rotator.next(&id), id);
assert_ne!(rotator.next(&id), id);
@@ -1301,7 +1299,7 @@ mod tests {
let local_addr = service.local_addr();

for idx in 0..MAX_NODES_PING {
let node = NodeRecord::new(local_addr, NodeId::random());
let node = NodeRecord::new(local_addr, PeerId::random());
service.add_node(node);
assert!(service.pending_pings.contains_key(&node.id));
assert_eq!(service.pending_pings.len(), idx + 1);
@@ -6,7 +6,7 @@ use crate::{
node::NodeRecord,
proto::{FindNode, Message, Neighbours, NodeEndpoint, Packet, Ping, Pong},
receive_loop, send_loop, Discv4, Discv4Config, Discv4Service, EgressSender, IngressEvent,
IngressReceiver, NodeId, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS,
IngressReceiver, PeerId, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS,
};
use rand::{thread_rng, Rng, RngCore};
use reth_primitives::H256;
@@ -40,8 +40,8 @@ pub struct MockDiscovery {
ingress: IngressReceiver,
/// Sender for sending outgoing messages
egress: EgressSender,
pending_pongs: HashSet<NodeId>,
pending_neighbours: HashMap<NodeId, Vec<NodeRecord>>,
pending_pongs: HashSet<PeerId>,
pending_neighbours: HashMap<PeerId, Vec<NodeRecord>>,
command_rx: mpsc::Receiver<MockCommand>,
}

@@ -51,7 +51,7 @@ impl MockDiscovery {
let mut rng = thread_rng();
let socket = SocketAddr::from_str("0.0.0.0:0").unwrap();
let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng);
let id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]);
let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);
let socket = Arc::new(UdpSocket::bind(socket).await?);
let local_addr = socket.local_addr()?;
let local_enr = NodeRecord {
@@ -95,12 +95,12 @@ impl MockDiscovery {
}

/// Queue a pending pong.
pub fn queue_pong(&mut self, from: NodeId) {
pub fn queue_pong(&mut self, from: PeerId) {
self.pending_pongs.insert(from);
}

/// Queue a pending Neighbours response.
pub fn queue_neighbours(&mut self, target: NodeId, nodes: Vec<NodeRecord>) {
pub fn queue_neighbours(&mut self, target: PeerId, nodes: Vec<NodeRecord>) {
self.pending_neighbours.insert(target, nodes);
}

@@ -195,8 +195,8 @@ pub enum MockEvent {

/// Command for interacting with the `MockDiscovery` service
pub enum MockCommand {
MockPong { node_id: NodeId },
MockNeighbours { target: NodeId, nodes: Vec<NodeRecord> },
MockPong { node_id: PeerId },
MockNeighbours { target: PeerId, nodes: Vec<NodeRecord> },
}

/// Creates a new testing instance for [`Discv4`] and its service
@@ -209,7 +209,7 @@ pub async fn create_discv4_with_config(config: Discv4Config) -> (Discv4, Discv4S
let mut rng = thread_rng();
let socket = SocketAddr::from_str("0.0.0.0:0").unwrap();
let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng);
let id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]);
let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);
let external_addr = public_ip::addr().await.unwrap_or_else(|| socket.ip());
let local_enr =
NodeRecord { address: external_addr, tcp_port: socket.port(), udp_port: socket.port(), id };
@@ -231,21 +231,21 @@ pub fn rng_endpoint(rng: &mut impl Rng) -> NodeEndpoint {

pub fn rng_record(rng: &mut impl RngCore) -> NodeRecord {
let NodeEndpoint { address, udp_port, tcp_port } = rng_endpoint(rng);
NodeRecord { address, tcp_port, udp_port, id: NodeId::random() }
NodeRecord { address, tcp_port, udp_port, id: PeerId::random() }
}

pub fn rng_ipv6_record(rng: &mut impl RngCore) -> NodeRecord {
let mut ip = [0u8; 16];
rng.fill_bytes(&mut ip);
let address = IpAddr::V6(ip.into());
NodeRecord { address, tcp_port: rng.gen(), udp_port: rng.gen(), id: NodeId::random() }
NodeRecord { address, tcp_port: rng.gen(), udp_port: rng.gen(), id: PeerId::random() }
}

pub fn rng_ipv4_record(rng: &mut impl RngCore) -> NodeRecord {
let mut ip = [0u8; 4];
rng.fill_bytes(&mut ip);
let address = IpAddr::V4(ip.into());
NodeRecord { address, tcp_port: rng.gen(), udp_port: rng.gen(), id: NodeId::random() }
NodeRecord { address, tcp_port: rng.gen(), udp_port: rng.gen(), id: PeerId::random() }
}

pub fn rng_message(rng: &mut impl RngCore) -> Message {
@@ -256,7 +256,7 @@ pub fn rng_message(rng: &mut impl RngCore) -> Message {
expire: rng.gen(),
}),
2 => Message::Pong(Pong { to: rng_endpoint(rng), echo: H256::random(), expire: rng.gen() }),
3 => Message::FindNode(FindNode { id: NodeId::random(), expire: rng.gen() }),
3 => Message::FindNode(FindNode { id: PeerId::random(), expire: rng.gen() }),
4 => {
let num: usize = rng.gen_range(1..=SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS);
Message::Neighbours(Neighbours {
@@ -1,4 +1,4 @@
use crate::{proto::Octets, NodeId};
use crate::{proto::Octets, PeerId};
use bytes::{Buf, BufMut};
use generic_array::GenericArray;
use reth_primitives::keccak256;
@@ -13,10 +13,10 @@ use url::{Host, Url};

/// The key type for the table.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub(crate) struct NodeKey(pub(crate) NodeId);
pub(crate) struct NodeKey(pub(crate) PeerId);

impl From<NodeId> for NodeKey {
fn from(value: NodeId) -> Self {
impl From<PeerId> for NodeKey {
fn from(value: PeerId) -> Self {
NodeKey(value)
}
}
@@ -29,9 +29,9 @@ impl From<NodeKey> for discv5::Key<NodeKey> {
}
}

/// Converts a `NodeId` into the required `Key` type for the table
/// Converts a `PeerId` into the required `Key` type for the table
#[inline]
pub(crate) fn kad_key(node: NodeId) -> discv5::Key<NodeKey> {
pub(crate) fn kad_key(node: PeerId) -> discv5::Key<NodeKey> {
discv5::kbucket::Key::from(NodeKey::from(node))
}

@@ -45,20 +45,20 @@ pub struct NodeRecord {
/// UDP discovery port.
pub udp_port: u16,
/// Public key of the discovery service
pub id: NodeId,
pub id: PeerId,
}

impl NodeRecord {
/// Derive the [`NodeRecord`] from the secret key and addr
pub fn from_secret_key(addr: SocketAddr, sk: &SecretKey) -> Self {
let pk = secp256k1::PublicKey::from_secret_key(SECP256K1, sk);
let id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]);
let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);
Self::new(addr, id)
}
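For reference, the id computed here is the 64-byte uncompressed secp256k1 public key with its 0x04 prefix byte stripped. A small sketch of the same derivation using only the secp256k1 crate:

use secp256k1::{PublicKey, Secp256k1, SecretKey};

// Derive the 64 raw node-id bytes from a secret key, as done above.
fn node_id_bytes(sk: &SecretKey) -> [u8; 64] {
    let pk = PublicKey::from_secret_key(&Secp256k1::new(), sk);
    let uncompressed = pk.serialize_uncompressed(); // 65 bytes, 0x04-prefixed
    let mut id = [0u8; 64];
    id.copy_from_slice(&uncompressed[1..]); // drop the 0x04 prefix byte
    id
}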

/// Creates a new record
#[allow(unused)]
pub(crate) fn new(addr: SocketAddr, id: NodeId) -> Self {
pub(crate) fn new(addr: SocketAddr, id: PeerId) -> Self {
Self { address: addr.ip(), tcp_port: addr.port(), udp_port: addr.port(), id }
}

@@ -112,7 +112,7 @@ impl FromStr for NodeRecord {

let id = url
.username()
.parse::<NodeId>()
.parse::<PeerId>()
.map_err(|e| NodeRecordParseError::InvalidId(e.to_string()))?;

Ok(Self { address, id, tcp_port: port, udp_port: port })
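The `FromStr` impl touched here parses standard enode URLs, with the peer id in the username position. A hedged usage sketch — the 128-hex-char id below is a placeholder rather than a real key, and the crate path is an assumption:

use reth_discv4::NodeRecord; // assumed import path

let enode = format!("enode://{}@10.3.58.6:30303", "aa".repeat(64));
let record: NodeRecord = enode.parse().expect("valid enode url");
assert_eq!(record.tcp_port, 30303); // udp_port defaults to the same port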
@@ -126,7 +126,7 @@ impl Encodable for NodeRecord {
octets: Octets,
udp_port: u16,
tcp_port: u16,
id: NodeId,
id: PeerId,
}

let octets = match self.address {
@@ -185,7 +185,7 @@ mod tests {
address: IpAddr::V4(ip.into()),
tcp_port: rng.gen(),
udp_port: rng.gen(),
id: NodeId::random(),
id: PeerId::random(),
};

let mut buf = BytesMut::new();
@@ -206,7 +206,7 @@ mod tests {
address: IpAddr::V6(ip.into()),
tcp_port: rng.gen(),
udp_port: rng.gen(),
id: NodeId::random(),
id: PeerId::random(),
};

let mut buf = BytesMut::new();

@@ -1,6 +1,6 @@
#![allow(missing_docs)]

use crate::{error::DecodePacketError, node::NodeRecord, NodeId, MAX_PACKET_SIZE, MIN_PACKET_SIZE};
use crate::{error::DecodePacketError, node::NodeRecord, PeerId, MAX_PACKET_SIZE, MIN_PACKET_SIZE};
use bytes::{Buf, BufMut, Bytes, BytesMut};
use reth_primitives::{keccak256, H256};
use reth_rlp::{Decodable, DecodeError, Encodable, Header};
@@ -136,7 +136,7 @@ impl Message {
let msg = secp256k1::Message::from_slice(keccak256(&packet[97..]).as_bytes())?;

let pk = SECP256K1.recover_ecdsa(&msg, &recoverable_sig)?;
let node_id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]);
let node_id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);

let msg_type = packet[97];
let payload = &mut &packet[98..];
@@ -156,7 +156,7 @@ impl Message {
#[derive(Debug)]
pub struct Packet {
pub msg: Message,
pub node_id: NodeId,
pub node_id: PeerId,
pub hash: H256,
}

@@ -223,7 +223,7 @@ impl From<NodeRecord> for NodeEndpoint {
/// A [FindNode packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#findnode-packet-0x03).
#[derive(Clone, Copy, Debug, Eq, PartialEq, RlpEncodable, RlpDecodable)]
pub struct FindNode {
pub id: NodeId,
pub id: PeerId,
pub expire: u64,
}

@@ -499,7 +499,7 @@ mod tests {
for _ in 0..100 {
let msg = rng_message(&mut rng);
let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng);
let sender_id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]);
let sender_id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);

let (buf, _) = msg.encode(&secret_key);

@@ -34,7 +34,7 @@ pub enum EgressECIESValue {
#[derive(Clone, Debug, PartialEq, Eq)]
/// Raw ingress values for an ECIES protocol
pub enum IngressECIESValue {
/// Receiving a message from a [`peerId`]
/// Receiving a message from a [`PeerId`]
AuthReceive(PeerId),
/// Receiving an ACK message
Ack,

@@ -17,6 +17,9 @@ reth-ecies = { path = "../ecies" }
reth-primitives = { path = "../../primitives" }
reth-rlp = { path = "../../common/rlp", features = ["alloc", "derive", "std", "ethereum-types", "smol_str"] }

# used for Chain and builders
ethers-core = { git = "https://github.com/gakonst/ethers-rs", default-features = false }

# used for forkid
crc = "1"
maplit = "1"

145
crates/net/eth-wire/src/builder.rs
Normal file
145
crates/net/eth-wire/src/builder.rs
Normal file
@@ -0,0 +1,145 @@
//! Builder structs for [`Status`](crate::types::Status) and [`Hello`](crate::types::Hello)
//! messages.

use crate::{
capability::Capability,
p2pstream::{HelloMessage, ProtocolVersion},
EthVersion, Status,
};
use reth_primitives::{Chain, ForkId, PeerId, H256, U256};

/// Builder for [`Status`](crate::types::Status) messages.
///
/// # Example
/// ```
/// use reth_eth_wire::EthVersion;
/// use reth_primitives::{Chain, U256, H256, MAINNET_GENESIS, Hardfork};
/// use reth_eth_wire::types::Status;
///
/// // this is just an example status message!
/// let status = Status::builder()
/// .version(EthVersion::Eth66.into())
/// .chain(Chain::Named(ethers_core::types::Chain::Mainnet))
/// .total_difficulty(U256::from(100))
/// .blockhash(H256::from(MAINNET_GENESIS))
/// .genesis(H256::from(MAINNET_GENESIS))
/// .forkid(Hardfork::Latest.fork_id())
/// .build();
///
/// assert_eq!(
/// status,
/// Status {
/// version: EthVersion::Eth66.into(),
/// chain: Chain::Named(ethers_core::types::Chain::Mainnet),
/// total_difficulty: U256::from(100),
/// blockhash: H256::from(MAINNET_GENESIS),
/// genesis: H256::from(MAINNET_GENESIS),
/// forkid: Hardfork::Latest.fork_id(),
/// }
/// );
/// ```
#[derive(Debug, Default)]
pub struct StatusBuilder {
status: Status,
}

impl StatusBuilder {
/// Consumes the type and creates the actual [`Status`](crate::types::Status) message.
pub fn build(self) -> Status {
self.status
}

/// Sets the protocol version.
pub fn version(mut self, version: u8) -> Self {
self.status.version = version;
self
}

/// Sets the chain id.
pub fn chain(mut self, chain: Chain) -> Self {
self.status.chain = chain;
self
}

/// Sets the total difficulty.
pub fn total_difficulty(mut self, total_difficulty: U256) -> Self {
self.status.total_difficulty = total_difficulty;
self
}

/// Sets the block hash.
pub fn blockhash(mut self, blockhash: H256) -> Self {
self.status.blockhash = blockhash;
self
}

/// Sets the genesis hash.
pub fn genesis(mut self, genesis: H256) -> Self {
self.status.genesis = genesis;
self
}

/// Sets the fork id.
pub fn forkid(mut self, forkid: ForkId) -> Self {
self.status.forkid = forkid;
self
}
}

/// Builder for [`Hello`](crate::types::Hello) messages.
pub struct HelloBuilder {
hello: HelloMessage,
}

impl HelloBuilder {
/// Creates a new [`HelloBuilder`](crate::builder::HelloBuilder) with default [`Hello`] values,
/// and a `PeerId` corresponding to the given pubkey.
pub fn new(pubkey: PeerId) -> Self {
Self {
hello: HelloMessage {
protocol_version: ProtocolVersion::V5,
// TODO: proper client versioning
client_version: "Ethereum/1.0.0".to_string(),
capabilities: vec![EthVersion::Eth67.into()],
// TODO: default port config
port: 30303,
id: pubkey,
},
}
}

/// Consumes the type and creates the actual [`Hello`](crate::types::Hello) message.
pub fn build(self) -> HelloMessage {
self.hello
}

/// Sets the protocol version.
pub fn protocol_version(mut self, protocol_version: ProtocolVersion) -> Self {
self.hello.protocol_version = protocol_version;
self
}

/// Sets the client version.
pub fn client_version(mut self, client_version: String) -> Self {
self.hello.client_version = client_version;
self
}

/// Sets the capabilities.
pub fn capabilities(mut self, capabilities: Vec<Capability>) -> Self {
self.hello.capabilities = capabilities;
self
}

/// Sets the port.
pub fn port(mut self, port: u16) -> Self {
self.hello.port = port;
self
}

/// Sets the node id.
pub fn id(mut self, id: PeerId) -> Self {
self.hello.id = id;
self
}
}
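A short usage sketch of the new `HelloBuilder`. The random peer id is a placeholder (real callers derive it from their secp256k1 public key) and the client version string is illustrative:

use reth_eth_wire::builder::HelloBuilder;
use reth_primitives::PeerId;

// Start from the defaults and override only what differs.
let hello = HelloBuilder::new(PeerId::random())
    .client_version("reth/v0.1.0".to_string())
    .port(30305)
    .build();
assert_eq!(hello.port, 30305);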
@@ -91,6 +91,16 @@ impl Capabilities {
}
}

impl From<Vec<Capability>> for Capabilities {
fn from(value: Vec<Capability>) -> Self {
Self {
eth_66: value.iter().any(Capability::is_eth_v66),
eth_67: value.iter().any(Capability::is_eth_v67),
inner: value,
}
}
}
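The conversion above computes the eth/66 and eth/67 flags once at construction time, so later capability checks don't have to re-scan the vec. A hedged construction sketch (import paths assumed from the `pub mod capability` export below):

use reth_eth_wire::capability::{Capabilities, Capability};
use reth_eth_wire::EthVersion;

// Eagerly computes the eth_66/eth_67 flags during conversion.
let caps: Capabilities = vec![Capability::from(EthVersion::Eth67)].into();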

impl Encodable for Capabilities {
fn encode(&self, out: &mut dyn BufMut) {
self.inner.encode(out)

@@ -1,9 +1,9 @@
//! Error cases when handling a [`crate::EthStream`]
use std::io;

use reth_primitives::{Chain, H256};
use reth_primitives::{Chain, ValidationError, H256};

use crate::{capability::SharedCapabilityError, types::forkid::ValidationError};
use crate::capability::SharedCapabilityError;

/// Errors when sending/receiving messages
#[derive(thiserror::Error, Debug)]

@@ -1,10 +1,11 @@
use crate::{
error::{EthStreamError, HandshakeError},
types::{forkid::ForkFilter, EthMessage, ProtocolMessage, Status},
types::{EthMessage, ProtocolMessage, Status},
};
use bytes::{Bytes, BytesMut};
use futures::{ready, Sink, SinkExt, StreamExt};
use pin_project::pin_project;
use reth_primitives::ForkFilter;
use reth_rlp::{Decodable, Encodable};
use std::{
pin::Pin,
@@ -117,6 +118,7 @@ where
/// An `EthStream` wraps over any `Stream` that yields bytes and makes it
/// compatible with eth-networking protocol messages, which get RLP encoded/decoded.
#[pin_project]
#[derive(Debug)]
pub struct EthStream<S> {
#[pin]
inner: S,
@@ -203,7 +205,7 @@ where
mod tests {
use crate::{
p2pstream::{HelloMessage, ProtocolVersion, UnauthedP2PStream},
types::{broadcast::BlockHashNumber, forkid::ForkFilter, EthMessage, Status},
types::{broadcast::BlockHashNumber, EthMessage, Status},
EthStream, PassthroughCodec,
};
use futures::{SinkExt, StreamExt};
@@ -214,7 +216,7 @@ mod tests {

use crate::{capability::Capability, types::EthVersion};
use ethers_core::types::Chain;
use reth_primitives::{H256, U256};
use reth_primitives::{ForkFilter, H256, U256};

use super::UnauthedEthStream;

@@ -11,12 +11,17 @@
pub use tokio_util::codec::{
LengthDelimitedCodec as PassthroughCodec, LengthDelimitedCodecError as PassthroughCodecError,
};
pub mod builder;
pub mod capability;
pub mod error;
mod ethstream;
mod p2pstream;
mod pinger;
pub use builder::*;
pub mod types;
pub use types::*;

pub use ethstream::{EthStream, UnauthedEthStream};
pub use crate::{
ethstream::{EthStream, UnauthedEthStream},
p2pstream::{HelloMessage, P2PStream, UnauthedP2PStream},
};

@@ -138,6 +138,7 @@ where
/// A P2PStream wraps over any `Stream` that yields bytes and makes it compatible with `p2p`
/// protocol messages.
#[pin_project]
#[derive(Debug)]
pub struct P2PStream<S> {
#[pin]
inner: S,

@@ -1,59 +1,8 @@
//! Implements the `GetBlockHeaders`, `GetBlockBodies`, `BlockHeaders`, and `BlockBodies` message
//! types.
use reth_primitives::{Header, TransactionSigned, H256};
use reth_rlp::{
Decodable, DecodeError, Encodable, RlpDecodable, RlpDecodableWrapper, RlpEncodable,
RlpEncodableWrapper,
};

use super::RawBlockBody;

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
/// Either a block hash _or_ a block number
pub enum BlockHashOrNumber {
/// A block hash
Hash(H256),
/// A block number
Number(u64),
}

/// Allows for RLP encoding of either a block hash or block number
impl Encodable for BlockHashOrNumber {
fn length(&self) -> usize {
match self {
Self::Hash(block_hash) => block_hash.length(),
Self::Number(block_number) => block_number.length(),
}
}
fn encode(&self, out: &mut dyn bytes::BufMut) {
match self {
Self::Hash(block_hash) => block_hash.encode(out),
Self::Number(block_number) => block_number.encode(out),
}
}
}

/// Allows for RLP decoding of a block hash or block number
impl Decodable for BlockHashOrNumber {
fn decode(buf: &mut &[u8]) -> Result<Self, DecodeError> {
let header: u8 = *buf.first().ok_or(DecodeError::InputTooShort)?;
// if the byte string is exactly 32 bytes, decode it into a Hash
// 0xa0 = 0x80 (start of string) + 0x20 (32, length of string)
if header == 0xa0 {
// strip the first byte, parsing the rest of the string.
// If the rest of the string fails to decode into 32 bytes, we'll bubble up the
// decoding error.
let hash = H256::decode(buf)?;
Ok(Self::Hash(hash))
} else {
// a block number when encoded as bytes ranges from 0 to any number of bytes - we're
// going to accept numbers which fit in 64 bits.
// Any data larger than this which is not caught by the Hash decoding should error and
// is considered an invalid block number.
Ok(Self::Number(u64::decode(buf)?))
}
}
}
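The dispatch rule above keys entirely off the first RLP byte: exactly 0xa0 (a 32-byte string header) means a hash, and anything else falls through to u64 decoding. A standalone illustration of just that branch:

// Standalone illustration of the header-byte dispatch, not the reth impl.
fn classify(first_byte: u8) -> &'static str {
    // 0xa0 = 0x80 (string marker) + 0x20 (payload length 32)
    if first_byte == 0xa0 {
        "hash"
    } else {
        "number"
    }
}

assert_eq!(classify(0xa0), "hash"); // a 32-byte H256 payload follows
assert_eq!(classify(0x02), "number"); // single-byte RLP integer 2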
use reth_primitives::{BlockHashOrNumber, Header, TransactionSigned, H256};
use reth_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper};

/// A request for a peer to return block headers starting at the requested block.
/// The peer must return at most [`limit`](#structfield.limit) headers.
@@ -108,6 +57,7 @@ impl From<Vec<H256>> for GetBlockBodies {
}
}

// TODO(onbjerg): We should have this type in primitives
/// A response to [`GetBlockBodies`], containing bodies if any bodies were found.
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable)]
pub struct BlockBody {
@@ -151,11 +101,11 @@ mod test {
};
use hex_literal::hex;
use reth_primitives::{
Header, Signature, Transaction, TransactionKind, TransactionSigned, U256,
BlockHashOrNumber, Header, Signature, Transaction, TransactionKind, TransactionSigned, U256,
};
use reth_rlp::{Decodable, Encodable};

use super::{BlockBody, BlockHashOrNumber};
use super::BlockBody;

#[test]
fn decode_hash() {

@@ -10,6 +10,20 @@ pub struct NewBlockHashes(
pub Vec<BlockHashNumber>,
);

// === impl NewBlockHashes ===

impl NewBlockHashes {
/// Returns the latest block in the list of blocks.
pub fn latest(&self) -> Option<&BlockHashNumber> {
self.0.iter().fold(None, |latest, block| {
if let Some(latest) = latest {
return if latest.number > block.number { Some(latest) } else { Some(block) }
}
Some(block)
})
}
}

/// A block hash _and_ a block number.
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable)]
pub struct BlockHashNumber {
@@ -87,3 +101,20 @@ impl From<Vec<H256>> for NewPooledTransactionHashes {
NewPooledTransactionHashes(v)
}
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn can_return_latest_block() {
let mut blocks = NewBlockHashes(vec![BlockHashNumber { hash: H256::random(), number: 0 }]);
let latest = blocks.latest().unwrap();
assert_eq!(latest.number, 0);

blocks.0.push(BlockHashNumber { hash: H256::random(), number: 100 });
blocks.0.push(BlockHashNumber { hash: H256::random(), number: 2 });
let latest = blocks.latest().unwrap();
assert_eq!(latest.number, 100);
}
}

@@ -6,8 +6,6 @@ pub use status::Status;
pub mod version;
pub use version::EthVersion;

pub mod forkid;

pub mod message;
pub use message::{EthMessage, EthMessageID, ProtocolMessage};


@@ -1,5 +1,6 @@
use super::forkid::ForkId;
use reth_primitives::{Chain, H256, U256};
use crate::{EthVersion, StatusBuilder};

use reth_primitives::{Chain, ForkId, Hardfork, H256, MAINNET_GENESIS, U256};
use reth_rlp::{RlpDecodable, RlpEncodable};
use std::fmt::{Debug, Display};

@@ -37,6 +38,13 @@ pub struct Status {
pub forkid: ForkId,
}

impl Status {
/// Helper for returning a builder for the status message.
pub fn builder() -> StatusBuilder {
Default::default()
}
}

impl Display for Status {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let hexed_blockhash = hex::encode(self.blockhash);
@@ -84,18 +92,28 @@ impl Debug for Status {
}
}

impl Default for Status {
fn default() -> Self {
Status {
version: EthVersion::Eth67 as u8,
chain: Chain::Named(ethers_core::types::Chain::Mainnet),
total_difficulty: U256::zero(),
blockhash: MAINNET_GENESIS,
genesis: MAINNET_GENESIS,
forkid: Hardfork::Homestead.fork_id(),
}
}
}

#[cfg(test)]
mod tests {
use ethers_core::types::Chain as NamedChain;
use hex_literal::hex;
use reth_primitives::{Chain, H256, U256};
use reth_primitives::{Chain, ForkHash, ForkId, H256, U256};
use reth_rlp::{Decodable, Encodable};
use std::str::FromStr;

use crate::types::{
forkid::{ForkHash, ForkId},
EthVersion, Status,
};
use crate::types::{EthVersion, Status};

#[test]
fn encode_eth_status_message() {

@@ -5,10 +5,11 @@ use reth_interfaces::{
consensus::Consensus,
p2p::headers::{
client::{HeadersClient, HeadersStream},
downloader::{DownloadError, Downloader},
downloader::HeaderDownloader,
error::DownloadError,
},
};
use reth_primitives::{rpc::BlockId, SealedHeader};
use reth_primitives::SealedHeader;
use reth_rpc_types::engine::ForkchoiceState;

/// Download headers in batches
@@ -27,7 +28,7 @@ pub struct LinearDownloader<C, H> {
}

#[async_trait]
impl<C: Consensus, H: HeadersClient> Downloader for LinearDownloader<C, H> {
impl<C: Consensus, H: HeadersClient> HeaderDownloader for LinearDownloader<C, H> {
type Consensus = C;
type Client = H;

@@ -100,8 +101,7 @@ impl<C: Consensus, H: HeadersClient> LinearDownloader<C, H> {
) -> Result<LinearDownloadResult, DownloadError> {
// Request headers starting from tip or earliest cached
let start = earliest.map_or(forkchoice.head_block_hash, |h| h.parent_hash);
let mut headers =
self.download_headers(stream, BlockId::Hash(start), self.batch_size).await?;
let mut headers = self.download_headers(stream, start.into(), self.batch_size).await?;
headers.sort_unstable_by_key(|h| h.number);

let mut out = Vec::with_capacity(headers.len());
@@ -161,11 +161,6 @@ impl Default for LinearDownloadBuilder {
}

impl LinearDownloadBuilder {
/// Initialize a new builder
pub fn new() -> Self {
Self::default()
}

/// Set the request batch size
pub fn batch_size(mut self, size: u64) -> Self {
self.batch_size = size;
@@ -207,10 +202,11 @@ mod tests {
use reth_interfaces::{
p2p::headers::client::HeadersRequest,
test_utils::{
gen_random_header, gen_random_header_range, TestConsensus, TestHeadersClient,
generators::{random_header, random_header_range},
TestConsensus, TestHeadersClient,
},
};
use reth_primitives::{rpc::BlockId, SealedHeader};
use reth_primitives::{BlockHashOrNumber, SealedHeader};

use assert_matches::assert_matches;
use once_cell::sync::Lazy;
@@ -233,7 +229,7 @@ mod tests {
let retries = 5;
let (tx, rx) = oneshot::channel();
tokio::spawn(async move {
let downloader = LinearDownloadBuilder::new()
let downloader = LinearDownloadBuilder::default()
.retries(retries)
.build(CONSENSUS.clone(), CLIENT.clone());
let result =
@@ -257,7 +253,7 @@ mod tests {
let retries = 5;
let (tx, rx) = oneshot::channel();
tokio::spawn(async move {
let downloader = LinearDownloadBuilder::new()
let downloader = LinearDownloadBuilder::default()
.retries(retries)
.build(CONSENSUS.clone(), CLIENT.clone());
let result =
@@ -286,14 +282,14 @@ mod tests {
#[tokio::test]
#[serial]
async fn download_propagates_consensus_validation_error() {
let tip_parent = gen_random_header(1, None);
let tip = gen_random_header(2, Some(tip_parent.hash()));
let tip_parent = random_header(1, None);
let tip = random_header(2, Some(tip_parent.hash()));
let tip_hash = tip.hash();

let (tx, rx) = oneshot::channel();
tokio::spawn(async move {
let downloader =
LinearDownloadBuilder::new().build(CONSENSUS_FAIL.clone(), CLIENT.clone());
LinearDownloadBuilder::default().build(CONSENSUS_FAIL.clone(), CLIENT.clone());
let forkchoice = ForkchoiceState { head_block_hash: tip_hash, ..Default::default() };
let result = downloader.download(&SealedHeader::default(), &forkchoice).await;
tx.send(result).expect("failed to forward download response");
@@ -304,7 +300,7 @@ mod tests {
assert_matches!(
request,
Some((_, HeadersRequest { start, .. }))
if matches!(start, BlockId::Hash(hash) if *hash == tip_hash)
if matches!(start, BlockHashOrNumber::Hash(hash) if *hash == tip_hash)
);

let request = request.unwrap();
@@ -322,14 +318,15 @@ mod tests {
#[tokio::test]
#[serial]
async fn download_starts_with_chain_tip() {
let head = gen_random_header(1, None);
let tip = gen_random_header(2, Some(head.hash()));
let head = random_header(1, None);
let tip = random_header(2, Some(head.hash()));

let tip_hash = tip.hash();
let chain_head = head.clone();
let (tx, mut rx) = oneshot::channel();
tokio::spawn(async move {
let downloader = LinearDownloadBuilder::new().build(CONSENSUS.clone(), CLIENT.clone());
let downloader =
LinearDownloadBuilder::default().build(CONSENSUS.clone(), CLIENT.clone());
let forkchoice = ForkchoiceState { head_block_hash: tip_hash, ..Default::default() };
let result = downloader.download(&chain_head, &forkchoice).await;
tx.send(result).expect("failed to forward download response");
@@ -359,15 +356,16 @@ mod tests {
#[serial]
async fn download_returns_headers_desc() {
let (start, end) = (100, 200);
let head = gen_random_header(start, None);
let mut headers = gen_random_header_range(start + 1..end, head.hash());
let head = random_header(start, None);
let mut headers = random_header_range(start + 1..end, head.hash());
headers.reverse();

let tip_hash = headers.first().unwrap().hash();
let chain_head = head.clone();
let (tx, rx) = oneshot::channel();
tokio::spawn(async move {
let downloader = LinearDownloadBuilder::new().build(CONSENSUS.clone(), CLIENT.clone());
let downloader =
LinearDownloadBuilder::default().build(CONSENSUS.clone(), CLIENT.clone());
let forkchoice = ForkchoiceState { head_block_hash: tip_hash, ..Default::default() };
let result = downloader.download(&chain_head, &forkchoice).await;
tx.send(result).expect("failed to forward download response");

@@ -35,6 +35,7 @@ parking_lot = "0.12"
async-trait = "0.1"
bytes = "1.2"
either = "1.8"
linked_hash_set = "0.1"

secp256k1 = { version = "0.24", features = [
"global-context",

58
crates/net/network/src/cache.rs
Normal file
58
crates/net/network/src/cache.rs
Normal file
@@ -0,0 +1,58 @@
use linked_hash_set::LinkedHashSet;
use std::{borrow::Borrow, hash::Hash, num::NonZeroUsize};

/// A minimal LRU cache based on a `LinkedHashSet` with limited capacity.
///
/// If the length exceeds the set capacity, the oldest element will be removed.
/// In the limit, for each element inserted the oldest existing element will be removed.
#[derive(Debug, Clone)]
pub struct LruCache<T: Hash + Eq> {
limit: NonZeroUsize,
inner: LinkedHashSet<T>,
}

impl<T: Hash + Eq> LruCache<T> {
/// Creates a new `LruCache` using the given limit
pub fn new(limit: NonZeroUsize) -> Self {
Self { inner: LinkedHashSet::new(), limit }
}

/// Insert an element into the set.
///
/// If the element is new (did not exist before [`LruCache::insert()`] was called), then the
/// given length will be enforced and the oldest element will be removed if the limit was
/// exceeded.
///
/// If the set did not have this value present, true is returned.
/// If the set did have this value present, false is returned.
pub fn insert(&mut self, entry: T) -> bool {
if self.inner.insert(entry) {
if self.limit.get() == self.inner.len() {
// remove the oldest element in the set
self.inner.pop_front();
}
return true
}
false
}

/// Returns `true` if the set contains a value.
pub fn contains<Q: ?Sized>(&self, value: &Q) -> bool
where
T: Borrow<Q>,
Q: Hash + Eq,
{
self.inner.contains(value)
}
}

impl<T> Extend<T> for LruCache<T>
where
T: Eq + Hash,
{
fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
for item in iter.into_iter() {
self.insert(item);
}
}
}
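A quick usage sketch of the new cache. Note that the eviction check in `insert` fires as soon as `len` reaches `limit`, so the cache holds at most `limit - 1` entries in steady state:

use std::num::NonZeroUsize;

// Assumes the LruCache from cache.rs above is in scope.
let mut cache = LruCache::new(NonZeroUsize::new(3).unwrap());
assert!(cache.insert(1)); // new element
assert!(!cache.insert(1)); // already present, nothing changes
cache.insert(2);
cache.insert(3); // len hits the limit, oldest element (1) is evicted
assert!(!cache.contains(&1));
assert!(cache.contains(&3));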
@@ -1,7 +1,10 @@
use crate::{peers::PeersConfig, session::SessionsConfig};
use crate::{
import::{BlockImport, NoopBlockImport},
peers::PeersConfig,
session::SessionsConfig,
};
use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_PORT};
use reth_eth_wire::forkid::ForkId;
use reth_primitives::{Chain, H256};
use reth_primitives::{Chain, ForkId, H256};
use secp256k1::SecretKey;
use std::{
net::{Ipv4Addr, SocketAddr, SocketAddrV4},
@@ -31,6 +34,8 @@ pub struct NetworkConfig<C> {
pub chain: Chain,
/// Genesis hash of the network
pub genesis_hash: H256,
/// The block importer type.
pub block_import: Box<dyn BlockImport>,
}

// === impl NetworkConfig ===
@@ -76,9 +81,15 @@ pub struct NetworkConfigBuilder<C> {
peers_config: Option<PeersConfig>,
/// How to configure the sessions manager
sessions_config: Option<SessionsConfig>,
/// A fork identifier as defined by EIP-2124.
/// Serves as the chain compatibility identifier.
fork_id: Option<ForkId>,
/// The network's chain id
chain: Chain,
/// Network genesis hash
genesis_hash: H256,
/// The block importer type.
block_import: Box<dyn BlockImport>,
}

// === impl NetworkConfigBuilder ===
@@ -97,6 +108,7 @@ impl<C> NetworkConfigBuilder<C> {
fork_id: None,
chain: Chain::Named(reth_primitives::rpc::Chain::Mainnet),
genesis_hash: Default::default(),
block_import: Box::<NoopBlockImport>::default(),
}
}

@@ -106,6 +118,12 @@ impl<C> NetworkConfigBuilder<C> {
self
}

/// Sets the [`BlockImport`] type to configure.
pub fn block_import<T: BlockImport + 'static>(mut self, block_import: T) -> Self {
self.block_import = Box::new(block_import);
self
}

/// Consumes the type and creates the actual [`NetworkConfig`]
pub fn build(self) -> NetworkConfig<C> {
let Self {
@@ -119,6 +137,7 @@ impl<C> NetworkConfigBuilder<C> {
fork_id,
chain,
genesis_hash,
block_import,
} = self;
NetworkConfig {
client,
@@ -135,6 +154,7 @@ impl<C> NetworkConfigBuilder<C> {
fork_id,
chain,
genesis_hash,
block_import,
}
}
}

@@ -1,8 +1,9 @@
//! Discovery support for the network.

use crate::{error::NetworkError, NodeId};
use crate::error::NetworkError;
use futures::StreamExt;
use reth_discv4::{Discv4, Discv4Config, NodeRecord, TableUpdate};
use reth_primitives::PeerId;
use secp256k1::SecretKey;
use std::{
collections::{hash_map::Entry, HashMap, VecDeque},
@@ -19,7 +20,7 @@ pub struct Discovery {
/// All nodes discovered via discovery protocol.
///
/// These nodes can be ephemeral and are updated via the discovery protocol.
discovered_nodes: HashMap<NodeId, SocketAddr>,
discovered_nodes: HashMap<PeerId, SocketAddr>,
/// Local ENR of the discovery service.
local_enr: NodeRecord,
/// Handler to interact with the Discovery v4 service
@@ -66,12 +67,12 @@ impl Discovery {
}

/// Returns the id with which the local node identifies itself in the network
pub(crate) fn local_id(&self) -> NodeId {
pub(crate) fn local_id(&self) -> PeerId {
self.local_enr.id
}

/// Manually adds an address to the set.
pub(crate) fn add_known_address(&mut self, node_id: NodeId, addr: SocketAddr) {
pub(crate) fn add_known_address(&mut self, node_id: PeerId, addr: SocketAddr) {
self.on_discv4_update(TableUpdate::Added(NodeRecord {
address: addr.ip(),
tcp_port: addr.port(),
@@ -81,7 +82,7 @@ impl Discovery {
}

/// Returns all nodes we know exist in the network.
pub fn known_nodes(&mut self) -> &HashMap<NodeId, SocketAddr> {
pub fn known_nodes(&mut self) -> &HashMap<PeerId, SocketAddr> {
&self.discovered_nodes
}

@@ -131,7 +132,7 @@ impl Discovery {
/// Events produced by the [`Discovery`] manager.
pub enum DiscoveryEvent {
/// A new node was discovered
Discovered(NodeId, SocketAddr),
Discovered(PeerId, SocketAddr),
}

#[cfg(test)]

@@ -1,10 +1,13 @@
//! Fetch data from the network.

use crate::{message::BlockRequest, NodeId};
use crate::{message::BlockRequest, peers::ReputationChange};
use futures::StreamExt;
use reth_eth_wire::{BlockBody, EthMessage};
use reth_interfaces::p2p::{error::RequestResult, headers::client::HeadersRequest};
use reth_primitives::{Header, H256, U256};
use reth_eth_wire::{BlockBody, GetBlockBodies};
use reth_interfaces::p2p::{
error::{RequestError, RequestResult},
headers::client::HeadersRequest,
};
use reth_primitives::{Header, PeerId, H256};
use std::{
collections::{HashMap, VecDeque},
task::{Context, Poll},
@@ -19,9 +22,11 @@ use tokio_stream::wrappers::UnboundedReceiverStream;
/// peers and sends the response once ready.
pub struct StateFetcher {
/// Currently active [`GetBlockHeaders`] requests
inflight_headers_requests: HashMap<NodeId, Request<HeadersRequest, RequestResult<Vec<Header>>>>,
inflight_headers_requests: HashMap<PeerId, Request<HeadersRequest, RequestResult<Vec<Header>>>>,
/// Currently active [`GetBlockBodies`] requests
inflight_bodies_requests: HashMap<PeerId, Request<Vec<H256>, RequestResult<Vec<BlockBody>>>>,
/// The list of available peers for requests.
peers: HashMap<NodeId, Peer>,
peers: HashMap<PeerId, Peer>,
/// Requests queued for processing
queued_requests: VecDeque<DownloadRequest>,
/// Receiver for new incoming download requests
@@ -34,26 +39,69 @@ pub struct StateFetcher {

impl StateFetcher {
/// Invoked when connected to a new peer.
pub(crate) fn new_connected_peer(&mut self, _node_id: NodeId, _best_hash: H256) {}
pub(crate) fn new_connected_peer(
&mut self,
peer_id: PeerId,
best_hash: H256,
best_number: u64,
) {
self.peers.insert(peer_id, Peer { state: PeerState::Idle, best_hash, best_number });
}

/// Invoked when an active session was closed.
pub(crate) fn on_session_closed(&mut self, _peer: &NodeId) {}
///
/// This cancels all inflight requests and sends an error to the receivers.
pub(crate) fn on_session_closed(&mut self, peer: &PeerId) {
self.peers.remove(peer);
if let Some(req) = self.inflight_headers_requests.remove(peer) {
let _ = req.response.send(Err(RequestError::ConnectionDropped));
}
if let Some(req) = self.inflight_bodies_requests.remove(peer) {
let _ = req.response.send(Err(RequestError::ConnectionDropped));
}
}

/// Updates the block information for the peer.
///
/// Returns `true` if this is a newer block
pub(crate) fn update_peer_block(&mut self, peer_id: &PeerId, hash: H256, number: u64) -> bool {
if let Some(peer) = self.peers.get_mut(peer_id) {
if number > peer.best_number {
peer.best_hash = hash;
peer.best_number = number;
return true
}
}
false
}
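The session-close path above fails every pending request instead of leaving its receiver dangling. The underlying pattern is simply a oneshot channel whose sender reports the error; a minimal sketch with a stand-in error type:

use tokio::sync::oneshot;

#[derive(Debug, PartialEq)]
enum ReqError {
    ConnectionDropped, // stand-in for RequestError::ConnectionDropped
}

let (tx, rx) = oneshot::channel::<Result<Vec<u8>, ReqError>>();
// The session closes before a response arrives:
let _ = tx.send(Err(ReqError::ConnectionDropped));
assert_eq!(rx.blocking_recv().unwrap(), Err(ReqError::ConnectionDropped));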

/// Invoked when an active session is about to be disconnected.
pub(crate) fn on_pending_disconnect(&mut self, _peer: &NodeId) {}
pub(crate) fn on_pending_disconnect(&mut self, peer_id: &PeerId) {
if let Some(peer) = self.peers.get_mut(peer_id) {
peer.state = PeerState::Closing;
}
}

/// Returns the next idle peer that's ready to accept a request
fn next_peer(&mut self) -> Option<(&PeerId, &mut Peer)> {
self.peers.iter_mut().find(|(_, peer)| peer.state.is_idle())
}

/// Returns the next action to return
fn poll_action(&mut self) -> Option<FetchAction> {
// TODO find matching peers
if self.queued_requests.is_empty() {
return None
}

// if let Some(request) = self.queued_requests.pop_front() {
// if let Some(action) = self.on_download_request(request) {
// return Poll::Ready(action)
// }
// }
None
let peer_id = *self.next_peer()?.0;

let request = self.queued_requests.pop_front().expect("not empty; qed");
let request = self.prepare_block_request(peer_id, request);

Some(FetchAction::BlockRequest { peer_id, request })
}

/// Received a request via a downloader
fn on_download_request(&mut self, request: DownloadRequest) -> Option<FetchAction> {
match request {
DownloadRequest::GetBlockHeaders { request: _, response: _ } => {}
@@ -91,21 +139,79 @@ impl StateFetcher {
Poll::Pending
}

/// Handles a new request to a peer.
///
/// Caution: this assumes the peer exists and is idle
fn prepare_block_request(&mut self, peer_id: PeerId, req: DownloadRequest) -> BlockRequest {
// update the peer's state
if let Some(peer) = self.peers.get_mut(&peer_id) {
peer.state = req.peer_state();
}

let started = Instant::now();
match req {
DownloadRequest::GetBlockHeaders { request, response } => {
let inflight = Request { request, response, started };
self.inflight_headers_requests.insert(peer_id, inflight);

unimplemented!("unify start types");

// BlockRequest::GetBlockHeaders(GetBlockHeaders {
// // TODO: this should be converted
// start_block: BlockHashOrNumber::Number(0),
// limit: request.limit,
// skip: 0,
// reverse: request.reverse,
// })
}
DownloadRequest::GetBlockBodies { request, response } => {
let inflight = Request { request: request.clone(), response, started };
self.inflight_bodies_requests.insert(peer_id, inflight);
BlockRequest::GetBlockBodies(GetBlockBodies(request))
}
}
}

/// Returns a new followup request for the peer.
///
/// Caution: this expects that the peer is _not_ closed
fn followup_request(&mut self, peer_id: PeerId) -> Option<BlockResponseOutcome> {
let req = self.queued_requests.pop_front()?;
let req = self.prepare_block_request(peer_id, req);
Some(BlockResponseOutcome::Request(peer_id, req))
}

/// Called on a `GetBlockHeaders` response from a peer
pub(crate) fn on_block_headers_response(
&mut self,
_peer: NodeId,
_res: RequestResult<Vec<Header>>,
peer_id: PeerId,
res: RequestResult<Vec<Header>>,
) -> Option<BlockResponseOutcome> {
if let Some(resp) = self.inflight_headers_requests.remove(&peer_id) {
let _ = resp.response.send(res);
}
if let Some(peer) = self.peers.get_mut(&peer_id) {
if peer.state.on_request_finished() {
return self.followup_request(peer_id)
}
}
None
}

/// Called on a `GetBlockBodies` response from a peer
pub(crate) fn on_block_bodies_response(
&mut self,
_peer: NodeId,
_res: RequestResult<Vec<BlockBody>>,
peer_id: PeerId,
res: RequestResult<Vec<BlockBody>>,
) -> Option<BlockResponseOutcome> {
if let Some(resp) = self.inflight_bodies_requests.remove(&peer_id) {
let _ = resp.response.send(res);
}
if let Some(peer) = self.peers.get_mut(&peer_id) {
if peer.state.on_request_finished() {
return self.followup_request(peer_id)
}
}
None
}

@@ -120,6 +226,7 @@ impl Default for StateFetcher {
let (download_requests_tx, download_requests_rx) = mpsc::unbounded_channel();
Self {
inflight_headers_requests: Default::default(),
inflight_bodies_requests: Default::default(),
peers: Default::default(),
queued_requests: Default::default(),
download_requests_rx: UnboundedReceiverStream::new(download_requests_rx),
@@ -148,14 +255,12 @@ impl HeadersDownloader {

/// Represents a connected peer
struct Peer {
/// Identifier for requests.
request_id: u64,
/// The state this peer currently resides in.
state: PeerState,
/// Best known hash that the peer has
best_hash: H256,
/// Best known number the peer has.
best_number: U256,
/// Tracks the best number of the peer.
best_number: u64,
}

/// Tracks the state of an individual peer
@@ -164,6 +269,32 @@ enum PeerState {
Idle,
/// Peer is handling a `GetBlockHeaders` request.
GetBlockHeaders,
/// Peer is handling a `GetBlockBodies` request.
GetBlockBodies,
/// Peer session is about to close
Closing,
}

// === impl PeerState ===

impl PeerState {
/// Returns true if the peer is currently idle.
fn is_idle(&self) -> bool {
matches!(self, PeerState::Idle)
}

/// Resets the state on a received response.
///
/// If the state was already marked as `Closing` do nothing.
///
/// Returns `true` if the peer is ready for another request.
fn on_request_finished(&mut self) -> bool {
if !matches!(self, PeerState::Closing) {
*self = PeerState::Idle;
return true
}
false
}
}
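The peer state machine above is small enough to mirror end to end; this standalone sketch shows why a peer marked `Closing` is never handed a follow-up request:

// Standalone mirror of the PeerState transition, for illustration only.
#[derive(PartialEq)]
enum State {
    Idle,
    Busy,
    Closing,
}

fn on_request_finished(state: &mut State) -> bool {
    // A closing peer must not become Idle again.
    if *state != State::Closing {
        *state = State::Idle;
        return true;
    }
    false
}

let mut s = State::Busy;
assert!(on_request_finished(&mut s)); // Busy -> Idle, ready for a follow-up
s = State::Closing;
assert!(!on_request_finished(&mut s)); // stays Closing, no follow-up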

/// A request that waits for a response from the network so it can send it back through the response
@@ -185,13 +316,26 @@ enum DownloadRequest {
GetBlockBodies { request: Vec<H256>, response: oneshot::Sender<RequestResult<Vec<BlockBody>>> },
}

// === impl DownloadRequest ===

impl DownloadRequest {
/// Returns the corresponding state for a peer that handles the request.
fn peer_state(&self) -> PeerState {
match self {
DownloadRequest::GetBlockHeaders { .. } => PeerState::GetBlockHeaders,
DownloadRequest::GetBlockBodies { .. } => PeerState::GetBlockBodies,
}
}
}

/// An action the syncer can emit.
pub(crate) enum FetchAction {
/// Dispatch an eth request to the given peer.
EthRequest {
node_id: NodeId,
BlockRequest {
/// The targeted recipient for the request
peer_id: PeerId,
/// The request to send
request: EthMessage,
request: BlockRequest,
},
}

@@ -201,8 +345,7 @@ pub(crate) enum FetchAction {
#[derive(Debug)]
pub(crate) enum BlockResponseOutcome {
/// Continue with another request to the peer.
Request(NodeId, BlockRequest),
/// How to handle a bad response
// TODO this should include some form of reputation change
BadResponse(NodeId),
Request(PeerId, BlockRequest),
/// How to handle a bad response and the reputation change to apply.
BadResponse(PeerId, ReputationChange),
}

64
crates/net/network/src/import.rs
Normal file
64
crates/net/network/src/import.rs
Normal file
@@ -0,0 +1,64 @@
use crate::message::NewBlockMessage;
use reth_primitives::PeerId;
use std::task::{Context, Poll};

/// Abstraction over block import.
pub trait BlockImport: Send + Sync {
/// Invoked for a received `NewBlock` broadcast message from the peer.
///
/// > When a `NewBlock` announcement message is received from a peer, the client first verifies
/// > the basic header validity of the block, checking whether the proof-of-work value is valid.
///
/// This is supposed to start verification. The results are then expected to be returned via
/// [`BlockImport::poll`].
fn on_new_block(&mut self, peer_id: PeerId, incoming_block: NewBlockMessage);

/// Returns the results of a [`BlockImport::on_new_block`]
fn poll(&mut self, cx: &mut Context<'_>) -> Poll<BlockImportOutcome>;
}

/// Outcome of the [`BlockImport`]'s block handling.
pub struct BlockImportOutcome {
/// Sender of the `NewBlock` message.
pub peer: PeerId,
/// The result after validating the block
pub result: Result<BlockValidation, BlockImportError>,
}

/// Represents the successful validation of a received `NewBlock` message.
#[derive(Debug)]
pub enum BlockValidation {
/// Basic Header validity check, after which the block should be relayed to peers via a
/// `NewBlock` message
ValidHeader {
/// received block
block: NewBlockMessage,
},
/// Successfully imported: state-root matches after execution. The block should be relayed via
/// `NewBlockHashes`
ValidBlock {
/// validated block.
block: NewBlockMessage,
},
}

/// Represents the error case of a failed block import
#[derive(Debug, thiserror::Error)]
pub enum BlockImportError {
/// Consensus error
#[error(transparent)]
Consensus(#[from] reth_interfaces::consensus::Error),
}

/// An implementation of `BlockImport` that does nothing
#[derive(Debug, Default)]
#[non_exhaustive]
pub struct NoopBlockImport;

impl BlockImport for NoopBlockImport {
fn on_new_block(&mut self, _peer_id: PeerId, _incoming_block: NewBlockMessage) {}

fn poll(&mut self, _cx: &mut Context<'_>) -> Poll<BlockImportOutcome> {
Poll::Pending
}
}
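Beyond `NoopBlockImport`, any implementor drives validation from `on_new_block` and surfaces results through `poll`. A sketch of a queue-backed implementor under simplified local stand-in types (not the real reth-network API); a production `poll` would also store the waker so it can signal readiness:

```rust
use std::collections::VecDeque;
use std::task::{Context, Poll};

// Stand-ins for the reth types so the example compiles on its own.
type PeerId = u64;

#[derive(Debug, Clone)]
struct NewBlockMessage {
    number: u64,
}

struct BlockImportOutcome {
    peer: PeerId,
    result: Result<NewBlockMessage, String>,
}

trait BlockImport: Send + Sync {
    fn on_new_block(&mut self, peer_id: PeerId, incoming_block: NewBlockMessage);
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<BlockImportOutcome>;
}

/// Accepts every block with a non-zero number; a stand-in for real header validation.
#[derive(Default)]
struct EagerImport {
    ready: VecDeque<BlockImportOutcome>,
}

impl BlockImport for EagerImport {
    fn on_new_block(&mut self, peer_id: PeerId, incoming_block: NewBlockMessage) {
        let result = if incoming_block.number > 0 {
            Ok(incoming_block)
        } else {
            Err("invalid block number".to_string())
        };
        self.ready.push_back(BlockImportOutcome { peer: peer_id, result });
    }

    fn poll(&mut self, _cx: &mut Context<'_>) -> Poll<BlockImportOutcome> {
        // drain one outcome per call; return Pending when nothing is queued
        match self.ready.pop_front() {
            Some(outcome) => Poll::Ready(outcome),
            None => Poll::Pending,
        }
    }
}

fn main() {
    let mut import = EagerImport::default();
    import.on_new_block(7, NewBlockMessage { number: 1 });
    // drive it manually here instead of through a runtime
    let outcome = import.ready.pop_front().expect("one outcome queued");
    assert_eq!(outcome.peer, 7);
    assert!(outcome.result.is_ok());
}
```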
@@ -5,7 +5,7 @@
attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
))]
// TODO remove later
#![allow(dead_code)]
#![allow(dead_code, clippy::too_many_arguments)]

//! reth P2P networking.
//!
@@ -15,10 +15,12 @@
//! port of that network. This includes public identities (public key) and addresses (where to reach
//! them).

mod cache;
mod config;
mod discovery;
pub mod error;
mod fetch;
mod import;
mod listener;
mod manager;
mod message;
@@ -29,9 +31,6 @@ mod state;
mod swarm;
mod transactions;

/// Identifier for a unique node
pub type NodeId = reth_discv4::NodeId;

pub use config::NetworkConfig;
pub use manager::NetworkManager;
pub use network::NetworkHandle;
@@ -19,21 +19,23 @@ use crate::{
config::NetworkConfig,
discovery::Discovery,
error::NetworkError,
import::{BlockImport, BlockImportOutcome, BlockValidation},
listener::ConnectionListener,
message::{NewBlockMessage, PeerMessage, PeerRequest, PeerRequestSender},
network::{NetworkHandle, NetworkHandleMessage},
peers::PeersManager,
session::SessionManager,
state::NetworkState,
swarm::{Swarm, SwarmEvent},
NodeId,
};
use futures::{Future, StreamExt};
use parking_lot::Mutex;
use reth_eth_wire::{
capability::{Capabilities, CapabilityMessage},
EthMessage,
GetPooledTransactions, NewPooledTransactionHashes, PooledTransactions, Transactions,
};
use reth_interfaces::provider::BlockProvider;
use reth_interfaces::{p2p::error::RequestResult, provider::BlockProvider};
use reth_primitives::PeerId;
use std::{
net::SocketAddr,
pin::Pin,
@@ -43,7 +45,7 @@ use std::{
},
task::{Context, Poll},
};
use tokio::sync::mpsc;
use tokio::sync::{mpsc, oneshot};
use tokio_stream::wrappers::UnboundedReceiverStream;
use tracing::{error, trace};

@@ -77,8 +79,8 @@ pub struct NetworkManager<C> {
handle: NetworkHandle,
/// Receiver half of the command channel set up between this type and the [`NetworkHandle`]
from_handle_rx: UnboundedReceiverStream<NetworkHandleMessage>,
/// Handles block imports.
block_import_sink: (),
/// Handles block imports according to the `eth` protocol.
block_import: Box<dyn BlockImport>,
/// The address of this node that listens for incoming connections.
listener_address: Arc<Mutex<SocketAddr>>,
/// All listeners for [`Network`] events.
@@ -88,8 +90,8 @@ pub struct NetworkManager<C> {
/// This is updated via internal events and shared via `Arc` with the [`NetworkHandle`]
/// Updated by the `NetworkWorker` and loaded by the `NetworkService`.
num_active_peers: Arc<AtomicUsize>,
/// Local copy of the `NodeId` of the local node.
local_node_id: NodeId,
/// Local copy of the `PeerId` of the local node.
local_node_id: PeerId,
}

// === impl NetworkManager ===

@@ -112,6 +114,7 @@ where
peers_config,
sessions_config,
genesis_hash,
block_import,
..
} = config;

@@ -145,7 +148,7 @@ where
swarm,
handle,
from_handle_rx: UnboundedReceiverStream::new(from_handle_rx),
block_import_sink: (),
block_import,
listener_address,
event_listeners: Default::default(),
num_active_peers,
@@ -163,7 +166,7 @@ where
/// Event hook for an unexpected message from the peer.
fn on_invalid_message(
&self,
node_id: NodeId,
node_id: PeerId,
_capabilities: Arc<Capabilities>,
_message: CapabilityMessage,
) {
@@ -171,41 +174,67 @@ where
// TODO: disconnect?
}

/// Handles a received [`CapabilityMessage`] from the peer.
fn on_capability_message(&mut self, _node_id: NodeId, msg: CapabilityMessage) {
match msg {
CapabilityMessage::Eth(eth) => {
match eth {
EthMessage::Status(_) => {}
EthMessage::NewBlockHashes(_) => {
// update peer's state, to track what blocks this peer has seen
}
EthMessage::NewBlock(_) => {
// emit new block and track that the peer knows this block
}
EthMessage::Transactions(_) => {
// need to emit this as event/send to tx handler
}
EthMessage::NewPooledTransactionHashes(_) => {
// need to emit this as event/send to tx handler
}
/// Handle an incoming request from the peer
fn on_eth_request(&mut self, peer_id: PeerId, req: PeerRequest) {
match req {
PeerRequest::GetBlockHeaders { .. } => {}
PeerRequest::GetBlockBodies { .. } => {}
PeerRequest::GetPooledTransactions { request, response } => {
// notify listeners about this request
self.event_listeners.send(NetworkEvent::GetPooledTransactions {
peer_id,
request,
response: Arc::new(response),
});
}
PeerRequest::GetNodeData { .. } => {}
PeerRequest::GetReceipts { .. } => {}
}
}

// TODO: should remove the response types here, as they are handled separately
EthMessage::GetBlockHeaders(_) => {}
EthMessage::BlockHeaders(_) => {}
EthMessage::GetBlockBodies(_) => {}
EthMessage::BlockBodies(_) => {}
EthMessage::GetPooledTransactions(_) => {}
EthMessage::PooledTransactions(_) => {}
EthMessage::GetNodeData(_) => {}
EthMessage::NodeData(_) => {}
EthMessage::GetReceipts(_) => {}
EthMessage::Receipts(_) => {}
/// Invoked after a `NewBlock` message from the peer was validated
fn on_block_import_result(&mut self, outcome: BlockImportOutcome) {
let BlockImportOutcome { peer, result } = outcome;
match result {
Ok(validated_block) => match validated_block {
BlockValidation::ValidHeader { block } => {
self.swarm.state_mut().update_peer_block(&peer, block.hash, block.number());
self.swarm.state_mut().announce_new_block(block);
}
BlockValidation::ValidBlock { block } => {
self.swarm.state_mut().announce_new_block_hash(block);
}
},
Err(_err) => {
// TODO report peer for bad block
}
CapabilityMessage::Other(_) => {
// other subprotocols
}
}

/// Handles a received Message from the peer.
fn on_peer_message(&mut self, peer_id: PeerId, msg: PeerMessage) {
match msg {
PeerMessage::NewBlockHashes(hashes) => {
let hashes = Arc::try_unwrap(hashes).unwrap_or_else(|arc| (*arc).clone());
// update peer's state, to track what blocks this peer has seen
self.swarm.state_mut().on_new_block_hashes(peer_id, hashes.0)
}
PeerMessage::NewBlock(block) => {
self.swarm.state_mut().on_new_block(peer_id, block.hash);
// start block import process
self.block_import.on_new_block(peer_id, block);
}
PeerMessage::PooledTransactions(msg) => {
self.event_listeners
.send(NetworkEvent::IncomingPooledTransactionHashes { peer_id, msg });
}
PeerMessage::Transactions(msg) => {
self.event_listeners.send(NetworkEvent::IncomingTransactions { peer_id, msg });
}
PeerMessage::EthRequest(req) => {
self.on_eth_request(peer_id, req);
}
PeerMessage::Other(_) => {}
}
}

@@ -215,8 +244,20 @@ where
NetworkHandleMessage::EventListener(tx) => {
self.event_listeners.listeners.push(tx);
}
NetworkHandleMessage::NewestBlock(_, _) => {}
_ => {}
NetworkHandleMessage::AnnounceBlock(block, hash) => {
let msg = NewBlockMessage { hash, block: Arc::new(block) };
self.swarm.state_mut().announce_new_block(msg);
}
NetworkHandleMessage::EthRequest { peer_id, request } => {
self.swarm.sessions_mut().send_message(&peer_id, PeerMessage::EthRequest(request))
}
NetworkHandleMessage::SendTransaction { peer_id, msg } => {
self.swarm.sessions_mut().send_message(&peer_id, PeerMessage::Transactions(msg))
}
NetworkHandleMessage::SendPooledTransactionHashes { peer_id, msg } => self
.swarm
.sessions_mut()
.send_message(&peer_id, PeerMessage::PooledTransactions(msg)),
}
}
}
@@ -230,6 +271,11 @@ where
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();

// poll new block imports
while let Poll::Ready(outcome) = this.block_import.poll(cx) {
this.on_block_import_result(outcome);
}

// process incoming messages from a handle
loop {
match this.from_handle_rx.poll_next_unpin(cx) {
@@ -248,8 +294,8 @@ where
while let Poll::Ready(Some(event)) = this.swarm.poll_next_unpin(cx) {
// handle event
match event {
SwarmEvent::CapabilityMessage { node_id, message } => {
this.on_capability_message(node_id, message)
SwarmEvent::ValidMessage { node_id, message } => {
this.on_peer_message(node_id, message)
}
SwarmEvent::InvalidCapabilityMessage { node_id, capabilities, message } => {
this.on_invalid_message(node_id, capabilities, message)
@@ -266,25 +312,38 @@ where
SwarmEvent::OutgoingTcpConnection { remote_addr } => {
trace!(?remote_addr, target = "net", "Starting outbound connection.");
}
SwarmEvent::SessionEstablished { node_id, remote_addr } => {
SwarmEvent::SessionEstablished {
node_id: peer_id,
remote_addr,
capabilities,
messages,
} => {
let total_active = this.num_active_peers.fetch_add(1, Ordering::Relaxed) + 1;
trace!(
?remote_addr,
?node_id,
?peer_id,
?total_active,
target = "net",
"Session established"
);

this.event_listeners.send(NetworkEvent::SessionEstablished {
peer_id,
capabilities,
messages,
});
}
SwarmEvent::SessionClosed { node_id, remote_addr } => {
SwarmEvent::SessionClosed { node_id: peer_id, remote_addr } => {
let total_active = this.num_active_peers.fetch_sub(1, Ordering::Relaxed) - 1;
trace!(
?remote_addr,
?node_id,
?peer_id,
?total_active,
target = "net",
"Session disconnected"
);

this.event_listeners.send(NetworkEvent::SessionClosed { peer_id });
}
SwarmEvent::IncomingPendingSessionClosed { .. } => {}
SwarmEvent::OutgoingPendingSessionClosed { .. } => {}
@@ -292,14 +351,33 @@ where
}
}

todo!()
Poll::Pending
}
}

/// Events emitted by the network that are of interest for subscribers.
///
/// This includes any event types that may be relevant to tasks
#[derive(Debug, Clone)]
pub enum NetworkEvent {
EthMessage { node_id: NodeId, message: EthMessage },
/// Closed the peer session.
SessionClosed { peer_id: PeerId },
/// Established a new session with the given peer.
SessionEstablished {
peer_id: PeerId,
capabilities: Arc<Capabilities>,
messages: PeerRequestSender,
},
/// Received list of transactions from the given peer.
IncomingTransactions { peer_id: PeerId, msg: Arc<Transactions> },
/// Received list of transaction hashes from the given peer.
IncomingPooledTransactionHashes { peer_id: PeerId, msg: Arc<NewPooledTransactionHashes> },
/// Incoming `GetPooledTransactions` request from a peer.
GetPooledTransactions {
peer_id: PeerId,
request: GetPooledTransactions,
response: Arc<oneshot::Sender<RequestResult<PooledTransactions>>>,
},
}
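Subscribers obtain these events through the unbounded channel that `event_listener()` registers. A sketch of the consumer side, assuming a tokio runtime and a local stand-in event type:

```rust
use tokio::sync::mpsc;

// Stand-in for the real NetworkEvent enum.
#[derive(Debug)]
enum NetworkEvent {
    SessionEstablished { peer_id: u64 },
    SessionClosed { peer_id: u64 },
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel();

    // something on the manager side emits events
    tx.send(NetworkEvent::SessionEstablished { peer_id: 1 }).unwrap();
    tx.send(NetworkEvent::SessionClosed { peer_id: 1 }).unwrap();
    drop(tx); // close the channel so the loop below terminates

    while let Some(event) = rx.recv().await {
        match event {
            NetworkEvent::SessionEstablished { peer_id } => {
                println!("peer {peer_id} connected");
            }
            NetworkEvent::SessionClosed { peer_id } => {
                println!("peer {peer_id} disconnected");
            }
        }
    }
}
```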

/// Bundles all listeners for [`NetworkEvent`]s.
@@ -5,27 +5,47 @@

use futures::FutureExt;
use reth_eth_wire::{
BlockBodies, BlockBody, BlockHeaders, GetBlockBodies, GetBlockHeaders, GetNodeData,
GetPooledTransactions, GetReceipts, NewBlock, NewBlockHashes, NodeData, PooledTransactions,
Receipts, Transactions,
capability::CapabilityMessage, BlockBodies, BlockBody, BlockHeaders, GetBlockBodies,
GetBlockHeaders, GetNodeData, GetPooledTransactions, GetReceipts, NewBlock, NewBlockHashes,
NewPooledTransactionHashes, NodeData, PooledTransactions, Receipts, Transactions,
};
use std::task::{ready, Context, Poll};

use crate::NodeId;
use reth_eth_wire::capability::CapabilityMessage;
use reth_interfaces::p2p::error::RequestResult;
use reth_primitives::{Header, Receipt, TransactionSigned};
use reth_primitives::{Header, PeerId, Receipt, TransactionSigned, H256};
use std::{
sync::Arc,
task::{ready, Context, Poll},
};
use tokio::sync::{mpsc, mpsc::error::TrySendError, oneshot};

/// Internal form of a `NewBlock` message
#[derive(Debug, Clone)]
pub struct NewBlockMessage {
/// Hash of the block
pub hash: H256,
/// Raw received message
pub block: Arc<NewBlock>,
}

// === impl NewBlockMessage ===

impl NewBlockMessage {
/// Returns the block number of the block
pub fn number(&self) -> u64 {
self.block.block.header.number
}
}
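The point of `NewBlockMessage` is that the block hash is computed once and the payload lives behind an `Arc`, so fanning the same block out to many peers clones a pointer, not the block. A self-contained sketch of that design with simplified stand-in types:

```rust
use std::sync::Arc;

// Stand-ins for the real block types.
#[derive(Debug)]
struct Block {
    number: u64,
}

#[derive(Debug)]
struct NewBlock {
    block: Block,
}

#[derive(Debug, Clone)]
struct NewBlockMessage {
    hash: [u8; 32], // precomputed block hash
    block: Arc<NewBlock>,
}

fn main() {
    let raw = NewBlock { block: Block { number: 42 } };
    let msg = NewBlockMessage { hash: [0u8; 32], block: Arc::new(raw) };

    // cloning for each recipient is cheap: only the Arc refcount moves
    let per_peer: Vec<NewBlockMessage> = (0..3).map(|_| msg.clone()).collect();
    assert_eq!(per_peer.len(), 3);
    assert_eq!(per_peer[0].hash, msg.hash);
    assert_eq!(msg.block.block.number, 42);
}
```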

/// Represents all messages that can be sent to a peer session
#[derive(Debug)]
pub enum PeerMessage {
/// Announce new block hashes
NewBlockHashes(NewBlockHashes),
NewBlockHashes(Arc<NewBlockHashes>),
/// Broadcast new block.
NewBlock(Box<NewBlock>),
NewBlock(NewBlockMessage),
/// Broadcast transactions.
Transactions(Transactions),
Transactions(Arc<Transactions>),
/// Announce new pooled transaction hashes.
PooledTransactions(Arc<NewPooledTransactionHashes>),
/// All `eth` request variants.
EthRequest(PeerRequest),
/// Other than eth namespace message
@@ -180,7 +200,7 @@ impl PeerResponseResult {
#[derive(Debug, Clone)]
pub struct PeerRequestSender {
/// id of the remote node.
pub(crate) peer: NodeId,
pub(crate) peer: PeerId,
/// The Sender half connected to a session.
pub(crate) to_session_tx: mpsc::Sender<PeerRequest>,
}
@@ -1,6 +1,7 @@
use crate::{manager::NetworkEvent, peers::PeersHandle, NodeId};
use crate::{manager::NetworkEvent, message::PeerRequest, peers::PeersHandle};
use parking_lot::Mutex;
use reth_primitives::{H256, U256};
use reth_eth_wire::{NewBlock, NewPooledTransactionHashes, Transactions};
use reth_primitives::{PeerId, H256};
use std::{
net::SocketAddr,
sync::{atomic::AtomicUsize, Arc},
@@ -24,7 +25,7 @@ impl NetworkHandle {
num_active_peers: Arc<AtomicUsize>,
listener_address: Arc<Mutex<SocketAddr>>,
to_manager_tx: UnboundedSender<NetworkHandleMessage>,
local_node_id: NodeId,
local_node_id: PeerId,
peers: PeersHandle,
) -> Self {
let inner = NetworkInner {
@@ -47,6 +48,16 @@ impl NetworkHandle {
let _ = self.manager().send(NetworkHandleMessage::EventListener(tx));
rx
}

/// Sends a [`NetworkHandleMessage`] to the manager
pub(crate) fn send_message(&self, msg: NetworkHandleMessage) {
let _ = self.inner.to_manager_tx.send(msg);
}

/// Sends a [`PeerRequest`] to the given peer's session.
pub fn send_request(&mut self, peer_id: PeerId, request: PeerRequest) {
self.send_message(NetworkHandleMessage::EthRequest { peer_id, request })
}
}
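`send_request` hands a `PeerRequest` to the manager; each request variant carries a `oneshot::Sender`, and the caller awaits the paired receiver while the session task answers. A sketch of that request/response handshake with a local stand-in request type, assuming a tokio runtime:

```rust
use tokio::sync::oneshot;

// Stand-ins for the real request/response types.
#[derive(Debug)]
struct BlockHeaders(Vec<u64>);

enum PeerRequest {
    GetBlockHeaders {
        start: u64,
        response: oneshot::Sender<BlockHeaders>,
    },
}

#[tokio::main]
async fn main() {
    let (response, rx) = oneshot::channel();
    let request = PeerRequest::GetBlockHeaders { start: 100, response };

    // the session task would receive the request and answer through the sender
    match request {
        PeerRequest::GetBlockHeaders { start, response } => {
            let _ = response.send(BlockHeaders(vec![start, start + 1]));
        }
    }

    // the requesting side awaits the result
    let headers = rx.await.expect("session dropped the sender");
    assert_eq!(headers.0, vec![100, 101]);
}
```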

struct NetworkInner {
@@ -57,17 +68,27 @@ struct NetworkInner {
/// The local address that accepts incoming connections.
listener_address: Arc<Mutex<SocketAddr>>,
/// The identifier used by this node.
local_node_id: NodeId,
local_node_id: PeerId,
/// Access to all the nodes
peers: PeersHandle, // TODO need something to access
peers: PeersHandle,
}

/// Internal messages that can be passed to the [`NetworkManager`](crate::NetworkManager).
#[allow(missing_docs)]
pub(crate) enum NetworkHandleMessage {
/// Add a new listener for [`NetworkEvent`].
EventListener(UnboundedSender<NetworkEvent>),
/// Broadcast event to announce a new block to all nodes.
AnnounceBlock,
/// Returns the newest imported block by the network.
NewestBlock(H256, U256),
AnnounceBlock(NewBlock, H256),
/// Sends the list of transactions to the given peer.
SendTransaction { peer_id: PeerId, msg: Arc<Transactions> },
/// Sends the list of transaction hashes to the given peer.
SendPooledTransactionHashes { peer_id: PeerId, msg: Arc<NewPooledTransactionHashes> },
/// Send an `eth` protocol request to the peer.
EthRequest {
/// The peer to send the request to.
peer_id: PeerId,
/// The request to send to the peer's sessions.
request: PeerRequest,
},
}
@@ -1,5 +1,5 @@
use futures::StreamExt;
use reth_discv4::NodeId;
use reth_primitives::PeerId;
use std::{
collections::{hash_map::Entry, HashMap, VecDeque},
net::SocketAddr,
@@ -32,7 +32,7 @@ pub struct PeersHandle {
/// The [`PeersManager`] will be notified on peer related changes
pub(crate) struct PeersManager {
/// All peers known to the network
peers: HashMap<NodeId, Peer>,
peers: HashMap<PeerId, Peer>,
/// Copy of the receiver half, so new [`PeersHandle`] can be created on demand.
manager_tx: mpsc::UnboundedSender<PeerCommand>,
/// Receiver half of the command channel.
@@ -74,7 +74,7 @@ impl PeersManager {
///
/// If the reputation of the peer is below the `BANNED_REPUTATION` threshold, a disconnect will
/// be scheduled.
pub(crate) fn on_active_session(&mut self, peer_id: NodeId, addr: SocketAddr) {
pub(crate) fn on_active_session(&mut self, peer_id: PeerId, addr: SocketAddr) {
match self.peers.entry(peer_id) {
Entry::Occupied(mut entry) => {
let value = entry.get_mut();
@@ -96,7 +96,7 @@ impl PeersManager {
/// Called when a session to a peer was disconnected.
///
/// Accepts an additional [`ReputationChange`] value to apply to the peer.
pub(crate) fn on_disconnected(&mut self, peer: NodeId, reputation_change: ReputationChange) {
pub(crate) fn on_disconnected(&mut self, peer: PeerId, reputation_change: ReputationChange) {
if let Some(mut peer) = self.peers.get_mut(&peer) {
self.connection_info.decr_state(peer.state);
peer.state = PeerConnectionState::Idle;
@@ -108,7 +108,7 @@ impl PeersManager {
///
/// If the peer already exists, then the address will be updated. If the addresses differ, the
/// old address is returned
pub(crate) fn add_discovered_node(&mut self, peer_id: NodeId, addr: SocketAddr) {
pub(crate) fn add_discovered_node(&mut self, peer_id: PeerId, addr: SocketAddr) {
match self.peers.entry(peer_id) {
Entry::Occupied(mut entry) => {
let node = entry.get_mut();
@@ -121,7 +121,7 @@ impl PeersManager {
}

/// Removes the tracked node from the set.
pub(crate) fn remove_discovered_node(&mut self, peer_id: NodeId) {
pub(crate) fn remove_discovered_node(&mut self, peer_id: PeerId) {
if let Some(entry) = self.peers.remove(&peer_id) {
if entry.state.is_connected() {
self.connection_info.decr_state(entry.state);
@@ -133,11 +133,11 @@ impl PeersManager {
/// Returns the idle peer with the highest reputation.
///
/// Returns `None` if no peer is available.
fn best_unconnected(&mut self) -> Option<(NodeId, &mut Peer)> {
fn best_unconnected(&mut self) -> Option<(PeerId, &mut Peer)> {
self.peers
.iter_mut()
.filter(|(_, peer)| peer.state.is_unconnected())
.fold(None::<(&NodeId, &mut Peer)>, |mut best_peer, candidate| {
.fold(None::<(&PeerId, &mut Peer)>, |mut best_peer, candidate| {
if let Some(best_peer) = best_peer.take() {
if best_peer.1.reputation >= candidate.1.reputation {
return Some(best_peer)
@@ -331,14 +331,14 @@ pub(crate) enum PeerCommand {
/// Command for manually adding a peer
Add {
/// Identifier of the peer.
peer_id: NodeId,
peer_id: PeerId,
/// The address of the peer
addr: SocketAddr,
},
/// Remove a peer from the set
///
/// If currently connected this will disconnect the session
Remove(NodeId),
Remove(PeerId),
}

/// Actions the peer manager can trigger.
@@ -347,17 +347,17 @@ pub enum PeerAction {
/// Start a new connection to a peer.
Connect {
/// The peer to connect to.
peer_id: NodeId,
peer_id: PeerId,
/// Where to reach the node
remote_addr: SocketAddr,
},
/// Disconnect an existing connection.
Disconnect { peer_id: NodeId },
Disconnect { peer_id: PeerId },
/// Disconnect an existing incoming connection, because the peer's reputation is below the
/// banned threshold.
DisconnectBannedIncoming {
/// Peer id of the established connection.
peer_id: NodeId,
peer_id: PeerId,
},
}
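For reference, the reputation-maximizing selection that `best_unconnected` builds with a fold can also be expressed with `max_by_key`, sketched here over a simplified peer table. Note the two differ on ties: a fold with `>=` keeps the first maximum it sees, `max_by_key` returns the last.

```rust
use std::collections::HashMap;

// Simplified stand-in for the real Peer record.
#[derive(Debug)]
struct Peer {
    reputation: i32,
    connected: bool,
}

fn best_unconnected(peers: &HashMap<u64, Peer>) -> Option<(&u64, &Peer)> {
    peers
        .iter()
        .filter(|(_, peer)| !peer.connected)
        .max_by_key(|(_, peer)| peer.reputation)
}

fn main() {
    let peers = HashMap::from([
        (1, Peer { reputation: 50, connected: false }),
        (2, Peer { reputation: 90, connected: true }), // connected: skipped
        (3, Peer { reputation: 70, connected: false }),
    ]);
    let best = best_unconnected(&peers).expect("one candidate exists");
    assert_eq!(*best.0, 3); // highest reputation among unconnected peers
}
```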
@@ -6,13 +6,16 @@ use crate::{
handle::{ActiveSessionMessage, SessionCommand},
SessionId,
},
NodeId,
};
use fnv::FnvHashMap;
use futures::{stream::Fuse, Sink, Stream};
use pin_project::pin_project;
use reth_ecies::stream::ECIESStream;
use reth_eth_wire::capability::{Capabilities, CapabilityMessage};
use reth_eth_wire::{
capability::{Capabilities, CapabilityMessage},
EthStream, P2PStream,
};
use reth_primitives::PeerId;
use std::{
collections::VecDeque,
future::Future,
@@ -31,9 +34,9 @@ pub(crate) struct ActiveSession {
pub(crate) next_id: usize,
/// The underlying connection.
#[pin]
pub(crate) conn: ECIESStream<TcpStream>,
pub(crate) conn: EthStream<P2PStream<ECIESStream<TcpStream>>>,
/// Identifier of the node we're connected to.
pub(crate) remote_node_id: NodeId,
pub(crate) remote_node_id: PeerId,
/// All capabilities the peer announced
pub(crate) remote_capabilities: Arc<Capabilities>,
/// Internal identifier of this session
@@ -1,13 +1,15 @@
//! Session handles
use crate::{
message::PeerMessage,
session::{Direction, SessionId},
NodeId,
};
use reth_ecies::{stream::ECIESStream, ECIESError};
use reth_eth_wire::{
capability::{Capabilities, CapabilityMessage},
Status,
error::EthStreamError,
EthStream, P2PStream, Status,
};
use reth_primitives::PeerId;
use std::{io, net::SocketAddr, sync::Arc, time::Instant};
use tokio::{
net::TcpStream,
@@ -33,7 +35,7 @@ pub(crate) struct ActiveSessionHandle {
/// The assigned id for this session
pub(crate) session_id: SessionId,
/// The identifier of the remote peer
pub(crate) remote_id: NodeId,
pub(crate) remote_id: PeerId,
/// The timestamp when the session has been established.
pub(crate) established: Instant,
/// Announced capabilities of the peer.
@@ -65,23 +67,24 @@ pub(crate) enum PendingSessionEvent {
Established {
session_id: SessionId,
remote_addr: SocketAddr,
node_id: NodeId,
/// The remote node's public key
node_id: PeerId,
capabilities: Arc<Capabilities>,
status: Status,
conn: ECIESStream<TcpStream>,
conn: EthStream<P2PStream<ECIESStream<TcpStream>>>,
},
/// Handshake unsuccessful, session was disconnected.
Disconnected {
remote_addr: SocketAddr,
session_id: SessionId,
direction: Direction,
error: Option<ECIESError>,
error: Option<EthStreamError>,
},
/// Thrown when unable to establish a [`TcpStream`].
OutgoingConnectionError {
remote_addr: SocketAddr,
session_id: SessionId,
node_id: NodeId,
node_id: PeerId,
error: io::Error,
},
/// Thrown when authentication via ECIES failed.
@@ -93,7 +96,8 @@ pub(crate) enum PendingSessionEvent {
pub(crate) enum SessionCommand {
/// Disconnect the connection
Disconnect,
Message(CapabilityMessage),
/// Sends a message to the peer
Message(PeerMessage),
}

/// Message variants an active session can produce and send back to the
@@ -101,18 +105,18 @@ pub(crate) enum SessionCommand {
#[derive(Debug)]
pub(crate) enum ActiveSessionMessage {
/// Session disconnected.
Closed { node_id: NodeId, remote_addr: SocketAddr },
Closed { node_id: PeerId, remote_addr: SocketAddr },
/// A session received a valid message via RLPx.
ValidMessage {
/// Identifier of the remote peer.
node_id: NodeId,
node_id: PeerId,
/// Message received from the peer.
message: CapabilityMessage,
message: PeerMessage,
},
/// Received a message that does not match the announced capabilities of the peer.
InvalidMessage {
/// Identifier of the remote peer.
node_id: NodeId,
node_id: PeerId,
/// Announced capabilities of the remote peer.
capabilities: Arc<Capabilities>,
/// Message received from the peer.
@@ -1,21 +1,24 @@
//! Support for handling peer sessions.
pub use crate::message::PeerRequestSender;
use crate::{
message::PeerMessage,
session::{
active::ActiveSession,
handle::{
ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle,
SessionCommand,
},
},
NodeId,
};
use fnv::FnvHashMap;
use futures::{future::Either, io, FutureExt, StreamExt};
use reth_ecies::{stream::ECIESStream, ECIESError};
use reth_ecies::stream::ECIESStream;
use reth_eth_wire::{
capability::{Capabilities, CapabilityMessage},
Status, UnauthedEthStream,
error::EthStreamError,
HelloBuilder, HelloMessage, Status, StatusBuilder, UnauthedEthStream, UnauthedP2PStream,
};
use reth_primitives::{ForkFilter, Hardfork, PeerId};
use secp256k1::{SecretKey, SECP256K1};
use std::{
collections::HashMap,
@@ -48,7 +51,13 @@ pub(crate) struct SessionManager {
/// The secret key used for authenticating sessions.
secret_key: SecretKey,
/// The node id of this node
node_id: NodeId,
node_id: PeerId,
/// The `Status` message to send to peers.
status: Status,
/// The `Hello` message to send to peers.
hello: HelloMessage,
/// The [`ForkFilter`] used to validate the peer's `Status` message.
fork_filter: ForkFilter,
/// Size of the command buffer per session.
session_command_buffer: usize,
/// All spawned session tasks.
@@ -61,7 +70,7 @@ pub(crate) struct SessionManager {
/// session is authenticated, it can be moved to the `active_session` set.
pending_sessions: FnvHashMap<SessionId, PendingSessionHandle>,
/// All active sessions that are ready to exchange messages.
active_sessions: HashMap<NodeId, ActiveSessionHandle>,
active_sessions: HashMap<PeerId, ActiveSessionHandle>,
/// The original Sender half of the [`PendingSessionEvent`] channel.
///
/// When a new (pending) session is created, the corresponding [`PendingSessionHandle`] will
@@ -87,12 +96,21 @@ impl SessionManager {
let (active_session_tx, active_session_rx) = mpsc::channel(config.session_event_buffer);

let pk = secret_key.public_key(SECP256K1);
let node_id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]);
let node_id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);

// TODO: make sure this is the right place to put these builders - maybe per-Network rather
// than per-Session?
let hello = HelloBuilder::new(node_id).build();
let status = StatusBuilder::default().build();
let fork_filter = Hardfork::Frontier.fork_filter();

Self {
next_id: 0,
secret_key,
node_id,
status,
hello,
fork_filter,
session_command_buffer: config.session_command_buffer,
spawned_tasks: Default::default(),
pending_sessions: Default::default(),
@@ -139,6 +157,9 @@ impl SessionManager {
pending_events,
remote_addr,
self.secret_key,
self.hello.clone(),
self.status,
self.fork_filter.clone(),
));

let handle = PendingSessionHandle { disconnect_tx };
@@ -147,7 +168,7 @@ impl SessionManager {
}

/// Starts a new pending session from the local node to the given remote node.
pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_node_id: NodeId) {
pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_node_id: PeerId) {
let session_id = self.next_id();
let (disconnect_tx, disconnect_rx) = oneshot::channel();
let pending_events = self.pending_sessions_tx.clone();
@@ -158,6 +179,9 @@ impl SessionManager {
remote_addr,
remote_node_id,
self.secret_key,
self.hello.clone(),
self.status,
self.fork_filter.clone(),
));

let handle = PendingSessionHandle { disconnect_tx };
@@ -168,12 +192,19 @@ impl SessionManager {
///
/// This will trigger the disconnect on the session task to gracefully terminate. The result
/// will be picked up by the receiver.
pub(crate) fn disconnect(&self, node: NodeId) {
pub(crate) fn disconnect(&self, node: PeerId) {
if let Some(session) = self.active_sessions.get(&node) {
session.disconnect();
}
}

/// Sends a message to the peer's session
pub(crate) fn send_message(&mut self, peer_id: &PeerId, msg: PeerMessage) {
if let Some(session) = self.active_sessions.get_mut(peer_id) {
let _ = session.commands_to_session.try_send(SessionCommand::Message(msg));
}
}

/// This polls all the session handles and returns [`SessionEvent`].
///
/// Active sessions are prioritized.
@@ -376,7 +407,7 @@ pub(crate) enum SessionEvent {
///
/// This session is now able to exchange data.
SessionEstablished {
node_id: NodeId,
node_id: PeerId,
remote_addr: SocketAddr,
capabilities: Arc<Capabilities>,
status: Status,
@@ -384,30 +415,30 @@
},
/// A session received a valid message via RLPx.
ValidMessage {
node_id: NodeId,
node_id: PeerId,
/// Message received from the peer.
message: CapabilityMessage,
message: PeerMessage,
},
/// Received a message that does not match the announced capabilities of the peer.
InvalidMessage {
node_id: NodeId,
node_id: PeerId,
/// Announced capabilities of the remote peer.
capabilities: Arc<Capabilities>,
/// Message received from the peer.
message: CapabilityMessage,
},
/// Closed an incoming pending session during authentication.
IncomingPendingSessionClosed { remote_addr: SocketAddr, error: Option<ECIESError> },
IncomingPendingSessionClosed { remote_addr: SocketAddr, error: Option<EthStreamError> },
/// Closed an outgoing pending session during authentication.
OutgoingPendingSessionClosed {
remote_addr: SocketAddr,
node_id: NodeId,
error: Option<ECIESError>,
node_id: PeerId,
error: Option<EthStreamError>,
},
/// Failed to establish a tcp stream
OutgoingConnectionError { remote_addr: SocketAddr, node_id: NodeId, error: io::Error },
OutgoingConnectionError { remote_addr: SocketAddr, node_id: PeerId, error: io::Error },
/// Active session was disconnected.
Disconnected { node_id: NodeId, remote_addr: SocketAddr },
Disconnected { node_id: PeerId, remote_addr: SocketAddr },
}

/// The error thrown when the max configured limit has been reached and no more connections are
@@ -426,6 +457,9 @@ async fn start_pending_incoming_session(
events: mpsc::Sender<PendingSessionEvent>,
remote_addr: SocketAddr,
secret_key: SecretKey,
hello: HelloMessage,
status: Status,
fork_filter: ForkFilter,
) {
authenticate(
disconnect_rx,
@@ -435,6 +469,9 @@ async fn start_pending_incoming_session(
remote_addr,
secret_key,
Direction::Incoming,
hello,
status,
fork_filter,
)
.await
}
@@ -446,8 +483,11 @@ async fn start_pending_outbound_session(
events: mpsc::Sender<PendingSessionEvent>,
session_id: SessionId,
remote_addr: SocketAddr,
remote_node_id: NodeId,
remote_node_id: PeerId,
secret_key: SecretKey,
hello: HelloMessage,
status: Status,
fork_filter: ForkFilter,
) {
let stream = match TcpStream::connect(remote_addr).await {
Ok(stream) => stream,
@@ -471,6 +511,9 @@ async fn start_pending_outbound_session(
remote_addr,
secret_key,
Direction::Outgoing(remote_node_id),
hello,
status,
fork_filter,
)
.await
}
@@ -481,7 +524,7 @@ pub(crate) enum Direction {
/// Incoming connection.
Incoming,
/// Outgoing connection to a specific node.
Outgoing(NodeId),
Outgoing(PeerId),
}

async fn authenticate(
@@ -492,6 +535,9 @@ async fn authenticate(
remote_addr: SocketAddr,
secret_key: SecretKey,
direction: Direction,
hello: HelloMessage,
status: Status,
fork_filter: ForkFilter,
) {
let stream = match direction {
Direction::Incoming => match ECIESStream::incoming(stream, secret_key).await {
@@ -520,8 +566,17 @@ async fn authenticate(
}
};

let unauthed = UnauthedEthStream::new(stream);
let auth = authenticate_stream(unauthed, session_id, remote_addr, direction).boxed();
let unauthed = UnauthedP2PStream::new(stream);
let auth = authenticate_stream(
unauthed,
session_id,
remote_addr,
direction,
hello,
status,
fork_filter,
)
.boxed();

match futures::future::select(disconnect_rx, auth).await {
Either::Left((_, _)) => {
@@ -544,10 +599,47 @@ async fn authenticate(
///
/// On Success return the authenticated stream as [`PendingSessionEvent`]
async fn authenticate_stream(
_stream: UnauthedEthStream<ECIESStream<TcpStream>>,
_session_id: SessionId,
_remote_addr: SocketAddr,
_direction: Direction,
stream: UnauthedP2PStream<ECIESStream<TcpStream>>,
session_id: SessionId,
remote_addr: SocketAddr,
direction: Direction,
hello: HelloMessage,
status: Status,
fork_filter: ForkFilter,
) -> PendingSessionEvent {
todo!()
// conduct the p2p handshake and return the authenticated stream
let (p2p_stream, their_hello) = match stream.handshake(hello).await {
Ok(stream_res) => stream_res,
Err(err) => {
return PendingSessionEvent::Disconnected {
remote_addr,
session_id,
direction,
error: Some(err.into()),
}
}
};

// if the hello handshake was successful we can try status handshake
let eth_unauthed = UnauthedEthStream::new(p2p_stream);
let (eth_stream, their_status) = match eth_unauthed.handshake(status, fork_filter).await {
Ok(stream_res) => stream_res,
Err(err) => {
return PendingSessionEvent::Disconnected {
remote_addr,
session_id,
direction,
error: Some(err),
}
}
};

PendingSessionEvent::Established {
session_id,
remote_addr,
node_id: their_hello.id,
capabilities: Arc::new(Capabilities::from(their_hello.capabilities)),
status: their_status,
conn: eth_stream,
}
}
@@ -1,30 +1,31 @@
//! Keeps track of the state of the network.

use crate::{
cache::LruCache,
discovery::{Discovery, DiscoveryEvent},
fetch::StateFetcher,
message::{PeerRequestSender, PeerResponse},
fetch::{BlockResponseOutcome, StateFetcher},
message::{
BlockRequest, NewBlockMessage, PeerRequest, PeerRequestSender, PeerResponse,
PeerResponseResult,
},
peers::{PeerAction, PeersManager},
NodeId,
};

use reth_eth_wire::{capability::Capabilities, Status};
use reth_eth_wire::{capability::Capabilities, BlockHashNumber, NewBlockHashes, Status};
use reth_interfaces::provider::BlockProvider;
use reth_primitives::H256;
use reth_primitives::{PeerId, H256};
use std::{
collections::{HashMap, VecDeque},
net::SocketAddr,
num::NonZeroUsize,
sync::Arc,
task::{Context, Poll},
};
use tokio::sync::oneshot;

use crate::{
fetch::BlockResponseOutcome,
message::{BlockRequest, PeerRequest, PeerResponseResult},
};
use tracing::trace;

/// Cache limit of blocks to keep track of for a single peer.
const PEER_BLOCK_CACHE_LIMIT: usize = 512;

/// The [`NetworkState`] keeps track of the state of all peers in the network.
///
/// This includes:
@@ -37,7 +38,7 @@ use tracing::trace;
/// This type is also responsible for responding for received request.
pub struct NetworkState<C> {
/// All connected peers and their state.
connected_peers: HashMap<NodeId, ConnectedPeer>,
connected_peers: HashMap<PeerId, ConnectedPeer>,
/// Manages connections to peers.
peers_manager: PeersManager,
/// Buffered messages until polled.
@@ -83,7 +84,7 @@ where
/// should be rejected.
pub(crate) fn on_session_activated(
&mut self,
peer: NodeId,
peer: PeerId,
capabilities: Arc<Capabilities>,
status: Status,
request_tx: PeerRequestSender,
@@ -91,7 +92,10 @@ where
// TODO add capacity check
debug_assert!(!self.connected_peers.contains_key(&peer), "Already connected; not possible");

self.state_fetcher.new_connected_peer(peer, status.blockhash);
// find the corresponding block number
let block_number =
self.client.block_number(status.blockhash).ok().flatten().unwrap_or_default();
self.state_fetcher.new_connected_peer(peer, status.blockhash, block_number);

self.connected_peers.insert(
peer,
@@ -100,6 +104,7 @@ where
capabilities,
request_tx,
pending_response: None,
blocks: LruCache::new(NonZeroUsize::new(PEER_BLOCK_CACHE_LIMIT).unwrap()),
},
);

@@ -107,17 +112,100 @@ where
}

/// Event hook for a disconnected session for the peer.
pub(crate) fn on_session_closed(&mut self, peer: NodeId) {
pub(crate) fn on_session_closed(&mut self, peer: PeerId) {
self.connected_peers.remove(&peer);
self.state_fetcher.on_session_closed(&peer);
}

/// Propagates Block to peers.
pub(crate) fn announce_block(&mut self, _hash: H256, _block: ()) {
// TODO propagate the newblock messages to all connected peers that haven't seen the block
// yet
/// Starts propagating the new block to peers that haven't reported the block yet.
///
/// This is supposed to be invoked after the block was validated.
///
/// > It then sends the block to a small fraction of connected peers (usually the square root of
/// > the total number of peers) using the `NewBlock` message.
///
/// See also <https://github.com/ethereum/devp2p/blob/master/caps/eth.md>
pub(crate) fn announce_new_block(&mut self, msg: NewBlockMessage) {
// send a `NewBlock` message to a fraction of the connected peers (square root of the total
// number of peers)
let num_propagate = (self.connected_peers.len() as f64).sqrt() as u64 + 1;

todo!()
let number = msg.block.block.header.number;
let mut count = 0;
for (peer_id, peer) in self.connected_peers.iter_mut() {
if peer.blocks.contains(&msg.hash) {
// skip peers which already reported the block
continue
}

// Queue a `NewBlock` message for the peer
if count < num_propagate {
self.queued_messages
.push_back(StateAction::NewBlock { peer_id: *peer_id, block: msg.clone() });

// update peer block info
if self.state_fetcher.update_peer_block(peer_id, msg.hash, number) {
peer.best_hash = msg.hash;
}

// mark the block as seen by the peer
peer.blocks.insert(msg.hash);

count += 1;
}

if count >= num_propagate {
break
}
}
}
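The square-root fraction comes from the eth wire spec quoted above: only roughly sqrt(n) peers receive the full `NewBlock`, the rest learn the hash afterwards via `NewBlockHashes`. A quick check of what `num_propagate` evaluates to for a few peer counts:

```rust
// Mirrors the expression used in announce_new_block.
fn num_propagate(connected_peers: usize) -> u64 {
    (connected_peers as f64).sqrt() as u64 + 1
}

fn main() {
    for peers in [0, 1, 4, 16, 64, 100] {
        // e.g. 64 peers -> 9 receive the block body, the rest only the hash
        println!("{peers:>3} peers -> NewBlock to {}", num_propagate(peers));
    }
    assert_eq!(num_propagate(64), 9);
    assert_eq!(num_propagate(100), 11);
}
```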

/// Completes the block propagation process started in [`NetworkState::announce_new_block()`]
/// by sending a `NewBlockHashes` broadcast to all peers that haven't seen it yet.
pub(crate) fn announce_new_block_hash(&mut self, msg: NewBlockMessage) {
let number = msg.block.block.header.number;
let hashes = Arc::new(NewBlockHashes(vec![BlockHashNumber { hash: msg.hash, number }]));
for (peer_id, peer) in self.connected_peers.iter_mut() {
if peer.blocks.contains(&msg.hash) {
// skip peers which already reported the block
continue
}

if self.state_fetcher.update_peer_block(peer_id, msg.hash, number) {
peer.best_hash = msg.hash;
}

self.queued_messages.push_back(StateAction::NewBlockHashes {
peer_id: *peer_id,
hashes: Arc::clone(&hashes),
});
}
}

/// Updates the block information for the peer.
pub(crate) fn update_peer_block(&mut self, peer_id: &PeerId, hash: H256, number: u64) {
if let Some(peer) = self.connected_peers.get_mut(peer_id) {
peer.best_hash = hash;
}
self.state_fetcher.update_peer_block(peer_id, hash, number);
}

/// Invoked after a `NewBlock` message was received by the peer.
///
/// This will keep track of blocks we know a peer has
pub(crate) fn on_new_block(&mut self, peer_id: PeerId, hash: H256) {
// Mark the blocks as seen
if let Some(peer) = self.connected_peers.get_mut(&peer_id) {
peer.blocks.insert(hash);
}
}

/// Invoked for a `NewBlockHashes` broadcast message.
pub(crate) fn on_new_block_hashes(&mut self, peer_id: PeerId, hashes: Vec<BlockHashNumber>) {
// Mark the blocks as seen
if let Some(peer) = self.connected_peers.get_mut(&peer_id) {
peer.blocks.extend(hashes.into_iter().map(|b| b.hash));
}
}

/// Event hook for events received from the discovery service.
@@ -149,7 +237,7 @@ where
}

/// Disconnect the session
fn on_session_disconnected(&mut self, peer: NodeId) {
fn on_session_disconnected(&mut self, peer: PeerId) {
self.connected_peers.remove(&peer);
}

@@ -157,7 +245,7 @@ where
///
/// Caution: this will replace an already pending response. It's the responsibility of the
/// caller to select the peer.
fn handle_block_request(&mut self, peer: NodeId, request: BlockRequest) {
fn handle_block_request(&mut self, peer: PeerId, request: BlockRequest) {
if let Some(ref mut peer) = self.connected_peers.get_mut(&peer) {
let (request, response) = match request {
BlockRequest::GetBlockHeaders(request) => {
@@ -184,7 +272,7 @@ where
BlockResponseOutcome::Request(peer, request) => {
self.handle_block_request(peer, request);
}
BlockResponseOutcome::BadResponse(_) => {
BlockResponseOutcome::BadResponse(_peer, _reputation_change) => {
// TODO handle reputation change
}
}
@@ -192,7 +280,7 @@ where
}

/// Invoked when received a response from a connected peer.
fn on_eth_response(&mut self, peer: NodeId, resp: PeerResponseResult) -> Option<StateAction> {
fn on_eth_response(&mut self, peer: PeerId, resp: PeerResponseResult) -> Option<StateAction> {
match resp {
PeerResponseResult::BlockHeaders(res) => {
let outcome = self.state_fetcher.on_block_headers_response(peer, res)?;
@@ -278,14 +366,29 @@ pub struct ConnectedPeer {
pub(crate) request_tx: PeerRequestSender,
/// The response receiver for a currently active request to that peer.
pub(crate) pending_response: Option<PeerResponse>,
/// Blocks we know the peer has.
pub(crate) blocks: LruCache<H256>,
}

/// Message variants triggered by the [`State`]
pub enum StateAction {
/// Dispatch a `NewBlock` message to the peer
NewBlock {
/// Target of the message
peer_id: PeerId,
/// The `NewBlock` message
block: NewBlockMessage,
},
/// Dispatch a `NewBlockHashes` message to the peer
NewBlockHashes {
/// Target of the message
peer_id: PeerId,
/// `NewBlockHashes` message to send to the peer.
hashes: Arc<NewBlockHashes>,
},
/// Create a new connection to the given node.
Connect { remote_addr: SocketAddr, node_id: NodeId },
Connect { remote_addr: SocketAddr, node_id: PeerId },
/// Disconnect an existing connection
Disconnect { node_id: NodeId },
Disconnect { node_id: PeerId },
}

#[derive(Debug, thiserror::Error)]
@@ -293,6 +396,6 @@ pub enum AddSessionError {
#[error("No capacity for new sessions")]
AtCapacity {
/// The peer of the session
peer: NodeId,
peer: PeerId,
},
}
@@ -1,13 +1,16 @@
use crate::{
listener::{ConnectionListener, ListenerEvent},
message::{PeerMessage, PeerRequestSender},
session::{SessionEvent, SessionId, SessionManager},
state::{AddSessionError, NetworkState, StateAction},
NodeId,
};
use futures::Stream;
use reth_ecies::ECIESError;
use reth_eth_wire::capability::{Capabilities, CapabilityMessage};
use reth_eth_wire::{
capability::{Capabilities, CapabilityMessage},
error::EthStreamError,
};
use reth_interfaces::provider::BlockProvider;
use reth_primitives::PeerId;
use std::{
io,
net::SocketAddr,
@@ -54,8 +57,13 @@ where
&mut self.state
}

/// Mutable access to the [`SessionManager`].
pub(crate) fn sessions_mut(&mut self) -> &mut SessionManager {
&mut self.sessions
}

/// Triggers a new outgoing connection to the given node
pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_id: NodeId) {
pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_id: PeerId) {
self.sessions.dial_outbound(remote_addr, remote_id)
}

@@ -68,8 +76,18 @@ where
capabilities,
status,
messages,
} => match self.state.on_session_activated(node_id, capabilities, status, messages) {
Ok(_) => Some(SwarmEvent::SessionEstablished { node_id, remote_addr }),
} => match self.state.on_session_activated(
node_id,
capabilities.clone(),
status,
messages.clone(),
) {
Ok(_) => Some(SwarmEvent::SessionEstablished {
node_id,
remote_addr,
capabilities,
messages,
}),
Err(err) => {
match err {
AddSessionError::AtCapacity { peer } => self.sessions.disconnect(peer),
@@ -78,7 +96,7 @@ where
}
},
SessionEvent::ValidMessage { node_id, message } => {
Some(SwarmEvent::CapabilityMessage { node_id, message })
Some(SwarmEvent::ValidMessage { node_id, message })
}
SessionEvent::InvalidMessage { node_id, capabilities, message } => {
Some(SwarmEvent::InvalidCapabilityMessage { node_id, capabilities, message })
@@ -131,6 +149,14 @@ where
StateAction::Disconnect { node_id } => {
self.sessions.disconnect(node_id);
}
StateAction::NewBlock { peer_id, block: msg } => {
let msg = PeerMessage::NewBlock(msg);
self.sessions.send_message(&peer_id, msg);
}
StateAction::NewBlockHashes { peer_id, hashes } => {
let msg = PeerMessage::NewBlockHashes(hashes);
self.sessions.send_message(&peer_id, msg);
}
}
None
}
@@ -189,15 +215,15 @@ where
/// network.
pub enum SwarmEvent {
/// Events related to the actual network protocol.
CapabilityMessage {
ValidMessage {
/// The peer that sent the message
node_id: NodeId,
node_id: PeerId,
/// Message received from the peer
message: CapabilityMessage,
message: PeerMessage,
},
/// Received a message that does not match the announced capabilities of the peer.
InvalidCapabilityMessage {
node_id: NodeId,
node_id: PeerId,
/// Announced capabilities of the remote peer.
capabilities: Arc<Capabilities>,
/// Message received from the peer.
@@ -226,28 +252,30 @@ pub enum SwarmEvent {
remote_addr: SocketAddr,
},
SessionEstablished {
node_id: NodeId,
node_id: PeerId,
remote_addr: SocketAddr,
capabilities: Arc<Capabilities>,
messages: PeerRequestSender,
},
SessionClosed {
node_id: NodeId,
node_id: PeerId,
remote_addr: SocketAddr,
},
/// Closed an incoming pending session during authentication.
IncomingPendingSessionClosed {
remote_addr: SocketAddr,
error: Option<ECIESError>,
error: Option<EthStreamError>,
},
/// Closed an outgoing pending session during authentication.
OutgoingPendingSessionClosed {
remote_addr: SocketAddr,
node_id: NodeId,
error: Option<ECIESError>,
node_id: PeerId,
error: Option<EthStreamError>,
},
/// Failed to establish a tcp stream to the given address/node
OutgoingConnectionError {
remote_addr: SocketAddr,
node_id: NodeId,
node_id: PeerId,
error: io::Error,
},
}
@@ -1,11 +1,35 @@
//! Transaction management for the p2p network.

use crate::{manager::NetworkEvent, NetworkHandle};
use reth_primitives::{Transaction, H256};
use reth_transaction_pool::TransactionPool;
use std::collections::HashMap;
use tokio::sync::mpsc;
use tokio_stream::wrappers::UnboundedReceiverStream;
use crate::{
cache::LruCache,
manager::NetworkEvent,
message::{PeerRequest, PeerRequestSender},
network::NetworkHandleMessage,
NetworkHandle,
};
use futures::{stream::FuturesUnordered, FutureExt, StreamExt};
use reth_eth_wire::{GetPooledTransactions, NewPooledTransactionHashes, PooledTransactions};
use reth_interfaces::p2p::error::RequestResult;
use reth_primitives::{
FromRecoveredTransaction, IntoRecoveredTransaction, PeerId, TransactionSigned, TxHash, H256,
};
use reth_transaction_pool::{error::PoolResult, TransactionPool};
use std::{
collections::{hash_map::Entry, HashMap},
future::Future,
num::NonZeroUsize,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use tokio::sync::{mpsc, oneshot, oneshot::Sender};
use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream};

/// Cache limit of transactions to keep track of for a single peer.
const PEER_TRANSACTION_CACHE_LIMIT: usize = 1024 * 10;

/// The future for importing a transaction into the pool
pub type PoolImportFuture = Pin<Box<dyn Future<Output = PoolResult<TxHash>> + Send + 'static>>;

/// Api to interact with [`TransactionsManager`] task.
pub struct TransactionsHandle {
@@ -39,35 +63,51 @@ pub struct TransactionsManager<Pool> {
///
/// From which we get all new incoming transaction related messages.
network_events: UnboundedReceiverStream<NetworkEvent>,
/// All currently pending transactions
pending_transactions: (),
/// All the peers that have sent the same transactions.
peers: HashMap<H256, Vec<()>>,
/// All currently active requests for pooled transactions.
inflight_requests: Vec<GetPooledTxRequest>,
/// All currently pending transactions grouped by peers.
///
/// This way we can track incoming transactions and prevent multiple pool imports for the same
/// transaction
transactions_by_peers: HashMap<TxHash, Vec<PeerId>>,
/// Transactions that are currently imported into the `Pool`
pool_imports: FuturesUnordered<PoolImportFuture>,
/// All the connected peers.
peers: HashMap<PeerId, Peer>,
/// Send half for the command channel.
command_tx: mpsc::UnboundedSender<TransactionsCommand>,
/// Incoming commands from [`TransactionsHandle`].
command_rx: UnboundedReceiverStream<TransactionsCommand>,
/// New pending transactions from the pool.
pending_transactions: ReceiverStream<TxHash>,
}

// === impl TransactionsManager ===

impl<Pool> TransactionsManager<Pool>
where
Pool: TransactionPool<Transaction = Transaction>,
Pool: TransactionPool + Clone,
<Pool as TransactionPool>::Transaction: IntoRecoveredTransaction,
{
/// Sets up a new instance.
pub fn new(network: NetworkHandle, pool: Pool) -> Self {
let network_events = network.event_listener();
let (command_tx, command_rx) = mpsc::unbounded_channel();

// install a listener for new transactions
let pending = pool.pending_transactions_listener();

Self {
pool,
network,
network_events: UnboundedReceiverStream::new(network_events),
pending_transactions: (),
inflight_requests: Default::default(),
transactions_by_peers: Default::default(),
pool_imports: Default::default(),
peers: Default::default(),
command_tx,
command_rx: UnboundedReceiverStream::new(command_rx),
pending_transactions: ReceiverStream::new(pending),
}
}

@@ -76,8 +116,222 @@ where
TransactionsHandle { manager_tx: self.command_tx.clone() }
}

/// Executes an endless future
pub async fn run(self) {}
/// Request handler for an incoming request for transactions
fn on_get_pooled_transactions(
&mut self,
peer_id: PeerId,
request: GetPooledTransactions,
response: Sender<RequestResult<PooledTransactions>>,
) {
if let Some(peer) = self.peers.get_mut(&peer_id) {
let transactions = self
.pool
.get_all(request.0)
.into_iter()
.map(|tx| tx.transaction.to_recovered_transaction().into_signed())
.collect::<Vec<_>>();

// once we send a response we assume the peer is aware of these transactions
peer.transactions.extend(transactions.iter().map(|tx| tx.hash()));

let resp = PooledTransactions(transactions);
let _ = response.send(Ok(resp));
}
}

/// Request handler for an incoming `NewPooledTransactionHashes`
fn on_new_pooled_transactions(
&mut self,
peer_id: PeerId,
msg: Arc<NewPooledTransactionHashes>,
) {
if let Some(peer) = self.peers.get_mut(&peer_id) {
let mut transactions = Arc::try_unwrap(msg).unwrap_or_else(|arc| (*arc).clone()).0;

// keep track of the transactions the peer knows
peer.transactions.extend(transactions.clone());

self.pool.retain_unknown(&mut transactions);

if transactions.is_empty() {
// nothing to request
return
}

// request the missing transactions
let (response, rx) = oneshot::channel();
let req = PeerRequest::GetPooledTransactions {
request: GetPooledTransactions(transactions),
response,
};

if peer.request_tx.try_send(req).is_ok() {
self.inflight_requests.push(GetPooledTxRequest { peer_id, response: rx })
}
}
}
|
||||
|
||||
/// Handles a received event
|
||||
fn on_event(&mut self, event: NetworkEvent) {
|
||||
match event {
|
||||
NetworkEvent::SessionClosed { peer_id } => {
|
||||
// remove the peer
|
||||
self.peers.remove(&peer_id);
|
||||
}
|
||||
NetworkEvent::SessionEstablished { peer_id, messages, .. } => {
|
||||
// insert a new peer
|
||||
self.peers.insert(
|
||||
peer_id,
|
||||
Peer {
|
||||
transactions: LruCache::new(
|
||||
NonZeroUsize::new(PEER_TRANSACTION_CACHE_LIMIT).unwrap(),
|
||||
),
|
||||
request_tx: messages,
|
||||
},
|
||||
);
|
||||
|
||||
// Send a `NewPooledTransactionHashes` to the peer with _all_ transactions in the
|
||||
// pool
|
||||
let msg = Arc::new(NewPooledTransactionHashes(self.pool.pooled_transactions()));
|
||||
self.network.send_message(NetworkHandleMessage::SendPooledTransactionHashes {
|
||||
peer_id,
|
||||
msg,
|
||||
})
|
||||
}
|
||||
NetworkEvent::IncomingTransactions { peer_id, msg } => {
|
||||
let transactions = Arc::try_unwrap(msg).unwrap_or_else(|arc| (*arc).clone());
|
||||
self.import_transactions(peer_id, transactions.0);
|
||||
}
|
||||
NetworkEvent::IncomingPooledTransactionHashes { peer_id, msg } => {
|
||||
self.on_new_pooled_transactions(peer_id, msg)
|
||||
}
|
||||
NetworkEvent::GetPooledTransactions { peer_id, request, response } => {
|
||||
if let Ok(response) = Arc::try_unwrap(response) {
|
||||
// TODO(mattsse): there should be a dedicated channel for the transaction
|
||||
// manager instead
|
||||
self.on_get_pooled_transactions(peer_id, request, response)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Starts the import process for the given transactions.
|
||||
fn import_transactions(&mut self, peer_id: PeerId, transactions: Vec<TransactionSigned>) {
|
||||
if let Some(peer) = self.peers.get_mut(&peer_id) {
|
||||
for tx in transactions {
|
||||
// recover transaction
|
||||
let tx = if let Some(tx) = tx.into_ecrecovered() {
|
||||
tx
|
||||
} else {
|
||||
// TODO: report peer?
|
||||
continue
|
||||
};
|
||||
|
||||
// track that the peer knows this transaction
|
||||
peer.transactions.insert(tx.hash);
|
||||
|
||||
match self.transactions_by_peers.entry(tx.hash) {
|
||||
Entry::Occupied(mut entry) => {
|
||||
// transaction was already inserted
|
||||
entry.get_mut().push(peer_id);
|
||||
}
|
||||
Entry::Vacant(entry) => {
|
||||
// this is a new transaction that should be imported into the pool
|
||||
let pool_transaction = <Pool::Transaction as FromRecoveredTransaction>::from_recovered_transaction(tx);
|
||||
|
||||
let pool = self.pool.clone();
|
||||
let import = Box::pin(async move {
|
||||
pool.add_external_transaction(pool_transaction).await
|
||||
});
|
||||
|
||||
self.pool_imports.push(import);
|
||||
entry.insert(vec![peer_id]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn on_good_import(&mut self, hash: TxHash) {
|
||||
if let Some(_peers) = self.transactions_by_peers.remove(&hash) {
|
||||
// TODO report good peer?
|
||||
}
|
||||
}
|
||||
|
||||
fn on_bad_import(&mut self, hash: TxHash) {
|
||||
if let Some(_peers) = self.transactions_by_peers.remove(&hash) {
|
||||
// TODO report bad peer?
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// An endless future.
|
||||
///
|
||||
/// This should be spawned or used as part of `tokio::select!`.
|
||||
impl<Pool> Future for TransactionsManager<Pool>
|
||||
where
|
||||
Pool: TransactionPool + Clone + Unpin,
|
||||
<Pool as TransactionPool>::Transaction: IntoRecoveredTransaction,
|
||||
{
|
||||
type Output = ();
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.get_mut();
|
||||
|
||||
// Advance all imports
|
||||
while let Poll::Ready(Some(import_res)) = this.pool_imports.poll_next_unpin(cx) {
|
||||
match import_res {
|
||||
Ok(hash) => {
|
||||
this.on_good_import(hash);
|
||||
}
|
||||
Err(err) => {
|
||||
this.on_bad_import(*err.hash());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handle new transactions
|
||||
while let Poll::Ready(Some(_hash)) = this.pending_transactions.poll_next_unpin(cx) {
|
||||
// TODO(mattsse): propagate new transactions
|
||||
}
|
||||
|
||||
// Advance all requests.
|
||||
// We remove each request one by one and add them back.
|
||||
for idx in (0..this.inflight_requests.len()).rev() {
|
||||
let mut req = this.inflight_requests.swap_remove(idx);
|
||||
match req.response.poll_unpin(cx) {
|
||||
Poll::Pending => {
|
||||
this.inflight_requests.push(req);
|
||||
}
|
||||
Poll::Ready(Ok(Ok(txs))) => {
|
||||
this.import_transactions(req.peer_id, txs.0);
|
||||
}
|
||||
Poll::Ready(Ok(Err(_))) => {
|
||||
// TODO report bad peer
|
||||
}
|
||||
Poll::Ready(Err(_)) => {
|
||||
// TODO report bad peer
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
/// An inflight request for `PooledTransactions` from a peer
|
||||
#[allow(missing_docs)]
|
||||
struct GetPooledTxRequest {
|
||||
peer_id: PeerId,
|
||||
response: oneshot::Receiver<RequestResult<PooledTransactions>>,
|
||||
}
|
||||
|
||||
/// Tracks a single peer
|
||||
struct Peer {
|
||||
/// Keeps track of transactions that we know the peer has seen.
|
||||
transactions: LruCache<H256>,
|
||||
/// A communication channel directly to the session task.
|
||||
request_tx: PeerRequestSender,
|
||||
}
|
||||
|
||||
/// Commands to send to the [`TransactionManager`]
|
||||
|
||||
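Because `TransactionsManager` is itself a `Future` that never resolves, it is driven as its own task. A minimal wiring sketch, assuming a `pool` that satisfies the trait bounds above and an already-running `network: NetworkHandle` (both placeholders here):

    // Build the manager and grab a handle before handing it to the runtime.
    let manager = TransactionsManager::new(network, pool);
    let _handle = manager.handle();
    // The manager only ever returns `Poll::Pending`, so it runs until the task is dropped.
    tokio::spawn(manager);
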
@@ -34,7 +34,11 @@ hex = "0.4"
hex-literal = "0.3"
derive_more = "0.99"


# proof related
triehash = "0.8"
# TODO: consider replacing these hashers to simplify dependencies
plain_hasher = "0.2"
hash-db = "0.15"

[dev-dependencies]
arbitrary = { version = "1.1.7", features = ["derive"]}

@@ -1,4 +1,5 @@
use crate::{Header, Receipt, SealedHeader, Transaction, TransactionSigned, H256};
use reth_rlp::{Decodable, DecodeError, Encodable};
use std::ops::Deref;

/// Ethereum full block.
@@ -47,3 +48,62 @@ impl Deref for BlockLocked {
        self.header.as_ref()
    }
}

/// Either a block hash _or_ a block number
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum BlockHashOrNumber {
    /// A block hash
    Hash(H256),
    /// A block number
    Number(u64),
}

impl From<H256> for BlockHashOrNumber {
    fn from(value: H256) -> Self {
        BlockHashOrNumber::Hash(value)
    }
}

impl From<u64> for BlockHashOrNumber {
    fn from(value: u64) -> Self {
        BlockHashOrNumber::Number(value)
    }
}

/// Allows for RLP encoding of either a block hash or block number
impl Encodable for BlockHashOrNumber {
    fn length(&self) -> usize {
        match self {
            Self::Hash(block_hash) => block_hash.length(),
            Self::Number(block_number) => block_number.length(),
        }
    }
    fn encode(&self, out: &mut dyn bytes::BufMut) {
        match self {
            Self::Hash(block_hash) => block_hash.encode(out),
            Self::Number(block_number) => block_number.encode(out),
        }
    }
}

/// Allows for RLP decoding of a block hash or block number
impl Decodable for BlockHashOrNumber {
    fn decode(buf: &mut &[u8]) -> Result<Self, DecodeError> {
        let header: u8 = *buf.first().ok_or(DecodeError::InputTooShort)?;
        // if the byte string is exactly 32 bytes, decode it into a Hash
        // 0xa0 = 0x80 (start of string) + 0x20 (32, length of string)
        if header == 0xa0 {
            // strip the first byte, parsing the rest of the string.
            // If the rest of the string fails to decode into 32 bytes, we'll bubble up the
            // decoding error.
            let hash = H256::decode(buf)?;
            Ok(Self::Hash(hash))
        } else {
            // a block number when encoded as bytes ranges from 0 to any number of bytes - we're
            // going to accept numbers which fit in fewer than 64 bits.
            // Any data larger than this which is not caught by the Hash decoding should error and
            // is considered an invalid block number.
            Ok(Self::Number(u64::decode(buf)?))
        }
    }
}

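The conversions and RLP impls compose into a simple round trip. A small usage sketch, assuming the `Encodable`/`Decodable` traits from `reth_rlp` are in scope:

    // A plain number converts into the enum, encodes, and decodes back unchanged.
    let id: BlockHashOrNumber = 100u64.into();
    let mut buf = Vec::new();
    id.encode(&mut buf);
    assert_eq!(BlockHashOrNumber::decode(&mut buf.as_slice()).unwrap(), id);
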
5
crates/primitives/src/constants.rs
Normal file
@@ -0,0 +1,5 @@
use crate::H256;

/// The Ethereum mainnet genesis hash.
pub const MAINNET_GENESIS: H256 =
    H256(hex_literal::hex!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"));
@@ -4,8 +4,8 @@ use thiserror::Error;
/// Primitives error type.
#[derive(Debug, Error)]
pub enum Error {
    /// Input provided is invalid.
    #[error("Input provided is invalid.")]
    /// The provided input is invalid.
    #[error("The provided input is invalid.")]
    InvalidInput,
    /// Failed to deserialize data into type.
    #[error("Failed to deserialize data into type.")]

@@ -3,9 +3,9 @@
#![deny(missing_docs)]
#![allow(clippy::redundant_else, clippy::too_many_lines)]

use crate::{BlockNumber, H256};
use crc::crc32;
use maplit::btreemap;
use reth_primitives::{BlockNumber, H256};
use reth_rlp::*;
use std::{
    collections::{BTreeMap, BTreeSet},
225
crates/primitives/src/hardfork.rs
Normal file
@@ -0,0 +1,225 @@
use crate::{BlockNumber, ForkFilter, ForkHash, ForkId, MAINNET_GENESIS};
use std::str::FromStr;

/// Ethereum mainnet hardforks
#[allow(missing_docs)]
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Hardfork {
    Frontier,
    Homestead,
    Dao,
    Tangerine,
    SpuriousDragon,
    Byzantium,
    Constantinople,
    Petersburg,
    Istanbul,
    Muirglacier,
    Berlin,
    London,
    ArrowGlacier,
    GrayGlacier,
    Latest,
}

impl Hardfork {
    /// Get the first block number of the hardfork.
    pub fn fork_block(&self) -> u64 {
        match *self {
            Hardfork::Frontier => 0,
            Hardfork::Homestead => 1150000,
            Hardfork::Dao => 1920000,
            Hardfork::Tangerine => 2463000,
            Hardfork::SpuriousDragon => 2675000,
            Hardfork::Byzantium => 4370000,
            Hardfork::Constantinople | Hardfork::Petersburg => 7280000,
            Hardfork::Istanbul => 9069000,
            Hardfork::Muirglacier => 9200000,
            Hardfork::Berlin => 12244000,
            Hardfork::London => 12965000,
            Hardfork::ArrowGlacier => 13773000,
            Hardfork::GrayGlacier | Hardfork::Latest => 15050000,
        }
    }

    /// Get the EIP-2124 fork id for a given hardfork
    ///
    /// The [`ForkId`](ethereum_forkid::ForkId) includes a CRC32 checksum of all fork block
    /// numbers from genesis, and the next upcoming fork block number.
    /// If the next fork block number is not yet known, it is set to 0.
    pub fn fork_id(&self) -> ForkId {
        match *self {
            Hardfork::Frontier => {
                ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 }
            }
            Hardfork::Homestead => {
                ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 }
            }
            Hardfork::Dao => ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 },
            Hardfork::Tangerine => {
                ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 2675000 }
            }
            Hardfork::SpuriousDragon => {
                ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 }
            }
            Hardfork::Byzantium => {
                ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 }
            }
            Hardfork::Constantinople | Hardfork::Petersburg => {
                ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 }
            }
            Hardfork::Istanbul => {
                ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 }
            }
            Hardfork::Muirglacier => {
                ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 }
            }
            Hardfork::Berlin => ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 },
            Hardfork::London => ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 },
            Hardfork::ArrowGlacier => {
                ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 }
            }
            Hardfork::Latest | Hardfork::GrayGlacier => {
                // update `next` when another fork block num is known
                ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 0 }
            }
        }
    }

    /// This returns all known hardforks in order.
    pub fn all_forks() -> Vec<Self> {
        vec![
            Hardfork::Homestead,
            Hardfork::Dao,
            Hardfork::Tangerine,
            Hardfork::SpuriousDragon,
            Hardfork::Byzantium,
            Hardfork::Constantinople, /* petersburg is skipped because it's the same block num
                                       * as constantinople */
            Hardfork::Istanbul,
            Hardfork::Muirglacier,
            Hardfork::Berlin,
            Hardfork::London,
            Hardfork::ArrowGlacier,
            Hardfork::GrayGlacier,
        ]
    }

    /// This returns all known hardfork block numbers as a vector.
    pub fn all_fork_blocks() -> Vec<BlockNumber> {
        Hardfork::all_forks().iter().map(|f| f.fork_block()).collect()
    }

    /// Creates a [`ForkFilter`](crate::ForkFilter) for the given hardfork.
    /// This assumes the current hardfork's block number is the current head and uses all known
    /// future hardforks to initialize the filter.
    pub fn fork_filter(&self) -> ForkFilter {
        let all_forks = Hardfork::all_forks();
        let future_forks: Vec<BlockNumber> = all_forks
            .iter()
            .filter(|f| f.fork_block() > self.fork_block())
            .map(|f| f.fork_block())
            .collect();

        // this data structure is not chain-agnostic, so we can pass in the constant mainnet
        // genesis
        ForkFilter::new(self.fork_block(), MAINNET_GENESIS, future_forks)
    }
}

impl FromStr for Hardfork {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let s = s.to_lowercase();
        let hardfork = match s.as_str() {
            "frontier" | "1" => Hardfork::Frontier,
            "homestead" | "2" => Hardfork::Homestead,
            "dao" | "3" => Hardfork::Dao,
            "tangerine" | "4" => Hardfork::Tangerine,
            "spuriousdragon" | "5" => Hardfork::SpuriousDragon,
            "byzantium" | "6" => Hardfork::Byzantium,
            "constantinople" | "7" => Hardfork::Constantinople,
            "petersburg" | "8" => Hardfork::Petersburg,
            "istanbul" | "9" => Hardfork::Istanbul,
            "muirglacier" | "10" => Hardfork::Muirglacier,
            "berlin" | "11" => Hardfork::Berlin,
            "london" | "12" => Hardfork::London,
            "arrowglacier" | "13" => Hardfork::ArrowGlacier,
            "grayglacier" => Hardfork::GrayGlacier,
            "latest" | "14" => Hardfork::Latest,
            _ => return Err(format!("Unknown hardfork {s}")),
        };
        Ok(hardfork)
    }
}

impl Default for Hardfork {
    fn default() -> Self {
        Hardfork::Latest
    }
}

impl From<BlockNumber> for Hardfork {
    fn from(num: BlockNumber) -> Hardfork {
        match num {
            _i if num < 1_150_000 => Hardfork::Frontier,
            _i if num < 1_920_000 => Hardfork::Homestead,
            _i if num < 2_463_000 => Hardfork::Dao,
            _i if num < 2_675_000 => Hardfork::Tangerine,
            _i if num < 4_370_000 => Hardfork::SpuriousDragon,
            _i if num < 7_280_000 => Hardfork::Byzantium,
            _i if num < 9_069_000 => Hardfork::Constantinople,
            _i if num < 9_200_000 => Hardfork::Istanbul,
            _i if num < 12_244_000 => Hardfork::Muirglacier,
            _i if num < 12_965_000 => Hardfork::Berlin,
            _i if num < 13_773_000 => Hardfork::London,
            _i if num < 15_050_000 => Hardfork::ArrowGlacier,

            _ => Hardfork::Latest,
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::{forkid::ForkHash, hardfork::Hardfork};
    use crc::crc32;

    #[test]
    fn test_hardfork_blocks() {
        let hf: Hardfork = 12_965_000u64.into();
        assert_eq!(hf, Hardfork::London);

        let hf: Hardfork = 4370000u64.into();
        assert_eq!(hf, Hardfork::Byzantium);

        let hf: Hardfork = 12244000u64.into();
        assert_eq!(hf, Hardfork::Berlin);
    }

    #[test]
    // this test checks that the fork hash assigned to forks accurately maps to the fork_id method
    fn test_forkhash_from_fork_blocks() {
        // set the genesis hash
        let genesis =
            hex::decode("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3")
                .unwrap();

        // set the frontier forkhash
        let mut curr_forkhash = ForkHash(crc32::checksum_ieee(&genesis[..]).to_be_bytes());

        // now we go through enum members
        let frontier_forkid = Hardfork::Frontier.fork_id();
        assert_eq!(curr_forkhash, frontier_forkid.hash);

        // list of the above hardforks
        let hardforks = Hardfork::all_forks();

        // check that the curr_forkhash we compute matches the output of each fork_id returned
        for hardfork in hardforks {
            curr_forkhash += hardfork.fork_block();
            assert_eq!(curr_forkhash, hardfork.fork_id().hash);
        }
    }
}
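The new `Hardfork` helpers tie fork names, activation blocks, and EIP-2124 fork ids together. A short usage sketch (hypothetical call site; the values are taken from the tables above):

    use std::str::FromStr;

    let fork = Hardfork::from_str("london").unwrap();
    assert_eq!(fork.fork_block(), 12965000);
    // Mapping a head block number back to the active hardfork:
    assert_eq!(Hardfork::from(13_000_000u64), Hardfork::London);
    // The fork filter for the current head is seeded with all later fork blocks.
    let _filter = fork.fork_filter();
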
@@ -1,4 +1,7 @@
use crate::{BlockHash, BlockNumber, Bloom, H160, H256, U256};
use crate::{
    proofs::{EMPTY_LIST_HASH, EMPTY_ROOT},
    BlockHash, BlockNumber, Bloom, H160, H256, U256,
};
use bytes::{BufMut, BytesMut};
use ethers_core::{types::H64, utils::keccak256};
use reth_codecs::main_codec;
@@ -7,7 +10,7 @@ use std::ops::Deref;

/// Block header
#[main_codec]
#[derive(Debug, Clone, PartialEq, Eq, Default, Hash)]
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Header {
    /// The Keccak 256-bit hash of the parent
    /// block’s header, in its entirety; formally Hp.
@@ -64,6 +67,29 @@ pub struct Header {
    pub base_fee_per_gas: Option<u64>,
}

impl Default for Header {
    fn default() -> Self {
        Header {
            parent_hash: Default::default(),
            ommers_hash: EMPTY_LIST_HASH,
            beneficiary: Default::default(),
            state_root: EMPTY_ROOT,
            transactions_root: EMPTY_ROOT,
            receipts_root: EMPTY_ROOT,
            logs_bloom: Default::default(),
            difficulty: Default::default(),
            number: 0,
            gas_limit: 0,
            gas_used: 0,
            timestamp: 0,
            extra_data: Default::default(),
            mix_hash: Default::default(),
            nonce: 0,
            base_fee_per_gas: None,
        }
    }
}

impl Header {
    /// Heavy function that calculates the hash of the data and will *not* save it to metadata.
    /// Use [`Header::seal`], [`SealedHeader`] and unlock if you need the hash to be persistent.
@@ -239,6 +265,10 @@ mod tests {
            gas_used: 0x15b3_u64,
            timestamp: 0x1a0a_u64,
            extra_data: Bytes::from_str("7788").unwrap().0,
            ommers_hash: H256::zero(),
            state_root: H256::zero(),
            transactions_root: H256::zero(),
            receipts_root: H256::zero(),
            ..Default::default()
        };
        let mut data = vec![];
@@ -285,6 +315,10 @@ mod tests {
            gas_used: 0x15b3u64,
            timestamp: 0x1a0au64,
            extra_data: Bytes::from_str("7788").unwrap().0,
            ommers_hash: H256::zero(),
            state_root: H256::zero(),
            transactions_root: H256::zero(),
            receipts_root: H256::zero(),
            ..Default::default()
        };
        let header = <Header as Decodable>::decode(&mut data.as_slice()).unwrap();

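With the manual `Default` impl, an empty header now carries the canonical empty-trie constants rather than zero hashes. A quick check of the changed behavior, assuming the `proofs` constants are in scope:

    let header = Header::default();
    assert_eq!(header.ommers_hash, EMPTY_LIST_HASH);
    assert_eq!(header.state_root, EMPTY_ROOT);
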
@@ -6,11 +6,16 @@
))]

//! Commonly used types in reth.
//!
//! This crate contains Ethereum primitive types and helper functions.

mod account;
mod block;
mod chain;
mod constants;
mod error;
mod forkid;
mod hardfork;
mod header;
mod hex_bytes;
mod integer_list;
@@ -20,9 +25,15 @@ mod receipt;
mod storage;
mod transaction;

/// Helper functions for calculating Merkle proofs and hashes
pub mod proofs;

pub use account::Account;
pub use block::{Block, BlockLocked};
pub use block::{Block, BlockHashOrNumber, BlockLocked};
pub use chain::Chain;
pub use constants::MAINNET_GENESIS;
pub use forkid::{ForkFilter, ForkHash, ForkId, ValidationError};
pub use hardfork::Hardfork;
pub use header::{Header, SealedHeader};
pub use hex_bytes::Bytes;
pub use integer_list::IntegerList;
@@ -31,31 +42,36 @@ pub use log::Log;
pub use receipt::Receipt;
pub use storage::StorageEntry;
pub use transaction::{
    AccessList, AccessListItem, Signature, Transaction, TransactionKind, TransactionSigned,
    TransactionSignedEcRecovered, TxType,
    AccessList, AccessListItem, FromRecoveredTransaction, IntoRecoveredTransaction, Signature,
    Transaction, TransactionKind, TransactionSigned, TransactionSignedEcRecovered, TxType,
};

/// Block hash.
/// A block hash.
pub type BlockHash = H256;
/// Block Number is height of chain
/// A block number.
pub type BlockNumber = u64;
/// Ethereum address
/// An Ethereum address.
pub type Address = H160;
// TODO(onbjerg): Is this not the same as [BlockHash]?
/// BlockId is Keccak hash of the header
pub type BlockID = H256;
/// TxHash is Keccak hash of rlp encoded signed transaction
/// A transaction hash is a Keccak hash of an RLP encoded signed transaction.
pub type TxHash = H256;
/// TxNumber is sequence number of all existing transactions
/// The sequence number of all existing transactions.
pub type TxNumber = u64;
/// Chain identifier type, introduced in EIP-155
/// Chain identifier type (introduced in EIP-155).
pub type ChainId = u64;

/// Storage Key
/// An account storage key.
pub type StorageKey = H256;

/// Storage value
/// An account storage value.
pub type StorageValue = U256;

// TODO: should we use `PublicKey` for this? Even when dealing with public keys we should try to
// prevent misuse
/// This represents an uncompressed secp256k1 public key.
/// This encodes the concatenation of the x and y components of the affine point in bytes.
pub type PeerId = H512;

pub use ethers_core::{
    types as rpc,
    types::{BigEndianHash, Bloom, H128, H160, H256, H512, H64, U128, U256, U64},

@@ -1,26 +1,38 @@
use crate::{keccak256, Bytes, Header, Log, Receipt, TransactionSigned, H256};
use ethers_core::utils::rlp::RlpStream;
use hash_db::Hasher;
use hex_literal::hex;
use plain_hasher::PlainHasher;
use reth_primitives::{Bytes, Header, Log, Receipt, TransactionSigned, H256};
use reth_rlp::Encodable;
use rlp::RlpStream;
use sha3::{Digest, Keccak256};
use triehash::sec_trie_root;

/// Keccak-256 hash of the RLP of an empty list, KEC("\xc0").
pub const EMPTY_LIST_HASH: H256 =
    H256(hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"));

/// Root hash of an empty trie.
pub const EMPTY_ROOT: H256 =
    H256(hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"));

/// A [Hasher] that calculates a keccak256 hash of the given data.
#[derive(Default, Debug, Clone, PartialEq, Eq)]
struct KeccakHasher;

impl Hasher for KeccakHasher {
    type Out = H256;
    type StdHasher = PlainHasher;

    const LENGTH: usize = 32;

    fn hash(x: &[u8]) -> Self::Out {
        let out = Keccak256::digest(x);
        // TODO make more performant, H256 from slice is not good enough.
        H256::from_slice(out.as_slice())
        keccak256(x)
    }
}

/// Calculate Transaction root. Iterate over transaction and create merkle trie of
/// (rlp(index),encoded(tx)) pairs.
/// Calculate a transaction root.
///
/// Iterates over the given transactions and computes the merkle trie root of
/// `(rlp(index), encoded(tx))` pairs.
pub fn calculate_transaction_root<'a>(
    transactions: impl IntoIterator<Item = &'a TransactionSigned>,
) -> H256 {
@@ -40,7 +52,7 @@ pub fn calculate_transaction_root<'a>(
    )
}

/// Create receipt root for header
/// Calculates the receipt root for a header.
pub fn calculate_receipt_root<'a>(receipts: impl IntoIterator<Item = &'a Receipt>) -> H256 {
    sec_trie_root::<KeccakHasher, _, _, _>(
        receipts
@@ -57,7 +69,7 @@ pub fn calculate_receipt_root<'a>(receipts: impl IntoIterator<Item = &'a Receipt
    )
}

/// Create log hash for header
/// Calculates the log root for a header.
pub fn calculate_log_root<'a>(logs: impl IntoIterator<Item = &'a Log>) -> H256 {
    //https://github.com/ethereum/go-ethereum/blob/356bbe343a30789e77bb38f25983c8f2f2bfbb47/cmd/evm/internal/t8ntool/execution.go#L255
    let mut stream = RlpStream::new();
@@ -71,11 +83,10 @@ pub fn calculate_log_root<'a>(logs: impl IntoIterator<Item = &'a Log>) -> H256 {
    stream.finalize_unbounded_list();
    let out = stream.out().freeze();

    let out = Keccak256::digest(out);
    H256::from_slice(out.as_slice())
    keccak256(out)
}

/// Calculate hash for ommer/uncle headers
/// Calculates the root hash for ommer/uncle headers.
pub fn calculate_ommers_root<'a>(_ommers: impl IntoIterator<Item = &'a Header>) -> H256 {
    // RLP Encode
    let mut stream = RlpStream::new();
@@ -87,8 +98,7 @@ pub fn calculate_ommers_root<'a>(_ommers: impl IntoIterator<Item = &'a Header>)
    */
    stream.finalize_unbounded_list();
    let bytes = stream.out().freeze();
    let out = Keccak256::digest(bytes);
    H256::from_slice(out.as_slice())
    keccak256(bytes)
}

// TODO state root
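These helpers reduce to the constants above in the degenerate case. A small sanity sketch, assuming `TransactionSigned` is in scope:

    // The trie root over zero transactions is exactly the empty-trie root.
    let root = calculate_transaction_root(std::iter::empty::<&TransactionSigned>());
    assert_eq!(root, EMPTY_ROOT);
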
@@ -1,8 +1,3 @@
mod access_list;
mod signature;
mod tx_type;
mod util;

use crate::{Address, Bytes, ChainId, TxHash, H256, U256};
pub use access_list::{AccessList, AccessListItem};
use bytes::{Buf, BytesMut};
@@ -13,12 +8,18 @@ use reth_rlp::{length_of_length, Decodable, DecodeError, Encodable, Header, EMPT
pub use signature::Signature;
pub use tx_type::TxType;

/// Raw Transaction.
/// Transaction type is introduced in EIP-2718: https://eips.ethereum.org/EIPS/eip-2718
mod access_list;
mod signature;
mod tx_type;
mod util;

/// A raw transaction.
///
/// Transaction types were introduced in [EIP-2718](https://eips.ethereum.org/EIPS/eip-2718).
#[main_codec]
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Transaction {
    /// Legacy transaciton.
    /// Legacy transaction.
    Legacy {
        /// Added as EIP-155: Simple replay attack protection
        chain_id: Option<ChainId>,
@@ -49,7 +50,7 @@ pub enum Transaction {
        /// input data of the message call, formally Td.
        input: Bytes,
    },
    /// Transaction with AccessList. https://eips.ethereum.org/EIPS/eip-2930
    /// Transaction with an [`AccessList`] ([EIP-2930](https://eips.ethereum.org/EIPS/eip-2930)).
    Eip2930 {
        /// Added as EIP-155: Simple replay attack protection
        chain_id: ChainId,
@@ -86,7 +87,7 @@ pub enum Transaction {
        /// accessing outside the list.
        access_list: AccessList,
    },
    /// Transaction with priority fee. https://eips.ethereum.org/EIPS/eip-1559
    /// A transaction with a priority fee ([EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)).
    Eip1559 {
        /// Added as EIP-155: Simple replay attack protection
        chain_id: u64,
@@ -175,6 +176,15 @@ impl Transaction {
        }
    }

    /// Get the gas limit of the transaction.
    pub fn gas_limit(&self) -> u64 {
        match self {
            Transaction::Legacy { gas_limit, .. } |
            Transaction::Eip2930 { gas_limit, .. } |
            Transaction::Eip1559 { gas_limit, .. } => *gas_limit,
        }
    }

    /// Max fee per gas for eip1559 transaction, for legacy transactions this is `gas_price`
    pub fn max_fee_per_gas(&self) -> u64 {
        match self {
@@ -561,6 +571,8 @@ impl TransactionSigned {
    }

    /// Recover signer from signature and hash.
    ///
    /// Returns `None` if the transaction's signature is invalid.
    pub fn recover_signer(&self) -> Option<Address> {
        let signature_hash = self.signature_hash();
        self.signature.recover_signer(signature_hash)
@@ -697,10 +709,41 @@ impl TransactionSignedEcRecovered {
    }
}

/// A transaction type that can be created from a [`TransactionSignedEcRecovered`] transaction.
///
/// This is a conversion trait that'll ensure transactions received via P2P can be converted to the
/// transaction type that the transaction pool uses.
pub trait FromRecoveredTransaction {
    /// Converts to this type from the given [`TransactionSignedEcRecovered`].
    fn from_recovered_transaction(tx: TransactionSignedEcRecovered) -> Self;
}

// Noop conversion
impl FromRecoveredTransaction for TransactionSignedEcRecovered {
    #[inline]
    fn from_recovered_transaction(tx: TransactionSignedEcRecovered) -> Self {
        tx
    }
}

/// The inverse of [`FromRecoveredTransaction`] that ensures the transaction can be sent over the
/// network
pub trait IntoRecoveredTransaction {
    /// Converts this type into a [`TransactionSignedEcRecovered`].
    ///
    /// Note: this takes `&self` since intended usage is via `Arc<Self>`.
    fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered;
}

impl IntoRecoveredTransaction for TransactionSignedEcRecovered {
    #[inline]
    fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered {
        self.clone()
    }
}

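Pool implementations sit on the other side of this conversion boundary. A minimal sketch of a custom pool transaction type implementing both traits; `MyPooledTx` is purely illustrative and not part of this change:

    struct MyPooledTx {
        // The recovered transaction is kept verbatim in this toy example.
        inner: TransactionSignedEcRecovered,
    }

    impl FromRecoveredTransaction for MyPooledTx {
        fn from_recovered_transaction(tx: TransactionSignedEcRecovered) -> Self {
            MyPooledTx { inner: tx }
        }
    }

    impl IntoRecoveredTransaction for MyPooledTx {
        fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered {
            self.inner.clone()
        }
    }
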
#[cfg(test)]
mod tests {
    use std::str::FromStr;

    use crate::{
        transaction::{signature::Signature, TransactionKind},
        AccessList, Address, Bytes, Transaction, TransactionSigned, H256, U256,
@@ -708,6 +751,7 @@ mod tests {
    use bytes::BytesMut;
    use ethers_core::utils::hex;
    use reth_rlp::{Decodable, Encodable};
    use std::str::FromStr;

    #[test]
    fn test_decode_create() {

@@ -18,10 +18,14 @@ tracing-futures = "0.2.5"
tokio = { version = "1.21.2", features = ["sync"] }
aquamarine = "0.1.12"
metrics = "0.20.1"
futures-util = "0.3.25"

[dev-dependencies]
reth-db = { path = "../db", features = ["test-utils"] }
reth-interfaces = { path = "../interfaces", features = ["test-utils"] }
reth-bodies-downloaders = { path = "../net/bodies-downloaders" }
# TODO(onbjerg): We only need this for [BlockBody]
reth-eth-wire = { path = "../net/eth-wire" }
reth-headers-downloaders = { path = "../net/headers-downloaders" }
tokio = { version = "*", features = ["rt", "sync", "macros"] }
tokio-stream = "0.1.10"

@@ -1,5 +1,5 @@
use crate::pipeline::PipelineEvent;
use reth_interfaces::db::Error as DbError;
use reth_interfaces::{consensus, db::Error as DbError};
use reth_primitives::{BlockNumber, H256};
use thiserror::Error;
use tokio::sync::mpsc::error::SendError;
@@ -8,12 +8,13 @@ use tokio::sync::mpsc::error::SendError;
#[derive(Error, Debug)]
pub enum StageError {
    /// The stage encountered a state validation error.
    ///
    /// TODO: This depends on the consensus engine and should include the validation failure reason
    #[error("Stage encountered a validation error in block {block}.")]
    #[error("Stage encountered a validation error in block {block}: {error}.")]
    Validation {
        /// The block that failed validation.
        block: BlockNumber,
        /// The underlying consensus error.
        #[source]
        error: consensus::Error,
    },
    /// The stage encountered a database error.
    #[error("An internal database error occurred.")]
@@ -30,34 +31,41 @@ pub enum StageError {
/// The sender stage error
#[derive(Error, Debug)]
pub enum DatabaseIntegrityError {
    /// Cannonical hash is missing from db
    #[error("no cannonical hash for block #{number}")]
    CannonicalHash {
    // TODO(onbjerg): What's the difference between this and the one below?
    /// The canonical hash for a block is missing from the database.
    #[error("No canonical hash for block #{number}")]
    CanonicalHash {
        /// The block number key
        number: BlockNumber,
    },
    /// Cannonical header is missing from db
    #[error("no cannonical hash for block #{number}")]
    CannonicalHeader {
    /// The canonical header for a block is missing from the database.
    #[error("No canonical header for block #{number}")]
    CanonicalHeader {
        /// The block number key
        number: BlockNumber,
    },
    /// Header is missing from db
    #[error("no header for block #{number} ({hash})")]
    /// A header is missing from the database.
    #[error("No header for block #{number} ({hash})")]
    Header {
        /// The block number key
        number: BlockNumber,
        /// The block hash key
        hash: H256,
    },
    /// Cumulative transaction count is missing from db
    #[error("no cumulative tx count for ${number} ({hash})")]
    /// The cumulative transaction count is missing from the database.
    #[error("No cumulative tx count for ${number} ({hash})")]
    CumulativeTxCount {
        /// The block number key
        number: BlockNumber,
        /// The block hash key
        hash: H256,
    },
    /// A block body is missing.
    #[error("Block body not found for block #{number}")]
    BlockBody {
        /// The block number key
        number: BlockNumber,
    },
}

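The `Validation` variant now carries the consensus failure alongside the block number. A sketch of how a caller surfaces it, using `consensus::Error::BaseFeeMissing` as in the tests below:

    let err = StageError::Validation { block: 5, error: consensus::Error::BaseFeeMissing };
    if let StageError::Validation { block, error } = &err {
        // The source error is attached via `#[source]`, so it also shows up in error chains.
        println!("block {block} failed validation: {error}");
    }
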
/// A pipeline execution error.

@@ -341,8 +341,8 @@ impl<DB: Database> QueuedStage<DB> {
                Err(err) => {
                    state.events_sender.send(PipelineEvent::Error { stage_id }).await?;

                    return if let StageError::Validation { block } = err {
                        debug!(stage = %stage_id, bad_block = %block, "Stage encountered a validation error.");
                    return if let StageError::Validation { block, error } = err {
                        debug!(stage = %stage_id, bad_block = %block, "Stage encountered a validation error: {error}");

                        // We unwind because of a validation error. If the unwind itself fails,
                        // we bail entirely, otherwise we restart the execution loop from the
@@ -362,13 +362,13 @@ impl<DB: Database> QueuedStage<DB> {

#[cfg(test)]
mod tests {

    use super::*;
    use crate::{StageId, UnwindOutput};
    use reth_db::{
        kv::{test_utils, Env, EnvKind},
        mdbx::{self, WriteMap},
    };
    use reth_interfaces::consensus;
    use tokio::sync::mpsc::channel;
    use tokio_stream::{wrappers::ReceiverStream, StreamExt};
    use utils::TestStage;
@@ -520,7 +520,10 @@ mod tests {
            )
            .push(
                TestStage::new(StageId("B"))
                    .add_exec(Err(StageError::Validation { block: 5 }))
                    .add_exec(Err(StageError::Validation {
                        block: 5,
                        error: consensus::Error::BaseFeeMissing,
                    }))
                    .add_unwind(Ok(UnwindOutput { stage_progress: 0 }))
                    .add_exec(Ok(ExecOutput {
                        stage_progress: 10,

835
crates/stages/src/stages/bodies.rs
Normal file
@@ -0,0 +1,835 @@
use crate::{
    DatabaseIntegrityError, ExecInput, ExecOutput, Stage, StageError, StageId, UnwindInput,
    UnwindOutput,
};
use futures_util::TryStreamExt;
use reth_interfaces::{
    consensus::Consensus,
    db::{
        models::StoredBlockBody, tables, DBContainer, Database, DatabaseGAT, DbCursorRO,
        DbCursorRW, DbTx, DbTxMut,
    },
    p2p::bodies::downloader::BodyDownloader,
};
use reth_primitives::{
    proofs::{EMPTY_LIST_HASH, EMPTY_ROOT},
    BlockLocked, BlockNumber, SealedHeader, H256,
};
use std::fmt::Debug;
use tracing::warn;

const BODIES: StageId = StageId("Bodies");

// TODO(onbjerg): Metrics and events (gradual status for e.g. CLI)
/// The body stage downloads block bodies.
///
/// The body stage downloads block bodies for all block headers stored locally in the database.
///
/// # Empty blocks
///
/// Blocks with an ommers hash corresponding to no ommers *and* a transaction root corresponding to
/// no transactions will not have a block body downloaded for them, since it would be meaningless to
/// do so.
///
/// This also means that if there is no body for the block in the database (assuming the
/// block number <= the synced block of this stage), then the block can be considered empty.
///
/// # Tables
///
/// The bodies are processed and data is inserted into these tables:
///
/// - [`BlockBodies`][reth_interfaces::db::tables::BlockBodies]
/// - [`Transactions`][reth_interfaces::db::tables::Transactions]
///
/// # Genesis
///
/// This stage expects that the genesis has been inserted into the appropriate tables:
///
/// - The header tables (see [HeadersStage][crate::stages::headers::HeadersStage])
/// - The various indexes (e.g. [TotalTxIndex][crate::stages::tx_index::TxIndex])
/// - The [`BlockBodies`][reth_interfaces::db::tables::BlockBodies] table
#[derive(Debug)]
pub struct BodyStage<D: BodyDownloader, C: Consensus> {
    /// The body downloader.
    pub downloader: D,
    /// The consensus engine.
    pub consensus: C,
    /// The maximum amount of block bodies to process in one stage execution.
    ///
    /// Smaller batch sizes result in less memory usage, but more disk I/O. Larger batch sizes
    /// result in more memory usage, less disk I/O, and more infrequent checkpoints.
    pub batch_size: u64,
}

#[async_trait::async_trait]
impl<DB: Database, D: BodyDownloader, C: Consensus> Stage<DB> for BodyStage<D, C> {
    /// Return the id of the stage
    fn id(&self) -> StageId {
        BODIES
    }

    /// Download block bodies from the last checkpoint for this stage up until the latest synced
    /// header, limited by the stage's batch size.
    async fn execute(
        &mut self,
        db: &mut DBContainer<'_, DB>,
        input: ExecInput,
    ) -> Result<ExecOutput, StageError> {
        let tx = db.get_mut();

        let previous_stage_progress =
            input.previous_stage.as_ref().map(|(_, block)| *block).unwrap_or_default();
        if previous_stage_progress == 0 {
            warn!("The body stage seems to be running first, no work can be completed.");
        }

        // The block we ended at last sync, and the one we are starting on now
        let previous_block = input.stage_progress.unwrap_or_default();
        let starting_block = previous_block + 1;

        // Short circuit in case we already reached the target block
        let target = previous_stage_progress.min(starting_block + self.batch_size);
        if target <= previous_block {
            return Ok(ExecOutput { stage_progress: target, reached_tip: true, done: true })
        }

        let bodies_to_download = self.bodies_to_download::<DB>(tx, starting_block, target)?;

        // Cursors used to write bodies and transactions
        let mut bodies_cursor = tx.cursor_mut::<tables::BlockBodies>()?;
        let mut tx_cursor = tx.cursor_mut::<tables::Transactions>()?;
        let mut base_tx_id = bodies_cursor
            .last()?
            .map(|(_, body)| body.base_tx_id + body.tx_amount)
            .ok_or(DatabaseIntegrityError::BlockBody { number: starting_block })?;

        // Cursor used to look up headers for block pre-validation
        let mut header_cursor = tx.cursor::<tables::Headers>()?;

        // NOTE(onbjerg): The stream needs to live here otherwise it will just create a new iterator
        // on every iteration of the while loop -_-
        let mut bodies_stream = self.downloader.bodies_stream(bodies_to_download.iter());
        let mut highest_block = previous_block;
        while let Some((block_number, header_hash, body)) =
            bodies_stream.try_next().await.map_err(|err| StageError::Internal(err.into()))?
        {
            // Fetch the block header for pre-validation
            let block = BlockLocked {
                header: SealedHeader::new(
                    header_cursor
                        .seek_exact((block_number, header_hash).into())?
                        .ok_or(DatabaseIntegrityError::Header {
                            number: block_number,
                            hash: header_hash,
                        })?
                        .1,
                    header_hash,
                ),
                body: body.transactions,
                // TODO: We should have a type w/o receipts probably, no reason to allocate here
                receipts: vec![],
                ommers: body.ommers.into_iter().map(|header| header.seal()).collect(),
            };

            // Pre-validate the block and unwind if it is invalid
            self.consensus
                .pre_validate_block(&block)
                .map_err(|err| StageError::Validation { block: block_number, error: err })?;

            // Write block
            bodies_cursor.append(
                (block_number, header_hash).into(),
                StoredBlockBody {
                    base_tx_id,
                    tx_amount: block.body.len() as u64,
                    ommers: block.ommers.into_iter().map(|header| header.unseal()).collect(),
                },
            )?;

            // Write transactions
            for transaction in block.body {
                tx_cursor.append(base_tx_id, transaction)?;
                base_tx_id += 1;
            }

            highest_block = block_number;
        }

        // The stage is "done" if:
        // - We got fewer blocks than our target
        // - We reached our target and the target was not limited by the batch size of the stage
        let capped = target < previous_stage_progress;
        let done = highest_block < target || !capped;

        Ok(ExecOutput { stage_progress: highest_block, reached_tip: true, done })
    }

    /// Unwind the stage.
    async fn unwind(
        &mut self,
        db: &mut DBContainer<'_, DB>,
        input: UnwindInput,
    ) -> Result<UnwindOutput, Box<dyn std::error::Error + Send + Sync>> {
        let tx = db.get_mut();
        let mut block_body_cursor = tx.cursor_mut::<tables::BlockBodies>()?;
        let mut transaction_cursor = tx.cursor_mut::<tables::Transactions>()?;

        let mut entry = block_body_cursor.last()?;
        while let Some((key, body)) = entry {
            if key.number() <= input.unwind_to {
                break
            }

            for num in 0..body.tx_amount {
                let tx_id = body.base_tx_id + num;
                if transaction_cursor.seek_exact(tx_id)?.is_some() {
                    transaction_cursor.delete_current()?;
                }
            }

            block_body_cursor.delete_current()?;
            entry = block_body_cursor.prev()?;
        }

        Ok(UnwindOutput { stage_progress: input.unwind_to })
    }
}

impl<D: BodyDownloader, C: Consensus> BodyStage<D, C> {
    /// Computes a list of `(block_number, header_hash)` for blocks that we need to download bodies
    /// for.
    ///
    /// This skips empty blocks (i.e. no ommers, no transactions).
    fn bodies_to_download<DB: Database>(
        &self,
        tx: &mut <DB as DatabaseGAT<'_>>::TXMut,
        starting_block: BlockNumber,
        target: BlockNumber,
    ) -> Result<Vec<(BlockNumber, H256)>, StageError> {
        let mut header_cursor = tx.cursor::<tables::Headers>()?;
        let mut header_hashes_cursor = tx.cursor::<tables::CanonicalHeaders>()?;
        let mut walker = header_hashes_cursor
            .walk(starting_block)?
            .take_while(|item| item.as_ref().map_or(false, |(num, _)| *num <= target));

        let mut bodies_to_download = Vec::new();
        while let Some(Ok((block_number, header_hash))) = walker.next() {
            let header = header_cursor
                .seek_exact((block_number, header_hash).into())?
                .ok_or(DatabaseIntegrityError::Header { number: block_number, hash: header_hash })?
                .1;
            if header.ommers_hash == EMPTY_LIST_HASH && header.transactions_root == EMPTY_ROOT {
                continue
            }

            bodies_to_download.push((block_number, header_hash));
        }

        Ok(bodies_to_download)
    }
}

|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::util::test_utils::StageTestRunner;
|
||||
use assert_matches::assert_matches;
|
||||
use reth_eth_wire::BlockBody;
|
||||
use reth_interfaces::{
|
||||
consensus,
|
||||
p2p::bodies::error::DownloadError,
|
||||
test_utils::generators::{random_block, random_block_range},
|
||||
};
|
||||
use reth_primitives::{BlockNumber, H256};
|
||||
use std::collections::HashMap;
|
||||
use test_utils::*;
|
||||
|
||||
/// Check that the execution is short-circuited if the database is empty.
|
||||
#[tokio::test]
|
||||
async fn empty_db() {
|
||||
let runner = BodyTestRunner::new(TestBodyDownloader::default);
|
||||
let rx = runner.execute(ExecInput::default());
|
||||
assert_matches!(
|
||||
rx.await.unwrap(),
|
||||
Ok(ExecOutput { stage_progress: 0, reached_tip: true, done: true })
|
||||
)
|
||||
}
|
||||
|
||||
/// Check that the execution is short-circuited if the target was already reached.
|
||||
#[tokio::test]
|
||||
async fn already_reached_target() {
|
||||
let runner = BodyTestRunner::new(TestBodyDownloader::default);
|
||||
let rx = runner.execute(ExecInput {
|
||||
previous_stage: Some((StageId("Headers"), 100)),
|
||||
stage_progress: Some(100),
|
||||
});
|
||||
assert_matches!(
|
||||
rx.await.unwrap(),
|
||||
Ok(ExecOutput { stage_progress: 100, reached_tip: true, done: true })
|
||||
)
|
||||
}
|
||||
|
||||
/// Checks that the stage downloads at most `batch_size` blocks.
|
||||
#[tokio::test]
|
||||
async fn partial_body_download() {
|
||||
// Generate blocks
|
||||
let blocks = random_block_range(1..200, GENESIS_HASH);
|
||||
let bodies: HashMap<H256, Result<BlockBody, DownloadError>> =
|
||||
blocks.iter().map(body_by_hash).collect();
|
||||
let mut runner = BodyTestRunner::new(|| TestBodyDownloader::new(bodies.clone()));
|
||||
|
||||
// Set the batch size (max we sync per stage execution) to less than the number of blocks
|
||||
// the previous stage synced (10 vs 20)
|
||||
runner.set_batch_size(10);
|
||||
|
||||
// Insert required state
|
||||
runner.insert_genesis().expect("Could not insert genesis block");
|
||||
runner
|
||||
.insert_headers(blocks.iter().map(|block| &block.header))
|
||||
.expect("Could not insert headers");
|
||||
|
||||
// Run the stage
|
||||
let rx = runner.execute(ExecInput {
|
||||
previous_stage: Some((StageId("Headers"), blocks.len() as BlockNumber)),
|
||||
stage_progress: None,
|
||||
});
|
||||
|
||||
// Check that we only synced around `batch_size` blocks even though the number of blocks
|
||||
// synced by the previous stage is higher
|
||||
let output = rx.await.unwrap();
|
||||
assert_matches!(
|
||||
output,
|
||||
Ok(ExecOutput { stage_progress, reached_tip: true, done: false }) if stage_progress < 200
|
||||
);
|
||||
runner
|
||||
.validate_db_blocks(output.unwrap().stage_progress)
|
||||
.expect("Written block data invalid");
|
||||
}
|
||||
|
||||
/// Same as [partial_body_download] except the `batch_size` is not hit.
|
||||
#[tokio::test]
|
||||
async fn full_body_download() {
|
||||
// Generate blocks #1-20
|
||||
let blocks = random_block_range(1..21, GENESIS_HASH);
|
||||
let bodies: HashMap<H256, Result<BlockBody, DownloadError>> =
|
||||
blocks.iter().map(body_by_hash).collect();
|
||||
let mut runner = BodyTestRunner::new(|| TestBodyDownloader::new(bodies.clone()));
|
||||
|
||||
// Set the batch size to more than what the previous stage synced (40 vs 20)
|
||||
runner.set_batch_size(40);
|
||||
|
||||
// Insert required state
|
||||
runner.insert_genesis().expect("Could not insert genesis block");
|
||||
runner
|
||||
.insert_headers(blocks.iter().map(|block| &block.header))
|
||||
.expect("Could not insert headers");
|
||||
|
||||
// Run the stage
|
||||
let rx = runner.execute(ExecInput {
|
||||
previous_stage: Some((StageId("Headers"), blocks.len() as BlockNumber)),
|
||||
stage_progress: None,
|
||||
});
|
||||
|
||||
// Check that we synced all blocks successfully, even though our `batch_size` allows us to
|
||||
// sync more (if there were more headers)
|
||||
let output = rx.await.unwrap();
|
||||
assert_matches!(
|
||||
output,
|
||||
Ok(ExecOutput { stage_progress: 20, reached_tip: true, done: true })
|
||||
);
|
||||
runner
|
||||
.validate_db_blocks(output.unwrap().stage_progress)
|
||||
.expect("Written block data invalid");
|
||||
}
|
||||
|
||||
/// Same as [full_body_download] except we have made progress before
|
||||
#[tokio::test]
|
||||
async fn sync_from_previous_progress() {
|
||||
// Generate blocks #1-20
|
||||
let blocks = random_block_range(1..21, GENESIS_HASH);
|
||||
let bodies: HashMap<H256, Result<BlockBody, DownloadError>> =
|
||||
blocks.iter().map(body_by_hash).collect();
|
||||
let runner = BodyTestRunner::new(|| TestBodyDownloader::new(bodies.clone()));
|
||||
|
||||
// Insert required state
|
||||
runner.insert_genesis().expect("Could not insert genesis block");
|
||||
runner
|
||||
.insert_headers(blocks.iter().map(|block| &block.header))
|
||||
.expect("Could not insert headers");
|
||||
|
||||
// Run the stage
|
||||
let rx = runner.execute(ExecInput {
|
||||
previous_stage: Some((StageId("Headers"), blocks.len() as BlockNumber)),
|
||||
stage_progress: None,
|
||||
});
|
||||
|
||||
// Check that we synced at least 10 blocks
|
||||
let first_run = rx.await.unwrap();
|
||||
assert_matches!(
|
||||
first_run,
|
||||
Ok(ExecOutput { stage_progress, reached_tip: true, done: false }) if stage_progress >= 10
|
||||
);
|
||||
let first_run_progress = first_run.unwrap().stage_progress;
|
||||
|
||||
// Execute again on top of the previous run
|
||||
let rx = runner.execute(ExecInput {
|
||||
previous_stage: Some((StageId("Headers"), blocks.len() as BlockNumber)),
|
||||
stage_progress: Some(first_run_progress),
|
||||
});
|
||||
|
||||
// Check that we synced more blocks
|
||||
let output = rx.await.unwrap();
|
||||
assert_matches!(
|
||||
output,
|
||||
Ok(ExecOutput { stage_progress, reached_tip: true, done: true }) if stage_progress > first_run_progress
|
||||
);
|
||||
runner
|
||||
.validate_db_blocks(output.unwrap().stage_progress)
|
||||
.expect("Written block data invalid");
|
||||
}
|
||||
|
||||
/// Checks that the stage asks to unwind if pre-validation of the block fails.
|
||||
#[tokio::test]
|
||||
async fn pre_validation_failure() {
|
||||
// Generate blocks #1-19
|
||||
let blocks = random_block_range(1..20, GENESIS_HASH);
|
||||
let bodies: HashMap<H256, Result<BlockBody, DownloadError>> =
|
||||
blocks.iter().map(body_by_hash).collect();
|
||||
let mut runner = BodyTestRunner::new(|| TestBodyDownloader::new(bodies.clone()));
|
||||
|
||||
// Fail validation
|
||||
runner.set_fail_validation(true);
|
||||
|
||||
// Insert required state
|
||||
runner.insert_genesis().expect("Could not insert genesis block");
|
||||
runner
|
||||
.insert_headers(blocks.iter().map(|block| &block.header))
|
||||
.expect("Could not insert headers");
|
||||
|
||||
// Run the stage
|
||||
let rx = runner.execute(ExecInput {
|
||||
previous_stage: Some((StageId("Headers"), blocks.len() as BlockNumber)),
|
||||
stage_progress: None,
|
||||
});
|
||||
|
||||
// Check that the error bubbles up
|
||||
assert_matches!(
|
||||
rx.await.unwrap(),
|
||||
Err(StageError::Validation { block: 1, error: consensus::Error::BaseFeeMissing })
|
||||
);
|
||||
}
|
||||
|
||||
/// Checks that the stage unwinds correctly with no data.
|
||||
#[tokio::test]
|
||||
async fn unwind_empty_db() {
|
||||
let unwind_to = 10;
|
||||
let runner = BodyTestRunner::new(TestBodyDownloader::default);
|
||||
let rx = runner.unwind(UnwindInput { bad_block: None, stage_progress: 20, unwind_to });
|
||||
assert_matches!(
|
||||
rx.await.unwrap(),
|
||||
Ok(UnwindOutput { stage_progress }) if stage_progress == unwind_to
|
||||
)
|
||||
}
|
||||
|
||||
/// Checks that the stage unwinds correctly with data.
|
||||
#[tokio::test]
|
||||
async fn unwind() {
|
||||
// Generate blocks #1-20
|
||||
let blocks = random_block_range(1..21, GENESIS_HASH);
|
||||
let bodies: HashMap<H256, Result<BlockBody, DownloadError>> =
|
||||
blocks.iter().map(body_by_hash).collect();
|
||||
let mut runner = BodyTestRunner::new(|| TestBodyDownloader::new(bodies.clone()));
|
||||
|
||||
// Set the batch size to more than what the previous stage synced (40 vs 20)
|
||||
runner.set_batch_size(40);
|
||||
|
||||
// Insert required state
|
||||
runner.insert_genesis().expect("Could not insert genesis block");
|
||||
runner
|
||||
.insert_headers(blocks.iter().map(|block| &block.header))
|
||||
.expect("Could not insert headers");
|
||||
|
||||
// Run the stage
|
||||
let rx = runner.execute(ExecInput {
|
||||
previous_stage: Some((StageId("Headers"), blocks.len() as BlockNumber)),
|
||||
stage_progress: None,
|
||||
});
|
||||
|
||||
// Check that we synced all blocks successfully, even though our `batch_size` allows us to
|
||||
// sync more (if there were more headers)
|
||||
let output = rx.await.unwrap();
|
||||
assert_matches!(
|
||||
output,
|
||||
Ok(ExecOutput { stage_progress: 20, reached_tip: true, done: true })
|
||||
);
|
||||
let stage_progress = output.unwrap().stage_progress;
|
||||
runner.validate_db_blocks(stage_progress).expect("Written block data invalid");
|
||||
|
||||
// Unwind all of it
|
||||
let unwind_to = 1;
|
||||
let rx = runner.unwind(UnwindInput { bad_block: None, stage_progress, unwind_to });
|
||||
assert_matches!(
|
||||
rx.await.unwrap(),
|
||||
Ok(UnwindOutput { stage_progress }) if stage_progress == 1
|
||||
);
|
||||
|
||||
let last_body = runner.last_body().expect("Could not read last body");
|
||||
let last_tx_id = last_body.base_tx_id + last_body.tx_amount;
|
||||
runner
|
||||
.db()
|
||||
.check_no_entry_above::<tables::BlockBodies, _>(unwind_to, |key| key.number())
|
||||
.expect("Did not unwind block bodies correctly.");
|
||||
runner
|
||||
.db()
|
||||
.check_no_entry_above::<tables::Transactions, _>(last_tx_id, |key| key)
|
||||
.expect("Did not unwind transactions correctly.")
|
||||
}
|
||||
|
||||
/// Checks that the stage unwinds correctly, even if a transaction in a block is missing.
|
||||
#[tokio::test]
|
||||
async fn unwind_missing_tx() {
|
||||
// Generate blocks #1-20
|
||||
let blocks = random_block_range(1..21, GENESIS_HASH);
|
||||
let bodies: HashMap<H256, Result<BlockBody, DownloadError>> =
|
||||
blocks.iter().map(body_by_hash).collect();
|
||||
let mut runner = BodyTestRunner::new(|| TestBodyDownloader::new(bodies.clone()));
|
||||
|
||||
// Set the batch size to more than what the previous stage synced (40 vs 20)
|
||||
runner.set_batch_size(40);
|
||||
|
||||
// Insert required state
|
||||
runner.insert_genesis().expect("Could not insert genesis block");
|
||||
runner
|
||||
.insert_headers(blocks.iter().map(|block| &block.header))
|
||||
.expect("Could not insert headers");
|
||||
|
||||
// Run the stage
|
||||
let rx = runner.execute(ExecInput {
|
||||
previous_stage: Some((StageId("Headers"), blocks.len() as BlockNumber)),
|
||||
stage_progress: None,
|
||||
});
|
||||
|
||||
// Check that we synced all blocks successfully, even though our `batch_size` allows us to
|
||||
// sync more (if there were more headers)
|
||||
let output = rx.await.unwrap();
|
||||
assert_matches!(
|
||||
output,
|
||||
Ok(ExecOutput { stage_progress: 20, reached_tip: true, done: true })
|
||||
);
|
||||
let stage_progress = output.unwrap().stage_progress;
|
||||
runner.validate_db_blocks(stage_progress).expect("Written block data invalid");
|
||||
|
||||
// Delete a transaction
|
||||
{
|
||||
let mut db = runner.db().container();
|
||||
let mut tx_cursor = db
|
||||
.get_mut()
|
||||
.cursor_mut::<tables::Transactions>()
|
||||
.expect("Could not get transaction cursor");
|
||||
tx_cursor
|
||||
.last()
|
||||
.expect("Could not read database")
|
||||
.expect("Could not read last transaction");
|
||||
tx_cursor.delete_current().expect("Could not delete last transaction");
|
||||
db.commit().expect("Could not commit database");
|
||||
}
|
||||
|
||||
// Unwind all of it
|
||||
let unwind_to = 1;
|
||||
let rx = runner.unwind(UnwindInput { bad_block: None, stage_progress, unwind_to });
|
||||
assert_matches!(
|
||||
rx.await.unwrap(),
|
||||
Ok(UnwindOutput { stage_progress }) if stage_progress == 1
|
||||
);
|
||||
|
||||
let last_body = runner.last_body().expect("Could not read last body");
|
||||
let last_tx_id = last_body.base_tx_id + last_body.tx_amount;
|
||||
runner
|
||||
.db()
|
||||
.check_no_entry_above::<tables::BlockBodies, _>(unwind_to, |key| key.number())
|
||||
.expect("Did not unwind block bodies correctly.");
|
||||
runner
|
||||
.db()
|
||||
.check_no_entry_above::<tables::Transactions, _>(last_tx_id, |key| key)
|
||||
.expect("Did not unwind transactions correctly.")
|
||||
}
|
||||
|
||||
/// Checks that the stage exits if the downloader times out
|
||||
/// TODO: We should probably just exit as "OK", commit the blocks we downloaded successfully and
|
||||
/// try again?
|
||||
#[tokio::test]
|
||||
async fn downloader_timeout() {
|
||||
// Generate a header
|
||||
let header = random_block(1, Some(GENESIS_HASH)).header;
|
||||
let runner = BodyTestRunner::new(|| {
|
||||
TestBodyDownloader::new(HashMap::from([(
|
||||
header.hash(),
|
||||
Err(DownloadError::Timeout { header_hash: header.hash() }),
|
||||
)]))
|
||||
});
|
||||
|
||||
// Insert required state
|
||||
runner.insert_genesis().expect("Could not insert genesis block");
|
||||
runner.insert_header(&header).expect("Could not insert header");
|
||||
|
||||
// Run the stage
|
||||
let rx = runner.execute(ExecInput {
|
||||
previous_stage: Some((StageId("Headers"), 1)),
|
||||
stage_progress: None,
|
||||
});
|
||||
|
||||
// Check that the error bubbles up
|
||||
assert_matches!(rx.await.unwrap(), Err(StageError::Internal(_)));
|
||||
}
|
||||
|
||||
mod test_utils {
|
||||
use crate::{
|
||||
stages::bodies::BodyStage,
|
||||
util::test_utils::{StageTestDB, StageTestRunner},
|
||||
};
|
||||
use assert_matches::assert_matches;
|
||||
use async_trait::async_trait;
|
||||
use reth_eth_wire::BlockBody;
|
||||
use reth_interfaces::{
|
||||
db,
|
||||
db::{
|
||||
models::{BlockNumHash, StoredBlockBody},
|
||||
tables, DbCursorRO, DbTx, DbTxMut,
|
||||
},
|
||||
p2p::bodies::{
|
||||
client::BodiesClient,
|
||||
downloader::{BodiesStream, BodyDownloader},
|
||||
error::{BodiesClientError, DownloadError},
|
||||
},
|
||||
test_utils::TestConsensus,
|
||||
};
|
||||
use reth_primitives::{
|
||||
BigEndianHash, BlockLocked, BlockNumber, Header, SealedHeader, H256, U256,
|
||||
};
|
||||
use std::{collections::HashMap, ops::Deref, time::Duration};
|
||||
|
||||
/// The block hash of the genesis block.
|
||||
pub(crate) const GENESIS_HASH: H256 = H256::zero();
|
||||
|
||||
/// A helper to create a collection of resulted-wrapped block bodies keyed by their hash.
|
||||
pub(crate) fn body_by_hash(
|
||||
block: &BlockLocked,
|
||||
) -> (H256, Result<BlockBody, DownloadError>) {
|
||||
(
|
||||
block.hash(),
|
||||
Ok(BlockBody {
|
||||
transactions: block.body.clone(),
|
||||
ommers: block.ommers.iter().cloned().map(|ommer| ommer.unseal()).collect(),
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
/// A helper struct for running the [BodyStage].
|
||||
pub(crate) struct BodyTestRunner<F>
|
||||
where
|
||||
F: Fn() -> TestBodyDownloader,
|
||||
{
|
||||
downloader_builder: F,
|
||||
db: StageTestDB,
|
||||
batch_size: u64,
|
||||
fail_validation: bool,
|
||||
}
|
||||
|
||||
impl<F> BodyTestRunner<F>
|
||||
where
|
||||
F: Fn() -> TestBodyDownloader,
|
||||
{
|
||||
/// Build a new test runner.
|
||||
pub(crate) fn new(downloader_builder: F) -> Self {
|
||||
BodyTestRunner {
|
||||
downloader_builder,
|
||||
db: StageTestDB::default(),
|
||||
batch_size: 10,
|
||||
fail_validation: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn set_batch_size(&mut self, batch_size: u64) {
|
||||
self.batch_size = batch_size;
|
||||
}
|
||||
|
||||
pub(crate) fn set_fail_validation(&mut self, fail_validation: bool) {
|
||||
self.fail_validation = fail_validation;
|
||||
}
|
||||
}
|
||||
|
||||
impl<F> StageTestRunner for BodyTestRunner<F>
|
||||
where
|
||||
F: Fn() -> TestBodyDownloader,
|
||||
{
|
||||
type S = BodyStage<TestBodyDownloader, TestConsensus>;
|
||||
|
||||
fn db(&self) -> &StageTestDB {
|
||||
&self.db
|
||||
}
|
||||
|
||||
fn stage(&self) -> Self::S {
|
||||
let mut consensus = TestConsensus::default();
|
||||
consensus.set_fail_validation(self.fail_validation);
|
||||
|
||||
BodyStage {
|
||||
downloader: (self.downloader_builder)(),
|
||||
consensus,
|
||||
batch_size: self.batch_size,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<F> BodyTestRunner<F>
|
||||
where
|
||||
F: Fn() -> TestBodyDownloader,
|
||||
{
|
||||
/// Insert the genesis block into the appropriate tables
|
||||
///
|
||||
/// The genesis block always has no transactions and no ommers, and it always has the
|
||||
/// same hash.
|
||||
pub(crate) fn insert_genesis(&self) -> Result<(), db::Error> {
|
||||
self.insert_header(&SealedHeader::new(Header::default(), GENESIS_HASH))?;
|
||||
let mut db = self.db.container();
|
||||
let tx = db.get_mut();
|
||||
tx.put::<tables::BlockBodies>(
|
||||
(0, GENESIS_HASH).into(),
|
||||
StoredBlockBody { base_tx_id: 0, tx_amount: 0, ommers: vec![] },
|
||||
)?;
|
||||
db.commit()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Insert header into tables
|
||||
pub(crate) fn insert_header(&self, header: &SealedHeader) -> Result<(), db::Error> {
|
||||
self.insert_headers(std::iter::once(header))
|
||||
}
|
||||
|
||||
/// Insert headers into tables
|
||||
pub(crate) fn insert_headers<'a, I>(&self, headers: I) -> Result<(), db::Error>
|
||||
where
|
||||
I: Iterator<Item = &'a SealedHeader>,
|
||||
{
|
||||
let headers = headers.collect::<Vec<_>>();
|
||||
self.db
|
||||
.map_put::<tables::HeaderNumbers, _, _>(&headers, |h| (h.hash(), h.number))?;
|
||||
self.db.map_put::<tables::Headers, _, _>(&headers, |h| {
|
||||
(BlockNumHash((h.number, h.hash())), h.deref().clone().unseal())
|
||||
})?;
|
||||
self.db.map_put::<tables::CanonicalHeaders, _, _>(&headers, |h| {
|
||||
(h.number, h.hash())
|
||||
})?;
|
||||
|
||||
self.db.transform_append::<tables::HeaderTD, _, _>(&headers, |prev, h| {
|
||||
let prev_td = U256::from_big_endian(&prev.clone().unwrap_or_default());
|
||||
(
|
||||
BlockNumHash((h.number, h.hash())),
|
||||
H256::from_uint(&(prev_td + h.difficulty)).as_bytes().to_vec(),
|
||||
)
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn last_body(&self) -> Option<StoredBlockBody> {
|
||||
Some(
|
||||
self.db.container().get().cursor::<tables::BlockBodies>().ok()?.last().ok()??.1,
|
||||
)
|
||||
}
|
||||
|
||||
/// Validate that the inserted block data is valid
|
||||
pub(crate) fn validate_db_blocks(
|
||||
&self,
|
||||
highest_block: BlockNumber,
|
||||
) -> Result<(), db::Error> {
|
||||
let db = self.db.container();
|
||||
let tx = db.get();
|
||||
|
||||
let mut block_body_cursor = tx.cursor::<tables::BlockBodies>()?;
|
||||
let mut transaction_cursor = tx.cursor::<tables::Transactions>()?;
|
||||
|
||||
let mut entry = block_body_cursor.first()?;
|
||||
let mut prev_max_tx_id = 0;
|
||||
while let Some((key, body)) = entry {
|
||||
assert!(
|
||||
key.number() <= highest_block,
|
||||
"We wrote a block body outside of our synced range. Found block with number {}, highest block according to stage is {}",
|
||||
key.number(), highest_block
|
||||
);
|
||||
|
||||
assert!(prev_max_tx_id == body.base_tx_id, "Transaction IDs are malformed.");
|
||||
for num in 0..body.tx_amount {
|
||||
let tx_id = body.base_tx_id + num;
|
||||
assert_matches!(
|
||||
transaction_cursor.seek_exact(tx_id),
|
||||
Ok(Some(_)),
|
||||
"A transaction is missing."
|
||||
);
|
||||
}
|
||||
prev_max_tx_id = body.base_tx_id + body.tx_amount;
|
||||
entry = block_body_cursor.next()?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(onbjerg): Move
|
||||
/// A [BodiesClient] that should not be called.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct NoopClient;
|
||||
|
||||
#[async_trait]
|
||||
impl BodiesClient for NoopClient {
|
||||
async fn get_block_body(&self, _: H256) -> Result<BlockBody, BodiesClientError> {
|
||||
panic!("Noop client should not be called")
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(onbjerg): Move
|
||||
/// A [BodyDownloader] that is backed by an internal [HashMap] for testing.
|
||||
#[derive(Debug, Default)]
|
||||
pub(crate) struct TestBodyDownloader {
|
||||
responses: HashMap<H256, Result<BlockBody, DownloadError>>,
|
||||
}
|
||||
|
||||
impl TestBodyDownloader {
|
||||
pub(crate) fn new(responses: HashMap<H256, Result<BlockBody, DownloadError>>) -> Self {
|
||||
Self { responses }
|
||||
}
|
||||
}
|
||||
|
||||
impl BodyDownloader for TestBodyDownloader {
|
||||
type Client = NoopClient;
|
||||
|
||||
fn timeout(&self) -> Duration {
|
||||
unreachable!()
|
||||
}
|
||||
|
||||
fn client(&self) -> &Self::Client {
|
||||
unreachable!()
|
||||
}
|
||||
|
||||
fn bodies_stream<'a, 'b, I>(&'a self, hashes: I) -> BodiesStream<'a>
|
||||
where
|
||||
I: IntoIterator<Item = &'b (BlockNumber, H256)>,
|
||||
<I as IntoIterator>::IntoIter: Send + 'b,
|
||||
'b: 'a,
|
||||
{
|
||||
Box::pin(futures_util::stream::iter(hashes.into_iter().map(
|
||||
|(block_number, hash)| {
|
||||
Ok((
|
||||
*block_number,
|
||||
*hash,
|
||||
self.responses
|
||||
.get(hash)
|
||||
.expect("Stage tried downloading a block we do not have.")
|
||||
.clone()?,
|
||||
))
|
||||
},
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
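The `validate_db_blocks` helper above enforces a single invariant: stored bodies must occupy a contiguous transaction-ID space, with each body's `base_tx_id` starting exactly where the previous body ended. A minimal, self-contained sketch of that check, with plain `(base_tx_id, tx_amount)` tuples standing in for `StoredBlockBody` and the database cursors (the stand-in types are assumptions, not reth APIs):

/// Simplified stand-in for `StoredBlockBody`: (base_tx_id, tx_amount).
type Body = (u64, u64);

/// Returns true if the bodies occupy a contiguous transaction-ID range
/// starting at zero, mirroring the assertions in `validate_db_blocks`.
fn tx_ids_contiguous(bodies: &[Body]) -> bool {
    let mut prev_max_tx_id = 0;
    for (base_tx_id, tx_amount) in bodies {
        // Each body must start exactly where the previous one ended.
        if *base_tx_id != prev_max_tx_id {
            return false;
        }
        prev_max_tx_id = base_tx_id + tx_amount;
    }
    true
}

fn main() {
    // Genesis body (0 txs), then bodies with 2 and 3 transactions.
    assert!(tx_ids_contiguous(&[(0, 0), (0, 2), (2, 3)]));
    // A gap in the ID space is rejected.
    assert!(!tx_ids_contiguous(&[(0, 2), (3, 1)]));
}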

@@ -9,10 +9,7 @@ use reth_interfaces::{
        models::blocks::BlockNumHash, tables, DBContainer, Database, DatabaseGAT, DbCursorRO,
        DbCursorRW, DbTx, DbTxMut,
    },
    p2p::headers::{
        client::HeadersClient,
        downloader::{DownloadError, Downloader},
    },
    p2p::headers::{client::HeadersClient, downloader::HeaderDownloader, error::DownloadError},
};
use reth_primitives::{rpc::BigEndianHash, BlockNumber, SealedHeader, H256, U256};
use std::{fmt::Debug, sync::Arc};
@@ -20,9 +17,19 @@ use tracing::*;

const HEADERS: StageId = StageId("Headers");

/// The headers stage implementation for staged sync
/// The headers stage.
///
/// The headers stage downloads all block headers from the highest block in the local database to
/// the perceived highest block on the network.
///
/// The headers are processed and data is inserted into these tables:
///
/// - [`HeaderNumbers`][reth_interfaces::db::tables::HeaderNumbers]
/// - [`Headers`][reth_interfaces::db::tables::Headers]
/// - [`CanonicalHeaders`][reth_interfaces::db::tables::CanonicalHeaders]
/// - [`HeaderTD`][reth_interfaces::db::tables::HeaderTD]
#[derive(Debug)]
pub struct HeaderStage<D: Downloader, C: Consensus, H: HeadersClient> {
pub struct HeaderStage<D: HeaderDownloader, C: Consensus, H: HeadersClient> {
    /// Strategy for downloading the headers
    pub downloader: D,
    /// Consensus client implementation
@@ -32,7 +39,7 @@ pub struct HeaderStage<D: Downloader, C: Consensus, H: HeadersClient> {
}

#[async_trait::async_trait]
impl<DB: Database, D: Downloader, C: Consensus, H: HeadersClient> Stage<DB>
impl<DB: Database, D: HeaderDownloader, C: Consensus, H: HeadersClient> Stage<DB>
    for HeaderStage<D, C, H>
{
    /// Return the id of the stage
@@ -55,7 +62,7 @@ impl<DB: Database, D: Downloader, C: Consensus, H: HeadersClient> Stage<DB>
        // TODO: handle input.max_block
        let last_hash = tx
            .get::<tables::CanonicalHeaders>(last_block_num)?
            .ok_or(DatabaseIntegrityError::CannonicalHash { number: last_block_num })?;
            .ok_or(DatabaseIntegrityError::CanonicalHash { number: last_block_num })?;
        let last_header =
            tx.get::<tables::Headers>((last_block_num, last_hash).into())?.ok_or({
                DatabaseIntegrityError::Header { number: last_block_num, hash: last_hash }
@@ -81,14 +88,15 @@ impl<DB: Database, D: Downloader, C: Consensus, H: HeadersClient> Stage<DB>
                    done: false,
                })
            }
            DownloadError::HeaderValidation { hash, details } => {
                warn!("validation error for header {hash}: {details}");
                return Err(StageError::Validation { block: last_block_num })
            DownloadError::HeaderValidation { hash, error } => {
                warn!("Validation error for header {hash}: {error}");
                return Err(StageError::Validation { block: last_block_num, error })
            }
            // TODO: this error is never propagated, clean up
            DownloadError::MismatchedHeaders { .. } => {
                return Err(StageError::Validation { block: last_block_num })
            }
            // DownloadError::MismatchedHeaders { .. } => {
            //     return Err(StageError::Validation { block: last_block_num })
            // }
            _ => unreachable!(),
        },
    };
    let stage_progress = self.write_headers::<DB>(tx, headers).await?.unwrap_or(last_block_num);
@@ -116,7 +124,7 @@ impl<DB: Database, D: Downloader, C: Consensus, H: HeadersClient> Stage<DB>
    }
}

impl<D: Downloader, C: Consensus, H: HeadersClient> HeaderStage<D, C, H> {
impl<D: HeaderDownloader, C: Consensus, H: HeadersClient> HeaderStage<D, C, H> {
    async fn update_head<DB: Database>(
        &self,
        tx: &mut <DB as DatabaseGAT<'_>>::TXMut,
@@ -124,7 +132,7 @@ impl<D: Downloader, C: Consensus, H: HeadersClient> HeaderStage<D, C, H> {
    ) -> Result<(), StageError> {
        let hash = tx
            .get::<tables::CanonicalHeaders>(height)?
            .ok_or(DatabaseIntegrityError::CannonicalHeader { number: height })?;
            .ok_or(DatabaseIntegrityError::CanonicalHeader { number: height })?;
        let td: Vec<u8> = tx.get::<tables::HeaderTD>((height, hash).into())?.unwrap(); // TODO:
        self.client.update_status(height, hash, H256::from_slice(&td)).await;
        Ok(())
@@ -190,6 +198,8 @@ mod tests {

    stage_test_suite!(HeadersTestRunner);

    /// Check that the execution errors on empty database or
    /// prev progress missing from the database.
    #[tokio::test]
    // Validate that the execution does not fail on timeout
    async fn execute_timeout() {
@@ -206,8 +216,8 @@ mod tests {
        assert!(runner.validate_execution(input).is_ok(), "validation failed");
    }

    /// Execute the stage with linear downloader
    #[tokio::test]
    // Execute the stage with linear downloader
    async fn execute_with_linear_downloader() {
        let mut runner = HeadersTestRunner::with_linear_downloader();
        let (stage_progress, previous_stage) = (1000, 1200);
@@ -248,24 +258,19 @@ mod tests {
        },
        ExecInput, UnwindInput,
    };
    use async_trait::async_trait;
    use reth_headers_downloaders::linear::{LinearDownloadBuilder, LinearDownloader};
    use reth_interfaces::{
        consensus::ForkchoiceState,
        db::{self, models::blocks::BlockNumHash, tables, DbTx},
        p2p::headers::{
            client::HeadersClient,
            downloader::{DownloadError, Downloader},
        },
        p2p::headers::downloader::HeaderDownloader,
        test_utils::{
            gen_random_header, gen_random_header_range, TestConsensus, TestHeadersClient,
            generators::{random_header, random_header_range},
            TestConsensus, TestHeaderDownloader, TestHeadersClient,
        },
    };
    use reth_primitives::{rpc::BigEndianHash, SealedHeader, H256, U256};
    use std::{ops::Deref, sync::Arc, time::Duration};
    use tokio_stream::StreamExt;
    use std::{ops::Deref, sync::Arc};

    pub(crate) struct HeadersTestRunner<D: Downloader> {
    pub(crate) struct HeadersTestRunner<D: HeaderDownloader> {
        pub(crate) consensus: Arc<TestConsensus>,
        pub(crate) client: Arc<TestHeadersClient>,
        pub(crate) context: Option<Vec<SealedHeader>>,
@@ -273,20 +278,20 @@ mod tests {
        db: StageTestDB,
    }

    impl Default for HeadersTestRunner<TestDownloader> {
    impl Default for HeadersTestRunner<TestHeaderDownloader> {
        fn default() -> Self {
            let client = Arc::new(TestHeadersClient::default());
            Self {
                client: client.clone(),
                consensus: Arc::new(TestConsensus::default()),
                downloader: Arc::new(TestDownloader::new(client)),
                downloader: Arc::new(TestHeaderDownloader::new(client)),
                db: StageTestDB::default(),
                context: None,
            }
        }
    }

    impl<D: Downloader + 'static> StageTestRunner for HeadersTestRunner<D> {
    impl<D: HeaderDownloader + 'static> StageTestRunner for HeadersTestRunner<D> {
        type S = HeaderStage<Arc<D>, TestConsensus, TestHeadersClient>;

        fn db(&self) -> &StageTestDB {
@@ -303,19 +308,19 @@ mod tests {
    }

    #[async_trait::async_trait]
    impl<D: Downloader + 'static> ExecuteStageTestRunner for HeadersTestRunner<D> {
    impl<D: HeaderDownloader + 'static> ExecuteStageTestRunner for HeadersTestRunner<D> {
        fn seed_execution(
            &mut self,
            input: ExecInput,
        ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
            let start = input.stage_progress.unwrap_or_default();
            let head = gen_random_header(start, None);
            let head = random_header(start, None);
            self.insert_header(&head)?;

            // use previous progress as seed size
            let end = input.previous_stage.map(|(_, num)| num).unwrap_or_default() + 1;
            if end > start + 1 {
                let mut headers = gen_random_header_range(start + 1..end, head.hash());
                let mut headers = random_header_range(start + 1..end, head.hash());
                headers.insert(0, head);
                self.context = Some(headers);
            }
@@ -354,14 +359,14 @@ mod tests {
        }
    }

    impl<D: Downloader + 'static> UnwindStageTestRunner for HeadersTestRunner<D> {
    impl<D: HeaderDownloader + 'static> UnwindStageTestRunner for HeadersTestRunner<D> {
        fn seed_unwind(
            &mut self,
            input: UnwindInput,
            highest_entry: u64,
        ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
            let lowest_entry = input.unwind_to.saturating_sub(100);
            let headers = gen_random_header_range(lowest_entry..highest_entry, H256::zero());
            let headers = random_header_range(lowest_entry..highest_entry, H256::zero());
            self.insert_headers(headers.iter())?;
            Ok(())
        }
@@ -371,15 +376,15 @@ mod tests {
            input: UnwindInput,
        ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
            let unwind_to = input.unwind_to;
            self.db().check_no_entry_above_by_value::<tables::HeaderNumbers, _>(
            self.db.check_no_entry_above_by_value::<tables::HeaderNumbers, _>(
                unwind_to,
                |val| val,
            )?;
            self.db()
            self.db
                .check_no_entry_above::<tables::CanonicalHeaders, _>(unwind_to, |key| key)?;
            self.db()
            self.db
                .check_no_entry_above::<tables::Headers, _>(unwind_to, |key| key.number())?;
            self.db()
            self.db
                .check_no_entry_above::<tables::HeaderTD, _>(unwind_to, |key| key.number())?;
            Ok(())
        }
@@ -389,13 +394,14 @@ mod tests {
        pub(crate) fn with_linear_downloader() -> Self {
            let client = Arc::new(TestHeadersClient::default());
            let consensus = Arc::new(TestConsensus::default());
            let downloader =
                Arc::new(LinearDownloadBuilder::new().build(consensus.clone(), client.clone()));
            let downloader = Arc::new(
                LinearDownloadBuilder::default().build(consensus.clone(), client.clone()),
            );
            Self { client, consensus, downloader, db: StageTestDB::default(), context: None }
        }
    }

    impl<D: Downloader> HeadersTestRunner<D> {
    impl<D: HeaderDownloader> HeadersTestRunner<D> {
        /// Insert header into tables
        pub(crate) fn insert_header(&self, header: &SealedHeader) -> Result<(), db::Error> {
            self.insert_headers(std::iter::once(header))
@@ -458,57 +464,5 @@ mod tests {
            Ok(())
        }
    }

    #[derive(Debug)]
    pub(crate) struct TestDownloader {
        client: Arc<TestHeadersClient>,
    }

    impl TestDownloader {
        pub(crate) fn new(client: Arc<TestHeadersClient>) -> Self {
            Self { client }
        }
    }

    #[async_trait]
    impl Downloader for TestDownloader {
        type Consensus = TestConsensus;
        type Client = TestHeadersClient;

        fn timeout(&self) -> Duration {
            Duration::from_secs(1)
        }

        fn consensus(&self) -> &Self::Consensus {
            unimplemented!()
        }

        fn client(&self) -> &Self::Client {
            &self.client
        }

        async fn download(
            &self,
            _: &SealedHeader,
            _: &ForkchoiceState,
        ) -> Result<Vec<SealedHeader>, DownloadError> {
            let stream = self.client.stream_headers().await;
            let stream = stream.timeout(Duration::from_secs(1));

            match Box::pin(stream).try_next().await {
                Ok(Some(res)) => {
                    let mut headers =
                        res.headers.iter().map(|h| h.clone().seal()).collect::<Vec<_>>();
                    if !headers.is_empty() {
                        headers.sort_unstable_by_key(|h| h.number);
                        headers.remove(0); // remove head from response
                        headers.reverse();
                    }
                    Ok(headers)
                }
                _ => Err(DownloadError::Timeout { request_id: 0 }),
            }
        }
    }
}
}
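The test utilities in this diff seed `tables::HeaderTD` by folding each header's difficulty into a running total, so the stored value for block n is the sum of all difficulties up to and including n. A minimal sketch of that accumulation, using plain `u128` values instead of the `U256`/big-endian byte encoding the real helpers use (both stand-ins are assumptions):

/// td[n] = td[n - 1] + difficulty[n], the value seeded into `tables::HeaderTD`.
fn cumulative_td(difficulties: &[u128]) -> Vec<u128> {
    let mut running = 0u128;
    difficulties
        .iter()
        .map(|difficulty| {
            running += difficulty;
            running
        })
        .collect()
}

fn main() {
    assert_eq!(cumulative_td(&[10, 5, 7]), vec![10, 15, 22]);
}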

@@ -1,3 +1,5 @@
/// The bodies stage.
pub mod bodies;
/// The headers stage.
pub mod headers;
/// The cumulative transaction index stage.

@@ -37,13 +37,13 @@ impl<DB: Database> Stage<DB> for TxIndex {
        let last_block = input.stage_progress.unwrap_or_default();
        let last_hash = tx
            .get::<tables::CanonicalHeaders>(last_block)?
            .ok_or(DatabaseIntegrityError::CannonicalHeader { number: last_block })?;
            .ok_or(DatabaseIntegrityError::CanonicalHeader { number: last_block })?;

        // The start block for this iteration
        let start_block = last_block + 1;
        let start_hash = tx
            .get::<tables::CanonicalHeaders>(start_block)?
            .ok_or(DatabaseIntegrityError::CannonicalHeader { number: start_block })?;
            .ok_or(DatabaseIntegrityError::CanonicalHeader { number: start_block })?;

        // The maximum block that this stage should insert to
        let max_block = input.previous_stage.as_ref().map(|(_, block)| *block).unwrap_or_default();
@@ -65,8 +65,8 @@ impl<DB: Database> Stage<DB> for TxIndex {

        // Aggregate and insert cumulative transaction count for each block number
        for entry in entries {
            let (key, tx_count) = entry?;
            count += tx_count as u64;
            let (key, body) = entry?;
            count += body.tx_amount;
            cursor.append(key, count)?;
        }

@@ -92,7 +92,10 @@ mod tests {
        UnwindStageTestRunner,
    };
    use assert_matches::assert_matches;
    use reth_interfaces::{db::models::BlockNumHash, test_utils::gen_random_header_range};
    use reth_interfaces::{
        db::models::{BlockNumHash, StoredBlockBody},
        test_utils::generators::random_header_range,
    };
    use reth_primitives::H256;

    stage_test_suite!(TxIndexTestRunner);
@@ -123,21 +126,24 @@ mod tests {
            let start = pivot.saturating_sub(100);
            let mut end = input.previous_stage.as_ref().map(|(_, num)| *num).unwrap_or_default();
            end += 2; // generate 2 additional headers to account for start header lookup
            let headers = gen_random_header_range(start..end, H256::zero());
            let headers = random_header_range(start..end, H256::zero());

            let headers =
                headers.into_iter().map(|h| (h, rand::random::<u8>())).collect::<Vec<_>>();

            self.db().map_put::<tables::CanonicalHeaders, _, _>(&headers, |(h, _)| {
            self.db.map_put::<tables::CanonicalHeaders, _, _>(&headers, |(h, _)| {
                (h.number, h.hash())
            })?;
            self.db().map_put::<tables::BlockBodies, _, _>(&headers, |(h, count)| {
                (BlockNumHash((h.number, h.hash())), *count as u16)
            self.db.map_put::<tables::BlockBodies, _, _>(&headers, |(h, count)| {
                (
                    BlockNumHash((h.number, h.hash())),
                    StoredBlockBody { base_tx_id: 0, tx_amount: *count as u64, ommers: vec![] },
                )
            })?;

            let slice_up_to =
                std::cmp::min(pivot.saturating_sub(start) as usize, headers.len() - 1);
            self.db().transform_append::<tables::CumulativeTxCount, _, _>(
            self.db.transform_append::<tables::CumulativeTxCount, _, _>(
                &headers[..=slice_up_to],
                |prev, (h, count)| {
                    (BlockNumHash((h.number, h.hash())), prev.unwrap_or_default() + (*count as u64))
@@ -151,7 +157,7 @@ mod tests {
            &self,
            input: ExecInput,
        ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
            let db = self.db().container();
            let db = self.db.container();
            let tx = db.get();
            let (start, end) = (
                input.stage_progress.unwrap_or_default(),
@@ -169,7 +175,7 @@ mod tests {
            let mut last_num = start;
            while let Some(entry) = tx_count_walker.next() {
                let (key, db_count) = entry?;
                count += tx.get::<tables::BlockBodies>(key)?.unwrap() as u64;
                count += tx.get::<tables::BlockBodies>(key)?.unwrap().tx_amount as u64;
                assert_eq!(db_count, count);
                last_num = key.number();
            }
@@ -185,16 +191,13 @@ mod tests {
            input: UnwindInput,
            highest_entry: u64,
        ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
            let headers = gen_random_header_range(input.unwind_to..highest_entry, H256::zero());
            self.db().transform_append::<tables::CumulativeTxCount, _, _>(
                &headers,
                |prev, h| {
                    (
                        BlockNumHash((h.number, h.hash())),
                        prev.unwrap_or_default() + (rand::random::<u8>() as u64),
                    )
                },
            )?;
            let headers = random_header_range(input.unwind_to..highest_entry, H256::zero());
            self.db.transform_append::<tables::CumulativeTxCount, _, _>(&headers, |prev, h| {
                (
                    BlockNumHash((h.number, h.hash())),
                    prev.unwrap_or_default() + (rand::random::<u8>() as u64),
                )
            })?;
            Ok(())
        }

@@ -202,10 +205,9 @@ mod tests {
            &self,
            input: UnwindInput,
        ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
            self.db()
                .check_no_entry_above::<tables::CumulativeTxCount, _>(input.unwind_to, |h| {
                    h.number()
                })?;
            self.db.check_no_entry_above::<tables::CumulativeTxCount, _>(input.unwind_to, |h| {
                h.number()
            })?;
            Ok(())
        }
    }

@@ -193,6 +193,14 @@ pub(crate) mod test_utils {
        Ok(())
    }

    /// Invoke a callback with a transaction
    fn query<F, R>(&self, f: F) -> Result<R, Error>
    where
        F: FnOnce(&Tx<'_, RW, WriteMap>) -> Result<R, Error>,
    {
        f(self.container().get())
    }

    /// Map a collection of values and store them in the database.
    /// This function commits the transaction before exiting.
    ///
@@ -256,15 +264,13 @@ pub(crate) mod test_utils {
        T: Table,
        F: FnMut(T::Key) -> BlockNumber,
    {
        let db = self.container();
        let tx = db.get();

        let mut cursor = tx.cursor::<T>()?;
        if let Some((key, _)) = cursor.last()? {
            assert!(selector(key) <= block);
        }

        Ok(())
        self.query(|tx| {
            let mut cursor = tx.cursor::<T>()?;
            if let Some((key, _)) = cursor.last()? {
                assert!(selector(key) <= block);
            }
            Ok(())
        })
    }
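The new `query` helper is a small with-transaction combinator: it opens the database container, hands the transaction to a closure, and returns the closure's result, which lets both `check_no_entry_above` variants share one code path. A compilable miniature of the pattern, with `Container`/`Tx`/`TestDb` as hypothetical stand-ins for the real MDBX wrapper types:

/// Hypothetical stand-ins for the MDBX container and transaction wrappers.
struct Tx;
struct Container(Tx);

impl Container {
    fn get(&self) -> &Tx {
        &self.0
    }
}

struct TestDb;

impl TestDb {
    fn container(&self) -> Container {
        Container(Tx)
    }

    /// Invoke a callback with a transaction, mirroring the new helper.
    fn query<F, R>(&self, f: F) -> Result<R, String>
    where
        F: FnOnce(&Tx) -> Result<R, String>,
    {
        f(self.container().get())
    }
}

fn main() {
    let db = TestDb;
    // Callers no longer open the container and transaction by hand.
    let answer = db.query(|_tx| Ok(42)).unwrap();
    assert_eq!(answer, 42);
}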

    /// Check that there is no table entry above a given
@@ -278,15 +284,13 @@ pub(crate) mod test_utils {
        T: Table,
        F: FnMut(T::Value) -> BlockNumber,
    {
        let db = self.container();
        let tx = db.get();

        let mut cursor = tx.cursor::<T>()?;
        if let Some((_, value)) = cursor.last()? {
            assert!(selector(value) <= block);
        }

        Ok(())
        self.query(|tx| {
            let mut cursor = tx.cursor::<T>()?;
            if let Some((_, value)) = cursor.last()? {
                assert!(selector(value) <= block);
            }
            Ok(())
        })
    }
}


@@ -16,8 +16,9 @@ reth-primitives = { path = "../primitives" }

# async/futures
async-trait = "0.1"
futures = "0.3"
futures-util = "0.3"
parking_lot = "0.12"
tokio = { version = "1", default-features = false, features = ["sync"] }

# misc
aquamarine = "0.1" # docs

@@ -22,3 +22,17 @@ pub enum PoolError {
    #[error("[{0:?}] Transaction discarded outright due to pool size constraints.")]
    DiscardedOnInsert(TxHash),
}

// === impl PoolError ===

impl PoolError {
    /// Returns the hash of the transaction that resulted in this error.
    pub fn hash(&self) -> &TxHash {
        match self {
            PoolError::ReplacementUnderpriced(hash) => hash,
            PoolError::ProtocolFeeCapTooLow(hash, _) => hash,
            PoolError::SpammerExceededCapacity(_, hash) => hash,
            PoolError::DiscardedOnInsert(hash) => hash,
        }
    }
}
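The `hash` accessor added above is a common pattern for `thiserror` enums in which every variant carries the offending transaction hash: one exhaustive match surfaces it for logging or peer scoring. A compilable miniature with `u64` standing in for `TxHash` and only two of the variants (the trimmed enum is an assumption for brevity):

use thiserror::Error; // the pool's error enum already derives thiserror's Error

#[derive(Debug, Error)]
enum PoolError {
    #[error("[{0:?}] Transaction replacement is underpriced.")]
    ReplacementUnderpriced(u64),
    #[error("[{0:?}] Transaction discarded outright due to pool size constraints.")]
    DiscardedOnInsert(u64),
}

impl PoolError {
    /// Returns the hash of the transaction that resulted in this error.
    fn hash(&self) -> &u64 {
        match self {
            PoolError::ReplacementUnderpriced(hash) => hash,
            PoolError::DiscardedOnInsert(hash) => hash,
        }
    }
}

fn main() {
    let err = PoolError::DiscardedOnInsert(7);
    assert_eq!(*err.hash(), 7);
    println!("{err}");
}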

@@ -67,8 +67,8 @@
//! The final `TransactionPool` is made up of two layers:
//!
//! The lowest layer is the actual pool implementations that manages (validated) transactions:
//! [`TxPool`](crate::pool::TxPool). This is contained in a higher level pool type that guards the
//! low level pool and handles additional listeners or metrics:
//! [`TxPool`](crate::pool::txpool::TxPool). This is contained in a higher level pool type that
//! guards the low level pool and handles additional listeners or metrics:
//! [`PoolInner`](crate::pool::PoolInner)
//!
//! The transaction pool will be used by separate consumers (RPC, P2P), to make sharing easier, the
@@ -87,9 +87,9 @@ use crate::{
    traits::{NewTransactionEvent, PoolStatus, TransactionOrigin},
    validate::ValidPoolTransaction,
};
use futures::channel::mpsc::Receiver;
use reth_primitives::{BlockID, TxHash, U256, U64};
use std::{collections::HashMap, sync::Arc};
use tokio::sync::mpsc::Receiver;

mod config;
pub mod error;
@@ -131,11 +131,12 @@ where
        origin: TransactionOrigin,
        transactions: impl IntoIterator<Item = V::Transaction>,
    ) -> PoolResult<HashMap<TxHash, TransactionValidationOutcome<V::Transaction>>> {
        let outcome =
            futures::future::join_all(transactions.into_iter().map(|tx| self.validate(origin, tx)))
                .await
                .into_iter()
                .collect::<HashMap<_, _>>();
        let outcome = futures_util::future::join_all(
            transactions.into_iter().map(|tx| self.validate(origin, tx)),
        )
        .await
        .into_iter()
        .collect::<HashMap<_, _>>();

        Ok(outcome)
    }
@@ -209,6 +210,10 @@ where
        self.pool.add_transaction_listener()
    }

    fn pooled_transactions(&self) -> Vec<TxHash> {
        self.pool.pooled_transactions()
    }

    fn best_transactions(
        &self,
    ) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>> {
@@ -222,6 +227,10 @@ where
        todo!()
    }

    fn retain_unknown(&self, hashes: &mut Vec<TxHash>) {
        self.pool.retain_unknown(hashes)
    }

    fn get(&self, tx_hash: &TxHash) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
        self.inner().get(tx_hash)
    }

@@ -1,9 +1,9 @@
//! Listeners for the transaction-pool

use crate::pool::events::TransactionEvent;
use futures::channel::mpsc::UnboundedSender;
use reth_primitives::H256;
use std::{collections::HashMap, hash};
use tokio::sync::mpsc::UnboundedSender;

type EventSink<Hash> = UnboundedSender<TransactionEvent<Hash>>;

@@ -75,7 +75,7 @@ struct PoolEventNotifier<Hash> {

impl<Hash: Clone> PoolEventNotifier<Hash> {
    fn notify(&mut self, event: TransactionEvent<Hash>) {
        self.senders.retain(|sender| sender.unbounded_send(event.clone()).is_ok())
        self.senders.retain(|sender| sender.send(event.clone()).is_ok())
    }

    fn is_done(&self) -> bool {

@@ -73,7 +73,6 @@ use crate::{
};
use best::BestTransactions;
pub use events::TransactionEvent;
use futures::channel::mpsc::{channel, Receiver, Sender};
use parking_lot::{Mutex, RwLock};
use reth_primitives::{Address, TxHash, H256};
use std::{
@@ -81,6 +80,7 @@ use std::{
    sync::Arc,
    time::Instant,
};
use tokio::sync::mpsc;
use tracing::warn;

mod best;
@@ -107,9 +107,9 @@ pub struct PoolInner<V: TransactionValidator, T: TransactionOrdering> {
    /// Manages listeners for transaction state change events.
    event_listener: RwLock<PoolEventListener<TxHash>>,
    /// Listeners for new ready transactions.
    pending_transaction_listener: Mutex<Vec<Sender<TxHash>>>,
    pending_transaction_listener: Mutex<Vec<mpsc::Sender<TxHash>>>,
    /// Listeners for new transactions added to the pool.
    transaction_listener: Mutex<Vec<Sender<NewTransactionEvent<T::Transaction>>>>,
    transaction_listener: Mutex<Vec<mpsc::Sender<NewTransactionEvent<T::Transaction>>>>,
}

// === impl PoolInner ===
@@ -149,17 +149,23 @@ where

    /// Adds a new transaction listener to the pool that gets notified about every new _ready_
    /// transaction
    pub fn add_pending_listener(&self) -> Receiver<TxHash> {
    pub fn add_pending_listener(&self) -> mpsc::Receiver<TxHash> {
        const TX_LISTENER_BUFFER_SIZE: usize = 2048;
        let (tx, rx) = channel(TX_LISTENER_BUFFER_SIZE);
        let (tx, rx) = mpsc::channel(TX_LISTENER_BUFFER_SIZE);
        self.pending_transaction_listener.lock().push(tx);
        rx
    }

    /// Returns hashes of _all_ transactions in the pool.
    pub(crate) fn pooled_transactions(&self) -> Vec<TxHash> {
        let pool = self.pool.read();
        pool.all().hashes_iter().collect()
    }

    /// Adds a new transaction listener to the pool that gets notified about every new transaction
    pub fn add_transaction_listener(&self) -> Receiver<NewTransactionEvent<T::Transaction>> {
    pub fn add_transaction_listener(&self) -> mpsc::Receiver<NewTransactionEvent<T::Transaction>> {
        const TX_LISTENER_BUFFER_SIZE: usize = 1024;
        let (tx, rx) = channel(TX_LISTENER_BUFFER_SIZE);
        let (tx, rx) = mpsc::channel(TX_LISTENER_BUFFER_SIZE);
        self.transaction_listener.lock().push(tx);
        rx
    }
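Both listener registries now hand out bounded `tokio::sync::mpsc` channels, and, as the following hunks show, notification goes through `try_send` so a subscriber whose buffer is full is dropped rather than allowed to block the pool. A runnable sketch of that drop-on-full policy, with `u64` standing in for `TxHash` (and assuming tokio with the `rt` and `macros` features):

use tokio::sync::mpsc;

/// Notify every listener; drop the ones that cannot keep up or have hung up.
fn notify(listeners: &mut Vec<mpsc::Sender<u64>>, hash: u64) {
    listeners.retain(|listener| match listener.try_send(hash) {
        Ok(()) => true,
        // A lagging subscriber is dropped instead of blocking the pool.
        Err(mpsc::error::TrySendError::Full(_)) => false,
        // A closed subscriber is dropped as well.
        Err(mpsc::error::TrySendError::Closed(_)) => false,
    });
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel(1);
    let mut listeners = vec![tx];
    notify(&mut listeners, 1); // fits into the buffer of size 1
    notify(&mut listeners, 2); // buffer is full: the listener is dropped
    assert!(listeners.is_empty());
    // The buffered message is still delivered to the receiver.
    assert_eq!(rx.recv().await, Some(1));
}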
@@ -256,8 +262,8 @@ where
        let mut transaction_listeners = self.pending_transaction_listener.lock();
        transaction_listeners.retain_mut(|listener| match listener.try_send(*ready) {
            Ok(()) => true,
            Err(e) => {
                if e.is_full() {
            Err(err) => {
                if matches!(err, mpsc::error::TrySendError::Full(_)) {
                    warn!(
                        target: "txpool",
                        "[{:?}] dropping full ready transaction listener",
@@ -277,8 +283,8 @@ where

        transaction_listeners.retain_mut(|listener| match listener.try_send(event.clone()) {
            Ok(()) => true,
            Err(e) => {
                if e.is_full() {
            Err(err) => {
                if matches!(err, mpsc::error::TrySendError::Full(_)) {
                    warn!(
                        target: "txpool",
                        "dropping full transaction listener",
@@ -325,6 +331,12 @@ where
        self.pool.read().best_transactions()
    }

    /// Removes all transaction hashes from the given set that are already present in the pool.
    pub(crate) fn retain_unknown(&self, hashes: &mut Vec<TxHash>) {
        let pool = self.pool.read();
        hashes.retain(|tx| !pool.contains(tx))
    }
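`retain_unknown` inverts a membership check: it keeps only the hashes the pool has never seen, which is exactly what a P2P announcement handler needs before requesting full transaction bodies. A minimal sketch with a `HashSet` standing in for the pool's `by_hash` index:

use std::collections::HashSet;

/// Keep only the hashes the pool does not know yet.
fn retain_unknown(pool: &HashSet<u64>, hashes: &mut Vec<u64>) {
    hashes.retain(|hash| !pool.contains(hash));
}

fn main() {
    let pool: HashSet<u64> = [1, 2].into_iter().collect();
    let mut announced = vec![1, 2, 3];
    retain_unknown(&pool, &mut announced);
    // Only the hash the pool has never seen survives.
    assert_eq!(announced, vec![3]);
}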

    /// Returns the transaction by hash.
    pub(crate) fn get(
        &self,

@@ -102,6 +102,11 @@ impl<T: TransactionOrdering> TxPool<T> {
    }
}

    /// Returns access to the [`AllTransactions`] container.
    pub(crate) fn all(&self) -> &AllTransactions<T::Transaction> {
        &self.all_transactions
    }

    /// Returns stats about the pool.
    pub(crate) fn status(&self) -> PoolStatus {
        PoolStatus {
@@ -417,10 +422,6 @@ impl<T: TransactionOrdering> TxPool<T> {
#[cfg(test)]
#[allow(missing_docs)]
impl<T: TransactionOrdering> TxPool<T> {
    pub(crate) fn all(&self) -> &AllTransactions<T::Transaction> {
        &self.all_transactions
    }

    pub(crate) fn pending(&self) -> &PendingPool<T> {
        &self.pending_pool
    }
@@ -463,6 +464,11 @@ impl<T: PoolTransaction> AllTransactions<T> {
        Self { max_account_slots, ..Default::default() }
    }

    /// Returns an iterator over all _unique_ hashes in the pool
    pub(crate) fn hashes_iter(&self) -> impl Iterator<Item = TxHash> + '_ {
        self.by_hash.keys().copied()
    }

    /// Returns if the transaction for the given hash is already included in this pool
    pub(crate) fn contains(&self, tx_hash: &TxHash) -> bool {
        self.by_hash.contains_key(tx_hash)

@@ -11,7 +11,10 @@ use rand::{
    distributions::{Uniform, WeightedIndex},
    prelude::Distribution,
};
use reth_primitives::{Address, TxHash, H256, U256};
use reth_primitives::{
    Address, FromRecoveredTransaction, Transaction, TransactionSignedEcRecovered, TxHash, H256,
    U256,
};
use std::{ops::Range, sync::Arc, time::Instant};

pub type MockTxPool = TxPool<MockOrdering>;
@@ -333,6 +336,48 @@ impl PoolTransaction for MockTransaction {
    }
}

impl FromRecoveredTransaction for MockTransaction {
    fn from_recovered_transaction(tx: TransactionSignedEcRecovered) -> Self {
        let sender = tx.signer();
        let transaction = tx.into_signed();
        let hash = transaction.hash;
        match transaction.transaction {
            Transaction::Legacy { chain_id, nonce, gas_price, gas_limit, to, value, input } => {
                MockTransaction::Legacy {
                    hash,
                    sender,
                    nonce,
                    gas_price: gas_price.into(),
                    gas_limit,
                    value,
                }
            }
            Transaction::Eip1559 {
                chain_id,
                nonce,
                gas_limit,
                max_fee_per_gas,
                max_priority_fee_per_gas,
                to,
                value,
                input,
                access_list,
            } => MockTransaction::Eip1559 {
                hash,
                sender,
                nonce,
                max_fee_per_gas: max_fee_per_gas.into(),
                max_priority_fee_per_gas: max_priority_fee_per_gas.into(),
                gas_limit,
                value,
            },
            Transaction::Eip2930 { .. } => {
                unimplemented!()
            }
        }
    }
}

#[derive(Default)]
pub struct MockTransactionFactory {
    pub ids: SenderIdentifiers,

@@ -1,7 +1,7 @@
use crate::{error::PoolResult, pool::state::SubPool, validate::ValidPoolTransaction, BlockID};
use futures::{channel::mpsc::Receiver, future::Shared};
use reth_primitives::{Address, TxHash, H256, U256};
use reth_primitives::{Address, FromRecoveredTransaction, TxHash, H256, U256};
use std::{fmt, sync::Arc};
use tokio::sync::mpsc::Receiver;

/// General purpose abstraction of a transaction-pool.
///
@@ -23,6 +23,16 @@ pub trait TransactionPool: Send + Sync + 'static {
    /// affects the dynamic fee requirement of pending transactions in the pool.
    fn on_new_block(&self, event: OnNewBlockEvent);

    /// Imports an _external_ transaction.
    ///
    /// This is intended to be used by the network to insert incoming transactions received over the
    /// p2p network.
    ///
    /// Consumer: P2P
    async fn add_external_transaction(&self, transaction: Self::Transaction) -> PoolResult<TxHash> {
        self.add_transaction(TransactionOrigin::External, transaction).await
    }
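`add_external_transaction` is a provided trait method: implementors only write `add_transaction`, and the external-origin variant comes for free. A self-contained miniature of that default-delegation pattern (synchronous and with `u64` transactions for brevity; the real trait method is async and returns `PoolResult<TxHash>`):

#[derive(Debug, PartialEq)]
enum Origin {
    External, // the real TransactionOrigin carries further variants
}

trait Pool {
    fn add_transaction(&self, origin: Origin, tx: u64) -> u64;

    /// Provided method: external (p2p) imports delegate to `add_transaction`.
    fn add_external_transaction(&self, tx: u64) -> u64 {
        self.add_transaction(Origin::External, tx)
    }
}

struct MockPool;

impl Pool for MockPool {
    fn add_transaction(&self, origin: Origin, tx: u64) -> u64 {
        assert_eq!(origin, Origin::External);
        tx
    }
}

fn main() {
    // Only `add_transaction` was implemented; the external variant is inherited.
    assert_eq!(MockPool.add_external_transaction(7), 7);
}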

    /// Adds an _unvalidated_ transaction into the pool.
    ///
    /// Consumer: RPC
@@ -51,6 +61,13 @@ pub trait TransactionPool: Send + Sync + 'static {
    /// Returns a new stream that yields new valid transactions added to the pool.
    fn transactions_listener(&self) -> Receiver<NewTransactionEvent<Self::Transaction>>;

    /// Returns hashes of all transactions in the pool.
    ///
    /// Note: This returns a `Vec` but should guarantee that all hashes are unique.
    ///
    /// Consumer: P2P
    fn pooled_transactions(&self) -> Vec<TxHash>;

    /// Returns an iterator that yields transactions that are ready for block production.
    ///
    /// Consumer: Block production
@@ -68,6 +85,13 @@ pub trait TransactionPool: Send + Sync + 'static {
        tx_hashes: &[TxHash],
    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;

    /// Retains only those hashes that are unknown to the pool.
    /// In other words, removes all transactions from the given set that are currently present in
    /// the pool.
    ///
    /// Consumer: P2P
    fn retain_unknown(&self, hashes: &mut Vec<TxHash>);

    /// Returns if the transaction for the given hash is already included in this pool.
    fn contains(&self, tx_hash: &TxHash) -> bool {
        self.get(tx_hash).is_some()
@@ -166,7 +190,7 @@ impl<T> BestTransactions for std::iter::Empty<T> {
}

/// Trait for transaction types used inside the pool
pub trait PoolTransaction: fmt::Debug + Send + Sync + 'static {
pub trait PoolTransaction: fmt::Debug + Send + Sync + FromRecoveredTransaction {
    /// Hash of the transaction.
    fn hash(&self) -> &TxHash;