diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs
index 001adae172..2b6c54535c 100644
--- a/crates/net/eth-wire/src/ethstream.rs
+++ b/crates/net/eth-wire/src/ethstream.rs
@@ -259,7 +259,7 @@ mod tests {
     #[tokio::test]
     async fn can_handshake() {
         let genesis = H256::random();
-        let fork_filter = ForkFilter::new(0, genesis, vec![]);
+        let fork_filter = ForkFilter::new(0, genesis, Vec::<u64>::new());
 
         let status = Status {
             version: EthVersion::Eth67 as u8,
@@ -393,7 +393,7 @@ mod tests {
         );
 
         let genesis = H256::random();
-        let fork_filter = ForkFilter::new(0, genesis, vec![]);
+        let fork_filter = ForkFilter::new(0, genesis, Vec::<u64>::new());
 
         let status = Status {
             version: EthVersion::Eth67 as u8,
diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs
index c674783c6d..4054337915 100644
--- a/crates/net/network/src/config.rs
+++ b/crates/net/network/src/config.rs
@@ -6,7 +6,7 @@ use crate::{
     session::SessionsConfig,
 };
 use reth_discv4::{Discv4Config, Discv4ConfigBuilder, NodeRecord, DEFAULT_DISCOVERY_PORT};
-use reth_primitives::{Chain, PeerId, H256};
+use reth_primitives::{Chain, ForkFilter, Hardfork, PeerId, H256, MAINNET_GENESIS};
 use reth_tasks::TaskExecutor;
 use secp256k1::{SecretKey, SECP256K1};
 use std::{
@@ -51,6 +51,13 @@ pub struct NetworkConfig<C> {
     pub chain: Chain,
     /// Genesis hash of the network
     pub genesis_hash: H256,
+    /// The [`ForkFilter`] to use at launch for authenticating sessions.
+    ///
+    /// See also <https://eips.ethereum.org/EIPS/eip-2124>
+    ///
+    /// For sync from block `0`, this should be the default chain [`ForkFilter`] beginning at the
+    /// first hardfork, `Frontier` for mainnet.
+    pub fork_filter: ForkFilter,
     /// The block importer type.
     pub block_import: Box<dyn BlockImport>,
     /// The default mode of the network.
@@ -122,6 +129,10 @@ pub struct NetworkConfigBuilder {
     status: Option<Status>,
     /// Sets the hello message for the p2p handshake in RLPx
     hello_message: Option<HelloMessage>,
+    /// The [`ForkFilter`] to use at launch for authenticating sessions.
+    fork_filter: Option<ForkFilter>,
+    /// The head block used to initialize the fork filter.
+    head: Option<u64>,
 }
 
 // === impl NetworkConfigBuilder ===
@@ -139,12 +150,14 @@ impl NetworkConfigBuilder {
             peers_config: None,
             sessions_config: None,
             chain: Chain::Named(reth_primitives::rpc::Chain::Mainnet),
-            genesis_hash: Default::default(),
+            genesis_hash: MAINNET_GENESIS,
             block_import: Box::<ProofOfStakeBlockImport>::default(),
             network_mode: Default::default(),
             executor: None,
             status: None,
             hello_message: None,
+            fork_filter: None,
+            head: None,
         }
     }
 
@@ -259,6 +272,8 @@ impl NetworkConfigBuilder {
             executor,
             status,
             hello_message,
+            fork_filter,
+            head,
         } = self;
 
         let listener_addr = listener_addr.unwrap_or_else(|| {
@@ -269,6 +284,13 @@ impl NetworkConfigBuilder {
             hello_message.unwrap_or_else(|| HelloMessage::builder(peer_id).build());
         hello_message.port = listener_addr.port();
 
+        // get the fork filter
+        let fork_filter = fork_filter.unwrap_or_else(|| {
+            let head = head.unwrap_or_default();
+            // TODO(mattsse): this should be chain agnostic:
+            ForkFilter::new(head, genesis_hash, Hardfork::all_forks())
+        });
+
         NetworkConfig {
             client,
             secret_key,
@@ -287,6 +309,7 @@ impl NetworkConfigBuilder {
             executor,
             status: status.unwrap_or_default(),
             hello_message,
+            fork_filter,
         }
     }
 }
diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs
index 243287e6fd..6f0e3a5212 100644
--- a/crates/net/network/src/manager.rs
+++ b/crates/net/network/src/manager.rs
@@ -151,6 +151,7 @@ where
             executor,
             hello_message,
             status,
+            fork_filter,
             ..
         } = config;
 
@@ -166,8 +167,14 @@ where
         // need to retrieve the addr here since provided port could be `0`
         let local_peer_id = discovery.local_id();
 
-        let sessions =
-            SessionManager::new(secret_key, sessions_config, executor, status, hello_message);
+        let sessions = SessionManager::new(
+            secret_key,
+            sessions_config,
+            executor,
+            status,
+            hello_message,
+            fork_filter,
+        );
         let state = NetworkState::new(client, discovery, peers_manger, genesis_hash);
 
         let swarm = Swarm::new(incoming, sessions, state);
diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs
index 721fbbd4c6..366b25e905 100644
--- a/crates/net/network/src/session/mod.rs
+++ b/crates/net/network/src/session/mod.rs
@@ -19,7 +19,7 @@ use reth_eth_wire::{
     error::EthStreamError, DisconnectReason, HelloMessage, Status, UnauthedEthStream,
     UnauthedP2PStream,
 };
-use reth_primitives::{ForkFilter, Hardfork, PeerId, H256, U256};
+use reth_primitives::{ForkFilter, PeerId, H256, U256};
 use secp256k1::SecretKey;
 use std::{
     collections::HashMap,
@@ -103,13 +103,11 @@ impl SessionManager {
         executor: Option<TaskExecutor>,
         status: Status,
         hello_message: HelloMessage,
+        fork_filter: ForkFilter,
     ) -> Self {
         let (pending_sessions_tx, pending_sessions_rx) = mpsc::channel(config.session_event_buffer);
         let (active_session_tx, active_session_rx) = mpsc::channel(config.session_event_buffer);
 
-        let hardfork = Hardfork::from(status.forkid.next);
-        let fork_filter = hardfork.fork_filter();
-
         Self {
             next_id: 0,
             counter: SessionCounter::new(config.limits),
diff --git a/crates/primitives/src/forkid.rs b/crates/primitives/src/forkid.rs
index 62853d5e5e..7108bef962 100644
--- a/crates/primitives/src/forkid.rs
+++ b/crates/primitives/src/forkid.rs
@@ -11,6 +11,7 @@ use reth_rlp::*;
 use serde::{Deserialize, Serialize};
 use std::{
     collections::{BTreeMap, BTreeSet},
+    fmt,
     ops::{Add, AddAssign},
 };
 use thiserror::Error;
@@ -19,7 +20,6 @@
 #[derive(
     Clone,
     Copy,
-    Debug,
     PartialEq,
     Eq,
     Hash,
@@ -31,6 +31,12 @@ use thiserror::Error;
 )]
 pub struct ForkHash(pub [u8; 4]);
 
+impl fmt::Debug for ForkHash {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("ForkHash").field(&hex::encode(&self.0[..])).finish()
+    }
+}
+
 impl From<H256> for ForkHash {
     fn from(genesis: H256) -> Self {
         Self(crc32::checksum_ieee(&genesis[..]).to_be_bytes())
@@ -78,16 +84,28 @@ pub struct ForkId {
 #[derive(Clone, Copy, Debug, Error, PartialEq, Eq, Hash)]
 pub enum ValidationError {
     /// Remote node is outdated and needs a software update.
-    #[error("remote node is outdated and needs a software update")]
-    RemoteStale,
+    #[error(
+        "remote node is outdated and needs a software update: local={local:?}, remote={remote:?}"
+    )]
+    RemoteStale {
+        /// The locally configured [`ForkId`].
+        local: ForkId,
+        /// The [`ForkId`] received from the remote peer.
+        remote: ForkId,
+    },
     /// Local node is on an incompatible chain or needs a software update.
-    #[error("local node is on an incompatible chain or needs a software update")]
-    LocalIncompatibleOrStale,
+    #[error("local node is on an incompatible chain or needs a software update: local={local:?}, remote={remote:?}")]
+    LocalIncompatibleOrStale {
+        /// The locally configured [`ForkId`].
+        local: ForkId,
+        /// The [`ForkId`] received from the remote peer.
+        remote: ForkId,
+    },
 }
 
 /// Filter that describes the state of blockchain and can be used to check incoming `ForkId`s for
 /// compatibility.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ForkFilter {
     forks: BTreeMap<BlockNumber, ForkHash>,
 
@@ -96,55 +114,16 @@ pub struct ForkFilter {
     cache: Cache,
 }
 
-#[derive(Clone, Debug, PartialEq)]
-struct Cache {
-    // An epoch is a period between forks.
-    // When we progress from one fork to the next one we move to the next epoch.
-    epoch_start: BlockNumber,
-    epoch_end: Option<BlockNumber>,
-    past: Vec<(BlockNumber, ForkHash)>,
-    future: Vec<ForkHash>,
-    fork_id: ForkId,
-}
-
-impl Cache {
-    /// Compute cache.
-    fn compute_cache(forks: &BTreeMap<BlockNumber, ForkHash>, head: BlockNumber) -> Self {
-        let mut past = Vec::with_capacity(forks.len());
-        let mut future = Vec::with_capacity(forks.len());
-
-        let mut epoch_start = 0;
-        let mut epoch_end = None;
-        for (block, hash) in forks {
-            if *block <= head {
-                epoch_start = *block;
-                past.push((*block, *hash));
-            } else {
-                if epoch_end.is_none() {
-                    epoch_end = Some(*block);
-                }
-                future.push(*hash);
-            }
-        }
-
-        let fork_id = ForkId {
-            hash: past.last().expect("there is always at least one - genesis - fork hash; qed").1,
-            next: epoch_end.unwrap_or(0),
-        };
-
-        Self { epoch_start, epoch_end, past, future, fork_id }
-    }
-}
-
 impl ForkFilter {
     /// Create the filter from provided head, genesis block hash, past forks and expected future
     /// forks.
-    pub fn new<F>(head: BlockNumber, genesis: H256, forks: F) -> Self
+    pub fn new<F, B>(head: BlockNumber, genesis: H256, forks: F) -> Self
     where
-        F: IntoIterator<Item = BlockNumber>,
+        F: IntoIterator<Item = B>,
+        B: Into<BlockNumber>,
     {
         let genesis_fork_hash = ForkHash::from(genesis);
-        let mut forks = forks.into_iter().collect::<BTreeSet<_>>();
+        let mut forks = forks.into_iter().map(Into::into).collect::<BTreeSet<_>>();
         forks.remove(&0);
         let forks = forks
             .into_iter()
@@ -197,7 +176,10 @@ impl ForkFilter {
     /// Check whether the provided `ForkId` is compatible based on the validation rules in
     /// `EIP-2124`.
     ///
+    /// Implements the rules following: <https://eips.ethereum.org/EIPS/eip-2124>
+    ///
     /// # Errors
+    ///
     /// Returns a `ValidationError` if the `ForkId` is not compatible.
     pub fn validate(&self, fork_id: ForkId) -> Result<(), ValidationError> {
         // 1) If local and remote FORK_HASH matches...
@@ -208,13 +190,16 @@ impl ForkFilter {
             }
 
             //... compare local head to FORK_NEXT.
-            if self.head >= fork_id.next {
+            return if self.head >= fork_id.next {
                 // 1a) A remotely announced but remotely not passed block is already passed locally,
                 // disconnect, since the chains are incompatible.
-                return Err(ValidationError::LocalIncompatibleOrStale)
+                Err(ValidationError::LocalIncompatibleOrStale {
+                    local: self.current(),
+                    remote: fork_id,
+                })
             } else {
                 // 1b) Remotely announced fork not yet passed locally, connect.
-                return Ok(())
+                Ok(())
             }
         }
@@ -225,10 +210,10 @@
             // ...and the remote FORK_NEXT matches with the locally following fork block number,
             // connect.
             if let Some((actual_fork_block, _)) = it.next() {
-                if *actual_fork_block == fork_id.next {
-                    return Ok(())
+                return if *actual_fork_block == fork_id.next {
+                    Ok(())
                 } else {
-                    return Err(ValidationError::RemoteStale)
+                    Err(ValidationError::RemoteStale { local: self.current(), remote: fork_id })
                 }
             }
 
@@ -245,7 +230,47 @@
         }
 
         // 4) Reject in all other cases.
-        Err(ValidationError::LocalIncompatibleOrStale)
+        Err(ValidationError::LocalIncompatibleOrStale { local: self.current(), remote: fork_id })
     }
 }
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+struct Cache {
+    // An epoch is a period between forks.
+    // When we progress from one fork to the next one we move to the next epoch.
+    epoch_start: BlockNumber,
+    epoch_end: Option<BlockNumber>,
+    past: Vec<(BlockNumber, ForkHash)>,
+    future: Vec<ForkHash>,
+    fork_id: ForkId,
+}
+
+impl Cache {
+    /// Compute cache.
+    fn compute_cache(forks: &BTreeMap<BlockNumber, ForkHash>, head: BlockNumber) -> Self {
+        let mut past = Vec::with_capacity(forks.len());
+        let mut future = Vec::with_capacity(forks.len());
+
+        let mut epoch_start = 0;
+        let mut epoch_end = None;
+        for (block, hash) in forks {
+            if *block <= head {
+                epoch_start = *block;
+                past.push((*block, *hash));
+            } else {
+                if epoch_end.is_none() {
+                    epoch_end = Some(*block);
+                }
+                future.push(*hash);
+            }
+        }
+
+        let fork_id = ForkId {
+            hash: past.last().expect("there is always at least one - genesis - fork hash; qed").1,
+            next: epoch_end.unwrap_or(0),
+        };
+
+        Self { epoch_start, epoch_end, past, future, fork_id }
+    }
+}
 
@@ -253,7 +278,6 @@ impl ForkFilter {
 mod tests {
     use super::*;
     use hex_literal::hex;
-
     const GENESIS_HASH: H256 =
         H256(hex!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"));
 
@@ -276,7 +300,7 @@ mod tests {
         let mut filter = ForkFilter::new(
             0,
             GENESIS_HASH,
-            vec![1_150_000, 1_920_000, 2_463_000, 2_675_000, 4_370_000, 7_280_000],
+            vec![1_150_000u64, 1_920_000, 2_463_000, 2_675_000, 4_370_000, 7_280_000],
         );
 
         // Local is mainnet Petersburg, remote announces the same. No future fork is announced.
@@ -287,10 +311,7 @@ mod tests {
        // at block 0xffffffff, but that is uncertain.
         filter.set_head(7_987_396);
         assert_eq!(
-            filter.validate(ForkId {
-                hash: ForkHash(hex!("668db0af")),
-                next: BlockNumber::max_value()
-            }),
+            filter.validate(ForkId { hash: ForkHash(hex!("668db0af")), next: BlockNumber::MAX }),
             Ok(())
         );
 
@@ -316,10 +337,7 @@ mod tests {
         // mismatch, but we still connect for now.
         filter.set_head(7_279_999);
         assert_eq!(
-            filter.validate(ForkId {
-                hash: ForkHash(hex!("a00bc324")),
-                next: BlockNumber::max_value()
-            }),
+            filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: BlockNumber::MAX }),
             Ok(())
         );
 
@@ -353,32 +371,36 @@ mod tests {
         // Local is mainnet Petersburg. remote announces Byzantium but is not aware of further
         // forks. Remote needs software update.
         filter.set_head(7_987_396);
+        let remote = ForkId { hash: ForkHash(hex!("a00bc324")), next: 0 };
         assert_eq!(
-            filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 0 }),
-            Err(ValidationError::RemoteStale)
+            filter.validate(remote),
+            Err(ValidationError::RemoteStale { local: filter.current(), remote })
         );
 
         // Local is mainnet Petersburg, and isn't aware of more forks. Remote announces Petersburg +
         // 0xffffffff. Local needs software update, reject.
         filter.set_head(7_987_396);
+        let remote = ForkId { hash: ForkHash(hex!("5cddc0e1")), next: 0 };
         assert_eq!(
-            filter.validate(ForkId { hash: ForkHash(hex!("5cddc0e1")), next: 0 }),
-            Err(ValidationError::LocalIncompatibleOrStale)
+            filter.validate(remote),
+            Err(ValidationError::LocalIncompatibleOrStale { local: filter.current(), remote })
         );
 
         // Local is mainnet Byzantium, and is aware of Petersburg. Remote announces Petersburg +
         // 0xffffffff. Local needs software update, reject.
         filter.set_head(7_279_999);
+        let remote = ForkId { hash: ForkHash(hex!("5cddc0e1")), next: 0 };
         assert_eq!(
-            filter.validate(ForkId { hash: ForkHash(hex!("5cddc0e1")), next: 0 }),
-            Err(ValidationError::LocalIncompatibleOrStale)
+            filter.validate(remote),
+            Err(ValidationError::LocalIncompatibleOrStale { local: filter.current(), remote })
        );
 
         // Local is mainnet Petersburg, remote is Rinkeby Petersburg.
         filter.set_head(7_987_396);
+        let remote = ForkId { hash: ForkHash(hex!("afec6b27")), next: 0 };
         assert_eq!(
-            filter.validate(ForkId { hash: ForkHash(hex!("afec6b27")), next: 0 }),
-            Err(ValidationError::LocalIncompatibleOrStale)
+            filter.validate(remote),
+            Err(ValidationError::LocalIncompatibleOrStale { local: filter.current(), remote })
         );
 
         // Local is mainnet Petersburg, far in the future. Remote announces Gopherium (non existing
         // fork) at block 7777777, i.e. also in the future.
         //
         // This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
         filter.set_head(88_888_888);
+        let remote = ForkId { hash: ForkHash(hex!("668db0af")), next: 88_888_888 };
         assert_eq!(
-            filter.validate(ForkId { hash: ForkHash(hex!("668db0af")), next: 88_888_888 }),
-            Err(ValidationError::LocalIncompatibleOrStale)
+            filter.validate(remote),
+            Err(ValidationError::LocalIncompatibleOrStale { local: filter.current(), remote })
         );
 
         // Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non
         // existing fork) at block 7279999, before Petersburg. Local is incompatible.
         filter.set_head(7_279_999);
+        let remote = ForkId { hash: ForkHash(hex!("a00bc324")), next: 7_279_999 };
         assert_eq!(
-            filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 7_279_999 }),
-            Err(ValidationError::LocalIncompatibleOrStale)
+            filter.validate(remote),
+            Err(ValidationError::LocalIncompatibleOrStale { local: filter.current(), remote })
         );
     }
 
@@ -417,7 +441,7 @@
         assert_eq!(
             &*reth_rlp::encode_fixed_size(&ForkId {
                 hash: ForkHash(hex!("ffffffff")),
-                next: u64::max_value()
+                next: u64::MAX
             }),
             hex!("ce84ffffffff88ffffffffffffffff")
         );
@@ -432,7 +456,7 @@
         );
         assert_eq!(
             ForkId::decode(&mut (&hex!("ce84ffffffff88ffffffffffffffff") as &[u8])).unwrap(),
-            ForkId { hash: ForkHash(hex!("ffffffff")), next: u64::max_value() }
+            ForkId { hash: ForkHash(hex!("ffffffff")), next: u64::MAX }
         );
     }
 
diff --git a/crates/primitives/src/hardfork.rs b/crates/primitives/src/hardfork.rs
index 32ffbce346..e3e55bfa71 100644
--- a/crates/primitives/src/hardfork.rs
+++ b/crates/primitives/src/hardfork.rs
@@ -111,8 +111,9 @@ impl Hardfork {
     }
 
     /// Creates a [`ForkFilter`](crate::ForkFilter) for the given hardfork.
-    /// This assumes the current hardfork's block number is the current head and uses all known
-    /// future hardforks to initialize the filter.
+    ///
+    /// **CAUTION**: This assumes the current hardfork's block number is the current head and uses
+    /// all known future hardforks to initialize the filter.
    pub fn fork_filter(&self) -> ForkFilter {
         let all_forks = Hardfork::all_forks();
         let future_forks: Vec<BlockNumber> = all_forks
@@ -181,6 +182,12 @@ impl From<BlockNumber> for Hardfork {
     }
 }
 
+impl From<Hardfork> for BlockNumber {
+    fn from(value: Hardfork) -> Self {
+        value.fork_block()
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use crate::{forkid::ForkHash, hardfork::Hardfork};
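
Usage note (not part of the patch): the widened `ForkFilter::new` bound is what ties these changes together. Any iterator of values convertible into a block number is accepted, so the new `impl From<Hardfork> for BlockNumber` is what lets `NetworkConfigBuilder::build` fall back to `ForkFilter::new(head, genesis_hash, Hardfork::all_forks())`. Below is a minimal sketch of how the API reads after this change; it assumes `Hardfork::all_forks()` yields `Hardfork` values and that `MAINNET_GENESIS` is the mainnet genesis hash constant imported in `config.rs` above.

use reth_primitives::{ForkFilter, Hardfork, MAINNET_GENESIS};

fn fork_filter_examples() {
    // Plain block numbers still work, exactly as in the updated forkid tests.
    let by_block = ForkFilter::new(
        7_987_396,
        MAINNET_GENESIS,
        vec![1_150_000u64, 1_920_000, 2_463_000, 2_675_000, 4_370_000, 7_280_000],
    );

    // `Hardfork` values are converted through the new `Into<BlockNumber>` impl;
    // this mirrors the default fork filter built in `NetworkConfigBuilder::build`.
    let by_hardfork = ForkFilter::new(0, MAINNET_GENESIS, Hardfork::all_forks());

    // Both values are ready to be handed to `NetworkConfig` / `SessionManager`
    // for validating the `ForkId` advertised by peers during the eth handshake.
    let _ = (by_block, by_hardfork);
}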