From f1e6639374866aa13889f9d62dafef14c4d55fe0 Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Mon, 14 Nov 2022 12:03:05 -0500
Subject: [PATCH 1/9] feat(net): authenticate sessions (#178)

* Switch stream type of ActiveSession to EthStream
* Start `StatusBuilder` for initializing the `Status` message required for the handshake
* Add `Hardfork` for `Status` default forkid
* Add `MAINNET_GENESIS` constant
* finish `StatusBuilder`
* initialize eth streams in session
* add status, hello, and fork filter to session manager
* fix status builder example
* add status and hello to network config
* will probably remove
* removing status and hello from networkconfig
* move forkid to primitives
* change imports for forkid
* add hardfork to primitives
* remove hardfork and forkid from eth-wire
* fix remaining eth-wire forkid references
* put mainnet genesis in constants, remove NodeId
* replace NodeId with PeerId
* the only NodeId remaining is inherited from enr
* PeerId still needs to be documented
* also run cargo fmt
* replace loop with iter().any()
* ignore missing docs for hardforks
* use correct PeerId for Discv4::bind example test
* document PeerId as secp256k1 public key
* cargo fmt
* temporarily allow too_many_arguments
* the authenticate and start_pending_incoming_session methods have many arguments, we can reconsider the lint or fix the methods at a later point
---
 crates/net/discv4/src/config.rs             |   4 +-
 crates/net/discv4/src/lib.rs                |  72 +++---
 crates/net/discv4/src/mock.rs               |  26 +-
 crates/net/discv4/src/node.rs               |  26 +-
 crates/net/discv4/src/proto.rs              |  10 +-
 crates/net/eth-wire/Cargo.toml              |   3 +
 crates/net/eth-wire/src/builder.rs          | 145 +++++++++
 crates/net/eth-wire/src/capability.rs       |  10 +
 crates/net/eth-wire/src/error.rs            |   4 +-
 crates/net/eth-wire/src/ethstream.rs        |   8 +-
 crates/net/eth-wire/src/lib.rs              |   7 +-
 crates/net/eth-wire/src/p2pstream.rs        |   1 +
 crates/net/eth-wire/src/types/mod.rs        |   2 -
 crates/net/eth-wire/src/types/status.rs     |  32 ++-
 crates/net/network/src/config.rs            |   7 +-
 crates/net/network/src/discovery.rs         |  13 +-
 crates/net/network/src/fetch.rs             |  24 +-
 crates/net/network/src/lib.rs               |   5 +-
 crates/net/network/src/manager.rs           |  12 +-
 crates/net/network/src/message.rs           |   5 +-
 crates/net/network/src/network.rs           |   8 +-
 crates/net/network/src/peers.rs             |  26 +-
 crates/net/network/src/session/active.rs    |  11 +-
 crates/net/network/src/session/handle.rs    |  26 +-
 crates/net/network/src/session/mod.rs       | 143 ++++++++---
 crates/net/network/src/state.rs             |  21 +-
 crates/net/network/src/swarm.rs             |  26 +-
 crates/primitives/src/constants.rs          |   5 +
 .../src/types => primitives/src}/forkid.rs  |   2 +-
 crates/primitives/src/hardfork.rs           | 225 ++++++++++++++++++
 crates/primitives/src/lib.rs                |  12 +
 31 files changed, 714 insertions(+), 207 deletions(-)
 create mode 100644 crates/net/eth-wire/src/builder.rs
 create mode 100644 crates/primitives/src/constants.rs
 rename crates/{net/eth-wire/src/types => primitives/src}/forkid.rs (99%)
 create mode 100644 crates/primitives/src/hardfork.rs

diff --git a/crates/net/discv4/src/config.rs b/crates/net/discv4/src/config.rs
index bcafafc156..cccd8e0cda 100644
--- a/crates/net/discv4/src/config.rs
+++ b/crates/net/discv4/src/config.rs
@@ -24,7 +24,7 @@ pub struct Discv4Config {
     pub find_node_timeout: Duration,
     /// The duration we set for neighbours responses
     pub neighbours_timeout: Duration,
-    /// A set of lists that permit or ban IP's or NodeIds from the server. See
+    /// A set of lists that permit or ban IP's or PeerIds from the server.
See /// `crate::PermitBanList`. pub permit_ban_list: PermitBanList, /// Set the default duration for which nodes are banned for. This timeouts are checked every 5 @@ -110,7 +110,7 @@ impl Discv4ConfigBuilder { self } - /// A set of lists that permit or ban IP's or NodeIds from the server. See + /// A set of lists that permit or ban IP's or PeerIds from the server. See /// `crate::PermitBanList`. pub fn permit_ban_list(&mut self, list: PermitBanList) -> &mut Self { self.config.permit_ban_list = list; diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 2d9f899692..f006905c40 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -30,7 +30,7 @@ use discv5::{ }, ConnectionDirection, ConnectionState, }; -use reth_primitives::{H256, H512}; +use reth_primitives::{PeerId, H256}; use secp256k1::SecretKey; use std::{ cell::RefCell, @@ -67,9 +67,6 @@ pub mod mock; /// reexport to get public ip. pub use public_ip; -/// Identifier for nodes. -pub type NodeId = H512; - /// The default port for discv4 via UDP /// /// Note: the default TCP port is the same. @@ -140,12 +137,13 @@ impl Discv4 { /// use std::str::FromStr; /// use rand::thread_rng; /// use secp256k1::SECP256K1; - /// use reth_discv4::{Discv4, Discv4Config, NodeId, NodeRecord}; + /// use reth_primitives::PeerId; + /// use reth_discv4::{Discv4, Discv4Config, NodeRecord}; /// # async fn t() -> io::Result<()> { /// // generate a (random) keypair /// let mut rng = thread_rng(); /// let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng); - /// let id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]); + /// let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); /// /// let socket = SocketAddr::from_str("0.0.0.0:0").unwrap(); /// let local_enr = NodeRecord { @@ -212,11 +210,11 @@ impl Discv4 { } /// Looks up the given node id - pub async fn lookup(&self, node_id: NodeId) -> Result, Discv4Error> { + pub async fn lookup(&self, node_id: PeerId) -> Result, Discv4Error> { self.lookup_node(Some(node_id)).await } - async fn lookup_node(&self, node_id: Option) -> Result, Discv4Error> { + async fn lookup_node(&self, node_id: Option) -> Result, Discv4Error> { let (tx, rx) = oneshot::channel(); let cmd = Discv4Command::Lookup { node_id, tx: Some(tx) }; self.to_service.send(cmd).await?; @@ -269,9 +267,9 @@ pub struct Discv4Service { /// followup `FindNode` requests.... Buffering them effectively prevents high `Ping` peaks. queued_pings: VecDeque<(NodeRecord, PingReason)>, /// Currently active pings to specific nodes. - pending_pings: HashMap, + pending_pings: HashMap, /// Currently active FindNode requests - pending_find_nodes: HashMap, + pending_find_nodes: HashMap, /// Commands listener commands_rx: Option>, /// All subscribers for table updates @@ -377,8 +375,8 @@ impl Discv4Service { &mut self.local_enr } - /// Returns true if the given NodeId is currently in the bucket - pub fn contains_node(&self, id: NodeId) -> bool { + /// Returns true if the given PeerId is currently in the bucket + pub fn contains_node(&self, id: PeerId) -> bool { let key = kad_key(id); self.kbuckets.get_index(&key).is_some() } @@ -431,7 +429,7 @@ impl Discv4Service { // // To guard against traffic amplification attacks, Neighbors replies should only be sent if the // sender of FindNode has been verified by the endpoint proof procedure. 
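Throughout this diff, the `PeerId` that replaces the discv4-local `NodeId` alias is the 64-byte uncompressed secp256k1 public key with its leading `0x04` prefix byte stripped, which is why every keypair site repeats the same `serialize_uncompressed()[1..]` slice. A minimal sketch of that derivation, using only items already imported in this patch (the free-function wrapper is illustrative, not part of the diff):

```
use reth_primitives::PeerId;
use secp256k1::{SecretKey, SECP256K1};

/// Derives the peer id from a secret key: the uncompressed public key is
/// 65 bytes (0x04 prefix || x || y); dropping the prefix yields the id.
fn peer_id(sk: &SecretKey) -> PeerId {
    let pk = secp256k1::PublicKey::from_secret_key(SECP256K1, sk);
    PeerId::from_slice(&pk.serialize_uncompressed()[1..])
}
```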
- pub fn lookup(&mut self, target: NodeId) { + pub fn lookup(&mut self, target: PeerId) { self.lookup_with(target, None) } @@ -445,7 +443,7 @@ impl Discv4Service { /// This takes an optional Sender through which all successfully discovered nodes are sent once /// the request has finished. #[instrument(skip_all, fields(?target), target = "net::discv4")] - fn lookup_with(&mut self, target: NodeId, tx: Option) { + fn lookup_with(&mut self, target: PeerId, tx: Option) { trace!("Starting lookup"); let key = kad_key(target); @@ -499,7 +497,7 @@ impl Discv4Service { /// /// This allows applications, for whatever reason, to remove nodes from the local routing /// table. Returns `true` if the node was in the table and `false` otherwise. - pub fn remove_node(&mut self, node_id: NodeId) -> bool { + pub fn remove_node(&mut self, node_id: PeerId) -> bool { let key = kad_key(node_id); let removed = self.kbuckets.remove(&key); if removed { @@ -559,7 +557,7 @@ impl Discv4Service { } /// Message handler for an incoming `Ping` - fn on_ping(&mut self, ping: Ping, remote_addr: SocketAddr, remote_id: NodeId, hash: H256) { + fn on_ping(&mut self, ping: Ping, remote_addr: SocketAddr, remote_id: PeerId, hash: H256) { // update the record let record = NodeRecord { address: ping.from.address, @@ -611,7 +609,7 @@ impl Discv4Service { } /// Message handler for an incoming `Pong`. - fn on_pong(&mut self, pong: Pong, remote_addr: SocketAddr, remote_id: NodeId) { + fn on_pong(&mut self, pong: Pong, remote_addr: SocketAddr, remote_id: PeerId) { if self.is_expired(pong.expire) { return } @@ -654,7 +652,7 @@ impl Discv4Service { } /// Handler for incoming `FindNode` message - fn on_find_node(&mut self, msg: FindNode, remote_addr: SocketAddr, node_id: NodeId) { + fn on_find_node(&mut self, msg: FindNode, remote_addr: SocketAddr, node_id: PeerId) { match self.node_status(node_id, remote_addr) { NodeEntryStatus::IsLocal => { // received address from self @@ -675,7 +673,7 @@ impl Discv4Service { /// Handler for incoming `Neighbours` messages that are handled if they're responses to /// `FindNode` requests - fn on_neighbours(&mut self, msg: Neighbours, remote_addr: SocketAddr, node_id: NodeId) { + fn on_neighbours(&mut self, msg: Neighbours, remote_addr: SocketAddr, node_id: PeerId) { // check if this request was expected let ctx = match self.pending_find_nodes.entry(node_id) { Entry::Occupied(mut entry) => { @@ -732,7 +730,7 @@ impl Discv4Service { } /// Sends a Neighbours packet for `target` to the given addr - fn respond_closest(&mut self, target: NodeId, to: SocketAddr) { + fn respond_closest(&mut self, target: PeerId, to: SocketAddr) { let key = kad_key(target); let expire = self.send_neighbours_timeout(); let all_nodes = self.kbuckets.closest_values(&key).collect::>(); @@ -746,7 +744,7 @@ impl Discv4Service { } /// Returns the current status of the node - fn node_status(&mut self, node: NodeId, addr: SocketAddr) -> NodeEntryStatus { + fn node_status(&mut self, node: PeerId, addr: SocketAddr) -> NodeEntryStatus { if node == self.local_enr.id { debug!(?node, target = "net::disc", "Got an incoming discovery request from self"); return NodeEntryStatus::IsLocal @@ -807,7 +805,7 @@ impl Discv4Service { } /// Removes the node from the table - fn expire_node_request(&mut self, node_id: NodeId) { + fn expire_node_request(&mut self, node_id: PeerId) { let key = kad_key(node_id); self.kbuckets.remove(&key); } @@ -976,7 +974,7 @@ pub(crate) async fn send_loop(udp: Arc, rx: EgressReceiver) { } /// Continuously awaits new incoming 
messages and sends them back through the channel. -pub(crate) async fn receive_loop(udp: Arc, tx: IngressSender, local_id: NodeId) { +pub(crate) async fn receive_loop(udp: Arc, tx: IngressSender, local_id: PeerId) { loop { let mut buf = [0; MAX_PACKET_SIZE]; let res = udp.recv_from(&mut buf).await; @@ -1010,7 +1008,7 @@ pub(crate) async fn receive_loop(udp: Arc, tx: IngressSender, local_i /// The commands sent from the frontend to the service enum Discv4Command { - Lookup { node_id: Option, tx: Option }, + Lookup { node_id: Option, tx: Option }, Updates(OneshotSender>), } @@ -1036,7 +1034,7 @@ struct PingRequest { reason: PingReason, } -/// Rotates the NodeId that is periodically looked up. +/// Rotates the PeerId that is periodically looked up. /// /// By selecting different targets, the lookups will be seeded with different ALPHA seed nodes. #[derive(Debug)] @@ -1066,13 +1064,13 @@ impl Default for LookupTargetRotator { impl LookupTargetRotator { /// this will return the next node id to lookup - fn next(&mut self, local: &NodeId) -> NodeId { + fn next(&mut self, local: &PeerId) -> PeerId { self.counter += 1; self.counter %= self.interval; if self.counter == 0 { return *local } - NodeId::random() + PeerId::random() } } @@ -1087,7 +1085,7 @@ struct LookupContext { impl LookupContext { /// Create new context for a recursive lookup fn new( - target: NodeId, + target: PeerId, nearest_nodes: impl IntoIterator, listener: Option, ) -> Self { @@ -1107,7 +1105,7 @@ impl LookupContext { } /// Returns the target of this lookup - fn target(&self) -> NodeId { + fn target(&self) -> PeerId { self.inner.target } @@ -1132,7 +1130,7 @@ impl LookupContext { } /// Marks the node as queried - fn mark_queried(&self, id: NodeId) { + fn mark_queried(&self, id: PeerId) { if let Some((_, node)) = self.inner.closest_nodes.borrow_mut().iter_mut().find(|(_, node)| node.record.id == id) { @@ -1141,7 +1139,7 @@ impl LookupContext { } /// Marks the node as responded - fn mark_responded(&self, id: NodeId) { + fn mark_responded(&self, id: PeerId) { if let Some((_, node)) = self.inner.closest_nodes.borrow_mut().iter_mut().find(|(_, node)| node.record.id == id) { @@ -1159,7 +1157,7 @@ impl LookupContext { unsafe impl Send for LookupContext {} struct LookupContextInner { - target: NodeId, + target: PeerId, /// The closest nodes closest_nodes: RefCell>, /// A listener for all the nodes retrieved in this lookup @@ -1249,7 +1247,7 @@ enum PingReason { /// /// Once the expected PONG is received, the endpoint proof is complete and the find node can be /// answered. - FindNode(NodeId, NodeEntryStatus), + FindNode(PeerId, NodeEntryStatus), /// Part of a lookup to ensure endpoint is proven. Lookup(NodeRecord, LookupContext), } @@ -1260,7 +1258,7 @@ pub enum TableUpdate { /// A new node was inserted to the table. 
Added(NodeRecord), /// Node that was removed from the table - Removed(NodeId), + Removed(PeerId), /// A series of updates Batch(Vec), } @@ -1276,7 +1274,7 @@ mod tests { #[test] fn test_local_rotator() { - let id = NodeId::random(); + let id = PeerId::random(); let mut rotator = LookupTargetRotator::local_only(); assert_eq!(rotator.next(&id), id); assert_eq!(rotator.next(&id), id); @@ -1284,7 +1282,7 @@ mod tests { #[test] fn test_rotator() { - let id = NodeId::random(); + let id = PeerId::random(); let mut rotator = LookupTargetRotator::default(); assert_eq!(rotator.next(&id), id); assert_ne!(rotator.next(&id), id); @@ -1301,7 +1299,7 @@ mod tests { let local_addr = service.local_addr(); for idx in 0..MAX_NODES_PING { - let node = NodeRecord::new(local_addr, NodeId::random()); + let node = NodeRecord::new(local_addr, PeerId::random()); service.add_node(node); assert!(service.pending_pings.contains_key(&node.id)); assert_eq!(service.pending_pings.len(), idx + 1); diff --git a/crates/net/discv4/src/mock.rs b/crates/net/discv4/src/mock.rs index c4565911d7..87d04cdd9c 100644 --- a/crates/net/discv4/src/mock.rs +++ b/crates/net/discv4/src/mock.rs @@ -6,7 +6,7 @@ use crate::{ node::NodeRecord, proto::{FindNode, Message, Neighbours, NodeEndpoint, Packet, Ping, Pong}, receive_loop, send_loop, Discv4, Discv4Config, Discv4Service, EgressSender, IngressEvent, - IngressReceiver, NodeId, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS, + IngressReceiver, PeerId, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS, }; use rand::{thread_rng, Rng, RngCore}; use reth_primitives::H256; @@ -40,8 +40,8 @@ pub struct MockDiscovery { ingress: IngressReceiver, /// Sender for sending outgoing messages egress: EgressSender, - pending_pongs: HashSet, - pending_neighbours: HashMap>, + pending_pongs: HashSet, + pending_neighbours: HashMap>, command_rx: mpsc::Receiver, } @@ -51,7 +51,7 @@ impl MockDiscovery { let mut rng = thread_rng(); let socket = SocketAddr::from_str("0.0.0.0:0").unwrap(); let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng); - let id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]); + let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); let socket = Arc::new(UdpSocket::bind(socket).await?); let local_addr = socket.local_addr()?; let local_enr = NodeRecord { @@ -95,12 +95,12 @@ impl MockDiscovery { } /// Queue a pending pong. - pub fn queue_pong(&mut self, from: NodeId) { + pub fn queue_pong(&mut self, from: PeerId) { self.pending_pongs.insert(from); } /// Queue a pending Neighbours response. 
- pub fn queue_neighbours(&mut self, target: NodeId, nodes: Vec) { + pub fn queue_neighbours(&mut self, target: PeerId, nodes: Vec) { self.pending_neighbours.insert(target, nodes); } @@ -195,8 +195,8 @@ pub enum MockEvent { /// Command for interacting with the `MockDiscovery` service pub enum MockCommand { - MockPong { node_id: NodeId }, - MockNeighbours { target: NodeId, nodes: Vec }, + MockPong { node_id: PeerId }, + MockNeighbours { target: PeerId, nodes: Vec }, } /// Creates a new testing instance for [`Discv4`] and its service @@ -209,7 +209,7 @@ pub async fn create_discv4_with_config(config: Discv4Config) -> (Discv4, Discv4S let mut rng = thread_rng(); let socket = SocketAddr::from_str("0.0.0.0:0").unwrap(); let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng); - let id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]); + let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); let external_addr = public_ip::addr().await.unwrap_or_else(|| socket.ip()); let local_enr = NodeRecord { address: external_addr, tcp_port: socket.port(), udp_port: socket.port(), id }; @@ -231,21 +231,21 @@ pub fn rng_endpoint(rng: &mut impl Rng) -> NodeEndpoint { pub fn rng_record(rng: &mut impl RngCore) -> NodeRecord { let NodeEndpoint { address, udp_port, tcp_port } = rng_endpoint(rng); - NodeRecord { address, tcp_port, udp_port, id: NodeId::random() } + NodeRecord { address, tcp_port, udp_port, id: PeerId::random() } } pub fn rng_ipv6_record(rng: &mut impl RngCore) -> NodeRecord { let mut ip = [0u8; 16]; rng.fill_bytes(&mut ip); let address = IpAddr::V6(ip.into()); - NodeRecord { address, tcp_port: rng.gen(), udp_port: rng.gen(), id: NodeId::random() } + NodeRecord { address, tcp_port: rng.gen(), udp_port: rng.gen(), id: PeerId::random() } } pub fn rng_ipv4_record(rng: &mut impl RngCore) -> NodeRecord { let mut ip = [0u8; 4]; rng.fill_bytes(&mut ip); let address = IpAddr::V4(ip.into()); - NodeRecord { address, tcp_port: rng.gen(), udp_port: rng.gen(), id: NodeId::random() } + NodeRecord { address, tcp_port: rng.gen(), udp_port: rng.gen(), id: PeerId::random() } } pub fn rng_message(rng: &mut impl RngCore) -> Message { @@ -256,7 +256,7 @@ pub fn rng_message(rng: &mut impl RngCore) -> Message { expire: rng.gen(), }), 2 => Message::Pong(Pong { to: rng_endpoint(rng), echo: H256::random(), expire: rng.gen() }), - 3 => Message::FindNode(FindNode { id: NodeId::random(), expire: rng.gen() }), + 3 => Message::FindNode(FindNode { id: PeerId::random(), expire: rng.gen() }), 4 => { let num: usize = rng.gen_range(1..=SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS); Message::Neighbours(Neighbours { diff --git a/crates/net/discv4/src/node.rs b/crates/net/discv4/src/node.rs index fa9dbab3d9..035ff7902c 100644 --- a/crates/net/discv4/src/node.rs +++ b/crates/net/discv4/src/node.rs @@ -1,4 +1,4 @@ -use crate::{proto::Octets, NodeId}; +use crate::{proto::Octets, PeerId}; use bytes::{Buf, BufMut}; use generic_array::GenericArray; use reth_primitives::keccak256; @@ -13,10 +13,10 @@ use url::{Host, Url}; /// The key type for the table. 
#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub(crate) struct NodeKey(pub(crate) NodeId); +pub(crate) struct NodeKey(pub(crate) PeerId); -impl From for NodeKey { - fn from(value: NodeId) -> Self { +impl From for NodeKey { + fn from(value: PeerId) -> Self { NodeKey(value) } } @@ -29,9 +29,9 @@ impl From for discv5::Key { } } -/// Converts a `NodeId` into the required `Key` type for the table +/// Converts a `PeerId` into the required `Key` type for the table #[inline] -pub(crate) fn kad_key(node: NodeId) -> discv5::Key { +pub(crate) fn kad_key(node: PeerId) -> discv5::Key { discv5::kbucket::Key::from(NodeKey::from(node)) } @@ -45,20 +45,20 @@ pub struct NodeRecord { /// UDP discovery port. pub udp_port: u16, /// Public key of the discovery service - pub id: NodeId, + pub id: PeerId, } impl NodeRecord { /// Derive the [`NodeRecord`] from the secret key and addr pub fn from_secret_key(addr: SocketAddr, sk: &SecretKey) -> Self { let pk = secp256k1::PublicKey::from_secret_key(SECP256K1, sk); - let id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]); + let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); Self::new(addr, id) } /// Creates a new record #[allow(unused)] - pub(crate) fn new(addr: SocketAddr, id: NodeId) -> Self { + pub(crate) fn new(addr: SocketAddr, id: PeerId) -> Self { Self { address: addr.ip(), tcp_port: addr.port(), udp_port: addr.port(), id } } @@ -112,7 +112,7 @@ impl FromStr for NodeRecord { let id = url .username() - .parse::() + .parse::() .map_err(|e| NodeRecordParseError::InvalidId(e.to_string()))?; Ok(Self { address, id, tcp_port: port, udp_port: port }) @@ -126,7 +126,7 @@ impl Encodable for NodeRecord { octets: Octets, udp_port: u16, tcp_port: u16, - id: NodeId, + id: PeerId, } let octets = match self.address { @@ -185,7 +185,7 @@ mod tests { address: IpAddr::V4(ip.into()), tcp_port: rng.gen(), udp_port: rng.gen(), - id: NodeId::random(), + id: PeerId::random(), }; let mut buf = BytesMut::new(); @@ -206,7 +206,7 @@ mod tests { address: IpAddr::V6(ip.into()), tcp_port: rng.gen(), udp_port: rng.gen(), - id: NodeId::random(), + id: PeerId::random(), }; let mut buf = BytesMut::new(); diff --git a/crates/net/discv4/src/proto.rs b/crates/net/discv4/src/proto.rs index 472346f520..8a6945aafe 100644 --- a/crates/net/discv4/src/proto.rs +++ b/crates/net/discv4/src/proto.rs @@ -1,6 +1,6 @@ #![allow(missing_docs)] -use crate::{error::DecodePacketError, node::NodeRecord, NodeId, MAX_PACKET_SIZE, MIN_PACKET_SIZE}; +use crate::{error::DecodePacketError, node::NodeRecord, PeerId, MAX_PACKET_SIZE, MIN_PACKET_SIZE}; use bytes::{Buf, BufMut, Bytes, BytesMut}; use reth_primitives::{keccak256, H256}; use reth_rlp::{Decodable, DecodeError, Encodable, Header}; @@ -136,7 +136,7 @@ impl Message { let msg = secp256k1::Message::from_slice(keccak256(&packet[97..]).as_bytes())?; let pk = SECP256K1.recover_ecdsa(&msg, &recoverable_sig)?; - let node_id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]); + let node_id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); let msg_type = packet[97]; let payload = &mut &packet[98..]; @@ -156,7 +156,7 @@ impl Message { #[derive(Debug)] pub struct Packet { pub msg: Message, - pub node_id: NodeId, + pub node_id: PeerId, pub hash: H256, } @@ -223,7 +223,7 @@ impl From for NodeEndpoint { /// A [FindNode packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#findnode-packet-0x03).). 
#[derive(Clone, Copy, Debug, Eq, PartialEq, RlpEncodable, RlpDecodable)] pub struct FindNode { - pub id: NodeId, + pub id: PeerId, pub expire: u64, } @@ -499,7 +499,7 @@ mod tests { for _ in 0..100 { let msg = rng_message(&mut rng); let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng); - let sender_id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]); + let sender_id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]); let (buf, _) = msg.encode(&secret_key); diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index e754bec021..ff27c7e9e1 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -17,6 +17,9 @@ reth-ecies = { path = "../ecies" } reth-primitives = { path = "../../primitives" } reth-rlp = { path = "../../common/rlp", features = ["alloc", "derive", "std", "ethereum-types", "smol_str"] } +# used for Chain and builders +ethers-core = { git = "https://github.com/gakonst/ethers-rs", default-features = false } + #used for forkid crc = "1" maplit = "1" diff --git a/crates/net/eth-wire/src/builder.rs b/crates/net/eth-wire/src/builder.rs new file mode 100644 index 0000000000..f1c0dd6af1 --- /dev/null +++ b/crates/net/eth-wire/src/builder.rs @@ -0,0 +1,145 @@ +//! Builder structs for [`Status`](crate::types::Status) and [`Hello`](crate::types::Hello) +//! messages. + +use crate::{ + capability::Capability, + p2pstream::{HelloMessage, ProtocolVersion}, + EthVersion, Status, +}; +use reth_primitives::{Chain, ForkId, PeerId, H256, U256}; + +/// Builder for [`Status`](crate::types::Status) messages. +/// +/// # Example +/// ``` +/// use reth_eth_wire::EthVersion; +/// use reth_primitives::{Chain, U256, H256, MAINNET_GENESIS, Hardfork}; +/// use reth_eth_wire::types::Status; +/// +/// // this is just an example status message! +/// let status = Status::builder() +/// .version(EthVersion::Eth66.into()) +/// .chain(Chain::Named(ethers_core::types::Chain::Mainnet)) +/// .total_difficulty(U256::from(100)) +/// .blockhash(H256::from(MAINNET_GENESIS)) +/// .genesis(H256::from(MAINNET_GENESIS)) +/// .forkid(Hardfork::Latest.fork_id()) +/// .build(); +/// +/// assert_eq!( +/// status, +/// Status { +/// version: EthVersion::Eth66.into(), +/// chain: Chain::Named(ethers_core::types::Chain::Mainnet), +/// total_difficulty: U256::from(100), +/// blockhash: H256::from(MAINNET_GENESIS), +/// genesis: H256::from(MAINNET_GENESIS), +/// forkid: Hardfork::Latest.fork_id(), +/// } +/// ); +/// ``` +#[derive(Debug, Default)] +pub struct StatusBuilder { + status: Status, +} + +impl StatusBuilder { + /// Consumes the type and creates the actual [`Status`](crate::types::Status) message. + pub fn build(self) -> Status { + self.status + } + + /// Sets the protocol version. + pub fn version(mut self, version: u8) -> Self { + self.status.version = version; + self + } + + /// Sets the chain id. + pub fn chain(mut self, chain: Chain) -> Self { + self.status.chain = chain; + self + } + + /// Sets the total difficulty. + pub fn total_difficulty(mut self, total_difficulty: U256) -> Self { + self.status.total_difficulty = total_difficulty; + self + } + + /// Sets the block hash. + pub fn blockhash(mut self, blockhash: H256) -> Self { + self.status.blockhash = blockhash; + self + } + + /// Sets the genesis hash. + pub fn genesis(mut self, genesis: H256) -> Self { + self.status.genesis = genesis; + self + } + + /// Sets the fork id. 
+ pub fn forkid(mut self, forkid: ForkId) -> Self { + self.status.forkid = forkid; + self + } +} + +/// Builder for [`Hello`](crate::types::Hello) messages. +pub struct HelloBuilder { + hello: HelloMessage, +} + +impl HelloBuilder { + /// Creates a new [`HelloBuilder`](crate::builder::HelloBuilder) with default [`Hello`] values, + /// and a `PeerId` corresponding to the given pubkey. + pub fn new(pubkey: PeerId) -> Self { + Self { + hello: HelloMessage { + protocol_version: ProtocolVersion::V5, + // TODO: proper client versioning + client_version: "Ethereum/1.0.0".to_string(), + capabilities: vec![EthVersion::Eth67.into()], + // TODO: default port config + port: 30303, + id: pubkey, + }, + } + } + + /// Consumes the type and creates the actual [`Hello`](crate::types::Hello) message. + pub fn build(self) -> HelloMessage { + self.hello + } + + /// Sets the protocol version. + pub fn protocol_version(mut self, protocol_version: ProtocolVersion) -> Self { + self.hello.protocol_version = protocol_version; + self + } + + /// Sets the client version. + pub fn client_version(mut self, client_version: String) -> Self { + self.hello.client_version = client_version; + self + } + + /// Sets the capabilities. + pub fn capabilities(mut self, capabilities: Vec) -> Self { + self.hello.capabilities = capabilities; + self + } + + /// Sets the port. + pub fn port(mut self, port: u16) -> Self { + self.hello.port = port; + self + } + + /// Sets the node id. + pub fn id(mut self, id: PeerId) -> Self { + self.hello.id = id; + self + } +} diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs index 3ee3c9803c..192afd3a29 100644 --- a/crates/net/eth-wire/src/capability.rs +++ b/crates/net/eth-wire/src/capability.rs @@ -91,6 +91,16 @@ impl Capabilities { } } +impl From> for Capabilities { + fn from(value: Vec) -> Self { + Self { + eth_66: value.iter().any(Capability::is_eth_v66), + eth_67: value.iter().any(Capability::is_eth_v67), + inner: value, + } + } +} + impl Encodable for Capabilities { fn encode(&self, out: &mut dyn BufMut) { self.inner.encode(out) diff --git a/crates/net/eth-wire/src/error.rs b/crates/net/eth-wire/src/error.rs index dd7df28a33..929fbae225 100644 --- a/crates/net/eth-wire/src/error.rs +++ b/crates/net/eth-wire/src/error.rs @@ -1,9 +1,9 @@ //! Error cases when handling a [`crate::EthStream`] use std::io; -use reth_primitives::{Chain, H256}; +use reth_primitives::{Chain, ValidationError, H256}; -use crate::{capability::SharedCapabilityError, types::forkid::ValidationError}; +use crate::capability::SharedCapabilityError; /// Errors when sending/receiving messages #[derive(thiserror::Error, Debug)] diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index e7fbd205ee..775ef261e4 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -1,10 +1,11 @@ use crate::{ error::{EthStreamError, HandshakeError}, - types::{forkid::ForkFilter, EthMessage, ProtocolMessage, Status}, + types::{EthMessage, ProtocolMessage, Status}, }; use bytes::{Bytes, BytesMut}; use futures::{ready, Sink, SinkExt, StreamExt}; use pin_project::pin_project; +use reth_primitives::ForkFilter; use reth_rlp::{Decodable, Encodable}; use std::{ pin::Pin, @@ -117,6 +118,7 @@ where /// An `EthStream` wraps over any `Stream` that yields bytes and makes it /// compatible with eth-networking protocol messages, which get RLP encoded/decoded. 
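With this change `ActiveSession::conn` is no longer a bare `ECIESStream` but a fully layered stack: RLP-encoded eth messages on top, `p2p` capability multiplexing underneath, and ECIES framing over TCP at the bottom. A type alias spelling out that nesting (the alias itself is illustrative, not introduced by this diff):

```
use reth_ecies::stream::ECIESStream;
use reth_eth_wire::{EthStream, P2PStream};
use tokio::net::TcpStream;

/// The authenticated connection type produced by a successful handshake:
/// eth messages -> p2p multiplexing -> ECIES encryption -> TCP.
type AuthenticatedConn = EthStream<P2PStream<ECIESStream<TcpStream>>>;
```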
#[pin_project] +#[derive(Debug)] pub struct EthStream { #[pin] inner: S, @@ -203,7 +205,7 @@ where mod tests { use crate::{ p2pstream::{HelloMessage, ProtocolVersion, UnauthedP2PStream}, - types::{broadcast::BlockHashNumber, forkid::ForkFilter, EthMessage, Status}, + types::{broadcast::BlockHashNumber, EthMessage, Status}, EthStream, PassthroughCodec, }; use futures::{SinkExt, StreamExt}; @@ -214,7 +216,7 @@ mod tests { use crate::{capability::Capability, types::EthVersion}; use ethers_core::types::Chain; - use reth_primitives::{H256, U256}; + use reth_primitives::{ForkFilter, H256, U256}; use super::UnauthedEthStream; diff --git a/crates/net/eth-wire/src/lib.rs b/crates/net/eth-wire/src/lib.rs index 2404aa9eaf..b770044fa3 100644 --- a/crates/net/eth-wire/src/lib.rs +++ b/crates/net/eth-wire/src/lib.rs @@ -11,12 +11,17 @@ pub use tokio_util::codec::{ LengthDelimitedCodec as PassthroughCodec, LengthDelimitedCodecError as PassthroughCodecError, }; +pub mod builder; pub mod capability; pub mod error; mod ethstream; mod p2pstream; mod pinger; +pub use builder::*; pub mod types; pub use types::*; -pub use ethstream::{EthStream, UnauthedEthStream}; +pub use crate::{ + ethstream::{EthStream, UnauthedEthStream}, + p2pstream::{HelloMessage, P2PStream, UnauthedP2PStream}, +}; diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index dc71dd699c..68b0c2475b 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -138,6 +138,7 @@ where /// A P2PStream wraps over any `Stream` that yields bytes and makes it compatible with `p2p` /// protocol messages. #[pin_project] +#[derive(Debug)] pub struct P2PStream { #[pin] inner: S, diff --git a/crates/net/eth-wire/src/types/mod.rs b/crates/net/eth-wire/src/types/mod.rs index 6e84030022..f330958ee4 100644 --- a/crates/net/eth-wire/src/types/mod.rs +++ b/crates/net/eth-wire/src/types/mod.rs @@ -6,8 +6,6 @@ pub use status::Status; pub mod version; pub use version::EthVersion; -pub mod forkid; - pub mod message; pub use message::{EthMessage, EthMessageID, ProtocolMessage}; diff --git a/crates/net/eth-wire/src/types/status.rs b/crates/net/eth-wire/src/types/status.rs index 6b96816183..33b3634d52 100644 --- a/crates/net/eth-wire/src/types/status.rs +++ b/crates/net/eth-wire/src/types/status.rs @@ -1,5 +1,6 @@ -use super::forkid::ForkId; -use reth_primitives::{Chain, H256, U256}; +use crate::{EthVersion, StatusBuilder}; + +use reth_primitives::{Chain, ForkId, Hardfork, H256, MAINNET_GENESIS, U256}; use reth_rlp::{RlpDecodable, RlpEncodable}; use std::fmt::{Debug, Display}; @@ -37,6 +38,13 @@ pub struct Status { pub forkid: ForkId, } +impl Status { + /// Helper for returning a builder for the status message. 
+ pub fn builder() -> StatusBuilder { + Default::default() + } +} + impl Display for Status { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let hexed_blockhash = hex::encode(self.blockhash); @@ -84,18 +92,28 @@ impl Debug for Status { } } +impl Default for Status { + fn default() -> Self { + Status { + version: EthVersion::Eth67 as u8, + chain: Chain::Named(ethers_core::types::Chain::Mainnet), + total_difficulty: U256::zero(), + blockhash: MAINNET_GENESIS, + genesis: MAINNET_GENESIS, + forkid: Hardfork::Homestead.fork_id(), + } + } +} + #[cfg(test)] mod tests { use ethers_core::types::Chain as NamedChain; use hex_literal::hex; - use reth_primitives::{Chain, H256, U256}; + use reth_primitives::{Chain, ForkHash, ForkId, H256, U256}; use reth_rlp::{Decodable, Encodable}; use std::str::FromStr; - use crate::types::{ - forkid::{ForkHash, ForkId}, - EthVersion, Status, - }; + use crate::types::{EthVersion, Status}; #[test] fn encode_eth_status_message() { diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index a73f08b7d7..4eb0641cec 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -1,7 +1,6 @@ use crate::{peers::PeersConfig, session::SessionsConfig}; use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_PORT}; -use reth_eth_wire::forkid::ForkId; -use reth_primitives::{Chain, H256}; +use reth_primitives::{Chain, ForkId, H256}; use secp256k1::SecretKey; use std::{ net::{Ipv4Addr, SocketAddr, SocketAddrV4}, @@ -76,8 +75,12 @@ pub struct NetworkConfigBuilder { peers_config: Option, /// How to configure the sessions manager sessions_config: Option, + /// A fork identifier as defined by EIP-2124. + /// Serves as the chain compatibility identifier. fork_id: Option, + /// The network's chain id chain: Chain, + /// Network genesis hash genesis_hash: H256, } diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index 65001938de..b2eb288f5f 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -1,8 +1,9 @@ //! Discovery support for the network. -use crate::{error::NetworkError, NodeId}; +use crate::error::NetworkError; use futures::StreamExt; use reth_discv4::{Discv4, Discv4Config, NodeRecord, TableUpdate}; +use reth_primitives::PeerId; use secp256k1::SecretKey; use std::{ collections::{hash_map::Entry, HashMap, VecDeque}, @@ -19,7 +20,7 @@ pub struct Discovery { /// All nodes discovered via discovery protocol. /// /// These nodes can be ephemeral and are updated via the discovery protocol. - discovered_nodes: HashMap, + discovered_nodes: HashMap, /// Local ENR of the discovery service. local_enr: NodeRecord, /// Handler to interact with the Discovery v4 service @@ -66,12 +67,12 @@ impl Discovery { } /// Returns the id with which the local identifies itself in the network - pub(crate) fn local_id(&self) -> NodeId { + pub(crate) fn local_id(&self) -> PeerId { self.local_enr.id } /// Manually adds an address to the set. - pub(crate) fn add_known_address(&mut self, node_id: NodeId, addr: SocketAddr) { + pub(crate) fn add_known_address(&mut self, node_id: PeerId, addr: SocketAddr) { self.on_discv4_update(TableUpdate::Added(NodeRecord { address: addr.ip(), tcp_port: addr.port(), @@ -81,7 +82,7 @@ impl Discovery { } /// Returns all nodes we know exist in the network. 
- pub fn known_nodes(&mut self) -> &HashMap { + pub fn known_nodes(&mut self) -> &HashMap { &self.discovered_nodes } @@ -131,7 +132,7 @@ impl Discovery { /// Events produced by the [`Discovery`] manager. pub enum DiscoveryEvent { /// A new node was discovered - Discovered(NodeId, SocketAddr), + Discovered(PeerId, SocketAddr), } #[cfg(test)] diff --git a/crates/net/network/src/fetch.rs b/crates/net/network/src/fetch.rs index 7219681ff1..c870b61491 100644 --- a/crates/net/network/src/fetch.rs +++ b/crates/net/network/src/fetch.rs @@ -1,10 +1,10 @@ //! Fetch data from the network. -use crate::{message::BlockRequest, NodeId}; +use crate::message::BlockRequest; use futures::StreamExt; use reth_eth_wire::{BlockBody, EthMessage}; use reth_interfaces::p2p::{error::RequestResult, headers::client::HeadersRequest}; -use reth_primitives::{Header, H256, U256}; +use reth_primitives::{Header, PeerId, H256, U256}; use std::{ collections::{HashMap, VecDeque}, task::{Context, Poll}, @@ -19,9 +19,9 @@ use tokio_stream::wrappers::UnboundedReceiverStream; /// peers and sends the response once ready. pub struct StateFetcher { /// Currently active [`GetBlockHeaders`] requests - inflight_headers_requests: HashMap>>>, + inflight_headers_requests: HashMap>>>, /// The list of available peers for requests. - peers: HashMap, + peers: HashMap, /// Requests queued for processing queued_requests: VecDeque, /// Receiver for new incoming download requests @@ -34,13 +34,13 @@ pub struct StateFetcher { impl StateFetcher { /// Invoked when connected to a new peer. - pub(crate) fn new_connected_peer(&mut self, _node_id: NodeId, _best_hash: H256) {} + pub(crate) fn new_connected_peer(&mut self, _node_id: PeerId, _best_hash: H256) {} /// Invoked when an active session was closed. - pub(crate) fn on_session_closed(&mut self, _peer: &NodeId) {} + pub(crate) fn on_session_closed(&mut self, _peer: &PeerId) {} /// Invoked when an active session is about to be disconnected. - pub(crate) fn on_pending_disconnect(&mut self, _peer: &NodeId) {} + pub(crate) fn on_pending_disconnect(&mut self, _peer: &PeerId) {} /// Returns the next action to return fn poll_action(&mut self) -> Option { @@ -94,7 +94,7 @@ impl StateFetcher { /// Called on a `GetBlockHeaders` response from a peer pub(crate) fn on_block_headers_response( &mut self, - _peer: NodeId, + _peer: PeerId, _res: RequestResult>, ) -> Option { None @@ -103,7 +103,7 @@ impl StateFetcher { /// Called on a `GetBlockBodies` response from a peer pub(crate) fn on_block_bodies_response( &mut self, - _peer: NodeId, + _peer: PeerId, _res: RequestResult>, ) -> Option { None @@ -189,7 +189,7 @@ enum DownloadRequest { pub(crate) enum FetchAction { /// Dispatch an eth request to the given peer. EthRequest { - node_id: NodeId, + node_id: PeerId, /// The request to send request: EthMessage, }, @@ -201,8 +201,8 @@ pub(crate) enum FetchAction { #[derive(Debug)] pub(crate) enum BlockResponseOutcome { /// Continue with another request to the peer. 
- Request(NodeId, BlockRequest), + Request(PeerId, BlockRequest), /// How to handle a bad response // TODO this should include some form of reputation change - BadResponse(NodeId), + BadResponse(PeerId), } diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index 05ad4c2d7a..45b1c70979 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -5,7 +5,7 @@ attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) ))] // TODO remove later -#![allow(dead_code)] +#![allow(dead_code, clippy::too_many_arguments)] //! reth P2P networking. //! @@ -29,9 +29,6 @@ mod state; mod swarm; mod transactions; -/// Identifier for a unique node -pub type NodeId = reth_discv4::NodeId; - pub use config::NetworkConfig; pub use manager::NetworkManager; pub use network::NetworkHandle; diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 9aa2b92901..340efe1137 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -25,7 +25,6 @@ use crate::{ session::SessionManager, state::NetworkState, swarm::{Swarm, SwarmEvent}, - NodeId, }; use futures::{Future, StreamExt}; use parking_lot::Mutex; @@ -34,6 +33,7 @@ use reth_eth_wire::{ EthMessage, }; use reth_interfaces::provider::BlockProvider; +use reth_primitives::PeerId; use std::{ net::SocketAddr, pin::Pin, @@ -88,8 +88,8 @@ pub struct NetworkManager { /// This is updated via internal events and shared via `Arc` with the [`NetworkHandle`] /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. num_active_peers: Arc, - /// Local copy of the `NodeId` of the local node. - local_node_id: NodeId, + /// Local copy of the `PeerId` of the local node. + local_node_id: PeerId, } // === impl NetworkManager === @@ -163,7 +163,7 @@ where /// Event hook for an unexpected message from the peer. fn on_invalid_message( &self, - node_id: NodeId, + node_id: PeerId, _capabilities: Arc, _message: CapabilityMessage, ) { @@ -172,7 +172,7 @@ where } /// Handles a received [`CapabilityMessage`] from the peer. - fn on_capability_message(&mut self, _node_id: NodeId, msg: CapabilityMessage) { + fn on_capability_message(&mut self, _node_id: PeerId, msg: CapabilityMessage) { match msg { CapabilityMessage::Eth(eth) => { match eth { @@ -299,7 +299,7 @@ where /// Events emitted by the network that are of interest for subscribers. #[derive(Debug, Clone)] pub enum NetworkEvent { - EthMessage { node_id: NodeId, message: EthMessage }, + EthMessage { node_id: PeerId, message: EthMessage }, } /// Bundles all listeners for [`NetworkEvent`]s. diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index 43dc28461d..3aaaca3cc5 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -11,10 +11,9 @@ use reth_eth_wire::{ }; use std::task::{ready, Context, Poll}; -use crate::NodeId; use reth_eth_wire::capability::CapabilityMessage; use reth_interfaces::p2p::error::RequestResult; -use reth_primitives::{Header, Receipt, TransactionSigned}; +use reth_primitives::{Header, PeerId, Receipt, TransactionSigned}; use tokio::sync::{mpsc, mpsc::error::TrySendError, oneshot}; /// Represents all messages that can be sent to a peer session @@ -180,7 +179,7 @@ impl PeerResponseResult { #[derive(Debug, Clone)] pub struct PeerRequestSender { /// id of the remote node. - pub(crate) peer: NodeId, + pub(crate) peer: PeerId, /// The Sender half connected to a session. 
pub(crate) to_session_tx: mpsc::Sender, } diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 76b9034600..c38986e94d 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -1,6 +1,6 @@ -use crate::{manager::NetworkEvent, peers::PeersHandle, NodeId}; +use crate::{manager::NetworkEvent, peers::PeersHandle}; use parking_lot::Mutex; -use reth_primitives::{H256, U256}; +use reth_primitives::{PeerId, H256, U256}; use std::{ net::SocketAddr, sync::{atomic::AtomicUsize, Arc}, @@ -24,7 +24,7 @@ impl NetworkHandle { num_active_peers: Arc, listener_address: Arc>, to_manager_tx: UnboundedSender, - local_node_id: NodeId, + local_node_id: PeerId, peers: PeersHandle, ) -> Self { let inner = NetworkInner { @@ -57,7 +57,7 @@ struct NetworkInner { /// The local address that accepts incoming connections. listener_address: Arc>, /// The identifier used by this node. - local_node_id: NodeId, + local_node_id: PeerId, /// Access to the all the nodes peers: PeersHandle, // TODO need something to access } diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index d11954844c..9592f215e5 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -1,5 +1,5 @@ use futures::StreamExt; -use reth_discv4::NodeId; +use reth_primitives::PeerId; use std::{ collections::{hash_map::Entry, HashMap, VecDeque}, net::SocketAddr, @@ -32,7 +32,7 @@ pub struct PeersHandle { /// The [`PeersManager`] will be notified on peer related changes pub(crate) struct PeersManager { /// All peers known to the network - peers: HashMap, + peers: HashMap, /// Copy of the receiver half, so new [`PeersHandle`] can be created on demand. manager_tx: mpsc::UnboundedSender, /// Receiver half of the command channel. @@ -74,7 +74,7 @@ impl PeersManager { /// /// If the reputation of the peer is below the `BANNED_REPUTATION` threshold, a disconnect will /// be scheduled. - pub(crate) fn on_active_session(&mut self, peer_id: NodeId, addr: SocketAddr) { + pub(crate) fn on_active_session(&mut self, peer_id: PeerId, addr: SocketAddr) { match self.peers.entry(peer_id) { Entry::Occupied(mut entry) => { let value = entry.get_mut(); @@ -96,7 +96,7 @@ impl PeersManager { /// Called when a session to a peer was disconnected. /// /// Accepts an additional [`ReputationChange`] value to apply to the peer. - pub(crate) fn on_disconnected(&mut self, peer: NodeId, reputation_change: ReputationChange) { + pub(crate) fn on_disconnected(&mut self, peer: PeerId, reputation_change: ReputationChange) { if let Some(mut peer) = self.peers.get_mut(&peer) { self.connection_info.decr_state(peer.state); peer.state = PeerConnectionState::Idle; @@ -108,7 +108,7 @@ impl PeersManager { /// /// If the peer already exists, then the address will e updated. If the addresses differ, the /// old address is returned - pub(crate) fn add_discovered_node(&mut self, peer_id: NodeId, addr: SocketAddr) { + pub(crate) fn add_discovered_node(&mut self, peer_id: PeerId, addr: SocketAddr) { match self.peers.entry(peer_id) { Entry::Occupied(mut entry) => { let node = entry.get_mut(); @@ -121,7 +121,7 @@ impl PeersManager { } /// Removes the tracked node from the set. 
- pub(crate) fn remove_discovered_node(&mut self, peer_id: NodeId) { + pub(crate) fn remove_discovered_node(&mut self, peer_id: PeerId) { if let Some(entry) = self.peers.remove(&peer_id) { if entry.state.is_connected() { self.connection_info.decr_state(entry.state); @@ -133,11 +133,11 @@ impl PeersManager { /// Returns the idle peer with the highest reputation. /// /// Returns `None` if no peer is available. - fn best_unconnected(&mut self) -> Option<(NodeId, &mut Peer)> { + fn best_unconnected(&mut self) -> Option<(PeerId, &mut Peer)> { self.peers .iter_mut() .filter(|(_, peer)| peer.state.is_unconnected()) - .fold(None::<(&NodeId, &mut Peer)>, |mut best_peer, candidate| { + .fold(None::<(&PeerId, &mut Peer)>, |mut best_peer, candidate| { if let Some(best_peer) = best_peer.take() { if best_peer.1.reputation >= candidate.1.reputation { return Some(best_peer) @@ -331,14 +331,14 @@ pub(crate) enum PeerCommand { /// Command for manually add Add { /// Identifier of the peer. - peer_id: NodeId, + peer_id: PeerId, /// The address of the peer addr: SocketAddr, }, /// Remove a peer from the set /// /// If currently connected this will disconnect the sessin - Remove(NodeId), + Remove(PeerId), } /// Actions the peer manager can trigger. @@ -347,17 +347,17 @@ pub enum PeerAction { /// Start a new connection to a peer. Connect { /// The peer to connect to. - peer_id: NodeId, + peer_id: PeerId, /// Where to reach the node remote_addr: SocketAddr, }, /// Disconnect an existing connection. - Disconnect { peer_id: NodeId }, + Disconnect { peer_id: PeerId }, /// Disconnect an existing incoming connection, because the peers reputation is below the /// banned threshold. DisconnectBannedIncoming { /// Peer id of the established connection. - peer_id: NodeId, + peer_id: PeerId, }, } diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 91db0ef2c9..a8a1519dd2 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -6,13 +6,16 @@ use crate::{ handle::{ActiveSessionMessage, SessionCommand}, SessionId, }, - NodeId, }; use fnv::FnvHashMap; use futures::{stream::Fuse, Sink, Stream}; use pin_project::pin_project; use reth_ecies::stream::ECIESStream; -use reth_eth_wire::capability::{Capabilities, CapabilityMessage}; +use reth_eth_wire::{ + capability::{Capabilities, CapabilityMessage}, + EthStream, P2PStream, +}; +use reth_primitives::PeerId; use std::{ collections::VecDeque, future::Future, @@ -31,9 +34,9 @@ pub(crate) struct ActiveSession { pub(crate) next_id: usize, /// The underlying connection. #[pin] - pub(crate) conn: ECIESStream, + pub(crate) conn: EthStream>>, /// Identifier of the node we're connected to. - pub(crate) remote_node_id: NodeId, + pub(crate) remote_node_id: PeerId, /// All capabilities the peer announced pub(crate) remote_capabilities: Arc, /// Internal identifier of this session diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index fce7b5d038..f7cd579ca6 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -1,13 +1,12 @@ //! 
Session handles -use crate::{ - session::{Direction, SessionId}, - NodeId, -}; +use crate::session::{Direction, SessionId}; use reth_ecies::{stream::ECIESStream, ECIESError}; use reth_eth_wire::{ capability::{Capabilities, CapabilityMessage}, - Status, + error::EthStreamError, + EthStream, P2PStream, Status, }; +use reth_primitives::PeerId; use std::{io, net::SocketAddr, sync::Arc, time::Instant}; use tokio::{ net::TcpStream, @@ -33,7 +32,7 @@ pub(crate) struct ActiveSessionHandle { /// The assigned id for this session pub(crate) session_id: SessionId, /// The identifier of the remote peer - pub(crate) remote_id: NodeId, + pub(crate) remote_id: PeerId, /// The timestamp when the session has been established. pub(crate) established: Instant, /// Announced capabilities of the peer. @@ -65,23 +64,24 @@ pub(crate) enum PendingSessionEvent { Established { session_id: SessionId, remote_addr: SocketAddr, - node_id: NodeId, + /// The remote node's public key + node_id: PeerId, capabilities: Arc, status: Status, - conn: ECIESStream, + conn: EthStream>>, }, /// Handshake unsuccessful, session was disconnected. Disconnected { remote_addr: SocketAddr, session_id: SessionId, direction: Direction, - error: Option, + error: Option, }, /// Thrown when unable to establish a [`TcpStream`]. OutgoingConnectionError { remote_addr: SocketAddr, session_id: SessionId, - node_id: NodeId, + node_id: PeerId, error: io::Error, }, /// Thrown when authentication via Ecies failed. @@ -101,18 +101,18 @@ pub(crate) enum SessionCommand { #[derive(Debug)] pub(crate) enum ActiveSessionMessage { /// Session disconnected. - Closed { node_id: NodeId, remote_addr: SocketAddr }, + Closed { node_id: PeerId, remote_addr: SocketAddr }, /// A session received a valid message via RLPx. ValidMessage { /// Identifier of the remote peer. - node_id: NodeId, + node_id: PeerId, /// Message received from the peer. message: CapabilityMessage, }, /// Received a message that does not match the announced capabilities of the peer. InvalidMessage { /// Identifier of the remote peer. - node_id: NodeId, + node_id: PeerId, /// Announced capabilities of the remote peer. capabilities: Arc, /// Message received from the peer. diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index aa92fab76c..c3faecba2a 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -1,21 +1,20 @@ //! Support for handling peer sessions. pub use crate::message::PeerRequestSender; -use crate::{ - session::{ - active::ActiveSession, - handle::{ - ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle, - }, +use crate::session::{ + active::ActiveSession, + handle::{ + ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle, }, - NodeId, }; use fnv::FnvHashMap; use futures::{future::Either, io, FutureExt, StreamExt}; -use reth_ecies::{stream::ECIESStream, ECIESError}; +use reth_ecies::stream::ECIESStream; use reth_eth_wire::{ capability::{Capabilities, CapabilityMessage}, - Status, UnauthedEthStream, + error::EthStreamError, + HelloBuilder, HelloMessage, Status, StatusBuilder, UnauthedEthStream, UnauthedP2PStream, }; +use reth_primitives::{ForkFilter, Hardfork, PeerId}; use secp256k1::{SecretKey, SECP256K1}; use std::{ collections::HashMap, @@ -48,7 +47,13 @@ pub(crate) struct SessionManager { /// The secret key used for authenticating sessions. 
     secret_key: SecretKey,
     /// The node id of node
-    node_id: NodeId,
+    node_id: PeerId,
+    /// The `Status` message to send to peers.
+    status: Status,
+    /// The `Hello` message to send to peers.
+    hello: HelloMessage,
+    /// The [`ForkFilter`] used to validate the peer's `Status` message.
+    fork_filter: ForkFilter,
     /// Size of the command buffer per session.
     session_command_buffer: usize,
     /// All spawned session tasks.
@@ -61,7 +66,7 @@ pub(crate) struct SessionManager {
     /// session is authenticated, it can be moved to the `active_session` set.
     pending_sessions: FnvHashMap<SessionId, PendingSessionHandle>,
     /// All active sessions that are ready to exchange messages.
-    active_sessions: HashMap<NodeId, ActiveSessionHandle>,
+    active_sessions: HashMap<PeerId, ActiveSessionHandle>,
     /// The original Sender half of the [`PendingSessionEvent`] channel.
     ///
     /// When a new (pending) session is created, the corresponding [`PendingSessionHandle`] will
@@ -87,12 +92,21 @@ impl SessionManager {
         let (active_session_tx, active_session_rx) = mpsc::channel(config.session_event_buffer);
 
         let pk = secret_key.public_key(SECP256K1);
-        let node_id = NodeId::from_slice(&pk.serialize_uncompressed()[1..]);
+        let node_id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);
+
+        // TODO: make sure this is the right place to put these builders - maybe per-Network rather
+        // than per-Session?
+        let hello = HelloBuilder::new(node_id).build();
+        let status = StatusBuilder::default().build();
+        let fork_filter = Hardfork::Frontier.fork_filter();
 
         Self {
             next_id: 0,
             secret_key,
             node_id,
+            status,
+            hello,
+            fork_filter,
             session_command_buffer: config.session_command_buffer,
             spawned_tasks: Default::default(),
             pending_sessions: Default::default(),
@@ -139,6 +153,9 @@ impl SessionManager {
             pending_events,
             remote_addr,
             self.secret_key,
+            self.hello.clone(),
+            self.status,
+            self.fork_filter.clone(),
         ));
 
         let handle = PendingSessionHandle { disconnect_tx };
@@ -147,7 +164,7 @@ impl SessionManager {
     }
 
     /// Starts a new pending session from the local node to the given remote node.
-    pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_node_id: NodeId) {
+    pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_node_id: PeerId) {
        let session_id = self.next_id();
         let (disconnect_tx, disconnect_rx) = oneshot::channel();
         let pending_events = self.pending_sessions_tx.clone();
@@ -158,6 +175,9 @@ impl SessionManager {
             remote_addr,
             remote_node_id,
             self.secret_key,
+            self.hello.clone(),
+            self.status,
+            self.fork_filter.clone(),
         ));
 
         let handle = PendingSessionHandle { disconnect_tx };
@@ -168,7 +188,7 @@ impl SessionManager {
     ///
     /// This will trigger the disconnect on the session task to gracefully terminate. The result
     /// will be picked up by the receiver.
-    pub(crate) fn disconnect(&self, node: NodeId) {
+    pub(crate) fn disconnect(&self, node: PeerId) {
         if let Some(session) = self.active_sessions.get(&node) {
             session.disconnect();
         }
@@ -376,7 +396,7 @@ pub(crate) enum SessionEvent {
     ///
     /// This session is now able to exchange data.
     SessionEstablished {
-        node_id: NodeId,
+        node_id: PeerId,
         remote_addr: SocketAddr,
         capabilities: Arc<Capabilities>,
         status: Status,
@@ -384,30 +404,30 @@ pub(crate) enum SessionEvent {
     },
     /// A session received a valid message via RLPx.
     ValidMessage {
-        node_id: NodeId,
+        node_id: PeerId,
         /// Message received from the peer.
         message: CapabilityMessage,
     },
     /// Received a message that does not match the announced capabilities of the peer.
     InvalidMessage {
-        node_id: NodeId,
+        node_id: PeerId,
        /// Announced capabilities of the remote peer.
capabilities: Arc, /// Message received from the peer. message: CapabilityMessage, }, /// Closed an incoming pending session during authentication. - IncomingPendingSessionClosed { remote_addr: SocketAddr, error: Option }, + IncomingPendingSessionClosed { remote_addr: SocketAddr, error: Option }, /// Closed an outgoing pending session during authentication. OutgoingPendingSessionClosed { remote_addr: SocketAddr, - node_id: NodeId, - error: Option, + node_id: PeerId, + error: Option, }, /// Failed to establish a tcp stream - OutgoingConnectionError { remote_addr: SocketAddr, node_id: NodeId, error: io::Error }, + OutgoingConnectionError { remote_addr: SocketAddr, node_id: PeerId, error: io::Error }, /// Active session was disconnected. - Disconnected { node_id: NodeId, remote_addr: SocketAddr }, + Disconnected { node_id: PeerId, remote_addr: SocketAddr }, } /// The error thrown when the max configured limit has been reached and no more connections are @@ -426,6 +446,9 @@ async fn start_pending_incoming_session( events: mpsc::Sender, remote_addr: SocketAddr, secret_key: SecretKey, + hello: HelloMessage, + status: Status, + fork_filter: ForkFilter, ) { authenticate( disconnect_rx, @@ -435,6 +458,9 @@ async fn start_pending_incoming_session( remote_addr, secret_key, Direction::Incoming, + hello, + status, + fork_filter, ) .await } @@ -446,8 +472,11 @@ async fn start_pending_outbound_session( events: mpsc::Sender, session_id: SessionId, remote_addr: SocketAddr, - remote_node_id: NodeId, + remote_node_id: PeerId, secret_key: SecretKey, + hello: HelloMessage, + status: Status, + fork_filter: ForkFilter, ) { let stream = match TcpStream::connect(remote_addr).await { Ok(stream) => stream, @@ -471,6 +500,9 @@ async fn start_pending_outbound_session( remote_addr, secret_key, Direction::Outgoing(remote_node_id), + hello, + status, + fork_filter, ) .await } @@ -481,7 +513,7 @@ pub(crate) enum Direction { /// Incoming connection. Incoming, /// Outgoing connection to a specific node. 
- Outgoing(NodeId), + Outgoing(PeerId), } async fn authenticate( @@ -492,6 +524,9 @@ async fn authenticate( remote_addr: SocketAddr, secret_key: SecretKey, direction: Direction, + hello: HelloMessage, + status: Status, + fork_filter: ForkFilter, ) { let stream = match direction { Direction::Incoming => match ECIESStream::incoming(stream, secret_key).await { @@ -520,8 +555,17 @@ async fn authenticate( } }; - let unauthed = UnauthedEthStream::new(stream); - let auth = authenticate_stream(unauthed, session_id, remote_addr, direction).boxed(); + let unauthed = UnauthedP2PStream::new(stream); + let auth = authenticate_stream( + unauthed, + session_id, + remote_addr, + direction, + hello, + status, + fork_filter, + ) + .boxed(); match futures::future::select(disconnect_rx, auth).await { Either::Left((_, _)) => { @@ -544,10 +588,47 @@ async fn authenticate( /// /// On Success return the authenticated stream as [`PendingSessionEvent`] async fn authenticate_stream( - _stream: UnauthedEthStream>, - _session_id: SessionId, - _remote_addr: SocketAddr, - _direction: Direction, + stream: UnauthedP2PStream>, + session_id: SessionId, + remote_addr: SocketAddr, + direction: Direction, + hello: HelloMessage, + status: Status, + fork_filter: ForkFilter, ) -> PendingSessionEvent { - todo!() + // conduct the p2p handshake and return the authenticated stream + let (p2p_stream, their_hello) = match stream.handshake(hello).await { + Ok(stream_res) => stream_res, + Err(err) => { + return PendingSessionEvent::Disconnected { + remote_addr, + session_id, + direction, + error: Some(err.into()), + } + } + }; + + // if the hello handshake was successful we can try status handshake + let eth_unauthed = UnauthedEthStream::new(p2p_stream); + let (eth_stream, their_status) = match eth_unauthed.handshake(status, fork_filter).await { + Ok(stream_res) => stream_res, + Err(err) => { + return PendingSessionEvent::Disconnected { + remote_addr, + session_id, + direction, + error: Some(err), + } + } + }; + + PendingSessionEvent::Established { + session_id, + remote_addr, + node_id: their_hello.id, + capabilities: Arc::new(Capabilities::from(their_hello.capabilities)), + status: their_status, + conn: eth_stream, + } } diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 30e0eac43c..b2404c0841 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -5,12 +5,11 @@ use crate::{ fetch::StateFetcher, message::{PeerRequestSender, PeerResponse}, peers::{PeerAction, PeersManager}, - NodeId, }; use reth_eth_wire::{capability::Capabilities, Status}; use reth_interfaces::provider::BlockProvider; -use reth_primitives::H256; +use reth_primitives::{PeerId, H256}; use std::{ collections::{HashMap, VecDeque}, net::SocketAddr, @@ -37,7 +36,7 @@ use tracing::trace; /// This type is also responsible for responding for received request. pub struct NetworkState { /// All connected peers and their state. - connected_peers: HashMap, + connected_peers: HashMap, /// Manages connections to peers. peers_manager: PeersManager, /// Buffered messages until polled. @@ -83,7 +82,7 @@ where /// should be rejected. pub(crate) fn on_session_activated( &mut self, - peer: NodeId, + peer: PeerId, capabilities: Arc, status: Status, request_tx: PeerRequestSender, @@ -107,7 +106,7 @@ where } /// Event hook for a disconnected session for the peer. 
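Taken together, `authenticate` and `authenticate_stream` above reduce to two sequential handshakes. A minimal sketch of the happy path, assuming an already-established `ECIESStream` (`ecies`) and the `hello`/`status`/`fork_filter` values built by the `SessionManager`:

```rust
// Sketch only: error handling and the race against `disconnect_rx` are
// elided; see `authenticate` above for the real control flow.
let (p2p_stream, their_hello) =
    UnauthedP2PStream::new(ecies).handshake(hello).await?;
let (eth_stream, their_status) =
    UnauthedEthStream::new(p2p_stream).handshake(status, fork_filter).await?;
// `their_hello.id` is the remote PeerId; `eth_stream` is now ready to
// exchange eth protocol messages.
```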
- pub(crate) fn on_session_closed(&mut self, peer: NodeId) { + pub(crate) fn on_session_closed(&mut self, peer: PeerId) { self.connected_peers.remove(&peer); self.state_fetcher.on_session_closed(&peer); } @@ -149,7 +148,7 @@ where } /// Disconnect the session - fn on_session_disconnected(&mut self, peer: NodeId) { + fn on_session_disconnected(&mut self, peer: PeerId) { self.connected_peers.remove(&peer); } @@ -157,7 +156,7 @@ where /// /// Caution: this will replace an already pending response. It's the responsibility of the /// caller to select the peer. - fn handle_block_request(&mut self, peer: NodeId, request: BlockRequest) { + fn handle_block_request(&mut self, peer: PeerId, request: BlockRequest) { if let Some(ref mut peer) = self.connected_peers.get_mut(&peer) { let (request, response) = match request { BlockRequest::GetBlockHeaders(request) => { @@ -192,7 +191,7 @@ where } /// Invoked when received a response from a connected peer. - fn on_eth_response(&mut self, peer: NodeId, resp: PeerResponseResult) -> Option { + fn on_eth_response(&mut self, peer: PeerId, resp: PeerResponseResult) -> Option { match resp { PeerResponseResult::BlockHeaders(res) => { let outcome = self.state_fetcher.on_block_headers_response(peer, res)?; @@ -283,9 +282,9 @@ pub struct ConnectedPeer { /// Message variants triggered by the [`State`] pub enum StateAction { /// Create a new connection to the given node. - Connect { remote_addr: SocketAddr, node_id: NodeId }, + Connect { remote_addr: SocketAddr, node_id: PeerId }, /// Disconnect an existing connection - Disconnect { node_id: NodeId }, + Disconnect { node_id: PeerId }, } #[derive(Debug, thiserror::Error)] @@ -293,6 +292,6 @@ pub enum AddSessionError { #[error("No capacity for new sessions")] AtCapacity { /// The peer of the session - peer: NodeId, + peer: PeerId, }, } diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index e621956132..136f11abdb 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -2,12 +2,14 @@ use crate::{ listener::{ConnectionListener, ListenerEvent}, session::{SessionEvent, SessionId, SessionManager}, state::{AddSessionError, NetworkState, StateAction}, - NodeId, }; use futures::Stream; -use reth_ecies::ECIESError; -use reth_eth_wire::capability::{Capabilities, CapabilityMessage}; +use reth_eth_wire::{ + capability::{Capabilities, CapabilityMessage}, + error::EthStreamError, +}; use reth_interfaces::provider::BlockProvider; +use reth_primitives::PeerId; use std::{ io, net::SocketAddr, @@ -55,7 +57,7 @@ where } /// Triggers a new outgoing connection to the given node - pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_id: NodeId) { + pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_id: PeerId) { self.sessions.dial_outbound(remote_addr, remote_id) } @@ -191,13 +193,13 @@ pub enum SwarmEvent { /// Events related to the actual network protocol. CapabilityMessage { /// The peer that sent the message - node_id: NodeId, + node_id: PeerId, /// Message received from the peer message: CapabilityMessage, }, /// Received a message that does not match the announced capabilities of the peer. InvalidCapabilityMessage { - node_id: NodeId, + node_id: PeerId, /// Announced capabilities of the remote peer. capabilities: Arc, /// Message received from the peer. 
@@ -226,28 +228,28 @@ pub enum SwarmEvent { remote_addr: SocketAddr, }, SessionEstablished { - node_id: NodeId, + node_id: PeerId, remote_addr: SocketAddr, }, SessionClosed { - node_id: NodeId, + node_id: PeerId, remote_addr: SocketAddr, }, /// Closed an incoming pending session during authentication. IncomingPendingSessionClosed { remote_addr: SocketAddr, - error: Option, + error: Option, }, /// Closed an outgoing pending session during authentication. OutgoingPendingSessionClosed { remote_addr: SocketAddr, - node_id: NodeId, - error: Option, + node_id: PeerId, + error: Option, }, /// Failed to establish a tcp stream to the given address/node OutgoingConnectionError { remote_addr: SocketAddr, - node_id: NodeId, + node_id: PeerId, error: io::Error, }, } diff --git a/crates/primitives/src/constants.rs b/crates/primitives/src/constants.rs new file mode 100644 index 0000000000..d51732f458 --- /dev/null +++ b/crates/primitives/src/constants.rs @@ -0,0 +1,5 @@ +use crate::H256; + +/// The Ethereum mainnet genesis hash. +pub const MAINNET_GENESIS: H256 = + H256(hex_literal::hex!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3")); diff --git a/crates/net/eth-wire/src/types/forkid.rs b/crates/primitives/src/forkid.rs similarity index 99% rename from crates/net/eth-wire/src/types/forkid.rs rename to crates/primitives/src/forkid.rs index 77b9ad950e..5a11de1045 100644 --- a/crates/net/eth-wire/src/types/forkid.rs +++ b/crates/primitives/src/forkid.rs @@ -3,9 +3,9 @@ #![deny(missing_docs)] #![allow(clippy::redundant_else, clippy::too_many_lines)] +use crate::{BlockNumber, H256}; use crc::crc32; use maplit::btreemap; -use reth_primitives::{BlockNumber, H256}; use reth_rlp::*; use std::{ collections::{BTreeMap, BTreeSet}, diff --git a/crates/primitives/src/hardfork.rs b/crates/primitives/src/hardfork.rs new file mode 100644 index 0000000000..32ffbce346 --- /dev/null +++ b/crates/primitives/src/hardfork.rs @@ -0,0 +1,225 @@ +use crate::{BlockNumber, ForkFilter, ForkHash, ForkId, MAINNET_GENESIS}; +use std::str::FromStr; + +/// Ethereum mainnet hardforks +#[allow(missing_docs)] +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub enum Hardfork { + Frontier, + Homestead, + Dao, + Tangerine, + SpuriousDragon, + Byzantium, + Constantinople, + Petersburg, + Istanbul, + Muirglacier, + Berlin, + London, + ArrowGlacier, + GrayGlacier, + Latest, +} + +impl Hardfork { + /// Get the first block number of the hardfork. + pub fn fork_block(&self) -> u64 { + match *self { + Hardfork::Frontier => 0, + Hardfork::Homestead => 1150000, + Hardfork::Dao => 1920000, + Hardfork::Tangerine => 2463000, + Hardfork::SpuriousDragon => 2675000, + Hardfork::Byzantium => 4370000, + Hardfork::Constantinople | Hardfork::Petersburg => 7280000, + Hardfork::Istanbul => 9069000, + Hardfork::Muirglacier => 9200000, + Hardfork::Berlin => 12244000, + Hardfork::London => 12965000, + Hardfork::ArrowGlacier => 13773000, + Hardfork::GrayGlacier | Hardfork::Latest => 15050000, + } + } + + /// Get the EIP-2124 fork id for a given hardfork + /// + /// The [`ForkId`](ethereum_forkid::ForkId) includes a CRC32 checksum of the all fork block + /// numbers from genesis, and the next upcoming fork block number. + /// If the next fork block number is not yet known, it is set to 0. 
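Before the per-fork table below, it helps to restate the EIP-2124 rule the hard-coded values encode. A sketch mirroring `test_forkhash_from_fork_blocks` at the end of this file:

```rust
// A fork hash is a rolling CRC32: seed it with the genesis hash, then fold
// in each passed fork block number. The ForkHash constants below are the
// intermediate checksums of exactly this loop.
let mut hash = ForkHash(crc32::checksum_ieee(MAINNET_GENESIS.as_bytes()).to_be_bytes());
for block in Hardfork::all_fork_blocks() {
    hash += block; // `+=` is the checksum update, as used in the tests below
}
```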
+ pub fn fork_id(&self) -> ForkId { + match *self { + Hardfork::Frontier => { + ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 } + } + Hardfork::Homestead => { + ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 } + } + Hardfork::Dao => ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 }, + Hardfork::Tangerine => { + ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 2675000 } + } + Hardfork::SpuriousDragon => { + ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 } + } + Hardfork::Byzantium => { + ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 } + } + Hardfork::Constantinople | Hardfork::Petersburg => { + ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 } + } + Hardfork::Istanbul => { + ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 } + } + Hardfork::Muirglacier => { + ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 } + } + Hardfork::Berlin => ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 }, + Hardfork::London => ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 }, + Hardfork::ArrowGlacier => { + ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 } + } + Hardfork::Latest | Hardfork::GrayGlacier => { + // update `next` when another fork block num is known + ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 0 } + } + } + } + + /// This returns all known hardforks in order. + pub fn all_forks() -> Vec { + vec![ + Hardfork::Homestead, + Hardfork::Dao, + Hardfork::Tangerine, + Hardfork::SpuriousDragon, + Hardfork::Byzantium, + Hardfork::Constantinople, /* petersburg is skipped because it's the same block num + * as constantinople */ + Hardfork::Istanbul, + Hardfork::Muirglacier, + Hardfork::Berlin, + Hardfork::London, + Hardfork::ArrowGlacier, + Hardfork::GrayGlacier, + ] + } + + /// This returns all known hardfork block numbers as a vector. + pub fn all_fork_blocks() -> Vec { + Hardfork::all_forks().iter().map(|f| f.fork_block()).collect() + } + + /// Creates a [`ForkFilter`](crate::ForkFilter) for the given hardfork. + /// This assumes the current hardfork's block number is the current head and uses all known + /// future hardforks to initialize the filter. 
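The filter built by `fork_filter` below is what the eth `Status` handshake uses to reject peers on an incompatible chain. A sketch of the intended use, assuming the peer's `Status` carries a `forkid` field:

```rust
// `validate` is the EIP-2124 compatibility check from `forkid.rs`
// (re-exported from this crate as `ForkFilter`/`ValidationError`).
let fork_filter = Hardfork::Latest.fork_filter();
if let Err(err) = fork_filter.validate(peer_status.forkid) {
    // incompatible ForkId: disconnect the peer, reporting `err`
}
```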
+    pub fn fork_filter(&self) -> ForkFilter {
+        let all_forks = Hardfork::all_forks();
+        let future_forks: Vec<BlockNumber> = all_forks
+            .iter()
+            .filter(|f| f.fork_block() > self.fork_block())
+            .map(|f| f.fork_block())
+            .collect();
+
+        // this data structure is not chain-agnostic, so we can pass in the constant mainnet
+        // genesis
+        ForkFilter::new(self.fork_block(), MAINNET_GENESIS, future_forks)
+    }
+}
+
+impl FromStr for Hardfork {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let s = s.to_lowercase();
+        let hardfork = match s.as_str() {
+            "frontier" | "1" => Hardfork::Frontier,
+            "homestead" | "2" => Hardfork::Homestead,
+            "dao" | "3" => Hardfork::Dao,
+            "tangerine" | "4" => Hardfork::Tangerine,
+            "spuriousdragon" | "5" => Hardfork::SpuriousDragon,
+            "byzantium" | "6" => Hardfork::Byzantium,
+            "constantinople" | "7" => Hardfork::Constantinople,
+            "petersburg" | "8" => Hardfork::Petersburg,
+            "istanbul" | "9" => Hardfork::Istanbul,
+            "muirglacier" | "10" => Hardfork::Muirglacier,
+            "berlin" | "11" => Hardfork::Berlin,
+            "london" | "12" => Hardfork::London,
+            "arrowglacier" | "13" => Hardfork::ArrowGlacier,
+            "grayglacier" => Hardfork::GrayGlacier,
+            "latest" | "14" => Hardfork::Latest,
+            _ => return Err(format!("Unknown hardfork {s}")),
+        };
+        Ok(hardfork)
+    }
+}
+
+impl Default for Hardfork {
+    fn default() -> Self {
+        Hardfork::Latest
+    }
+}
+
+impl From<BlockNumber> for Hardfork {
+    fn from(num: BlockNumber) -> Hardfork {
+        match num {
+            _i if num < 1_150_000 => Hardfork::Frontier,
+            _i if num < 1_920_000 => Hardfork::Homestead,
+            _i if num < 2_463_000 => Hardfork::Dao,
+            _i if num < 2_675_000 => Hardfork::Tangerine,
+            _i if num < 4_370_000 => Hardfork::SpuriousDragon,
+            _i if num < 7_280_000 => Hardfork::Byzantium,
+            _i if num < 9_069_000 => Hardfork::Constantinople,
+            _i if num < 9_200_000 => Hardfork::Istanbul,
+            _i if num < 12_244_000 => Hardfork::Muirglacier,
+            _i if num < 12_965_000 => Hardfork::Berlin,
+            _i if num < 13_773_000 => Hardfork::London,
+            _i if num < 15_050_000 => Hardfork::ArrowGlacier,
+
+            _ => Hardfork::Latest,
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::{forkid::ForkHash, hardfork::Hardfork};
+    use crc::crc32;
+
+    #[test]
+    fn test_hardfork_blocks() {
+        let hf: Hardfork = 12_965_000u64.into();
+        assert_eq!(hf, Hardfork::London);
+
+        let hf: Hardfork = 4370000u64.into();
+        assert_eq!(hf, Hardfork::Byzantium);
+
+        let hf: Hardfork = 12244000u64.into();
+        assert_eq!(hf, Hardfork::Berlin);
+    }
+
+    #[test]
+    // this test checks that the fork hashes assigned to forks accurately map to the fork_id method
+    fn test_forkhash_from_fork_blocks() {
+        // set the genesis hash
+        let genesis =
+            hex::decode("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3")
+                .unwrap();
+
+        // set the frontier forkhash
+        let mut curr_forkhash = ForkHash(crc32::checksum_ieee(&genesis[..]).to_be_bytes());
+
+        // now we go through enum members
+        let frontier_forkid = Hardfork::Frontier.fork_id();
+        assert_eq!(curr_forkhash, frontier_forkid.hash);
+
+        // list of the above hardforks
+        let hardforks = Hardfork::all_forks();
+
+        // check that the curr_forkhash we compute matches the output of each fork_id returned
+        for hardfork in hardforks {
+            curr_forkhash += hardfork.fork_block();
+            assert_eq!(curr_forkhash, hardfork.fork_id().hash);
+        }
+    }
+}
diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs
index 0cf932a414..d70f9253ad 100644
--- a/crates/primitives/src/lib.rs
+++ b/crates/primitives/src/lib.rs
@@ -10,7 +10,10 @@
 mod account;
 mod block;
 mod chain;
+mod 
constants; mod error; +mod forkid; +mod hardfork; mod header; mod hex_bytes; mod integer_list; @@ -23,6 +26,9 @@ mod transaction; pub use account::Account; pub use block::{Block, BlockLocked}; pub use chain::Chain; +pub use constants::MAINNET_GENESIS; +pub use forkid::{ForkFilter, ForkHash, ForkId, ValidationError}; +pub use hardfork::Hardfork; pub use header::{Header, SealedHeader}; pub use hex_bytes::Bytes; pub use integer_list::IntegerList; @@ -56,6 +62,12 @@ pub type StorageKey = H256; /// Storage value pub type StorageValue = U256; +// TODO: should we use `PublicKey` for this? Even when dealing with public keys we should try to +// prevent misuse +/// This represents an uncompressed secp256k1 public key. +/// This encodes the concatenation of the x and y components of the affine point in bytes. +pub type PeerId = H512; + pub use ethers_core::{ types as rpc, types::{BigEndianHash, Bloom, H128, H160, H256, H512, H64, U128, U256, U64}, From 14e97b2c43dccb40e9e1031304f2e29ec4143588 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 14 Nov 2022 20:34:15 +0100 Subject: [PATCH 2/9] chore(txpool): add external transaction helper function (#201) --- crates/transaction-pool/src/traits.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index fb7064dd33..81caa7206c 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -23,6 +23,14 @@ pub trait TransactionPool: Send + Sync + 'static { /// affects the dynamic fee requirement of pending transactions in the pool. fn on_new_block(&self, event: OnNewBlockEvent); + /// Imports an _external_ transaction. + /// + /// This is intended to be used by the network to insert incoming transactions received over the + /// p2p network. + async fn add_external_transaction(&self, transaction: Self::Transaction) -> PoolResult { + self.add_transaction(TransactionOrigin::External, transaction).await + } + /// Adds an _unvalidated_ transaction into the pool. /// /// Consumer: RPC From 651eed1086271517709ac2744fadc747911b045b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 14 Nov 2022 20:35:08 +0100 Subject: [PATCH 3/9] chore(txpool): add PoolError::hash function (#202) --- crates/transaction-pool/src/error.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index 12c1b62821..8a4f971b32 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -22,3 +22,17 @@ pub enum PoolError { #[error("[{0:?}] Transaction discarded outright due to pool size constraints.")] DiscardedOnInsert(TxHash), } + +// === impl PoolError === + +impl PoolError { + /// Returns the hash of the transaction that resulted in this error. 
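The two txpool additions in this series are meant to be used together. A hedged sketch of the call site, where `pool`, `tx`, and the peer bookkeeping are placeholders:

```rust
// Import an incoming p2p transaction; on failure, `PoolError::hash` (defined
// below) maps the error back to the offending transaction without matching
// on every variant.
if let Err(err) = pool.add_external_transaction(tx).await {
    let failed: &TxHash = err.hash();
    // e.g. mark `failed` as bad for the peer that announced it
}
```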
+ pub fn hash(&self) -> &TxHash { + match self { + PoolError::ReplacementUnderpriced(hash) => hash, + PoolError::ProtocolFeeCapTooLow(hash, _) => hash, + PoolError::SpammerExceededCapacity(_, hash) => hash, + PoolError::DiscardedOnInsert(hash) => hash, + } + } +} From 92a7818512ce93e4e36ba5ea634ddab1939bfca4 Mon Sep 17 00:00:00 2001 From: Bjerg Date: Tue, 15 Nov 2022 08:49:11 +0100 Subject: [PATCH 4/9] feat: bodies stage (#190) * chore: clean up `.gitignore` * fix: make RO cursors `Send + Sync` * feat(wip): bodies stage * driveby: improve docs * chore: don't panic if we're the first stage * chore: use `Vec` for ommers * feat: error handling in bodies downloader * chore: remove stale comment * chore: pascal-case stage id * refactor: remove unused new fns * refactor: distinguish downloaders with prefix * refactor: move downloader errs to own module * refactor: `stream_bodies` -> `bodies_stream` * test: fix borked imports in header stage * test: clean up header tests * test: add basic body stage tests * test: add 2 more body stage test skeletons * test: move generator test utils to own module * refactor: move proof functions to primitives crate * feat: add block generator test utils * test: more body stage tests * chore: fix typo (`Cannonical*` -> `Canonical`) * docs: document `bodies_to_download` * test: more body stage tests * test: more body stage tests * refactor: clean up body stage tests a bit * test: fix broken tests * refactor: clean up body stage tests * test: more body stage tests --- .gitignore | 2 +- Cargo.lock | 27 +- Cargo.toml | 1 + crates/.gitignore | 1 - crates/consensus/Cargo.toml | 12 +- crates/consensus/src/consensus.rs | 6 +- crates/consensus/src/lib.rs | 3 - crates/consensus/src/verification.rs | 52 +- crates/db/src/kv/mod.rs | 2 +- crates/interfaces/Cargo.toml | 3 + crates/interfaces/src/consensus.rs | 16 +- crates/interfaces/src/db/codecs/scale.rs | 16 +- crates/interfaces/src/db/mod.rs | 19 +- crates/interfaces/src/db/models/blocks.rs | 22 +- crates/interfaces/src/db/tables.rs | 30 +- crates/interfaces/src/p2p/bodies/client.rs | 14 + .../interfaces/src/p2p/bodies/downloader.rs | 44 + crates/interfaces/src/p2p/bodies/error.rs | 51 ++ crates/interfaces/src/p2p/bodies/mod.rs | 8 + crates/interfaces/src/p2p/error.rs | 2 +- .../interfaces/src/p2p/headers/downloader.rs | 62 +- crates/interfaces/src/p2p/headers/error.rs | 44 + crates/interfaces/src/p2p/headers/mod.rs | 3 + crates/interfaces/src/p2p/mod.rs | 3 + crates/interfaces/src/test_utils/bodies.rs | 33 + .../interfaces/src/test_utils/generators.rs | 142 +++ crates/interfaces/src/test_utils/headers.rs | 52 +- crates/interfaces/src/test_utils/mod.rs | 5 + crates/net/bodies-downloaders/Cargo.toml | 21 + .../net/bodies-downloaders/src/concurrent.rs | 132 +++ crates/net/bodies-downloaders/src/lib.rs | 11 + crates/net/ecies/src/lib.rs | 2 +- crates/net/eth-wire/src/types/blocks.rs | 1 + crates/net/headers-downloaders/src/linear.rs | 37 +- crates/primitives/Cargo.toml | 6 +- crates/primitives/src/error.rs | 4 +- crates/primitives/src/header.rs | 38 +- crates/primitives/src/lib.rs | 24 +- .../{consensus => primitives}/src/proofs.rs | 40 +- crates/primitives/src/transaction/mod.rs | 18 +- crates/stages/Cargo.toml | 4 + crates/stages/src/error.rs | 36 +- crates/stages/src/pipeline.rs | 11 +- crates/stages/src/stages/bodies.rs | 842 ++++++++++++++++++ crates/stages/src/stages/headers.rs | 182 ++-- crates/stages/src/stages/mod.rs | 2 + crates/stages/src/stages/tx_index.rs | 22 +- crates/transaction-pool/src/lib.rs | 4 +- 48 files 
changed, 1772 insertions(+), 340 deletions(-) delete mode 100644 crates/.gitignore create mode 100644 crates/interfaces/src/p2p/bodies/client.rs create mode 100644 crates/interfaces/src/p2p/bodies/downloader.rs create mode 100644 crates/interfaces/src/p2p/bodies/error.rs create mode 100644 crates/interfaces/src/p2p/bodies/mod.rs create mode 100644 crates/interfaces/src/p2p/headers/error.rs create mode 100644 crates/interfaces/src/test_utils/bodies.rs create mode 100644 crates/interfaces/src/test_utils/generators.rs create mode 100644 crates/net/bodies-downloaders/Cargo.toml create mode 100644 crates/net/bodies-downloaders/src/concurrent.rs create mode 100644 crates/net/bodies-downloaders/src/lib.rs rename crates/{consensus => primitives}/src/proofs.rs (72%) create mode 100644 crates/stages/src/stages/bodies.rs diff --git a/.gitignore b/.gitignore index 6db043d3d9..ee44a96390 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,2 @@ .idea -/target +target diff --git a/Cargo.lock b/Cargo.lock index 0e4ca0e4c0..6a6dc2c52a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3043,6 +3043,21 @@ dependencies = [ "walkdir", ] +[[package]] +name = "reth-bodies-downloaders" +version = "0.1.0" +dependencies = [ + "assert_matches", + "futures-util", + "once_cell", + "rand 0.8.5", + "reth-eth-wire", + "reth-interfaces", + "reth-primitives", + "serial_test", + "tokio", +] + [[package]] name = "reth-codecs" version = "0.1.0" @@ -3057,16 +3072,11 @@ dependencies = [ "async-trait", "auto_impl", "eyre", - "hash-db", - "plain_hasher", "reth-interfaces", "reth-primitives", "reth-rlp", - "rlp", - "sha3", "thiserror", "tokio", - "triehash", ] [[package]] @@ -3225,6 +3235,7 @@ dependencies = [ "rand 0.8.5", "reth-codecs", "reth-db", + "reth-eth-wire", "reth-primitives", "reth-rpc-types", "serde", @@ -3332,10 +3343,12 @@ dependencies = [ "crc", "derive_more", "ethers-core", + "hash-db", "hex", "hex-literal", "maplit", "parity-scale-codec", + "plain_hasher", "reth-codecs", "reth-rlp", "secp256k1", @@ -3344,6 +3357,7 @@ dependencies = [ "sucds", "thiserror", "tiny-keccak", + "triehash", ] [[package]] @@ -3416,9 +3430,12 @@ dependencies = [ "aquamarine", "assert_matches", "async-trait", + "futures-util", "metrics", "rand 0.8.5", + "reth-bodies-downloaders", "reth-db", + "reth-eth-wire", "reth-headers-downloaders", "reth-interfaces", "reth-primitives", diff --git a/Cargo.toml b/Cargo.toml index d1fa725587..d94f9eab3e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,6 +18,7 @@ members = [ "crates/net/rpc-api", "crates/net/rpc-types", "crates/net/headers-downloaders", + "crates/net/bodies-downloaders", "crates/primitives", "crates/stages", "crates/transaction-pool", diff --git a/crates/.gitignore b/crates/.gitignore deleted file mode 100644 index 2f7896d1d1..0000000000 --- a/crates/.gitignore +++ /dev/null @@ -1 +0,0 @@ -target/ diff --git a/crates/consensus/Cargo.toml b/crates/consensus/Cargo.toml index aefc34afab..267f026ffc 100644 --- a/crates/consensus/Cargo.toml +++ b/crates/consensus/Cargo.toml @@ -17,14 +17,4 @@ async-trait = "0.1.57" thiserror = "1.0.37" eyre = "0.6.8" auto_impl = "1.0" -tokio = { version = "1.21.2", features = ["sync"] } - -# proof related -triehash = "0.8" -# See to replace hashers to simplify libraries -plain_hasher = "0.2" -hash-db = "0.15" -# todo replace with faster rlp impl -rlp = { version = "0.5", default-features = false } -# replace with tiny-keccak (it is faster hasher) -sha3 = { version = "0.10", default-features = false } \ No newline at end of file +tokio = { version = "1.21.2", features 
= ["sync"] }
\ No newline at end of file
diff --git a/crates/consensus/src/consensus.rs b/crates/consensus/src/consensus.rs
index d000884941..3d174df727 100644
--- a/crates/consensus/src/consensus.rs
+++ b/crates/consensus/src/consensus.rs
@@ -2,7 +2,7 @@
 use crate::{verification, Config};
 use reth_interfaces::consensus::{Consensus, Error, ForkchoiceState};
-use reth_primitives::{SealedHeader, H256};
+use reth_primitives::{BlockLocked, SealedHeader, H256};
 use tokio::sync::watch;
 /// Ethereum consensus
@@ -40,4 +40,8 @@ impl Consensus for EthConsensus {
         // * mix_hash & nonce PoW stuff
         // * extra_data
     }
+
+    fn pre_validate_block(&self, block: &BlockLocked) -> Result<(), Error> {
+        verification::validate_block_standalone(block, false)
+    }
 }
diff --git a/crates/consensus/src/lib.rs b/crates/consensus/src/lib.rs
index 9d9a6e48ce..a36c5abf42 100644
--- a/crates/consensus/src/lib.rs
+++ b/crates/consensus/src/lib.rs
@@ -10,9 +10,6 @@
 pub mod config;
 pub mod consensus;
 pub mod verification;
-/// Helper function for calculating Merkle proofs and hashes
-pub mod proofs;
-
 pub use config::Config;
 pub use consensus::EthConsensus;
 pub use reth_interfaces::consensus::Error;
diff --git a/crates/consensus/src/verification.rs b/crates/consensus/src/verification.rs
index 27428f716e..50e357adac 100644
--- a/crates/consensus/src/verification.rs
+++ b/crates/consensus/src/verification.rs
@@ -115,10 +115,20 @@ pub fn validate_transaction_regarding_state(
     Ok(())
 }
-/// Validate block standalone
-pub fn validate_block_standalone(block: &BlockLocked) -> Result<(), Error> {
-    // check ommers hash
-    let ommers_hash = crate::proofs::calculate_ommers_root(block.ommers.iter().map(|h| h.as_ref()));
+/// Validate a block without regard for state:
+///
+/// - Compares the ommer hash in the block header to the block body
+/// - Compares the transactions root in the block header to the block body
+/// - Pre-execution transaction validation
+/// - (Optionally) Compares the receipts root in the block header to the block body
+pub fn validate_block_standalone(
+    block: &BlockLocked,
+    validate_receipts: bool,
+) -> Result<(), Error> {
+    // Check ommers hash
+    // TODO(onbjerg): This should probably be accessible directly on [Block]
+    let ommers_hash =
+        reth_primitives::proofs::calculate_ommers_root(block.ommers.iter().map(|h| h.as_ref()));
     if block.header.ommers_hash != ommers_hash {
         return Err(Error::BodyOmmersHashDiff {
             got: ommers_hash,
@@ -126,8 +136,9 @@
         })
     }
-    // check transaction root
-    let transaction_root = crate::proofs::calculate_transaction_root(block.body.iter());
+    // Check transaction root
+    // TODO(onbjerg): This should probably be accessible directly on [Block]
+    let transaction_root = reth_primitives::proofs::calculate_transaction_root(block.body.iter());
     if block.header.transactions_root != transaction_root {
         return Err(Error::BodyTransactionRootDiff {
             got: transaction_root,
@@ -135,18 +146,27 @@
         })
     }
-    // TODO transaction verification, Maybe make it configurable as in check only
+    // TODO: transaction verification, maybe make it configurable as in check only
     // signatures/limits/types
+    // Things to probably check:
+    // - Chain ID
+    // - Base fee per gas (if applicable)
+    // - Max priority fee per gas (if applicable)
-    // check if all transactions limit does not goes over block limit
+    // TODO: Check if all transaction gas total does not go over block limit
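A sketch of the gas check the TODO above calls for; this is not part of the patch, and the error variant is a placeholder:

```rust
// Sum the gas limits of all transactions and compare against the header's
// gas limit; `gas_limit()` is the same accessor the test generators use.
let total_gas: u64 = block.body.iter().map(|tx| tx.transaction.gas_limit()).sum();
if total_gas > block.header.gas_limit {
    // `Error::BlockGasLimitExceeded` is a hypothetical variant
    return Err(Error::BlockGasLimitExceeded { got: total_gas, limit: block.header.gas_limit })
}
```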
- // check receipts root - let receipts_root = crate::proofs::calculate_receipt_root(block.receipts.iter()); - if block.header.receipts_root != receipts_root { - return Err(Error::BodyReceiptsRootDiff { - got: receipts_root, - expected: block.header.receipts_root, - }) + // Check receipts root + // TODO(onbjerg): This should probably be accessible directly on [Block] + // NOTE(onbjerg): Pre-validation does not validate the receipts root since we do not have the + // receipts yet (this validation is before execution). Maybe this should not be in here? + if validate_receipts { + let receipts_root = reth_primitives::proofs::calculate_receipt_root(block.receipts.iter()); + if block.header.receipts_root != receipts_root { + return Err(Error::BodyReceiptsRootDiff { + got: receipts_root, + expected: block.header.receipts_root, + }) + } } Ok(()) @@ -284,7 +304,7 @@ pub fn full_validation( config: &Config, ) -> RethResult<()> { validate_header_standalone(&block.header, config)?; - validate_block_standalone(block)?; + validate_block_standalone(block, true)?; let parent = validate_block_regarding_chain(block, &provider)?; validate_header_regarding_parent(&parent, &block.header, config)?; Ok(()) diff --git a/crates/db/src/kv/mod.rs b/crates/db/src/kv/mod.rs index fdad0808b9..a5600702e8 100644 --- a/crates/db/src/kv/mod.rs +++ b/crates/db/src/kv/mod.rs @@ -50,7 +50,7 @@ impl Database for Env { impl Env { /// Opens the database at the specified path with the given `EnvKind`. /// - /// It does not create the tables, for that call [`create_tables`]. + /// It does not create the tables, for that call [`Env::create_tables`]. pub fn open(path: &Path, kind: EnvKind) -> Result, Error> { let mode = match kind { EnvKind::RO => Mode::ReadOnly, diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index b754f5e75e..752fc9efca 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -16,6 +16,9 @@ auto_impl = "1.0" tokio = { version = "1.21.2", features = ["sync"] } bytes = "1.2" +# TODO(onbjerg): We only need this for [BlockBody] +reth-eth-wire = { path = "../net/eth-wire" } + # codecs serde = { version = "1.0.*", default-features = false } postcard = { version = "1.0.2", features = ["alloc"] } diff --git a/crates/interfaces/src/consensus.rs b/crates/interfaces/src/consensus.rs index ac82fce85c..0e98c015d7 100644 --- a/crates/interfaces/src/consensus.rs +++ b/crates/interfaces/src/consensus.rs @@ -1,20 +1,30 @@ use async_trait::async_trait; -use reth_primitives::{BlockHash, BlockNumber, SealedHeader, H256}; +use reth_primitives::{BlockHash, BlockLocked, BlockNumber, SealedHeader, H256}; use tokio::sync::watch::Receiver; /// Re-export forkchoice state pub use reth_rpc_types::engine::ForkchoiceState; /// Consensus is a protocol that chooses canonical chain. -/// We are checking validity of block header here. #[async_trait] #[auto_impl::auto_impl(&, Arc)] pub trait Consensus: Send + Sync { /// Get a receiver for the fork choice state fn fork_choice_state(&self) -> Receiver; - /// Validate if header is correct and follows consensus specification + /// Validate if header is correct and follows consensus specification. + /// + /// **This should not be called for the genesis block**. fn validate_header(&self, header: &SealedHeader, parent: &SealedHeader) -> Result<(), Error>; + + /// Validate a block disregarding world state, i.e. things that can be checked before sender + /// recovery and execution. 
+ /// + /// See the Yellow Paper sections 4.3.2 "Holistic Validity", 4.3.4 "Block Header Validity", and + /// 11.1 "Ommer Validation". + /// + /// **This should not be called for the genesis block**. + fn pre_validate_block(&self, block: &BlockLocked) -> Result<(), Error>; } /// Consensus Errors diff --git a/crates/interfaces/src/db/codecs/scale.rs b/crates/interfaces/src/db/codecs/scale.rs index 93e2d23740..ce8eaa13ff 100644 --- a/crates/interfaces/src/db/codecs/scale.rs +++ b/crates/interfaces/src/db/codecs/scale.rs @@ -1,4 +1,7 @@ -use crate::db::{models::accounts::AccountBeforeTx, Compress, Decompress, Error}; +use crate::db::{ + models::{accounts::AccountBeforeTx, StoredBlockBody}, + Compress, Decompress, Error, +}; use parity_scale_codec::decode_from_bytes; use reth_primitives::*; @@ -53,7 +56,16 @@ impl ScaleValue for Vec {} impl sealed::Sealed for Vec {} impl_scale!(U256, H256, H160); -impl_scale!(Header, Account, Log, Receipt, TxType, StorageEntry, TransactionSigned); +impl_scale!( + Header, + Account, + Log, + Receipt, + TxType, + StorageEntry, + TransactionSigned, + StoredBlockBody +); impl_scale!(AccountBeforeTx); impl_scale_value!(u8, u32, u16, u64); diff --git a/crates/interfaces/src/db/mod.rs b/crates/interfaces/src/db/mod.rs index 30691c7cfe..81d241a691 100644 --- a/crates/interfaces/src/db/mod.rs +++ b/crates/interfaces/src/db/mod.rs @@ -74,9 +74,9 @@ pub trait Database: for<'a> DatabaseGAT<'a> { /// Sealed trait which cannot be implemented by 3rd parties, exposed only for implementers pub trait DbTxGAT<'a, __ImplicitBounds: Sealed = Bounds<&'a Self>>: Send + Sync { /// Cursor GAT - type Cursor: DbCursorRO<'a, T>; + type Cursor: DbCursorRO<'a, T> + Send + Sync; /// DupCursor GAT - type DupCursor: DbDupCursorRO<'a, T> + DbCursorRO<'a, T>; + type DupCursor: DbDupCursorRO<'a, T> + DbCursorRO<'a, T> + Send + Sync; } /// Implements the GAT method from: @@ -85,12 +85,14 @@ pub trait DbTxGAT<'a, __ImplicitBounds: Sealed = Bounds<&'a Self>>: Send + Sync /// Sealed trait which cannot be implemented by 3rd parties, exposed only for implementers pub trait DbTxMutGAT<'a, __ImplicitBounds: Sealed = Bounds<&'a Self>>: Send + Sync { /// Cursor GAT - type CursorMut: DbCursorRW<'a, T> + DbCursorRO<'a, T>; + type CursorMut: DbCursorRW<'a, T> + DbCursorRO<'a, T> + Send + Sync; /// DupCursor GAT type DupCursorMut: DbDupCursorRW<'a, T> + DbCursorRW<'a, T> + DbDupCursorRO<'a, T> - + DbCursorRO<'a, T>; + + DbCursorRO<'a, T> + + Send + + Sync; } /// Read only transaction @@ -190,7 +192,9 @@ pub trait DbCursorRW<'tx, T: Table> { /// exists in a table, and insert a new row if the specified value doesn't already exist fn upsert(&mut self, key: T::Key, value: T::Value) -> Result<(), Error>; - /// Append value to next cursor item + /// Append value to next cursor item. + /// + /// This is efficient for pre-sorted data. If the data is not pre-sorted, use [`insert`]. fn append(&mut self, key: T::Key, value: T::Value) -> Result<(), Error>; /// Delete current value that cursor points to @@ -201,7 +205,10 @@ pub trait DbCursorRW<'tx, T: Table> { pub trait DbDupCursorRW<'tx, T: DupSort> { /// Append value to next cursor item fn delete_current_duplicates(&mut self) -> Result<(), Error>; - /// Append duplicate value + + /// Append duplicate value. + /// + /// This is efficient for pre-sorted data. If the data is not pre-sorted, use [`insert`]. 
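A sketch of the append fast path those docs describe; the `cursor_mut` helper and the table choice are assumptions for illustration:

```rust
// Appending keys in ascending order lets the database write at the end of
// the table instead of seeking per key, which the staged sync relies on.
let mut cursor = tx.cursor_mut::<tables::Transactions>()?;
for (tx_id, transaction) in pre_sorted_txs {
    cursor.append(tx_id, transaction)?;
}
```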
fn append_dup(&mut self, key: T::Key, value: T::Value) -> Result<(), Error>;
 }
diff --git a/crates/interfaces/src/db/models/blocks.rs b/crates/interfaces/src/db/models/blocks.rs
index c24c0ca4b5..a2b0c416ef 100644
--- a/crates/interfaces/src/db/models/blocks.rs
+++ b/crates/interfaces/src/db/models/blocks.rs
@@ -8,14 +8,30 @@ use crate::{
     impl_fixed_arbitrary,
 };
 use bytes::Bytes;
-use reth_primitives::{BlockHash, BlockNumber, H256};
+use reth_codecs::main_codec;
+use reth_primitives::{BlockHash, BlockNumber, Header, TxNumber, H256};
 use serde::{Deserialize, Serialize};
 /// Total chain number of transactions. Key for [`CumulativeTxCount`].
 pub type NumTransactions = u64;
-/// Number of transactions in the block. Value for [`BlockBodies`].
-pub type NumTxesInBlock = u16;
+/// The storage representation of a block body.
+///
+/// A block body is stored as a pointer to the first transaction in the block (`base_tx_id`), a
+/// count of how many transactions are in the block, and the headers of the block's uncles.
+///
+/// The [TxNumber]s for all the transactions in the block are `base_tx_id..(base_tx_id +
+/// tx_amount)`.
+#[derive(Debug)]
+#[main_codec]
+pub struct StoredBlockBody {
+    /// The ID of the first transaction in the block.
+    pub base_tx_id: TxNumber,
+    /// The number of transactions in the block.
+    pub tx_amount: u64,
+    /// The block headers of this block's uncles.
+    pub ommers: Vec<Header>,
+}
 /// Hash of the block header. Value for [`CanonicalHeaders`]
 pub type HeaderHash = H256;
diff --git a/crates/interfaces/src/db/tables.rs b/crates/interfaces/src/db/tables.rs
index b9f0c19715..6b663f7c6c 100644
--- a/crates/interfaces/src/db/tables.rs
+++ b/crates/interfaces/src/db/tables.rs
@@ -3,7 +3,7 @@ use crate::db::{
     models::{
         accounts::{AccountBeforeTx, TxNumberAddress},
-        blocks::{BlockNumHash, HeaderHash, NumTransactions, NumTxesInBlock},
+        blocks::{BlockNumHash, HeaderHash, NumTransactions, StoredBlockBody},
         ShardedKey,
     },
     DupSort,
@@ -13,7 +13,7 @@ use reth_primitives::{
     TransactionSigned, TxNumber, H256,
 };
-/// Enum for the type of table present in libmdbx.
+/// Enum for the types of tables present in libmdbx.
 #[derive(Debug)]
 pub enum TableType {
     /// key value table
@@ -119,8 +119,10 @@ table!(
     Headers => BlockNumHash => Header);
 table!(
-    /// Stores the number of transactions of a block.
-    BlockBodies => BlockNumHash => NumTxesInBlock);
+    /// Stores a pointer to the first transaction in the block, the number of transactions in the block, and the uncles/ommers of the block.
+    ///
+    /// The transaction IDs point to the [`Transactions`] table.
+    BlockBodies => BlockNumHash => StoredBlockBody);
 table!(
     /// Stores the maximum [`TxNumber`] from which this particular block starts.
@@ -131,19 +133,19 @@ table!(
     NonCanonicalTransactions => BlockNumHashTxNumber => TransactionSigned);
 table!(
-    /// Stores the transaction body from canonical transactions. Canonical only
+    /// (Canonical only) Stores the transaction body for canonical transactions.
     Transactions => TxNumber => TransactionSigned);
 table!(
-    /// Stores transaction receipts. Canonical only
+    /// (Canonical only) Stores transaction receipts.
     Receipts => TxNumber => Receipt);
 table!(
-    /// Stores transaction logs. Canonical only
+    /// (Canonical only) Stores transaction logs.
     Logs => TxNumber => Receipt);
 table!(
-    /// Stores the current state of an Account.
+    /// Stores the current state of an [`Account`].
     PlainAccountState => Address => Account);
 table!(
@@ -200,27 +202,27 @@ table!(
     AccountHistory => ShardedKey<Address>
=> TxNumberList); table!( - /// Stores the transaction numbers that changed each storage key. + /// Stores pointers to transactions that changed each storage key. StorageHistory => AddressStorageKey => TxNumberList); dupsort!( - /// Stores state of an account before a certain transaction changed it. + /// Stores the state of an account before a certain transaction changed it. AccountChangeSet => TxNumber => [Address] AccountBeforeTx); dupsort!( - /// Stores state of a storage key before a certain transaction changed it. + /// Stores the state of a storage key before a certain transaction changed it. StorageChangeSet => TxNumberAddress => [H256] StorageEntry); table!( - /// Stores the transaction sender from each transaction. + /// Stores the transaction sender for each transaction. TxSenders => TxNumber => Address); // Is it necessary? if so, inverted index index so we dont repeat addresses? table!( - /// Config. + /// Configuration values. Config => ConfigKey => ConfigValue); table!( - /// Stores the block number of each stage id. + /// Stores the highest synced block number of each stage. SyncStage => StageId => BlockNumber); /// diff --git a/crates/interfaces/src/p2p/bodies/client.rs b/crates/interfaces/src/p2p/bodies/client.rs new file mode 100644 index 0000000000..4e546fea95 --- /dev/null +++ b/crates/interfaces/src/p2p/bodies/client.rs @@ -0,0 +1,14 @@ +use reth_eth_wire::BlockBody; +use reth_primitives::H256; + +use crate::p2p::bodies::error::BodiesClientError; +use async_trait::async_trait; +use std::fmt::Debug; + +/// A client capable of downloading block bodies. +#[async_trait] +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait BodiesClient: Send + Sync + Debug { + /// Fetches the block body for the requested block. + async fn get_block_body(&self, hash: H256) -> Result; +} diff --git a/crates/interfaces/src/p2p/bodies/downloader.rs b/crates/interfaces/src/p2p/bodies/downloader.rs new file mode 100644 index 0000000000..677cbab0cd --- /dev/null +++ b/crates/interfaces/src/p2p/bodies/downloader.rs @@ -0,0 +1,44 @@ +use super::client::BodiesClient; +use crate::p2p::bodies::error::DownloadError; +use reth_eth_wire::BlockBody; +use reth_primitives::{BlockNumber, H256}; +use std::{pin::Pin, time::Duration}; +use tokio_stream::Stream; + +/// A downloader capable of fetching block bodies from header hashes. +/// +/// A downloader represents a distinct strategy for submitting requests to download block bodies, +/// while a [BodiesClient] represents a client capable of fulfilling these requests. +pub trait BodyDownloader: Sync + Send { + /// The [BodiesClient] used to fetch the block bodies + type Client: BodiesClient; + + /// The request timeout duration + fn timeout(&self) -> Duration; + + /// The block bodies client + fn client(&self) -> &Self::Client; + + /// Download the bodies from `starting_block` (inclusive) up until `target_block` (inclusive). + /// + /// The returned stream will always emit bodies in the order they were requested, but multiple + /// requests may be in flight at the same time. + /// + /// The stream may exit early in some cases. Thus, a downloader can only at a minimum guarantee: + /// + /// - All emitted bodies map onto a request + /// - The emitted bodies are emitted in order: i.e. the body for the first block is emitted + /// first, even if it was not fetched first. 
+    ///
+    /// It is *not* guaranteed that all the requested bodies are fetched: the downloader may close
+    /// the stream before the entire range has been fetched for any reason
+    fn bodies_stream<'a, 'b, I>(&'a self, headers: I) -> BodiesStream<'a>
+    where
+        I: IntoIterator<Item = &'b (BlockNumber, H256)>,
+        <I as IntoIterator>::IntoIter: Send + 'b,
+        'b: 'a;
+}
+
+/// A stream of block bodies.
+pub type BodiesStream<'a> =
+    Pin<Box<dyn Stream<Item = Result<(BlockNumber, H256, BlockBody), DownloadError>> + Send + 'a>>;
diff --git a/crates/interfaces/src/p2p/bodies/error.rs b/crates/interfaces/src/p2p/bodies/error.rs
new file mode 100644
index 0000000000..b6b11682ff
--- /dev/null
+++ b/crates/interfaces/src/p2p/bodies/error.rs
@@ -0,0 +1,51 @@
+use crate::p2p::error::RequestError;
+use reth_primitives::H256;
+use thiserror::Error;
+
+/// Body client errors.
+#[derive(Error, Debug, Clone)]
+pub enum BodiesClientError {
+    /// Timed out while waiting for a response.
+    #[error("Timed out while getting bodies for block {header_hash}.")]
+    Timeout {
+        /// The header hash of the block that timed out.
+        header_hash: H256,
+    },
+    /// The client encountered an internal error.
+    #[error(transparent)]
+    Internal(#[from] RequestError),
+}
+
+/// Body downloader errors.
+#[derive(Error, Debug, Clone)]
+pub enum DownloadError {
+    /// Timed out while waiting for a response.
+    #[error("Timed out while getting bodies for block {header_hash}.")]
+    Timeout {
+        /// The header hash of the block that timed out.
+        header_hash: H256,
+    },
+    /// The [BodiesClient] used by the downloader experienced an error.
+    #[error("The downloader client encountered an error.")]
+    Client {
+        /// The underlying client error.
+        #[source]
+        source: BodiesClientError,
+    },
+}
+
+impl From<BodiesClientError> for DownloadError {
+    fn from(error: BodiesClientError) -> Self {
+        match error {
+            BodiesClientError::Timeout { header_hash } => DownloadError::Timeout { header_hash },
+            _ => DownloadError::Client { source: error },
+        }
+    }
+}
+
+impl DownloadError {
+    /// Indicates whether this error is retryable or fatal.
+    pub fn is_retryable(&self) -> bool {
+        matches!(self, DownloadError::Timeout { .. })
+    }
+}
diff --git a/crates/interfaces/src/p2p/bodies/mod.rs b/crates/interfaces/src/p2p/bodies/mod.rs
new file mode 100644
index 0000000000..bc0c5df092
--- /dev/null
+++ b/crates/interfaces/src/p2p/bodies/mod.rs
@@ -0,0 +1,8 @@
+/// Traits and types for block body clients.
+pub mod client;
+
+/// Block body downloaders.
+pub mod downloader;
+
+/// Error types.
+pub mod error;
diff --git a/crates/interfaces/src/p2p/error.rs b/crates/interfaces/src/p2p/error.rs
index 56e7bd3401..856e5f65e4 100644
--- a/crates/interfaces/src/p2p/error.rs
+++ b/crates/interfaces/src/p2p/error.rs
@@ -4,7 +4,7 @@
 use tokio::sync::{mpsc, oneshot};
 pub type RequestResult<T> = Result<T, RequestError>;
 /// Error variants that can happen when sending requests to a session.
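A sketch of how a downloader can use `is_retryable`; the retry budget and loop shape are assumptions (the concurrent downloader later in this patch still carries a `TODO: Retry`):

```rust
// Inside an async fn returning Result<BlockBody, DownloadError>.
let mut retries_left = 3usize;
let body = loop {
    match client.get_block_body(hash).await {
        Ok(body) => break body,
        Err(err) => {
            // `From<BodiesClientError>` preserves timeouts as retryable
            let err = DownloadError::from(err);
            if err.is_retryable() && retries_left > 0 {
                retries_left -= 1;
                continue
            }
            return Err(err)
        }
    }
};
```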
-#[derive(Debug, thiserror::Error)] +#[derive(Debug, thiserror::Error, Clone)] #[allow(missing_docs)] pub enum RequestError { #[error("Closed channel to the peer.")] diff --git a/crates/interfaces/src/p2p/headers/downloader.rs b/crates/interfaces/src/p2p/headers/downloader.rs index f5b4c1f7e5..2657b37be7 100644 --- a/crates/interfaces/src/p2p/headers/downloader.rs +++ b/crates/interfaces/src/p2p/headers/downloader.rs @@ -1,60 +1,20 @@ use super::client::{HeadersClient, HeadersRequest, HeadersStream}; use crate::consensus::Consensus; +use crate::p2p::headers::error::DownloadError; use async_trait::async_trait; -use reth_primitives::{ - rpc::{BlockId, BlockNumber}, - Header, SealedHeader, H256, -}; +use reth_primitives::{rpc::BlockId, Header, SealedHeader}; use reth_rpc_types::engine::ForkchoiceState; -use std::{fmt::Debug, time::Duration}; -use thiserror::Error; +use std::time::Duration; use tokio_stream::StreamExt; -/// The downloader error type -#[derive(Error, Debug, Clone)] -pub enum DownloadError { - /// Header validation failed - #[error("Failed to validate header {hash}. Details: {details}.")] - HeaderValidation { - /// Hash of header failing validation - hash: H256, - /// The details of validation failure - details: String, - }, - /// Timed out while waiting for request id response. - #[error("Timed out while getting headers for request {request_id}.")] - Timeout { - /// The request id that timed out - request_id: u64, - }, - /// Error when checking that the current [`Header`] has the parent's hash as the parent_hash - /// field, and that they have sequential block numbers. - #[error("Headers did not match, current number: {header_number} / current hash: {header_hash}, parent number: {parent_number} / parent_hash: {parent_hash}")] - MismatchedHeaders { - /// The header number being evaluated - header_number: BlockNumber, - /// The header hash being evaluated - header_hash: H256, - /// The parent number being evaluated - parent_number: BlockNumber, - /// The parent hash being evaluated - parent_hash: H256, - }, -} - -impl DownloadError { - /// Returns bool indicating whether this error is retryable or fatal, in the cases - /// where the peer responds with no headers, or times out. - pub fn is_retryable(&self) -> bool { - matches!(self, DownloadError::Timeout { .. }) - } -} - -/// The header downloading strategy +/// A downloader capable of fetching block headers. +/// +/// A downloader represents a distinct strategy for submitting requests to download block headers, +/// while a [HeadersClient] represents a client capable of fulfilling these requests. 
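For orientation, the attachment rule that `MismatchedHeaders` (above) and the downloader's `validate` helper (below) enforce can be condensed to the following sketch:

```rust
// A header attaches to its parent iff the hashes link and the numbers are
// sequential; only then is consensus validation attempted.
fn attaches(header: &SealedHeader, parent: &SealedHeader) -> bool {
    header.parent_hash == parent.hash() && header.number == parent.number + 1
}
```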
#[async_trait] #[auto_impl::auto_impl(&, Arc, Box)] -pub trait Downloader: Sync + Send { +pub trait HeaderDownloader: Sync + Send { /// The Consensus used to verify block validity when /// downloading type Consensus: Consensus; @@ -118,9 +78,9 @@ pub trait Downloader: Sync + Send { }) } - self.consensus().validate_header(header, parent).map_err(|e| { - DownloadError::HeaderValidation { hash: parent.hash(), details: e.to_string() } - })?; + self.consensus() + .validate_header(header, parent) + .map_err(|error| DownloadError::HeaderValidation { hash: parent.hash(), error })?; Ok(()) } } diff --git a/crates/interfaces/src/p2p/headers/error.rs b/crates/interfaces/src/p2p/headers/error.rs new file mode 100644 index 0000000000..035727f1a1 --- /dev/null +++ b/crates/interfaces/src/p2p/headers/error.rs @@ -0,0 +1,44 @@ +use crate::consensus; +use reth_primitives::{rpc::BlockNumber, H256}; +use thiserror::Error; + +/// The downloader error type +#[derive(Error, Debug, Clone)] +pub enum DownloadError { + /// Header validation failed + #[error("Failed to validate header {hash}. Details: {error}.")] + HeaderValidation { + /// Hash of header failing validation + hash: H256, + /// The details of validation failure + #[source] + error: consensus::Error, + }, + /// Timed out while waiting for request id response. + #[error("Timed out while getting headers for request {request_id}.")] + Timeout { + /// The request id that timed out + request_id: u64, + }, + /// Error when checking that the current [`Header`] has the parent's hash as the parent_hash + /// field, and that they have sequential block numbers. + #[error("Headers did not match, current number: {header_number} / current hash: {header_hash}, parent number: {parent_number} / parent_hash: {parent_hash}")] + MismatchedHeaders { + /// The header number being evaluated + header_number: BlockNumber, + /// The header hash being evaluated + header_hash: H256, + /// The parent number being evaluated + parent_number: BlockNumber, + /// The parent hash being evaluated + parent_hash: H256, + }, +} + +impl DownloadError { + /// Returns bool indicating whether this error is retryable or fatal, in the cases + /// where the peer responds with no headers, or times out. + pub fn is_retryable(&self) -> bool { + matches!(self, DownloadError::Timeout { .. }) + } +} diff --git a/crates/interfaces/src/p2p/headers/mod.rs b/crates/interfaces/src/p2p/headers/mod.rs index 915b28ff08..d85e6d42ad 100644 --- a/crates/interfaces/src/p2p/headers/mod.rs +++ b/crates/interfaces/src/p2p/headers/mod.rs @@ -9,3 +9,6 @@ pub mod client; /// [`Consensus`]: crate::consensus::Consensus /// [`HeadersClient`]: client::HeadersClient pub mod downloader; + +/// Error types. +pub mod error; diff --git a/crates/interfaces/src/p2p/mod.rs b/crates/interfaces/src/p2p/mod.rs index fb351fdc26..ecdad59702 100644 --- a/crates/interfaces/src/p2p/mod.rs +++ b/crates/interfaces/src/p2p/mod.rs @@ -1,3 +1,6 @@ +/// Traits for implementing P2P block body clients. +pub mod bodies; + /// Traits for implementing P2P Header Clients. Also includes implementations /// of a Linear and a Parallel downloader generic over the [`Consensus`] and /// [`HeadersClient`]. 
diff --git a/crates/interfaces/src/test_utils/bodies.rs b/crates/interfaces/src/test_utils/bodies.rs
new file mode 100644
index 0000000000..802b216739
--- /dev/null
+++ b/crates/interfaces/src/test_utils/bodies.rs
@@ -0,0 +1,33 @@
+use crate::p2p::bodies::{client::BodiesClient, error::BodiesClientError};
+use async_trait::async_trait;
+use reth_eth_wire::BlockBody;
+use reth_primitives::H256;
+use std::fmt::{Debug, Formatter};
+
+/// A test client for fetching bodies
+pub struct TestBodiesClient<F>
+where
+    F: Fn(H256) -> Result<BlockBody, BodiesClientError>,
+{
+    /// The function that is called on each body request.
+    pub responder: F,
+}
+
+impl<F> Debug for TestBodiesClient<F>
+where
+    F: Fn(H256) -> Result<BlockBody, BodiesClientError>,
+{
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("TestBodiesClient").finish()
+    }
+}
+
+#[async_trait]
+impl<F> BodiesClient for TestBodiesClient<F>
+where
+    F: Fn(H256) -> Result<BlockBody, BodiesClientError> + Send + Sync,
+{
+    async fn get_block_body(&self, hash: H256) -> Result<BlockBody, BodiesClientError> {
+        (self.responder)(hash)
+    }
+}
diff --git a/crates/interfaces/src/test_utils/generators.rs b/crates/interfaces/src/test_utils/generators.rs
new file mode 100644
index 0000000000..c8c8489f82
--- /dev/null
+++ b/crates/interfaces/src/test_utils/generators.rs
@@ -0,0 +1,142 @@
+use rand::{thread_rng, Rng};
+use reth_primitives::{
+    proofs, Address, BlockLocked, Bytes, Header, SealedHeader, Signature, Transaction,
+    TransactionKind, TransactionSigned, H256, U256,
+};
+
+// TODO(onbjerg): Maybe we should split this off to its own crate, or move the helpers to the
+// relevant crates?
+
+/// Generates a range of random [SealedHeader]s.
+///
+/// The parent hash of the first header
+/// in the result will be equal to `head`.
+///
+/// The headers are assumed to not be correct if validated.
+pub fn random_header_range(rng: std::ops::Range<u64>, head: H256) -> Vec<SealedHeader> {
+    let mut headers = Vec::with_capacity(rng.end.saturating_sub(rng.start) as usize);
+    for idx in rng {
+        headers.push(random_header(
+            idx,
+            Some(headers.last().map(|h: &SealedHeader| h.hash()).unwrap_or(head)),
+        ));
+    }
+    headers
+}
+
+/// Generate a random [SealedHeader].
+///
+/// The header is assumed to not be correct if validated.
+pub fn random_header(number: u64, parent: Option<H256>) -> SealedHeader {
+    let header = reth_primitives::Header {
+        number,
+        nonce: rand::random(),
+        difficulty: U256::from(rand::random::<u32>()),
+        parent_hash: parent.unwrap_or_default(),
+        ..Default::default()
+    };
+    header.seal()
+}
+
+/// Generates a random legacy [Transaction].
+///
+/// Every field is random, except:
+///
+/// - The chain ID, which is always 1
+/// - The input, which is always nothing
+pub fn random_tx() -> Transaction {
+    Transaction::Legacy {
+        chain_id: Some(1),
+        nonce: rand::random::<u16>().into(),
+        gas_price: rand::random::<u16>().into(),
+        gas_limit: rand::random::<u16>().into(),
+        to: TransactionKind::Call(Address::random()),
+        value: rand::random::<u16>().into(),
+        input: Bytes::default(),
+    }
+}
+
+/// Generates a random legacy [Transaction] that is signed.
+/// +/// On top of the considerations of [gen_random_tx], these apply as well: +/// +/// - There is no guarantee that the nonce is not used twice for the same account +pub fn random_signed_tx() -> TransactionSigned { + let tx = random_tx(); + let hash = tx.signature_hash(); + TransactionSigned { + transaction: tx, + hash, + signature: Signature { + // TODO + r: Default::default(), + s: Default::default(), + odd_y_parity: false, + }, + } +} + +/// Generate a random block filled with a random number of signed transactions (generated using +/// [random_signed_tx]). +/// +/// All fields use the default values (and are assumed to be invalid) except for: +/// +/// - `parent_hash` +/// - `transactions_root` +/// - `ommers_hash` +/// +/// Additionally, `gas_used` and `gas_limit` always exactly match the total `gas_limit` of all +/// transactions in the block. +/// +/// The ommer headers are not assumed to be valid. +pub fn random_block(number: u64, parent: Option) -> BlockLocked { + let mut rng = thread_rng(); + + // Generate transactions + let transactions: Vec = + (0..rand::random::()).into_iter().map(|_| random_signed_tx()).collect(); + let total_gas = transactions.iter().fold(0, |sum, tx| sum + tx.transaction.gas_limit()); + + // Generate ommers + let mut ommers = Vec::new(); + for _ in 0..rng.gen_range(0..2) { + ommers.push(random_header(number, parent).unseal()); + } + + // Calculate roots + let transactions_root = proofs::calculate_transaction_root(transactions.iter()); + let ommers_hash = proofs::calculate_ommers_root(ommers.iter()); + + BlockLocked { + header: Header { + parent_hash: parent.unwrap_or_default(), + number, + gas_used: total_gas, + gas_limit: total_gas, + transactions_root, + ommers_hash, + ..Default::default() + } + .seal(), + body: transactions, + ommers: ommers.into_iter().map(|ommer| ommer.seal()).collect(), + ..Default::default() + } +} + +/// Generate a range of random blocks. +/// +/// The parent hash of the first block +/// in the result will be equal to `head`. +/// +/// See [random_block] for considerations when validating the generated blocks. +pub fn random_block_range(rng: std::ops::Range, head: H256) -> Vec { + let mut blocks = Vec::with_capacity(rng.end.saturating_sub(rng.start) as usize); + for idx in rng { + blocks.push(random_block( + idx, + Some(blocks.last().map(|block: &BlockLocked| block.header.hash()).unwrap_or(head)), + )); + } + blocks +} diff --git a/crates/interfaces/src/test_utils/headers.rs b/crates/interfaces/src/test_utils/headers.rs index ddd9c0d138..e0f73fbe54 100644 --- a/crates/interfaces/src/test_utils/headers.rs +++ b/crates/interfaces/src/test_utils/headers.rs @@ -1,24 +1,25 @@ //! Testing support for headers related interfaces. use crate::{ - consensus::{self, Consensus}, + consensus::{self, Consensus, Error}, p2p::headers::{ client::{HeadersClient, HeadersRequest, HeadersResponse, HeadersStream}, - downloader::{DownloadError, Downloader}, + downloader::HeaderDownloader, + error::DownloadError, }, }; -use reth_primitives::{Header, SealedHeader, H256, H512, U256}; +use reth_primitives::{BlockLocked, Header, SealedHeader, H256, H512}; use reth_rpc_types::engine::ForkchoiceState; use std::{collections::HashSet, sync::Arc, time::Duration}; use tokio::sync::{broadcast, mpsc, watch}; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; -#[derive(Debug)] /// A test downloader which just returns the values that have been pushed to it. 
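A sketch of how these generators are meant to be consumed in stage tests; `genesis_hash` is a placeholder:

```rust
// `random_block_range` chains blocks by parent hash, so this linkage
// invariant holds even though the blocks would fail full validation.
let blocks = random_block_range(1..10, genesis_hash);
assert!(blocks
    .windows(2)
    .all(|pair| pair[1].header.parent_hash == pair[0].header.hash()));
```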
-pub struct TestDownloader { +#[derive(Debug)] +pub struct TestHeaderDownloader { result: Result, DownloadError>, } -impl TestDownloader { +impl TestHeaderDownloader { /// Instantiates the downloader with the mock responses pub fn new(result: Result, DownloadError>) -> Self { Self { result } @@ -26,7 +27,7 @@ impl TestDownloader { } #[async_trait::async_trait] -impl Downloader for TestDownloader { +impl HeaderDownloader for TestHeaderDownloader { type Consensus = TestConsensus; type Client = TestHeadersClient; @@ -51,8 +52,8 @@ impl Downloader for TestDownloader { } } -#[derive(Debug)] /// A test client for fetching headers +#[derive(Debug)] pub struct TestHeadersClient { req_tx: mpsc::Sender<(u64, HeadersRequest)>, req_rx: Arc>>, @@ -109,7 +110,7 @@ impl HeadersClient for TestHeadersClient { } } -/// Consensus client impl for testing +/// Consensus engine implementation for testing #[derive(Debug)] pub struct TestConsensus { /// Watcher over the forkchoice state @@ -132,14 +133,14 @@ impl Default for TestConsensus { } impl TestConsensus { - /// Update the forkchoice state + /// Update the fork choice state pub fn update_tip(&self, tip: H256) { let state = ForkchoiceState { head_block_hash: tip, finalized_block_hash: H256::zero(), safe_block_hash: H256::zero(), }; - self.channel.0.send(state).expect("updating forkchoice state failed"); + self.channel.0.send(state).expect("updating fork choice state failed"); } /// Update the validation flag @@ -165,29 +166,12 @@ impl Consensus for TestConsensus { Ok(()) } } -} -/// Generate a range of random header. The parent hash of the first header -/// in the result will be equal to head -pub fn gen_random_header_range(rng: std::ops::Range, head: H256) -> Vec { - let mut headers = Vec::with_capacity(rng.end.saturating_sub(rng.start) as usize); - for idx in rng { - headers.push(gen_random_header( - idx, - Some(headers.last().map(|h: &SealedHeader| h.hash()).unwrap_or(head)), - )); + fn pre_validate_block(&self, _block: &BlockLocked) -> Result<(), Error> { + if self.fail_validation { + Err(consensus::Error::BaseFeeMissing) + } else { + Ok(()) + } } - headers -} - -/// Generate a random header -pub fn gen_random_header(number: u64, parent: Option) -> SealedHeader { - let header = reth_primitives::Header { - number, - nonce: rand::random(), - difficulty: U256::from(rand::random::()), - parent_hash: parent.unwrap_or_default(), - ..Default::default() - }; - header.seal() } diff --git a/crates/interfaces/src/test_utils/mod.rs b/crates/interfaces/src/test_utils/mod.rs index 7f034b6597..415fccb745 100644 --- a/crates/interfaces/src/test_utils/mod.rs +++ b/crates/interfaces/src/test_utils/mod.rs @@ -1,5 +1,10 @@ mod api; +mod bodies; mod headers; +/// Generators for different data structures like block headers, block bodies and ranges of those. 
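For context, a hypothetical use of the closure-based `TestBodiesClient` added above, inside an async test (the closure body is illustrative only):

use reth_eth_wire::BlockBody;
use reth_interfaces::{p2p::bodies::client::BodiesClient, test_utils::TestBodiesClient};
use reth_primitives::H256;

// Every request succeeds with an empty body; a failure case could
// return Err(BodiesClientError::Timeout { header_hash: hash }) instead.
let client = TestBodiesClient {
    responder: |_hash: H256| Ok(BlockBody { transactions: vec![], ommers: vec![] }),
};
let body = client.get_block_body(H256::zero()).await.unwrap();
assert!(body.transactions.is_empty());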
+pub mod generators; + pub use api::TestApi; +pub use bodies::*; pub use headers::*; diff --git a/crates/net/bodies-downloaders/Cargo.toml b/crates/net/bodies-downloaders/Cargo.toml new file mode 100644 index 0000000000..c54b263f90 --- /dev/null +++ b/crates/net/bodies-downloaders/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "reth-bodies-downloaders" +version = "0.1.0" +edition = "2021" +license = "MIT OR Apache-2.0" +repository = "https://github.com/foundry-rs/reth" +readme = "README.md" +description = "Implementations of various block body downloaders" + +[dependencies] +futures-util = "0.3.25" +reth-interfaces = { path = "../../interfaces" } +reth-primitives = { path = "../../primitives" } +reth-eth-wire = { path= "../eth-wire" } +[dev-dependencies] +assert_matches = "1.5.0" +once_cell = "1.15.0" +rand = "0.8.5" +reth-interfaces = { path = "../../interfaces", features = ["test-utils"] } +tokio = { version = "1.21.2", features = ["full"] } +serial_test = "0.9.0" diff --git a/crates/net/bodies-downloaders/src/concurrent.rs b/crates/net/bodies-downloaders/src/concurrent.rs new file mode 100644 index 0000000000..3fa66297c5 --- /dev/null +++ b/crates/net/bodies-downloaders/src/concurrent.rs @@ -0,0 +1,132 @@ +use futures_util::{stream, StreamExt, TryFutureExt}; +use reth_interfaces::p2p::bodies::{ + client::BodiesClient, + downloader::{BodiesStream, BodyDownloader}, + error::{BodiesClientError, DownloadError}, +}; +use reth_primitives::{BlockNumber, H256}; +use std::{sync::Arc, time::Duration}; + +/// Downloads bodies in batches. +/// +/// All blocks in a batch are fetched at the same time. +#[derive(Debug)] +pub struct ConcurrentDownloader { + /// The bodies client + client: Arc, + /// The batch size per one request + pub batch_size: usize, + /// A single request timeout + pub request_timeout: Duration, + /// The number of retries for downloading + pub request_retries: usize, +} + +impl BodyDownloader for ConcurrentDownloader { + type Client = C; + + /// The request timeout duration + fn timeout(&self) -> Duration { + self.request_timeout + } + + /// The block bodies client + fn client(&self) -> &Self::Client { + &self.client + } + + fn bodies_stream<'a, 'b, I>(&'a self, headers: I) -> BodiesStream<'a> + where + I: IntoIterator, + ::IntoIter: Send + 'b, + 'b: 'a, + { + // TODO: Retry + Box::pin( + stream::iter(headers.into_iter().map(|(block_number, header_hash)| { + { + self.client + .get_block_body(*header_hash) + .map_ok(move |body| (*block_number, *header_hash, body)) + .map_err(|err| match err { + BodiesClientError::Timeout { header_hash } => { + DownloadError::Timeout { header_hash } + } + err => DownloadError::Client { source: err }, + }) + } + })) + .buffered(self.batch_size), + ) + } +} + +/// A [ConcurrentDownloader] builder. 
+#[derive(Debug)] +pub struct ConcurrentDownloaderBuilder { + /// The batch size per one request + batch_size: usize, + /// A single request timeout + request_timeout: Duration, + /// The number of retries for downloading + request_retries: usize, +} + +impl Default for ConcurrentDownloaderBuilder { + fn default() -> Self { + Self { batch_size: 100, request_timeout: Duration::from_millis(100), request_retries: 5 } + } +} + +impl ConcurrentDownloaderBuilder { + /// Set the request batch size + pub fn batch_size(mut self, size: usize) -> Self { + self.batch_size = size; + self + } + + /// Set the request timeout + pub fn timeout(mut self, timeout: Duration) -> Self { + self.request_timeout = timeout; + self + } + + /// Set the number of retries per request + pub fn retries(mut self, retries: usize) -> Self { + self.request_retries = retries; + self + } + + /// Build [ConcurrentDownloader] with the provided client + pub fn build(self, client: Arc) -> ConcurrentDownloader { + ConcurrentDownloader { + client, + batch_size: self.batch_size, + request_timeout: self.request_timeout, + request_retries: self.request_retries, + } + } +} + +#[cfg(test)] +mod tests { + #[tokio::test] + #[ignore] + async fn emits_bodies_in_order() {} + + #[tokio::test] + #[ignore] + async fn header_iter_failure() {} + + #[tokio::test] + #[ignore] + async fn client_failure() {} + + #[tokio::test] + #[ignore] + async fn retries_requests() {} + + #[tokio::test] + #[ignore] + async fn timeout() {} +} diff --git a/crates/net/bodies-downloaders/src/lib.rs b/crates/net/bodies-downloaders/src/lib.rs new file mode 100644 index 0000000000..e9f0fb6c39 --- /dev/null +++ b/crates/net/bodies-downloaders/src/lib.rs @@ -0,0 +1,11 @@ +#![warn(missing_docs, unreachable_pub)] +#![deny(unused_must_use, rust_2018_idioms)] +#![doc(test( + no_crate_inject, + attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) +))] + +//! Implements body downloader algorithms. + +/// A naive concurrent downloader. +pub mod concurrent; diff --git a/crates/net/ecies/src/lib.rs b/crates/net/ecies/src/lib.rs index ab53207c48..2b6ccbf9ab 100644 --- a/crates/net/ecies/src/lib.rs +++ b/crates/net/ecies/src/lib.rs @@ -34,7 +34,7 @@ pub enum EgressECIESValue { #[derive(Clone, Debug, PartialEq, Eq)] /// Raw ingress values for an ECIES protocol pub enum IngressECIESValue { - /// Receiving a message from a [`peerId`] + /// Receiving a message from a [`PeerId`] AuthReceive(PeerId), /// Receiving an ACK message Ack, diff --git a/crates/net/eth-wire/src/types/blocks.rs b/crates/net/eth-wire/src/types/blocks.rs index 935b7f559a..7f90bf6c90 100644 --- a/crates/net/eth-wire/src/types/blocks.rs +++ b/crates/net/eth-wire/src/types/blocks.rs @@ -108,6 +108,7 @@ impl From> for GetBlockBodies { } } +// TODO(onbjerg): We should have this type in primitives /// A response to [`GetBlockBodies`], containing bodies if any bodies were found. 
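A sketch of wiring the builder above to some `BodiesClient` implementation (`client` is an assumed value; the defaults being overridden are a batch size of 100, a 100ms timeout, and 5 retries):

use std::{sync::Arc, time::Duration};

// Tune the downloader for smaller batches and faster failure.
let downloader = ConcurrentDownloaderBuilder::default()
    .batch_size(32)
    .timeout(Duration::from_millis(50))
    .retries(3)
    .build(Arc::new(client));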
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable)] pub struct BlockBody { diff --git a/crates/net/headers-downloaders/src/linear.rs b/crates/net/headers-downloaders/src/linear.rs index cebdcdd679..b52718d0ed 100644 --- a/crates/net/headers-downloaders/src/linear.rs +++ b/crates/net/headers-downloaders/src/linear.rs @@ -5,7 +5,8 @@ use reth_interfaces::{ consensus::Consensus, p2p::headers::{ client::{HeadersClient, HeadersStream}, - downloader::{DownloadError, Downloader}, + downloader::HeaderDownloader, + error::DownloadError, }, }; use reth_primitives::{rpc::BlockId, SealedHeader}; @@ -27,7 +28,7 @@ pub struct LinearDownloader { } #[async_trait] -impl Downloader for LinearDownloader { +impl HeaderDownloader for LinearDownloader { type Consensus = C; type Client = H; @@ -161,11 +162,6 @@ impl Default for LinearDownloadBuilder { } impl LinearDownloadBuilder { - /// Initialize a new builder - pub fn new() -> Self { - Self::default() - } - /// Set the request batch size pub fn batch_size(mut self, size: u64) -> Self { self.batch_size = size; @@ -207,7 +203,8 @@ mod tests { use reth_interfaces::{ p2p::headers::client::HeadersRequest, test_utils::{ - gen_random_header, gen_random_header_range, TestConsensus, TestHeadersClient, + generators::{random_header, random_header_range}, + TestConsensus, TestHeadersClient, }, }; use reth_primitives::{rpc::BlockId, SealedHeader}; @@ -233,7 +230,7 @@ mod tests { let retries = 5; let (tx, rx) = oneshot::channel(); tokio::spawn(async move { - let downloader = LinearDownloadBuilder::new() + let downloader = LinearDownloadBuilder::default() .retries(retries) .build(CONSENSUS.clone(), CLIENT.clone()); let result = @@ -257,7 +254,7 @@ mod tests { let retries = 5; let (tx, rx) = oneshot::channel(); tokio::spawn(async move { - let downloader = LinearDownloadBuilder::new() + let downloader = LinearDownloadBuilder::default() .retries(retries) .build(CONSENSUS.clone(), CLIENT.clone()); let result = @@ -286,14 +283,14 @@ mod tests { #[tokio::test] #[serial] async fn download_propagates_consensus_validation_error() { - let tip_parent = gen_random_header(1, None); - let tip = gen_random_header(2, Some(tip_parent.hash())); + let tip_parent = random_header(1, None); + let tip = random_header(2, Some(tip_parent.hash())); let tip_hash = tip.hash(); let (tx, rx) = oneshot::channel(); tokio::spawn(async move { let downloader = - LinearDownloadBuilder::new().build(CONSENSUS_FAIL.clone(), CLIENT.clone()); + LinearDownloadBuilder::default().build(CONSENSUS_FAIL.clone(), CLIENT.clone()); let forkchoice = ForkchoiceState { head_block_hash: tip_hash, ..Default::default() }; let result = downloader.download(&SealedHeader::default(), &forkchoice).await; tx.send(result).expect("failed to forward download response"); @@ -322,14 +319,15 @@ mod tests { #[tokio::test] #[serial] async fn download_starts_with_chain_tip() { - let head = gen_random_header(1, None); - let tip = gen_random_header(2, Some(head.hash())); + let head = random_header(1, None); + let tip = random_header(2, Some(head.hash())); let tip_hash = tip.hash(); let chain_head = head.clone(); let (tx, mut rx) = oneshot::channel(); tokio::spawn(async move { - let downloader = LinearDownloadBuilder::new().build(CONSENSUS.clone(), CLIENT.clone()); + let downloader = + LinearDownloadBuilder::default().build(CONSENSUS.clone(), CLIENT.clone()); let forkchoice = ForkchoiceState { head_block_hash: tip_hash, ..Default::default() }; let result = downloader.download(&chain_head, &forkchoice).await; 
tx.send(result).expect("failed to forward download response"); @@ -359,15 +357,16 @@ mod tests { #[serial] async fn download_returns_headers_desc() { let (start, end) = (100, 200); - let head = gen_random_header(start, None); - let mut headers = gen_random_header_range(start + 1..end, head.hash()); + let head = random_header(start, None); + let mut headers = random_header_range(start + 1..end, head.hash()); headers.reverse(); let tip_hash = headers.first().unwrap().hash(); let chain_head = head.clone(); let (tx, rx) = oneshot::channel(); tokio::spawn(async move { - let downloader = LinearDownloadBuilder::new().build(CONSENSUS.clone(), CLIENT.clone()); + let downloader = + LinearDownloadBuilder::default().build(CONSENSUS.clone(), CLIENT.clone()); let forkchoice = ForkchoiceState { head_block_hash: tip_hash, ..Default::default() }; let result = downloader.download(&chain_head, &forkchoice).await; tx.send(result).expect("failed to forward download response"); diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 4418823a36..1406750d35 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -34,7 +34,11 @@ hex = "0.4" hex-literal = "0.3" derive_more = "0.99" - +# proof related +triehash = "0.8" +# See to replace hashers to simplify libraries +plain_hasher = "0.2" +hash-db = "0.15" [dev-dependencies] arbitrary = { version = "1.1.7", features = ["derive"]} diff --git a/crates/primitives/src/error.rs b/crates/primitives/src/error.rs index 2360eeb8c0..1918399165 100644 --- a/crates/primitives/src/error.rs +++ b/crates/primitives/src/error.rs @@ -4,8 +4,8 @@ use thiserror::Error; /// Primitives error type. #[derive(Debug, Error)] pub enum Error { - /// Input provided is invalid. - #[error("Input provided is invalid.")] + /// The provided input is invalid. + #[error("The provided input is invalid.")] InvalidInput, /// Failed to deserialize data into type. #[error("Failed to deserialize data into type.")] diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index 42da926041..4aa8c26a35 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -1,4 +1,7 @@ -use crate::{BlockHash, BlockNumber, Bloom, H160, H256, U256}; +use crate::{ + proofs::{EMPTY_LIST_HASH, EMPTY_ROOT}, + BlockHash, BlockNumber, Bloom, H160, H256, U256, +}; use bytes::{BufMut, BytesMut}; use ethers_core::{types::H64, utils::keccak256}; use reth_codecs::main_codec; @@ -7,7 +10,7 @@ use std::ops::Deref; /// Block header #[main_codec] -#[derive(Debug, Clone, PartialEq, Eq, Default, Hash)] +#[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Header { /// The Keccak 256-bit hash of the parent /// block’s header, in its entirety; formally Hp. @@ -64,6 +67,29 @@ pub struct Header { pub base_fee_per_gas: Option, } +impl Default for Header { + fn default() -> Self { + Header { + parent_hash: Default::default(), + ommers_hash: EMPTY_LIST_HASH, + beneficiary: Default::default(), + state_root: EMPTY_ROOT, + transactions_root: EMPTY_ROOT, + receipts_root: EMPTY_ROOT, + logs_bloom: Default::default(), + difficulty: Default::default(), + number: 0, + gas_limit: 0, + gas_used: 0, + timestamp: 0, + extra_data: Default::default(), + mix_hash: Default::default(), + nonce: 0, + base_fee_per_gas: None, + } + } +} + impl Header { /// Heavy function that will calculate hash of data and will *not* save the change to metadata. /// Use [`Header::seal`], [`SealedHeader`] and unlock if you need hash to be persistent. 
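The new `Default` impl for `Header` above can be sanity-checked against the constants it pulls in from `proofs`; a minimal sketch:

use reth_primitives::{
    proofs::{EMPTY_LIST_HASH, EMPTY_ROOT},
    Header,
};

// A default header now describes an empty block: no ommers, no
// transactions, no receipts, and an empty state trie.
let header = Header::default();
assert_eq!(header.ommers_hash, EMPTY_LIST_HASH);
assert_eq!(header.state_root, EMPTY_ROOT);
assert_eq!(header.transactions_root, EMPTY_ROOT);
assert_eq!(header.receipts_root, EMPTY_ROOT);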
@@ -239,6 +265,10 @@ mod tests { gas_used: 0x15b3_u64, timestamp: 0x1a0a_u64, extra_data: Bytes::from_str("7788").unwrap().0, + ommers_hash: H256::zero(), + state_root: H256::zero(), + transactions_root: H256::zero(), + receipts_root: H256::zero(), ..Default::default() }; let mut data = vec![]; @@ -285,6 +315,10 @@ mod tests { gas_used: 0x15b3u64, timestamp: 0x1a0au64, extra_data: Bytes::from_str("7788").unwrap().0, + ommers_hash: H256::zero(), + state_root: H256::zero(), + transactions_root: H256::zero(), + receipts_root: H256::zero(), ..Default::default() }; let header =
::decode(&mut data.as_slice()).unwrap();
diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs
index d70f9253ad..4c172e4d3c 100644
--- a/crates/primitives/src/lib.rs
+++ b/crates/primitives/src/lib.rs
@@ -6,6 +6,8 @@
 ))]
 
 //! Commonly used types in reth.
+//!
+//! This crate contains Ethereum primitive types and helper functions.
 
 mod account;
 mod block;
@@ -23,6 +25,9 @@ mod receipt;
 mod storage;
 mod transaction;
 
+/// Helper functions for calculating Merkle proofs and hashes
+pub mod proofs;
+
 pub use account::Account;
 pub use block::{Block, BlockLocked};
 pub use chain::Chain;
@@ -41,25 +46,24 @@ pub use transaction::{
     TransactionSignedEcRecovered, TxType,
 };
 
-/// Block hash.
+/// A block hash.
 pub type BlockHash = H256;
-/// Block Number is height of chain
+/// A block number.
 pub type BlockNumber = u64;
-/// Ethereum address
+/// An Ethereum address.
 pub type Address = H160;
+// TODO(onbjerg): Is this not the same as [BlockHash]?
 /// BlockId is Keccak hash of the header
 pub type BlockID = H256;
-/// TxHash is Kecack hash of rlp encoded signed transaction
+/// A transaction hash is a keccak hash of an RLP encoded signed transaction.
 pub type TxHash = H256;
-/// TxNumber is sequence number of all existing transactions
+/// The sequence number of all existing transactions.
 pub type TxNumber = u64;
-/// Chain identifier type, introduced in EIP-155
+/// Chain identifier type (introduced in EIP-155).
 pub type ChainId = u64;
-
-/// Storage Key
+/// An account storage key.
 pub type StorageKey = H256;
-
-/// Storage value
+/// An account storage value.
 pub type StorageValue = U256;
 
 // TODO: should we use `PublicKey` for this? Even when dealing with public keys we should try to
diff --git a/crates/consensus/src/proofs.rs b/crates/primitives/src/proofs.rs
similarity index 72%
rename from crates/consensus/src/proofs.rs
rename to crates/primitives/src/proofs.rs
index 964cc8159b..1c8fcb477f 100644
--- a/crates/consensus/src/proofs.rs
+++ b/crates/primitives/src/proofs.rs
@@ -1,26 +1,38 @@
+use crate::{keccak256, Bytes, Header, Log, Receipt, TransactionSigned, H256};
+use ethers_core::utils::rlp::RlpStream;
 use hash_db::Hasher;
+use hex_literal::hex;
 use plain_hasher::PlainHasher;
-use reth_primitives::{Bytes, Header, Log, Receipt, TransactionSigned, H256};
 use reth_rlp::Encodable;
-use rlp::RlpStream;
-use sha3::{Digest, Keccak256};
 use triehash::sec_trie_root;
 
+/// Keccak-256 hash of the RLP of an empty list, KEC("\xc0").
+pub const EMPTY_LIST_HASH: H256 =
+    H256(hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"));
+
+/// Root hash of an empty trie.
+pub const EMPTY_ROOT: H256 =
+    H256(hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"));
+
+/// A [Hasher] that calculates a keccak256 hash of the given data.
 #[derive(Default, Debug, Clone, PartialEq, Eq)]
 struct KeccakHasher;
+
 impl Hasher for KeccakHasher {
     type Out = H256;
     type StdHasher = PlainHasher;
+
     const LENGTH: usize = 32;
+
     fn hash(x: &[u8]) -> Self::Out {
-        let out = Keccak256::digest(x);
-        // TODO make more performant, H256 from slice is not good enought.
-        H256::from_slice(out.as_slice())
+        keccak256(x)
     }
 }
 
-/// Calculate Transaction root. Iterate over transaction and create merkle trie of
-/// (rlp(index),encoded(tx)) pairs.
+/// Calculate a transaction root.
+///
+/// Iterates over the given transactions and computes the merkle trie root of
+/// `(rlp(index), encoded(tx))` pairs.
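Both constants are the keccak256 of a single RLP byte, so they can be re-derived directly; a small check (assuming `keccak256` is reachable from the crate root, as the import above suggests):

use reth_primitives::proofs::{EMPTY_LIST_HASH, EMPTY_ROOT};

// RLP of an empty list is the single byte 0xc0; RLP of an empty
// string is 0x80. Hashing those bytes yields the two constants.
assert_eq!(reth_primitives::keccak256([0xc0]), EMPTY_LIST_HASH);
assert_eq!(reth_primitives::keccak256([0x80]), EMPTY_ROOT);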
pub fn calculate_transaction_root<'a>( transactions: impl IntoIterator, ) -> H256 { @@ -40,7 +52,7 @@ pub fn calculate_transaction_root<'a>( ) } -/// Create receipt root for header +/// Calculates the receipt root for a header. pub fn calculate_receipt_root<'a>(receipts: impl IntoIterator) -> H256 { sec_trie_root::( receipts @@ -57,7 +69,7 @@ pub fn calculate_receipt_root<'a>(receipts: impl IntoIterator(logs: impl IntoIterator) -> H256 { //https://github.com/ethereum/go-ethereum/blob/356bbe343a30789e77bb38f25983c8f2f2bfbb47/cmd/evm/internal/t8ntool/execution.go#L255 let mut stream = RlpStream::new(); @@ -71,11 +83,10 @@ pub fn calculate_log_root<'a>(logs: impl IntoIterator) -> H256 { stream.finalize_unbounded_list(); let out = stream.out().freeze(); - let out = Keccak256::digest(out); - H256::from_slice(out.as_slice()) + keccak256(out) } -/// Calculate hash for ommer/uncle headers +/// Calculates the root hash for ommer/uncle headers. pub fn calculate_ommers_root<'a>(_ommers: impl IntoIterator) -> H256 { // RLP Encode let mut stream = RlpStream::new(); @@ -87,8 +98,7 @@ pub fn calculate_ommers_root<'a>(_ommers: impl IntoIterator) */ stream.finalize_unbounded_list(); let bytes = stream.out().freeze(); - let out = Keccak256::digest(bytes); - H256::from_slice(out.as_slice()) + keccak256(bytes) } // TODO state root diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 13c022a96d..c6953f14ca 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -13,8 +13,9 @@ use reth_rlp::{length_of_length, Decodable, DecodeError, Encodable, Header, EMPT pub use signature::Signature; pub use tx_type::TxType; -/// Raw Transaction. -/// Transaction type is introduced in EIP-2718: https://eips.ethereum.org/EIPS/eip-2718 +/// A raw transaction. +/// +/// Transaction types were introduced in [EIP-2718](https://eips.ethereum.org/EIPS/eip-2718). #[main_codec] #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum Transaction { @@ -49,7 +50,7 @@ pub enum Transaction { /// input data of the message call, formally Td. input: Bytes, }, - /// Transaction with AccessList. https://eips.ethereum.org/EIPS/eip-2930 + /// Transaction with an [`AccessList`] ([EIP-2930](https://eips.ethereum.org/EIPS/eip-2930)). Eip2930 { /// Added as EIP-155: Simple replay attack protection chain_id: ChainId, @@ -86,7 +87,7 @@ pub enum Transaction { /// accessing outside the list. access_list: AccessList, }, - /// Transaction with priority fee. https://eips.ethereum.org/EIPS/eip-1559 + /// A transaction with a priority fee ([EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)). Eip1559 { /// Added as EIP-155: Simple replay attack protection chain_id: u64, @@ -175,6 +176,15 @@ impl Transaction { } } + /// Get the gas limit of the transaction. + pub fn gas_limit(&self) -> u64 { + match self { + Transaction::Legacy { gas_limit, .. } | + Transaction::Eip2930 { gas_limit, .. } | + Transaction::Eip1559 { gas_limit, .. 
} => *gas_limit, + } + } + /// Max fee per gas for eip1559 transaction, for legacy transactions this is gas_limit pub fn max_fee_per_gas(&self) -> u64 { match self { diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 099d64e8b5..5dcb9688a8 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -18,10 +18,14 @@ tracing-futures = "0.2.5" tokio = { version = "1.21.2", features = ["sync"] } aquamarine = "0.1.12" metrics = "0.20.1" +futures-util = "0.3.25" [dev-dependencies] reth-db = { path = "../db", features = ["test-utils"] } reth-interfaces = { path = "../interfaces", features = ["test-utils"] } +reth-bodies-downloaders = { path = "../net/bodies-downloaders" } +# TODO(onbjerg): We only need this for [BlockBody] +reth-eth-wire = { path = "../net/eth-wire" } reth-headers-downloaders = { path = "../net/headers-downloaders" } tokio = { version = "*", features = ["rt", "sync", "macros"] } tokio-stream = "0.1.10" diff --git a/crates/stages/src/error.rs b/crates/stages/src/error.rs index 522457917d..9ab07435ce 100644 --- a/crates/stages/src/error.rs +++ b/crates/stages/src/error.rs @@ -1,5 +1,5 @@ use crate::pipeline::PipelineEvent; -use reth_interfaces::db::Error as DbError; +use reth_interfaces::{consensus, db::Error as DbError}; use reth_primitives::{BlockNumber, H256}; use thiserror::Error; use tokio::sync::mpsc::error::SendError; @@ -8,12 +8,13 @@ use tokio::sync::mpsc::error::SendError; #[derive(Error, Debug)] pub enum StageError { /// The stage encountered a state validation error. - /// - /// TODO: This depends on the consensus engine and should include the validation failure reason - #[error("Stage encountered a validation error in block {block}.")] + #[error("Stage encountered a validation error in block {block}: {error}.")] Validation { /// The block that failed validation. block: BlockNumber, + /// The underlying consensus error. + #[source] + error: consensus::Error, }, /// The stage encountered a database error. #[error("An internal database error occurred.")] @@ -30,34 +31,41 @@ pub enum StageError { /// The sender stage error #[derive(Error, Debug)] pub enum DatabaseIntegrityError { - /// Cannonical hash is missing from db - #[error("no cannonical hash for block #{number}")] - CannonicalHash { + // TODO(onbjerg): What's the difference between this and the one below? + /// The canonical hash for a block is missing from the database. + #[error("No canonical hash for block #{number}")] + CanonicalHash { /// The block number key number: BlockNumber, }, - /// Cannonical header is missing from db - #[error("no cannonical hash for block #{number}")] - CannonicalHeader { + /// The canonical header for a block is missing from the database. + #[error("No canonical hash for block #{number}")] + CanonicalHeader { /// The block number key number: BlockNumber, }, - /// Header is missing from db - #[error("no header for block #{number} ({hash})")] + /// A header is missing from the database. + #[error("No header for block #{number} ({hash})")] Header { /// The block number key number: BlockNumber, /// The block hash key hash: H256, }, - /// Cumulative transaction count is missing from db - #[error("no cumulative tx count for ${number} ({hash})")] + /// The cumulative transaction count is missing from the database. + #[error("No cumulative tx count for ${number} ({hash})")] CumulativeTxCount { /// The block number key number: BlockNumber, /// The block hash key hash: H256, }, + /// A block body is missing. 
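With the `#[source]` error added above, call sites construct and render the validation failure like this (a sketch; `consensus::Error::BaseFeeMissing` is the variant the tests in this patch use):

use reth_interfaces::consensus;

// The error message now names the underlying consensus failure:
// "Stage encountered a validation error in block 5: ...".
let err = StageError::Validation {
    block: 5,
    error: consensus::Error::BaseFeeMissing,
};
println!("{err}");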
+ #[error("Block body not found for block #{number}")] + BlockBody { + /// The block number key + number: BlockNumber, + }, } /// A pipeline execution error. diff --git a/crates/stages/src/pipeline.rs b/crates/stages/src/pipeline.rs index 74914d73ac..f3581c2d2f 100644 --- a/crates/stages/src/pipeline.rs +++ b/crates/stages/src/pipeline.rs @@ -341,8 +341,8 @@ impl QueuedStage { Err(err) => { state.events_sender.send(PipelineEvent::Error { stage_id }).await?; - return if let StageError::Validation { block } = err { - debug!(stage = %stage_id, bad_block = %block, "Stage encountered a validation error."); + return if let StageError::Validation { block, error } = err { + debug!(stage = %stage_id, bad_block = %block, "Stage encountered a validation error: {error}"); // We unwind because of a validation error. If the unwind itself fails, // we bail entirely, otherwise we restart the execution loop from the @@ -362,13 +362,13 @@ impl QueuedStage { #[cfg(test)] mod tests { - use super::*; use crate::{StageId, UnwindOutput}; use reth_db::{ kv::{test_utils, Env, EnvKind}, mdbx::{self, WriteMap}, }; + use reth_interfaces::consensus; use tokio::sync::mpsc::channel; use tokio_stream::{wrappers::ReceiverStream, StreamExt}; use utils::TestStage; @@ -520,7 +520,10 @@ mod tests { ) .push( TestStage::new(StageId("B")) - .add_exec(Err(StageError::Validation { block: 5 })) + .add_exec(Err(StageError::Validation { + block: 5, + error: consensus::Error::BaseFeeMissing, + })) .add_unwind(Ok(UnwindOutput { stage_progress: 0 })) .add_exec(Ok(ExecOutput { stage_progress: 10, diff --git a/crates/stages/src/stages/bodies.rs b/crates/stages/src/stages/bodies.rs new file mode 100644 index 0000000000..d7916e5871 --- /dev/null +++ b/crates/stages/src/stages/bodies.rs @@ -0,0 +1,842 @@ +use crate::{ + DatabaseIntegrityError, ExecInput, ExecOutput, Stage, StageError, StageId, UnwindInput, + UnwindOutput, +}; +use futures_util::TryStreamExt; +use reth_interfaces::{ + consensus::Consensus, + db::{ + models::StoredBlockBody, tables, DBContainer, Database, DatabaseGAT, DbCursorRO, + DbCursorRW, DbTx, DbTxMut, + }, + p2p::bodies::downloader::BodyDownloader, +}; +use reth_primitives::{ + proofs::{EMPTY_LIST_HASH, EMPTY_ROOT}, + BlockLocked, BlockNumber, SealedHeader, H256, +}; +use std::fmt::Debug; +use tracing::warn; + +const BODIES: StageId = StageId("Bodies"); + +// TODO(onbjerg): Metrics and events (gradual status for e.g. CLI) +/// The body stage downloads block bodies. +/// +/// The body stage downloads block bodies for all block headers stored locally in the database. +/// +/// # Empty blocks +/// +/// Blocks with an ommers hash corresponding to no ommers *and* a transaction root corresponding to +/// no transactions will not have a block body downloaded for them, since it would be meaningless to +/// do so. +/// +/// This also means that if there is no body for the block in the database (assuming the +/// block number <= the synced block of this stage), then the block can be considered empty. +/// +/// # Tables +/// +/// The bodies are processed and data is inserted into these tables: +/// +/// - [`BlockBodies`][reth_interfaces::db::tables::BlockBodies] +/// - [`Transactions`][reth_interfaces::db::tables::Transactions] +/// +/// # Genesis +/// +/// This stage expects that the genesis has been inserted into the appropriate tables: +/// +/// - The header tables (see [HeadersStage][crate::stages::headers::HeadersStage]) +/// - The various indexes (e.g. 
[TotalTxIndex][crate::stages::tx_index::TxIndex]) +/// - The [`BlockBodies`][reth_interfaces::db::tables::BlockBodies] table +#[derive(Debug)] +pub struct BodyStage { + /// The body downloader. + pub downloader: D, + /// The consensus engine. + pub consensus: C, + /// The maximum amount of block bodies to process in one stage execution. + /// + /// Smaller batch sizes result in less memory usage, but more disk I/O. Larger batch sizes + /// result in more memory usage, less disk I/O, and more infrequent checkpoints. + pub batch_size: u64, +} + +#[async_trait::async_trait] +impl Stage for BodyStage { + /// Return the id of the stage + fn id(&self) -> StageId { + BODIES + } + + /// Download block bodies from the last checkpoint for this stage up until the latest synced + /// header, limited by the stage's batch size. + async fn execute( + &mut self, + db: &mut DBContainer<'_, DB>, + input: ExecInput, + ) -> Result { + let tx = db.get_mut(); + + let previous_stage_progress = + input.previous_stage.as_ref().map(|(_, block)| *block).unwrap_or_default(); + if previous_stage_progress == 0 { + warn!("The body stage seems to be running first, no work can be completed."); + } + + // The block we ended at last sync, and the one we are starting on now + let previous_block = input.stage_progress.unwrap_or_default(); + let starting_block = previous_block + 1; + + // Short circuit in case we already reached the target block + let target = previous_stage_progress.min(starting_block + self.batch_size); + if target <= previous_block { + return Ok(ExecOutput { stage_progress: target, reached_tip: true, done: true }) + } + + let bodies_to_download = self.bodies_to_download::(tx, starting_block, target)?; + + // Cursors used to write bodies and transactions + let mut bodies_cursor = tx.cursor_mut::()?; + let mut tx_cursor = tx.cursor_mut::()?; + let mut base_tx_id = bodies_cursor + .last()? + .map(|(_, body)| body.base_tx_id + body.tx_amount) + .ok_or(DatabaseIntegrityError::BlockBody { number: starting_block })?; + + // Cursor used to look up headers for block pre-validation + let mut header_cursor = tx.cursor::()?; + + // NOTE(onbjerg): The stream needs to live here otherwise it will just create a new iterator + // on every iteration of the while loop -_- + let mut bodies_stream = self.downloader.bodies_stream(bodies_to_download.iter()); + let mut highest_block = previous_block; + while let Some((block_number, header_hash, body)) = + bodies_stream.try_next().await.map_err(|err| StageError::Internal(err.into()))? + { + // Fetch the block header for pre-validation + let block = BlockLocked { + header: SealedHeader::new( + header_cursor + .seek_exact((block_number, header_hash).into())? + .ok_or(DatabaseIntegrityError::Header { + number: block_number, + hash: header_hash, + })? 
+ .1, + header_hash, + ), + body: body.transactions, + // TODO: We should have a type w/o receipts probably, no reason to allocate here + receipts: vec![], + ommers: body.ommers.into_iter().map(|header| header.seal()).collect(), + }; + + // Pre-validate the block and unwind if it is invalid + self.consensus + .pre_validate_block(&block) + .map_err(|err| StageError::Validation { block: block_number, error: err })?; + + // Write block + bodies_cursor.append( + (block_number, header_hash).into(), + StoredBlockBody { + base_tx_id, + tx_amount: block.body.len() as u64, + ommers: block.ommers.into_iter().map(|header| header.unseal()).collect(), + }, + )?; + + // Write transactions + for transaction in block.body { + tx_cursor.append(base_tx_id, transaction)?; + base_tx_id += 1; + } + + highest_block = block_number; + } + + // The stage is "done" if: + // - We got fewer blocks than our target + // - We reached our target and the target was not limited by the batch size of the stage + let capped = target < previous_stage_progress; + let done = highest_block < target || !capped; + + Ok(ExecOutput { stage_progress: highest_block, reached_tip: true, done }) + } + + /// Unwind the stage. + async fn unwind( + &mut self, + db: &mut DBContainer<'_, DB>, + input: UnwindInput, + ) -> Result> { + let tx = db.get_mut(); + let mut block_body_cursor = tx.cursor_mut::()?; + let mut transaction_cursor = tx.cursor_mut::()?; + + let mut entry = block_body_cursor.last()?; + while let Some((key, body)) = entry { + if key.number() <= input.unwind_to { + break + } + + for num in 0..body.tx_amount { + let tx_id = body.base_tx_id + num; + if transaction_cursor.seek_exact(tx_id)?.is_some() { + transaction_cursor.delete_current()?; + } + } + + block_body_cursor.delete_current()?; + entry = block_body_cursor.prev()?; + } + + Ok(UnwindOutput { stage_progress: input.unwind_to }) + } +} + +impl BodyStage { + /// Computes a list of `(block_number, header_hash)` for blocks that we need to download bodies + /// for. + /// + /// This skips empty blocks (i.e. no ommers, no transactions). + fn bodies_to_download( + &self, + tx: &mut >::TXMut, + starting_block: BlockNumber, + target: BlockNumber, + ) -> Result, StageError> { + let mut header_cursor = tx.cursor::()?; + let mut header_hashes_cursor = tx.cursor::()?; + let mut walker = header_hashes_cursor + .walk(starting_block)? + .take_while(|item| item.as_ref().map_or(false, |(num, _)| *num <= target)); + + let mut bodies_to_download = Vec::new(); + while let Some(Ok((block_number, header_hash))) = walker.next() { + let header = header_cursor + .seek_exact((block_number, header_hash).into())? + .ok_or(DatabaseIntegrityError::Header { number: block_number, hash: header_hash })? + .1; + if header.ommers_hash == EMPTY_LIST_HASH && header.transactions_root == EMPTY_ROOT { + continue + } + + bodies_to_download.push((block_number, header_hash)); + } + + Ok(bodies_to_download) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::util::test_utils::StageTestRunner; + use assert_matches::assert_matches; + use reth_eth_wire::BlockBody; + use reth_interfaces::{ + consensus, + p2p::bodies::error::DownloadError, + test_utils::generators::{random_block, random_block_range}, + }; + use reth_primitives::{BlockNumber, H256}; + use std::collections::HashMap; + use test_utils::*; + + /// Check that the execution is short-circuited if the database is empty. 
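The empty-block rule used by `bodies_to_download` above is easy to state in isolation; a sketch mirroring the stage's check:

use reth_primitives::{
    proofs::{EMPTY_LIST_HASH, EMPTY_ROOT},
    Header,
};

// A block whose header commits to no ommers and no transactions needs
// no body download; the stage skips it entirely.
fn is_empty_block(header: &Header) -> bool {
    header.ommers_hash == EMPTY_LIST_HASH && header.transactions_root == EMPTY_ROOT
}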
+ #[tokio::test] + async fn empty_db() { + let runner = BodyTestRunner::new(TestBodyDownloader::default); + let rx = runner.execute(ExecInput::default()); + assert_matches!( + rx.await.unwrap(), + Ok(ExecOutput { stage_progress: 0, reached_tip: true, done: true }) + ) + } + + /// Check that the execution is short-circuited if the target was already reached. + #[tokio::test] + async fn already_reached_target() { + let runner = BodyTestRunner::new(TestBodyDownloader::default); + let rx = runner.execute(ExecInput { + previous_stage: Some((StageId("Headers"), 100)), + stage_progress: Some(100), + }); + assert_matches!( + rx.await.unwrap(), + Ok(ExecOutput { stage_progress: 100, reached_tip: true, done: true }) + ) + } + + /// Checks that the stage downloads at most `batch_size` blocks. + #[tokio::test] + async fn partial_body_download() { + // Generate blocks + let blocks = random_block_range(1..200, GENESIS_HASH); + let bodies: HashMap> = + blocks.iter().map(body_by_hash).collect(); + let mut runner = BodyTestRunner::new(|| TestBodyDownloader::new(bodies.clone())); + + // Set the batch size (max we sync per stage execution) to less than the number of blocks + // the previous stage synced (10 vs 20) + runner.set_batch_size(10); + + // Insert required state + runner.insert_genesis().expect("Could not insert genesis block"); + runner + .insert_headers(blocks.iter().map(|block| &block.header)) + .expect("Could not insert headers"); + + // Run the stage + let rx = runner.execute(ExecInput { + previous_stage: Some((StageId("Headers"), blocks.len() as BlockNumber)), + stage_progress: None, + }); + + // Check that we only synced around `batch_size` blocks even though the number of blocks + // synced by the previous stage is higher + let output = rx.await.unwrap(); + assert_matches!( + output, + Ok(ExecOutput { stage_progress, reached_tip: true, done: false }) if stage_progress < 200 + ); + runner + .validate_db_blocks(output.unwrap().stage_progress) + .expect("Written block data invalid"); + } + + /// Same as [partial_body_download] except the `batch_size` is not hit. 
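The capped-target arithmetic this test exercises (from `execute` above) can be modeled in a few lines; an editor's sketch with names mirroring the stage code:

// previous_block: checkpoint of this stage; previous_stage_progress:
// highest header synced by the headers stage.
fn body_stage_target(previous_block: u64, previous_stage_progress: u64, batch_size: u64) -> u64 {
    let starting_block = previous_block + 1;
    previous_stage_progress.min(starting_block + batch_size)
}

// With 199 synced headers and a batch size of 10, the first run stops
// at block 11 and reports done: false, as asserted above.
assert_eq!(body_stage_target(0, 199, 10), 11);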
+ #[tokio::test] + async fn full_body_download() { + // Generate blocks #1-20 + let blocks = random_block_range(1..21, GENESIS_HASH); + let bodies: HashMap> = + blocks.iter().map(body_by_hash).collect(); + let mut runner = BodyTestRunner::new(|| TestBodyDownloader::new(bodies.clone())); + + // Set the batch size to more than what the previous stage synced (40 vs 20) + runner.set_batch_size(40); + + // Insert required state + runner.insert_genesis().expect("Could not insert genesis block"); + runner + .insert_headers(blocks.iter().map(|block| &block.header)) + .expect("Could not insert headers"); + + // Run the stage + let rx = runner.execute(ExecInput { + previous_stage: Some((StageId("Headers"), blocks.len() as BlockNumber)), + stage_progress: None, + }); + + // Check that we synced all blocks successfully, even though our `batch_size` allows us to + // sync more (if there were more headers) + let output = rx.await.unwrap(); + assert_matches!( + output, + Ok(ExecOutput { stage_progress: 20, reached_tip: true, done: true }) + ); + runner + .validate_db_blocks(output.unwrap().stage_progress) + .expect("Written block data invalid"); + } + + /// Same as [full_body_download] except we have made progress before + #[tokio::test] + async fn sync_from_previous_progress() { + // Generate blocks #1-20 + let blocks = random_block_range(1..21, GENESIS_HASH); + let bodies: HashMap> = + blocks.iter().map(body_by_hash).collect(); + let runner = BodyTestRunner::new(|| TestBodyDownloader::new(bodies.clone())); + + // Insert required state + runner.insert_genesis().expect("Could not insert genesis block"); + runner + .insert_headers(blocks.iter().map(|block| &block.header)) + .expect("Could not insert headers"); + + // Run the stage + let rx = runner.execute(ExecInput { + previous_stage: Some((StageId("Headers"), blocks.len() as BlockNumber)), + stage_progress: None, + }); + + // Check that we synced at least 10 blocks + let first_run = rx.await.unwrap(); + assert_matches!( + first_run, + Ok(ExecOutput { stage_progress, reached_tip: true, done: false }) if stage_progress >= 10 + ); + let first_run_progress = first_run.unwrap().stage_progress; + + // Execute again on top of the previous run + let rx = runner.execute(ExecInput { + previous_stage: Some((StageId("Headers"), blocks.len() as BlockNumber)), + stage_progress: Some(first_run_progress), + }); + + // Check that we synced more blocks + let output = rx.await.unwrap(); + assert_matches!( + output, + Ok(ExecOutput { stage_progress, reached_tip: true, done: true }) if stage_progress > first_run_progress + ); + runner + .validate_db_blocks(output.unwrap().stage_progress) + .expect("Written block data invalid"); + } + + /// Checks that the stage asks to unwind if pre-validation of the block fails. 
+ #[tokio::test] + async fn pre_validation_failure() { + // Generate blocks #1-19 + let blocks = random_block_range(1..20, GENESIS_HASH); + let bodies: HashMap> = + blocks.iter().map(body_by_hash).collect(); + let mut runner = BodyTestRunner::new(|| TestBodyDownloader::new(bodies.clone())); + + // Fail validation + runner.set_fail_validation(true); + + // Insert required state + runner.insert_genesis().expect("Could not insert genesis block"); + runner + .insert_headers(blocks.iter().map(|block| &block.header)) + .expect("Could not insert headers"); + + // Run the stage + let rx = runner.execute(ExecInput { + previous_stage: Some((StageId("Headers"), blocks.len() as BlockNumber)), + stage_progress: None, + }); + + // Check that the error bubbles up + assert_matches!( + rx.await.unwrap(), + Err(StageError::Validation { block: 1, error: consensus::Error::BaseFeeMissing }) + ); + } + + /// Checks that the stage unwinds correctly with no data. + #[tokio::test] + async fn unwind_empty_db() { + let unwind_to = 10; + let runner = BodyTestRunner::new(TestBodyDownloader::default); + let rx = runner.unwind(UnwindInput { bad_block: None, stage_progress: 20, unwind_to }); + assert_matches!( + rx.await.unwrap(), + Ok(UnwindOutput { stage_progress }) if stage_progress == unwind_to + ) + } + + /// Checks that the stage unwinds correctly with data. + #[tokio::test] + async fn unwind() { + // Generate blocks #1-20 + let blocks = random_block_range(1..21, GENESIS_HASH); + let bodies: HashMap> = + blocks.iter().map(body_by_hash).collect(); + let mut runner = BodyTestRunner::new(|| TestBodyDownloader::new(bodies.clone())); + + // Set the batch size to more than what the previous stage synced (40 vs 20) + runner.set_batch_size(40); + + // Insert required state + runner.insert_genesis().expect("Could not insert genesis block"); + runner + .insert_headers(blocks.iter().map(|block| &block.header)) + .expect("Could not insert headers"); + + // Run the stage + let rx = runner.execute(ExecInput { + previous_stage: Some((StageId("Headers"), blocks.len() as BlockNumber)), + stage_progress: None, + }); + + // Check that we synced all blocks successfully, even though our `batch_size` allows us to + // sync more (if there were more headers) + let output = rx.await.unwrap(); + assert_matches!( + output, + Ok(ExecOutput { stage_progress: 20, reached_tip: true, done: true }) + ); + let stage_progress = output.unwrap().stage_progress; + runner.validate_db_blocks(stage_progress).expect("Written block data invalid"); + + // Unwind all of it + let unwind_to = 1; + let rx = runner.unwind(UnwindInput { bad_block: None, stage_progress, unwind_to }); + assert_matches!( + rx.await.unwrap(), + Ok(UnwindOutput { stage_progress }) if stage_progress == 1 + ); + + let last_body = runner.last_body().expect("Could not read last body"); + let last_tx_id = last_body.base_tx_id + last_body.tx_amount; + runner + .db() + .check_no_entry_above::(unwind_to, |key| key.number()) + .expect("Did not unwind block bodies correctly."); + runner + .db() + .check_no_entry_above::(last_tx_id, |key| key) + .expect("Did not unwind transactions correctly.") + } + + /// Checks that the stage unwinds correctly, even if a transaction in a block is missing. 
+ #[tokio::test] + async fn unwind_missing_tx() { + // Generate blocks #1-20 + let blocks = random_block_range(1..21, GENESIS_HASH); + let bodies: HashMap> = + blocks.iter().map(body_by_hash).collect(); + let mut runner = BodyTestRunner::new(|| TestBodyDownloader::new(bodies.clone())); + + // Set the batch size to more than what the previous stage synced (40 vs 20) + runner.set_batch_size(40); + + // Insert required state + runner.insert_genesis().expect("Could not insert genesis block"); + runner + .insert_headers(blocks.iter().map(|block| &block.header)) + .expect("Could not insert headers"); + + // Run the stage + let rx = runner.execute(ExecInput { + previous_stage: Some((StageId("Headers"), blocks.len() as BlockNumber)), + stage_progress: None, + }); + + // Check that we synced all blocks successfully, even though our `batch_size` allows us to + // sync more (if there were more headers) + let output = rx.await.unwrap(); + assert_matches!( + output, + Ok(ExecOutput { stage_progress: 20, reached_tip: true, done: true }) + ); + let stage_progress = output.unwrap().stage_progress; + runner.validate_db_blocks(stage_progress).expect("Written block data invalid"); + + // Delete a transaction + { + let mut db = runner.db().container(); + let mut tx_cursor = db + .get_mut() + .cursor_mut::() + .expect("Could not get transaction cursor"); + tx_cursor + .last() + .expect("Could not read database") + .expect("Could not read last transaction"); + tx_cursor.delete_current().expect("Could not delete last transaction"); + db.commit().expect("Could not commit database"); + } + + // Unwind all of it + let unwind_to = 1; + let rx = runner.unwind(UnwindInput { bad_block: None, stage_progress, unwind_to }); + assert_matches!( + rx.await.unwrap(), + Ok(UnwindOutput { stage_progress }) if stage_progress == 1 + ); + + let last_body = runner.last_body().expect("Could not read last body"); + let last_tx_id = last_body.base_tx_id + last_body.tx_amount; + runner + .db() + .check_no_entry_above::(unwind_to, |key| key.number()) + .expect("Did not unwind block bodies correctly."); + runner + .db() + .check_no_entry_above::(last_tx_id, |key| key) + .expect("Did not unwind transactions correctly.") + } + + /// Checks that the stage exits if the downloader times out + /// TODO: We should probably just exit as "OK", commit the blocks we downloaded successfully and + /// try again? 
+ #[tokio::test] + async fn downloader_timeout() { + // Generate a header + let header = random_block(1, Some(GENESIS_HASH)).header; + let runner = BodyTestRunner::new(|| { + TestBodyDownloader::new(HashMap::from([( + header.hash(), + Err(DownloadError::Timeout { header_hash: header.hash() }), + )])) + }); + + // Insert required state + runner.insert_genesis().expect("Could not insert genesis block"); + runner.insert_header(&header).expect("Could not insert header"); + + // Run the stage + let rx = runner.execute(ExecInput { + previous_stage: Some((StageId("Headers"), 1)), + stage_progress: None, + }); + + // Check that the error bubbles up + assert_matches!(rx.await.unwrap(), Err(StageError::Internal(_))); + } + + mod test_utils { + use crate::{ + stages::bodies::BodyStage, + util::test_utils::{StageTestDB, StageTestRunner}, + }; + use assert_matches::assert_matches; + use async_trait::async_trait; + use reth_eth_wire::BlockBody; + use reth_interfaces::{ + db, + db::{ + models::{BlockNumHash, StoredBlockBody}, + tables, DbCursorRO, DbTx, DbTxMut, + }, + p2p::bodies::{ + client::BodiesClient, + downloader::{BodiesStream, BodyDownloader}, + error::{BodiesClientError, DownloadError}, + }, + test_utils::TestConsensus, + }; + use reth_primitives::{ + BigEndianHash, BlockLocked, BlockNumber, Header, SealedHeader, H256, U256, + }; + use std::{collections::HashMap, ops::Deref, time::Duration}; + + /// The block hash of the genesis block. + pub(crate) const GENESIS_HASH: H256 = H256::zero(); + + /// A helper to create a collection of resulted-wrapped block bodies keyed by their hash. + pub(crate) fn body_by_hash( + block: &BlockLocked, + ) -> (H256, Result) { + ( + block.hash(), + Ok(BlockBody { + transactions: block.body.clone(), + ommers: block.ommers.iter().cloned().map(|ommer| ommer.unseal()).collect(), + }), + ) + } + + /// A helper struct for running the [BodyStage]. + pub(crate) struct BodyTestRunner + where + F: Fn() -> TestBodyDownloader, + { + downloader_builder: F, + db: StageTestDB, + batch_size: u64, + fail_validation: bool, + } + + impl BodyTestRunner + where + F: Fn() -> TestBodyDownloader, + { + /// Build a new test runner. + pub(crate) fn new(downloader_builder: F) -> Self { + BodyTestRunner { + downloader_builder, + db: StageTestDB::default(), + batch_size: 10, + fail_validation: false, + } + } + + pub(crate) fn set_batch_size(&mut self, batch_size: u64) { + self.batch_size = batch_size; + } + + pub(crate) fn set_fail_validation(&mut self, fail_validation: bool) { + self.fail_validation = fail_validation; + } + } + + impl StageTestRunner for BodyTestRunner + where + F: Fn() -> TestBodyDownloader, + { + type S = BodyStage; + + fn db(&self) -> &StageTestDB { + &self.db + } + + fn stage(&self) -> Self::S { + let mut consensus = TestConsensus::default(); + consensus.set_fail_validation(self.fail_validation); + + BodyStage { + downloader: (self.downloader_builder)(), + consensus, + batch_size: self.batch_size, + } + } + } + + impl BodyTestRunner + where + F: Fn() -> TestBodyDownloader, + { + /// Insert the genesis block into the appropriate tables + /// + /// The genesis block always has no transactions and no ommers, and it always has the + /// same hash. 
+ pub(crate) fn insert_genesis(&self) -> Result<(), db::Error> { + self.insert_header(&SealedHeader::new(Header::default(), GENESIS_HASH))?; + let mut db = self.db.container(); + let tx = db.get_mut(); + tx.put::( + (0, GENESIS_HASH).into(), + StoredBlockBody { base_tx_id: 0, tx_amount: 0, ommers: vec![] }, + )?; + db.commit()?; + + Ok(()) + } + + /// Insert header into tables + pub(crate) fn insert_header(&self, header: &SealedHeader) -> Result<(), db::Error> { + self.insert_headers(std::iter::once(header)) + } + + /// Insert headers into tables + pub(crate) fn insert_headers<'a, I>(&self, headers: I) -> Result<(), db::Error> + where + I: Iterator, + { + let headers = headers.collect::>(); + self.db + .map_put::(&headers, |h| (h.hash(), h.number))?; + self.db.map_put::(&headers, |h| { + (BlockNumHash((h.number, h.hash())), h.deref().clone().unseal()) + })?; + self.db.map_put::(&headers, |h| { + (h.number, h.hash()) + })?; + + self.db.transform_append::(&headers, |prev, h| { + let prev_td = U256::from_big_endian(&prev.clone().unwrap_or_default()); + ( + BlockNumHash((h.number, h.hash())), + H256::from_uint(&(prev_td + h.difficulty)).as_bytes().to_vec(), + ) + })?; + + Ok(()) + } + + pub(crate) fn last_body(&self) -> Option { + Some( + self.db() + .container() + .get() + .cursor::() + .ok()? + .last() + .ok()?? + .1, + ) + } + + /// Validate that the inserted block data is valid + pub(crate) fn validate_db_blocks( + &self, + highest_block: BlockNumber, + ) -> Result<(), db::Error> { + let db = self.db.container(); + let tx = db.get(); + + let mut block_body_cursor = tx.cursor::()?; + let mut transaction_cursor = tx.cursor::()?; + + let mut entry = block_body_cursor.first()?; + let mut prev_max_tx_id = 0; + while let Some((key, body)) = entry { + assert!( + key.number() <= highest_block, + "We wrote a block body outside of our synced range. Found block with number {}, highest block according to stage is {}", + key.number(), highest_block + ); + + assert!(prev_max_tx_id == body.base_tx_id, "Transaction IDs are malformed."); + for num in 0..body.tx_amount { + let tx_id = body.base_tx_id + num; + assert_matches!( + transaction_cursor.seek_exact(tx_id), + Ok(Some(_)), + "A transaction is missing." + ); + } + prev_max_tx_id = body.base_tx_id + body.tx_amount; + entry = block_body_cursor.next()?; + } + + Ok(()) + } + } + + // TODO(onbjerg): Move + /// A [BodiesClient] that should not be called. + #[derive(Debug)] + pub(crate) struct NoopClient; + + #[async_trait] + impl BodiesClient for NoopClient { + async fn get_block_body(&self, _: H256) -> Result { + panic!("Noop client should not be called") + } + } + + // TODO(onbjerg): Move + /// A [BodyDownloader] that is backed by an internal [HashMap] for testing. 
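How the tests above seed the map-backed downloader defined below, in a condensed sketch (`body_by_hash` and `GENESIS_HASH` come from this module):

use std::collections::HashMap;

// One canned Result per block hash; the downloader looks responses up
// by hash when the stage asks for bodies.
let blocks = random_block_range(1..21, GENESIS_HASH);
let bodies: HashMap<_, _> = blocks.iter().map(body_by_hash).collect();
let downloader = TestBodyDownloader::new(bodies);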
+ #[derive(Debug, Default)] + pub(crate) struct TestBodyDownloader { + responses: HashMap>, + } + + impl TestBodyDownloader { + pub(crate) fn new(responses: HashMap>) -> Self { + Self { responses } + } + } + + impl BodyDownloader for TestBodyDownloader { + type Client = NoopClient; + + fn timeout(&self) -> Duration { + unreachable!() + } + + fn client(&self) -> &Self::Client { + unreachable!() + } + + fn bodies_stream<'a, 'b, I>(&'a self, hashes: I) -> BodiesStream<'a> + where + I: IntoIterator, + ::IntoIter: Send + 'b, + 'b: 'a, + { + Box::pin(futures_util::stream::iter(hashes.into_iter().map( + |(block_number, hash)| { + Ok(( + *block_number, + *hash, + self.responses + .get(hash) + .expect("Stage tried downloading a block we do not have.") + .clone()?, + )) + }, + ))) + } + } + } +} diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index f24c81547c..a20fed002f 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -9,10 +9,7 @@ use reth_interfaces::{ models::blocks::BlockNumHash, tables, DBContainer, Database, DatabaseGAT, DbCursorRO, DbCursorRW, DbTx, DbTxMut, }, - p2p::headers::{ - client::HeadersClient, - downloader::{DownloadError, Downloader}, - }, + p2p::headers::{client::HeadersClient, downloader::HeaderDownloader, error::DownloadError}, }; use reth_primitives::{rpc::BigEndianHash, BlockNumber, SealedHeader, H256, U256}; use std::{fmt::Debug, sync::Arc}; @@ -20,9 +17,19 @@ use tracing::*; const HEADERS: StageId = StageId("Headers"); -/// The headers stage implementation for staged sync +/// The headers stage. +/// +/// The headers stage downloads all block headers from the highest block in the local database to +/// the perceived highest block on the network. +/// +/// The headers are processed and data is inserted into these tables: +/// +/// - [`HeaderNumbers`][reth_interfaces::db::tables::HeaderNumbers] +/// - [`Headers`][reth_interfaces::db::tables::Headers] +/// - [`CanonicalHeaders`][reth_interfaces::db::tables::CanonicalHeaders] +/// - [`HeaderTD`][reth_interfaces::db::tables::HeaderTD] #[derive(Debug)] -pub struct HeaderStage { +pub struct HeaderStage { /// Strategy for downloading the headers pub downloader: D, /// Consensus client implementation @@ -32,7 +39,7 @@ pub struct HeaderStage { } #[async_trait::async_trait] -impl Stage +impl Stage for HeaderStage { /// Return the id of the stage @@ -55,7 +62,7 @@ impl Stage // TODO: handle input.max_block let last_hash = tx .get::(last_block_num)? - .ok_or(DatabaseIntegrityError::CannonicalHash { number: last_block_num })?; + .ok_or(DatabaseIntegrityError::CanonicalHash { number: last_block_num })?; let last_header = tx.get::((last_block_num, last_hash).into())?.ok_or({ DatabaseIntegrityError::Header { number: last_block_num, hash: last_hash } @@ -81,14 +88,15 @@ impl Stage done: false, }) } - DownloadError::HeaderValidation { hash, details } => { - warn!("validation error for header {hash}: {details}"); - return Err(StageError::Validation { block: last_block_num }) + DownloadError::HeaderValidation { hash, error } => { + warn!("Validation error for header {hash}: {error}"); + return Err(StageError::Validation { block: last_block_num, error }) } // TODO: this error is never propagated, clean up - DownloadError::MismatchedHeaders { .. } => { - return Err(StageError::Validation { block: last_block_num }) - } + // DownloadError::MismatchedHeaders { .. 
} => { + // return Err(StageError::Validation { block: last_block_num }) + // } + _ => unreachable!(), }, }; let stage_progress = self.write_headers::(tx, headers).await?.unwrap_or(last_block_num); @@ -116,7 +124,7 @@ impl Stage } } -impl HeaderStage { +impl HeaderStage { async fn update_head( &self, tx: &mut >::TXMut, @@ -124,7 +132,7 @@ impl HeaderStage { ) -> Result<(), StageError> { let hash = tx .get::(height)? - .ok_or(DatabaseIntegrityError::CannonicalHeader { number: height })?; + .ok_or(DatabaseIntegrityError::CanonicalHeader { number: height })?; let td: Vec = tx.get::((height, hash).into())?.unwrap(); // TODO: self.client.update_status(height, hash, H256::from_slice(&td)).await; Ok(()) @@ -184,31 +192,36 @@ mod tests { use super::*; use crate::util::test_utils::StageTestRunner; use assert_matches::assert_matches; - use reth_interfaces::test_utils::{gen_random_header, gen_random_header_range}; - use test_utils::{HeadersTestRunner, TestDownloader}; + use reth_interfaces::{ + consensus, + test_utils::{ + generators::{random_header, random_header_range}, + TestHeaderDownloader, + }, + }; + use test_utils::HeadersTestRunner; const TEST_STAGE: StageId = StageId("Headers"); + /// Check that the execution errors on empty database or + /// prev progress missing from the database. #[tokio::test] - // Check that the execution errors on empty database or - // prev progress missing from the database. async fn execute_empty_db() { let runner = HeadersTestRunner::default(); let rx = runner.execute(ExecInput::default()); assert_matches!( rx.await.unwrap(), - Err(StageError::DatabaseIntegrity(DatabaseIntegrityError::CannonicalHeader { .. })) + Err(StageError::DatabaseIntegrity(DatabaseIntegrityError::CanonicalHeader { .. })) ); } + /// Check that the execution exits on downloader timeout. #[tokio::test] - // Check that the execution exits on downloader timeout. async fn execute_timeout() { - let head = gen_random_header(0, None); - let runner = - HeadersTestRunner::with_downloader(TestDownloader::new(Err(DownloadError::Timeout { - request_id: 0, - }))); + let head = random_header(0, None); + let runner = HeadersTestRunner::with_downloader(TestHeaderDownloader::new(Err( + DownloadError::Timeout { request_id: 0 }, + ))); runner.insert_header(&head).expect("failed to insert header"); let rx = runner.execute(ExecInput::default()); @@ -216,30 +229,33 @@ mod tests { assert_matches!(rx.await.unwrap(), Ok(ExecOutput { done, .. }) if !done); } + /// Check that validation error is propagated during the execution. #[tokio::test] - // Check that validation error is propagated during the execution. 
async fn execute_validation_error() { - let head = gen_random_header(0, None); - let runner = HeadersTestRunner::with_downloader(TestDownloader::new(Err( - DownloadError::HeaderValidation { hash: H256::zero(), details: "".to_owned() }, + let head = random_header(0, None); + let runner = HeadersTestRunner::with_downloader(TestHeaderDownloader::new(Err( + DownloadError::HeaderValidation { + hash: H256::zero(), + error: consensus::Error::BaseFeeMissing, + }, ))); runner.insert_header(&head).expect("failed to insert header"); let rx = runner.execute(ExecInput::default()); runner.consensus.update_tip(H256::from_low_u64_be(1)); - assert_matches!(rx.await.unwrap(), Err(StageError::Validation { block }) if block == 0); + assert_matches!(rx.await.unwrap(), Err(StageError::Validation { block, error: consensus::Error::BaseFeeMissing, }) if block == 0); } + /// Validate that all necessary tables are updated after the + /// header download on no previous progress. #[tokio::test] - // Validate that all necessary tables are updated after the - // header download on no previous progress. async fn execute_no_progress() { let (start, end) = (0, 100); - let head = gen_random_header(start, None); - let headers = gen_random_header_range(start + 1..end, head.hash()); + let head = random_header(start, None); + let headers = random_header_range(start + 1..end, head.hash()); let result = headers.iter().rev().cloned().collect::>(); - let runner = HeadersTestRunner::with_downloader(TestDownloader::new(Ok(result))); + let runner = HeadersTestRunner::with_downloader(TestHeaderDownloader::new(Ok(result))); runner.insert_header(&head).expect("failed to insert header"); let rx = runner.execute(ExecInput::default()); @@ -251,19 +267,19 @@ mod tests { Ok(ExecOutput { done, reached_tip, stage_progress }) if done && reached_tip && stage_progress == tip.number ); - assert!(headers.iter().try_for_each(|h| runner.validate_db_header(&h)).is_ok()); + assert!(headers.iter().try_for_each(|h| runner.validate_db_header(h)).is_ok()); } + /// Validate that all necessary tables are updated after the + /// header download with some previous progress. #[tokio::test] - // Validate that all necessary tables are updated after the - // header download with some previous progress. 
async fn execute_prev_progress() { let (start, end) = (10000, 10241); - let head = gen_random_header(start, None); - let headers = gen_random_header_range(start + 1..end, head.hash()); + let head = random_header(start, None); + let headers = random_header_range(start + 1..end, head.hash()); let result = headers.iter().rev().cloned().collect::>(); - let runner = HeadersTestRunner::with_downloader(TestDownloader::new(Ok(result))); + let runner = HeadersTestRunner::with_downloader(TestHeaderDownloader::new(Ok(result))); runner.insert_header(&head).expect("failed to insert header"); let rx = runner.execute(ExecInput { @@ -278,15 +294,15 @@ mod tests { Ok(ExecOutput { done, reached_tip, stage_progress }) if done && reached_tip && stage_progress == tip.number ); - assert!(headers.iter().try_for_each(|h| runner.validate_db_header(&h)).is_ok()); + assert!(headers.iter().try_for_each(|h| runner.validate_db_header(h)).is_ok()); } + /// Execute the stage with linear downloader #[tokio::test] - // Execute the stage with linear downloader async fn execute_with_linear_downloader() { let (start, end) = (1000, 1200); - let head = gen_random_header(start, None); - let headers = gen_random_header_range(start + 1..end, head.hash()); + let head = random_header(start, None); + let headers = random_header_range(start + 1..end, head.hash()); let runner = HeadersTestRunner::with_linear_downloader(); runner.insert_header(&head).expect("failed to insert header"); @@ -315,11 +331,11 @@ mod tests { Ok(ExecOutput { done, reached_tip, stage_progress }) if done && reached_tip && stage_progress == tip.number ); - assert!(headers.iter().try_for_each(|h| runner.validate_db_header(&h)).is_ok()); + assert!(headers.iter().try_for_each(|h| runner.validate_db_header(h)).is_ok()); } + /// Check that unwind does not panic on empty database. #[tokio::test] - // Check that unwind does not panic on empty database. 
async fn unwind_empty_db() { let unwind_to = 100; let runner = HeadersTestRunner::default(); @@ -331,13 +347,13 @@ mod tests { ); } + /// Check that unwind can remove headers across gaps #[tokio::test] - // Check that unwind can remove headers across gaps async fn unwind_db_gaps() { let runner = HeadersTestRunner::default(); - let head = gen_random_header(0, None); - let first_range = gen_random_header_range(1..20, head.hash()); - let second_range = gen_random_header_range(50..100, H256::zero()); + let head = random_header(0, None); + let first_range = random_header_range(1..20, head.hash()); + let second_range = random_header_range(50..100, H256::zero()); runner.insert_header(&head).expect("failed to insert header"); runner .insert_headers(first_range.iter().chain(second_range.iter())) @@ -374,36 +390,34 @@ mod tests { stages::headers::HeaderStage, util::test_utils::{StageTestDB, StageTestRunner}, }; - use async_trait::async_trait; use reth_headers_downloaders::linear::{LinearDownloadBuilder, LinearDownloader}; use reth_interfaces::{ - consensus::ForkchoiceState, db::{self, models::blocks::BlockNumHash, tables, DbTx}, - p2p::headers::downloader::{DownloadError, Downloader}, - test_utils::{TestConsensus, TestHeadersClient}, + p2p::headers::downloader::HeaderDownloader, + test_utils::{TestConsensus, TestHeaderDownloader, TestHeadersClient}, }; use reth_primitives::{rpc::BigEndianHash, SealedHeader, H256, U256}; - use std::{ops::Deref, sync::Arc, time::Duration}; + use std::{ops::Deref, sync::Arc}; - pub(crate) struct HeadersTestRunner { + pub(crate) struct HeadersTestRunner { pub(crate) consensus: Arc, pub(crate) client: Arc, downloader: Arc, db: StageTestDB, } - impl Default for HeadersTestRunner { + impl Default for HeadersTestRunner { fn default() -> Self { Self { client: Arc::new(TestHeadersClient::default()), consensus: Arc::new(TestConsensus::default()), - downloader: Arc::new(TestDownloader::new(Ok(Vec::default()))), + downloader: Arc::new(TestHeaderDownloader::new(Ok(Vec::default()))), db: StageTestDB::default(), } } } - impl StageTestRunner for HeadersTestRunner { + impl StageTestRunner for HeadersTestRunner { type S = HeaderStage, TestConsensus, TestHeadersClient>; fn db(&self) -> &StageTestDB { @@ -423,13 +437,14 @@ mod tests { pub(crate) fn with_linear_downloader() -> Self { let client = Arc::new(TestHeadersClient::default()); let consensus = Arc::new(TestConsensus::default()); - let downloader = - Arc::new(LinearDownloadBuilder::new().build(consensus.clone(), client.clone())); + let downloader = Arc::new( + LinearDownloadBuilder::default().build(consensus.clone(), client.clone()), + ); Self { client, consensus, downloader, db: StageTestDB::default() } } } - impl HeadersTestRunner { + impl HeadersTestRunner { pub(crate) fn with_downloader(downloader: D) -> Self { HeadersTestRunner { client: Arc::new(TestHeadersClient::default()), @@ -501,42 +516,5 @@ mod tests { Ok(()) } } - - #[derive(Debug)] - pub(crate) struct TestDownloader { - result: Result, DownloadError>, - } - - impl TestDownloader { - pub(crate) fn new(result: Result, DownloadError>) -> Self { - Self { result } - } - } - - #[async_trait] - impl Downloader for TestDownloader { - type Consensus = TestConsensus; - type Client = TestHeadersClient; - - fn timeout(&self) -> Duration { - Duration::from_secs(1) - } - - fn consensus(&self) -> &Self::Consensus { - unimplemented!() - } - - fn client(&self) -> &Self::Client { - unimplemented!() - } - - async fn download( - &self, - _: &SealedHeader, - _: &ForkchoiceState, - ) -> 
Result, DownloadError> { - self.result.clone() - } - } } } diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index 2eb1b34831..b75b4e2695 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -1,3 +1,5 @@ +/// The bodies stage. +pub mod bodies; /// The headers stage. pub mod headers; /// The cumulative transaction index stage. diff --git a/crates/stages/src/stages/tx_index.rs b/crates/stages/src/stages/tx_index.rs index a2d986ba71..64ac970212 100644 --- a/crates/stages/src/stages/tx_index.rs +++ b/crates/stages/src/stages/tx_index.rs @@ -37,13 +37,13 @@ impl Stage for TxIndex { let last_block = input.stage_progress.unwrap_or_default(); let last_hash = tx .get::(last_block)? - .ok_or(DatabaseIntegrityError::CannonicalHeader { number: last_block })?; + .ok_or(DatabaseIntegrityError::CanonicalHeader { number: last_block })?; // The start block for this iteration let start_block = last_block + 1; let start_hash = tx .get::(start_block)? - .ok_or(DatabaseIntegrityError::CannonicalHeader { number: start_block })?; + .ok_or(DatabaseIntegrityError::CanonicalHeader { number: start_block })?; // The maximum block that this stage should insert to let max_block = input.previous_stage.as_ref().map(|(_, block)| *block).unwrap_or_default(); @@ -65,8 +65,8 @@ impl Stage for TxIndex { // Aggregate and insert cumulative transaction count for each block number for entry in entries { - let (key, tx_count) = entry?; - count += tx_count as u64; + let (key, body) = entry?; + count += body.tx_amount; cursor.append(key, count)?; } @@ -89,7 +89,7 @@ mod tests { use super::*; use crate::util::test_utils::{StageTestDB, StageTestRunner}; use assert_matches::assert_matches; - use reth_interfaces::{db::models::BlockNumHash, test_utils::gen_random_header_range}; + use reth_interfaces::{db::models::BlockNumHash, test_utils::generators::random_header_range}; use reth_primitives::H256; const TEST_STAGE: StageId = StageId("PrevStage"); @@ -100,14 +100,14 @@ mod tests { let rx = runner.execute(ExecInput::default()); assert_matches!( rx.await.unwrap(), - Err(StageError::DatabaseIntegrity(DatabaseIntegrityError::CannonicalHeader { .. })) + Err(StageError::DatabaseIntegrity(DatabaseIntegrityError::CanonicalHeader { .. 
})) ); } #[tokio::test] async fn execute_no_prev_tx_count() { let runner = TxIndexTestRunner::default(); - let headers = gen_random_header_range(0..10, H256::zero()); + let headers = random_header_range(0..10, H256::zero()); runner .db() .map_put::(&headers, |h| (h.number, h.hash())) @@ -129,7 +129,7 @@ mod tests { async fn execute() { let runner = TxIndexTestRunner::default(); let (start, pivot, end) = (0, 100, 200); - let headers = gen_random_header_range(start..end, H256::zero()); + let headers = random_header_range(start..end, H256::zero()); runner .db() .map_put::(&headers, |h| (h.number, h.hash())) @@ -170,7 +170,7 @@ mod tests { #[tokio::test] async fn unwind_no_input() { let runner = TxIndexTestRunner::default(); - let headers = gen_random_header_range(0..10, H256::zero()); + let headers = random_header_range(0..10, H256::zero()); runner .db() .transform_append::(&headers, |prev, h| { @@ -195,8 +195,8 @@ mod tests { #[tokio::test] async fn unwind_with_db_gaps() { let runner = TxIndexTestRunner::default(); - let first_range = gen_random_header_range(0..20, H256::zero()); - let second_range = gen_random_header_range(50..100, H256::zero()); + let first_range = random_header_range(0..20, H256::zero()); + let second_range = random_header_range(50..100, H256::zero()); runner .db() .transform_append::( diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index ffd23ded0e..8a80d6d478 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -67,8 +67,8 @@ //! The final `TransactionPool` is made up of two layers: //! //! The lowest layer is the actual pool implementations that manages (validated) transactions: -//! [`TxPool`](crate::pool::TxPool). This is contained in a higher level pool type that guards the -//! low level pool and handles additional listeners or metrics: +//! [`TxPool`](crate::pool::txpool::TxPool). This is contained in a higher level pool type that +//! guards the low level pool and handles additional listeners or metrics: //! [`PoolInner`](crate::pool::PoolInner) //! //! 
The transaction pool will be used by separate consumers (RPC, P2P), to make sharing easier, the From b60ced1de1b8f7d949717ee06f690056e9689f7d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 15 Nov 2022 10:34:28 +0100 Subject: [PATCH 5/9] feat: add missing message handling (#200) * feat: add missing message handling * refactor: new block message handling * feat: add events and commands for transaction handling * more work in transactions * chore: silence warnings --- Cargo.lock | 10 ++ crates/interfaces/src/p2p/error.rs | 2 + crates/net/network/Cargo.toml | 1 + crates/net/network/src/cache.rs | 58 +++++++ crates/net/network/src/config.rs | 19 ++- crates/net/network/src/fetch.rs | 185 +++++++++++++++++++---- crates/net/network/src/import.rs | 42 +++++ crates/net/network/src/lib.rs | 2 + crates/net/network/src/manager.rs | 157 +++++++++++++------ crates/net/network/src/message.rs | 30 ++-- crates/net/network/src/network.rs | 33 +++- crates/net/network/src/session/handle.rs | 10 +- crates/net/network/src/session/mod.rs | 21 ++- crates/net/network/src/state.rs | 93 ++++++++++-- crates/net/network/src/swarm.rs | 32 +++- crates/net/network/src/transactions.rs | 89 ++++++++++- 16 files changed, 655 insertions(+), 129 deletions(-) create mode 100644 crates/net/network/src/cache.rs create mode 100644 crates/net/network/src/import.rs diff --git a/Cargo.lock b/Cargo.lock index 6a6dc2c52a..c077783af8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2155,6 +2155,15 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linked_hash_set" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "lock_api" version = "0.4.9" @@ -3302,6 +3311,7 @@ dependencies = [ "either", "fnv", "futures", + "linked_hash_set", "parking_lot 0.12.1", "pin-project", "rand 0.8.5", diff --git a/crates/interfaces/src/p2p/error.rs b/crates/interfaces/src/p2p/error.rs index 856e5f65e4..d7debf954f 100644 --- a/crates/interfaces/src/p2p/error.rs +++ b/crates/interfaces/src/p2p/error.rs @@ -11,6 +11,8 @@ pub enum RequestError { ChannelClosed, #[error("Not connected to the peer.")] NotConnected, + #[error("Connection to a peer dropped while handling the request.")] + ConnectionDropped, #[error("Capability Message is not supported by remote peer.")] UnsupportedCapability, #[error("Request timed out while awaiting response.")] diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index dfe555aeaf..7a1a7043ab 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -35,6 +35,7 @@ parking_lot = "0.12" async-trait = "0.1" bytes = "1.2" either = "1.8" +linked_hash_set = "0.1" secp256k1 = { version = "0.24", features = [ "global-context", diff --git a/crates/net/network/src/cache.rs b/crates/net/network/src/cache.rs new file mode 100644 index 0000000000..96bdff23a5 --- /dev/null +++ b/crates/net/network/src/cache.rs @@ -0,0 +1,58 @@ +use linked_hash_set::LinkedHashSet; +use std::{borrow::Borrow, hash::Hash, num::NonZeroUsize}; + +/// A minimal LRU cache based on a `LinkedHashSet` with limited capacity. +/// +/// If the length exceeds the set capacity, the oldest element will be removed +/// In the limit, for each element inserted the oldest existing element will be removed. 
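As a quick illustration of the eviction behavior described above, here is a minimal usage sketch (hypothetical test code, written against the `new`/`insert`/`contains` API defined below). Note that, as implemented, eviction fires as soon as the length reaches the configured limit, so a cache created with limit `N` holds at most `N - 1` entries after an insert returns:

    use std::num::NonZeroUsize;

    let mut cache = LruCache::new(NonZeroUsize::new(2).unwrap());
    assert!(cache.insert(1)); // new element, returns true
    assert!(cache.insert(2)); // length reaches the limit, the oldest entry (1) is evicted
    assert!(!cache.contains(&1));
    assert!(cache.contains(&2));
    assert!(!cache.insert(2)); // already present, returns false and nothing is evicted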
+#[derive(Debug, Clone)] +pub struct LruCache { + limit: NonZeroUsize, + inner: LinkedHashSet, +} + +impl LruCache { + /// Creates a new `LruCache` using the given limit + pub fn new(limit: NonZeroUsize) -> Self { + Self { inner: LinkedHashSet::new(), limit } + } + + /// Insert an element into the set. + /// + /// If the element is new (did not exist before [`LruCache::insert()`]) was called, then the + /// given length will be enforced and the oldest element will be removed if the limit was + /// exceeded. + /// + /// If the set did not have this value present, true is returned. + /// If the set did have this value present, false is returned. + pub fn insert(&mut self, entry: T) -> bool { + if self.inner.insert(entry) { + if self.limit.get() == self.inner.len() { + // remove the oldest element in the set + self.inner.pop_front(); + } + return true + } + false + } + + /// Returns `true` if the set contains a value. + pub fn contains(&self, value: &Q) -> bool + where + T: Borrow, + Q: Hash + Eq, + { + self.inner.contains(value) + } +} + +impl Extend for LruCache +where + T: Eq + Hash, +{ + fn extend>(&mut self, iter: I) { + for item in iter.into_iter() { + self.insert(item); + } + } +} diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 4eb0641cec..a999019e21 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -1,4 +1,8 @@ -use crate::{peers::PeersConfig, session::SessionsConfig}; +use crate::{ + import::{BlockImport, NoopBlockImport}, + peers::PeersConfig, + session::SessionsConfig, +}; use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_PORT}; use reth_primitives::{Chain, ForkId, H256}; use secp256k1::SecretKey; @@ -30,6 +34,8 @@ pub struct NetworkConfig { pub chain: Chain, /// Genesis hash of the network pub genesis_hash: H256, + /// The block importer type. + pub block_import: Box, } // === impl NetworkConfig === @@ -82,6 +88,8 @@ pub struct NetworkConfigBuilder { chain: Chain, /// Network genesis hash genesis_hash: H256, + /// The block importer type. + block_import: Box, } // === impl NetworkConfigBuilder === @@ -100,6 +108,7 @@ impl NetworkConfigBuilder { fork_id: None, chain: Chain::Named(reth_primitives::rpc::Chain::Mainnet), genesis_hash: Default::default(), + block_import: Box::::default(), } } @@ -109,6 +118,12 @@ impl NetworkConfigBuilder { self } + /// Sets the [`BlockImport`] type to configure. + pub fn block_import(mut self, block_import: T) -> Self { + self.block_import = Box::new(block_import); + self + } + /// Consumes the type and creates the actual [`NetworkConfig`] pub fn build(self) -> NetworkConfig { let Self { @@ -122,6 +137,7 @@ impl NetworkConfigBuilder { fork_id, chain, genesis_hash, + block_import, } = self; NetworkConfig { client, @@ -138,6 +154,7 @@ impl NetworkConfigBuilder { fork_id, chain, genesis_hash, + block_import, } } } diff --git a/crates/net/network/src/fetch.rs b/crates/net/network/src/fetch.rs index c870b61491..151051b761 100644 --- a/crates/net/network/src/fetch.rs +++ b/crates/net/network/src/fetch.rs @@ -1,10 +1,13 @@ //! Fetch data from the network. 
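Referring back to the `block_import` hook added to the config builder above, a hedged usage sketch follows. The builder's constructor and its generic parameters are elided in this diff, so the signature below (generic over a client type `C`) is an assumption; only `block_import` and `build` come from the patch:

    /// Wire a custom importer into the network config (sketch).
    /// `NoopBlockImport` is the default; any `BlockImport` implementation can be swapped in.
    fn with_custom_import<C>(builder: NetworkConfigBuilder<C>) -> NetworkConfig<C> {
        builder.block_import(NoopBlockImport::default()).build()
    }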
-use crate::message::BlockRequest; +use crate::{message::BlockRequest, peers::ReputationChange}; use futures::StreamExt; -use reth_eth_wire::{BlockBody, EthMessage}; -use reth_interfaces::p2p::{error::RequestResult, headers::client::HeadersRequest}; -use reth_primitives::{Header, PeerId, H256, U256}; +use reth_eth_wire::{BlockBody, GetBlockBodies}; +use reth_interfaces::p2p::{ + error::{RequestError, RequestResult}, + headers::client::HeadersRequest, +}; +use reth_primitives::{Header, PeerId, H256}; use std::{ collections::{HashMap, VecDeque}, task::{Context, Poll}, @@ -20,6 +23,8 @@ use tokio_stream::wrappers::UnboundedReceiverStream; pub struct StateFetcher { /// Currently active [`GetBlockHeaders`] requests inflight_headers_requests: HashMap>>>, + /// Currently active [`GetBlockBodies`] requests + inflight_bodies_requests: HashMap, RequestResult>>>, /// The list of available peers for requests. peers: HashMap, /// Requests queued for processing @@ -34,26 +39,55 @@ pub struct StateFetcher { impl StateFetcher { /// Invoked when connected to a new peer. - pub(crate) fn new_connected_peer(&mut self, _node_id: PeerId, _best_hash: H256) {} + pub(crate) fn new_connected_peer( + &mut self, + peer_id: PeerId, + best_hash: H256, + best_number: Option, + ) { + self.peers.insert(peer_id, Peer { state: PeerState::Idle, best_hash, best_number }); + } /// Invoked when an active session was closed. - pub(crate) fn on_session_closed(&mut self, _peer: &PeerId) {} + /// + /// This cancels als inflight request and sends an error to the receiver. + pub(crate) fn on_session_closed(&mut self, peer: &PeerId) { + self.peers.remove(peer); + if let Some(req) = self.inflight_headers_requests.remove(peer) { + let _ = req.response.send(Err(RequestError::ConnectionDropped)); + } + if let Some(req) = self.inflight_bodies_requests.remove(peer) { + let _ = req.response.send(Err(RequestError::ConnectionDropped)); + } + } /// Invoked when an active session is about to be disconnected. - pub(crate) fn on_pending_disconnect(&mut self, _peer: &PeerId) {} + pub(crate) fn on_pending_disconnect(&mut self, peer_id: &PeerId) { + if let Some(peer) = self.peers.get_mut(peer_id) { + peer.state = PeerState::Closing; + } + } + + /// Returns the next idle peer that's ready to accept a request + fn next_peer(&mut self) -> Option<(&PeerId, &mut Peer)> { + self.peers.iter_mut().find(|(_, peer)| peer.state.is_idle()) + } /// Returns the next action to return fn poll_action(&mut self) -> Option { - // TODO find matching peers + if self.queued_requests.is_empty() { + return None + } - // if let Some(request) = self.queued_requests.pop_front() { - // if let Some(action) = self.on_download_request(request) { - // return Poll::Ready(action) - // } - // } - None + let peer_id = *self.next_peer()?.0; + + let request = self.queued_requests.pop_front().expect("not empty; qed"); + let request = self.prepare_block_request(peer_id, request); + + Some(FetchAction::BlockRequest { peer_id, request }) } + /// Received a request via a downloader fn on_download_request(&mut self, request: DownloadRequest) -> Option { match request { DownloadRequest::GetBlockHeaders { request: _, response: _ } => {} @@ -91,21 +125,79 @@ impl StateFetcher { Poll::Pending } + /// Handles a new request to a peer. 
+ /// + /// Caution: this assumes the peer exists and is idle + fn prepare_block_request(&mut self, peer_id: PeerId, req: DownloadRequest) -> BlockRequest { + // update the peer's state + if let Some(peer) = self.peers.get_mut(&peer_id) { + peer.state = req.peer_state(); + } + + let started = Instant::now(); + match req { + DownloadRequest::GetBlockHeaders { request, response } => { + let inflight = Request { request, response, started }; + self.inflight_headers_requests.insert(peer_id, inflight); + + unimplemented!("unify start types"); + + // BlockRequest::GetBlockHeaders(GetBlockHeaders { + // // TODO: this should be converted + // start_block: BlockHashOrNumber::Number(0), + // limit: request.limit, + // skip: 0, + // reverse: request.reverse, + // }) + } + DownloadRequest::GetBlockBodies { request, response } => { + let inflight = Request { request: request.clone(), response, started }; + self.inflight_bodies_requests.insert(peer_id, inflight); + BlockRequest::GetBlockBodies(GetBlockBodies(request)) + } + } + } + + /// Returns a new followup request for the peer. + /// + /// Caution: this expects that the peer is _not_ closed + fn followup_request(&mut self, peer_id: PeerId) -> Option { + let req = self.queued_requests.pop_front()?; + let req = self.prepare_block_request(peer_id, req); + Some(BlockResponseOutcome::Request(peer_id, req)) + } + /// Called on a `GetBlockHeaders` response from a peer pub(crate) fn on_block_headers_response( &mut self, - _peer: PeerId, - _res: RequestResult>, + peer_id: PeerId, + res: RequestResult>, ) -> Option { + if let Some(resp) = self.inflight_headers_requests.remove(&peer_id) { + let _ = resp.response.send(res); + } + if let Some(peer) = self.peers.get_mut(&peer_id) { + if peer.state.on_request_finished() { + return self.followup_request(peer_id) + } + } None } /// Called on a `GetBlockBodies` response from a peer pub(crate) fn on_block_bodies_response( &mut self, - _peer: PeerId, - _res: RequestResult>, + peer_id: PeerId, + res: RequestResult>, ) -> Option { + if let Some(resp) = self.inflight_bodies_requests.remove(&peer_id) { + let _ = resp.response.send(res); + } + if let Some(peer) = self.peers.get_mut(&peer_id) { + if peer.state.on_request_finished() { + return self.followup_request(peer_id) + } + } None } @@ -120,6 +212,7 @@ impl Default for StateFetcher { let (download_requests_tx, download_requests_rx) = mpsc::unbounded_channel(); Self { inflight_headers_requests: Default::default(), + inflight_bodies_requests: Default::default(), peers: Default::default(), queued_requests: Default::default(), download_requests_rx: UnboundedReceiverStream::new(download_requests_rx), @@ -148,14 +241,12 @@ impl HeadersDownloader { /// Represents a connected peer struct Peer { - /// Identifier for requests. - request_id: u64, /// The state this peer currently resides in. state: PeerState, /// Best known hash that the peer has best_hash: H256, - /// Best known number the peer has. - best_number: U256, + /// Tracks the best number of the peer. + best_number: Option, } /// Tracks the state of an individual peer @@ -164,6 +255,32 @@ enum PeerState { Idle, /// Peer is handling a `GetBlockHeaders` request. GetBlockHeaders, + /// Peer is handling a `GetBlockBodies` request. + GetBlockBodies, + /// Peer session is about to close + Closing, +} + +// === impl PeerState === + +impl PeerState { + /// Returns true if the peer is currently idle. + fn is_idle(&self) -> bool { + matches!(self, PeerState::Idle) + } + + /// Resets the state on a received response. 
+ /// + /// If the state was already marked as `Closing` do nothing. + /// + /// Returns `true` if the peer is ready for another request. + fn on_request_finished(&mut self) -> bool { + if !matches!(self, PeerState::Closing) { + *self = PeerState::Idle; + return true + } + false + } } /// A request that waits for a response from the network so it can send it back through the response @@ -185,13 +302,26 @@ enum DownloadRequest { GetBlockBodies { request: Vec, response: oneshot::Sender>> }, } +// === impl DownloadRequest === + +impl DownloadRequest { + /// Returns the corresponding state for a peer that handles the request. + fn peer_state(&self) -> PeerState { + match self { + DownloadRequest::GetBlockHeaders { .. } => PeerState::GetBlockHeaders, + DownloadRequest::GetBlockBodies { .. } => PeerState::GetBlockBodies, + } + } +} + /// An action the syncer can emit. pub(crate) enum FetchAction { /// Dispatch an eth request to the given peer. - EthRequest { - node_id: PeerId, + BlockRequest { + /// The targeted recipient for the request + peer_id: PeerId, /// The request to send - request: EthMessage, + request: BlockRequest, }, } @@ -202,7 +332,6 @@ pub(crate) enum FetchAction { pub(crate) enum BlockResponseOutcome { /// Continue with another request to the peer. Request(PeerId, BlockRequest), - /// How to handle a bad response - // TODO this should include some form of reputation change - BadResponse(PeerId), + /// How to handle a bad response and the reputation change to apply. + BadResponse(PeerId, ReputationChange), } diff --git a/crates/net/network/src/import.rs b/crates/net/network/src/import.rs new file mode 100644 index 0000000000..3a54d76dd2 --- /dev/null +++ b/crates/net/network/src/import.rs @@ -0,0 +1,42 @@ +use crate::message::NewBlockMessage; +use reth_primitives::PeerId; +use std::task::{Context, Poll}; + +/// Abstraction over block import. +pub trait BlockImport: Send + Sync { + /// Invoked for a received `NewBlock` broadcast message from the peer. + /// + /// > When a `NewBlock` announcement message is received from a peer, the client first verifies + /// > the basic header validity of the block, checking whether the proof-of-work value is valid. + /// + /// This is supposed to start verification. The results are then expected to be returned via + /// [`BlockImport::poll`]. + fn on_new_block(&mut self, peer_id: PeerId, incoming_block: NewBlockMessage); + + /// Returns the results of a [`BlockImport::on_new_block`] + fn poll(&mut self, cx: &mut Context<'_>) -> Poll; +} + +/// Outcome of the [`BlockImport`]'s block handling. +pub struct BlockImportOutcome { + /// Sender of the `NewBlock` message. + pub peer: PeerId, + /// The result after validating the block + pub result: Result, +} + +/// Represents the error case of a failed block import +pub enum BlockImportError {} + +/// An implementation of `BlockImport` that does nothing +#[derive(Debug, Default)] +#[non_exhaustive] +pub struct NoopBlockImport; + +impl BlockImport for NoopBlockImport { + fn on_new_block(&mut self, _peer_id: PeerId, _incoming_block: NewBlockMessage) {} + + fn poll(&mut self, _cx: &mut Context<'_>) -> Poll { + Poll::Pending + } +} diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index 45b1c70979..831f1a897d 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -15,10 +15,12 @@ //! port of that network. This includes public identities (public key) and addresses (where to reach //! them). 
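Since `BlockImport` (import.rs above) is the extension point for block validation, a minimal sketch of a custom implementation may help. This is illustrative only: `QueuedBlockImport` is hypothetical, and the `Ok` payload of `BlockImportOutcome::result` is not spelled out in this diff, so the sketch only queues outcomes produced elsewhere:

    use std::collections::VecDeque;
    use std::task::{Context, Poll};

    /// Buffers outcomes produced by some external validation task and
    /// hands them back to the `NetworkManager` through `poll`.
    #[derive(Default)]
    struct QueuedBlockImport {
        outcomes: VecDeque<BlockImportOutcome>,
    }

    impl BlockImport for QueuedBlockImport {
        fn on_new_block(&mut self, peer_id: PeerId, incoming_block: NewBlockMessage) {
            // start verification here; push the resulting `BlockImportOutcome`
            // into `self.outcomes` once validation has finished
            let _ = (peer_id, incoming_block);
        }

        fn poll(&mut self, _cx: &mut Context<'_>) -> Poll<BlockImportOutcome> {
            // a production implementation would register `cx.waker()` before
            // returning `Pending` so the manager gets woken up again
            match self.outcomes.pop_front() {
                Some(outcome) => Poll::Ready(outcome),
                None => Poll::Pending,
            }
        }
    }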
+mod cache; mod config; mod discovery; pub mod error; mod fetch; +mod import; mod listener; mod manager; mod message; diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 340efe1137..0c5bc208e4 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -19,7 +19,9 @@ use crate::{ config::NetworkConfig, discovery::Discovery, error::NetworkError, + import::{BlockImport, BlockImportOutcome}, listener::ConnectionListener, + message::{NewBlockMessage, PeerMessage, PeerRequest, PeerRequestSender}, network::{NetworkHandle, NetworkHandleMessage}, peers::PeersManager, session::SessionManager, @@ -30,9 +32,9 @@ use futures::{Future, StreamExt}; use parking_lot::Mutex; use reth_eth_wire::{ capability::{Capabilities, CapabilityMessage}, - EthMessage, + GetPooledTransactions, NewPooledTransactionHashes, PooledTransactions, Transactions, }; -use reth_interfaces::provider::BlockProvider; +use reth_interfaces::{p2p::error::RequestResult, provider::BlockProvider}; use reth_primitives::PeerId; use std::{ net::SocketAddr, @@ -43,7 +45,7 @@ use std::{ }, task::{Context, Poll}, }; -use tokio::sync::mpsc; +use tokio::sync::{mpsc, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{error, trace}; @@ -77,8 +79,8 @@ pub struct NetworkManager { handle: NetworkHandle, /// Receiver half of the command channel set up between this type and the [`NetworkHandle`] from_handle_rx: UnboundedReceiverStream, - /// Handles block imports. - block_import_sink: (), + /// Handles block imports according to the `eth` protocol. + block_import: Box, /// The address of this node that listens for incoming connections. listener_address: Arc>, /// All listeners for [`Network`] events. @@ -112,6 +114,7 @@ where peers_config, sessions_config, genesis_hash, + block_import, .. } = config; @@ -145,7 +148,7 @@ where swarm, handle, from_handle_rx: UnboundedReceiverStream::new(from_handle_rx), - block_import_sink: (), + block_import, listener_address, event_listeners: Default::default(), num_active_peers, @@ -171,41 +174,47 @@ where // TODO: disconnect? } - /// Handles a received [`CapabilityMessage`] from the peer. - fn on_capability_message(&mut self, _node_id: PeerId, msg: CapabilityMessage) { - match msg { - CapabilityMessage::Eth(eth) => { - match eth { - EthMessage::Status(_) => {} - EthMessage::NewBlockHashes(_) => { - // update peer's state, to track what blocks this peer has seen - } - EthMessage::NewBlock(_) => { - // emit new block and track that the peer knows this block - } - EthMessage::Transactions(_) => { - // need to emit this as event/send to tx handler - } - EthMessage::NewPooledTransactionHashes(_) => { - // need to emit this as event/send to tx handler - } + /// Handle an incoming request from the peer + fn on_eth_request(&mut self, peer_id: PeerId, req: PeerRequest) { + match req { + PeerRequest::GetBlockHeaders { .. } => {} + PeerRequest::GetBlockBodies { .. } => {} + PeerRequest::GetPooledTransactions { request, response } => { + // notify listeners about this request + self.event_listeners.send(NetworkEvent::GetPooledTransactions { + peer_id, + request, + response: Arc::new(response), + }); + } + PeerRequest::GetNodeData { .. } => {} + PeerRequest::GetReceipts { .. 
} => {} + } + } - // TODO: should remove the response types here, as they are handled separately - EthMessage::GetBlockHeaders(_) => {} - EthMessage::BlockHeaders(_) => {} - EthMessage::GetBlockBodies(_) => {} - EthMessage::BlockBodies(_) => {} - EthMessage::GetPooledTransactions(_) => {} - EthMessage::PooledTransactions(_) => {} - EthMessage::GetNodeData(_) => {} - EthMessage::NodeData(_) => {} - EthMessage::GetReceipts(_) => {} - EthMessage::Receipts(_) => {} - } + /// Handles a received Message from the peer. + fn on_peer_message(&mut self, peer_id: PeerId, msg: PeerMessage) { + match msg { + PeerMessage::NewBlockHashes(hashes) => { + // update peer's state, to track what blocks this peer has seen + self.swarm.state_mut().on_new_block_hashes(peer_id, hashes.0) } - CapabilityMessage::Other(_) => { - // other subprotocols + PeerMessage::NewBlock(block) => { + self.swarm.state_mut().on_new_block(peer_id, block.hash); + // start block import process + self.block_import.on_new_block(peer_id, block); } + PeerMessage::PooledTransactions(msg) => { + self.event_listeners + .send(NetworkEvent::IncomingPooledTransactionHashes { peer_id, msg }); + } + PeerMessage::Transactions(msg) => { + self.event_listeners.send(NetworkEvent::IncomingTransactions { peer_id, msg }); + } + PeerMessage::EthRequest(req) => { + self.on_eth_request(peer_id, req); + } + PeerMessage::Other(_) => {} } } @@ -215,10 +224,25 @@ where NetworkHandleMessage::EventListener(tx) => { self.event_listeners.listeners.push(tx); } - NetworkHandleMessage::NewestBlock(_, _) => {} - _ => {} + NetworkHandleMessage::AnnounceBlock(block, hash) => { + let msg = NewBlockMessage { hash, block: Arc::new(block) }; + self.swarm.state_mut().announce_new_block(msg); + } + NetworkHandleMessage::EthRequest { peer_id, request } => { + self.swarm.sessions_mut().send_message(&peer_id, PeerMessage::EthRequest(request)) + } + NetworkHandleMessage::SendTransaction { peer_id, msg } => { + self.swarm.sessions_mut().send_message(&peer_id, PeerMessage::Transactions(msg)) + } + NetworkHandleMessage::SendPooledTransactionHashes { peer_id, msg } => self + .swarm + .sessions_mut() + .send_message(&peer_id, PeerMessage::PooledTransactions(msg)), } } + + /// Invoked after a `NewBlock` message from the peer was validated + fn on_block_import_result(&mut self, _outcome: BlockImportOutcome) {} } impl Future for NetworkManager @@ -230,6 +254,11 @@ where fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); + // poll new block imports + while let Poll::Ready(outcome) = this.block_import.poll(cx) { + this.on_block_import_result(outcome); + } + // process incoming messages from a handle loop { match this.from_handle_rx.poll_next_unpin(cx) { @@ -248,8 +277,8 @@ where while let Poll::Ready(Some(event)) = this.swarm.poll_next_unpin(cx) { // handle event match event { - SwarmEvent::CapabilityMessage { node_id, message } => { - this.on_capability_message(node_id, message) + SwarmEvent::ValidMessage { node_id, message } => { + this.on_peer_message(node_id, message) } SwarmEvent::InvalidCapabilityMessage { node_id, capabilities, message } => { this.on_invalid_message(node_id, capabilities, message) @@ -266,25 +295,38 @@ where SwarmEvent::OutgoingTcpConnection { remote_addr } => { trace!(?remote_addr, target = "net", "Starting outbound connection."); } - SwarmEvent::SessionEstablished { node_id, remote_addr } => { + SwarmEvent::SessionEstablished { + node_id: peer_id, + remote_addr, + capabilities, + messages, + } => { let total_active = 
this.num_active_peers.fetch_add(1, Ordering::Relaxed) + 1; trace!( ?remote_addr, - ?node_id, + ?peer_id, ?total_active, target = "net", "Session established" ); + + this.event_listeners.send(NetworkEvent::SessionEstablished { + peer_id, + capabilities, + messages, + }); } - SwarmEvent::SessionClosed { node_id, remote_addr } => { + SwarmEvent::SessionClosed { node_id: peer_id, remote_addr } => { let total_active = this.num_active_peers.fetch_sub(1, Ordering::Relaxed) - 1; trace!( ?remote_addr, - ?node_id, + ?peer_id, ?total_active, target = "net", "Session disconnected" ); + + this.event_listeners.send(NetworkEvent::SessionClosed { peer_id }); } SwarmEvent::IncomingPendingSessionClosed { .. } => {} SwarmEvent::OutgoingPendingSessionClosed { .. } => {} @@ -292,14 +334,33 @@ where } } - todo!() + Poll::Pending } } /// Events emitted by the network that are of interest for subscribers. +/// +/// This includes any event types that may be relevant to tasks #[derive(Debug, Clone)] pub enum NetworkEvent { - EthMessage { node_id: PeerId, message: EthMessage }, + /// Closed the peer session. + SessionClosed { peer_id: PeerId }, + /// Established a new session with the given peer. + SessionEstablished { + peer_id: PeerId, + capabilities: Arc, + messages: PeerRequestSender, + }, + /// Received list of transactions to the given peer. + IncomingTransactions { peer_id: PeerId, msg: Arc }, + /// Received list of transactions hashes to the given peer. + IncomingPooledTransactionHashes { peer_id: PeerId, msg: Arc }, + /// Incoming `GetPooledTransactions` request from a peer. + GetPooledTransactions { + peer_id: PeerId, + request: GetPooledTransactions, + response: Arc>>, + }, } /// Bundles all listeners for [`NetworkEvent`]s. diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index 3aaaca3cc5..20abe492e3 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -5,26 +5,38 @@ use futures::FutureExt; use reth_eth_wire::{ - BlockBodies, BlockBody, BlockHeaders, GetBlockBodies, GetBlockHeaders, GetNodeData, - GetPooledTransactions, GetReceipts, NewBlock, NewBlockHashes, NodeData, PooledTransactions, - Receipts, Transactions, + capability::CapabilityMessage, BlockBodies, BlockBody, BlockHeaders, GetBlockBodies, + GetBlockHeaders, GetNodeData, GetPooledTransactions, GetReceipts, NewBlock, NewBlockHashes, + NewPooledTransactionHashes, NodeData, PooledTransactions, Receipts, Transactions, }; -use std::task::{ready, Context, Poll}; - -use reth_eth_wire::capability::CapabilityMessage; use reth_interfaces::p2p::error::RequestResult; -use reth_primitives::{Header, PeerId, Receipt, TransactionSigned}; +use reth_primitives::{Header, PeerId, Receipt, TransactionSigned, H256}; +use std::{ + sync::Arc, + task::{ready, Context, Poll}, +}; use tokio::sync::{mpsc, mpsc::error::TrySendError, oneshot}; +/// Internal form of a `NewBlock` message +#[derive(Debug, Clone)] +pub struct NewBlockMessage { + /// Hash of the block + pub hash: H256, + /// Raw received message + pub block: Arc, +} + /// Represents all messages that can be sent to a peer session #[derive(Debug)] pub enum PeerMessage { /// Announce new block hashes NewBlockHashes(NewBlockHashes), /// Broadcast new block. - NewBlock(Box), + NewBlock(NewBlockMessage), /// Broadcast transactions. - Transactions(Transactions), + Transactions(Arc), + /// + PooledTransactions(Arc), /// All `eth` request variants. 
EthRequest(PeerRequest), /// Other than eth namespace message diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index c38986e94d..9c8f079de7 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -1,6 +1,7 @@ -use crate::{manager::NetworkEvent, peers::PeersHandle}; +use crate::{manager::NetworkEvent, message::PeerRequest, peers::PeersHandle}; use parking_lot::Mutex; -use reth_primitives::{PeerId, H256, U256}; +use reth_eth_wire::{NewBlock, NewPooledTransactionHashes, Transactions}; +use reth_primitives::{PeerId, H256}; use std::{ net::SocketAddr, sync::{atomic::AtomicUsize, Arc}, @@ -47,6 +48,16 @@ impl NetworkHandle { let _ = self.manager().send(NetworkHandleMessage::EventListener(tx)); rx } + + /// Sends a [`NetworkHandleMessage`] to the manager + fn send_message(&self, msg: NetworkHandleMessage) { + let _ = self.inner.to_manager_tx.send(msg); + } + + /// Sends a [`PeerRequest`] to the given peer's session. + pub fn send_request(&mut self, peer_id: PeerId, request: PeerRequest) { + self.send_message(NetworkHandleMessage::EthRequest { peer_id, request }) + } } struct NetworkInner { @@ -59,15 +70,25 @@ struct NetworkInner { /// The identifier used by this node. local_node_id: PeerId, /// Access to the all the nodes - peers: PeersHandle, // TODO need something to access + peers: PeersHandle, } /// Internal messages that can be passed to the [`NetworkManager`](crate::NetworkManager). +#[allow(missing_docs)] pub(crate) enum NetworkHandleMessage { /// Add a new listener for [`NetworkEvent`]. EventListener(UnboundedSender), /// Broadcast event to announce a new block to all nodes. - AnnounceBlock, - /// Returns the newest imported block by the network. - NewestBlock(H256, U256), + AnnounceBlock(NewBlock, H256), + /// Sends the list of transactions to the given peer. + SendTransaction { peer_id: PeerId, msg: Arc }, + /// Sends the list of transactions hashes to the given peer. + SendPooledTransactionHashes { peer_id: PeerId, msg: Arc }, + /// Send an `eth` protocol request to the peer. + EthRequest { + /// The peer to send the request to. + peer_id: PeerId, + /// The request to send to the peer's sessions. + request: PeerRequest, + }, } diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index f7cd579ca6..6a9248e3dc 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -1,5 +1,8 @@ //! Session handles -use crate::session::{Direction, SessionId}; +use crate::{ + message::PeerMessage, + session::{Direction, SessionId}, +}; use reth_ecies::{stream::ECIESStream, ECIESError}; use reth_eth_wire::{ capability::{Capabilities, CapabilityMessage}, @@ -93,7 +96,8 @@ pub(crate) enum PendingSessionEvent { pub(crate) enum SessionCommand { /// Disconnect the connection Disconnect, - Message(CapabilityMessage), + /// Sends a message to the peer + Message(PeerMessage), } /// Message variants an active session can produce and send back to the @@ -107,7 +111,7 @@ pub(crate) enum ActiveSessionMessage { /// Identifier of the remote peer. node_id: PeerId, /// Message received from the peer. - message: CapabilityMessage, + message: PeerMessage, }, /// Received a message that does not match the announced capabilities of the peer. 
InvalidMessage { diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index c3faecba2a..5c66574223 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -1,9 +1,13 @@ //! Support for handling peer sessions. pub use crate::message::PeerRequestSender; -use crate::session::{ - active::ActiveSession, - handle::{ - ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle, +use crate::{ + message::PeerMessage, + session::{ + active::ActiveSession, + handle::{ + ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle, + SessionCommand, + }, }, }; use fnv::FnvHashMap; @@ -194,6 +198,13 @@ impl SessionManager { } } + /// Sends a message to the peer's session + pub(crate) fn send_message(&mut self, peer_id: &PeerId, msg: PeerMessage) { + if let Some(session) = self.active_sessions.get_mut(peer_id) { + let _ = session.commands_to_session.try_send(SessionCommand::Message(msg)); + } + } + /// This polls all the session handles and returns [`SessionEvent`]. /// /// Active sessions are prioritized. @@ -406,7 +417,7 @@ pub(crate) enum SessionEvent { ValidMessage { node_id: PeerId, /// Message received from the peer. - message: CapabilityMessage, + message: PeerMessage, }, /// Received a message that does not match the announced capabilities of the peer. InvalidMessage { diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index b2404c0841..38645098a7 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -1,29 +1,31 @@ //! Keeps track of the state of the network. use crate::{ + cache::LruCache, discovery::{Discovery, DiscoveryEvent}, - fetch::StateFetcher, - message::{PeerRequestSender, PeerResponse}, + fetch::{BlockResponseOutcome, StateFetcher}, + message::{ + BlockRequest, NewBlockMessage, PeerRequest, PeerRequestSender, PeerResponse, + PeerResponseResult, + }, peers::{PeerAction, PeersManager}, }; - -use reth_eth_wire::{capability::Capabilities, Status}; +use reth_eth_wire::{capability::Capabilities, BlockHashNumber, Status}; use reth_interfaces::provider::BlockProvider; use reth_primitives::{PeerId, H256}; use std::{ collections::{HashMap, VecDeque}, net::SocketAddr, + num::NonZeroUsize, sync::Arc, task::{Context, Poll}, }; use tokio::sync::oneshot; - -use crate::{ - fetch::BlockResponseOutcome, - message::{BlockRequest, PeerRequest, PeerResponseResult}, -}; use tracing::trace; +/// Cache limit of blocks to keep track of for a single peer. +const PEER_BLOCK_CACHE_LIMIT: usize = 512; + /// The [`NetworkState`] keeps track of the state of all peers in the network. /// /// This includes: @@ -90,7 +92,9 @@ where // TODO add capacity check debug_assert!(self.connected_peers.contains_key(&peer), "Already connected; not possible"); - self.state_fetcher.new_connected_peer(peer, status.blockhash); + // find the corresponding block number + let block_number = self.client.block_number(status.blockhash).ok().flatten(); + self.state_fetcher.new_connected_peer(peer, status.blockhash, block_number); self.connected_peers.insert( peer, @@ -99,6 +103,7 @@ where capabilities, request_tx, pending_response: None, + blocks: LruCache::new(NonZeroUsize::new(PEER_BLOCK_CACHE_LIMIT).unwrap()), }, ); @@ -111,12 +116,59 @@ where self.state_fetcher.on_session_closed(&peer); } - /// Propagates Block to peers. 
- pub(crate) fn announce_block(&mut self, _hash: H256, _block: ()) { - // TODO propagate the newblock messages to all connected peers that haven't seen the block - // yet + /// Starts propagating the new block to peers that haven't reported the block yet. + /// + /// This is supposed to be invoked after the block was validated. + /// + /// > It then sends the block to a small fraction of connected peers (usually the square root of + /// > the total number of peers) using the `NewBlock` message. + /// + /// See also + pub(crate) fn announce_new_block(&mut self, msg: NewBlockMessage) { + // send a `NewBlock` message to a fraction fo the connected peers (square root of the total + // number of peers) + let num_propagate = (self.connected_peers.len() as f64).sqrt() as u64 + 1; - todo!() + let mut count = 0; + for (peer_id, peer) in self.connected_peers.iter_mut() { + if peer.blocks.contains(&msg.hash) { + // skip peers which already reported the block + continue + } + + // Queue a `NewBlock` message for the peer + if count < num_propagate { + self.queued_messages + .push_back(StateAction::NewBlock { peer_id: *peer_id, block: msg.clone() }); + + // mark the block as seen by the peer + peer.blocks.insert(msg.hash); + + count += 1; + } + + if count >= num_propagate { + break + } + } + } + + /// Invoked after a `NewBlock` message was received by the peer. + /// + /// This will keep track of blocks we know a peer has + pub(crate) fn on_new_block(&mut self, peer_id: PeerId, hash: H256) { + // Mark the blocks as seen + if let Some(peer) = self.connected_peers.get_mut(&peer_id) { + peer.blocks.insert(hash); + } + } + + /// Invoked for a `NewBlockHashes` broadcast message. + pub(crate) fn on_new_block_hashes(&mut self, peer_id: PeerId, hashes: Vec) { + // Mark the blocks as seen + if let Some(peer) = self.connected_peers.get_mut(&peer_id) { + peer.blocks.extend(hashes.into_iter().map(|b| b.hash)); + } } /// Event hook for events received from the discovery service. @@ -183,7 +235,7 @@ where BlockResponseOutcome::Request(peer, request) => { self.handle_block_request(peer, request); } - BlockResponseOutcome::BadResponse(_) => { + BlockResponseOutcome::BadResponse(_peer, _reputation_change) => { // TODO handle reputation change } } @@ -277,10 +329,19 @@ pub struct ConnectedPeer { pub(crate) request_tx: PeerRequestSender, /// The response receiver for a currently active request to that peer. pub(crate) pending_response: Option, + /// Blocks we know the peer has. + pub(crate) blocks: LruCache, } /// Message variants triggered by the [`State`] pub enum StateAction { + /// Dispatch a `NewBlock` message to the peer + NewBlock { + /// Target of the message + peer_id: PeerId, + /// The `NewBlock` message + block: NewBlockMessage, + }, /// Create a new connection to the given node. Connect { remote_addr: SocketAddr, node_id: PeerId }, /// Disconnect an existing connection diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index 136f11abdb..9acb9a86f8 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -1,5 +1,6 @@ use crate::{ listener::{ConnectionListener, ListenerEvent}, + message::{PeerMessage, PeerRequestSender}, session::{SessionEvent, SessionId, SessionManager}, state::{AddSessionError, NetworkState, StateAction}, }; @@ -56,6 +57,11 @@ where &mut self.state } + /// Mutable access to the [`SessionManager`]. 
+ pub(crate) fn sessions_mut(&mut self) -> &mut SessionManager { + &mut self.sessions + } + /// Triggers a new outgoing connection to the given node pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_id: PeerId) { self.sessions.dial_outbound(remote_addr, remote_id) @@ -70,8 +76,18 @@ where capabilities, status, messages, - } => match self.state.on_session_activated(node_id, capabilities, status, messages) { - Ok(_) => Some(SwarmEvent::SessionEstablished { node_id, remote_addr }), + } => match self.state.on_session_activated( + node_id, + capabilities.clone(), + status, + messages.clone(), + ) { + Ok(_) => Some(SwarmEvent::SessionEstablished { + node_id, + remote_addr, + capabilities, + messages, + }), Err(err) => { match err { AddSessionError::AtCapacity { peer } => self.sessions.disconnect(peer), @@ -80,7 +96,7 @@ where } }, SessionEvent::ValidMessage { node_id, message } => { - Some(SwarmEvent::CapabilityMessage { node_id, message }) + Some(SwarmEvent::ValidMessage { node_id, message }) } SessionEvent::InvalidMessage { node_id, capabilities, message } => { Some(SwarmEvent::InvalidCapabilityMessage { node_id, capabilities, message }) @@ -133,6 +149,10 @@ where StateAction::Disconnect { node_id } => { self.sessions.disconnect(node_id); } + StateAction::NewBlock { peer_id, block: msg } => { + let msg = PeerMessage::NewBlock(msg); + self.sessions.send_message(&peer_id, msg); + } } None } @@ -191,11 +211,11 @@ where /// network. pub enum SwarmEvent { /// Events related to the actual network protocol. - CapabilityMessage { + ValidMessage { /// The peer that sent the message node_id: PeerId, /// Message received from the peer - message: CapabilityMessage, + message: PeerMessage, }, /// Received a message that does not match the announced capabilities of the peer. InvalidCapabilityMessage { @@ -230,6 +250,8 @@ pub enum SwarmEvent { SessionEstablished { node_id: PeerId, remote_addr: SocketAddr, + capabilities: Arc, + messages: PeerRequestSender, }, SessionClosed { node_id: PeerId, diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index d5f12cd28e..9a0a07630b 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -1,12 +1,25 @@ //! Transaction management for the p2p network. -use crate::{manager::NetworkEvent, NetworkHandle}; -use reth_primitives::{Transaction, H256}; +use crate::{cache::LruCache, manager::NetworkEvent, message::PeerRequestSender, NetworkHandle}; +use futures::stream::FuturesUnordered; +use reth_primitives::{PeerId, Transaction, H256}; use reth_transaction_pool::TransactionPool; -use std::collections::HashMap; +use std::{ + collections::{hash_map::Entry, HashMap}, + future::Future, + num::NonZeroUsize, + pin::Pin, + sync::Arc, +}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; +/// Cache limit of transactions to keep track of for a single peer. +const PEER_TRANSACTION_CACHE_LIMIT: usize = 1024; + +/// The future for inserting a function into the pool +pub type PoolImportFuture = Pin + Send>>; + /// Api to interact with [`TransactionsManager`] task. pub struct TransactionsHandle { /// Command channel to the [`TransactionsManager`] @@ -39,10 +52,15 @@ pub struct TransactionsManager { /// /// From which we get all new incoming transaction related messages. network_events: UnboundedReceiverStream, - /// All currently pending transactions - pending_transactions: (), - /// All the peers that have sent the same transactions. 
- peers: HashMap>, + /// All currently pending transactions grouped by peers. + /// + /// This way we can track incoming transactions and prevent multiple pool imports for the same + /// transaction + transactions_by_peers: HashMap>, + /// Transactions that are currently imported into the `Pool` + pool_imports: FuturesUnordered, + /// All the connected peers. + peers: HashMap, /// Send half for the command channel. command_tx: mpsc::UnboundedSender, /// Incoming commands from [`TransactionsHandle`]. @@ -64,7 +82,8 @@ where pool, network, network_events: UnboundedReceiverStream::new(network_events), - pending_transactions: (), + transactions_by_peers: Default::default(), + pool_imports: Default::default(), peers: Default::default(), command_tx, command_rx: UnboundedReceiverStream::new(command_rx), @@ -76,10 +95,64 @@ where TransactionsHandle { manager_tx: self.command_tx.clone() } } + /// Handles a received event + async fn on_event(&mut self, event: NetworkEvent) { + match event { + NetworkEvent::SessionClosed { peer_id } => { + // remove the peer + self.peers.remove(&peer_id); + } + NetworkEvent::SessionEstablished { peer_id, messages, .. } => { + // insert a new peer + self.peers.insert( + peer_id, + Peer { + transactions: LruCache::new( + NonZeroUsize::new(PEER_TRANSACTION_CACHE_LIMIT).unwrap(), + ), + request_tx: messages, + }, + ); + + // TODO send `NewPooledTransactionHashes + } + NetworkEvent::IncomingTransactions { peer_id, msg } => { + let transactions = Arc::try_unwrap(msg).unwrap_or_else(|arc| (*arc).clone()); + + if let Some(peer) = self.peers.get_mut(&peer_id) { + for tx in transactions.0 { + // track that the peer knows this transaction + peer.transactions.insert(tx.hash); + + match self.transactions_by_peers.entry(tx.hash) { + Entry::Occupied(mut entry) => { + // transaction was already inserted + entry.get_mut().push(peer_id); + } + Entry::Vacant(_) => { + // TODO import into the pool + } + } + } + } + } + NetworkEvent::IncomingPooledTransactionHashes { .. } => {} + NetworkEvent::GetPooledTransactions { .. } => {} + } + } + /// Executes an endless future pub async fn run(self) {} } +/// Tracks a single peer +struct Peer { + /// Keeps track of transactions that we know the peer has seen. + transactions: LruCache, + /// A communication channel directly to the session task. + request_tx: PeerRequestSender, +} + /// Commands to send to the [`TransactionManager`] enum TransactionsCommand { Propagate(H256), From 391a50944371aed0f07141022a689f5bcac71472 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 15 Nov 2022 16:24:13 +0100 Subject: [PATCH 6/9] feat: add FromRecoveredTransaction conversion trait (#207) --- crates/primitives/src/lib.rs | 4 +- crates/primitives/src/transaction/mod.rs | 32 +++++++++---- crates/transaction-pool/src/test_util/mock.rs | 47 ++++++++++++++++++- crates/transaction-pool/src/traits.rs | 4 +- 4 files changed, 74 insertions(+), 13 deletions(-) diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 4c172e4d3c..aaff276389 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -42,8 +42,8 @@ pub use log::Log; pub use receipt::Receipt; pub use storage::StorageEntry; pub use transaction::{ - AccessList, AccessListItem, Signature, Transaction, TransactionKind, TransactionSigned, - TransactionSignedEcRecovered, TxType, + AccessList, AccessListItem, FromRecoveredTransaction, Signature, Transaction, TransactionKind, + TransactionSigned, TransactionSignedEcRecovered, TxType, }; /// A block hash. 
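The `FromRecoveredTransaction` trait re-exported above, and defined in the hunk that follows, is what lets pool code stay generic over its transaction type. A minimal sketch of the intended call site; `into_pool_transaction` is a hypothetical helper, not part of this patch:

    use reth_primitives::{FromRecoveredTransaction, TransactionSignedEcRecovered};

    /// Convert a transaction recovered from a P2P message into whatever
    /// transaction type the pool is instantiated with.
    fn into_pool_transaction<T: FromRecoveredTransaction>(tx: TransactionSignedEcRecovered) -> T {
        T::from_recovered_transaction(tx)
    }

Because the identity implementation for `TransactionSignedEcRecovered` ships with the trait, `into_pool_transaction::<TransactionSignedEcRecovered>(tx)` is a no-op.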
diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index c6953f14ca..6a6dd826b0 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,8 +1,3 @@ -mod access_list; -mod signature; -mod tx_type; -mod util; - use crate::{Address, Bytes, ChainId, TxHash, H256, U256}; pub use access_list::{AccessList, AccessListItem}; use bytes::{Buf, BytesMut}; @@ -13,13 +8,18 @@ use reth_rlp::{length_of_length, Decodable, DecodeError, Encodable, Header, EMPT pub use signature::Signature; pub use tx_type::TxType; +mod access_list; +mod signature; +mod tx_type; +mod util; + /// A raw transaction. /// /// Transaction types were introduced in [EIP-2718](https://eips.ethereum.org/EIPS/eip-2718). #[main_codec] #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum Transaction { - /// Legacy transaciton. + /// Legacy transaction. Legacy { /// Added as EIP-155: Simple replay attack protection chain_id: Option, @@ -707,10 +707,25 @@ impl TransactionSignedEcRecovered { } } +/// A transaction type that can be created from a [`TransactionSignedEcRecovered`] transaction. +/// +/// This is a conversion trait that'll ensure transactions received via P2P can be converted to the +/// transaction type that the transaction pool uses. +pub trait FromRecoveredTransaction { + /// Converts to this type from the given [`TransactionSignedEcRecovered`]. + fn from_recovered_transaction(tx: TransactionSignedEcRecovered) -> Self; +} + +// Noop conversion +impl FromRecoveredTransaction for TransactionSignedEcRecovered { + #[inline] + fn from_recovered_transaction(tx: TransactionSignedEcRecovered) -> Self { + tx + } +} + #[cfg(test)] mod tests { - use std::str::FromStr; - use crate::{ transaction::{signature::Signature, TransactionKind}, AccessList, Address, Bytes, Transaction, TransactionSigned, H256, U256, @@ -718,6 +733,7 @@ mod tests { use bytes::BytesMut; use ethers_core::utils::hex; use reth_rlp::{Decodable, Encodable}; + use std::str::FromStr; #[test] fn test_decode_create() { diff --git a/crates/transaction-pool/src/test_util/mock.rs b/crates/transaction-pool/src/test_util/mock.rs index d5e5d3795e..31da05f5a8 100644 --- a/crates/transaction-pool/src/test_util/mock.rs +++ b/crates/transaction-pool/src/test_util/mock.rs @@ -11,7 +11,10 @@ use rand::{ distributions::{Uniform, WeightedIndex}, prelude::Distribution, }; -use reth_primitives::{Address, TxHash, H256, U256}; +use reth_primitives::{ + Address, FromRecoveredTransaction, Transaction, TransactionSignedEcRecovered, TxHash, H256, + U256, +}; use std::{ops::Range, sync::Arc, time::Instant}; pub type MockTxPool = TxPool; @@ -333,6 +336,48 @@ impl PoolTransaction for MockTransaction { } } +impl FromRecoveredTransaction for MockTransaction { + fn from_recovered_transaction(tx: TransactionSignedEcRecovered) -> Self { + let sender = tx.signer(); + let transaction = tx.into_signed(); + let hash = transaction.hash; + match transaction.transaction { + Transaction::Legacy { chain_id, nonce, gas_price, gas_limit, to, value, input } => { + MockTransaction::Legacy { + hash, + sender, + nonce, + gas_price: gas_price.into(), + gas_limit, + value, + } + } + Transaction::Eip1559 { + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + input, + access_list, + } => MockTransaction::Eip1559 { + hash, + sender, + nonce, + max_fee_per_gas: max_fee_per_gas.into(), + max_priority_fee_per_gas: max_priority_fee_per_gas.into(), + gas_limit, + value, + }, + 
Transaction::Eip2930 { .. } => { + unimplemented!() + } + } + } +} + #[derive(Default)] pub struct MockTransactionFactory { pub ids: SenderIdentifiers, diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 81caa7206c..e07821eebf 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1,6 +1,6 @@ use crate::{error::PoolResult, pool::state::SubPool, validate::ValidPoolTransaction, BlockID}; use futures::{channel::mpsc::Receiver, future::Shared}; -use reth_primitives::{Address, TxHash, H256, U256}; +use reth_primitives::{Address, FromRecoveredTransaction, TxHash, H256, U256}; use std::{fmt, sync::Arc}; /// General purpose abstraction fo a transaction-pool. @@ -174,7 +174,7 @@ impl BestTransactions for std::iter::Empty { } /// Trait for transaction types used inside the pool -pub trait PoolTransaction: fmt::Debug + Send + Sync + 'static { +pub trait PoolTransaction: fmt::Debug + Send + Sync + FromRecoveredTransaction { /// Hash of the transaction. fn hash(&self) -> &TxHash; From f0388e403274fad6957c1d30f7a457d6e931a2d6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 15 Nov 2022 18:44:07 +0100 Subject: [PATCH 7/9] refactor: move BlockHashOrNumber to primitives (#203) --- crates/interfaces/src/p2p/headers/client.rs | 5 +- .../interfaces/src/p2p/headers/downloader.rs | 8 +-- crates/net/eth-wire/src/types/blocks.rs | 59 ++---------------- crates/net/headers-downloaders/src/linear.rs | 9 ++- crates/primitives/src/block.rs | 60 +++++++++++++++++++ crates/primitives/src/lib.rs | 2 +- 6 files changed, 75 insertions(+), 68 deletions(-) diff --git a/crates/interfaces/src/p2p/headers/client.rs b/crates/interfaces/src/p2p/headers/client.rs index 3416006035..5ff872887c 100644 --- a/crates/interfaces/src/p2p/headers/client.rs +++ b/crates/interfaces/src/p2p/headers/client.rs @@ -1,8 +1,9 @@ use crate::p2p::MessageStream; -use reth_primitives::{rpc::BlockId, Header, H256, H512}; +use reth_primitives::{Header, H256, H512}; use async_trait::async_trait; +use reth_primitives::BlockHashOrNumber; use std::{collections::HashSet, fmt::Debug}; /// Each peer returns a list of headers and the request id corresponding @@ -31,7 +32,7 @@ impl From<(u64, Vec
)> for HeadersResponse { #[derive(Clone, Debug)] pub struct HeadersRequest { /// The starting block - pub start: BlockId, + pub start: BlockHashOrNumber, /// The response max size pub limit: u64, /// Flag indicating whether the blocks should diff --git a/crates/interfaces/src/p2p/headers/downloader.rs b/crates/interfaces/src/p2p/headers/downloader.rs index 2657b37be7..97fbe9c5b2 100644 --- a/crates/interfaces/src/p2p/headers/downloader.rs +++ b/crates/interfaces/src/p2p/headers/downloader.rs @@ -1,9 +1,7 @@ use super::client::{HeadersClient, HeadersRequest, HeadersStream}; -use crate::consensus::Consensus; - -use crate::p2p::headers::error::DownloadError; +use crate::{consensus::Consensus, p2p::headers::error::DownloadError}; use async_trait::async_trait; -use reth_primitives::{rpc::BlockId, Header, SealedHeader}; +use reth_primitives::{BlockHashOrNumber, Header, SealedHeader}; use reth_rpc_types::engine::ForkchoiceState; use std::time::Duration; use tokio_stream::StreamExt; @@ -46,7 +44,7 @@ pub trait HeaderDownloader: Sync + Send { async fn download_headers( &self, stream: &mut HeadersStream, - start: BlockId, + start: BlockHashOrNumber, limit: u64, ) -> Result, DownloadError> { let request_id = rand::random(); diff --git a/crates/net/eth-wire/src/types/blocks.rs b/crates/net/eth-wire/src/types/blocks.rs index 7f90bf6c90..b5b00a6cc2 100644 --- a/crates/net/eth-wire/src/types/blocks.rs +++ b/crates/net/eth-wire/src/types/blocks.rs @@ -1,59 +1,8 @@ //! Implements the `GetBlockHeaders`, `GetBlockBodies`, `BlockHeaders`, and `BlockBodies` message //! types. -use reth_primitives::{Header, TransactionSigned, H256}; -use reth_rlp::{ - Decodable, DecodeError, Encodable, RlpDecodable, RlpDecodableWrapper, RlpEncodable, - RlpEncodableWrapper, -}; - use super::RawBlockBody; - -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -/// Either a block hash _or_ a block number -pub enum BlockHashOrNumber { - /// A block hash - Hash(H256), - /// A block number - Number(u64), -} - -/// Allows for RLP encoding of either a block hash or block number -impl Encodable for BlockHashOrNumber { - fn length(&self) -> usize { - match self { - Self::Hash(block_hash) => block_hash.length(), - Self::Number(block_number) => block_number.length(), - } - } - fn encode(&self, out: &mut dyn bytes::BufMut) { - match self { - Self::Hash(block_hash) => block_hash.encode(out), - Self::Number(block_number) => block_number.encode(out), - } - } -} - -/// Allows for RLP decoding of a block hash or block number -impl Decodable for BlockHashOrNumber { - fn decode(buf: &mut &[u8]) -> Result { - let header: u8 = *buf.first().ok_or(DecodeError::InputTooShort)?; - // if the byte string is exactly 32 bytes, decode it into a Hash - // 0xa0 = 0x80 (start of string) + 0x20 (32, length of string) - if header == 0xa0 { - // strip the first byte, parsing the rest of the string. - // If the rest of the string fails to decode into 32 bytes, we'll bubble up the - // decoding error. - let hash = H256::decode(buf)?; - Ok(Self::Hash(hash)) - } else { - // a block number when encoded as bytes ranges from 0 to any number of bytes - we're - // going to accept numbers which fit in less than 64 bytes. - // Any data larger than this which is not caught by the Hash decoding should error and - // is considered an invalid block number. 
- Ok(Self::Number(u64::decode(buf)?)) - } - } -} +use reth_primitives::{BlockHashOrNumber, Header, TransactionSigned, H256}; +use reth_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; /// A request for a peer to return block headers starting at the requested block. /// The peer must return at most [`limit`](#structfield.limit) headers. @@ -152,11 +101,11 @@ mod test { }; use hex_literal::hex; use reth_primitives::{ - Header, Signature, Transaction, TransactionKind, TransactionSigned, U256, + BlockHashOrNumber, Header, Signature, Transaction, TransactionKind, TransactionSigned, U256, }; use reth_rlp::{Decodable, Encodable}; - use super::{BlockBody, BlockHashOrNumber}; + use super::BlockBody; #[test] fn decode_hash() { diff --git a/crates/net/headers-downloaders/src/linear.rs b/crates/net/headers-downloaders/src/linear.rs index b52718d0ed..b2a2d34ca0 100644 --- a/crates/net/headers-downloaders/src/linear.rs +++ b/crates/net/headers-downloaders/src/linear.rs @@ -9,7 +9,7 @@ use reth_interfaces::{ error::DownloadError, }, }; -use reth_primitives::{rpc::BlockId, SealedHeader}; +use reth_primitives::SealedHeader; use reth_rpc_types::engine::ForkchoiceState; /// Download headers in batches @@ -101,8 +101,7 @@ impl LinearDownloader { ) -> Result { // Request headers starting from tip or earliest cached let start = earliest.map_or(forkchoice.head_block_hash, |h| h.parent_hash); - let mut headers = - self.download_headers(stream, BlockId::Hash(start), self.batch_size).await?; + let mut headers = self.download_headers(stream, start.into(), self.batch_size).await?; headers.sort_unstable_by_key(|h| h.number); let mut out = Vec::with_capacity(headers.len()); @@ -207,7 +206,7 @@ mod tests { TestConsensus, TestHeadersClient, }, }; - use reth_primitives::{rpc::BlockId, SealedHeader}; + use reth_primitives::{BlockHashOrNumber, SealedHeader}; use assert_matches::assert_matches; use once_cell::sync::Lazy; @@ -301,7 +300,7 @@ mod tests { assert_matches!( request, Some((_, HeadersRequest { start, .. })) - if matches!(start, BlockId::Hash(hash) if *hash == tip_hash) + if matches!(start, BlockHashOrNumber::Hash(hash) if *hash == tip_hash) ); let request = request.unwrap(); diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index f826880fac..a48fc12cf8 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,4 +1,5 @@ use crate::{Header, Receipt, SealedHeader, Transaction, TransactionSigned, H256}; +use reth_rlp::{Decodable, DecodeError, Encodable}; use std::ops::Deref; /// Ethereum full block. 
@@ -47,3 +48,62 @@ impl Deref for BlockLocked { self.header.as_ref() } } + +/// Either a block hash _or_ a block number +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum BlockHashOrNumber { + /// A block hash + Hash(H256), + /// A block number + Number(u64), +} + +impl From for BlockHashOrNumber { + fn from(value: H256) -> Self { + BlockHashOrNumber::Hash(value) + } +} + +impl From for BlockHashOrNumber { + fn from(value: u64) -> Self { + BlockHashOrNumber::Number(value) + } +} + +/// Allows for RLP encoding of either a block hash or block number +impl Encodable for BlockHashOrNumber { + fn length(&self) -> usize { + match self { + Self::Hash(block_hash) => block_hash.length(), + Self::Number(block_number) => block_number.length(), + } + } + fn encode(&self, out: &mut dyn bytes::BufMut) { + match self { + Self::Hash(block_hash) => block_hash.encode(out), + Self::Number(block_number) => block_number.encode(out), + } + } +} + +/// Allows for RLP decoding of a block hash or block number +impl Decodable for BlockHashOrNumber { + fn decode(buf: &mut &[u8]) -> Result { + let header: u8 = *buf.first().ok_or(DecodeError::InputTooShort)?; + // if the byte string is exactly 32 bytes, decode it into a Hash + // 0xa0 = 0x80 (start of string) + 0x20 (32, length of string) + if header == 0xa0 { + // strip the first byte, parsing the rest of the string. + // If the rest of the string fails to decode into 32 bytes, we'll bubble up the + // decoding error. + let hash = H256::decode(buf)?; + Ok(Self::Hash(hash)) + } else { + // a block number when encoded as bytes ranges from 0 to any number of bytes - we're + // going to accept numbers which fit in less than 64 bytes. + // Any data larger than this which is not caught by the Hash decoding should error and + // is considered an invalid block number. 
+ Ok(Self::Number(u64::decode(buf)?)) + } + } +} diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index aaff276389..e810143dc1 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -29,7 +29,7 @@ mod transaction; pub mod proofs; pub use account::Account; -pub use block::{Block, BlockLocked}; +pub use block::{Block, BlockHashOrNumber, BlockLocked}; pub use chain::Chain; pub use constants::MAINNET_GENESIS; pub use forkid::{ForkFilter, ForkHash, ForkId, ValidationError}; From f8fddcdfa4e72d140c94dcdb5add5604f5c01708 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 15 Nov 2022 22:27:41 +0100 Subject: [PATCH 8/9] feat(p2p): integrate txpool in p2p (#208) --- Cargo.lock | 3 +- crates/net/network/src/network.rs | 2 +- crates/net/network/src/transactions.rs | 245 ++++++++++++++++--- crates/primitives/src/lib.rs | 4 +- crates/primitives/src/transaction/mod.rs | 18 ++ crates/transaction-pool/Cargo.toml | 3 +- crates/transaction-pool/src/lib.rs | 21 +- crates/transaction-pool/src/pool/listener.rs | 4 +- crates/transaction-pool/src/pool/mod.rs | 34 ++- crates/transaction-pool/src/pool/txpool.rs | 14 +- crates/transaction-pool/src/traits.rs | 18 +- 11 files changed, 305 insertions(+), 61 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c077783af8..3fececbd57 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3465,7 +3465,7 @@ dependencies = [ "async-trait", "bitflags", "fnv", - "futures", + "futures-util", "linked-hash-map", "parking_lot 0.12.1", "paste", @@ -3473,6 +3473,7 @@ dependencies = [ "reth-primitives", "serde", "thiserror", + "tokio", "tracing", ] diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 9c8f079de7..3b797c9f4c 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -50,7 +50,7 @@ impl NetworkHandle { } /// Sends a [`NetworkHandleMessage`] to the manager - fn send_message(&self, msg: NetworkHandleMessage) { + pub(crate) fn send_message(&self, msg: NetworkHandleMessage) { let _ = self.inner.to_manager_tx.send(msg); } diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index 9a0a07630b..d952439bec 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -1,24 +1,35 @@ //! Transaction management for the p2p network. 
-use crate::{cache::LruCache, manager::NetworkEvent, message::PeerRequestSender, NetworkHandle};
-use futures::stream::FuturesUnordered;
-use reth_primitives::{PeerId, Transaction, H256};
-use reth_transaction_pool::TransactionPool;
+use crate::{
+    cache::LruCache,
+    manager::NetworkEvent,
+    message::{PeerRequest, PeerRequestSender},
+    network::NetworkHandleMessage,
+    NetworkHandle,
+};
+use futures::{stream::FuturesUnordered, FutureExt, StreamExt};
+use reth_eth_wire::{GetPooledTransactions, NewPooledTransactionHashes, PooledTransactions};
+use reth_interfaces::p2p::error::RequestResult;
+use reth_primitives::{
+    FromRecoveredTransaction, IntoRecoveredTransaction, PeerId, TransactionSigned, TxHash, H256,
+};
+use reth_transaction_pool::{error::PoolResult, TransactionPool};
 use std::{
     collections::{hash_map::Entry, HashMap},
     future::Future,
     num::NonZeroUsize,
     pin::Pin,
     sync::Arc,
+    task::{Context, Poll},
 };
-use tokio::sync::mpsc;
-use tokio_stream::wrappers::UnboundedReceiverStream;
+use tokio::sync::{mpsc, oneshot, oneshot::Sender};
+use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream};
 
 /// Cache limit of transactions to keep track of for a single peer.
-const PEER_TRANSACTION_CACHE_LIMIT: usize = 1024;
+const PEER_TRANSACTION_CACHE_LIMIT: usize = 1024 * 10;
 
 /// The future for importing a transaction into the pool
-pub type PoolImportFuture = Pin<Box<dyn Future<Output = ()> + Send>>;
+pub type PoolImportFuture = Pin<Box<dyn Future<Output = PoolResult<TxHash>> + Send + 'static>>;
 
 /// Api to interact with [`TransactionsManager`] task.
 pub struct TransactionsHandle {
@@ -52,11 +63,13 @@ pub struct TransactionsManager<Pool: TransactionPool> {
     ///
     /// From which we get all new incoming transaction related messages.
     network_events: UnboundedReceiverStream<NetworkEvent>,
+    /// All currently active requests for pooled transactions.
+    inflight_requests: Vec<GetPooledTxRequest>,
     /// All currently pending transactions grouped by peers.
     ///
     /// This way we can track incoming transactions and prevent multiple pool imports for the same
     /// transaction
-    transactions_by_peers: HashMap<H256, Vec<PeerId>>,
+    transactions_by_peers: HashMap<TxHash, Vec<PeerId>>,
     /// Transactions that are currently imported into the `Pool`
     pool_imports: FuturesUnordered<PoolImportFuture>,
     /// All the connected peers.
@@ -65,28 +78,36 @@ pub struct TransactionsManager<Pool: TransactionPool> {
     command_tx: mpsc::UnboundedSender<TransactionsCommand>,
     /// Incoming commands from [`TransactionsHandle`].
     command_rx: UnboundedReceiverStream<TransactionsCommand>,
+    /// A stream of new pending transactions from the pool.
+    pending_transactions: ReceiverStream<TxHash>,
 }
 
 // === impl TransactionsManager ===
 
 impl<Pool> TransactionsManager<Pool>
 where
-    Pool: TransactionPool,
+    Pool: TransactionPool + Clone,
+    <Pool as TransactionPool>::Transaction: IntoRecoveredTransaction,
 {
     /// Sets up a new instance.
pub fn new(network: NetworkHandle, pool: Pool) -> Self { let network_events = network.event_listener(); let (command_tx, command_rx) = mpsc::unbounded_channel(); + // install a listener for new transactions + let pending = pool.pending_transactions_listener(); + Self { pool, network, network_events: UnboundedReceiverStream::new(network_events), + inflight_requests: Default::default(), transactions_by_peers: Default::default(), pool_imports: Default::default(), peers: Default::default(), command_tx, command_rx: UnboundedReceiverStream::new(command_rx), + pending_transactions: ReceiverStream::new(pending), } } @@ -95,8 +116,63 @@ where TransactionsHandle { manager_tx: self.command_tx.clone() } } + /// Request handler for an incoming request for transactions + fn on_get_pooled_transactions( + &mut self, + peer_id: PeerId, + request: GetPooledTransactions, + response: Sender>, + ) { + if let Some(peer) = self.peers.get_mut(&peer_id) { + let transactions = self + .pool + .get_all(request.0) + .into_iter() + .map(|tx| tx.transaction.to_recovered_transaction().into_signed()) + .collect::>(); + + // we sent a response at which point we assume that the peer is aware of the transaction + peer.transactions.extend(transactions.iter().map(|tx| tx.hash())); + + let resp = PooledTransactions(transactions); + let _ = response.send(Ok(resp)); + } + } + + /// Request handler for an incoming `NewPooledTransactionHashes` + fn on_new_pooled_transactions( + &mut self, + peer_id: PeerId, + msg: Arc, + ) { + if let Some(peer) = self.peers.get_mut(&peer_id) { + let mut transactions = Arc::try_unwrap(msg).unwrap_or_else(|arc| (*arc).clone()).0; + + // keep track of the transactions the peer knows + peer.transactions.extend(transactions.clone()); + + self.pool.retain_unknown(&mut transactions); + + if transactions.is_empty() { + // nothing to request + return + } + + // request the missing transactions + let (response, rx) = oneshot::channel(); + let req = PeerRequest::GetPooledTransactions { + request: GetPooledTransactions(transactions), + response, + }; + + if peer.request_tx.try_send(req).is_ok() { + self.inflight_requests.push(GetPooledTxRequest { peer_id, response: rx }) + } + } + } + /// Handles a received event - async fn on_event(&mut self, event: NetworkEvent) { + fn on_event(&mut self, event: NetworkEvent) { match event { NetworkEvent::SessionClosed { peer_id } => { // remove the peer @@ -114,35 +190,140 @@ where }, ); - // TODO send `NewPooledTransactionHashes + // Send a `NewPooledTransactionHashes` to the peer with _all_ transactions in the + // pool + let msg = Arc::new(NewPooledTransactionHashes(self.pool.pooled_transactions())); + self.network.send_message(NetworkHandleMessage::SendPooledTransactionHashes { + peer_id, + msg, + }) } NetworkEvent::IncomingTransactions { peer_id, msg } => { let transactions = Arc::try_unwrap(msg).unwrap_or_else(|arc| (*arc).clone()); - - if let Some(peer) = self.peers.get_mut(&peer_id) { - for tx in transactions.0 { - // track that the peer knows this transaction - peer.transactions.insert(tx.hash); - - match self.transactions_by_peers.entry(tx.hash) { - Entry::Occupied(mut entry) => { - // transaction was already inserted - entry.get_mut().push(peer_id); - } - Entry::Vacant(_) => { - // TODO import into the pool - } - } - } + self.import_transactions(peer_id, transactions.0); + } + NetworkEvent::IncomingPooledTransactionHashes { peer_id, msg } => { + self.on_new_pooled_transactions(peer_id, msg) + } + NetworkEvent::GetPooledTransactions { peer_id, request, response } 
=> { + if let Ok(response) = Arc::try_unwrap(response) { + // TODO(mattsse): there should be a dedicated channel for the transaction + // manager instead + self.on_get_pooled_transactions(peer_id, request, response) } } - NetworkEvent::IncomingPooledTransactionHashes { .. } => {} - NetworkEvent::GetPooledTransactions { .. } => {} } } - /// Executes an endless future - pub async fn run(self) {} + /// Starts the import process for the given transactions. + fn import_transactions(&mut self, peer_id: PeerId, transactions: Vec) { + if let Some(peer) = self.peers.get_mut(&peer_id) { + for tx in transactions { + // recover transaction + let tx = if let Some(tx) = tx.into_ecrecovered() { + tx + } else { + // TODO: report peer? + continue + }; + + // track that the peer knows this transaction + peer.transactions.insert(tx.hash); + + match self.transactions_by_peers.entry(tx.hash) { + Entry::Occupied(mut entry) => { + // transaction was already inserted + entry.get_mut().push(peer_id); + } + Entry::Vacant(entry) => { + // this is a new transaction that should be imported into the pool + let pool_transaction = ::from_recovered_transaction(tx); + + let pool = self.pool.clone(); + let import = Box::pin(async move { + pool.add_external_transaction(pool_transaction).await + }); + + self.pool_imports.push(import); + entry.insert(vec![peer_id]); + } + } + } + } + } + + fn on_good_import(&mut self, hash: TxHash) { + if let Some(_peers) = self.transactions_by_peers.remove(&hash) { + // TODO report good peer? + } + } + + fn on_bad_import(&mut self, hash: TxHash) { + if let Some(_peers) = self.transactions_by_peers.remove(&hash) { + // TODO report bad peer? + } + } +} + +/// An endless future. +/// +/// This should be spawned or used as part of `tokio::select!`. +impl Future for TransactionsManager +where + Pool: TransactionPool + Clone + Unpin, + ::Transaction: IntoRecoveredTransaction, +{ + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.get_mut(); + + // Advance all imports + while let Poll::Ready(Some(import_res)) = this.pool_imports.poll_next_unpin(cx) { + match import_res { + Ok(hash) => { + this.on_good_import(hash); + } + Err(err) => { + this.on_bad_import(*err.hash()); + } + } + } + + // handle new transactions + while let Poll::Ready(Some(_hash)) = this.pending_transactions.poll_next_unpin(cx) { + // TODO(mattsse): propagate new transactions + } + + // Advance all requests. + // We remove each request one by one and add them back. 
+ for idx in (0..this.inflight_requests.len()).rev() { + let mut req = this.inflight_requests.swap_remove(idx); + match req.response.poll_unpin(cx) { + Poll::Pending => { + this.inflight_requests.push(req); + } + Poll::Ready(Ok(Ok(txs))) => { + this.import_transactions(req.peer_id, txs.0); + } + Poll::Ready(Ok(Err(_))) => { + // TODO report bad peer + } + Poll::Ready(Err(_)) => { + // TODO report bad peer + } + } + } + + Poll::Pending + } +} + +/// An inflight request for `PooledTransactions` from a peer +#[allow(missing_docs)] +struct GetPooledTxRequest { + peer_id: PeerId, + response: oneshot::Receiver>, } /// Tracks a single peer diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index e810143dc1..a513e468e1 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -42,8 +42,8 @@ pub use log::Log; pub use receipt::Receipt; pub use storage::StorageEntry; pub use transaction::{ - AccessList, AccessListItem, FromRecoveredTransaction, Signature, Transaction, TransactionKind, - TransactionSigned, TransactionSignedEcRecovered, TxType, + AccessList, AccessListItem, FromRecoveredTransaction, IntoRecoveredTransaction, Signature, + Transaction, TransactionKind, TransactionSigned, TransactionSignedEcRecovered, TxType, }; /// A block hash. diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 6a6dd826b0..5915959872 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -571,6 +571,8 @@ impl TransactionSigned { } /// Recover signer from signature and hash. + /// + /// Returns `None` if the transaction's signature is invalid. pub fn recover_signer(&self) -> Option
{
        let signature_hash = self.signature_hash();
        self.signature.recover_signer(signature_hash)
@@ -724,6 +726,22 @@ impl FromRecoveredTransaction for TransactionSignedEcRecovered {
     }
 }
 
+/// The inverse of [`FromRecoveredTransaction`] that ensures the transaction can be sent over the
+/// network
+pub trait IntoRecoveredTransaction {
+    /// Converts this type into a [`TransactionSignedEcRecovered`].
+    ///
+    /// Note: this takes `&self` since intended usage is via `Arc`.
+    fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered;
+}
+
+impl IntoRecoveredTransaction for TransactionSignedEcRecovered {
+    #[inline]
+    fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered {
+        self.clone()
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use crate::{
diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml
index a739870e0a..eb1ee3276f 100644
--- a/crates/transaction-pool/Cargo.toml
+++ b/crates/transaction-pool/Cargo.toml
@@ -16,8 +16,9 @@ reth-primitives = { path = "../primitives" }
 
 # async/futures
 async-trait = "0.1"
-futures = "0.3"
+futures-util = "0.3"
 parking_lot = "0.12"
+tokio = { version = "1", default-features = false, features = ["sync"] }
 
 # misc
 aquamarine = "0.1" # docs
diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs
index 8a80d6d478..a29554212f 100644
--- a/crates/transaction-pool/src/lib.rs
+++ b/crates/transaction-pool/src/lib.rs
@@ -87,9 +87,9 @@ use crate::{
     traits::{NewTransactionEvent, PoolStatus, TransactionOrigin},
     validate::ValidPoolTransaction,
 };
-use futures::channel::mpsc::Receiver;
 use reth_primitives::{BlockID, TxHash, U256, U64};
 use std::{collections::HashMap, sync::Arc};
+use tokio::sync::mpsc::Receiver;
 
 mod config;
 pub mod error;
@@ -131,11 +131,12 @@ where
         origin: TransactionOrigin,
         transactions: impl IntoIterator,
     ) -> PoolResult>> {
-        let outcome =
-            futures::future::join_all(transactions.into_iter().map(|tx| self.validate(origin, tx)))
-                .await
-                .into_iter()
-                .collect::<Vec<_>>();
+        let outcome = futures_util::future::join_all(
+            transactions.into_iter().map(|tx| self.validate(origin, tx)),
+        )
+        .await
+        .into_iter()
+        .collect::<Vec<_>>();
 
         Ok(outcome)
     }
@@ -209,6 +210,10 @@ where
         self.pool.add_transaction_listener()
     }
 
+    fn pooled_transactions(&self) -> Vec<TxHash> {
+        self.pool.pooled_transactions()
+    }
+
     fn best_transactions(
         &self,
     ) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>> {
@@ -222,6 +227,10 @@ where
         todo!()
     }
 
+    fn retain_unknown(&self, hashes: &mut Vec<TxHash>) {
+        self.pool.retain_unknown(hashes)
+    }
+
     fn get(&self, tx_hash: &TxHash) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
         self.inner().get(tx_hash)
     }
diff --git a/crates/transaction-pool/src/pool/listener.rs b/crates/transaction-pool/src/pool/listener.rs
index b9d2592134..05c84261f8 100644
--- a/crates/transaction-pool/src/pool/listener.rs
+++ b/crates/transaction-pool/src/pool/listener.rs
@@ -1,9 +1,9 @@
 //! Listeners for the transaction-pool
 
 use crate::pool::events::TransactionEvent;
-use futures::channel::mpsc::UnboundedSender;
 use reth_primitives::H256;
 use std::{collections::HashMap, hash};
+use tokio::sync::mpsc::UnboundedSender;
 
 type EventSink<H> = UnboundedSender<TransactionEvent<H>>;
 
@@ -75,7 +75,7 @@ struct PoolEventNotifier {
 
 impl PoolEventNotifier {
     fn notify(&mut self, event: TransactionEvent) {
-        self.senders.retain(|sender| sender.unbounded_send(event.clone()).is_ok())
+        self.senders.retain(|sender| sender.send(event.clone()).is_ok())
     }
 
     fn is_done(&self) -> bool {
diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs
index eb6dfda91a..6d8c3c4074 100644
--- a/crates/transaction-pool/src/pool/mod.rs
+++ b/crates/transaction-pool/src/pool/mod.rs
@@ -73,7 +73,6 @@ use crate::{
 };
 use best::BestTransactions;
 pub use events::TransactionEvent;
-use futures::channel::mpsc::{channel, Receiver, Sender};
 use parking_lot::{Mutex, RwLock};
 use reth_primitives::{Address, TxHash, H256};
 use std::{
@@ -81,6 +80,7 @@ use std::{
     sync::Arc,
     time::Instant,
 };
+use tokio::sync::mpsc;
 use tracing::warn;
 
 mod best;
@@ -107,9 +107,9 @@ pub struct PoolInner {
     /// Manages listeners for transaction state change events.
     event_listener: RwLock<PoolEventListener<TxHash>>,
     /// Listeners for new ready transactions.
-    pending_transaction_listener: Mutex<Vec<Sender<TxHash>>>,
+    pending_transaction_listener: Mutex<Vec<mpsc::Sender<TxHash>>>,
     /// Listeners for new transactions added to the pool.
-    transaction_listener: Mutex<Vec<Sender<NewTransactionEvent<T::Transaction>>>>,
+    transaction_listener: Mutex<Vec<mpsc::Sender<NewTransactionEvent<T::Transaction>>>>,
 }
 
 // === impl PoolInner ===
@@ -149,17 +149,23 @@ where
     /// Adds a new transaction listener to the pool that gets notified about every new _ready_
     /// transaction
-    pub fn add_pending_listener(&self) -> Receiver<TxHash> {
+    pub fn add_pending_listener(&self) -> mpsc::Receiver<TxHash> {
         const TX_LISTENER_BUFFER_SIZE: usize = 2048;
-        let (tx, rx) = channel(TX_LISTENER_BUFFER_SIZE);
+        let (tx, rx) = mpsc::channel(TX_LISTENER_BUFFER_SIZE);
         self.pending_transaction_listener.lock().push(tx);
         rx
     }
 
+    /// Returns hashes of _all_ transactions in the pool.
+    pub(crate) fn pooled_transactions(&self) -> Vec<TxHash> {
+        let pool = self.pool.read();
+        pool.all().hashes_iter().collect()
+    }
+
     /// Adds a new transaction listener to the pool that gets notified about every new transaction
-    pub fn add_transaction_listener(&self) -> Receiver<NewTransactionEvent<T::Transaction>> {
+    pub fn add_transaction_listener(&self) -> mpsc::Receiver<NewTransactionEvent<T::Transaction>> {
         const TX_LISTENER_BUFFER_SIZE: usize = 1024;
-        let (tx, rx) = channel(TX_LISTENER_BUFFER_SIZE);
+        let (tx, rx) = mpsc::channel(TX_LISTENER_BUFFER_SIZE);
         self.transaction_listener.lock().push(tx);
         rx
     }
@@ -256,8 +262,8 @@ where
         let mut transaction_listeners = self.pending_transaction_listener.lock();
         transaction_listeners.retain_mut(|listener| match listener.try_send(*ready) {
             Ok(()) => true,
-            Err(e) => {
-                if e.is_full() {
+            Err(err) => {
+                if matches!(err, mpsc::error::TrySendError::Full(_)) {
                     warn!(
                         target: "txpool",
                         "[{:?}] dropping full ready transaction listener",
@@ -277,8 +283,8 @@ where
         transaction_listeners.retain_mut(|listener| match listener.try_send(event.clone()) {
             Ok(()) => true,
-            Err(e) => {
-                if e.is_full() {
+            Err(err) => {
+                if matches!(err, mpsc::error::TrySendError::Full(_)) {
                     warn!(
                         target: "txpool",
                         "dropping full transaction listener",
@@ -325,6 +331,12 @@ where
         self.pool.read().best_transactions()
     }
 
+    /// Removes all transaction hashes from the given set that are already present in the pool.
+    pub(crate) fn retain_unknown(&self, hashes: &mut Vec<TxHash>) {
+        let pool = self.pool.read();
+        hashes.retain(|tx| !pool.contains(tx))
+    }
+
     /// Returns the transaction by hash.
     pub(crate) fn get(
         &self,
diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs
index 391496c576..0cbc6ed121 100644
--- a/crates/transaction-pool/src/pool/txpool.rs
+++ b/crates/transaction-pool/src/pool/txpool.rs
@@ -102,6 +102,11 @@ impl<T: TransactionOrdering> TxPool<T> {
         }
     }
 
+    /// Returns access to the [`AllTransactions`] container.
+    pub(crate) fn all(&self) -> &AllTransactions<T::Transaction> {
+        &self.all_transactions
+    }
+
     /// Returns stats about the pool.
     pub(crate) fn status(&self) -> PoolStatus {
         PoolStatus {
@@ -417,10 +422,6 @@ impl<T: TransactionOrdering> TxPool<T> {
 #[cfg(test)]
 #[allow(missing_docs)]
 impl<T: TransactionOrdering> TxPool<T> {
-    pub(crate) fn all(&self) -> &AllTransactions<T::Transaction> {
-        &self.all_transactions
-    }
-
     pub(crate) fn pending(&self) -> &PendingPool<T> {
         &self.pending_pool
     }
@@ -463,6 +464,11 @@ impl<T: PoolTransaction> AllTransactions<T> {
         Self { max_account_slots, ..Default::default() }
     }
 
+    /// Returns an iterator over all _unique_ hashes in the pool
+    pub(crate) fn hashes_iter(&self) -> impl Iterator<Item = TxHash> + '_ {
+        self.by_hash.keys().copied()
+    }
+
     /// Returns if the transaction for the given hash is already included in this pool
     pub(crate) fn contains(&self, tx_hash: &TxHash) -> bool {
         self.by_hash.contains_key(tx_hash)
diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs
index e07821eebf..47b4790646 100644
--- a/crates/transaction-pool/src/traits.rs
+++ b/crates/transaction-pool/src/traits.rs
@@ -1,7 +1,7 @@
 use crate::{error::PoolResult, pool::state::SubPool, validate::ValidPoolTransaction, BlockID};
-use futures::{channel::mpsc::Receiver, future::Shared};
 use reth_primitives::{Address, FromRecoveredTransaction, TxHash, H256, U256};
 use std::{fmt, sync::Arc};
+use tokio::sync::mpsc::Receiver;
 
 /// General purpose abstraction of a transaction-pool.
 ///
@@ -27,6 +27,8 @@ pub trait TransactionPool: Send + Sync + 'static {
     ///
     /// This is intended to be used by the network to insert incoming transactions received over the
     /// p2p network.
+    ///
+    /// Consumer: P2P
     async fn add_external_transaction(&self, transaction: Self::Transaction) -> PoolResult<TxHash> {
         self.add_transaction(TransactionOrigin::External, transaction).await
     }
@@ -59,6 +61,13 @@ pub trait TransactionPool: Send + Sync + 'static {
     /// Returns a new stream that yields new valid transactions added to the pool.
     fn transactions_listener(&self) -> Receiver<NewTransactionEvent<Self::Transaction>>;
 
+    /// Returns hashes of all transactions in the pool.
+    ///
+    /// Note: This returns a `Vec` but should guarantee that all hashes are unique.
+    ///
+    /// Consumer: P2P
+    fn pooled_transactions(&self) -> Vec<TxHash>;
+
     /// Returns an iterator that yields transactions that are ready for block production.
     ///
     /// Consumer: Block production
@@ -76,6 +85,13 @@ pub trait TransactionPool: Send + Sync + 'static {
         tx_hashes: &[TxHash],
     ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
 
+    /// Retains only those hashes that are unknown to the pool.
+    /// In other words, removes all transactions from the given set that are currently present in
+    /// the pool.
+    ///
+    /// Consumer: P2P
+    fn retain_unknown(&self, hashes: &mut Vec<TxHash>);
+
     /// Returns if the transaction for the given hash is already included in this pool.
    fn contains(&self, tx_hash: &TxHash) -> bool {
         self.get(tx_hash).is_some()

From 6b336c62fbdb264979709423e9f2ac42be838fe7 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Tue, 15 Nov 2022 23:33:03 +0100
Subject: [PATCH 9/9] feat: add block propagation handlers (#205)

---
 crates/net/eth-wire/src/types/broadcast.rs | 31 ++++++++++++++
 crates/net/network/src/fetch.rs            | 18 ++++++++-
 crates/net/network/src/import.rs           | 26 +++++++++++-
 crates/net/network/src/manager.rs          | 25 ++++++++++--
 crates/net/network/src/message.rs          | 11 ++++-
 crates/net/network/src/state.rs            | 47 +++++++++++++++++++++-
 crates/net/network/src/swarm.rs            |  4 ++
 7 files changed, 151 insertions(+), 11 deletions(-)

diff --git a/crates/net/eth-wire/src/types/broadcast.rs b/crates/net/eth-wire/src/types/broadcast.rs
index 601aaac380..a80dc23789 100644
--- a/crates/net/eth-wire/src/types/broadcast.rs
+++ b/crates/net/eth-wire/src/types/broadcast.rs
@@ -10,6 +10,20 @@ pub struct NewBlockHashes(
     pub Vec<BlockHashNumber>,
 );
 
+// === impl NewBlockHashes ===
+
+impl NewBlockHashes {
+    /// Returns the latest block in the list of blocks.
+    pub fn latest(&self) -> Option<&BlockHashNumber> {
+        self.0.iter().fold(None, |latest, block| {
+            if let Some(latest) = latest {
+                return if latest.number > block.number { Some(latest) } else { Some(block) }
+            }
+            Some(block)
+        })
+    }
+}
+
 /// A block hash _and_ a block number.
 #[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable)]
 pub struct BlockHashNumber {
@@ -87,3 +101,20 @@ impl From<Vec<H256>> for NewPooledTransactionHashes {
         NewPooledTransactionHashes(v)
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn can_return_latest_block() {
+        let mut blocks = NewBlockHashes(vec![BlockHashNumber { hash: H256::random(), number: 0 }]);
+        let latest = blocks.latest().unwrap();
+        assert_eq!(latest.number, 0);
+
+        blocks.0.push(BlockHashNumber { hash: H256::random(), number: 100 });
+        blocks.0.push(BlockHashNumber { hash: H256::random(), number: 2 });
+        let latest = blocks.latest().unwrap();
+        assert_eq!(latest.number, 100);
+    }
+}
diff --git a/crates/net/network/src/fetch.rs b/crates/net/network/src/fetch.rs
index 151051b761..f732fd2e07 100644
--- a/crates/net/network/src/fetch.rs
+++ b/crates/net/network/src/fetch.rs
@@ -43,7 +43,7 @@ impl StateFetcher {
         &mut self,
         peer_id: PeerId,
         best_hash: H256,
-        best_number: Option<u64>,
+        best_number: u64,
     ) {
         self.peers.insert(peer_id, Peer { state: PeerState::Idle, best_hash, best_number });
     }
@@ -61,6 +61,20 @@ impl StateFetcher {
         }
     }
 
+    /// Updates the block information for the peer.
+    ///
+    /// Returns `true` if this is a newer block
+    pub(crate) fn update_peer_block(&mut self, peer_id: &PeerId, hash: H256, number: u64) -> bool {
+        if let Some(peer) = self.peers.get_mut(peer_id) {
+            if number > peer.best_number {
+                peer.best_hash = hash;
+                peer.best_number = number;
+                return true
+            }
+        }
+        false
+    }
+
     /// Invoked when an active session is about to be disconnected.
     pub(crate) fn on_pending_disconnect(&mut self, peer_id: &PeerId) {
         if let Some(peer) = self.peers.get_mut(peer_id) {
@@ -246,7 +260,7 @@ struct Peer {
     /// Best known hash that the peer has
     best_hash: H256,
     /// Tracks the best number of the peer.
-    best_number: Option<u64>,
+    best_number: u64,
 }
 
 /// Tracks the state of an individual peer
diff --git a/crates/net/network/src/import.rs b/crates/net/network/src/import.rs
index 3a54d76dd2..b5fb3342ec 100644
--- a/crates/net/network/src/import.rs
+++ b/crates/net/network/src/import.rs
@@ -22,11 +22,33 @@ pub struct BlockImportOutcome {
     /// Sender of the `NewBlock` message.
    pub peer: PeerId,
     /// The result after validating the block
-    pub result: Result<(), BlockImportError>,
+    pub result: Result<BlockValidation, BlockImportError>,
+}
+
+/// Represents the successful validation of a received `NewBlock` message.
+#[derive(Debug)]
+pub enum BlockValidation {
+    /// Basic header validity check, after which the block should be relayed to peers via a
+    /// `NewBlock` message
+    ValidHeader {
+        /// received block
+        block: NewBlockMessage,
+    },
+    /// Successfully imported: state-root matches after execution. The block should be relayed via
+    /// `NewBlockHashes`
+    ValidBlock {
+        /// validated block.
+        block: NewBlockMessage,
+    },
 }
 
 /// Represents the error case of a failed block import
-pub enum BlockImportError {}
+#[derive(Debug, thiserror::Error)]
+pub enum BlockImportError {
+    /// Consensus error
+    #[error(transparent)]
+    Consensus(#[from] reth_interfaces::consensus::Error),
+}
 
 /// An implementation of `BlockImport` that does nothing
 #[derive(Debug, Default)]
diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs
index 0c5bc208e4..399c9eaf1b 100644
--- a/crates/net/network/src/manager.rs
+++ b/crates/net/network/src/manager.rs
@@ -19,7 +19,7 @@ use crate::{
     config::NetworkConfig,
     discovery::Discovery,
     error::NetworkError,
-    import::{BlockImport, BlockImportOutcome},
+    import::{BlockImport, BlockImportOutcome, BlockValidation},
     listener::ConnectionListener,
     message::{NewBlockMessage, PeerMessage, PeerRequest, PeerRequestSender},
     network::{NetworkHandle, NetworkHandleMessage},
@@ -192,10 +192,30 @@ where
         }
     }
 
+    /// Invoked after a `NewBlock` message from the peer was validated
+    fn on_block_import_result(&mut self, outcome: BlockImportOutcome) {
+        let BlockImportOutcome { peer, result } = outcome;
+        match result {
+            Ok(validated_block) => match validated_block {
+                BlockValidation::ValidHeader { block } => {
+                    self.swarm.state_mut().update_peer_block(&peer, block.hash, block.number());
+                    self.swarm.state_mut().announce_new_block(block);
+                }
+                BlockValidation::ValidBlock { block } => {
+                    self.swarm.state_mut().announce_new_block_hash(block);
+                }
+            },
+            Err(_err) => {
+                // TODO report peer for bad block
+            }
+        }
+    }
+
     /// Handles a received message from the peer.
     fn on_peer_message(&mut self, peer_id: PeerId, msg: PeerMessage) {
         match msg {
             PeerMessage::NewBlockHashes(hashes) => {
+                let hashes = Arc::try_unwrap(hashes).unwrap_or_else(|arc| (*arc).clone());
                 // update peer's state, to track what blocks this peer has seen
                 self.swarm.state_mut().on_new_block_hashes(peer_id, hashes.0)
             }
@@ -240,9 +260,6 @@ where
                 .send_message(&peer_id, PeerMessage::PooledTransactions(msg)),
         }
     }
-
-    /// Invoked after a `NewBlock` message from the peer was validated
-    fn on_block_import_result(&mut self, _outcome: BlockImportOutcome) {}
 }
 
 impl<C> Future for NetworkManager<C>
diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs
index 20abe492e3..7613cac777 100644
--- a/crates/net/network/src/message.rs
+++ b/crates/net/network/src/message.rs
@@ -26,11 +26,20 @@ pub struct NewBlockMessage {
     pub block: Arc<NewBlock>,
 }
 
+// === impl NewBlockMessage ===
+
+impl NewBlockMessage {
+    /// Returns the block number of the block
+    pub fn number(&self) -> u64 {
+        self.block.block.header.number
+    }
+}
+
 /// Represents all messages that can be sent to a peer session
 #[derive(Debug)]
 pub enum PeerMessage {
     /// Announce new block hashes
-    NewBlockHashes(NewBlockHashes),
+    NewBlockHashes(Arc<NewBlockHashes>),
     /// Broadcast new block.
     NewBlock(NewBlockMessage),
     /// Broadcast transactions.
diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs
index 38645098a7..a27ac007c3 100644
--- a/crates/net/network/src/state.rs
+++ b/crates/net/network/src/state.rs
@@ -10,7 +10,7 @@ use crate::{
     },
     peers::{PeerAction, PeersManager},
 };
-use reth_eth_wire::{capability::Capabilities, BlockHashNumber, Status};
+use reth_eth_wire::{capability::Capabilities, BlockHashNumber, NewBlockHashes, Status};
 use reth_interfaces::provider::BlockProvider;
 use reth_primitives::{PeerId, H256};
 use std::{
@@ -93,7 +93,8 @@ where
         debug_assert!(self.connected_peers.contains_key(&peer), "Already connected; not possible");
 
         // find the corresponding block number
-        let block_number = self.client.block_number(status.blockhash).ok().flatten();
+        let block_number =
+            self.client.block_number(status.blockhash).ok().flatten().unwrap_or_default();
         self.state_fetcher.new_connected_peer(peer, status.blockhash, block_number);
 
         self.connected_peers.insert(
@@ -129,6 +130,7 @@ where
         // number of peers)
         let num_propagate = (self.connected_peers.len() as f64).sqrt() as u64 + 1;
 
+        let number = msg.block.block.header.number;
         let mut count = 0;
 
         for (peer_id, peer) in self.connected_peers.iter_mut() {
             if peer.blocks.contains(&msg.hash) {
@@ -141,6 +143,11 @@ where
             self.queued_messages
                 .push_back(StateAction::NewBlock { peer_id: *peer_id, block: msg.clone() });
 
+            // update peer block info
+            if self.state_fetcher.update_peer_block(peer_id, msg.hash, number) {
+                peer.best_hash = msg.hash;
+            }
+
             // mark the block as seen by the peer
             peer.blocks.insert(msg.hash);
 
@@ -153,6 +160,36 @@ where
         }
     }
 
+    /// Completes the block propagation process started in [`NetworkState::announce_new_block()`]
+    /// by sending a `NewBlockHashes` broadcast to all peers that haven't seen it yet.
+    pub(crate) fn announce_new_block_hash(&mut self, msg: NewBlockMessage) {
+        let number = msg.block.block.header.number;
+        let hashes = Arc::new(NewBlockHashes(vec![BlockHashNumber { hash: msg.hash, number }]));
+        for (peer_id, peer) in self.connected_peers.iter_mut() {
+            if peer.blocks.contains(&msg.hash) {
+                // skip peers which already reported the block
+                continue
+            }
+
+            if self.state_fetcher.update_peer_block(peer_id, msg.hash, number) {
+                peer.best_hash = msg.hash;
+            }
+
+            self.queued_messages.push_back(StateAction::NewBlockHashes {
+                peer_id: *peer_id,
+                hashes: Arc::clone(&hashes),
+            });
+        }
+    }
+
+    /// Updates the block information for the peer.
+    pub(crate) fn update_peer_block(&mut self, peer_id: &PeerId, hash: H256, number: u64) {
+        if let Some(peer) = self.connected_peers.get_mut(peer_id) {
+            peer.best_hash = hash;
+        }
+        self.state_fetcher.update_peer_block(peer_id, hash, number);
+    }
+
     /// Invoked after a `NewBlock` message was received from the peer.
     ///
     /// This will keep track of blocks we know a peer has
@@ -342,6 +379,12 @@ pub enum StateAction {
         /// The `NewBlock` message
         block: NewBlockMessage,
     },
+    NewBlockHashes {
+        /// Target of the message
+        peer_id: PeerId,
+        /// `NewBlockHashes` message to send to the peer.
+        hashes: Arc<NewBlockHashes>,
+    },
     /// Create a new connection to the given node.
Connect { remote_addr: SocketAddr, node_id: PeerId }, /// Disconnect an existing connection diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index 9acb9a86f8..ceb40fa86f 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -153,6 +153,10 @@ where let msg = PeerMessage::NewBlock(msg); self.sessions.send_message(&peer_id, msg); } + StateAction::NewBlockHashes { peer_id, hashes } => { + let msg = PeerMessage::NewBlockHashes(hashes); + self.sessions.send_message(&peer_id, msg); + } } None }
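A note on the conversion traits from patches 6 and 8: `FromRecoveredTransaction` lets the pool ingest transactions that arrive over p2p, while `IntoRecoveredTransaction` lets the network turn pooled transactions back into signed transactions for responses. The sketch below is a self-contained illustration of that round-trip pattern; `RecoveredTx` is a stand-in type, not reth's `TransactionSignedEcRecovered`.

#[derive(Clone, Debug, PartialEq)]
struct RecoveredTx {
    hash: u64,
    sender: u64,
}

trait FromRecoveredTransaction {
    fn from_recovered_transaction(tx: RecoveredTx) -> Self;
}

trait IntoRecoveredTransaction {
    // takes &self because pooled transactions are typically shared behind an Arc
    fn to_recovered_transaction(&self) -> RecoveredTx;
}

// no-op impls, mirroring the ones the patches add for TransactionSignedEcRecovered
impl FromRecoveredTransaction for RecoveredTx {
    fn from_recovered_transaction(tx: RecoveredTx) -> Self {
        tx
    }
}

impl IntoRecoveredTransaction for RecoveredTx {
    fn to_recovered_transaction(&self) -> RecoveredTx {
        self.clone()
    }
}

fn main() {
    // p2p -> pool -> p2p round-trip is lossless for the no-op impls
    let wire_tx = RecoveredTx { hash: 1, sender: 2 };
    let pooled = RecoveredTx::from_recovered_transaction(wire_tx.clone());
    assert_eq!(pooled.to_recovered_transaction(), wire_tx);
}

Requiring `PoolTransaction: FromRecoveredTransaction` as a supertrait bound, as patch 6 does, is what lets the generic `TransactionsManager` import any pool's transaction type without knowing its concrete shape.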
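Patch 7's `BlockHashOrNumber::decode` hinges on one observation: the RLP encoding of a 32-byte hash always starts with `0xa0` (the `0x80` string prefix plus length 32), while any valid block number encodes as a shorter string. A minimal std-only sketch of that first-byte dispatch (`classify` is a hypothetical helper, not part of the patch):

// First-byte dispatch: an RLP string of exactly 32 bytes starts with 0xa0 (0x80 + 32),
// so a single peek separates 32-byte hashes from (shorter) block numbers.
fn classify(buf: &[u8]) -> &'static str {
    match buf.first() {
        Some(&0xa0) => "32-byte hash",
        Some(_) => "block number",
        None => "input too short",
    }
}

fn main() {
    // RLP for a 32-byte hash: 0xa0 followed by the 32 payload bytes
    let mut hash_encoding = vec![0xa0u8];
    hash_encoding.extend_from_slice(&[0x11; 32]);

    // RLP for the number 1234: a 2-byte string, prefix 0x80 + 2 = 0x82
    let number_encoding = [0x82u8, 0x04, 0xd2];

    assert_eq!(classify(&hash_encoding), "32-byte hash");
    assert_eq!(classify(&number_encoding), "block number");
}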
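The `transactions_by_peers` map in the `TransactionsManager` exists so that a transaction announced by many peers is imported into the pool exactly once, while still recording every peer that sent it (useful for later reputation updates). A sketch of that bookkeeping, with `u64` stand-ins for `TxHash` and `PeerId`:

use std::collections::{hash_map::Entry, HashMap};

// stand-ins for reth's TxHash and PeerId
type TxHash = u64;
type PeerId = u64;

/// Records that `peer` sent `hash`; returns true only for the first announcement,
/// i.e. when the caller should actually start a pool import.
fn track(by_peers: &mut HashMap<TxHash, Vec<PeerId>>, hash: TxHash, peer: PeerId) -> bool {
    match by_peers.entry(hash) {
        Entry::Occupied(mut entry) => {
            // import already in flight: just remember this peer as a sender
            entry.get_mut().push(peer);
            false
        }
        Entry::Vacant(entry) => {
            entry.insert(vec![peer]);
            true
        }
    }
}

fn main() {
    let mut by_peers = HashMap::new();
    assert!(track(&mut by_peers, 0xaa, 1)); // first announcement -> start import
    assert!(!track(&mut by_peers, 0xaa, 2)); // duplicate -> recorded only
    assert_eq!(by_peers[&0xaa], vec![1, 2]);
}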
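`retain_unknown` is the other half of the hash-announcement protocol: when a peer sends `NewPooledTransactionHashes`, the manager filters out everything the pool already holds and requests only the remainder via `GetPooledTransactions`. A sketch of the filtering step under the simplifying assumption that the pool is just a set of known hashes:

use std::collections::HashSet;

// Stand-in pool: a set of hashes the pool already contains.
fn retain_unknown(pool: &HashSet<u64>, hashes: &mut Vec<u64>) {
    hashes.retain(|h| !pool.contains(h));
}

fn main() {
    let pool: HashSet<u64> = [1, 2, 3].into_iter().collect();
    let mut announced = vec![2, 3, 4, 5];
    retain_unknown(&pool, &mut announced);
    // only hashes the pool has never seen get requested from the peer
    assert_eq!(announced, vec![4, 5]);
}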
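The `Future` impl for `TransactionsManager` drains `inflight_requests` with an index loop rather than an iterator, because pending requests must be put back while completed ones are consumed. Iterating indices in reverse makes `swap_remove` safe: the swapped-in element always comes from a position that has already been visited. A sketch of that pattern with a plain `done` flag standing in for a `oneshot::Receiver` that may or may not be ready:

struct Inflight {
    id: u64,
    done: bool, // stand-in for polling a oneshot::Receiver
}

fn poll_inflight(inflight: &mut Vec<Inflight>) -> Vec<u64> {
    let mut completed = Vec::new();
    // reverse order: swap_remove moves the last element into idx, which is
    // never revisited because we are counting down
    for idx in (0..inflight.len()).rev() {
        let req = inflight.swap_remove(idx);
        if req.done {
            completed.push(req.id);
        } else {
            inflight.push(req); // still pending: put it back
        }
    }
    completed
}

fn main() {
    let mut inflight = vec![
        Inflight { id: 1, done: false },
        Inflight { id: 2, done: true },
        Inflight { id: 3, done: false },
    ];
    let completed = poll_inflight(&mut inflight);
    assert_eq!(completed, vec![2]);
    assert_eq!(inflight.len(), 2); // ids 1 and 3 remain in flight
}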
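Patch 9's `announce_new_block` sends full `NewBlock` messages to roughly the square root of the connected peer count and leaves the rest to the cheaper `NewBlockHashes` follow-up, matching the devp2p block propagation convention. The fan-out calculation is small enough to sketch directly:

// Full blocks go to about sqrt(peer_count) peers; everyone else later gets just the hash.
fn num_propagate(connected_peers: usize) -> u64 {
    (connected_peers as f64).sqrt() as u64 + 1
}

fn main() {
    assert_eq!(num_propagate(0), 1);
    assert_eq!(num_propagate(25), 6);
    assert_eq!(num_propagate(100), 11);
}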