Merge branch 'main' into matt/scaffold-ethapi

This commit is contained in:
Emilia Hane
2024-06-17 10:01:32 +02:00
194 changed files with 2882 additions and 3846 deletions

View File

@@ -72,7 +72,7 @@ jobs:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: "1.76" # MSRV
toolchain: "1.79" # MSRV
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
@@ -115,7 +115,7 @@ jobs:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: "1.76" # MSRV
toolchain: "1.79" # MSRV
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true

414
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,7 +1,7 @@
[workspace.package]
version = "1.0.0-rc.1"
edition = "2021"
rust-version = "1.76"
rust-version = "1.79"
license = "MIT OR Apache-2.0"
homepage = "https://paradigmxyz.github.io/reth"
repository = "https://github.com/paradigmxyz/reth"
@@ -100,9 +100,9 @@ members = [
"crates/tokio-util/",
"crates/tracing/",
"crates/transaction-pool/",
"crates/trie/common",
"crates/trie/parallel/",
"crates/trie/trie",
"crates/trie/types",
"examples/beacon-api-sidecar-fetcher/",
"examples/beacon-api-sse/",
"examples/bsc-p2p",
@@ -334,8 +334,8 @@ reth-tokio-util = { path = "crates/tokio-util" }
reth-tracing = { path = "crates/tracing" }
reth-transaction-pool = { path = "crates/transaction-pool" }
reth-trie = { path = "crates/trie/trie" }
reth-trie-common = { path = "crates/trie/common" }
reth-trie-parallel = { path = "crates/trie/parallel" }
reth-trie-types = { path = "crates/trie/types" }
# revm
revm = { version = "9.0.0", features = [
@@ -346,7 +346,7 @@ revm = { version = "9.0.0", features = [
revm-primitives = { version = "4.0.0", features = [
"std",
], default-features = false }
revm-inspectors = { git = "https://github.com/paradigmxyz/revm-inspectors", rev = "5e3058a" }
revm-inspectors = { git = "https://github.com/paradigmxyz/revm-inspectors", rev = "52f632e" }
# eth
alloy-chains = "0.1.15"
@@ -355,28 +355,33 @@ alloy-dyn-abi = "0.7.2"
alloy-sol-types = "0.7.2"
alloy-rlp = "0.3.4"
alloy-trie = "0.4"
alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" }
alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" }
alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" }
alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" }
alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" }
alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" }
alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" }
alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d", default-features = false, features = [
alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false, features = [
"eth",
] }
alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false }
alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false }
alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false }
alloy-rpc-types-eth = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false }
alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false }
alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false }
alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false }
alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false, features = [
"reqwest",
] }
alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "14ed25d" }
alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" }
alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" }
alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" }
alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" }
alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" }
alloy-transport-http = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d", features = ["reqwest-rustls-tls"], default-features = false }
alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" }
alloy-transport-ipc = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" }
alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" }
alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" }
alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "14ed25d" }
alloy-eips = { git = "https://github.com/alloy-rs/alloy", default-features = false, rev = "00d81d7" }
alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false }
alloy-signer-wallet = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false }
alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false }
alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false }
alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7" }
alloy-transport-http = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", features = [
"reqwest-rustls-tls",
], default-features = false }
alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false }
alloy-transport-ipc = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false }
alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false }
alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false }
alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "00d81d7", default-features = false }
# misc
auto_impl = "1"
@@ -389,6 +394,7 @@ derive_more = "0.99.17"
fdlimit = "0.3.0"
eyre = "0.6"
generic-array = "0.14"
linked_hash_set = "0.1"
tracing = "0.1.0"
tracing-appender = "0.2"
thiserror = "1.0"
@@ -434,9 +440,10 @@ tokio-util = { version = "0.7.4", features = ["codec"] }
# async
async-stream = "0.3"
async-trait = "0.1.68"
futures = "0.3.26"
futures = "0.3"
futures-util = "0.3"
futures-core = "0.3"
pin-project = "1.0.12"
futures-util = "0.3.25"
hyper = "1.3"
hyper-util = "0.1.5"
reqwest = { version = "0.12", default-features = false }
@@ -456,6 +463,7 @@ jsonrpsee-http-client = "0.23"
# http
http = "1.0"
http-body = "1.0"
jsonwebtoken = "9"
# crypto
secp256k1 = { version = "0.28", default-features = false, features = [
@@ -488,7 +496,7 @@ similar-asserts = "1.5.0"
test-fuzz = "5"
[patch.crates-io]
revm = { git = "https://github.com/bluealloy/revm", rev = "a28a543" }
revm-interpreter = { git = "https://github.com/bluealloy/revm", rev = "a28a543" }
revm-precompile = { git = "https://github.com/bluealloy/revm", rev = "a28a543" }
revm-primitives = { git = "https://github.com/bluealloy/revm", rev = "a28a543" }
revm = { git = "https://github.com/bluealloy/revm.git", rev = "8f4c153" }
revm-interpreter = { git = "https://github.com/bluealloy/revm.git", rev = "8f4c153" }
revm-precompile = { git = "https://github.com/bluealloy/revm.git", rev = "8f4c153" }
revm-primitives = { git = "https://github.com/bluealloy/revm.git", rev = "8f4c153" }

View File

@@ -89,7 +89,7 @@ When updating this, also update:
- .github/workflows/lint.yml
-->
The Minimum Supported Rust Version (MSRV) of this project is [1.76.0](https://blog.rust-lang.org/2024/02/08/Rust-1.76.0.html).
The Minimum Supported Rust Version (MSRV) of this project is [1.79.0](https://blog.rust-lang.org/2024/06/13/Rust-1.79.0.html).
See the book for detailed instructions on how to [build from source](https://paradigmxyz.github.io/reth/installation/source.html).

View File

@@ -21,7 +21,7 @@ reth-node-core.workspace = true
reth-node-api.workspace = true
reth-rpc-types.workspace = true
reth-rpc-types-compat.workspace = true
reth-primitives = { workspace = true, features = ["clap", "alloy-compat"] }
reth-primitives = { workspace = true, features = ["alloy-compat"] }
reth-tracing.workspace = true
# alloy

View File

@@ -1,20 +1,10 @@
//! Contains various benchmark output formats, either for logging or for
//! serialization to / from files.
//!
//! This also contains common constants for units, for example [GIGAGAS].
use reth_primitives::constants::gas_units::GIGAGAS;
use serde::{ser::SerializeStruct, Serialize};
use std::time::Duration;
/// Represents one Kilogas, or `1_000` gas.
const KILOGAS: u64 = 1_000;
/// Represents one Megagas, or `1_000_000` gas.
const MEGAGAS: u64 = KILOGAS * 1_000;
/// Represents one Gigagas, or `1_000_000_000` gas.
const GIGAGAS: u64 = MEGAGAS * 1_000;
/// This is the suffix for gas output csv files.
pub(crate) const GAS_OUTPUT_SUFFIX: &str = "total_gas.csv";

View File

@@ -5,7 +5,6 @@
use alloy_provider::{ext::EngineApi, Network};
use alloy_rpc_types_engine::{
ExecutionPayloadInputV2, ForkchoiceState, ForkchoiceUpdated, PayloadAttributes, PayloadStatus,
PayloadStatusEnum,
};
use alloy_transport::{Transport, TransportResult};
use reth_node_api::EngineApiMessageVersion;
@@ -76,8 +75,8 @@ where
payload: ExecutionPayloadV1,
) -> TransportResult<PayloadStatus> {
let mut status = self.new_payload_v1(payload.clone()).await?;
while status.status != PayloadStatusEnum::Valid {
if status.status.is_invalid() {
while !status.is_valid() {
if status.is_invalid() {
error!(?status, ?payload, "Invalid newPayloadV1",);
panic!("Invalid newPayloadV1: {status:?}");
}
@@ -91,8 +90,8 @@ where
payload: ExecutionPayloadInputV2,
) -> TransportResult<PayloadStatus> {
let mut status = self.new_payload_v2(payload.clone()).await?;
while status.status != PayloadStatusEnum::Valid {
if status.status.is_invalid() {
while !status.is_valid() {
if status.is_invalid() {
error!(?status, ?payload, "Invalid newPayloadV2",);
panic!("Invalid newPayloadV2: {status:?}");
}
@@ -110,8 +109,8 @@ where
let mut status = self
.new_payload_v3(payload.clone(), versioned_hashes.clone(), parent_beacon_block_root)
.await?;
while status.status != PayloadStatusEnum::Valid {
if status.status.is_invalid() {
while !status.is_valid() {
if status.is_invalid() {
error!(
?status,
?payload,
@@ -136,8 +135,8 @@ where
let mut status =
self.fork_choice_updated_v1(fork_choice_state, payload_attributes.clone()).await?;
while status.payload_status.status != PayloadStatusEnum::Valid {
if status.payload_status.status.is_invalid() {
while !status.is_valid() {
if status.is_invalid() {
error!(
?status,
?fork_choice_state,
@@ -161,8 +160,8 @@ where
let mut status =
self.fork_choice_updated_v2(fork_choice_state, payload_attributes.clone()).await?;
while status.payload_status.status != PayloadStatusEnum::Valid {
if status.payload_status.status.is_invalid() {
while !status.is_valid() {
if status.is_invalid() {
error!(
?status,
?fork_choice_state,
@@ -186,8 +185,8 @@ where
let mut status =
self.fork_choice_updated_v3(fork_choice_state, payload_attributes.clone()).await?;
while status.payload_status.status != PayloadStatusEnum::Valid {
if status.payload_status.status.is_invalid() {
while !status.is_valid() {
if status.is_invalid() {
error!(
?status,
?fork_choice_state,

View File

@@ -15,7 +15,7 @@ workspace = true
[dependencies]
# reth
reth-config.workspace = true
reth-primitives = { workspace = true, features = ["arbitrary", "clap"] }
reth-primitives.workspace = true
reth-fs-util.workspace = true
reth-db = { workspace = true, features = ["mdbx"] }
reth-db-api.workspace = true
@@ -49,7 +49,8 @@ reth-payload-validator.workspace = true
reth-basic-payload-builder.workspace = true
reth-discv4.workspace = true
reth-discv5.workspace = true
reth-static-file = { workspace = true }
reth-static-file.workspace = true
reth-static-file-types = { workspace = true, features = ["clap"] }
reth-trie = { workspace = true, features = ["metrics"] }
reth-nippy-jar.workspace = true
reth-node-api.workspace = true

View File

@@ -76,4 +76,5 @@
- [Execution Extensions](./developers/exex/exex.md)
- [How do ExExes work?](./developers/exex/how-it-works.md)
- [Hello World](./developers/exex/hello-world.md)
- [Remote](./developers/exex/remote.md)
- [Contribute](./developers/contribute.md)

View File

@@ -2,17 +2,27 @@
## What are Execution Extensions?
Execution Extensions allow developers to build their own infrastructure that relies on Reth
Execution Extensions (or ExExes, for short) allow developers to build their own infrastructure that relies on Reth
as a base for driving the chain (be it [Ethereum](../../run/mainnet.md) or [OP Stack](../../run/optimism.md)) forward.
An Execution Extension is a task that derives its state from changes in Reth's state.
Some examples of such state derivations are rollups, bridges, and indexers.
They are called Execution Extensions because the main trigger for them is the execution of new blocks (or reorgs of old blocks)
initiated by Reth.
Read more about things you can build with Execution Extensions in the [Paradigm blog](https://www.paradigm.xyz/2024/05/reth-exex).
## What Execution Extensions are not
Execution Extensions are not separate processes that connect to the main Reth node process.
Instead, ExExes are compiled into the same binary as Reth, and run alongside it, using shared memory for communication.
If you want to build an Execution Extension that sends data into a separate process, check out the [Remote](./remote.md) chapter.
## How do I build an Execution Extension?
Let's dive into how to build our own ExEx (short for Execution Extension) from scratch, add tests for it,
Let's dive into how to build our own ExEx from scratch, add tests for it,
and run it on the Holesky testnet.
1. [How do ExExes work?](./how-it-works.md)

View File

@@ -0,0 +1,3 @@
# Remote Execution Extensions
WIP

View File

@@ -101,7 +101,7 @@ op-node \
[l1-el-spec]: https://github.com/ethereum/execution-specs
[rollup-node-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node.md
[op-geth-forkdiff]: https://op-geth.optimism.io
[sequencer]: https://github.com/ethereum-optimism/specs/blob/main/specs/introduction.md#sequencers
[sequencer]: https://github.com/ethereum-optimism/specs/blob/main/specs/background.md#sequencers
[op-stack-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs
[l2-el-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md
[deposit-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/deposits.md

View File

@@ -1,2 +1,2 @@
msrv = "1.76"
msrv = "1.79"
too-large-for-stack = 128

View File

@@ -39,7 +39,7 @@ metrics.workspace = true
# misc
aquamarine.workspace = true
linked_hash_set = "0.1.4"
linked_hash_set.workspace = true
[dev-dependencies]
reth-db = { workspace = true, features = ["test-utils"] }

View File

@@ -1383,7 +1383,7 @@ mod tests {
use reth_primitives::{
constants::{EIP1559_INITIAL_BASE_FEE, EMPTY_ROOT_HASH, ETHEREUM_BLOCK_GAS_LIMIT},
keccak256,
proofs::{calculate_transaction_root, state_root_unhashed},
proofs::calculate_transaction_root,
revm_primitives::AccountInfo,
Account, Address, ChainSpecBuilder, Genesis, GenesisAccount, Header, Signature,
Transaction, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, Withdrawals, B256,
@@ -1394,7 +1394,7 @@ mod tests {
ProviderFactory,
};
use reth_stages_api::StageCheckpoint;
use reth_trie::StateRoot;
use reth_trie::{root::state_root_unhashed, StateRoot};
use std::collections::HashMap;
fn setup_externals(

View File

@@ -12,7 +12,7 @@ pub(crate) struct CanonicalChain {
}
impl CanonicalChain {
pub(crate) fn new(chain: BTreeMap<BlockNumber, BlockHash>) -> Self {
pub(crate) const fn new(chain: BTreeMap<BlockNumber, BlockHash>) -> Self {
Self { chain }
}

View File

@@ -27,7 +27,7 @@ pub struct NoopBlockchainTree {
impl NoopBlockchainTree {
/// Create a new `NoopBlockchainTree` with a canon state notification sender.
pub fn with_canon_state_notifications(
pub const fn with_canon_state_notifications(
canon_state_notification_sender: CanonStateNotificationSender,
) -> Self {
Self { canon_state_notification_sender: Some(canon_state_notification_sender) }

View File

@@ -56,7 +56,7 @@ pub struct AutoSealConsensus {
impl AutoSealConsensus {
/// Create a new instance of [`AutoSealConsensus`]
pub fn new(chain_spec: Arc<ChainSpec>) -> Self {
pub const fn new(chain_spec: Arc<ChainSpec>) -> Self {
Self { chain_spec }
}
}

View File

@@ -86,7 +86,7 @@ impl OnForkChoiceUpdated {
}
/// If the forkchoice update was successful and no payload attributes were provided, this method
pub(crate) fn updated_with_pending_payload_id(
pub(crate) const fn updated_with_pending_payload_id(
payload_status: PayloadStatus,
pending_payload_id: oneshot::Receiver<Result<PayloadId, PayloadBuilderError>>,
) -> Self {

View File

@@ -438,7 +438,7 @@ mod tests {
impl TestPipelineBuilder {
/// Create a new [`TestPipelineBuilder`].
fn new() -> Self {
const fn new() -> Self {
Self {
pipeline_exec_outputs: VecDeque::new(),
executor_results: Vec::new(),

View File

@@ -57,7 +57,7 @@ pub struct TestEnv<DB> {
}
impl<DB> TestEnv<DB> {
fn new(
const fn new(
db: DB,
tip_rx: watch::Receiver<B256>,
engine_handle: BeaconConsensusEngineHandle<EthEngineTypes>,

View File

@@ -6,59 +6,33 @@ use reth_primitives::{
eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK},
MAXIMUM_EXTRA_DATA_SIZE,
},
eip4844::calculate_excess_blob_gas,
ChainSpec, GotExpected, Hardfork, Header, SealedBlock, SealedHeader,
};
/// Validate header standalone
pub fn validate_header_standalone(
header: &SealedHeader,
chain_spec: &ChainSpec,
) -> Result<(), ConsensusError> {
// Gas used needs to be less than gas limit. Gas used is going to be checked after execution.
/// Gas used must not exceed the gas limit. Gas used is going to be checked after execution.
#[inline]
pub fn validate_header_gas(header: &SealedHeader) -> Result<(), ConsensusError> {
if header.gas_used > header.gas_limit {
return Err(ConsensusError::HeaderGasUsedExceedsGasLimit {
gas_used: header.gas_used,
gas_limit: header.gas_limit,
})
}
Ok(())
}
// Check if base fee is set.
/// Ensure the EIP-1559 base fee is set if the London hardfork is active.
#[inline]
pub fn validate_header_base_fee(
header: &SealedHeader,
chain_spec: &ChainSpec,
) -> Result<(), ConsensusError> {
if chain_spec.fork(Hardfork::London).active_at_block(header.number) &&
header.base_fee_per_gas.is_none()
{
return Err(ConsensusError::BaseFeeMissing)
}
let wd_root_missing = header.withdrawals_root.is_none() && !chain_spec.is_optimism();
// EIP-4895: Beacon chain push withdrawals as operations
if chain_spec.is_shanghai_active_at_timestamp(header.timestamp) && wd_root_missing {
return Err(ConsensusError::WithdrawalsRootMissing)
} else if !chain_spec.is_shanghai_active_at_timestamp(header.timestamp) &&
header.withdrawals_root.is_some()
{
return Err(ConsensusError::WithdrawalsRootUnexpected)
}
// Ensures that EIP-4844 fields are valid once cancun is active.
if chain_spec.is_cancun_active_at_timestamp(header.timestamp) {
validate_4844_header_standalone(header)?;
} else if header.blob_gas_used.is_some() {
return Err(ConsensusError::BlobGasUsedUnexpected)
} else if header.excess_blob_gas.is_some() {
return Err(ConsensusError::ExcessBlobGasUnexpected)
} else if header.parent_beacon_block_root.is_some() {
return Err(ConsensusError::ParentBeaconBlockRootUnexpected)
}
if chain_spec.is_prague_active_at_timestamp(header.timestamp) {
if header.requests_root.is_none() {
return Err(ConsensusError::RequestsRootMissing)
}
} else if header.requests_root.is_some() {
return Err(ConsensusError::RequestsRootUnexpected)
}
Ok(())
}
@@ -175,6 +149,7 @@ pub fn validate_4844_header_standalone(header: &SealedHeader) -> Result<(), Cons
///
/// From yellow paper: extraData: An arbitrary byte array containing data relevant to this block.
/// This must be 32 bytes or fewer; formally Hx.
#[inline]
pub fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> {
if header.extra_data.len() > MAXIMUM_EXTRA_DATA_SIZE {
Err(ConsensusError::ExtraDataExceedsMax { len: header.extra_data.len() })
@@ -183,6 +158,113 @@ pub fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError>
}
}
/// Checks that `header` directly extends `parent`.
///
/// Verifies two invariants: the child block number is exactly `parent.number + 1`,
/// and the child's `parent_hash` field equals the actual hash of `parent`.
#[inline]
pub fn validate_against_parent_hash_number(
    header: &SealedHeader,
    parent: &SealedHeader,
) -> Result<(), ConsensusError> {
    // The child must carry the next sequential block number.
    let expected_number = parent.number + 1;
    if header.number != expected_number {
        return Err(ConsensusError::ParentBlockNumberMismatch {
            parent_block_number: parent.number,
            block_number: header.number,
        })
    }

    // The child must point at the parent's actual hash.
    let parent_hash = parent.hash();
    if header.parent_hash != parent_hash {
        return Err(ConsensusError::ParentHashMismatch(
            GotExpected { got: header.parent_hash, expected: parent_hash }.into(),
        ))
    }

    Ok(())
}
/// Validates the header's EIP-1559 base fee against the parent header.
///
/// A no-op before the London hardfork. At the London transition block the base
/// fee must equal the protocol's fixed initial value; on every later block it
/// must equal the value derived from the parent header.
#[inline]
pub fn validate_against_parent_eip1559_base_fee(
    header: &SealedHeader,
    parent: &SealedHeader,
    chain_spec: &ChainSpec,
) -> Result<(), ConsensusError> {
    // Base-fee rules only apply once London is active at this block height.
    if !chain_spec.fork(Hardfork::London).active_at_block(header.number) {
        return Ok(())
    }

    let base_fee = header.base_fee_per_gas.ok_or(ConsensusError::BaseFeeMissing)?;

    let expected_base_fee =
        if chain_spec.fork(Hardfork::London).transitions_at_block(header.number) {
            // First London block uses the fixed initial base fee.
            reth_primitives::constants::EIP1559_INITIAL_BASE_FEE
        } else {
            // Parent headers are validated before their children, so a missing
            // parent base fee should not occur here.
            parent
                .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(header.timestamp))
                .ok_or(ConsensusError::BaseFeeMissing)?
        };

    if base_fee != expected_base_fee {
        return Err(ConsensusError::BaseFeeDiff(GotExpected {
            expected: expected_base_fee,
            got: base_fee,
        }))
    }

    Ok(())
}
/// Rejects a header whose timestamp lies in the past relative to its parent's
/// timestamp (as judged by [`SealedHeader::is_timestamp_in_past`]).
#[inline]
pub fn validate_against_parent_timestamp(
    header: &SealedHeader,
    parent: &SealedHeader,
) -> Result<(), ConsensusError> {
    if header.is_timestamp_in_past(parent.timestamp) {
        Err(ConsensusError::TimestampIsInPast {
            parent_timestamp: parent.timestamp,
            timestamp: header.timestamp,
        })
    } else {
        Ok(())
    }
}
/// Validates the EIP-4844 blob gas fields of `header` against `parent`.
///
/// Ensures `blob_gas_used` and `excess_blob_gas` are present on the child header
/// and that `excess_blob_gas` matches the value computed from the parent's
/// fields via `calculate_excess_blob_gas`.
pub fn validate_against_parent_4844(
    header: &SealedHeader,
    parent: &SealedHeader,
) -> Result<(), ConsensusError> {
    // Per the EIP-4844 header extension, a pre-fork parent contributes zero for
    // both fields, so the first post-fork block expects an excess blob gas of 0.
    let parent_blob_gas_used = parent.blob_gas_used.unwrap_or(0);
    let parent_excess_blob_gas = parent.excess_blob_gas.unwrap_or(0);

    // Both blob gas fields must be present on the child header.
    header.blob_gas_used.ok_or(ConsensusError::BlobGasUsedMissing)?;
    let excess_blob_gas = header.excess_blob_gas.ok_or(ConsensusError::ExcessBlobGasMissing)?;

    let expected_excess_blob_gas =
        calculate_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used);
    if excess_blob_gas != expected_excess_blob_gas {
        return Err(ConsensusError::ExcessBlobGasDiff {
            diff: GotExpected { got: excess_blob_gas, expected: expected_excess_blob_gas },
            parent_excess_blob_gas,
            parent_blob_gas_used,
        })
    }

    Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
@@ -410,22 +492,6 @@ mod tests {
.return_const(Ok(Some(Withdrawal { index: 2, ..Default::default() })));
}
#[test]
fn shanghai_block_zero_withdrawals() {
// ensures that if shanghai is activated, and we include a block with a withdrawals root,
// that the header is valid
let chain_spec = ChainSpecBuilder::mainnet().shanghai_activated().build();
let header = Header {
base_fee_per_gas: Some(1337u64),
withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])),
..Default::default()
}
.seal_slow();
assert_eq!(validate_header_standalone(&header, &chain_spec), Ok(()));
}
#[test]
fn cancun_block_incorrect_blob_gas_used() {
let chain_spec = ChainSpecBuilder::mainnet().cancun_activated().build();

View File

@@ -10,8 +10,8 @@
#![cfg_attr(not(feature = "std"), no_std)]
use reth_primitives::{
BlockHash, BlockNumber, BlockWithSenders, Bloom, GotExpected, GotExpectedBoxed, Header,
HeaderValidationError, InvalidTransactionError, Receipt, Request, SealedBlock, SealedHeader,
constants::MINIMUM_GAS_LIMIT, BlockHash, BlockNumber, BlockWithSenders, Bloom, GotExpected,
GotExpectedBoxed, Header, InvalidTransactionError, Receipt, Request, SealedBlock, SealedHeader,
B256, U256,
};
@@ -206,6 +206,10 @@ pub enum ConsensusError {
block_number: BlockNumber,
},
/// Error when the parent hash does not match the expected parent hash.
#[error("mismatched parent hash: {0}")]
ParentHashMismatch(GotExpectedBoxed<B256>),
/// Error when the block timestamp is in the future compared to our clock time.
#[error("block timestamp {timestamp} is in the future compared to our clock time {present_timestamp}")]
TimestampIsInFuture {
@@ -329,9 +333,60 @@ pub enum ConsensusError {
#[error(transparent)]
InvalidTransaction(#[from] InvalidTransactionError),
/// Error type transparently wrapping `HeaderValidationError`.
#[error(transparent)]
HeaderValidationError(#[from] HeaderValidationError),
/// Error when the block's base fee is different from the expected base fee.
#[error("block base fee mismatch: {0}")]
BaseFeeDiff(GotExpected<u64>),
/// Error when there is an invalid excess blob gas.
#[error(
"invalid excess blob gas: {diff}; \
parent excess blob gas: {parent_excess_blob_gas}, \
parent blob gas used: {parent_blob_gas_used}"
)]
ExcessBlobGasDiff {
/// The excess blob gas diff.
diff: GotExpected<u64>,
/// The parent excess blob gas.
parent_excess_blob_gas: u64,
/// The parent blob gas used.
parent_blob_gas_used: u64,
},
/// Error when the child gas limit exceeds the maximum allowed increase.
#[error("child gas_limit {child_gas_limit} max increase is {parent_gas_limit}/1024")]
GasLimitInvalidIncrease {
/// The parent gas limit.
parent_gas_limit: u64,
/// The child gas limit.
child_gas_limit: u64,
},
/// Error indicating that the child gas limit is below the minimum allowed limit.
///
/// This error occurs when the child gas limit is less than the specified minimum gas limit.
#[error("child gas limit {child_gas_limit} is below the minimum allowed limit ({MINIMUM_GAS_LIMIT})")]
GasLimitInvalidMinimum {
/// The child gas limit.
child_gas_limit: u64,
},
/// Error when the child gas limit exceeds the maximum allowed decrease.
#[error("child gas_limit {child_gas_limit} max decrease is {parent_gas_limit}/1024")]
GasLimitInvalidDecrease {
/// The parent gas limit.
parent_gas_limit: u64,
/// The child gas limit.
child_gas_limit: u64,
},
/// Error when the block timestamp is in the past compared to the parent timestamp.
#[error("block timestamp {timestamp} is in the past compared to the parent timestamp {parent_timestamp}")]
TimestampIsInPast {
/// The parent block's timestamp.
parent_timestamp: u64,
/// The block's timestamp.
timestamp: u64,
},
}
impl ConsensusError {

View File

@@ -9,14 +9,17 @@
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
use core::fmt;
use reth_payload_primitives::{
pub use reth_payload_primitives::{
BuiltPayload, EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes,
PayloadTypes,
};
use reth_primitives::ChainSpec;
use serde::{de::DeserializeOwned, ser::Serialize};
/// The types that are used by the engine API.
/// This type defines the versioned types of the engine API.
///
/// This includes the execution payload types and payload attributes that are used to trigger a
/// payload job. Hence this trait is also [`PayloadTypes`].
pub trait EngineTypes:
PayloadTypes + DeserializeOwned + Serialize + fmt::Debug + Unpin + Send + Sync + Clone
{

View File

@@ -10,11 +10,14 @@
use reth_consensus::{Consensus, ConsensusError, PostExecutionInput};
use reth_consensus_common::validation::{
validate_block_pre_execution, validate_header_extradata, validate_header_standalone,
validate_4844_header_standalone, validate_against_parent_4844,
validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number,
validate_against_parent_timestamp, validate_block_pre_execution, validate_header_base_fee,
validate_header_extradata, validate_header_gas,
};
use reth_primitives::{
BlockWithSenders, Chain, ChainSpec, Hardfork, Header, SealedBlock, SealedHeader,
EMPTY_OMMER_ROOT_HASH, U256,
constants::MINIMUM_GAS_LIMIT, BlockWithSenders, Chain, ChainSpec, Hardfork, Header,
SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256,
};
use std::{sync::Arc, time::SystemTime};
@@ -32,14 +35,90 @@ pub struct EthBeaconConsensus {
impl EthBeaconConsensus {
/// Create a new instance of [`EthBeaconConsensus`]
pub fn new(chain_spec: Arc<ChainSpec>) -> Self {
pub const fn new(chain_spec: Arc<ChainSpec>) -> Self {
Self { chain_spec }
}
/// Checks the gas limit for consistency between parent and self headers.
///
/// The maximum allowable difference between self and parent gas limits is determined by the
/// parent's gas limit divided by 1024 (the gas limit bound divisor).
fn validate_against_parent_gas_limit(
    &self,
    header: &SealedHeader,
    parent: &SealedHeader,
) -> Result<(), ConsensusError> {
    // Determine the effective parent gas limit. On the exact London transition block the
    // parent's limit is scaled up by the elasticity multiplier; otherwise it is used as-is.
    let parent_gas_limit =
        if self.chain_spec.fork(Hardfork::London).transitions_at_block(header.number) {
            parent.gas_limit *
                self.chain_spec
                    .base_fee_params_at_timestamp(header.timestamp)
                    .elasticity_multiplier as u64
        } else {
            parent.gas_limit
        };

    // Check for an increase in gas limit beyond the allowed threshold
    // (must be strictly less than parent_gas_limit / 1024).
    if header.gas_limit > parent_gas_limit {
        if header.gas_limit - parent_gas_limit >= parent_gas_limit / 1024 {
            return Err(ConsensusError::GasLimitInvalidIncrease {
                parent_gas_limit,
                child_gas_limit: header.gas_limit,
            })
        }
    }
    // Check for a decrease in gas limit beyond the allowed threshold.
    else if parent_gas_limit - header.gas_limit >= parent_gas_limit / 1024 {
        return Err(ConsensusError::GasLimitInvalidDecrease {
            parent_gas_limit,
            child_gas_limit: header.gas_limit,
        })
    }
    // Check if the self gas limit is below the minimum required limit.
    // NOTE(review): this branch is only reached when the child limit does not exceed the
    // parent's, so a child above an under-minimum parent skips this check — confirm intended.
    else if header.gas_limit < MINIMUM_GAS_LIMIT {
        return Err(ConsensusError::GasLimitInvalidMinimum { child_gas_limit: header.gas_limit })
    }

    Ok(())
}
}
impl Consensus for EthBeaconConsensus {
/// Validates the given header standalone (without parent context).
///
/// After the basic standalone/gas/base-fee checks, enforces that fork-gated header fields
/// are present if-and-only-if the corresponding fork is active at the header's timestamp:
/// - withdrawals root (EIP-4895, Shanghai)
/// - blob gas fields and parent beacon block root (EIP-4844 fields, Cancun)
/// - requests root (Prague)
fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> {
    validate_header_standalone(header, &self.chain_spec)?;
    validate_header_gas(header)?;
    validate_header_base_fee(header, &self.chain_spec)?;

    // EIP-4895: Beacon chain push withdrawals as operations.
    // The withdrawals root must be present after Shanghai and absent before it.
    if self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp) &&
        header.withdrawals_root.is_none()
    {
        return Err(ConsensusError::WithdrawalsRootMissing)
    } else if !self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp) &&
        header.withdrawals_root.is_some()
    {
        return Err(ConsensusError::WithdrawalsRootUnexpected)
    }

    // Ensures that EIP-4844 fields are valid once cancun is active; before Cancun none of
    // the blob-related fields (or the parent beacon block root) may be set.
    if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) {
        validate_4844_header_standalone(header)?;
    } else if header.blob_gas_used.is_some() {
        return Err(ConsensusError::BlobGasUsedUnexpected)
    } else if header.excess_blob_gas.is_some() {
        return Err(ConsensusError::ExcessBlobGasUnexpected)
    } else if header.parent_beacon_block_root.is_some() {
        return Err(ConsensusError::ParentBeaconBlockRootUnexpected)
    }

    // The requests root must be present after Prague and absent before it.
    if self.chain_spec.is_prague_active_at_timestamp(header.timestamp) {
        if header.requests_root.is_none() {
            return Err(ConsensusError::RequestsRootMissing)
        }
    } else if header.requests_root.is_some() {
        return Err(ConsensusError::RequestsRootUnexpected)
    }

    Ok(())
}
@@ -48,7 +127,21 @@ impl Consensus for EthBeaconConsensus {
header: &SealedHeader,
parent: &SealedHeader,
) -> Result<(), ConsensusError> {
header.validate_against_parent(parent, &self.chain_spec).map_err(ConsensusError::from)?;
validate_against_parent_hash_number(header, parent)?;
validate_against_parent_timestamp(header, parent)?;
// TODO: Check the difficulty increment between parent and self.
// The difficulty bomb ("Ice Age") incremented it by a formula that we need to follow.
self.validate_against_parent_gas_limit(header, parent)?;
validate_against_parent_eip1559_base_fee(header, parent, &self.chain_spec)?;
// ensure that the blob gas fields for this block
if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) {
validate_against_parent_4844(header, parent)?;
}
Ok(())
}
@@ -127,3 +220,97 @@ impl Consensus for EthBeaconConsensus {
validate_block_post_execution(block, &self.chain_spec, input.receipts, input.requests)
}
}
#[cfg(test)]
mod tests {
    use reth_primitives::{proofs, ChainSpecBuilder, B256};

    use super::*;

    // Builds a sealed header with the given gas limit and every other field defaulted;
    // the seal hash is irrelevant for gas-limit validation, so B256::ZERO is used.
    fn header_with_gas_limit(gas_limit: u64) -> SealedHeader {
        let header = Header { gas_limit, ..Default::default() };
        header.seal(B256::ZERO)
    }

    #[test]
    fn test_valid_gas_limit_increase() {
        // An increase well under parent_gas_limit / 1024 must be accepted.
        let parent = header_with_gas_limit(1024 * 10);
        let child = header_with_gas_limit(parent.gas_limit + 5);

        assert_eq!(
            EthBeaconConsensus::new(Arc::new(ChainSpec::default()))
                .validate_against_parent_gas_limit(&child, &parent),
            Ok(())
        );
    }

    #[test]
    fn test_gas_limit_below_minimum() {
        // Dropping below MINIMUM_GAS_LIMIT is rejected even though the delta is tiny.
        let parent = header_with_gas_limit(MINIMUM_GAS_LIMIT);
        let child = header_with_gas_limit(MINIMUM_GAS_LIMIT - 1);

        assert_eq!(
            EthBeaconConsensus::new(Arc::new(ChainSpec::default()))
                .validate_against_parent_gas_limit(&child, &parent),
            Err(ConsensusError::GasLimitInvalidMinimum { child_gas_limit: child.gas_limit })
        );
    }

    #[test]
    fn test_invalid_gas_limit_increase_exceeding_limit() {
        // An increase past the parent_gas_limit / 1024 bound must be rejected.
        let parent = header_with_gas_limit(1024 * 10);
        let child = header_with_gas_limit(parent.gas_limit + parent.gas_limit / 1024 + 1);

        assert_eq!(
            EthBeaconConsensus::new(Arc::new(ChainSpec::default()))
                .validate_against_parent_gas_limit(&child, &parent),
            Err(ConsensusError::GasLimitInvalidIncrease {
                parent_gas_limit: parent.gas_limit,
                child_gas_limit: child.gas_limit,
            })
        );
    }

    #[test]
    fn test_valid_gas_limit_decrease_within_limit() {
        // A decrease well under parent_gas_limit / 1024 must be accepted.
        let parent = header_with_gas_limit(1024 * 10);
        let child = header_with_gas_limit(parent.gas_limit - 5);

        assert_eq!(
            EthBeaconConsensus::new(Arc::new(ChainSpec::default()))
                .validate_against_parent_gas_limit(&child, &parent),
            Ok(())
        );
    }

    #[test]
    fn test_invalid_gas_limit_decrease_exceeding_limit() {
        // A decrease past the parent_gas_limit / 1024 bound must be rejected.
        let parent = header_with_gas_limit(1024 * 10);
        let child = header_with_gas_limit(parent.gas_limit - parent.gas_limit / 1024 - 1);

        assert_eq!(
            EthBeaconConsensus::new(Arc::new(ChainSpec::default()))
                .validate_against_parent_gas_limit(&child, &parent),
            Err(ConsensusError::GasLimitInvalidDecrease {
                parent_gas_limit: parent.gas_limit,
                child_gas_limit: child.gas_limit,
            })
        );
    }

    #[test]
    fn shanghai_block_zero_withdrawals() {
        // ensures that if shanghai is activated, and we include a block with a withdrawals root,
        // that the header is valid
        let chain_spec = Arc::new(ChainSpecBuilder::mainnet().shanghai_activated().build());

        let header = Header {
            base_fee_per_gas: Some(1337u64),
            withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])),
            ..Default::default()
        }
        .seal_slow();

        assert_eq!(EthBeaconConsensus::new(chain_spec).validate_header(&header), Ok(()));
    }
}

View File

@@ -39,7 +39,7 @@ pub struct EthBuiltPayload {
impl EthBuiltPayload {
/// Initializes the payload with the given initial block.
pub fn new(id: PayloadId, block: SealedBlock, fees: U256) -> Self {
pub const fn new(id: PayloadId, block: SealedBlock, fees: U256) -> Self {
Self { id, block, fees, sidecars: Vec::new() }
}

View File

@@ -54,7 +54,7 @@ impl EthExecutorProvider {
impl<EvmConfig> EthExecutorProvider<EvmConfig> {
/// Creates a new executor provider.
pub fn new(chain_spec: Arc<ChainSpec>, evm_config: EvmConfig) -> Self {
pub const fn new(chain_spec: Arc<ChainSpec>, evm_config: EvmConfig) -> Self {
Self { chain_spec, evm_config }
}
}
@@ -236,7 +236,7 @@ pub struct EthBlockExecutor<EvmConfig, DB> {
impl<EvmConfig, DB> EthBlockExecutor<EvmConfig, DB> {
/// Creates a new Ethereum block executor.
pub fn new(chain_spec: Arc<ChainSpec>, evm_config: EvmConfig, state: State<DB>) -> Self {
pub const fn new(chain_spec: Arc<ChainSpec>, evm_config: EvmConfig, state: State<DB>) -> Self {
Self { executor: EthEvmExecutor { chain_spec, evm_config }, state }
}

View File

@@ -488,7 +488,7 @@ pub enum ChainSplit {
#[cfg(test)]
mod tests {
use super::*;
use reth_primitives::B256;
use reth_primitives::{Receipt, Receipts, TxType, B256};
use revm::primitives::{AccountInfo, HashMap};
#[test]
@@ -625,4 +625,84 @@ mod tests {
ChainSplit::NoSplitPending(chain)
);
}
// Checks that `Chain::receipts_by_block_hash` resolves receipts by hash and that
// `execution_outcome_at_block` returns the outcome truncated at / up to a given block.
#[test]
fn receipts_by_block_hash() {
    // Create a default SealedBlockWithSenders object
    let block: SealedBlockWithSenders = SealedBlockWithSenders::default();

    // Define block hashes for block1 and block2
    let block1_hash = B256::new([0x01; 32]);
    let block2_hash = B256::new([0x02; 32]);

    // Clone the default block into block1 and block2
    let mut block1 = block.clone();
    let mut block2 = block;

    // Set the hashes of block1 and block2 so they can be looked up by hash
    block1.block.header.set_hash(block1_hash);
    block2.block.header.set_hash(block2_hash);

    // Create a receipt object, receipt1
    let receipt1 = Receipt {
        tx_type: TxType::Legacy,
        cumulative_gas_used: 46913,
        logs: vec![],
        success: true,
        #[cfg(feature = "optimism")]
        deposit_nonce: Some(18),
        #[cfg(feature = "optimism")]
        deposit_receipt_version: Some(34),
    };

    // Create another receipt object, receipt2
    let receipt2 = Receipt {
        tx_type: TxType::Legacy,
        cumulative_gas_used: 1325345,
        logs: vec![],
        success: true,
        #[cfg(feature = "optimism")]
        deposit_nonce: Some(18),
        #[cfg(feature = "optimism")]
        deposit_receipt_version: Some(34),
    };

    // Create a Receipts object with one receipt vector per block
    let receipts =
        Receipts { receipt_vec: vec![vec![Some(receipt1.clone())], vec![Some(receipt2)]] };

    // Create an ExecutionOutcome object with the created bundle, receipts, an empty requests
    // vector, and first_block set to 10
    let execution_outcome = ExecutionOutcome {
        bundle: Default::default(),
        receipts,
        requests: vec![],
        first_block: 10,
    };

    // Create a Chain object with a BTreeMap of blocks mapped to their block numbers,
    // including block1_hash and block2_hash, and the execution_outcome
    let chain = Chain {
        blocks: BTreeMap::from([(10, block1), (11, block2)]),
        execution_outcome: execution_outcome.clone(),
        ..Default::default()
    };

    // Assert that the proper receipt vector is returned for block1_hash
    assert_eq!(chain.receipts_by_block_hash(block1_hash), Some(vec![&receipt1]));

    // Create an ExecutionOutcome object with a single receipt vector containing receipt1
    let execution_outcome1 = ExecutionOutcome {
        bundle: Default::default(),
        receipts: Receipts { receipt_vec: vec![vec![Some(receipt1)]] },
        requests: vec![],
        first_block: 10,
    };

    // Assert that the execution outcome at the first block contains only the first receipt
    assert_eq!(chain.execution_outcome_at_block(10), Some(execution_outcome1));

    // Assert that the execution outcome at the tip block contains the whole execution outcome
    assert_eq!(chain.execution_outcome_at_block(11), Some(execution_outcome));
}
}

View File

@@ -216,17 +216,17 @@ impl ExecutionOutcome {
&self.receipts[index]
}
/// Is bundle state empty of blocks.
/// Is execution outcome empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Number of blocks in bundle state.
/// Number of blocks in the execution outcome.
pub fn len(&self) -> usize {
self.receipts.len()
}
/// Return first block of the bundle
/// Return first block of the execution outcome
pub const fn first_block(&self) -> BlockNumber {
self.first_block
}
@@ -247,6 +247,8 @@ impl ExecutionOutcome {
// remove receipts
self.receipts.truncate(new_len);
// remove requests
self.requests.truncate(new_len);
// Revert last n reverts.
self.bundle.revert(rm_trx);
@@ -274,6 +276,11 @@ impl ExecutionOutcome {
// Truncate higher state to [at..].
let at_idx = higher_state.block_number_to_index(at).unwrap();
higher_state.receipts = higher_state.receipts.split_off(at_idx).into();
// Ensure that there are enough requests to truncate.
// Sometimes we just have receipts and no requests.
if at_idx < higher_state.requests.len() {
higher_state.requests = higher_state.requests.split_off(at_idx);
}
higher_state.bundle.take_n_reverts(at_idx);
higher_state.first_block = at;
@@ -595,37 +602,51 @@ mod tests {
#[test]
fn test_revert_to() {
// Create a random receipt object
let receipt = Receipt {
tx_type: TxType::Legacy,
cumulative_gas_used: 46913,
logs: vec![],
success: true,
#[cfg(feature = "optimism")]
deposit_nonce: Some(18),
#[cfg(feature = "optimism")]
deposit_receipt_version: Some(34),
};
// Create a Receipts object with a vector of receipt vectors
let receipts = Receipts {
receipt_vec: vec![vec![Some(Receipt {
tx_type: TxType::Legacy,
cumulative_gas_used: 46913,
logs: vec![],
success: true,
#[cfg(feature = "optimism")]
deposit_nonce: Some(18),
#[cfg(feature = "optimism")]
deposit_receipt_version: Some(34),
})]],
receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt.clone())]],
};
// Define the first block number
let first_block = 123;
// Create a DepositRequest object with specific attributes.
let request = Request::DepositRequest(DepositRequest {
pubkey: FixedBytes::<48>::from([1; 48]),
withdrawal_credentials: B256::from([0; 32]),
amount: 1111,
signature: FixedBytes::<96>::from([2; 96]),
index: 222,
});
// Create a vector of Requests containing the request.
let requests = vec![Requests(vec![request]), Requests(vec![request])];
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
// first_block
let mut exec_res = ExecutionOutcome {
bundle: Default::default(),
receipts: receipts.clone(),
requests: vec![],
first_block,
};
let mut exec_res =
ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block };
// Assert that the revert_to method returns true when reverting to the initial block number.
assert!(exec_res.revert_to(123));
// Assert that the receipts remain unchanged after reverting to the initial block number.
assert_eq!(exec_res.receipts, receipts);
// Assert that the receipts are properly cut after reverting to the initial block number.
assert_eq!(exec_res.receipts, Receipts { receipt_vec: vec![vec![Some(receipt)]] });
// Assert that the requests are properly cut after reverting to the initial block number.
assert_eq!(exec_res.requests, vec![Requests(vec![request])]);
// Assert that the revert_to method returns false when attempting to revert to a block
// number greater than the initial block number.
@@ -688,4 +709,77 @@ mod tests {
}
);
}
// Checks that `ExecutionOutcome::split_at` partitions receipts and requests at a block
// boundary, and that splitting at the first block yields no lower half.
#[test]
fn test_split_at_execution_outcome() {
    // Create a receipt object reused for all three blocks
    let receipt = Receipt {
        tx_type: TxType::Legacy,
        cumulative_gas_used: 46913,
        logs: vec![],
        success: true,
        #[cfg(feature = "optimism")]
        deposit_nonce: Some(18),
        #[cfg(feature = "optimism")]
        deposit_receipt_version: Some(34),
    };

    // Create a Receipts object with one receipt vector per block (three blocks)
    let receipts = Receipts {
        receipt_vec: vec![
            vec![Some(receipt.clone())],
            vec![Some(receipt.clone())],
            vec![Some(receipt.clone())],
        ],
    };

    // Define the first block number
    let first_block = 123;

    // Create a DepositRequest object with specific attributes.
    let request = Request::DepositRequest(DepositRequest {
        pubkey: FixedBytes::<48>::from([1; 48]),
        withdrawal_credentials: B256::from([0; 32]),
        amount: 1111,
        signature: FixedBytes::<96>::from([2; 96]),
        index: 222,
    });

    // Create a vector of Requests, one entry per block.
    let requests =
        vec![Requests(vec![request]), Requests(vec![request]), Requests(vec![request])];

    // Create a ExecutionOutcome object with the created bundle, receipts, requests, and
    // first_block
    let exec_res =
        ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block };

    // Split the ExecutionOutcome at block number 124 (i.e. after the first block)
    let result = exec_res.clone().split_at(124);

    // Define the expected lower ExecutionOutcome after splitting: only block 123
    let lower_execution_outcome = ExecutionOutcome {
        bundle: Default::default(),
        receipts: Receipts { receipt_vec: vec![vec![Some(receipt.clone())]] },
        requests: vec![Requests(vec![request])],
        first_block,
    };

    // Define the expected higher ExecutionOutcome after splitting: blocks 124 and 125
    let higher_execution_outcome = ExecutionOutcome {
        bundle: Default::default(),
        receipts: Receipts {
            receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt)]],
        },
        requests: vec![Requests(vec![request]), Requests(vec![request])],
        first_block: 124,
    };

    // Assert that the split result matches the expected lower and higher outcomes
    assert_eq!(result.0, Some(lower_execution_outcome));
    assert_eq!(result.1, higher_execution_outcome);

    // Assert that splitting at the first block number returns None for the lower outcome
    assert_eq!(exec_res.clone().split_at(123), (None, exec_res));
}
}

View File

@@ -8,8 +8,8 @@
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
mod bundle;
pub use bundle::*;
mod execution_outcome;
pub use execution_outcome::*;
mod chain;
pub use chain::*;

View File

@@ -16,4 +16,4 @@ workspace = true
alloy-primitives.workspace = true
# async
tokio = { workspace = true, features = ["full"] }
tokio = { workspace = true, features = ["time"] }

View File

@@ -10,7 +10,4 @@
pub mod ban_list;
/// Traits related to tokio streams
pub mod stream;
pub mod ratelimit;

View File

@@ -1,13 +0,0 @@
use std::net::SocketAddr;
use tokio::net::TcpStream;
/// This trait is for instrumenting a `TCPStream` with a socket addr
pub trait HasRemoteAddr {
/// Maybe returns a [`SocketAddr`]
fn remote_addr(&self) -> Option<SocketAddr>;
}
impl HasRemoteAddr for TcpStream {
fn remote_addr(&self) -> Option<SocketAddr> {
self.peer_addr().ok()
}
}

View File

@@ -13,12 +13,13 @@ workspace = true
[dependencies]
# reth
reth-primitives.workspace = true
reth-net-common.workspace = true
reth-ethereum-forks.workspace = true
reth-net-nat.workspace = true
reth-network-peers = { workspace = true, features = ["secp256k1"] }
# ethereum
alloy-primitives.workspace = true
alloy-rlp = { workspace = true, features = ["derive"] }
discv5.workspace = true
secp256k1 = { workspace = true, features = [
@@ -28,6 +29,7 @@ secp256k1 = { workspace = true, features = [
"serde",
] }
enr.workspace = true
# async/futures
tokio = { workspace = true, features = ["io-util", "net", "time"] }
tokio-stream.workspace = true
@@ -42,6 +44,7 @@ generic-array.workspace = true
serde = { workspace = true, optional = true }
[dev-dependencies]
reth-primitives.workspace = true
assert_matches.workspace = true
rand.workspace = true
tokio = { workspace = true, features = ["macros"] }

View File

@@ -3,10 +3,11 @@
//! This basis of this file has been taken from the discv5 codebase:
//! <https://github.com/sigp/discv5>
use alloy_primitives::bytes::Bytes;
use alloy_rlp::Encodable;
use reth_net_common::ban_list::BanList;
use reth_net_nat::{NatResolver, ResolveNatInterval};
use reth_primitives::{bytes::Bytes, NodeRecord};
use reth_network_peers::NodeRecord;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use std::{

View File

@@ -28,6 +28,7 @@ use crate::{
error::{DecodePacketError, Discv4Error},
proto::{FindNode, Message, Neighbours, Packet, Ping, Pong},
};
use alloy_primitives::{bytes::Bytes, hex, B256};
use discv5::{
kbucket,
kbucket::{
@@ -39,8 +40,8 @@ use discv5::{
use enr::Enr;
use parking_lot::Mutex;
use proto::{EnrRequest, EnrResponse};
use reth_ethereum_forks::ForkId;
use reth_network_peers::{pk2id, PeerId};
use reth_primitives::{bytes::Bytes, hex, ForkId, B256};
use secp256k1::SecretKey;
use std::{
cell::RefCell,
@@ -76,7 +77,7 @@ use node::{kad_key, NodeKey};
mod table;
// reexport NodeRecord primitive
pub use reth_primitives::NodeRecord;
pub use reth_network_peers::NodeRecord;
#[cfg(any(test, feature = "test-utils"))]
pub mod test_utils;

View File

@@ -1,6 +1,6 @@
use alloy_primitives::keccak256;
use generic_array::GenericArray;
use reth_network_peers::PeerId;
use reth_primitives::{keccak256, NodeRecord};
use reth_network_peers::{NodeRecord, PeerId};
/// The key type for the table.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]

View File

@@ -1,13 +1,14 @@
//! Discovery v4 protocol implementation.
use crate::{error::DecodePacketError, MAX_PACKET_SIZE, MIN_PACKET_SIZE};
use alloy_primitives::{
bytes::{Buf, BufMut, Bytes, BytesMut},
keccak256, B256,
};
use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header, RlpDecodable, RlpEncodable};
use enr::Enr;
use reth_network_peers::{pk2id, PeerId};
use reth_primitives::{
bytes::{Buf, BufMut, Bytes, BytesMut},
keccak256, EnrForkIdEntry, ForkId, NodeRecord, B256,
};
use reth_ethereum_forks::{EnrForkIdEntry, ForkId};
use reth_network_peers::{pk2id, NodeRecord, PeerId};
use secp256k1::{
ecdsa::{RecoverableSignature, RecoveryId},
SecretKey, SECP256K1,

View File

@@ -5,9 +5,10 @@ use crate::{
receive_loop, send_loop, Discv4, Discv4Config, Discv4Service, EgressSender, IngressEvent,
IngressReceiver, PeerId, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS,
};
use alloy_primitives::{hex, B256};
use rand::{thread_rng, Rng, RngCore};
use reth_network_peers::pk2id;
use reth_primitives::{hex, ForkHash, ForkId, NodeRecord, B256};
use reth_ethereum_forks::{ForkHash, ForkId};
use reth_network_peers::{pk2id, NodeRecord};
use secp256k1::{SecretKey, SECP256K1};
use std::{
collections::{HashMap, HashSet},

View File

@@ -13,11 +13,12 @@ workspace = true
[dependencies]
# reth
reth-primitives.workspace = true
reth-ethereum-forks.workspace = true
reth-net-common.workspace = true
reth-network-peers = { workspace = true, features = ["secp256k1"] }
# ethereum
alloy-primitives.workspace = true
secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] }
enr.workspace = true
@@ -30,16 +31,18 @@ trust-dns-resolver = "0.23"
# misc
data-encoding = "2"
linked_hash_set = "0.1"
linked_hash_set.workspace = true
schnellru.workspace = true
thiserror.workspace = true
tracing.workspace = true
parking_lot.workspace = true
serde = { workspace = true, optional = true }
serde_with = { version = "3.3.0", optional = true }
serde_with = { workspace = true, optional = true }
[dev-dependencies]
reth-primitives.workspace = true
alloy-rlp.workspace = true
alloy-chains.workspace = true
tokio = { workspace = true, features = ["sync", "rt", "rt-multi-thread"] }
reth-tracing.workspace = true
rand.workspace = true

View File

@@ -22,8 +22,8 @@ use crate::{
pub use config::DnsDiscoveryConfig;
use enr::Enr;
use error::ParseDnsEntryError;
use reth_network_peers::pk2id;
use reth_primitives::{EnrForkIdEntry, ForkId, NodeRecord};
use reth_ethereum_forks::{EnrForkIdEntry, ForkId};
use reth_network_peers::{pk2id, NodeRecord};
use schnellru::{ByLength, LruMap};
use secp256k1::SecretKey;
use std::{
@@ -411,9 +411,11 @@ fn convert_enr_node_record(enr: &Enr<SecretKey>) -> Option<DnsNodeRecordUpdate>
mod tests {
use super::*;
use crate::tree::TreeRootEntry;
use alloy_chains::Chain;
use alloy_rlp::{Decodable, Encodable};
use enr::EnrKey;
use reth_primitives::{Chain, ForkHash, Hardfork, MAINNET};
use reth_ethereum_forks::{ForkHash, Hardfork};
use reth_primitives::MAINNET;
use secp256k1::rand::thread_rng;
use std::{future::poll_fn, net::Ipv4Addr};

View File

@@ -21,9 +21,9 @@ use crate::error::{
ParseDnsEntryError::{FieldNotFound, UnknownEntry},
ParseEntryResult,
};
use alloy_primitives::{hex, Bytes};
use data_encoding::{BASE32_NOPAD, BASE64URL_NOPAD};
use enr::{Enr, EnrKey, EnrKeyUnambiguous, EnrPublicKey, Error as EnrError};
use reth_primitives::{hex, Bytes};
use secp256k1::SecretKey;
#[cfg(feature = "serde")]
use serde_with::{DeserializeFromStr, SerializeDisplay};

View File

@@ -11,19 +11,18 @@ repository.workspace = true
workspace = true
[dependencies]
reth-primitives.workspace = true
reth-net-common.workspace = true
reth-network-peers = { workspace = true, features = ["secp256k1"] }
alloy-rlp = { workspace = true, features = ["derive"] }
alloy-primitives = { workspace = true, features = ["rand", "rlp"] }
alloy-rlp = { workspace = true, features = ["derive", "arrayvec"] }
futures.workspace = true
thiserror.workspace = true
tokio = { workspace = true, features = ["full"] }
tokio = { workspace = true, features = ["time"] }
tokio-stream.workspace = true
tokio-util = { workspace = true, features = ["codec"] }
pin-project.workspace = true
educe = "0.4.19"
tracing.workspace = true
# HeaderBytes
@@ -37,7 +36,7 @@ ctr = "0.9.2"
digest = "0.10.5"
secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] }
concat-kdf = "0.1.0"
sha2 = "0.10.6"
sha2.workspace = true
sha3 = "0.10.5"
aes = "0.8.1"
hmac = "0.12.1"

View File

@@ -7,17 +7,16 @@ use crate::{
ECIESError,
};
use aes::{cipher::StreamCipher, Aes128, Aes256};
use alloy_primitives::{
bytes::{BufMut, Bytes, BytesMut},
B128, B256, B512 as PeerId,
};
use alloy_rlp::{Encodable, Rlp, RlpEncodable, RlpMaxEncodedLen};
use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
use ctr::Ctr64BE;
use digest::{crypto_common::KeyIvInit, Digest};
use educe::Educe;
use rand::{thread_rng, Rng};
use reth_network_peers::{id2pk, pk2id};
use reth_primitives::{
bytes::{BufMut, Bytes, BytesMut},
B128, B256, B512 as PeerId,
};
use secp256k1::{
ecdsa::{RecoverableSignature, RecoveryId},
PublicKey, SecretKey, SECP256K1,
@@ -51,17 +50,13 @@ fn kdf(secret: B256, s1: &[u8], dest: &mut [u8]) {
concat_kdf::derive_key_into::<Sha256>(secret.as_slice(), s1, dest).unwrap();
}
#[derive(Educe)]
#[educe(Debug)]
pub struct ECIES {
#[educe(Debug(ignore))]
secret_key: SecretKey,
public_key: PublicKey,
remote_public_key: Option<PublicKey>,
pub(crate) remote_id: Option<PeerId>,
#[educe(Debug(ignore))]
ephemeral_secret_key: SecretKey,
ephemeral_public_key: PublicKey,
ephemeral_shared_secret: Option<B256>,
@@ -70,9 +65,7 @@ pub struct ECIES {
nonce: B256,
remote_nonce: Option<B256>,
#[educe(Debug(ignore))]
ingress_aes: Option<Ctr64BE<Aes256>>,
#[educe(Debug(ignore))]
egress_aes: Option<Ctr64BE<Aes256>>,
ingress_mac: Option<MAC>,
egress_mac: Option<MAC>,
@@ -83,6 +76,27 @@ pub struct ECIES {
body_size: Option<usize>,
}
impl core::fmt::Debug for ECIES {
#[inline]
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("ECIES")
.field("public_key", &self.public_key)
.field("remote_public_key", &self.remote_public_key)
.field("remote_id", &self.remote_id)
.field("ephemeral_public_key", &self.ephemeral_public_key)
.field("ephemeral_shared_secret", &self.ephemeral_shared_secret)
.field("remote_ephemeral_public_key", &self.remote_ephemeral_public_key)
.field("nonce", &self.nonce)
.field("remote_nonce", &self.remote_nonce)
.field("ingress_mac", &self.ingress_mac)
.field("egress_mac", &self.egress_mac)
.field("init_msg", &self.init_msg)
.field("remote_init_msg", &self.remote_init_msg)
.field("body_size", &self.body_size)
.finish()
}
}
fn split_at_mut<T>(arr: &mut [T], idx: usize) -> Result<(&mut [T], &mut [T]), ECIESError> {
if idx > arr.len() {
return Err(ECIESErrorImpl::OutOfBounds { idx, len: arr.len() }.into())
@@ -721,7 +735,7 @@ impl ECIES {
#[cfg(test)]
mod tests {
use super::*;
use reth_primitives::{b256, hex};
use alloy_primitives::{b256, hex};
#[test]
fn ecdh() {

View File

@@ -1,5 +1,5 @@
use crate::{algorithm::ECIES, ECIESError, EgressECIESValue, IngressECIESValue};
use reth_primitives::{BytesMut, B512 as PeerId};
use alloy_primitives::{bytes::BytesMut, B512 as PeerId};
use secp256k1::SecretKey;
use std::{fmt::Debug, io};
use tokio_util::codec::{Decoder, Encoder};

View File

@@ -18,7 +18,7 @@ pub use error::ECIESError;
mod codec;
use reth_primitives::{
use alloy_primitives::{
bytes::{Bytes, BytesMut},
B512 as PeerId,
};

View File

@@ -10,11 +10,11 @@
//! For more information, refer to the [Ethereum MAC specification](https://github.com/ethereum/devp2p/blob/master/rlpx.md#mac).
use aes::Aes256Enc;
use alloy_primitives::{B128, B256};
use block_padding::NoPadding;
use cipher::BlockEncrypt;
use digest::KeyInit;
use generic_array::GenericArray;
use reth_primitives::{B128, B256};
use sha3::{Digest, Keccak256};
use typenum::U16;

View File

@@ -3,12 +3,11 @@
use crate::{
codec::ECIESCodec, error::ECIESErrorImpl, ECIESError, EgressECIESValue, IngressECIESValue,
};
use futures::{ready, Sink, SinkExt};
use reth_net_common::stream::HasRemoteAddr;
use reth_primitives::{
use alloy_primitives::{
bytes::{Bytes, BytesMut},
B512 as PeerId,
};
use futures::{ready, Sink, SinkExt};
use secp256k1::SecretKey;
use std::{
fmt::Debug,
@@ -38,10 +37,10 @@ pub struct ECIESStream<Io> {
impl<Io> ECIESStream<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + HasRemoteAddr,
Io: AsyncRead + AsyncWrite + Unpin,
{
/// Connect to an `ECIES` server
#[instrument(skip(transport, secret_key), fields(peer=&*format!("{:?}", transport.remote_addr())))]
#[instrument(skip(transport, secret_key))]
pub async fn connect(
transport: Io,
secret_key: SecretKey,
@@ -98,7 +97,6 @@ where
}
/// Listen on a just connected ECIES client
#[instrument(skip_all, fields(peer=&*format!("{:?}", transport.remote_addr())))]
pub async fn incoming(transport: Io, secret_key: SecretKey) -> Result<Self, ECIESError> {
let ecies = ECIESCodec::new_server(secret_key)?;

View File

@@ -1,7 +1,7 @@
//! Utility functions for hashing and encoding.
use alloy_primitives::B256;
use hmac::{Hmac, Mac};
use reth_primitives::B256;
use sha2::{Digest, Sha256};
/// Hashes the input data with SHA256.

View File

@@ -71,14 +71,17 @@ impl Discovery {
/// This will spawn the [`reth_discv4::Discv4Service`] onto a new task and establish a listener
/// channel to receive all discovered nodes.
pub async fn new(
tcp_addr: SocketAddr,
discovery_v4_addr: SocketAddr,
sk: SecretKey,
discv4_config: Option<Discv4Config>,
discv5_config: Option<reth_discv5::Config>, // contains discv5 listen address
dns_discovery_config: Option<DnsDiscoveryConfig>,
) -> Result<Self, NetworkError> {
// setup discv4
let local_enr = NodeRecord::from_secret_key(discovery_v4_addr, &sk);
// setup discv4 with the discovery address and tcp port
let local_enr =
NodeRecord::from_secret_key(discovery_v4_addr, &sk).with_tcp_port(tcp_addr.port());
let discv4_future = async {
let Some(disc_config) = discv4_config else { return Ok((None, None, None)) };
let (discv4, mut discv4_service) =
@@ -342,6 +345,7 @@ mod tests {
let (secret_key, _) = SECP256K1.generate_keypair(&mut rng);
let discovery_addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0));
let _discovery = Discovery::new(
discovery_addr,
discovery_addr,
secret_key,
Default::default(),
@@ -370,9 +374,16 @@ mod tests {
.discv5_config(discv5::ConfigBuilder::new(discv5_listen_config).build())
.build();
Discovery::new(discv4_addr, secret_key, Some(discv4_config), Some(discv5_config), None)
.await
.expect("should build discv5 with discv4 downgrade")
Discovery::new(
discv4_addr,
discv4_addr,
secret_key,
Some(discv4_config),
Some(discv5_config),
None,
)
.await
.expect("should build discv5 with discv4 downgrade")
}
#[tokio::test(flavor = "multi_thread")]

View File

@@ -1,7 +1,6 @@
//! Contains connection-oriented interfaces.
use futures::{ready, Stream};
use std::{
io,
net::SocketAddr,

View File

@@ -197,7 +197,9 @@ where
let incoming = ConnectionListener::bind(listener_addr).await.map_err(|err| {
NetworkError::from_io_error(err, ServiceKind::Listener(listener_addr))
})?;
let listener_address = Arc::new(Mutex::new(incoming.local_address()));
// retrieve the tcp address of the socket
let listener_addr = incoming.local_address();
// resolve boot nodes
let mut resolved_boot_nodes = vec![];
@@ -214,6 +216,7 @@ where
});
let discovery = Discovery::new(
listener_addr,
discovery_v4_addr,
secret_key,
discovery_v4_config,
@@ -248,7 +251,7 @@ where
let handle = NetworkHandle::new(
Arc::clone(&num_active_peers),
listener_address,
Arc::new(Mutex::new(listener_addr)),
to_manager_tx,
secret_key,
local_peer_id,
@@ -314,7 +317,7 @@ where
NetworkBuilder { network: self, transactions: (), request_handler: () }
}
/// Returns the [`SocketAddr`] that listens for incoming connections.
/// Returns the [`SocketAddr`] that listens for incoming tcp connections.
pub const fn local_addr(&self) -> SocketAddr {
self.swarm.listener().local_address()
}

View File

@@ -15,7 +15,6 @@ use reth_eth_wire::{
UnauthedP2PStream,
};
use reth_metrics::common::mpsc::MeteredPollSender;
use reth_net_common::stream::HasRemoteAddr;
use reth_network_peers::PeerId;
use reth_primitives::{ForkFilter, ForkId, ForkTransition, Head};
use reth_tasks::TaskSpawner;
@@ -927,7 +926,7 @@ async fn authenticate(
/// Returns an [`ECIESStream`] if it can be built. If not, send a
/// [`PendingSessionEvent::EciesAuthError`] and returns `None`
async fn get_eciess_stream<Io: AsyncRead + AsyncWrite + Unpin + HasRemoteAddr>(
async fn get_eciess_stream<Io: AsyncRead + AsyncWrite + Unpin>(
stream: Io,
secret_key: SecretKey,
direction: Direction,

View File

@@ -1152,7 +1152,7 @@ pub struct GetPooledTxRequestFut {
impl GetPooledTxRequestFut {
#[inline]
fn new(
const fn new(
peer_id: PeerId,
requested_hashes: RequestTxHashes,
response: oneshot::Receiver<RequestResult<PooledTransactions>>,

View File

@@ -3,7 +3,7 @@ use reth_network::{
error::{NetworkError, ServiceKind},
Discovery, NetworkConfigBuilder, NetworkManager,
};
use reth_network_api::NetworkInfo;
use reth_network_api::{NetworkInfo, PeersInfo};
use reth_provider::test_utils::NoopProvider;
use secp256k1::SecretKey;
use std::{
@@ -59,8 +59,46 @@ async fn test_discovery_addr_in_use() {
let any_port_listener = TcpListener::bind(addr).await.unwrap();
let port = any_port_listener.local_addr().unwrap().port();
let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, port));
let _discovery = Discovery::new(addr, secret_key, Some(disc_config), None, None).await.unwrap();
let _discovery =
Discovery::new(addr, addr, secret_key, Some(disc_config), None, None).await.unwrap();
let disc_config = Discv4Config::default();
let result = Discovery::new(addr, secret_key, Some(disc_config), None, None).await;
let result = Discovery::new(addr, addr, secret_key, Some(disc_config), None, None).await;
assert!(is_addr_in_use_kind(&result.err().unwrap(), ServiceKind::Discovery(addr)));
}
// <https://github.com/paradigmxyz/reth/issues/8851>
// With discovery disabled, the local node record must still advertise the TCP port
// the OS actually assigned to the listener (not the requested port 0).
#[tokio::test(flavor = "multi_thread")]
async fn test_tcp_port_node_record_no_discovery() {
    let secret_key = SecretKey::new(&mut rand::thread_rng());
    let config = NetworkConfigBuilder::new(secret_key)
        .listener_port(0)
        .disable_discovery()
        .build_with_noop_provider();

    let network = NetworkManager::new(config).await.unwrap();

    let local_addr = network.local_addr();
    // ensure we retrieved the port the OS chose
    assert_ne!(local_addr.port(), 0);

    let record = network.handle().local_node_record();
    assert_eq!(record.tcp_port, local_addr.port());
}
// <https://github.com/paradigmxyz/reth/issues/8851>
// With discovery enabled on a random port, the local node record must advertise the
// OS-assigned TCP port and a non-zero UDP (discovery) port.
#[tokio::test(flavor = "multi_thread")]
async fn test_tcp_port_node_record_discovery() {
    let secret_key = SecretKey::new(&mut rand::thread_rng());
    let config = NetworkConfigBuilder::new(secret_key)
        .listener_port(0)
        .discovery_port(0)
        .build_with_noop_provider();

    let network = NetworkManager::new(config).await.unwrap();

    let local_addr = network.local_addr();
    // ensure we retrieved the port the OS chose
    assert_ne!(local_addr.port(), 0);

    let record = network.handle().local_node_record();
    assert_eq!(record.tcp_port, local_addr.port());
    assert_ne!(record.udp_port, 0);
}

View File

@@ -38,7 +38,7 @@ pub struct TestHeaderDownloader {
impl TestHeaderDownloader {
/// Instantiates the downloader with the mock responses
pub fn new(
pub const fn new(
client: TestHeadersClient,
consensus: Arc<TestConsensus>,
limit: u64,

View File

@@ -42,8 +42,10 @@ pub struct NodeRecord {
}
impl NodeRecord {
/// Derive the [`NodeRecord`] from the secret key and addr.
///
/// Note: this will set both the TCP and UDP ports to the port of the addr.
#[cfg(feature = "secp256k1")]
/// Derive the [`NodeRecord`] from the secret key and addr
pub fn from_secret_key(addr: SocketAddr, sk: &secp256k1::SecretKey) -> Self {
let pk = secp256k1::PublicKey::from_secret_key(secp256k1::SECP256K1, sk);
let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);
@@ -73,8 +75,19 @@ impl NodeRecord {
self
}
/// Sets the tcp port
pub const fn with_tcp_port(mut self, port: u16) -> Self {
self.tcp_port = port;
self
}
/// Sets the udp port
pub const fn with_udp_port(mut self, port: u16) -> Self {
self.udp_port = port;
self
}
/// Creates a new record from a socket addr and peer id.
#[allow(dead_code)]
pub const fn new(addr: SocketAddr, id: PeerId) -> Self {
Self { address: addr.ip(), tcp_port: addr.port(), udp_port: addr.port(), id }
}

View File

@@ -205,7 +205,7 @@ impl NetworkArgs {
/// Sets the p2p and discovery ports to zero, allowing the OS to assign a random unused port
/// when network components bind to sockets.
pub fn with_unused_ports(mut self) -> Self {
pub const fn with_unused_ports(mut self) -> Self {
self = self.with_unused_p2p_port();
self.discovery = self.discovery.with_unused_discovery_port();
self

View File

@@ -129,7 +129,7 @@ pub struct EngineStoreStream<S> {
impl<S> EngineStoreStream<S> {
/// Create new engine store stream wrapper.
pub fn new(stream: S, path: PathBuf) -> Self {
pub const fn new(stream: S, path: PathBuf) -> Self {
Self { stream, store: EngineMessageStore::new(path) }
}
}

View File

@@ -22,7 +22,7 @@ pub struct NodeExitFuture {
impl NodeExitFuture {
/// Create a new `NodeExitFuture`.
pub fn new(
pub const fn new(
consensus_engine_rx: oneshot::Receiver<Result<(), BeaconConsensusEngineError>>,
terminate: bool,
) -> Self {

View File

@@ -31,7 +31,7 @@ pub struct NodeBuilderWithTypes<T: FullNodeTypes> {
impl<T: FullNodeTypes> NodeBuilderWithTypes<T> {
/// Creates a new instance of the node builder with the given configuration and types.
pub fn new(config: NodeConfig, database: T::DB) -> Self {
pub const fn new(config: NodeConfig, database: T::DB) -> Self {
Self { config, adapter: NodeTypesAdapter::new(database) }
}

View File

@@ -20,7 +20,9 @@ reth-prune.workspace = true
reth-static-file.workspace = true
reth-db-api.workspace = true
reth-primitives.workspace = true
reth-rpc-types.workspace = true
# alloy
alloy-rpc-types-engine.workspace = true
# async
tokio.workspace = true

View File

@@ -1,6 +1,7 @@
//! Support for handling events emitted by node components.
use crate::cl::ConsensusLayerHealthEvent;
use alloy_rpc_types_engine::ForkchoiceState;
use futures::Stream;
use reth_beacon_consensus::{
BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, ForkchoiceStatus,
@@ -10,7 +11,6 @@ use reth_network::{NetworkEvent, NetworkHandle};
use reth_network_api::PeersInfo;
use reth_primitives::{constants, BlockNumber, B256};
use reth_prune::PrunerEvent;
use reth_rpc_types::engine::ForkchoiceState;
use reth_stages::{EntitiesCheckpoint, ExecOutput, PipelineEvent, StageCheckpoint, StageId};
use reth_static_file::StaticFileProducerEvent;
use std::{

View File

@@ -11,7 +11,10 @@
use reth_consensus::{Consensus, ConsensusError, PostExecutionInput};
use reth_consensus_common::validation::{
validate_block_pre_execution, validate_header_extradata, validate_header_standalone,
validate_against_parent_4844, validate_against_parent_eip1559_base_fee,
validate_against_parent_hash_number, validate_against_parent_timestamp,
validate_block_pre_execution, validate_header_base_fee, validate_header_extradata,
validate_header_gas,
};
use reth_primitives::{
BlockWithSenders, ChainSpec, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256,
@@ -44,8 +47,8 @@ impl OptimismBeaconConsensus {
impl Consensus for OptimismBeaconConsensus {
fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> {
validate_header_standalone(header, &self.chain_spec)?;
Ok(())
validate_header_gas(header)?;
validate_header_base_fee(header, &self.chain_spec)
}
fn validate_header_against_parent(
@@ -53,7 +56,19 @@ impl Consensus for OptimismBeaconConsensus {
header: &SealedHeader,
parent: &SealedHeader,
) -> Result<(), ConsensusError> {
header.validate_against_parent(parent, &self.chain_spec).map_err(ConsensusError::from)?;
validate_against_parent_hash_number(header, parent)?;
if self.chain_spec.is_bedrock_active_at_block(header.number) {
validate_against_parent_timestamp(header, parent)?;
}
validate_against_parent_eip1559_base_fee(header, parent, &self.chain_spec)?;
// ensure that the blob gas fields for this block
if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) {
validate_against_parent_4844(header, parent)?;
}
Ok(())
}

View File

@@ -44,7 +44,7 @@ impl OpExecutorProvider {
impl<EvmConfig> OpExecutorProvider<EvmConfig> {
/// Creates a new executor provider.
pub fn new(chain_spec: Arc<ChainSpec>, evm_config: EvmConfig) -> Self {
pub const fn new(chain_spec: Arc<ChainSpec>, evm_config: EvmConfig) -> Self {
Self { chain_spec, evm_config }
}
}
@@ -236,7 +236,7 @@ pub struct OpBlockExecutor<EvmConfig, DB> {
impl<EvmConfig, DB> OpBlockExecutor<EvmConfig, DB> {
/// Creates a new Ethereum block executor.
pub fn new(chain_spec: Arc<ChainSpec>, evm_config: EvmConfig, state: State<DB>) -> Self {
pub const fn new(chain_spec: Arc<ChainSpec>, evm_config: EvmConfig, state: State<DB>) -> Self {
Self { executor: OpEvmExecutor { chain_spec, evm_config }, state }
}

View File

@@ -179,7 +179,7 @@ pub struct OptimismBuiltPayload {
impl OptimismBuiltPayload {
/// Initializes the payload with the given initial block.
pub fn new(
pub const fn new(
id: PayloadId,
block: SealedBlock,
fees: U256,

View File

@@ -27,7 +27,7 @@ revm.workspace = true
# async
tokio = { workspace = true, features = ["sync", "time"] }
futures-core = "0.3"
futures-core.workspace = true
futures-util.workspace = true
# metrics

View File

@@ -636,7 +636,7 @@ pub struct PendingPayload<P> {
impl<P> PendingPayload<P> {
/// Constructs a `PendingPayload` future.
pub fn new(
pub const fn new(
cancel: Cancelled,
payload: oneshot::Receiver<Result<BuildOutcome<P>, PayloadBuilderError>>,
) -> Self {
@@ -773,7 +773,7 @@ pub struct BuildArguments<Pool, Client, Attributes, Payload> {
impl<Pool, Client, Attributes, Payload> BuildArguments<Pool, Client, Attributes, Payload> {
/// Create new build arguments.
pub fn new(
pub const fn new(
client: Client,
pool: Pool,
cached_reads: CachedReads,

View File

@@ -22,7 +22,7 @@ pub struct ExecutionPayloadValidator {
impl ExecutionPayloadValidator {
/// Create a new validator.
pub fn new(chain_spec: Arc<ChainSpec>) -> Self {
pub const fn new(chain_spec: Arc<ChainSpec>) -> Self {
Self { chain_spec }
}

View File

@@ -14,9 +14,15 @@ workspace = true
[dependencies]
reth-codecs.workspace = true
alloy-consensus.workspace = true
alloy-eips.workspace = true
alloy-genesis.workspace = true
alloy-primitives.workspace = true
alloy-consensus.workspace = true
alloy-rlp.workspace = true
alloy-rpc-types-eth = { workspace = true, optional = true }
derive_more.workspace = true
revm-primitives.workspace = true
# required by reth-codecs
modular-bitfield.workspace = true
@@ -33,11 +39,13 @@ arbitrary = { workspace = true, features = ["derive"] }
proptest.workspace = true
proptest-derive.workspace = true
test-fuzz.workspace = true
rand.workspace = true
[features]
test-utils = ["arbitrary"]
arbitrary = [
"dep:arbitrary",
"dep:proptest",
"dep:proptest-derive"
]
alloy-compat = ["alloy-rpc-types-eth"]

View File

@@ -0,0 +1,48 @@
use super::Header;
use alloy_rpc_types_eth::{ConversionError, Header as RpcHeader};
/// Fallible conversion from an RPC block header ([`RpcHeader`]) into the
/// consensus [`Header`].
///
/// Returns a [`ConversionError`] when a numeric field does not fit the
/// consensus representation, or when the block number (required here) is
/// missing from the RPC header.
impl TryFrom<RpcHeader> for Header {
    type Error = ConversionError;
    fn try_from(header: RpcHeader) -> Result<Self, Self::Error> {
        Ok(Self {
            // Optional numeric fields arrive as wider integers on the RPC
            // side; narrow each one fallibly and preserve `None` as-is.
            base_fee_per_gas: header
                .base_fee_per_gas
                .map(|base_fee_per_gas| {
                    base_fee_per_gas.try_into().map_err(ConversionError::BaseFeePerGasConversion)
                })
                .transpose()?,
            beneficiary: header.miner,
            blob_gas_used: header
                .blob_gas_used
                .map(|blob_gas_used| {
                    blob_gas_used.try_into().map_err(ConversionError::BlobGasUsedConversion)
                })
                .transpose()?,
            difficulty: header.difficulty,
            excess_blob_gas: header
                .excess_blob_gas
                .map(|excess_blob_gas| {
                    excess_blob_gas.try_into().map_err(ConversionError::ExcessBlobGasConversion)
                })
                .transpose()?,
            extra_data: header.extra_data,
            // Required numeric fields: a failed narrowing is a hard error.
            gas_limit: header.gas_limit.try_into().map_err(ConversionError::GasLimitConversion)?,
            gas_used: header.gas_used.try_into().map_err(ConversionError::GasUsedConversion)?,
            logs_bloom: header.logs_bloom,
            // `mix_hash`/`nonce` may be absent on the RPC header; fall back to
            // zero values rather than erroring.
            mix_hash: header.mix_hash.unwrap_or_default(),
            nonce: u64::from_be_bytes(header.nonce.unwrap_or_default().0),
            number: header.number.ok_or(ConversionError::MissingBlockNumber)?,
            ommers_hash: header.uncles_hash,
            parent_beacon_block_root: header.parent_beacon_block_root,
            parent_hash: header.parent_hash,
            receipts_root: header.receipts_root,
            state_root: header.state_root,
            timestamp: header.timestamp,
            transactions_root: header.transactions_root,
            withdrawals_root: header.withdrawals_root,
            // TODO: requests_root: header.requests_root,
            requests_root: None,
        })
    }
}

View File

@@ -0,0 +1,170 @@
//! Ethereum protocol-related constants
use alloy_primitives::{b256, B256, U256};
use std::time::Duration;
/// The client version: `reth/v{major}.{minor}.{patch}`
pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION"));
/// The first four bytes of the call data for a function call specifies the function to be called.
pub const SELECTOR_LEN: usize = 4;
/// Maximum extra data size in a block after genesis
pub const MAXIMUM_EXTRA_DATA_SIZE: usize = 32;
/// An EPOCH is a series of 32 slots.
pub const EPOCH_SLOTS: u64 = 32;
/// The duration of a slot in seconds.
///
/// This is the time period of 12 seconds in which a randomly chosen validator has time to propose a
/// block.
pub const SLOT_DURATION: Duration = Duration::from_secs(12);
/// An EPOCH is a series of 32 slots (~6.4min).
pub const EPOCH_DURATION: Duration = Duration::from_secs(12 * EPOCH_SLOTS);
/// The default block nonce in the beacon consensus
pub const BEACON_NONCE: u64 = 0u64;
/// The default Ethereum block gas limit.
// TODO: This should be a chain spec parameter.
/// See <https://github.com/paradigmxyz/reth/issues/3233>.
pub const ETHEREUM_BLOCK_GAS_LIMIT: u64 = 30_000_000;
/// The minimum tx fee below which the txpool will reject the transaction.
///
/// Configured to `7` WEI which is the lowest possible value of base fee under mainnet EIP-1559
/// parameters. `BASE_FEE_MAX_CHANGE_DENOMINATOR` <https://eips.ethereum.org/EIPS/eip-1559>
/// is `8`, or 12.5%. Once the base fee has dropped to `7` WEI it cannot decrease further because
/// 12.5% of 7 is less than 1.
///
/// Note that min base fee under different 1559 parameterizations may differ, but there's no
/// significant harm in leaving this setting as is.
pub const MIN_PROTOCOL_BASE_FEE: u64 = 7;
/// Same as [`MIN_PROTOCOL_BASE_FEE`] but as a U256.
pub const MIN_PROTOCOL_BASE_FEE_U256: U256 = U256::from_limbs([7u64, 0, 0, 0]);
/// Initial base fee as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)
pub const EIP1559_INITIAL_BASE_FEE: u64 = 1_000_000_000;
/// Base fee max change denominator as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)
pub const EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u64 = 8;
/// Elasticity multiplier as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)
pub const EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u64 = 2;
/// Minimum gas limit allowed for transactions.
pub const MINIMUM_GAS_LIMIT: u64 = 5000;
/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism
/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc.
pub const OP_MAINNET_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 50;
/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism Canyon
/// hardfork.
pub const OP_MAINNET_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u128 = 250;
/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism
/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc.
pub const OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 6;
/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism
/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc.
pub const OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 50;
/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism Canyon
/// hardfork.
pub const OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u128 = 250;
/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism
/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc.
pub const OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 6;
/// Base fee max change denominator for Base Sepolia as defined in the Optimism
/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc.
pub const BASE_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 10;
/// Multiplier for converting gwei to wei.
pub const GWEI_TO_WEI: u64 = 1_000_000_000;
/// Multiplier for converting finney (milliether) to wei.
pub const FINNEY_TO_WEI: u128 = (GWEI_TO_WEI as u128) * 1_000_000;
/// Multiplier for converting ether to wei.
pub const ETH_TO_WEI: u128 = FINNEY_TO_WEI * 1000;
/// Multiplier for converting mgas to gas.
pub const MGAS_TO_GAS: u64 = 1_000_000u64;
/// The Ethereum mainnet genesis hash:
/// `0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3`
pub const MAINNET_GENESIS_HASH: B256 =
    b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3");
/// Goerli genesis hash: `0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a`
pub const GOERLI_GENESIS_HASH: B256 =
b256!("bf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a");
/// Sepolia genesis hash: `0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9`
pub const SEPOLIA_GENESIS_HASH: B256 =
b256!("25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9");
/// Holesky genesis hash: `0xff9006519a8ce843ac9c28549d24211420b546e12ce2d170c77a8cca7964f23d`
pub const HOLESKY_GENESIS_HASH: B256 =
b256!("ff9006519a8ce843ac9c28549d24211420b546e12ce2d170c77a8cca7964f23d");
/// Testnet genesis hash: `0x2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c`
pub const DEV_GENESIS_HASH: B256 =
b256!("2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c");
/// Keccak256 over empty array: `0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470`
pub const KECCAK_EMPTY: B256 =
b256!("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470");
/// Ommer root of empty list: `0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347`
pub const EMPTY_OMMER_ROOT_HASH: B256 =
b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347");
/// Root hash of an empty trie: `0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421`
pub const EMPTY_ROOT_HASH: B256 =
b256!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421");
/// Transactions root of empty receipts set.
pub const EMPTY_RECEIPTS: B256 = EMPTY_ROOT_HASH;
/// Transactions root of empty transactions set.
pub const EMPTY_TRANSACTIONS: B256 = EMPTY_ROOT_HASH;
/// Withdrawals root of empty withdrawals set.
pub const EMPTY_WITHDRAWALS: B256 = EMPTY_ROOT_HASH;
/// The number of blocks to unwind during a reorg that already became a part of canonical chain.
///
/// In reality, the node can end up in this particular situation very rarely. It would happen only
/// if the node process is abruptly terminated during ongoing reorg and doesn't boot back up for
/// long period of time.
///
/// Unwind depth of `3` blocks significantly reduces the chance that the reorged block is kept in
/// the database.
pub const BEACON_CONSENSUS_REORG_UNWIND_DEPTH: u64 = 3;
/// Max seconds from current time allowed for blocks, before they're considered future blocks.
///
/// This is only used when checking whether or not the timestamp for pre-merge blocks is in the
/// future.
///
/// See:
/// <https://github.com/ethereum/go-ethereum/blob/a196f3e8a22b6ad22ced5c2e3baf32bc3ebd4ec9/consensus/ethash/consensus.go#L227-L229>
pub const ALLOWED_FUTURE_BLOCK_TIME_SECONDS: u64 = 15;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn min_protocol_sanity() {
assert_eq!(MIN_PROTOCOL_BASE_FEE_U256.to::<u64>(), MIN_PROTOCOL_BASE_FEE);
}
}

View File

@@ -0,0 +1,8 @@
/// Errors that can occur during header sanity checks.
///
/// The enum is fieldless, so it is cheap to copy; `Clone`/`Copy` are derived
/// so callers can duplicate the error without pattern-matching.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum HeaderError {
    /// Represents an error when the block difficulty is too large.
    LargeDifficulty,
    /// Represents an error when the block extradata is too large.
    LargeExtraData,
}

View File

@@ -0,0 +1,509 @@
mod sealed;
pub use sealed::SealedHeader;
mod error;
pub use error::HeaderError;
#[cfg(any(test, feature = "test-utils", feature = "arbitrary"))]
pub mod test_utils;
use alloy_consensus::constants::{EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH};
use alloy_eips::{
calc_next_block_base_fee, eip1559::BaseFeeParams, merge::ALLOWED_FUTURE_BLOCK_TIME_SECONDS,
BlockNumHash,
};
use alloy_primitives::{keccak256, Address, BlockNumber, Bloom, Bytes, B256, B64, U256};
use alloy_rlp::{length_of_length, Decodable, Encodable};
use bytes::BufMut;
use reth_codecs::{main_codec, Compact};
use revm_primitives::{calc_blob_gasprice, calc_excess_blob_gas};
use std::mem;
/// Block header
#[main_codec]
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Header {
/// The Keccak 256-bit hash of the parent
/// blocks header, in its entirety; formally Hp.
pub parent_hash: B256,
/// The Keccak 256-bit hash of the ommers list portion of this block; formally Ho.
pub ommers_hash: B256,
/// The 160-bit address to which all fees collected from the successful mining of this block
/// be transferred; formally Hc.
pub beneficiary: Address,
/// The Keccak 256-bit hash of the root node of the state trie, after all transactions are
/// executed and finalisations applied; formally Hr.
pub state_root: B256,
/// The Keccak 256-bit hash of the root node of the trie structure populated with each
/// transaction in the transactions list portion of the block; formally Ht.
pub transactions_root: B256,
/// The Keccak 256-bit hash of the root node of the trie structure populated with the receipts
/// of each transaction in the transactions list portion of the block; formally He.
pub receipts_root: B256,
/// The Keccak 256-bit hash of the withdrawals list portion of this block.
///
/// See [EIP-4895](https://eips.ethereum.org/EIPS/eip-4895).
pub withdrawals_root: Option<B256>,
/// The Bloom filter composed from indexable information (logger address and log topics)
/// contained in each log entry from the receipt of each transaction in the transactions list;
/// formally Hb.
pub logs_bloom: Bloom,
/// A scalar value corresponding to the difficulty level of this block. This can be calculated
/// from the previous blocks difficulty level and the timestamp; formally Hd.
pub difficulty: U256,
/// A scalar value equal to the number of ancestor blocks. The genesis block has a number of
/// zero; formally Hi.
pub number: BlockNumber,
/// A scalar value equal to the current limit of gas expenditure per block; formally Hl.
pub gas_limit: u64,
/// A scalar value equal to the total gas used in transactions in this block; formally Hg.
pub gas_used: u64,
/// A scalar value equal to the reasonable output of Unixs time() at this blocks inception;
/// formally Hs.
pub timestamp: u64,
/// A 256-bit hash which, combined with the
/// nonce, proves that a sufficient amount of computation has been carried out on this block;
/// formally Hm.
pub mix_hash: B256,
/// A 64-bit value which, combined with the mixhash, proves that a sufficient amount of
/// computation has been carried out on this block; formally Hn.
pub nonce: u64,
/// A scalar representing EIP1559 base fee which can move up or down each block according
/// to a formula which is a function of gas used in parent block and gas target
/// (block gas limit divided by elasticity multiplier) of parent block.
/// The algorithm results in the base fee per gas increasing when blocks are
/// above the gas target, and decreasing when blocks are below the gas target. The base fee per
/// gas is burned.
pub base_fee_per_gas: Option<u64>,
/// The total amount of blob gas consumed by the transactions within the block, added in
/// EIP-4844.
pub blob_gas_used: Option<u64>,
/// A running total of blob gas consumed in excess of the target, prior to the block. Blocks
/// with above-target blob gas consumption increase this value, blocks with below-target blob
/// gas consumption decrease it (bounded at 0). This was added in EIP-4844.
pub excess_blob_gas: Option<u64>,
/// The hash of the parent beacon block's root is included in execution blocks, as proposed by
/// EIP-4788.
///
/// This enables trust-minimized access to consensus state, supporting staking pools, bridges,
/// and more.
///
/// The beacon roots contract handles root storage, enhancing Ethereum's functionalities.
pub parent_beacon_block_root: Option<B256>,
/// The Keccak 256-bit hash of the root node of the trie structure populated with each
/// [EIP-7685] request in the block body.
///
/// [EIP-7685]: https://eips.ethereum.org/EIPS/eip-7685
pub requests_root: Option<B256>,
/// An arbitrary byte array containing data relevant to this block. This must be 32 bytes or
/// fewer; formally Hx.
pub extra_data: Bytes,
}
/// Identity conversion: lets a `&Header` be used wherever `AsRef<Header>` is
/// expected (e.g. generic APIs accepting either owned or borrowed headers).
impl AsRef<Self> for Header {
    fn as_ref(&self) -> &Self {
        self
    }
}
impl Default for Header {
    /// Builds an "empty" header: the ommer/state/transactions/receipts roots
    /// are the canonical empty-trie / empty-list hashes, all scalars are zero,
    /// and every optional (post-fork) field is `None`.
    fn default() -> Self {
        // Initializer order mirrors the struct declaration for easy auditing.
        Self {
            parent_hash: Default::default(),
            ommers_hash: EMPTY_OMMER_ROOT_HASH,
            beneficiary: Default::default(),
            state_root: EMPTY_ROOT_HASH,
            transactions_root: EMPTY_ROOT_HASH,
            receipts_root: EMPTY_ROOT_HASH,
            withdrawals_root: None,
            logs_bloom: Default::default(),
            difficulty: Default::default(),
            number: 0,
            gas_limit: 0,
            gas_used: 0,
            timestamp: 0,
            mix_hash: Default::default(),
            nonce: 0,
            base_fee_per_gas: None,
            blob_gas_used: None,
            excess_blob_gas: None,
            parent_beacon_block_root: None,
            requests_root: None,
            extra_data: Default::default(),
        }
    }
}
impl Header {
/// Checks if the block's difficulty is set to zero, indicating a Proof-of-Stake header.
///
/// This function is linked to EIP-3675, proposing the consensus upgrade to Proof-of-Stake:
/// [EIP-3675](https://eips.ethereum.org/EIPS/eip-3675#replacing-difficulty-with-0)
///
/// Verifies whether, as per the EIP, the block's difficulty is updated to zero,
/// signifying the transition to a Proof-of-Stake mechanism.
///
/// Returns `true` if the block's difficulty matches the constant zero set by the EIP.
pub fn is_zero_difficulty(&self) -> bool {
self.difficulty.is_zero()
}
/// Performs a sanity check on the extradata field of the header.
///
/// # Errors
///
/// Returns an error if the extradata size is larger than 100 KB.
pub fn ensure_extradata_valid(&self) -> Result<(), HeaderError> {
if self.extra_data.len() > 100 * 1024 {
return Err(HeaderError::LargeExtraData)
}
Ok(())
}
/// Performs a sanity check on the block difficulty field of the header.
///
/// # Errors
///
/// Returns an error if the block difficulty exceeds 80 bits.
pub fn ensure_difficulty_valid(&self) -> Result<(), HeaderError> {
if self.difficulty.bit_len() > 80 {
return Err(HeaderError::LargeDifficulty)
}
Ok(())
}
/// Performs combined sanity checks on multiple header fields.
///
/// This method combines checks for block difficulty and extradata sizes.
///
/// # Errors
///
/// Returns an error if either the block difficulty exceeds 80 bits
/// or if the extradata size is larger than 100 KB.
pub fn ensure_well_formed(&self) -> Result<(), HeaderError> {
self.ensure_difficulty_valid()?;
self.ensure_extradata_valid()?;
Ok(())
}
/// Checks if the block's timestamp is in the past compared to the parent block's timestamp.
///
/// Note: This check is relevant only pre-merge.
pub const fn is_timestamp_in_past(&self, parent_timestamp: u64) -> bool {
self.timestamp <= parent_timestamp
}
/// Checks if the block's timestamp is in the future based on the present timestamp.
///
/// Clock can drift but this can be consensus issue.
///
/// Note: This check is relevant only pre-merge.
pub const fn exceeds_allowed_future_timestamp(&self, present_timestamp: u64) -> bool {
self.timestamp > present_timestamp + ALLOWED_FUTURE_BLOCK_TIME_SECONDS
}
/// Returns the parent block's number and hash
pub const fn parent_num_hash(&self) -> BlockNumHash {
BlockNumHash { number: self.number.saturating_sub(1), hash: self.parent_hash }
}
/// Heavy function that will calculate hash of data and will *not* save the change to metadata.
/// Use [`Header::seal`], [`SealedHeader`] and unlock if you need hash to be persistent.
pub fn hash_slow(&self) -> B256 {
keccak256(alloy_rlp::encode(self))
}
/// Checks if the header is empty - has no transactions and no ommers
pub fn is_empty(&self) -> bool {
self.transaction_root_is_empty() &&
self.ommers_hash_is_empty() &&
self.withdrawals_root.map_or(true, |root| root == EMPTY_ROOT_HASH)
}
/// Check if the ommers hash equals to empty hash list.
pub fn ommers_hash_is_empty(&self) -> bool {
self.ommers_hash == EMPTY_OMMER_ROOT_HASH
}
/// Check if the transaction root equals to empty root.
pub fn transaction_root_is_empty(&self) -> bool {
self.transactions_root == EMPTY_ROOT_HASH
}
/// Returns the blob fee for _this_ block according to the EIP-4844 spec.
///
/// Returns `None` if `excess_blob_gas` is None
pub fn blob_fee(&self) -> Option<u128> {
self.excess_blob_gas.map(calc_blob_gasprice)
}
/// Returns the blob fee for the next block according to the EIP-4844 spec.
///
/// Returns `None` if `excess_blob_gas` is None.
///
/// See also [`Self::next_block_excess_blob_gas`]
pub fn next_block_blob_fee(&self) -> Option<u128> {
self.next_block_excess_blob_gas().map(calc_blob_gasprice)
}
/// Calculate base fee for next block according to the EIP-1559 spec.
///
/// Returns a `None` if no base fee is set, no EIP-1559 support
pub fn next_block_base_fee(&self, base_fee_params: BaseFeeParams) -> Option<u64> {
Some(calc_next_block_base_fee(
self.gas_used as u128,
self.gas_limit as u128,
self.base_fee_per_gas? as u128,
base_fee_params,
) as u64)
}
/// Calculate excess blob gas for the next block according to the EIP-4844 spec.
///
/// Returns a `None` if no excess blob gas is set, no EIP-4844 support
pub fn next_block_excess_blob_gas(&self) -> Option<u64> {
Some(calc_excess_blob_gas(self.excess_blob_gas?, self.blob_gas_used?))
}
/// Seal the header with a known hash.
///
/// WARNING: This method does not perform validation whether the hash is correct.
#[inline]
pub const fn seal(self, hash: B256) -> SealedHeader {
SealedHeader::new(self, hash)
}
/// Calculate hash and seal the Header so that it can't be changed.
#[inline]
pub fn seal_slow(self) -> SealedHeader {
let hash = self.hash_slow();
self.seal(hash)
}
/// Calculate a heuristic for the in-memory size of the [Header].
#[inline]
pub fn size(&self) -> usize {
mem::size_of::<B256>() + // parent hash
mem::size_of::<B256>() + // ommers hash
mem::size_of::<Address>() + // beneficiary
mem::size_of::<B256>() + // state root
mem::size_of::<B256>() + // transactions root
mem::size_of::<B256>() + // receipts root
mem::size_of::<Option<B256>>() + // withdrawals root
mem::size_of::<Bloom>() + // logs bloom
mem::size_of::<U256>() + // difficulty
mem::size_of::<BlockNumber>() + // number
mem::size_of::<u64>() + // gas limit
mem::size_of::<u64>() + // gas used
mem::size_of::<u64>() + // timestamp
mem::size_of::<B256>() + // mix hash
mem::size_of::<u64>() + // nonce
mem::size_of::<Option<u64>>() + // base fee per gas
mem::size_of::<Option<u64>>() + // blob gas used
mem::size_of::<Option<u64>>() + // excess blob gas
mem::size_of::<Option<B256>>() + // parent beacon block root
self.extra_data.len() // extra data
}
fn header_payload_length(&self) -> usize {
let mut length = 0;
length += self.parent_hash.length(); // Hash of the previous block.
length += self.ommers_hash.length(); // Hash of uncle blocks.
length += self.beneficiary.length(); // Address that receives rewards.
length += self.state_root.length(); // Root hash of the state object.
length += self.transactions_root.length(); // Root hash of transactions in the block.
length += self.receipts_root.length(); // Hash of transaction receipts.
length += self.logs_bloom.length(); // Data structure containing event logs.
length += self.difficulty.length(); // Difficulty value of the block.
length += U256::from(self.number).length(); // Block number.
length += U256::from(self.gas_limit).length(); // Maximum gas allowed.
length += U256::from(self.gas_used).length(); // Actual gas used.
length += self.timestamp.length(); // Block timestamp.
length += self.extra_data.length(); // Additional arbitrary data.
length += self.mix_hash.length(); // Hash used for mining.
length += B64::new(self.nonce.to_be_bytes()).length(); // Nonce for mining.
if let Some(base_fee) = self.base_fee_per_gas {
// Adding base fee length if it exists.
length += U256::from(base_fee).length();
}
if let Some(root) = self.withdrawals_root {
// Adding withdrawals_root length if it exists.
length += root.length();
}
if let Some(blob_gas_used) = self.blob_gas_used {
// Adding blob_gas_used length if it exists.
length += U256::from(blob_gas_used).length();
}
if let Some(excess_blob_gas) = self.excess_blob_gas {
// Adding excess_blob_gas length if it exists.
length += U256::from(excess_blob_gas).length();
}
if let Some(parent_beacon_block_root) = self.parent_beacon_block_root {
length += parent_beacon_block_root.length();
}
if let Some(requests_root) = self.requests_root {
length += requests_root.length();
}
length
}
}
impl Encodable for Header {
    fn encode(&self, out: &mut dyn BufMut) {
        // Create a header indicating the encoded content is a list with the payload length computed
        // from the header's payload calculation function.
        let list_header =
            alloy_rlp::Header { list: true, payload_length: self.header_payload_length() };
        list_header.encode(out);
        // Encode each header field sequentially. The field order is part of the RLP encoding
        // and must not change.
        self.parent_hash.encode(out); // Encode parent hash.
        self.ommers_hash.encode(out); // Encode ommer's hash.
        self.beneficiary.encode(out); // Encode beneficiary.
        self.state_root.encode(out); // Encode state root.
        self.transactions_root.encode(out); // Encode transactions root.
        self.receipts_root.encode(out); // Encode receipts root.
        self.logs_bloom.encode(out); // Encode logs bloom.
        self.difficulty.encode(out); // Encode difficulty.
        U256::from(self.number).encode(out); // Encode block number.
        U256::from(self.gas_limit).encode(out); // Encode gas limit.
        U256::from(self.gas_used).encode(out); // Encode gas used.
        self.timestamp.encode(out); // Encode timestamp.
        self.extra_data.encode(out); // Encode extra data.
        self.mix_hash.encode(out); // Encode mix hash.
        B64::new(self.nonce.to_be_bytes()).encode(out); // Encode nonce as 8 big-endian bytes.
        // The remaining fields are fork-dependent trailing fields: each is only emitted when
        // it is `Some`. Note that no placeholders are written for a missing earlier field, so
        // a header with (e.g.) a withdrawals root but no base fee will not round-trip; see the
        // comment on `requests_root` below.
        // Encode base fee (EIP-1559).
        if let Some(ref base_fee) = self.base_fee_per_gas {
            U256::from(*base_fee).encode(out);
        }
        // Encode withdrawals root (EIP-4895).
        if let Some(ref root) = self.withdrawals_root {
            root.encode(out);
        }
        // Encode blob gas used (EIP-4844).
        if let Some(ref blob_gas_used) = self.blob_gas_used {
            U256::from(*blob_gas_used).encode(out);
        }
        // Encode excess blob gas (EIP-4844).
        if let Some(ref excess_blob_gas) = self.excess_blob_gas {
            U256::from(*excess_blob_gas).encode(out);
        }
        // Encode parent beacon block root, if present.
        if let Some(ref parent_beacon_block_root) = self.parent_beacon_block_root {
            parent_beacon_block_root.encode(out);
        }
        // Encode EIP-7685 requests root
        //
        // If new fields are added, the above pattern will need to
        // be repeated and placeholders added. Otherwise, it's impossible to tell _which_
        // fields are missing. This is mainly relevant for contrived cases where a header is
        // created at random, for example:
        // * A header is created with a withdrawals root, but no base fee. Shanghai blocks are
        //   post-London, so this is technically not valid. However, a tool like proptest would
        //   generate a block like this.
        if let Some(ref requests_root) = self.requests_root {
            requests_root.encode(out);
        }
    }
    fn length(&self) -> usize {
        // Total encoded length = payload length + length of the list header that wraps it.
        let mut length = 0;
        length += self.header_payload_length();
        length += length_of_length(length);
        length
    }
}
impl Decodable for Header {
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        // A header must be RLP-encoded as a list; reject string-typed payloads.
        let rlp_head = alloy_rlp::Header::decode(buf)?;
        if !rlp_head.list {
            return Err(alloy_rlp::Error::UnexpectedString)
        }
        // Snapshot the remaining buffer length so consumed payload bytes can be tracked.
        let started_len = buf.len();
        // Decode the fields present in every header. All fork-dependent trailing fields
        // start out as `None` and are filled in below only if payload bytes remain.
        let mut this = Self {
            parent_hash: Decodable::decode(buf)?,
            ommers_hash: Decodable::decode(buf)?,
            beneficiary: Decodable::decode(buf)?,
            state_root: Decodable::decode(buf)?,
            transactions_root: Decodable::decode(buf)?,
            receipts_root: Decodable::decode(buf)?,
            logs_bloom: Decodable::decode(buf)?,
            difficulty: Decodable::decode(buf)?,
            number: u64::decode(buf)?,
            gas_limit: u64::decode(buf)?,
            gas_used: u64::decode(buf)?,
            timestamp: Decodable::decode(buf)?,
            extra_data: Decodable::decode(buf)?,
            mix_hash: Decodable::decode(buf)?,
            // The nonce is encoded as 8 big-endian bytes.
            nonce: u64::from_be_bytes(B64::decode(buf)?.0),
            base_fee_per_gas: None,
            withdrawals_root: None,
            blob_gas_used: None,
            excess_blob_gas: None,
            parent_beacon_block_root: None,
            requests_root: None,
        };
        // Base fee for post-london headers. Like every trailing field below, it is decoded
        // only while the declared payload still has unconsumed bytes.
        if started_len - buf.len() < rlp_head.payload_length {
            this.base_fee_per_gas = Some(u64::decode(buf)?);
        }
        // Withdrawals root for post-shanghai headers
        if started_len - buf.len() < rlp_head.payload_length {
            this.withdrawals_root = Some(Decodable::decode(buf)?);
        }
        // Blob gas used and excess blob gas for post-cancun headers
        if started_len - buf.len() < rlp_head.payload_length {
            this.blob_gas_used = Some(u64::decode(buf)?);
        }
        if started_len - buf.len() < rlp_head.payload_length {
            this.excess_blob_gas = Some(u64::decode(buf)?);
        }
        // Decode parent beacon block root.
        if started_len - buf.len() < rlp_head.payload_length {
            this.parent_beacon_block_root = Some(B256::decode(buf)?);
        }
        // Decode requests root.
        //
        // If new fields are added, the above pattern will need to
        // be repeated and placeholders decoded. Otherwise, it's impossible to tell _which_
        // fields are missing. This is mainly relevant for contrived cases where a header is
        // created at random, for example:
        // * A header is created with a withdrawals root, but no base fee. Shanghai blocks are
        //   post-London, so this is technically not valid. However, a tool like proptest would
        //   generate a block like this.
        if started_len - buf.len() < rlp_head.payload_length {
            this.requests_root = Some(B256::decode(buf)?);
        }
        // The bytes consumed must match the payload length declared by the list header.
        let consumed = started_len - buf.len();
        if consumed != rlp_head.payload_length {
            return Err(alloy_rlp::Error::ListLengthMismatch {
                expected: rlp_head.payload_length,
                got: consumed,
            })
        }
        Ok(this)
    }
}

View File

@@ -0,0 +1,156 @@
use super::Header;
use alloy_eips::BlockNumHash;
use alloy_primitives::{keccak256, BlockHash};
#[cfg(any(test, feature = "test-utils"))]
use alloy_primitives::{BlockNumber, B256, U256};
use alloy_rlp::{Decodable, Encodable};
use bytes::BufMut;
use derive_more::{AsRef, Deref};
#[cfg(any(test, feature = "arbitrary"))]
use proptest::prelude::*;
use reth_codecs::{add_arbitrary_tests, main_codec, Compact};
use std::mem;
/// A [`Header`] that is sealed at a precalculated hash, use [`SealedHeader::unseal()`] if you want
/// to modify header.
///
/// The cached hash is the keccak256 of the header's RLP encoding (see the `Decodable` impl),
/// stored so it does not have to be recomputed on every access.
#[main_codec(no_arbitrary)]
#[add_arbitrary_tests(rlp, compact)]
#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref)]
pub struct SealedHeader {
    /// Locked Header hash.
    hash: BlockHash,
    /// Locked Header fields. Private so the hash cannot silently go stale.
    #[as_ref]
    #[deref]
    header: Header,
}
impl SealedHeader {
    /// Assembles a sealed header from a header and its precomputed block hash.
    #[inline]
    pub const fn new(header: Header, hash: BlockHash) -> Self {
        Self { hash, header }
    }
    /// Borrows the sealed [`Header`] fields.
    #[inline]
    pub const fn header(&self) -> &Header {
        &self.header
    }
    /// The block hash this header is sealed at.
    #[inline]
    pub const fn hash(&self) -> BlockHash {
        self.hash
    }
    /// Consumes the seal and yields the raw [`Header`] so it can be modified again.
    pub fn unseal(self) -> Header {
        self.header
    }
    /// Splits into the raw header and its hash; the inverse of [`Header::seal_slow`].
    pub fn split(self) -> (Header, BlockHash) {
        let Self { hash, header } = self;
        (header, hash)
    }
    /// The (block number, block hash) pair for this header.
    pub fn num_hash(&self) -> BlockNumHash {
        BlockNumHash::new(self.number, self.hash)
    }
    /// Heuristic in-memory size: the inner header plus the cached hash.
    #[inline]
    pub fn size(&self) -> usize {
        mem::size_of::<BlockHash>() + self.header.size()
    }
}
impl Default for SealedHeader {
    /// Seals the default [`Header`], computing its hash on the spot.
    fn default() -> Self {
        Header::default().seal_slow()
    }
}
impl Encodable for SealedHeader {
    fn encode(&self, out: &mut dyn BufMut) {
        // Only the header is written; the hash is derivable from the encoding and is
        // recomputed when decoding.
        self.header.encode(out);
    }
}
impl Decodable for SealedHeader {
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        // Decode on a scratch cursor so the caller's buffer still points at the raw bytes
        // that make up the header (and is left untouched on error).
        let mut scratch: &[u8] = *buf;
        let header = Header::decode(&mut scratch)?;
        // The seal hash is the keccak of exactly the bytes the header decoding consumed.
        let consumed = buf.len() - scratch.len();
        let hash = keccak256(&buf[..consumed]);
        // Advance the caller's buffer past the decoded header.
        *buf = scratch;
        Ok(Self { header, hash })
    }
}
#[cfg(any(test, feature = "test-utils"))]
impl SealedHeader {
    // NOTE: these helpers mutate the inner header without recomputing the cached hash, so
    // after calling them the seal may no longer match the header. They exist purely for
    // tests that need to fabricate headers.
    /// Updates the block header.
    pub fn set_header(&mut self, header: Header) {
        self.header = header
    }
    /// Updates the block hash.
    pub fn set_hash(&mut self, hash: BlockHash) {
        self.hash = hash
    }
    /// Updates the parent block hash.
    pub fn set_parent_hash(&mut self, hash: BlockHash) {
        self.header.parent_hash = hash
    }
    /// Updates the block number.
    pub fn set_block_number(&mut self, number: BlockNumber) {
        self.header.number = number;
    }
    /// Updates the block state root.
    pub fn set_state_root(&mut self, state_root: B256) {
        self.header.state_root = state_root;
    }
    /// Updates the block difficulty.
    pub fn set_difficulty(&mut self, difficulty: U256) {
        self.header.difficulty = difficulty;
    }
}
#[cfg(any(test, feature = "arbitrary"))]
impl proptest::arbitrary::Arbitrary for SealedHeader {
    type Parameters = ();
    fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
        // Generate a fork-consistent header via the shared strategy and seal it, so the
        // produced hash always matches the header it is paired with.
        crate::test_utils::valid_header_strategy().prop_map(|header| header.seal_slow()).boxed()
    }
    type Strategy = proptest::strategy::BoxedStrategy<Self>;
}
#[cfg(any(test, feature = "arbitrary"))]
impl<'a> arbitrary::Arbitrary<'a> for SealedHeader {
    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
        // Draw a raw header plus the fork knobs, normalize the combination into a
        // fork-consistent header, then seal it at its computed hash.
        let header = crate::test_utils::generate_valid_header(
            u.arbitrary()?,
            u.arbitrary()?,
            u.arbitrary()?,
            u.arbitrary()?,
            u.arbitrary()?,
        );
        Ok(header.seal_slow())
    }
}

View File

@@ -0,0 +1,66 @@
//! Test utilities to generate random valid headers.
use crate::Header;
use alloy_primitives::B256;
use proptest::{arbitrary::any, prop_compose};
/// Generates a header which is valid __with respect to past and future forks__. This means, for
/// example, that if the withdrawals root is present, the base fee per gas is also present.
///
/// If blob gas used were present, then the excess blob gas and parent beacon block root are also
/// present. In this example, the withdrawals root would also be present.
///
/// This __does not, and should not guarantee__ that the header is valid with respect to __anything
/// else__.
pub const fn generate_valid_header(
    mut header: Header,
    eip_4844_active: bool,
    blob_gas_used: u64,
    excess_blob_gas: u64,
    parent_beacon_block_root: B256,
) -> Header {
    // EIP-1559 logic: without a base fee the header is pre-London, so every later
    // fork-dependent field must be cleared too.
    if header.base_fee_per_gas.is_none() {
        // If EIP-1559 is not active, clear related fields
        header.withdrawals_root = None;
        header.blob_gas_used = None;
        header.excess_blob_gas = None;
        header.parent_beacon_block_root = None;
    } else if header.withdrawals_root.is_none() {
        // If EIP-4895 is not active, clear related fields
        header.blob_gas_used = None;
        header.excess_blob_gas = None;
        header.parent_beacon_block_root = None;
    } else if eip_4844_active {
        // Set fields based on EIP-4844 being active
        header.blob_gas_used = Some(blob_gas_used);
        header.excess_blob_gas = Some(excess_blob_gas);
        header.parent_beacon_block_root = Some(parent_beacon_block_root);
    } else {
        // If EIP-4844 is not active, clear related fields
        header.blob_gas_used = None;
        header.excess_blob_gas = None;
        header.parent_beacon_block_root = None;
    }
    // todo(onbjerg): adjust this for eip-7685; for now the requests root is always cleared
    header.requests_root = None;
    header
}
prop_compose! {
    /// Generates a proptest strategy for constructing an instance of a header which is valid __with
    /// respect to past and future forks__.
    ///
    /// See docs for [`generate_valid_header`] for more information.
    pub fn valid_header_strategy()(
        header in any::<Header>(),
        eip_4844_active in any::<bool>(),
        blob_gas_used in any::<u64>(),
        excess_blob_gas in any::<u64>(),
        parent_beacon_block_root in any::<B256>()
    ) -> Header {
        // Normalize the arbitrary inputs into a fork-consistent header.
        generate_valid_header(header, eip_4844_active, blob_gas_used, excess_blob_gas, parent_beacon_block_root)
    }
}

View File

@@ -10,6 +10,18 @@
#![allow(unknown_lints, non_local_definitions)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#[cfg(feature = "alloy-compat")]
mod alloy_compat;
/// Common constants.
pub mod constants;
/// Minimal account
pub mod account;
pub use account::Account;
/// Common header types
pub mod header;
#[cfg(any(test, feature = "arbitrary", feature = "test-utils"))]
pub use header::test_utils;
pub use header::{Header, HeaderError, SealedHeader};

View File

@@ -18,16 +18,15 @@ reth-codecs.workspace = true
reth-ethereum-forks.workspace = true
reth-network-peers.workspace = true
reth-static-file-types.workspace = true
reth-trie-types.workspace = true
reth-trie-common.workspace = true
revm.workspace = true
revm-primitives = { workspace = true, features = ["serde"] }
# ethereum
alloy-chains = { workspace = true, features = ["serde", "rlp"] }
alloy-consensus = { workspace = true, features = ["arbitrary", "serde"] }
alloy-consensus = { workspace = true, features = ["serde"] }
alloy-primitives = { workspace = true, features = ["rand", "rlp"] }
alloy-rlp = { workspace = true, features = ["arrayvec"] }
alloy-trie = { workspace = true, features = ["serde"] }
alloy-rpc-types = { workspace = true, optional = true }
alloy-genesis.workspace = true
alloy-eips = { workspace = true, features = ["serde"] }
@@ -45,21 +44,16 @@ c-kzg = { workspace = true, features = ["serde"], optional = true }
bytes.workspace = true
byteorder = "1"
derive_more.workspace = true
itertools.workspace = true
modular-bitfield.workspace = true
once_cell.workspace = true
rayon.workspace = true
serde.workspace = true
serde_json.workspace = true
tempfile = { workspace = true, optional = true }
thiserror.workspace = true
thiserror-no-std = { workspace = true , default-features = false }
zstd = { version = "0.13", features = ["experimental"], optional = true }
roaring = "0.10.2"
# `test-utils` feature
hash-db = { version = "~0.15", optional = true }
plain_hasher = { version = "0.2", optional = true }
# arbitrary utils
arbitrary = { workspace = true, features = ["derive"], optional = true }
proptest = { workspace = true, optional = true }
@@ -82,9 +76,6 @@ test-fuzz.workspace = true
toml.workspace = true
triehash = "0.8"
hash-db = "~0.15"
plain_hasher = "0.2"
sucds = "0.8.1"
criterion.workspace = true
@@ -96,7 +87,7 @@ pprof = { workspace = true, features = [
secp256k1.workspace = true
[features]
default = ["c-kzg", "zstd-codec", "alloy-compat"]
default = ["c-kzg", "zstd-codec", "alloy-compat", "std"]
asm-keccak = ["alloy-primitives/asm-keccak"]
arbitrary = [
"reth-primitives-traits/arbitrary",
@@ -105,6 +96,7 @@ arbitrary = [
"nybbles/arbitrary",
"alloy-trie/arbitrary",
"alloy-chains/arbitrary",
"alloy-consensus/arbitrary",
"alloy-eips/arbitrary",
"dep:arbitrary",
"dep:proptest",
@@ -119,14 +111,17 @@ c-kzg = [
"alloy-eips/kzg",
]
zstd-codec = ["dep:zstd"]
clap = ["reth-static-file-types/clap"]
optimism = [
"reth-codecs/optimism",
"reth-ethereum-forks/optimism",
"revm/optimism",
]
alloy-compat = ["alloy-rpc-types"]
test-utils = ["dep:plain_hasher", "dep:hash-db"]
alloy-compat = [
"reth-primitives-traits/alloy-compat",
"alloy-rpc-types",
]
std = ["thiserror-no-std/std"]
test-utils = ["reth-primitives-traits/test-utils"]
[[bench]]
name = "recover_ecdsa_crit"
@@ -137,11 +132,6 @@ name = "validate_blob_tx"
required-features = ["arbitrary", "c-kzg"]
harness = false
[[bench]]
name = "trie_root"
required-features = ["arbitrary", "test-utils"]
harness = false
[[bench]]
name = "integer_list"
harness = false

View File

@@ -89,20 +89,13 @@ criterion_main!(benches);
/// adapted to work with `sucds = "0.8.1"`
#[allow(unused, unreachable_pub)]
mod elias_fano {
use derive_more::Deref;
use std::{fmt, ops::Deref};
use sucds::{mii_sequences::EliasFano, Serializable};
#[derive(Clone, PartialEq, Eq, Default)]
#[derive(Clone, PartialEq, Eq, Default, Deref)]
pub struct IntegerList(pub EliasFano);
impl Deref for IntegerList {
type Target = EliasFano;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl fmt::Debug for IntegerList {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let vec: Vec<usize> = self.0.iter(0).collect();
@@ -239,7 +232,7 @@ mod elias_fano {
}
/// Primitives error type.
#[derive(Debug, thiserror::Error)]
#[derive(Debug, thiserror_no_std::Error)]
pub enum EliasFanoError {
/// The provided input is invalid.
#[error("{0}")]

View File

@@ -1,17 +1,17 @@
use crate::revm_primitives::{Bytecode as RevmBytecode, Bytes};
use byteorder::{BigEndian, ReadBytesExt};
use bytes::Buf;
use derive_more::Deref;
use reth_codecs::Compact;
use revm_primitives::JumpTable;
use serde::{Deserialize, Serialize};
use std::ops::Deref;
pub use reth_primitives_traits::Account;
/// Bytecode for an account.
///
/// A wrapper around [`revm::primitives::Bytecode`][RevmBytecode] with encoding/decoding support.
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, Deref)]
pub struct Bytecode(pub RevmBytecode);
impl Bytecode {
@@ -23,14 +23,6 @@ impl Bytecode {
}
}
impl Deref for Bytecode {
type Target = RevmBytecode;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl Compact for Bytecode {
fn to_compact<B>(self, buf: &mut B) -> usize
where

View File

@@ -1,13 +1,16 @@
//! Common conversions from alloy types.
use crate::{
constants::EMPTY_TRANSACTIONS, transaction::extract_chain_id, Block, Header, Signature,
Transaction, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844,
TxLegacy, TxType,
constants::EMPTY_TRANSACTIONS, transaction::extract_chain_id, Block, Signature, Transaction,
TransactionSigned, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, TxLegacy,
TxType,
};
use alloy_primitives::TxKind;
use alloy_rlp::Error as RlpError;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
impl TryFrom<alloy_rpc_types::Block> for Block {
type Error = alloy_rpc_types::ConversionError;
@@ -61,54 +64,6 @@ impl TryFrom<alloy_rpc_types::Block> for Block {
}
}
impl TryFrom<alloy_rpc_types::Header> for Header {
    type Error = alloy_rpc_types::ConversionError;
    /// Converts an RPC header into a consensus [`Header`].
    ///
    /// Fails when a numeric RPC field does not fit its consensus-typed counterpart, or when
    /// the block number is absent.
    fn try_from(header: alloy_rpc_types::Header) -> Result<Self, Self::Error> {
        use alloy_rpc_types::ConversionError;
        Ok(Self {
            // Fallible narrowing conversions each map into a dedicated `ConversionError`
            // variant so callers can tell which field overflowed.
            base_fee_per_gas: header
                .base_fee_per_gas
                .map(|base_fee_per_gas| {
                    base_fee_per_gas.try_into().map_err(ConversionError::BaseFeePerGasConversion)
                })
                .transpose()?,
            beneficiary: header.miner,
            blob_gas_used: header
                .blob_gas_used
                .map(|blob_gas_used| {
                    blob_gas_used.try_into().map_err(ConversionError::BlobGasUsedConversion)
                })
                .transpose()?,
            difficulty: header.difficulty,
            excess_blob_gas: header
                .excess_blob_gas
                .map(|excess_blob_gas| {
                    excess_blob_gas.try_into().map_err(ConversionError::ExcessBlobGasConversion)
                })
                .transpose()?,
            extra_data: header.extra_data,
            gas_limit: header.gas_limit.try_into().map_err(ConversionError::GasLimitConversion)?,
            gas_used: header.gas_used.try_into().map_err(ConversionError::GasUsedConversion)?,
            logs_bloom: header.logs_bloom,
            // Optional RPC fields fall back to their zero values when absent.
            mix_hash: header.mix_hash.unwrap_or_default(),
            nonce: u64::from_be_bytes(header.nonce.unwrap_or_default().0),
            // A header without a block number cannot be represented as a consensus header.
            number: header.number.ok_or(ConversionError::MissingBlockNumber)?,
            ommers_hash: header.uncles_hash,
            parent_beacon_block_root: header.parent_beacon_block_root,
            parent_hash: header.parent_hash,
            receipts_root: header.receipts_root,
            state_root: header.state_root,
            timestamp: header.timestamp,
            transactions_root: header.transactions_root,
            withdrawals_root: header.withdrawals_root,
            // TODO: requests_root: header.requests_root,
            requests_root: None,
        })
    }
}
impl TryFrom<alloy_rpc_types::Transaction> for Transaction {
type Error = alloy_rpc_types::ConversionError;

View File

@@ -2,16 +2,20 @@ use crate::{
Address, Bytes, GotExpected, Header, Requests, SealedHeader, TransactionSigned,
TransactionSignedEcRecovered, Withdrawals, B256,
};
use alloy_rlp::{RlpDecodable, RlpEncodable};
#[cfg(any(test, feature = "arbitrary"))]
use proptest::prelude::{any, prop_compose};
use reth_codecs::derive_arbitrary;
use serde::{Deserialize, Serialize};
use std::ops::Deref;
pub use alloy_eips::eip1898::{
BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, ForkBlock, RpcBlockHash,
};
use alloy_rlp::{RlpDecodable, RlpEncodable};
use derive_more::{Deref, DerefMut};
#[cfg(any(test, feature = "arbitrary"))]
use proptest::prelude::prop_compose;
use reth_codecs::derive_arbitrary;
#[cfg(any(test, feature = "arbitrary"))]
pub use reth_primitives_traits::test_utils::{generate_valid_header, valid_header_strategy};
use serde::{Deserialize, Serialize};
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
// HACK(onbjerg): we need this to always set `requests` to `None` since we might otherwise generate
// a block with `None` withdrawals and `Some` requests, in which case we end up trying to decode the
@@ -28,12 +32,13 @@ prop_compose! {
/// Withdrawals can be optionally included at the end of the RLP encoded message.
#[derive_arbitrary(rlp, 25)]
#[derive(
Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, RlpEncodable, RlpDecodable,
Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Deref, RlpEncodable, RlpDecodable,
)]
#[rlp(trailing)]
pub struct Block {
/// Block header.
#[cfg_attr(any(test, feature = "arbitrary"), proptest(strategy = "valid_header_strategy()"))]
#[deref]
pub header: Header,
/// Transactions in this block.
#[cfg_attr(
@@ -175,23 +180,18 @@ impl Block {
pub fn size(&self) -> usize {
self.header.size() +
// take into account capacity
self.body.iter().map(TransactionSigned::size).sum::<usize>() + self.body.capacity() * std::mem::size_of::<TransactionSigned>() +
self.ommers.iter().map(Header::size).sum::<usize>() + self.ommers.capacity() * std::mem::size_of::<Header>() +
self.withdrawals.as_ref().map_or(std::mem::size_of::<Option<Withdrawals>>(), Withdrawals::total_size)
}
}
impl Deref for Block {
type Target = Header;
fn deref(&self) -> &Self::Target {
&self.header
self.body.iter().map(TransactionSigned::size).sum::<usize>() + self.body.capacity() * core::mem::size_of::<TransactionSigned>() +
self.ommers.iter().map(Header::size).sum::<usize>() + self.ommers.capacity() * core::mem::size_of::<Header>() +
self.withdrawals.as_ref().map_or(core::mem::size_of::<Option<Withdrawals>>(), Withdrawals::total_size)
}
}
/// Sealed block with senders recovered from transactions.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
#[derive(Debug, Clone, PartialEq, Eq, Default, Deref, DerefMut)]
pub struct BlockWithSenders {
/// Block
#[deref]
#[deref_mut]
pub block: Block,
/// List of senders that match the transactions in the block
pub senders: Vec<Address>,
@@ -253,30 +253,28 @@ impl BlockWithSenders {
}
}
impl Deref for BlockWithSenders {
type Target = Block;
fn deref(&self) -> &Self::Target {
&self.block
}
}
#[cfg(any(test, feature = "test-utils"))]
impl std::ops::DerefMut for BlockWithSenders {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.block
}
}
/// Sealed Ethereum full block.
///
/// Withdrawals can be optionally included at the end of the RLP encoded message.
#[derive_arbitrary(rlp)]
#[derive(
Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, RlpEncodable, RlpDecodable,
Debug,
Clone,
PartialEq,
Eq,
Default,
Serialize,
Deserialize,
Deref,
DerefMut,
RlpEncodable,
RlpDecodable,
)]
#[rlp(trailing)]
pub struct SealedBlock {
/// Locked block header.
#[deref]
#[deref_mut]
pub header: SealedHeader,
/// Transactions with signatures.
#[cfg_attr(
@@ -397,9 +395,9 @@ impl SealedBlock {
pub fn size(&self) -> usize {
self.header.size() +
// take into account capacity
self.body.iter().map(TransactionSigned::size).sum::<usize>() + self.body.capacity() * std::mem::size_of::<TransactionSigned>() +
self.ommers.iter().map(Header::size).sum::<usize>() + self.ommers.capacity() * std::mem::size_of::<Header>() +
self.withdrawals.as_ref().map_or(std::mem::size_of::<Option<Withdrawals>>(), Withdrawals::total_size)
self.body.iter().map(TransactionSigned::size).sum::<usize>() + self.body.capacity() * core::mem::size_of::<TransactionSigned>() +
self.ommers.iter().map(Header::size).sum::<usize>() + self.ommers.capacity() * core::mem::size_of::<Header>() +
self.withdrawals.as_ref().map_or(core::mem::size_of::<Option<Withdrawals>>(), Withdrawals::total_size)
}
/// Calculates the total gas used by blob transactions in the sealed block.
@@ -450,24 +448,12 @@ impl From<SealedBlock> for Block {
}
}
impl Deref for SealedBlock {
type Target = SealedHeader;
fn deref(&self) -> &Self::Target {
&self.header
}
}
#[cfg(any(test, feature = "test-utils"))]
impl std::ops::DerefMut for SealedBlock {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.header
}
}
/// Sealed block with senders recovered from transactions.
#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)]
#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Deref, DerefMut)]
pub struct SealedBlockWithSenders {
/// Sealed block
#[deref]
#[deref_mut]
pub block: SealedBlock,
/// List of senders that match transactions from block.
pub senders: Vec<Address>,
@@ -521,20 +507,6 @@ impl SealedBlockWithSenders {
}
}
impl Deref for SealedBlockWithSenders {
type Target = SealedBlock;
fn deref(&self) -> &Self::Target {
&self.block
}
}
#[cfg(any(test, feature = "test-utils"))]
impl std::ops::DerefMut for SealedBlockWithSenders {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.block
}
}
/// A response to `GetBlockBodies`, containing bodies if any bodies were found.
///
/// Withdrawals can be optionally included at the end of the RLP encoded message.
@@ -604,12 +576,12 @@ impl BlockBody {
#[inline]
pub fn size(&self) -> usize {
self.transactions.iter().map(TransactionSigned::size).sum::<usize>() +
self.transactions.capacity() * std::mem::size_of::<TransactionSigned>() +
self.transactions.capacity() * core::mem::size_of::<TransactionSigned>() +
self.ommers.iter().map(Header::size).sum::<usize>() +
self.ommers.capacity() * std::mem::size_of::<Header>() +
self.ommers.capacity() * core::mem::size_of::<Header>() +
self.withdrawals
.as_ref()
.map_or(std::mem::size_of::<Option<Withdrawals>>(), Withdrawals::total_size)
.map_or(core::mem::size_of::<Option<Withdrawals>>(), Withdrawals::total_size)
}
}
@@ -624,69 +596,6 @@ impl From<Block> for BlockBody {
}
}
/// Generates a header which is valid __with respect to past and future forks__. This means, for
/// example, that if the withdrawals root is present, the base fee per gas is also present.
///
/// If blob gas used were present, then the excess blob gas and parent beacon block root are also
/// present. In this example, the withdrawals root would also be present.
///
/// This __does not, and should not guarantee__ that the header is valid with respect to __anything
/// else__.
#[cfg(any(test, feature = "arbitrary"))]
pub fn generate_valid_header(
mut header: Header,
eip_4844_active: bool,
blob_gas_used: u64,
excess_blob_gas: u64,
parent_beacon_block_root: B256,
) -> Header {
// EIP-1559 logic
if header.base_fee_per_gas.is_none() {
// If EIP-1559 is not active, clear related fields
header.withdrawals_root = None;
header.blob_gas_used = None;
header.excess_blob_gas = None;
header.parent_beacon_block_root = None;
} else if header.withdrawals_root.is_none() {
// If EIP-4895 is not active, clear related fields
header.blob_gas_used = None;
header.excess_blob_gas = None;
header.parent_beacon_block_root = None;
} else if eip_4844_active {
// Set fields based on EIP-4844 being active
header.blob_gas_used = Some(blob_gas_used);
header.excess_blob_gas = Some(excess_blob_gas);
header.parent_beacon_block_root = Some(parent_beacon_block_root);
} else {
// If EIP-4844 is not active, clear related fields
header.blob_gas_used = None;
header.excess_blob_gas = None;
header.parent_beacon_block_root = None;
}
// todo(onbjerg): adjust this for eip-7685
header.requests_root = None;
header
}
#[cfg(any(test, feature = "arbitrary"))]
prop_compose! {
/// Generates a proptest strategy for constructing an instance of a header which is valid __with
/// respect to past and future forks__.
///
/// See docs for [generate_valid_header] for more information.
pub fn valid_header_strategy()(
header in any::<Header>(),
eip_4844_active in any::<bool>(),
blob_gas_used in any::<u64>(),
excess_blob_gas in any::<u64>(),
parent_beacon_block_root in any::<B256>()
) -> Header {
generate_valid_header(header, eip_4844_active, blob_gas_used, excess_blob_gas, parent_beacon_block_root)
}
}
#[cfg(test)]
mod tests {
use super::{BlockNumberOrTag::*, *};

View File

@@ -5,19 +5,30 @@ use crate::{
},
holesky_nodes,
net::{goerli_nodes, mainnet_nodes, sepolia_nodes},
proofs::state_root_ref_unhashed,
revm_primitives::{address, b256},
Address, BlockNumber, Chain, ChainKind, ForkFilter, ForkFilterKey, ForkHash, ForkId, Genesis,
Hardfork, Head, Header, NamedChain, NodeRecord, SealedHeader, B256, EMPTY_OMMER_ROOT_HASH,
MAINNET_DEPOSIT_CONTRACT, U256,
};
use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
use std::{
collections::BTreeMap,
use core::{
fmt,
fmt::{Display, Formatter},
sync::Arc,
};
use derive_more::From;
use once_cell::sync::Lazy;
use reth_trie_common::root::state_root_ref_unhashed;
use serde::{Deserialize, Serialize};
#[cfg(not(feature = "std"))]
use alloc::{
collections::BTreeMap,
format,
string::{String, ToString},
sync::Arc,
vec::Vec,
};
#[cfg(feature = "std")]
use std::{collections::BTreeMap, sync::Arc};
pub use alloy_eips::eip1559::BaseFeeParams;
@@ -484,15 +495,9 @@ impl From<ForkBaseFeeParams> for BaseFeeParamsKind {
/// A type alias to a vector of tuples of [Hardfork] and [`BaseFeeParams`], sorted by [Hardfork]
/// activation order. This is used to specify dynamic EIP-1559 parameters for chains like Optimism.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, From)]
pub struct ForkBaseFeeParams(Vec<(Hardfork, BaseFeeParams)>);
impl From<Vec<(Hardfork, BaseFeeParams)>> for ForkBaseFeeParams {
fn from(params: Vec<(Hardfork, BaseFeeParams)>) -> Self {
Self(params)
}
}
/// An Ethereum chain specification.
///
/// A chain specification describes:
@@ -847,6 +852,12 @@ impl ChainSpec {
self.fork(Hardfork::Homestead).active_at_block(block_number)
}
/// The Paris hardfork (merge) is activated via terminal total difficulty, so it cannot be
/// determined from a block number alone. If the merge block is known for this chain spec,
/// returns `Some(true)` when `block_number` is at or past it; otherwise returns `None`.
pub fn is_paris_active_at_block(&self, block_number: u64) -> Option<bool> {
    // `None` means the merge block is unknown, not that Paris is inactive.
    self.paris_block_and_final_difficulty.map(|(paris_block, _)| block_number >= paris_block)
}
/// Convenience method to check if [`Hardfork::Bedrock`] is active at a given block number.
#[cfg(feature = "optimism")]
#[inline]
@@ -1476,7 +1487,7 @@ struct DisplayFork {
}
impl Display for DisplayFork {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
let name_with_eip = if let Some(eip) = &self.eip {
format!("{} ({})", self.name, eip)
} else {
@@ -1550,13 +1561,13 @@ pub struct DisplayHardforks {
}
impl Display for DisplayHardforks {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
fn format(
header: &str,
forks: &[DisplayFork],
next_is_empty: bool,
f: &mut Formatter<'_>,
) -> std::fmt::Result {
) -> fmt::Result {
writeln!(f, "{header}:")?;
let mut iter = forks.iter().peekable();
while let Some(fork) = iter.next() {
@@ -1726,8 +1737,10 @@ impl OptimismGenesisInfo {
#[cfg(test)]
mod tests {
use reth_trie_common::TrieAccount;
use super::*;
use crate::{b256, hex, proofs::IntoTrieAccount, ChainConfig, GenesisAccount};
use crate::{b256, hex, ChainConfig, GenesisAccount};
use std::{collections::HashMap, str::FromStr};
fn test_fork_ids(spec: &ChainSpec, cases: &[(Head, ForkId)]) {
for (block, expected_id) in cases {
@@ -2829,10 +2842,7 @@ Post-merge hard forks (timestamp based):
for (key, expected_rlp) in key_rlp {
let account = chainspec.genesis.alloc.get(&key).expect("account should exist");
assert_eq!(
&alloy_rlp::encode(IntoTrieAccount::to_trie_account(account.clone())),
expected_rlp
);
assert_eq!(&alloy_rlp::encode(TrieAccount::from(account.clone())), expected_rlp);
}
assert_eq!(chainspec.genesis_hash, None);

View File

@@ -1,6 +1,9 @@
use std::{cell::RefCell, thread_local};
use zstd::bulk::{Compressor, Decompressor};
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
/// Compression/Decompression dictionary for `Receipt`.
pub static RECEIPT_DICTIONARY: &[u8] = include_bytes!("./receipt_dictionary.bin");
/// Compression/Decompression dictionary for `Transaction`.

View File

@@ -38,7 +38,7 @@ mod trusted_setup {
}
/// Error type for loading the trusted setup.
#[derive(Debug, thiserror::Error)]
#[derive(Debug, thiserror_no_std::Error)]
pub enum LoadKzgSettingsError {
/// Failed to create temp file to store bytes for loading [`KzgSettings`] via
/// [`KzgSettings::load_trusted_setup_file`].

View File

@@ -0,0 +1,8 @@
/// Represents one Kilogas, or `1_000` gas.
pub const KILOGAS: u64 = 1_000;
/// Represents one Megagas, or `1_000_000` gas.
pub const MEGAGAS: u64 = KILOGAS * 1_000;
/// Represents one Gigagas, or `1_000_000_000` gas.
pub const GIGAGAS: u64 = MEGAGAS * 1_000;

View File

@@ -3,113 +3,26 @@
use crate::{
chain::DepositContract,
revm_primitives::{address, b256},
B256, U256,
};
use std::time::Duration;
pub use reth_primitives_traits::constants::*;
#[cfg(feature = "optimism")]
use crate::chain::BaseFeeParams;
/// Gas units, for example [`GIGAGAS`](gas_units::GIGAGAS).
pub mod gas_units;
/// [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) constants.
pub mod eip4844;
/// The client version: `reth/v{major}.{minor}.{patch}`
pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION"));
/// The first four bytes of the call data for a function call specifies the function to be called.
pub const SELECTOR_LEN: usize = 4;
/// Maximum extra data size in a block after genesis
pub const MAXIMUM_EXTRA_DATA_SIZE: usize = 32;
/// An EPOCH is a series of 32 slots.
pub const EPOCH_SLOTS: u64 = 32;
/// The duration of a slot in seconds.
///
/// This is the time period of 12 seconds in which a randomly chosen validator has time to propose a
/// block.
pub const SLOT_DURATION: Duration = Duration::from_secs(12);
/// An EPOCH is a series of 32 slots (~6.4min).
pub const EPOCH_DURATION: Duration = Duration::from_secs(12 * EPOCH_SLOTS);
/// The default block nonce in the beacon consensus
pub const BEACON_NONCE: u64 = 0u64;
/// The default Ethereum block gas limit.
// TODO: This should be a chain spec parameter.
/// See <https://github.com/paradigmxyz/reth/issues/3233>.
pub const ETHEREUM_BLOCK_GAS_LIMIT: u64 = 30_000_000;
/// The minimum tx fee below which the txpool will reject the transaction.
///
/// Configured to `7` WEI which is the lowest possible value of base fee under mainnet EIP-1559
/// parameters. `BASE_FEE_MAX_CHANGE_DENOMINATOR` <https://eips.ethereum.org/EIPS/eip-1559>
/// is `8`, or 12.5%. Once the base fee has dropped to `7` WEI it cannot decrease further because
/// 12.5% of 7 is less than 1.
///
/// Note that min base fee under different 1559 parameterizations may differ, but there's no
/// significant harm in leaving this setting as is.
pub const MIN_PROTOCOL_BASE_FEE: u64 = 7;
/// Same as [`MIN_PROTOCOL_BASE_FEE`] but as a U256.
pub const MIN_PROTOCOL_BASE_FEE_U256: U256 = U256::from_limbs([7u64, 0, 0, 0]);
/// Initial base fee as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)
pub const EIP1559_INITIAL_BASE_FEE: u64 = 1_000_000_000;
/// Base fee max change denominator as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)
pub const EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u64 = 8;
/// Elasticity multiplier as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)
pub const EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u64 = 2;
/// Minimum gas limit allowed for transactions.
pub const MINIMUM_GAS_LIMIT: u64 = 5000;
/// Deposit contract address: `0x00000000219ab540356cbb839cbe05303d7705fa`
pub const MAINNET_DEPOSIT_CONTRACT: DepositContract = DepositContract::new(
address!("00000000219ab540356cbb839cbe05303d7705fa"),
11052984,
b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"),
);
/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism
/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc.
#[cfg(feature = "optimism")]
pub const OP_MAINNET_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 50;
/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism Canyon
/// hardfork.
#[cfg(feature = "optimism")]
pub const OP_MAINNET_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u128 = 250;
/// Elasticity multiplier for Optimism Mainnet as defined in the Optimism
/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc.
#[cfg(feature = "optimism")]
pub const OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 6;
/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism
/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc.
#[cfg(feature = "optimism")]
pub const OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 50;
/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism Canyon
/// hardfork.
#[cfg(feature = "optimism")]
pub const OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u128 = 250;
/// Elasticity multiplier for Optimism Sepolia as defined in the Optimism
/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc.
#[cfg(feature = "optimism")]
pub const OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 6;
/// Elasticity multiplier for Base Sepolia as defined in the Optimism
/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc.
#[cfg(feature = "optimism")]
pub const BASE_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 10;
/// Get the base fee parameters for Base Sepolia.
#[cfg(feature = "optimism")]
pub const BASE_SEPOLIA_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams {
@@ -152,78 +65,6 @@ pub const OP_CANYON_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams {
elasticity_multiplier: OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER,
};
/// Multiplier for converting gwei to wei.
pub const GWEI_TO_WEI: u64 = 1_000_000_000;
/// Multiplier for converting finney (milliether) to wei.
pub const FINNEY_TO_WEI: u128 = (GWEI_TO_WEI as u128) * 1_000_000;
/// Multiplier for converting ether to wei.
pub const ETH_TO_WEI: u128 = FINNEY_TO_WEI * 1000;
/// Multiplier for converting mgas to gas.
pub const MGAS_TO_GAS: u64 = 1_000_000u64;
/// The Ethereum mainnet genesis hash.
pub const MAINNET_GENESIS_HASH: B256 =
b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3");
/// Goerli genesis hash.
pub const GOERLI_GENESIS_HASH: B256 =
b256!("bf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a");
/// Sepolia genesis hash.
pub const SEPOLIA_GENESIS_HASH: B256 =
b256!("25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9");
/// Holesky genesis hash.
pub const HOLESKY_GENESIS_HASH: B256 =
b256!("ff9006519a8ce843ac9c28549d24211420b546e12ce2d170c77a8cca7964f23d");
/// Testnet genesis hash.
pub const DEV_GENESIS_HASH: B256 =
b256!("2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c");
/// Keccak256 over empty array.
pub const KECCAK_EMPTY: B256 =
b256!("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470");
/// Ommer root of empty list.
pub const EMPTY_OMMER_ROOT_HASH: B256 =
b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347");
/// Root hash of an empty trie.
pub const EMPTY_ROOT_HASH: B256 =
b256!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421");
/// Receipts root of empty receipts set.
pub const EMPTY_RECEIPTS: B256 = EMPTY_ROOT_HASH;
/// Transactions root of empty transactions set.
pub const EMPTY_TRANSACTIONS: B256 = EMPTY_ROOT_HASH;
/// Withdrawals root of empty withdrawals set.
pub const EMPTY_WITHDRAWALS: B256 = EMPTY_ROOT_HASH;
/// The number of blocks to unwind during a reorg that already became a part of canonical chain.
///
/// In reality, the node can end up in this particular situation very rarely. It would happen only
/// if the node process is abruptly terminated during ongoing reorg and doesn't boot back up for
/// long period of time.
///
/// Unwind depth of `3` blocks significantly reduces the chance that the reorged block is kept in
/// the database.
pub const BEACON_CONSENSUS_REORG_UNWIND_DEPTH: u64 = 3;
/// Max seconds from current time allowed for blocks, before they're considered future blocks.
///
/// This is only used when checking whether or not the timestamp for pre-merge blocks is in the
/// future.
///
/// See:
/// <https://github.com/ethereum/go-ethereum/blob/a196f3e8a22b6ad22ced5c2e3baf32bc3ebd4ec9/consensus/ethash/consensus.go#L227-L229>
pub const ALLOWED_FUTURE_BLOCK_TIME_SECONDS: u64 = 15;
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -1,8 +1,11 @@
use std::{
use core::{
fmt,
ops::{Deref, DerefMut},
};
#[cfg(not(feature = "std"))]
use alloc::boxed::Box;
/// A pair of values, one of which is expected and one of which is actual.
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct GotExpected<T> {
@@ -18,6 +21,7 @@ impl<T: fmt::Display> fmt::Display for GotExpected<T> {
}
}
#[cfg(feature = "std")]
impl<T: fmt::Debug + fmt::Display> std::error::Error for GotExpected<T> {}
impl<T> From<(T, T)> for GotExpected<T> {
@@ -55,6 +59,7 @@ impl<T: fmt::Display> fmt::Display for GotExpectedBoxed<T> {
}
}
#[cfg(feature = "std")]
impl<T: fmt::Debug + fmt::Display> std::error::Error for GotExpectedBoxed<T> {}
impl<T> Deref for GotExpectedBoxed<T> {

File diff suppressed because it is too large Load Diff

View File

@@ -1,25 +1,21 @@
use bytes::BufMut;
use core::fmt;
use derive_more::Deref;
use roaring::RoaringTreemap;
use serde::{
de::{SeqAccess, Unexpected, Visitor},
ser::SerializeSeq,
Deserialize, Deserializer, Serialize, Serializer,
};
use std::{fmt, ops::Deref};
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
/// Uses Roaring Bitmaps to hold a list of integers. It provides really good compression with the
/// capability to access its elements without decoding it.
#[derive(Clone, PartialEq, Default)]
#[derive(Clone, PartialEq, Default, Deref)]
pub struct IntegerList(pub RoaringTreemap);
// Lets an `IntegerList` be used transparently wherever a `&RoaringTreemap` is
// expected (method calls, iteration) without exposing the inner field.
impl Deref for IntegerList {
    type Target = RoaringTreemap;
    fn deref(&self) -> &Self::Target {
        // Borrow the wrapped bitmap; no copy is made.
        &self.0
    }
}
impl fmt::Debug for IntegerList {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let vec: Vec<u64> = self.0.iter().collect();
@@ -105,7 +101,7 @@ struct IntegerListVisitor;
impl<'de> Visitor<'de> for IntegerListVisitor {
type Value = IntegerList;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("a usize array")
}
@@ -144,7 +140,7 @@ impl<'a> Arbitrary<'a> for IntegerList {
}
/// Primitives error type.
#[derive(Debug, thiserror::Error)]
#[derive(Debug, thiserror_no_std::Error)]
pub enum RoaringBitmapError {
/// The provided input is invalid.
#[error("the provided input is invalid")]

View File

@@ -17,6 +17,10 @@
// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged
#![allow(unknown_lints, non_local_definitions)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(not(feature = "std"))]
extern crate alloc;
mod account;
#[cfg(feature = "alloy-compat")]
@@ -30,7 +34,7 @@ pub mod constants;
pub mod eip4844;
mod error;
pub mod genesis;
mod header;
pub mod header;
mod integer_list;
mod log;
mod net;
@@ -63,7 +67,7 @@ pub use constants::{
};
pub use error::{GotExpected, GotExpectedBoxed};
pub use genesis::{ChainConfig, Genesis, GenesisAccount};
pub use header::{Header, HeaderValidationError, HeadersDirection, SealedHeader};
pub use header::{Header, HeadersDirection, SealedHeader};
pub use integer_list::IntegerList;
pub use log::{logs_bloom, Log};
pub use net::{

View File

@@ -1,5 +1,8 @@
pub use reth_network_peers::{NodeRecord, NodeRecordParseError, TrustedPeer};
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
// Ethereum bootnodes come from <https://github.com/ledgerwatch/erigon/blob/devel/params/bootnodes.go>
// OP bootnodes come from <https://github.com/ethereum-optimism/op-geth/blob/optimism/params/bootnodes.go>

View File

@@ -1,58 +1,15 @@
//! Helper function for calculating Merkle proofs and hashes.
use crate::{
constants::EMPTY_OMMER_ROOT_HASH, keccak256, Address, Header, Receipt, ReceiptWithBloom,
ReceiptWithBloomRef, Request, TransactionSigned, Withdrawal, B256, U256,
constants::EMPTY_OMMER_ROOT_HASH, keccak256, Header, Receipt, ReceiptWithBloom,
ReceiptWithBloomRef, Request, TransactionSigned, Withdrawal, B256,
};
use reth_trie_types::{hash_builder::HashBuilder, Nibbles};
mod types;
pub use types::{AccountProof, StorageProof};
mod traits;
pub use traits::IntoTrieAccount;
use reth_trie_common::root::{ordered_trie_root, ordered_trie_root_with_encoder};
use alloy_eips::eip7685::Encodable7685;
use alloy_rlp::Encodable;
use itertools::Itertools;
/// Adjust the index of an item for rlp encoding.
///
/// Maps an item's position `i` in a collection of length `len` to the index
/// under which it is inserted into the index-keyed trie. Indices above `0x7f`
/// are kept as-is; index `0x7f` and the last item map to `0`; everything else
/// is shifted up by one.
pub const fn adjust_index_for_rlp(i: usize, len: usize) -> usize {
    match i {
        _ if i > 0x7f => i,
        _ if i == 0x7f || i + 1 == len => 0,
        _ => i + 1,
    }
}
/// Compute a trie root of the collection of rlp encodable items.
pub fn ordered_trie_root<T: Encodable>(items: &[T]) -> B256 {
ordered_trie_root_with_encoder(items, |item, buf| item.encode(buf))
}
/// Compute a trie root of the collection of items with a custom encoder.
///
/// Items are keyed by their RLP-adjusted position (see [`adjust_index_for_rlp`])
/// and their values are produced by `encode`, which writes each item's bytes
/// into a reusable buffer.
pub fn ordered_trie_root_with_encoder<T, F>(items: &[T], mut encode: F) -> B256
where
    F: FnMut(&T, &mut Vec<u8>),
{
    let len = items.len();
    let mut hb = HashBuilder::default();
    // Reused across iterations to avoid one allocation per item.
    let mut rlp_buf = Vec::new();
    for i in 0..len {
        let idx = adjust_index_for_rlp(i, len);
        let key = alloy_rlp::encode_fixed_size(&idx);
        rlp_buf.clear();
        encode(&items[idx], &mut rlp_buf);
        hb.add_leaf(Nibbles::unpack(&key), &rlp_buf);
    }
    hb.root()
}
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
/// Calculate a transaction root.
///
@@ -175,109 +132,16 @@ pub fn calculate_ommers_root(ommers: &[Header]) -> B256 {
keccak256(ommers_rlp)
}
/// Hashes and sorts account keys, then proceeds to calculating the root hash of the state
/// represented as MPT.
/// See [`state_root_unsorted`] for more info.
pub fn state_root_ref_unhashed<'a, A: IntoTrieAccount + Clone + 'a>(
    state: impl IntoIterator<Item = (&'a Address, &'a A)>,
) -> B256 {
    // Hash each address and clone the borrowed account so the sorted pass owns its data.
    let hashed =
        state.into_iter().map(|(address, account)| (keccak256(address), account.clone()));
    state_root_unsorted(hashed)
}
/// Hashes and sorts account keys, then proceeds to calculating the root hash of the state
/// represented as MPT.
/// See [`state_root_unsorted`] for more info.
pub fn state_root_unhashed<A: IntoTrieAccount>(
    state: impl IntoIterator<Item = (Address, A)>,
) -> B256 {
    // Key each account by the keccak256 hash of its address before sorting.
    let hashed = state.into_iter().map(|(address, account)| (keccak256(address), account));
    state_root_unsorted(hashed)
}
/// Sorts the hashed account keys and calculates the root hash of the state represented as MPT.
/// See [`state_root`] for more info.
pub fn state_root_unsorted<A: IntoTrieAccount>(state: impl IntoIterator<Item = (B256, A)>) -> B256 {
    // Collect and stable-sort by hashed key, as required by the trie builder.
    let mut entries: Vec<_> = state.into_iter().collect();
    entries.sort_by_key(|(key, _)| *key);
    state_root(entries)
}
/// Calculates the root hash of the state represented as MPT.
/// Corresponds to [geth's `deriveHash`](https://github.com/ethereum/go-ethereum/blob/6c149fd4ad063f7c24d726a73bc0546badd1bc73/core/genesis.go#L119).
///
/// # Panics
///
/// If the items are not in sorted order.
pub fn state_root<A: IntoTrieAccount>(state: impl IntoIterator<Item = (B256, A)>) -> B256 {
let mut hb = HashBuilder::default();
let mut account_rlp_buf = Vec::new();
for (hashed_key, account) in state {
account_rlp_buf.clear();
account.to_trie_account().encode(&mut account_rlp_buf);
hb.add_leaf(Nibbles::unpack(hashed_key), &account_rlp_buf);
}
hb.root()
}
/// Hashes storage keys, sorts them and them calculates the root hash of the storage trie.
/// See [`storage_root_unsorted`] for more info.
pub fn storage_root_unhashed(storage: impl IntoIterator<Item = (B256, U256)>) -> B256 {
    // Key each value by the keccak256 hash of its slot before sorting.
    let hashed = storage.into_iter().map(|(slot, value)| (keccak256(slot), value));
    storage_root_unsorted(hashed)
}
/// Sorts and calculates the root hash of account storage trie.
/// See [`storage_root`] for more info.
pub fn storage_root_unsorted(storage: impl IntoIterator<Item = (B256, U256)>) -> B256 {
    // Collect and stable-sort by hashed slot, as required by the trie builder.
    let mut entries: Vec<_> = storage.into_iter().collect();
    entries.sort_by_key(|(key, _)| *key);
    storage_root(entries)
}
/// Calculates the root hash of account storage trie.
///
/// # Panics
///
/// If the items are not in sorted order.
pub fn storage_root(storage: impl IntoIterator<Item = (B256, U256)>) -> B256 {
    let mut hb = HashBuilder::default();
    for (hashed_slot, value) in storage {
        // Storage values are stored as minimal fixed-size RLP.
        let rlp_value = alloy_rlp::encode_fixed_size(&value);
        hb.add_leaf(Nibbles::unpack(hashed_slot), rlp_value.as_ref());
    }
    hb.root()
}
/// Implementation of hasher using our keccak256 hashing function
/// for compatibility with `triehash` crate.
#[cfg(any(test, feature = "test-utils"))]
pub mod triehash {
    use super::{keccak256, B256};
    use hash_db::Hasher;
    use plain_hasher::PlainHasher;

    /// A [Hasher] that calculates a keccak256 hash of the given data.
    #[derive(Default, Debug, Clone, PartialEq, Eq)]
    #[non_exhaustive]
    pub struct KeccakHasher;

    // NOTE: no `cfg` attribute here — the enclosing module is already gated on
    // `any(test, feature = "test-utils")`, so repeating it on the impl was redundant.
    impl Hasher for KeccakHasher {
        /// 32-byte keccak256 digest.
        type Out = B256;
        type StdHasher = PlainHasher;
        const LENGTH: usize = 32;

        fn hash(x: &[u8]) -> Self::Out {
            keccak256(x)
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
bloom, constants::EMPTY_ROOT_HASH, hex_literal::hex, Block, GenesisAccount, Log, TxType,
GOERLI, HOLESKY, MAINNET, SEPOLIA,
GOERLI, HOLESKY, MAINNET, SEPOLIA, U256,
};
use alloy_primitives::{b256, LogData};
use alloy_primitives::{b256, Address, LogData};
use alloy_rlp::Decodable;
use reth_trie_common::root::{state_root_ref_unhashed, state_root_unhashed};
use std::collections::HashMap;
#[test]

View File

@@ -1,59 +0,0 @@
use crate::Account;
use alloy_consensus::constants::{EMPTY_ROOT_HASH, KECCAK_EMPTY};
use alloy_genesis::GenesisAccount;
use alloy_primitives::{keccak256, B256, U256};
use reth_trie_types::TrieAccount;
use revm_primitives::AccountInfo;
/// Converts a type into a [`TrieAccount`].
pub trait IntoTrieAccount {
    /// Converts this type into a [`TrieAccount`].
    fn to_trie_account(self) -> TrieAccount;
}
impl IntoTrieAccount for GenesisAccount {
    fn to_trie_account(self) -> TrieAccount {
        // Zero-valued slots are not part of the trie, so they are dropped
        // before the storage root is computed. An absent storage map yields
        // the empty-trie root.
        let storage_root = match self.storage {
            Some(storage) => {
                let nonzero = storage
                    .into_iter()
                    .filter(|(_, value)| *value != B256::ZERO)
                    .map(|(slot, value)| (slot, U256::from_be_bytes(*value)));
                super::storage_root_unhashed(nonzero)
            }
            None => EMPTY_ROOT_HASH,
        };

        TrieAccount {
            nonce: self.nonce.unwrap_or_default(),
            balance: self.balance,
            storage_root,
            // Accounts without code hash to the keccak256 of the empty string.
            code_hash: self.code.map_or(KECCAK_EMPTY, keccak256),
        }
    }
}
// Conversion from a reth `Account` paired with its pre-computed storage root.
impl IntoTrieAccount for (Account, B256) {
    fn to_trie_account(self) -> TrieAccount {
        let (account, storage_root) = self;
        // A missing bytecode hash means the account has no code.
        let code_hash = match account.bytecode_hash {
            Some(hash) => hash,
            None => KECCAK_EMPTY,
        };
        TrieAccount { nonce: account.nonce, balance: account.balance, storage_root, code_hash }
    }
}
// Conversion from a revm `AccountInfo` paired with its pre-computed storage root.
impl IntoTrieAccount for (AccountInfo, B256) {
    fn to_trie_account(self) -> TrieAccount {
        TrieAccount {
            nonce: self.0.nonce,
            balance: self.0.balance,
            storage_root: self.1,
            // `AccountInfo` always carries a code hash, so no default is needed.
            code_hash: self.0.code_hash,
        }
    }
}

View File

@@ -4,15 +4,16 @@ use crate::{logs_bloom, Bloom, Bytes, TxType, B256};
use alloy_primitives::Log;
use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, RlpEncodable};
use bytes::{Buf, BufMut};
use core::{cmp::Ordering, ops::Deref};
use derive_more::{Deref, DerefMut, From, IntoIterator};
#[cfg(any(test, feature = "arbitrary"))]
use proptest::strategy::Strategy;
#[cfg(feature = "zstd-codec")]
use reth_codecs::CompactZstd;
use reth_codecs::{add_arbitrary_tests, main_codec, Compact};
use std::{
cmp::Ordering,
ops::{Deref, DerefMut},
};
#[cfg(not(feature = "std"))]
use alloc::{vec, vec::Vec};
/// Receipt containing result of transaction execution.
#[cfg_attr(feature = "zstd-codec", main_codec(no_arbitrary, zstd))]
@@ -65,7 +66,7 @@ impl Receipt {
}
/// A collection of receipts organized as a two-dimensional vector.
#[derive(Clone, Debug, PartialEq, Eq, Default)]
#[derive(Clone, Debug, PartialEq, Eq, Default, From, Deref, DerefMut, IntoIterator)]
pub struct Receipts {
/// A two-dimensional vector of optional `Receipt` instances.
pub receipt_vec: Vec<Vec<Option<Receipt>>>,
@@ -110,41 +111,12 @@ impl Receipts {
}
}
// Wraps an already-nested per-block receipt vector without copying.
impl From<Vec<Vec<Option<Receipt>>>> for Receipts {
    fn from(receipt_vec: Vec<Vec<Option<Receipt>>>) -> Self {
        Self { receipt_vec }
    }
}
// A flat list of receipts is interpreted as the receipts of a single block.
impl From<Vec<Receipt>> for Receipts {
    fn from(block_receipts: Vec<Receipt>) -> Self {
        let block: Vec<Option<Receipt>> = block_receipts.into_iter().map(Some).collect();
        Self { receipt_vec: vec![block] }
    }
}
// Lets `Receipts` be read transparently as the inner nested vector.
impl Deref for Receipts {
    type Target = Vec<Vec<Option<Receipt>>>;
    fn deref(&self) -> &Self::Target {
        &self.receipt_vec
    }
}
// Mutable counterpart of the `Deref` impl above the inner vector is exposed
// directly, so `Vec` methods like `push` work on `Receipts`.
impl DerefMut for Receipts {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.receipt_vec
    }
}
// Consuming iteration yields one `Vec<Option<Receipt>>` per block.
impl IntoIterator for Receipts {
    type Item = Vec<Option<Receipt>>;
    type IntoIter = std::vec::IntoIter<Self::Item>;
    fn into_iter(self) -> Self::IntoIter {
        self.receipt_vec.into_iter()
    }
}
impl FromIterator<Vec<Option<Receipt>>> for Receipts {
fn from_iter<I: IntoIterator<Item = Vec<Option<Receipt>>>>(iter: I) -> Self {
iter.into_iter().collect::<Vec<_>>().into()

View File

@@ -3,29 +3,18 @@
use crate::Request;
use alloy_eips::eip7685::{Decodable7685, Encodable7685};
use alloy_rlp::{Decodable, Encodable};
use derive_more::{Deref, DerefMut, From, IntoIterator};
use reth_codecs::{main_codec, Compact};
use revm_primitives::Bytes;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
/// A list of EIP-7685 requests.
#[main_codec]
#[derive(Debug, Clone, PartialEq, Eq, Default, Hash)]
#[derive(Debug, Clone, PartialEq, Eq, Default, Hash, Deref, DerefMut, From, IntoIterator)]
pub struct Requests(pub Vec<Request>);
// Wraps a request list in the newtype without copying.
impl From<Vec<Request>> for Requests {
    fn from(requests: Vec<Request>) -> Self {
        Self(requests)
    }
}
// Consuming iteration yields the individual EIP-7685 requests.
impl IntoIterator for Requests {
    type Item = Request;
    type IntoIter = std::vec::IntoIter<Request>;
    fn into_iter(self) -> Self::IntoIter {
        self.0.into_iter()
    }
}
impl Encodable for Requests {
fn encode(&self, out: &mut dyn bytes::BufMut) {
let mut h = alloy_rlp::Header { list: true, payload_length: 0 };

View File

@@ -1,6 +1,9 @@
use crate::{revm_primitives::AccountInfo, Account, Address, TxKind, KECCAK_EMPTY, U256};
use revm::{interpreter::gas::validate_initial_tx_gas, primitives::SpecId};
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
/// Converts a Revm [`AccountInfo`] into a Reth [`Account`].
///
/// Sets `bytecode_hash` to `None` if `code_hash` is [`KECCAK_EMPTY`].

View File

@@ -9,6 +9,9 @@ use alloy_eips::{eip4788::BEACON_ROOTS_ADDRESS, eip7002::WITHDRAWAL_REQUEST_PRED
#[cfg(feature = "optimism")]
use revm_primitives::OptimismFields;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
/// Fill block environment from Block.
pub fn fill_block_env(
block_env: &mut BlockEnv,
@@ -73,7 +76,7 @@ pub fn block_coinbase(chain_spec: &ChainSpec, header: &Header, after_merge: bool
}
/// Error type for recovering Clique signer from a header.
#[derive(Debug, thiserror::Error)]
#[derive(Debug, thiserror_no_std::Error)]
pub enum CliqueSignerRecoveryError {
/// Header extradata is too short.
#[error("Invalid extra data length")]

View File

@@ -2,8 +2,8 @@ use super::access_list::AccessList;
use crate::{keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256};
use alloy_rlp::{length_of_length, Decodable, Encodable, Header};
use bytes::BytesMut;
use core::mem;
use reth_codecs::{main_codec, Compact};
use std::mem;
/// A transaction with a priority fee ([EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)).
#[main_codec]

View File

@@ -2,8 +2,8 @@ use super::access_list::AccessList;
use crate::{keccak256, Bytes, ChainId, Signature, TxKind, TxType, B256, U256};
use alloy_rlp::{length_of_length, Decodable, Encodable, Header};
use bytes::BytesMut;
use core::mem;
use reth_codecs::{main_codec, Compact};
use std::mem;
/// Transaction with an [`AccessList`] ([EIP-2930](https://eips.ethereum.org/EIPS/eip-2930)).
#[main_codec]

Some files were not shown because too many files have changed in this diff Show More