Merge remote-tracking branch 'origin/devnet-0' into alexey/block-hash-mismatch
@@ -623,9 +623,9 @@ dependencies = [

[[package]]
name = "alloy-trie"
version = "0.3.1"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "beb28aa4ecd32fdfa1b1bdd111ff7357dd562c6b2372694cf9e613434fcba659"
checksum = "d55bd16fdb7ff4bd74cc4c878eeac7e8a27c0d7ba9df4ab58d9310aaafb62d43"
dependencies = [
 "alloy-primitives",
 "alloy-rlp",
@@ -637,7 +637,6 @@ dependencies = [
 "proptest",
 "proptest-derive",
 "serde",
 "smallvec",
 "tracing",
]

@@ -298,7 +298,7 @@ alloy-primitives = "0.7.2"
alloy-dyn-abi = "0.7.2"
alloy-sol-types = "0.7.2"
alloy-rlp = "0.3.4"
alloy-trie = "0.3.1"
alloy-trie = "0.4.0"
alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "07611cf" }
alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "07611cf" }
alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "07611cf" }

@@ -13,8 +13,6 @@ workspace = true

[dependencies]
# reth
reth-primitives.workspace = true
reth-interfaces.workspace = true
reth-provider.workspace = true
reth-consensus.workspace=true

[dev-dependencies]

@@ -1,7 +1,6 @@
//! Collection of methods for block validation.

use reth_consensus::ConsensusError;
use reth_interfaces::RethResult;
use reth_primitives::{
    constants::{
        eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK},
@@ -9,7 +8,6 @@ use reth_primitives::{
    },
    ChainSpec, GotExpected, Hardfork, Header, SealedBlock, SealedHeader,
};
use reth_provider::{HeaderProvider, WithdrawalsProvider};

/// Validate header standalone
pub fn validate_header_standalone(
@@ -131,33 +129,6 @@ pub fn validate_block_standalone(
    Ok(())
}

/// Validate block with regard to chain (parent)
///
/// Checks:
/// If we already know the block.
/// If parent is known
///
/// Returns parent block header
pub fn validate_block_regarding_chain<PROV: HeaderProvider + WithdrawalsProvider>(
    block: &SealedBlock,
    provider: &PROV,
) -> RethResult<SealedHeader> {
    let hash = block.header.hash();

    // Check if block is known.
    if provider.is_known(&hash)? {
        return Err(ConsensusError::BlockKnown { hash, number: block.header.number }.into())
    }

    // Check if parent is known.
    let parent = provider
        .header(&block.parent_hash)?
        .ok_or(ConsensusError::ParentUnknown { hash: block.parent_hash })?;

    // Return parent header.
    Ok(parent.seal(block.parent_hash))
}

/// Validates that the EIP-4844 header fields exist and conform to the spec. This ensures that:
///
/// * `blob_gas_used` exists as a header field
@@ -225,7 +196,7 @@ mod tests {
        BlockNumber, Bytes, ChainSpecBuilder, Signature, Transaction, TransactionSigned, TxEip4844,
        Withdrawal, Withdrawals, U256,
    };
    use reth_provider::AccountReader;
    use reth_provider::{AccountReader, HeaderProvider, WithdrawalsProvider};
    use std::ops::RangeBounds;

    mock! {
@@ -429,22 +400,14 @@ mod tests {
        assert_eq!(validate_block_standalone(&block, &chain_spec), Ok(()));
        let block = create_block_with_withdrawals(&[5, 6, 7, 8, 9]);
        assert_eq!(validate_block_standalone(&block, &chain_spec), Ok(()));

        let (_, parent) = mock_block();
        let provider = Provider::new(Some(parent.clone()));
        let block = create_block_with_withdrawals(&[0, 1, 2]);
        let res = validate_block_regarding_chain(&block, &provider);
        assert!(res.is_ok());

        // Withdrawal index should be the last withdrawal index + 1
        let mut provider = Provider::new(Some(parent));
        let block = create_block_with_withdrawals(&[3, 4, 5]);
        provider
            .withdrawals_provider
            .expect_latest_withdrawal()
            .return_const(Ok(Some(Withdrawal { index: 2, ..Default::default() })));
        let res = validate_block_regarding_chain(&block, &provider);
        assert!(res.is_ok());
    }

    #[test]

@@ -163,10 +163,6 @@ pub fn validate_withdrawals_presence(
                return Err(message_validation_kind
                    .to_error(VersionSpecificValidationError::WithdrawalsNotSupportedInV1))
            }
            if is_shanghai_active {
                return Err(message_validation_kind
                    .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai))
            }
        }
        EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 => {
            if is_shanghai_active && !has_withdrawals {

@@ -475,6 +475,7 @@ where
mod tests {
    use super::*;
    use alloy_eips::{
        eip2935::HISTORY_STORAGE_ADDRESS,
        eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS},
        eip7002::{WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, WITHDRAWAL_REQUEST_PREDEPLOY_CODE},
    };
@@ -484,7 +485,8 @@ mod tests {
        ForkCondition, Transaction, TxKind, TxLegacy, B256,
    };
    use reth_revm::{
        database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState,
        database::StateProviderDatabase, state_change::HISTORY_SERVE_WINDOW,
        test_utils::StateProviderTest, TransitionState,
    };
    use revm_primitives::{b256, fixed_bytes, Bytes};
    use secp256k1::{Keypair, Secp256k1};
@@ -736,9 +738,7 @@ mod tests {
        );

        let mut header = chain_spec.genesis_header();

        let provider = executor_provider(chain_spec);

        let mut executor =
            provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none());
@@ -954,4 +954,305 @@ mod tests {
        assert_eq!(withdrawal_request.validator_public_key, validator_public_key);
        assert_eq!(withdrawal_request.amount, u64::from_be_bytes(withdrawal_amount.into()));
    }

    fn create_state_provider_with_block_hashes(latest_block: u64) -> StateProviderTest {
        let mut db = StateProviderTest::default();
        for block_number in 0..=latest_block {
            db.insert_block_hash(block_number, keccak256(block_number.to_string()));
        }
        db
    }

    #[test]
    fn eip_2935_pre_fork() {
        let db = create_state_provider_with_block_hashes(1);

        let chain_spec = Arc::new(
            ChainSpecBuilder::from(&*MAINNET)
                .shanghai_activated()
                .with_fork(Hardfork::Prague, ForkCondition::Never)
                .build(),
        );

        let provider = executor_provider(chain_spec);
        let mut executor = provider.executor(StateProviderDatabase::new(&db));

        // construct the header for block one
        let header = Header { timestamp: 1, number: 1, ..Header::default() };

        // attempt to execute an empty block, this should not fail
        executor
            .execute_and_verify(
                &BlockWithSenders {
                    block: Block {
                        header,
                        body: vec![],
                        ommers: vec![],
                        withdrawals: None,
                        requests: None,
                    },
                    senders: vec![],
                },
                U256::ZERO,
            )
            .expect(
                "Executing a block with no transactions while Prague is active should not fail",
            );

        // ensure that the block hash was *not* written to storage, since this is before the fork
        // was activated
        //
        // we load the account first, which should also not exist, because revm expects it to be
        // loaded
        assert!(executor.state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_none());
        assert!(executor.state.storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap().is_zero());
    }

    #[test]
    fn eip_2935_fork_activation_genesis() {
        let db = create_state_provider_with_block_hashes(0);

        let chain_spec = Arc::new(
            ChainSpecBuilder::from(&*MAINNET)
                .shanghai_activated()
                .with_fork(Hardfork::Prague, ForkCondition::Timestamp(0))
                .build(),
        );

        let header = chain_spec.genesis_header();
        let provider = executor_provider(chain_spec);
        let mut executor = provider.executor(StateProviderDatabase::new(&db));

        // attempt to execute genesis block, this should not fail
        executor
            .execute_and_verify(
                &BlockWithSenders {
                    block: Block {
                        header,
                        body: vec![],
                        ommers: vec![],
                        withdrawals: None,
                        requests: None,
                    },
                    senders: vec![],
                },
                U256::ZERO,
            )
            .expect(
                "Executing a block with no transactions while Prague is active should not fail",
            );

        // ensure that the block hash was *not* written to storage, since there are no blocks
        // preceding genesis
        //
        // we load the account first, which should also not exist, because revm expects it to be
        // loaded
        assert!(executor.state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_none());
        assert!(executor.state.storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap().is_zero());
    }

    #[test]
    fn eip_2935_fork_activation_within_window_bounds() {
        let fork_activation_block = (HISTORY_SERVE_WINDOW - 10) as u64;
        let db = create_state_provider_with_block_hashes(fork_activation_block);

        let chain_spec = Arc::new(
            ChainSpecBuilder::from(&*MAINNET)
                .shanghai_activated()
                .with_fork(Hardfork::Prague, ForkCondition::Timestamp(1))
                .build(),
        );

        let header = Header { timestamp: 1, number: fork_activation_block, ..Header::default() };
        let provider = executor_provider(chain_spec);
        let mut executor = provider.executor(StateProviderDatabase::new(&db));

        // attempt to execute the fork activation block, this should not fail
        executor
            .execute_and_verify(
                &BlockWithSenders {
                    block: Block {
                        header,
                        body: vec![],
                        ommers: vec![],
                        withdrawals: None,
                        ..Default::default()
                    },
                    senders: vec![],
                },
                U256::ZERO,
            )
            .expect(
                "Executing a block with no transactions while Prague is active should not fail",
            );

        // since this is the activation block, the hashes of all ancestors for the block (up to
        // `HISTORY_SERVE_WINDOW`) must be present.
        //
        // our fork activation check depends on checking slot 0, so we also check that slot 0 was
        // indeed set if the fork activation block was within `HISTORY_SERVE_WINDOW`
        assert!(executor.state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some());
        for slot in 0..fork_activation_block {
            assert_ne!(
                executor.state.storage(HISTORY_STORAGE_ADDRESS, U256::from(slot)).unwrap(),
                U256::ZERO
            );
        }

        // the hash of the block itself should not be in storage
        assert!(executor
            .state
            .storage(HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block))
            .unwrap()
            .is_zero());
    }

    #[test]
    fn eip_2935_fork_activation_outside_window_bounds() {
        let fork_activation_block = (HISTORY_SERVE_WINDOW + 256) as u64;
        let db = create_state_provider_with_block_hashes(fork_activation_block);

        let chain_spec = Arc::new(
            ChainSpecBuilder::from(&*MAINNET)
                .shanghai_activated()
                .with_fork(Hardfork::Prague, ForkCondition::Timestamp(1))
                .build(),
        );

        let provider = executor_provider(chain_spec);
        let mut executor = provider.executor(StateProviderDatabase::new(&db));

        let header = Header { timestamp: 1, number: fork_activation_block, ..Header::default() };

        // attempt to execute the fork activation block, this should not fail
        executor
            .execute_and_verify(
                &BlockWithSenders {
                    block: Block {
                        header,
                        body: vec![],
                        ommers: vec![],
                        withdrawals: None,
                        ..Default::default()
                    },
                    senders: vec![],
                },
                U256::ZERO,
            )
            .expect(
                "Executing a block with no transactions while Prague is active should not fail",
            );

        // since this is the activation block, the hashes of all ancestors for the block (up to
        // `HISTORY_SERVE_WINDOW`) must be present.
        //
        // our fork activation check depends on checking slot 0, so we also check that slot 0 was
        // indeed set if the fork activation block was within `HISTORY_SERVE_WINDOW`
        assert!(executor.state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some());
        for slot in 0..HISTORY_SERVE_WINDOW {
            assert_ne!(
                executor.state.storage(HISTORY_STORAGE_ADDRESS, U256::from(slot)).unwrap(),
                U256::ZERO
            );
        }
    }

    #[test]
    fn eip_2935_state_transition_inside_fork() {
        let db = create_state_provider_with_block_hashes(2);

        let chain_spec = Arc::new(
            ChainSpecBuilder::from(&*MAINNET)
                .shanghai_activated()
                .with_fork(Hardfork::Prague, ForkCondition::Timestamp(0))
                .build(),
        );

        let header = chain_spec.genesis_header();
        let provider = executor_provider(chain_spec);
        let mut executor = provider.executor(StateProviderDatabase::new(&db));

        // attempt to execute the genesis block, this should not fail
        executor
            .execute_and_verify(
                &BlockWithSenders {
                    block: Block {
                        header,
                        body: vec![],
                        ommers: vec![],
                        withdrawals: None,
                        requests: None,
                    },
                    senders: vec![],
                },
                U256::ZERO,
            )
            .expect(
                "Executing a block with no transactions while Prague is active should not fail",
            );

        // nothing should be written as the genesis has no ancestors
        assert!(executor.state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_none());
        assert!(executor.state.storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap().is_zero());

        // attempt to execute block 1, this should not fail
        let header = Header { timestamp: 1, number: 1, ..Header::default() };
        executor
            .execute_and_verify(
                &BlockWithSenders {
                    block: Block {
                        header,
                        body: vec![],
                        ommers: vec![],
                        withdrawals: None,
                        ..Default::default()
                    },
                    senders: vec![],
                },
                U256::ZERO,
            )
            .expect(
                "Executing a block with no transactions while Prague is active should not fail",
            );

        // the block hash of genesis should now be in storage, but not block 1
        assert!(executor.state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some());
        assert_ne!(
            executor.state.storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap(),
            U256::ZERO
        );
        assert!(executor.state.storage(HISTORY_STORAGE_ADDRESS, U256::from(1)).unwrap().is_zero());

        // attempt to execute block 2, this should not fail
        let header = Header { timestamp: 1, number: 2, ..Header::default() };
        executor
            .execute_and_verify(
                &BlockWithSenders {
                    block: Block {
                        header,
                        body: vec![],
                        ommers: vec![],
                        withdrawals: None,
                        ..Default::default()
                    },
                    senders: vec![],
                },
                U256::ZERO,
            )
            .expect(
                "Executing a block with no transactions while Prague is active should not fail",
            );

        // the block hash of genesis and block 1 should now be in storage, but not block 2
        assert!(executor.state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some());
        assert_ne!(
            executor.state.storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap(),
            U256::ZERO
        );
        assert_ne!(
            executor.state.storage(HISTORY_STORAGE_ADDRESS, U256::from(1)).unwrap(),
            U256::ZERO
        );
        assert!(executor.state.storage(HISTORY_STORAGE_ADDRESS, U256::from(2)).unwrap().is_zero());
    }
}

@@ -1276,7 +1276,7 @@ pub struct PeersConfig {
    /// How often to recheck free slots for outbound connections.
    #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))]
    pub refill_slots_interval: Duration,
    /// Trusted nodes to connect to.
    /// Trusted nodes to connect to or accept from
    pub trusted_nodes: HashSet<NodeRecord>,
    /// Connect to or accept from trusted nodes only?
    #[cfg_attr(feature = "serde", serde(alias = "connect_trusted_nodes_only"))]
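The `serde(alias = "connect_trusted_nodes_only")` attribute added above means a config file written with the legacy key still deserializes into the renamed field. A minimal standalone sketch of that behavior; the struct, field name, and use of the `toml` crate here are illustrative assumptions, not the actual `PeersConfig`:

use serde::Deserialize;

/// Illustrative stand-in for a peers config section (hypothetical names).
#[derive(Debug, Deserialize)]
struct PeersSection {
    /// Accepts both the new key and the legacy `connect_trusted_nodes_only` key.
    #[serde(default, alias = "connect_trusted_nodes_only")]
    trusted_nodes_only: bool,
}

fn main() {
    // An old config file using the legacy key still parses after the rename.
    let legacy: PeersSection = toml::from_str("connect_trusted_nodes_only = true").unwrap();
    assert!(legacy.trusted_nodes_only);
}
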
@@ -21,6 +21,7 @@ use reth_primitives::{mainnet_nodes, ChainSpec, NodeRecord};
use secp256k1::SecretKey;
use std::{
    net::{IpAddr, Ipv4Addr, Ipv6Addr},
    ops::Not,
    path::PathBuf,
    sync::Arc,
};
@@ -39,7 +40,7 @@ pub struct NetworkArgs {
    #[arg(long, value_delimiter = ',')]
    pub trusted_peers: Vec<NodeRecord>,

    /// Connect only to trusted peers
    /// Connect to or accept from trusted peers only
    #[arg(long)]
    pub trusted_only: bool,

@@ -156,13 +157,9 @@ impl NetworkArgs {
        self.discovery.apply_to_builder(network_config_builder)
    }

    /// If `no_persist_peers` is true then this returns the path to the persistent peers file path.
    /// If `no_persist_peers` is false then this returns the path to the persistent peers file path.
    pub fn persistent_peers_file(&self, peers_file: PathBuf) -> Option<PathBuf> {
        if self.no_persist_peers {
            return None
        }

        Some(peers_file)
        self.no_persist_peers.not().then_some(peers_file)
    }

    /// Sets the p2p port to zero, to allow the OS to assign a random unused port when
@@ -258,12 +255,12 @@ pub struct DiscoveryArgs {

    /// The interval in seconds at which to carry out boost lookup queries, for a fixed number of
    /// times, at bootstrap.
    #[arg(id = "discovery.v5.bootstrap.lookup-interval", long = "discovery.v5.bootstrap.lookup-interval", value_name = "DISCOVERY_V5_bootstrap_lookup_interval",
    #[arg(id = "discovery.v5.bootstrap.lookup-interval", long = "discovery.v5.bootstrap.lookup-interval", value_name = "DISCOVERY_V5_bootstrap_lookup_interval",
        default_value_t = DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL)]
    pub discv5_bootstrap_lookup_interval: u64,

    /// The number of times to carry out boost lookup queries at bootstrap.
    #[arg(id = "discovery.v5.bootstrap.lookup-countdown", long = "discovery.v5.bootstrap.lookup-countdown", value_name = "DISCOVERY_V5_bootstrap_lookup_countdown",
    #[arg(id = "discovery.v5.bootstrap.lookup-countdown", long = "discovery.v5.bootstrap.lookup-countdown", value_name = "DISCOVERY_V5_bootstrap_lookup_countdown",
        default_value_t = DEFAULT_COUNT_BOOTSTRAP_LOOKUPS)]
    pub discv5_bootstrap_lookup_countdown: u64,
}
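The `persistent_peers_file` rewrite above leans on two std helpers, `std::ops::Not` (hence the new `ops::Not` import) and `bool::then_some`. A small sketch, with a hypothetical helper name, of why the one-liner is equivalent to the removed early-return form:

use std::ops::Not;

/// Returns `Some(value)` only when persistence is enabled, mirroring the refactor above.
fn keep_if_persisting(no_persist: bool, value: u32) -> Option<u32> {
    // `no_persist.not()` is `!no_persist`; `then_some` maps `true` to `Some(value)`.
    no_persist.not().then_some(value)
}

fn main() {
    assert_eq!(keep_if_persisting(false, 7), Some(7));
    assert_eq!(keep_if_persisting(true, 7), None);
}
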
@@ -102,7 +102,7 @@ pub struct Header {
    /// The Keccak 256-bit hash of the root node of the trie structure populated with each
    /// [EIP-7685] request in the block body.
    ///
    /// [EIP-7685]: https://eips.ethereum.org/EIPS/eip-4895
    /// [EIP-7685]: https://eips.ethereum.org/EIPS/eip-7685
    pub requests_root: Option<B256>,
    /// An arbitrary byte array containing data relevant to this block. This must be 32 bytes or
    /// fewer; formally Hx.
@@ -24,4 +24,4 @@ pub use storage::StorageTrieEntry;
mod subnode;
pub use subnode::StoredSubNode;

pub use alloy_trie::{BranchNodeCompact, HashBuilder, TrieMask, EMPTY_ROOT_HASH};
pub use alloy_trie::{proof, BranchNodeCompact, HashBuilder, TrieMask, EMPTY_ROOT_HASH};
@@ -1,10 +1,15 @@
//! Merkle trie proofs.

use super::Nibbles;
use super::{
    proof::{verify_proof, ProofVerificationError},
    Nibbles, TrieAccount,
};
use crate::{keccak256, Account, Address, Bytes, B256, U256};
use alloy_rlp::encode_fixed_size;
use alloy_trie::EMPTY_ROOT_HASH;

/// The merkle proof with the relevant account info.
#[derive(PartialEq, Eq, Default, Debug)]
#[derive(PartialEq, Eq, Debug)]
pub struct AccountProof {
    /// The address associated with the account.
    pub address: Address,
@@ -22,7 +27,13 @@ pub struct AccountProof {
impl AccountProof {
    /// Create new account proof entity.
    pub fn new(address: Address) -> Self {
        Self { address, ..Default::default() }
        Self {
            address,
            info: None,
            proof: Vec::new(),
            storage_root: EMPTY_ROOT_HASH,
            storage_proofs: Vec::new(),
        }
    }

    /// Set account info, storage root and requested storage proofs.
@@ -41,6 +52,26 @@ impl AccountProof {
    pub fn set_proof(&mut self, proof: Vec<Bytes>) {
        self.proof = proof;
    }

    /// Verify the storage proofs and account proof against the provided state root.
    pub fn verify(&self, root: B256) -> Result<(), ProofVerificationError> {
        // Verify storage proofs.
        for storage_proof in &self.storage_proofs {
            storage_proof.verify(self.storage_root)?;
        }

        // Verify the account proof.
        let expected = if self.info.is_none() && self.storage_root == EMPTY_ROOT_HASH {
            None
        } else {
            Some(alloy_rlp::encode(TrieAccount::from((
                self.info.unwrap_or_default(),
                self.storage_root,
            ))))
        };
        let nibbles = Nibbles::unpack(keccak256(self.address));
        verify_proof(root, nibbles, expected, &self.proof)
    }
}

/// The merkle proof of the storage entry.
@@ -83,4 +114,11 @@ impl StorageProof {
    pub fn set_proof(&mut self, proof: Vec<Bytes>) {
        self.proof = proof;
    }

    /// Verify the proof against the provided storage root.
    pub fn verify(&self, root: B256) -> Result<(), ProofVerificationError> {
        let expected =
            if self.value.is_zero() { None } else { Some(encode_fixed_size(&self.value).to_vec()) };
        verify_proof(root, self.nibbles.clone(), expected, &self.proof)
    }
}
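For orientation, the new verification methods compose the way the trie tests later in this diff use them: each `StorageProof` is anchored to the account's `storage_root`, and the account proof itself to the state root. A hedged sketch of the call flow, assuming the `reth_primitives::trie` paths and public fields shown above:

use reth_primitives::{trie::AccountProof, B256};

/// Sketch of how a caller might consume the new API. `AccountProof::verify` already checks
/// the storage proofs internally; this just spells out both layers of the check.
fn verify_account_and_storage(proof: &AccountProof, state_root: B256) -> bool {
    let storage_ok = proof
        .storage_proofs
        .iter()
        .all(|sp| sp.verify(proof.storage_root).is_ok());
    storage_ok && proof.verify(state_root).is_ok()
}
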
File diff suppressed because one or more lines are too long
@@ -52,7 +52,8 @@ pub fn generate_from_to(ident: &Ident, fields: &FieldList, is_zstd: bool) -> Tok
/// Generates code to implement the `Compact` trait method `to_compact`.
fn generate_from_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> TokenStream2 {
    let mut lines = vec![];
    let mut known_types = vec!["B256", "Address", "Bloom", "Vec", "TxHash", "BlockHash"];
    let mut known_types =
        vec!["B256", "Address", "Bloom", "Vec", "TxHash", "BlockHash", "FixedBytes"];

    // Only types without `Bytes` should be added here. It's currently manually added, since
    // it's hard to figure out with derive_macro which types have Bytes fields.
@@ -17,7 +17,7 @@

pub use reth_codecs_derive::*;

use alloy_primitives::{Address, Bloom, Bytes, B256, B512, U256};
use alloy_primitives::{Address, Bloom, Bytes, FixedBytes, U256};
use bytes::Buf;

#[cfg(any(test, feature = "alloy"))]
@@ -301,9 +301,9 @@ impl<const N: usize> Compact for [u8; N] {
    }
}

/// Implements the [`Compact`] trait for fixed size byte array types like [`B256`].
/// Implements the [`Compact`] trait for wrappers over fixed size byte array types.
#[macro_export]
macro_rules! impl_compact_for_bytes {
macro_rules! impl_compact_for_wrapped_bytes {
    ($($name:tt),+) => {
        $(
            impl Compact for $name {
@@ -324,8 +324,23 @@ macro_rules! impl_compact_for_bytes {
        )+
    };
}
impl_compact_for_wrapped_bytes!(Address, Bloom);

impl_compact_for_bytes!(Address, B256, B512, Bloom);
impl<const N: usize> Compact for FixedBytes<N> {
    #[inline]
    fn to_compact<B>(self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        self.0.to_compact(buf)
    }

    #[inline]
    fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
        let (v, buf) = <[u8; N]>::from_compact(buf, len);
        (Self::from(v), buf)
    }
}

impl Compact for bool {
    /// `bool` vars go directly to the `StructFlags` and are not written to the buffer.
@@ -378,6 +393,7 @@ const fn decode_varuint_panic() -> ! {
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_primitives::B256;

    #[test]
    fn compact_bytes() {
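The blanket `FixedBytes<N>` impl above delegates to the existing `[u8; N]` impl, so any fixed-size byte wrapper such as `B256` round-trips through the same code path. A small sketch of the round trip using the `Compact` signatures shown above; it assumes a crate depending on `reth_codecs`, `alloy_primitives`, and `bytes` (a `BytesMut` satisfies the `BufMut + AsMut<[u8]>` bound):

use alloy_primitives::B256;
use bytes::BytesMut;
use reth_codecs::Compact;

fn main() {
    let hash = B256::repeat_byte(0x42);

    // `to_compact` writes into any `BufMut + AsMut<[u8]>` and returns the written length.
    let mut buf = BytesMut::new();
    let len = hash.to_compact(&mut buf);

    // `from_compact` reads the same bytes back and returns the remaining slice.
    let (decoded, rest) = B256::from_compact(&buf[..], len);
    assert_eq!(decoded, hash);
    assert!(rest.is_empty());
}
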
@@ -560,8 +560,8 @@ impl StateProvider for MockEthProvider {
        }))
    }

    fn proof(&self, _address: Address, _keys: &[B256]) -> ProviderResult<AccountProof> {
        Ok(AccountProof::default())
    fn proof(&self, address: Address, _keys: &[B256]) -> ProviderResult<AccountProof> {
        Ok(AccountProof::new(address))
    }
}

@@ -319,8 +319,8 @@ impl StateProvider for NoopProvider {
        Ok(None)
    }

    fn proof(&self, _address: Address, _keys: &[B256]) -> ProviderResult<AccountProof> {
        Ok(AccountProof::default())
    fn proof(&self, address: Address, _keys: &[B256]) -> ProviderResult<AccountProof> {
        Ok(AccountProof::new(address))
    }
}

@@ -11,7 +11,7 @@ use reth_interfaces::trie::{StateRootError, StorageRootError};
use reth_primitives::{
    constants::EMPTY_ROOT_HASH,
    keccak256,
    trie::{AccountProof, HashBuilder, Nibbles, StorageProof, TrieAccount},
    trie::{proof::ProofRetainer, AccountProof, HashBuilder, Nibbles, StorageProof, TrieAccount},
    Address, B256,
};

@@ -60,8 +60,8 @@ where
    let walker = TrieWalker::new(trie_cursor, prefix_set.freeze());

    // Create a hash builder to rebuild the root node since it is not available in the database.
    let mut hash_builder =
        HashBuilder::default().with_proof_retainer(Vec::from([target_nibbles]));
    let retainer = ProofRetainer::from_iter([target_nibbles]);
    let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer);

    let mut account_rlp = Vec::with_capacity(128);
    let mut account_node_iter = AccountNodeIter::new(walker, hashed_account_cursor);
@@ -126,7 +126,8 @@ where
    );
    let walker = TrieWalker::new(trie_cursor, prefix_set);

    let mut hash_builder = HashBuilder::default().with_proof_retainer(target_nibbles);
    let retainer = ProofRetainer::from_iter(target_nibbles);
    let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer);
    let mut storage_node_iter =
        StorageNodeIter::new(walker, hashed_storage_cursor, hashed_address);
    while let Some(node) = storage_node_iter.try_next()? {
@@ -200,7 +201,7 @@ mod tests {
    fn insert_genesis<DB: Database>(
        provider_factory: &ProviderFactory<DB>,
        chain_spec: Arc<ChainSpec>,
    ) -> RethResult<()> {
    ) -> RethResult<B256> {
        let mut provider = provider_factory.provider_rw()?;

        // Hash accounts and insert them into hashing table.
@@ -224,21 +225,21 @@ mod tests {
        });
        provider.insert_storage_for_hashing(alloc_storage)?;

        let (_, updates) = StateRoot::from_tx(provider.tx_ref())
        let (root, updates) = StateRoot::from_tx(provider.tx_ref())
            .root_with_updates()
            .map_err(Into::<reth_db::DatabaseError>::into)?;
        updates.flush(provider.tx_mut())?;

        provider.commit()?;

        Ok(())
        Ok(root)
    }

    #[test]
    fn testspec_proofs() {
        // Create test database and insert genesis accounts.
        let factory = create_test_provider_factory();
        insert_genesis(&factory, TEST_SPEC.clone()).unwrap();
        let root = insert_genesis(&factory, TEST_SPEC.clone()).unwrap();

        let data = Vec::from([
            (
@@ -288,6 +289,7 @@ mod tests {
                expected_proof,
                "proof for {target:?} does not match"
            );
            assert_eq!(account_proof.verify(root), Ok(()));
        }
    }

@@ -295,7 +297,7 @@ mod tests {
    fn testspec_empty_storage_proof() {
        // Create test database and insert genesis accounts.
        let factory = create_test_provider_factory();
        insert_genesis(&factory, TEST_SPEC.clone()).unwrap();
        let root = insert_genesis(&factory, TEST_SPEC.clone()).unwrap();

        let target = Address::from_str("0x1ed9b1dd266b607ee278726d324b855a093394a6").unwrap();
        let slots = Vec::from([B256::with_last_byte(1), B256::with_last_byte(3)]);
@@ -306,15 +308,18 @@ mod tests {

        assert_eq!(slots.len(), account_proof.storage_proofs.len());
        for (idx, slot) in slots.into_iter().enumerate() {
            assert_eq!(account_proof.storage_proofs.get(idx), Some(&StorageProof::new(slot)));
            let proof = account_proof.storage_proofs.get(idx).unwrap();
            assert_eq!(proof, &StorageProof::new(slot));
            assert_eq!(proof.verify(account_proof.storage_root), Ok(()));
        }
        assert_eq!(account_proof.verify(root), Ok(()));
    }

    #[test]
    fn mainnet_genesis_account_proof() {
        // Create test database and insert genesis accounts.
        let factory = create_test_provider_factory();
        insert_genesis(&factory, MAINNET.clone()).unwrap();
        let root = insert_genesis(&factory, MAINNET.clone()).unwrap();

        // Address from mainnet genesis allocation.
        // keccak256 - `0xcf67b71c90b0d523dd5004cf206f325748da347685071b34812e21801f5270c4`
@@ -332,13 +337,14 @@ mod tests {
        let provider = factory.provider().unwrap();
        let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &[]).unwrap();
        similar_asserts::assert_eq!(account_proof.proof, expected_account_proof);
        assert_eq!(account_proof.verify(root), Ok(()));
    }

    #[test]
    fn mainnet_genesis_account_proof_nonexistent() {
        // Create test database and insert genesis accounts.
        let factory = create_test_provider_factory();
        insert_genesis(&factory, MAINNET.clone()).unwrap();
        let root = insert_genesis(&factory, MAINNET.clone()).unwrap();

        // Address that does not exist in mainnet genesis allocation.
        // keccak256 - `0x18f415ffd7f66bb1924d90f0e82fb79ca8c6d8a3473cd9a95446a443b9db1761`
@@ -354,13 +360,14 @@ mod tests {
        let provider = factory.provider().unwrap();
        let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &[]).unwrap();
        similar_asserts::assert_eq!(account_proof.proof, expected_account_proof);
        assert_eq!(account_proof.verify(root), Ok(()));
    }

    #[test]
    fn holesky_deposit_contract_proof() {
        // Create test database and insert genesis accounts.
        let factory = create_test_provider_factory();
        insert_genesis(&factory, HOLESKY.clone()).unwrap();
        let root = insert_genesis(&factory, HOLESKY.clone()).unwrap();

        let target = Address::from_str("0x4242424242424242424242424242424242424242").unwrap();
        // existent
@@ -439,5 +446,6 @@ mod tests {
        let provider = factory.provider().unwrap();
        let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &slots).unwrap();
        similar_asserts::assert_eq!(account_proof, expected);
        assert_eq!(account_proof.verify(root), Ok(()));
    }
}