mirror of
https://github.com/paradigmxyz/reth.git
synced 2026-02-15 09:25:33 -05:00
chore: rename executor -> blockchain-tree (#2285)
This commit is contained in:
334
crates/blockchain-tree/src/blockchain_tree/block_indices.rs
Normal file
334
crates/blockchain-tree/src/blockchain_tree/block_indices.rs
Normal file
@@ -0,0 +1,334 @@
|
||||
//! Implementation of [`BlockIndices`] related to [`super::BlockchainTree`]
|
||||
|
||||
use super::chain::BlockChainId;
|
||||
use reth_primitives::{BlockHash, BlockNumHash, BlockNumber, SealedBlockWithSenders};
|
||||
use reth_provider::Chain;
|
||||
use std::collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet};
|
||||
|
||||
/// Internal indices of the blocks and chains.
///
/// This is the main connection between blocks, chains and the canonical chain.
///
/// It contains a list of canonical block hashes, forks to child blocks, and a mapping of block hash
/// to chain ID.
#[derive(Debug)]
pub struct BlockIndices {
    /// Last finalized block.
    last_finalized_block: BlockNumber,
    /// Canonical chain. Contains N number (depends on `finalization_depth`) of blocks.
    /// These blocks are found in `fork_to_child` but not inside `blocks_to_chain` or
    /// `index_number_to_block` as those are chain specific indices.
    canonical_chain: BTreeMap<BlockNumber, BlockHash>,
    /// Index needed when discarding a chain, so we can remove connected chains from the tree.
    /// NOTE: Keys are only blocks that are fork points, not all blocks.
    fork_to_child: HashMap<BlockHash, HashSet<BlockHash>>,
    /// Maps a block hash to the ID of the side chain it belongs to.
    blocks_to_chain: HashMap<BlockHash, BlockChainId>,
    /// Utility index mapping a block number to all block hashes at that height.
    /// Can be used by RPC to fetch all pending blocks in a chain by their number.
    index_number_to_block: BTreeMap<BlockNumber, HashSet<BlockHash>>,
}
|
||||
|
||||
impl BlockIndices {
|
||||
/// Create new block indices structure
|
||||
pub fn new(
|
||||
last_finalized_block: BlockNumber,
|
||||
canonical_chain: BTreeMap<BlockNumber, BlockHash>,
|
||||
) -> Self {
|
||||
Self {
|
||||
last_finalized_block,
|
||||
canonical_chain,
|
||||
fork_to_child: Default::default(),
|
||||
blocks_to_chain: Default::default(),
|
||||
index_number_to_block: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
    /// Return the internal index that maps all pending block numbers to their hashes.
    pub fn index_of_number_to_pending_blocks(&self) -> &BTreeMap<BlockNumber, HashSet<BlockHash>> {
        &self.index_number_to_block
    }

    /// Return the fork-to-child index.
    pub fn fork_to_child(&self) -> &HashMap<BlockHash, HashSet<BlockHash>> {
        &self.fork_to_child
    }

    /// Return the block-hash-to-chain-ID index.
    pub fn blocks_to_chain(&self) -> &HashMap<BlockHash, BlockChainId> {
        &self.blocks_to_chain
    }
|
||||
|
||||
/// Return all pending block hashes. Pending blocks are considered blocks
|
||||
/// that are extending that canonical tip by one block number.
|
||||
pub fn pending_blocks(&self) -> (BlockNumber, Vec<BlockHash>) {
|
||||
let canonical_tip = self.canonical_tip();
|
||||
let pending_blocks = self
|
||||
.fork_to_child
|
||||
.get(&canonical_tip.hash)
|
||||
.cloned()
|
||||
.unwrap_or_default()
|
||||
.into_iter()
|
||||
.collect();
|
||||
(canonical_tip.number + 1, pending_blocks)
|
||||
}
|
||||
|
||||
/// Check if block hash belongs to canonical chain.
|
||||
pub fn is_block_hash_canonical(&self, block_hash: &BlockHash) -> bool {
|
||||
self.canonical_chain.range(self.last_finalized_block..).any(|(_, &h)| h == *block_hash)
|
||||
}
|
||||
|
||||
    /// Return the number of the last finalized block.
    pub fn last_finalized_block(&self) -> BlockNumber {
        self.last_finalized_block
    }
|
||||
|
||||
/// Insert non fork block.
|
||||
pub fn insert_non_fork_block(
|
||||
&mut self,
|
||||
block_number: BlockNumber,
|
||||
block_hash: BlockHash,
|
||||
chain_id: BlockChainId,
|
||||
) {
|
||||
self.index_number_to_block.entry(block_number).or_default().insert(block_hash);
|
||||
self.blocks_to_chain.insert(block_hash, chain_id);
|
||||
}
|
||||
|
||||
/// Insert block to chain and fork child indices of the new chain
|
||||
pub fn insert_chain(&mut self, chain_id: BlockChainId, chain: &Chain) {
|
||||
for (number, block) in chain.blocks().iter() {
|
||||
// add block -> chain_id index
|
||||
self.blocks_to_chain.insert(block.hash(), chain_id);
|
||||
// add number -> block
|
||||
self.index_number_to_block.entry(*number).or_default().insert(block.hash());
|
||||
}
|
||||
let first = chain.first();
|
||||
// add parent block -> block index
|
||||
self.fork_to_child.entry(first.parent_hash).or_default().insert(first.hash());
|
||||
}
|
||||
|
||||
/// Get the chain ID the block belongs to
|
||||
pub fn get_blocks_chain_id(&self, block: &BlockHash) -> Option<BlockChainId> {
|
||||
self.blocks_to_chain.get(block).cloned()
|
||||
}
|
||||
|
||||
    /// Update all canonical block hashes. Iterates over the present and the new list of
    /// canonical hashes in lockstep and compares them. Removes all mismatches, disconnects
    /// them from the indices, and returns the IDs of all chains that need to be removed.
    pub fn update_block_hashes(
        &mut self,
        hashes: BTreeMap<u64, BlockHash>,
    ) -> BTreeSet<BlockChainId> {
        let mut new_hashes = hashes.iter();
        let mut old_hashes = self.canonical_chain().clone().into_iter();

        // (number, hash) pairs of the old canonical chain that no longer match.
        let mut remove = Vec::new();

        let mut new_hash = new_hashes.next();
        let mut old_hash = old_hashes.next();

        // Merge-walk both sorted sequences by block number.
        loop {
            let Some(old_block_value) = old_hash else {
                // end of old_hashes canonical chain. New chain has more blocks than old chain.
                break
            };
            let Some(new_block_value) = new_hash else {
                // Old canonical chain had more blocks than new chain.
                // Remove all remaining old blocks.
                // This is mostly not going to happen, as a reorg should make a new chain in the
                // tree.
                while let Some(rem) = old_hash {
                    remove.push(rem);
                    old_hash = old_hashes.next();
                }
                break;
            };
            // compare old and new canonical block number
            match new_block_value.0.cmp(&old_block_value.0) {
                std::cmp::Ordering::Less => {
                    // new chain has more past blocks than old chain; skip ahead on the new side
                    new_hash = new_hashes.next();
                }
                std::cmp::Ordering::Equal => {
                    if *new_block_value.1 != old_block_value.1 {
                        // same height, different hash: the old block was reorged out
                        remove.push(old_block_value);
                    }
                    new_hash = new_hashes.next();
                    old_hash = old_hashes.next();
                }
                std::cmp::Ordering::Greater => {
                    // old chain has more past blocks than new chain; old block is stale
                    remove.push(old_block_value);
                    old_hash = old_hashes.next()
                }
            }
        }
        // Replace the canonical chain wholesale with the new hashes.
        self.canonical_chain = hashes;

        // Disconnect every mismatched block and accumulate the chains that lose their
        // connection to the tree.
        remove.into_iter().fold(BTreeSet::new(), |mut fold, (number, hash)| {
            fold.extend(self.remove_block(number, hash));
            fold
        })
    }
|
||||
|
||||
/// Remove chain from indices and return dependent chains that needs to be removed.
|
||||
/// Does the cleaning of the tree and removing blocks from the chain.
|
||||
pub fn remove_chain(&mut self, chain: &Chain) -> BTreeSet<BlockChainId> {
|
||||
let mut lose_chains = BTreeSet::new();
|
||||
for (block_number, block) in chain.blocks().iter() {
|
||||
let block_hash = block.hash();
|
||||
lose_chains.extend(self.remove_block(*block_number, block_hash))
|
||||
}
|
||||
lose_chains
|
||||
}
|
||||
|
||||
/// Remove Blocks from indices.
|
||||
fn remove_block(
|
||||
&mut self,
|
||||
block_number: BlockNumber,
|
||||
block_hash: BlockHash,
|
||||
) -> BTreeSet<BlockChainId> {
|
||||
// rm number -> block
|
||||
if let btree_map::Entry::Occupied(mut entry) =
|
||||
self.index_number_to_block.entry(block_number)
|
||||
{
|
||||
let set = entry.get_mut();
|
||||
set.remove(&block_hash);
|
||||
// remove set if empty
|
||||
if set.is_empty() {
|
||||
entry.remove();
|
||||
}
|
||||
}
|
||||
|
||||
// rm block -> chain_id
|
||||
self.blocks_to_chain.remove(&block_hash);
|
||||
|
||||
// rm fork -> child
|
||||
let removed_fork = self.fork_to_child.remove(&block_hash);
|
||||
removed_fork
|
||||
.map(|fork_blocks| {
|
||||
fork_blocks
|
||||
.into_iter()
|
||||
.filter_map(|fork_child| self.blocks_to_chain.remove(&fork_child))
|
||||
.collect()
|
||||
})
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
    /// Remove all blocks from the canonical list above the first given block, and insert
    /// the new blocks in their place.
    ///
    /// It is assumed that the blocks are interconnected and that they connect to the
    /// canonical chain.
    pub fn canonicalize_blocks(&mut self, blocks: &BTreeMap<BlockNumber, SealedBlockWithSenders>) {
        if blocks.is_empty() {
            return
        }

        // Remove all blocks from canonical chain
        let first_number = *blocks.first_key_value().unwrap().0;

        // this will remove all block numbers that are going to be replaced.
        self.canonical_chain.retain(|num, _| *num < first_number);

        // remove the newly canonical blocks from the side-chain indices
        blocks.iter().map(|(_, b)| (b.number, b.hash(), b.parent_hash)).for_each(
            |(number, hash, parent_hash)| {
                // rm block -> chain_id
                self.blocks_to_chain.remove(&hash);

                // rm number -> block (drop the set once empty)
                if let btree_map::Entry::Occupied(mut entry) =
                    self.index_number_to_block.entry(number)
                {
                    let set = entry.get_mut();
                    set.remove(&hash);
                    // remove set if empty
                    if set.is_empty() {
                        entry.remove();
                    }
                }
                // rm fork block -> hash (drop the set once empty)
                if let hash_map::Entry::Occupied(mut entry) = self.fork_to_child.entry(parent_hash)
                {
                    let set = entry.get_mut();
                    set.remove(&hash);
                    // remove set if empty
                    if set.is_empty() {
                        entry.remove();
                    }
                }
            },
        );

        // insert the new blocks as the canonical tip
        self.canonical_chain.extend(blocks.iter().map(|(number, block)| (*number, block.hash())))
    }
|
||||
|
||||
    /// Remove the last N canonical hashes, keeping only blocks at or below `unwind_to`.
    ///
    /// NOTE: This is not safe standalone, as it will not disconnect
    /// blocks that depend on the unwound canonical chain. It should only be
    /// used when the canonical chain is reinserted inside the tree.
    pub(crate) fn unwind_canonical_chain(&mut self, unwind_to: BlockNumber) {
        // this will remove all block numbers that are going to be replaced.
        self.canonical_chain.retain(|num, _| *num <= unwind_to);
    }
|
||||
|
||||
    /// Used for finalization of a block.
    ///
    /// Returns the IDs of chains that depend on the newly finalized canonical blocks and
    /// must therefore be removed.
    pub fn finalize_canonical_blocks(
        &mut self,
        finalized_block: BlockNumber,
        num_of_additional_canonical_hashes_to_retain: u64,
    ) -> BTreeSet<BlockChainId> {
        // get finalized chains: blocks in the range [self.last_finalized_block, finalized_block).
        // Don't remove finalized_block itself, as a sidechain can point to it.
        let finalized_blocks: Vec<BlockHash> = self
            .canonical_chain
            .iter()
            .filter(|(&number, _)| number >= self.last_finalized_block && number < finalized_block)
            .map(|(_, hash)| *hash)
            .collect();

        // remove unneeded canonical hashes, keeping extra ones (e.g. for the EVM BLOCKHASH
        // opcode) below the finalized block.
        let remove_until =
            finalized_block.saturating_sub(num_of_additional_canonical_hashes_to_retain);
        self.canonical_chain.retain(|&number, _| number >= remove_until);

        let mut lose_chains = BTreeSet::new();

        for block_hash in finalized_blocks.into_iter() {
            // if the finalized block was a fork point, chains forking off it are now lost.
            if let Some(fork_blocks) = self.fork_to_child.remove(&block_hash) {
                lose_chains = fork_blocks.into_iter().fold(lose_chains, |mut fold, fork_child| {
                    if let Some(lose_chain) = self.blocks_to_chain.remove(&fork_child) {
                        fold.insert(lose_chain);
                    }
                    fold
                });
            }
        }

        // set last finalized block.
        self.last_finalized_block = finalized_block;

        lose_chains
    }
|
||||
|
||||
    /// Get the canonical block hash at the given block number, if it is still tracked.
    pub fn canonical_hash(&self, block_number: &BlockNumber) -> Option<BlockHash> {
        self.canonical_chain.get(block_number).cloned()
    }

    /// Get the number and hash of the canonical chain tip.
    /// Returns the default (zero) `BlockNumHash` if the canonical chain is empty.
    pub fn canonical_tip(&self) -> BlockNumHash {
        self.canonical_chain
            .last_key_value()
            .map(|(&number, &hash)| BlockNumHash { number, hash })
            .unwrap_or_default()
    }

    /// Canonical chain needed for execution of the EVM. It should contain the last 256
    /// block hashes.
    pub fn canonical_chain(&self) -> &BTreeMap<BlockNumber, BlockHash> {
        &self.canonical_chain
    }
|
||||
}
|
||||
217
crates/blockchain-tree/src/blockchain_tree/chain.rs
Normal file
217
crates/blockchain-tree/src/blockchain_tree/chain.rs
Normal file
@@ -0,0 +1,217 @@
|
||||
//! A chain in a [`BlockchainTree`][super::BlockchainTree].
|
||||
//!
|
||||
//! A [`Chain`] contains the state of accounts for the chain after execution of its constituent
|
||||
//! blocks, as well as a list of the blocks the chain is composed of.
|
||||
use crate::{blockchain_tree::PostStateDataRef, post_state::PostState};
|
||||
use reth_db::database::Database;
|
||||
use reth_interfaces::{consensus::Consensus, executor::Error as ExecError, Error};
|
||||
use reth_primitives::{
|
||||
BlockHash, BlockNumber, ForkBlock, SealedBlockWithSenders, SealedHeader, U256,
|
||||
};
|
||||
use reth_provider::{
|
||||
providers::PostStateProvider, BlockExecutor, Chain, ExecutorFactory, PostStateDataProvider,
|
||||
StateProviderFactory,
|
||||
};
|
||||
use std::{
|
||||
collections::BTreeMap,
|
||||
ops::{Deref, DerefMut},
|
||||
};
|
||||
|
||||
use super::externals::TreeExternals;
|
||||
|
||||
/// The ID of a sidechain internally in a [`BlockchainTree`][super::BlockchainTree].
pub(crate) type BlockChainId = u64;

/// A chain in the blockchain tree that has functionality to execute blocks and append them
/// to itself.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct AppendableChain {
    /// The wrapped chain: its blocks and the state after their execution.
    chain: Chain,
}
|
||||
|
||||
// Deref/DerefMut let an `AppendableChain` be used wherever a `Chain` is expected.
impl Deref for AppendableChain {
    type Target = Chain;

    fn deref(&self) -> &Self::Target {
        &self.chain
    }
}

impl DerefMut for AppendableChain {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.chain
    }
}
|
||||
|
||||
impl AppendableChain {
|
||||
    /// Create a new appendable chain from a given chain.
    pub fn new(chain: Chain) -> Self {
        Self { chain }
    }

    /// Consume the wrapper and return the inner chain.
    pub fn into_inner(self) -> Chain {
        self.chain
    }
|
||||
|
||||
    /// Create a new chain that forks off of the canonical chain.
    ///
    /// The block is validated and executed on top of the canonical state at
    /// `canonical_fork`; on success a fresh single-block chain is returned.
    pub fn new_canonical_fork<DB, C, EF>(
        block: &SealedBlockWithSenders,
        parent_header: &SealedHeader,
        canonical_block_hashes: &BTreeMap<BlockNumber, BlockHash>,
        canonical_fork: ForkBlock,
        externals: &TreeExternals<DB, C, EF>,
    ) -> Result<Self, Error>
    where
        DB: Database,
        C: Consensus,
        EF: ExecutorFactory,
    {
        // A canonical fork starts from an empty post state and no sidechain hashes.
        let state = PostState::default();
        let empty = BTreeMap::new();

        let state_provider = PostStateDataRef {
            state: &state,
            sidechain_block_hashes: &empty,
            canonical_block_hashes,
            canonical_fork,
        };

        let changeset = Self::validate_and_execute(
            block.clone(),
            parent_header,
            canonical_fork,
            state_provider,
            externals,
        )?;

        Ok(Self { chain: Chain::new(vec![(block.clone(), changeset)]) })
    }
|
||||
|
||||
    /// Create a new chain that forks off of an existing sidechain.
    ///
    /// Reverts this chain's state back to the parent block, executes the new block on
    /// top of it, and returns the resulting single-block chain. `self` is not modified.
    pub fn new_chain_fork<DB, C, EF>(
        &self,
        block: SealedBlockWithSenders,
        side_chain_block_hashes: BTreeMap<BlockNumber, BlockHash>,
        canonical_block_hashes: &BTreeMap<BlockNumber, BlockHash>,
        canonical_fork: ForkBlock,
        externals: &TreeExternals<DB, C, EF>,
    ) -> Result<Self, Error>
    where
        DB: Database,
        C: Consensus,
        EF: ExecutorFactory,
    {
        let parent_number = block.number - 1;
        let parent = self
            .blocks()
            .get(&parent_number)
            .ok_or(ExecError::BlockNumberNotFoundInChain { block_number: parent_number })?;

        let revert_to_transition_id = self
            .block_transitions()
            .get(&parent.number)
            .expect("Should have the transition ID for the parent block");
        let mut state = self.chain.state().clone();

        // Revert state to the state after execution of the parent block
        state.revert_to(*revert_to_transition_id);

        // Revert changesets to get the state of the parent that we need to apply the change.
        let post_state_data = PostStateDataRef {
            state: &state,
            sidechain_block_hashes: &side_chain_block_hashes,
            canonical_block_hashes,
            canonical_fork,
        };
        let block_state = Self::validate_and_execute(
            block.clone(),
            parent,
            canonical_fork,
            post_state_data,
            externals,
        )?;
        state.extend(block_state);

        // Build the new single-block chain with its transition bookkeeping.
        let chain = Self {
            chain: Chain {
                block_transitions: BTreeMap::from([(block.number, state.transitions_count())]),
                state,
                blocks: BTreeMap::from([(block.number, block)]),
            },
        };

        // If all is okay, return the new chain. The present chain is not modified.
        Ok(chain)
    }
|
||||
|
||||
    /// Validate and execute the given block.
    ///
    /// Runs the consensus checks (header validation, pre-validation against the parent,
    /// block pre-validation) and then executes the block against the historical state at
    /// the canonical fork point, layered with the given post-state data provider.
    /// Returns the resulting [`PostState`] on success.
    fn validate_and_execute<PSDP, DB, C, EF>(
        block: SealedBlockWithSenders,
        parent_block: &SealedHeader,
        canonical_fork: ForkBlock,
        post_state_data_provider: PSDP,
        externals: &TreeExternals<DB, C, EF>,
    ) -> Result<PostState, Error>
    where
        PSDP: PostStateDataProvider,
        DB: Database,
        C: Consensus,
        EF: ExecutorFactory,
    {
        externals.consensus.validate_header(&block, U256::MAX)?;
        externals.consensus.pre_validate_header(&block, parent_block)?;
        externals.consensus.pre_validate_block(&block)?;

        let (unseal, senders) = block.into_components();
        let unseal = unseal.unseal();

        // get the state provider.
        let db = externals.shareable_db();
        // TODO: small perf gain possible — check whether the canonical fork is the latest state.
        let history_provider = db.history_by_block_number(canonical_fork.number)?;
        let state_provider = history_provider;

        let provider = PostStateProvider { state_provider, post_state_data_provider };

        let mut executor = externals.executor_factory.with_sp(&provider);
        executor.execute_and_verify_receipt(&unseal, U256::MAX, Some(senders)).map_err(Into::into)
    }
|
||||
|
||||
    /// Validate and execute the given block, and append it to this chain.
    ///
    /// The block is validated against the current tip of this chain; on success the
    /// chain's state, block map and transition bookkeeping are extended in place.
    pub fn append_block<DB, C, EF>(
        &mut self,
        block: SealedBlockWithSenders,
        side_chain_block_hashes: BTreeMap<BlockNumber, BlockHash>,
        canonical_block_hashes: &BTreeMap<BlockNumber, BlockHash>,
        canonical_fork: ForkBlock,
        externals: &TreeExternals<DB, C, EF>,
    ) -> Result<(), Error>
    where
        DB: Database,
        C: Consensus,
        EF: ExecutorFactory,
    {
        // The parent of the new block is this chain's current tip.
        let (_, parent_block) = self.blocks.last_key_value().expect("Chain has at least one block");

        let post_state_data = PostStateDataRef {
            state: &self.state,
            sidechain_block_hashes: &side_chain_block_hashes,
            canonical_block_hashes,
            canonical_fork,
        };

        let block_state = Self::validate_and_execute(
            block.clone(),
            parent_block,
            canonical_fork,
            post_state_data,
            externals,
        )?;
        // Merge the new block's state and record its transition count and the block itself.
        self.state.extend(block_state);
        let transition_count = self.state.transitions_count();
        self.block_transitions.insert(block.number, transition_count);
        self.blocks.insert(block.number, block);
        Ok(())
    }
|
||||
}
|
||||
60
crates/blockchain-tree/src/blockchain_tree/config.rs
Normal file
60
crates/blockchain-tree/src/blockchain_tree/config.rs
Normal file
@@ -0,0 +1,60 @@
|
||||
//! Blockchain tree configuration
|
||||
|
||||
/// The configuration for the blockchain tree.
#[derive(Clone, Debug)]
pub struct BlockchainTreeConfig {
    /// Number of blocks after the last finalized block that we are storing.
    ///
    /// It should be more than the finalization window for the canonical chain.
    max_blocks_in_chain: u64,
    /// The number of blocks that can be re-orged (the finalization window).
    max_reorg_depth: u64,
    /// For the EVM's `BLOCKHASH` opcode we require the last 256 block hashes, so we need to
    /// retain at least `num_of_additional_canonical_block_hashes` + `max_reorg_depth` hashes;
    /// for Ethereum that would be 256 + 64.
    num_of_additional_canonical_block_hashes: u64,
}

impl Default for BlockchainTreeConfig {
    fn default() -> Self {
        // The defaults for Ethereum mainnet
        Self {
            // Gasper allows reorgs of any length from 1 to 64.
            max_reorg_depth: 64,
            // This default is just an assumption. Has to be greater than the `max_reorg_depth`.
            max_blocks_in_chain: 65,
            // The EVM requires that the last 256 block hashes are available.
            num_of_additional_canonical_block_hashes: 256,
        }
    }
}

impl BlockchainTreeConfig {
    /// Create tree configuration.
    ///
    /// # Panics
    ///
    /// Panics if `max_reorg_depth` is greater than `max_blocks_in_chain`: a side chain must
    /// be able to span at least one full finalization window.
    pub fn new(
        max_reorg_depth: u64,
        max_blocks_in_chain: u64,
        num_of_additional_canonical_block_hashes: u64,
    ) -> Self {
        // `assert!` is the idiomatic form of the original `if ... { panic!(...) }` guard.
        assert!(
            max_reorg_depth <= max_blocks_in_chain,
            "Side chain size should be more than the finalization window"
        );
        Self { max_blocks_in_chain, max_reorg_depth, num_of_additional_canonical_block_hashes }
    }

    /// Return the maximum reorg depth.
    pub fn max_reorg_depth(&self) -> u64 {
        self.max_reorg_depth
    }

    /// Return the maximum number of blocks in one chain.
    pub fn max_blocks_in_chain(&self) -> u64 {
        self.max_blocks_in_chain
    }

    /// Return the number of additional canonical block hashes that we need to retain
    /// in order to have enough information for EVM execution.
    pub fn num_of_additional_canonical_block_hashes(&self) -> u64 {
        self.num_of_additional_canonical_block_hashes
    }
}
|
||||
41
crates/blockchain-tree/src/blockchain_tree/externals.rs
Normal file
41
crates/blockchain-tree/src/blockchain_tree/externals.rs
Normal file
@@ -0,0 +1,41 @@
|
||||
//! Blockchain tree externals.
|
||||
|
||||
use reth_db::database::Database;
|
||||
use reth_primitives::ChainSpec;
|
||||
use reth_provider::ShareableDatabase;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// A container for external components.
///
/// This is a simple container for external components used throughout the blockchain tree
/// implementation:
///
/// - A handle to the database
/// - A handle to the consensus engine
/// - The executor factory to execute blocks with
/// - The chain spec
#[derive(Debug)]
pub struct TreeExternals<DB, C, EF> {
    /// The database, used to commit the canonical chain, or unwind it.
    pub db: DB,
    /// The consensus engine.
    pub consensus: C,
    /// The executor factory to execute blocks with.
    pub executor_factory: EF,
    /// The chain spec.
    pub chain_spec: Arc<ChainSpec>,
}
|
||||
|
||||
impl<DB, C, EF> TreeExternals<DB, C, EF> {
    /// Create new tree externals.
    pub fn new(db: DB, consensus: C, executor_factory: EF, chain_spec: Arc<ChainSpec>) -> Self {
        Self { db, consensus, executor_factory, chain_spec }
    }
}

impl<DB: Database, C, EF> TreeExternals<DB, C, EF> {
    /// Return a shareable database helper structure that borrows the underlying database.
    pub fn shareable_db(&self) -> ShareableDatabase<&DB> {
        ShareableDatabase::new(&self.db, self.chain_spec.clone())
    }
}
|
||||
1079
crates/blockchain-tree/src/blockchain_tree/mod.rs
Normal file
1079
crates/blockchain-tree/src/blockchain_tree/mod.rs
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,64 @@
|
||||
//! Substate for blockchain trees
|
||||
|
||||
use reth_primitives::{BlockHash, BlockNumber, ForkBlock};
|
||||
use reth_provider::{post_state::PostState, PostStateDataProvider};
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
/// Structure that bundles references to the data needed to implement
/// [`PostStateDataProvider`].
#[derive(Clone, Debug)]
pub struct PostStateDataRef<'a> {
    /// The wrapped state after execution of one or more transactions and/or blocks.
    pub state: &'a PostState,
    /// The blocks in the sidechain.
    pub sidechain_block_hashes: &'a BTreeMap<BlockNumber, BlockHash>,
    /// The blocks in the canonical chain.
    pub canonical_block_hashes: &'a BTreeMap<BlockNumber, BlockHash>,
    /// The canonical block the sidechain forked from.
    pub canonical_fork: ForkBlock,
}
|
||||
|
||||
impl<'a> PostStateDataProvider for PostStateDataRef<'a> {
|
||||
fn state(&self) -> &PostState {
|
||||
self.state
|
||||
}
|
||||
|
||||
fn block_hash(&self, block_number: BlockNumber) -> Option<BlockHash> {
|
||||
let block_hash = self.sidechain_block_hashes.get(&block_number).cloned();
|
||||
if block_hash.is_some() {
|
||||
return block_hash
|
||||
}
|
||||
|
||||
self.canonical_block_hashes.get(&block_number).cloned()
|
||||
}
|
||||
|
||||
fn canonical_fork(&self) -> ForkBlock {
|
||||
self.canonical_fork
|
||||
}
|
||||
}
|
||||
|
||||
/// Structure that contains the data needed to implement [`PostStateDataProvider`].
#[derive(Clone, Debug)]
pub struct PostStateData {
    /// Post state with changes
    pub state: PostState,
    /// Parent block hashes needed for the EVM `BLOCKHASH` opcode.
    /// NOTE: it does not mean that all hashes are there, but all until the finalized block are.
    /// Other hashes can be obtained from the provider.
    // NOTE(review): the field name looks like a typo for `parent_block_hashes`; kept as-is
    // because it is public API.
    pub parent_block_hashed: BTreeMap<BlockNumber, BlockHash>,
    /// Canonical block where the state forked from.
    pub canonical_fork: ForkBlock,
}
|
||||
|
||||
impl PostStateDataProvider for PostStateData {
    fn state(&self) -> &PostState {
        &self.state
    }

    /// The owned variant only knows the parent block hashes it was constructed with.
    fn block_hash(&self, block_number: BlockNumber) -> Option<BlockHash> {
        self.parent_block_hashed.get(&block_number).cloned()
    }

    fn canonical_fork(&self) -> ForkBlock {
        self.canonical_fork
    }
}
|
||||
115
crates/blockchain-tree/src/blockchain_tree/shareable.rs
Normal file
115
crates/blockchain-tree/src/blockchain_tree/shareable.rs
Normal file
@@ -0,0 +1,115 @@
|
||||
//! Wrapper around BlockchainTree that allows for it to be shared.
|
||||
use parking_lot::RwLock;
|
||||
use reth_db::database::Database;
|
||||
use reth_interfaces::{
|
||||
blockchain_tree::{BlockStatus, BlockchainTreeEngine, BlockchainTreeViewer},
|
||||
consensus::Consensus,
|
||||
provider::ProviderError,
|
||||
Error,
|
||||
};
|
||||
use reth_primitives::{BlockHash, BlockNumHash, BlockNumber, SealedBlock, SealedBlockWithSenders};
|
||||
use reth_provider::{
|
||||
BlockchainTreePendingStateProvider, CanonStateSubscriptions, ExecutorFactory,
|
||||
PostStateDataProvider,
|
||||
};
|
||||
use std::{
|
||||
collections::{BTreeMap, HashSet},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use super::BlockchainTree;
|
||||
|
||||
/// Shareable blockchain tree that is behind a `parking_lot::RwLock`.
#[derive(Clone)]
pub struct ShareableBlockchainTree<DB: Database, C: Consensus, EF: ExecutorFactory> {
    /// The wrapped blockchain tree, shared via `Arc` and guarded by the lock.
    pub tree: Arc<RwLock<BlockchainTree<DB, C, EF>>>,
}
|
||||
|
||||
impl<DB: Database, C: Consensus, EF: ExecutorFactory> ShareableBlockchainTree<DB, C, EF> {
    /// Create a new shareable blockchain tree, wrapping the given tree in an `Arc<RwLock<_>>`.
    pub fn new(tree: BlockchainTree<DB, C, EF>) -> Self {
        Self { tree: Arc::new(RwLock::new(tree)) }
    }
}
|
||||
|
||||
impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTreeEngine
    for ShareableBlockchainTree<DB, C, EF>
{
    // All mutating operations delegate to the inner tree under the write lock.
    fn insert_block_with_senders(
        &self,
        block: SealedBlockWithSenders,
    ) -> Result<BlockStatus, Error> {
        self.tree.write().insert_block_with_senders(block)
    }

    fn finalize_block(&self, finalized_block: BlockNumber) {
        self.tree.write().finalize_block(finalized_block)
    }

    fn restore_canonical_hashes(&self, last_finalized_block: BlockNumber) -> Result<(), Error> {
        self.tree.write().restore_canonical_hashes(last_finalized_block)
    }

    fn make_canonical(&self, block_hash: &BlockHash) -> Result<(), Error> {
        self.tree.write().make_canonical(block_hash)
    }

    fn unwind(&self, unwind_to: BlockNumber) -> Result<(), Error> {
        self.tree.write().unwind(unwind_to)
    }
}
|
||||
|
||||
impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTreeViewer
    for ShareableBlockchainTree<DB, C, EF>
{
    // Read-only accessors take the read lock and clone data out of the inner tree's
    // indices so the lock is not held by the caller.
    fn blocks(&self) -> BTreeMap<BlockNumber, HashSet<BlockHash>> {
        self.tree.read().block_indices().index_of_number_to_pending_blocks().clone()
    }

    fn block_by_hash(&self, block_hash: BlockHash) -> Option<SealedBlock> {
        self.tree.read().block_by_hash(block_hash).cloned()
    }

    fn canonical_blocks(&self) -> BTreeMap<BlockNumber, BlockHash> {
        self.tree.read().block_indices().canonical_chain().clone()
    }

    fn canonical_tip(&self) -> BlockNumHash {
        self.tree.read().block_indices().canonical_tip()
    }

    fn pending_blocks(&self) -> (BlockNumber, Vec<BlockHash>) {
        self.tree.read().block_indices().pending_blocks()
    }

    fn pending_block(&self) -> Option<BlockNumHash> {
        // Pending blocks are the children of the canonical tip; pick the first one, if any.
        let (number, blocks) = self.tree.read().block_indices().pending_blocks();
        blocks.first().map(|&hash| BlockNumHash { number, hash })
    }
}
|
||||
|
||||
impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTreePendingStateProvider
|
||||
for ShareableBlockchainTree<DB, C, EF>
|
||||
{
|
||||
fn pending_state_provider(
|
||||
&self,
|
||||
block_hash: BlockHash,
|
||||
) -> Result<Box<dyn PostStateDataProvider>, Error> {
|
||||
let post_state = self
|
||||
.tree
|
||||
.read()
|
||||
.post_state_data(block_hash)
|
||||
.ok_or(ProviderError::UnknownBlockHash(block_hash))
|
||||
.map(Box::new)?;
|
||||
Ok(Box::new(post_state))
|
||||
}
|
||||
}
|
||||
|
||||
impl<DB: Database, C: Consensus, EF: ExecutorFactory> CanonStateSubscriptions
    for ShareableBlockchainTree<DB, C, EF>
{
    /// Subscribe to canonical state change notifications emitted by the inner tree.
    fn subscribe_canon_state(&self) -> reth_provider::CanonStateNotifications {
        self.tree.read().subscribe_canon_state()
    }
}
|
||||
126
crates/blockchain-tree/src/eth_dao_fork.rs
Normal file
126
crates/blockchain-tree/src/eth_dao_fork.rs
Normal file
@@ -0,0 +1,126 @@
|
||||
//! DAO Fork related constants from [EIP-779](https://eips.ethereum.org/EIPS/eip-779).
//! The fork happened on Ethereum block 1_920_000.
|
||||
use reth_primitives::{hex_literal::hex, H160};
|
||||
|
||||
/// DAO hardfork beneficiary that received the ether drained from the DAO and DAO-creator
/// child accounts.
pub static DAO_HARDFORK_BENEFICIARY: H160 = H160(hex!("bf4ed7b27f1d666546e30d74d50d173d20bca754"));
|
||||
|
||||
/// DAO hardfork accounts whose ether was taken and credited to the beneficiary.
///
/// NOTE(review): the identifier spells "HARDKFORK" (sic); it is public API, so
/// renaming it here would break callers — the typo is kept deliberately.
pub static DAO_HARDKFORK_ACCOUNTS: [H160; 116] = [
    H160(hex!("d4fe7bc31cedb7bfb8a345f31e668033056b2728")),
    H160(hex!("b3fb0e5aba0e20e5c49d252dfd30e102b171a425")),
    H160(hex!("2c19c7f9ae8b751e37aeb2d93a699722395ae18f")),
    H160(hex!("ecd135fa4f61a655311e86238c92adcd779555d2")),
    H160(hex!("1975bd06d486162d5dc297798dfc41edd5d160a7")),
    H160(hex!("a3acf3a1e16b1d7c315e23510fdd7847b48234f6")),
    H160(hex!("319f70bab6845585f412ec7724b744fec6095c85")),
    H160(hex!("06706dd3f2c9abf0a21ddcc6941d9b86f0596936")),
    H160(hex!("5c8536898fbb74fc7445814902fd08422eac56d0")),
    H160(hex!("6966ab0d485353095148a2155858910e0965b6f9")),
    H160(hex!("779543a0491a837ca36ce8c635d6154e3c4911a6")),
    H160(hex!("2a5ed960395e2a49b1c758cef4aa15213cfd874c")),
    H160(hex!("5c6e67ccd5849c0d29219c4f95f1a7a93b3f5dc5")),
    H160(hex!("9c50426be05db97f5d64fc54bf89eff947f0a321")),
    H160(hex!("200450f06520bdd6c527622a273333384d870efb")),
    H160(hex!("be8539bfe837b67d1282b2b1d61c3f723966f049")),
    H160(hex!("6b0c4d41ba9ab8d8cfb5d379c69a612f2ced8ecb")),
    H160(hex!("f1385fb24aad0cd7432824085e42aff90886fef5")),
    H160(hex!("d1ac8b1ef1b69ff51d1d401a476e7e612414f091")),
    H160(hex!("8163e7fb499e90f8544ea62bbf80d21cd26d9efd")),
    H160(hex!("51e0ddd9998364a2eb38588679f0d2c42653e4a6")),
    H160(hex!("627a0a960c079c21c34f7612d5d230e01b4ad4c7")),
    H160(hex!("f0b1aa0eb660754448a7937c022e30aa692fe0c5")),
    H160(hex!("24c4d950dfd4dd1902bbed3508144a54542bba94")),
    H160(hex!("9f27daea7aca0aa0446220b98d028715e3bc803d")),
    H160(hex!("a5dc5acd6a7968a4554d89d65e59b7fd3bff0f90")),
    H160(hex!("d9aef3a1e38a39c16b31d1ace71bca8ef58d315b")),
    H160(hex!("63ed5a272de2f6d968408b4acb9024f4cc208ebf")),
    H160(hex!("6f6704e5a10332af6672e50b3d9754dc460dfa4d")),
    H160(hex!("77ca7b50b6cd7e2f3fa008e24ab793fd56cb15f6")),
    H160(hex!("492ea3bb0f3315521c31f273e565b868fc090f17")),
    H160(hex!("0ff30d6de14a8224aa97b78aea5388d1c51c1f00")),
    H160(hex!("9ea779f907f0b315b364b0cfc39a0fde5b02a416")),
    H160(hex!("ceaeb481747ca6c540a000c1f3641f8cef161fa7")),
    H160(hex!("cc34673c6c40e791051898567a1222daf90be287")),
    H160(hex!("579a80d909f346fbfb1189493f521d7f48d52238")),
    H160(hex!("e308bd1ac5fda103967359b2712dd89deffb7973")),
    H160(hex!("4cb31628079fb14e4bc3cd5e30c2f7489b00960c")),
    H160(hex!("ac1ecab32727358dba8962a0f3b261731aad9723")),
    H160(hex!("4fd6ace747f06ece9c49699c7cabc62d02211f75")),
    H160(hex!("440c59b325d2997a134c2c7c60a8c61611212bad")),
    H160(hex!("4486a3d68fac6967006d7a517b889fd3f98c102b")),
    H160(hex!("9c15b54878ba618f494b38f0ae7443db6af648ba")),
    H160(hex!("27b137a85656544b1ccb5a0f2e561a5703c6a68f")),
    H160(hex!("21c7fdb9ed8d291d79ffd82eb2c4356ec0d81241")),
    H160(hex!("23b75c2f6791eef49c69684db4c6c1f93bf49a50")),
    H160(hex!("1ca6abd14d30affe533b24d7a21bff4c2d5e1f3b")),
    H160(hex!("b9637156d330c0d605a791f1c31ba5890582fe1c")),
    H160(hex!("6131c42fa982e56929107413a9d526fd99405560")),
    H160(hex!("1591fc0f688c81fbeb17f5426a162a7024d430c2")),
    H160(hex!("542a9515200d14b68e934e9830d91645a980dd7a")),
    H160(hex!("c4bbd073882dd2add2424cf47d35213405b01324")),
    H160(hex!("782495b7b3355efb2833d56ecb34dc22ad7dfcc4")),
    H160(hex!("58b95c9a9d5d26825e70a82b6adb139d3fd829eb")),
    H160(hex!("3ba4d81db016dc2890c81f3acec2454bff5aada5")),
    H160(hex!("b52042c8ca3f8aa246fa79c3feaa3d959347c0ab")),
    H160(hex!("e4ae1efdfc53b73893af49113d8694a057b9c0d1")),
    H160(hex!("3c02a7bc0391e86d91b7d144e61c2c01a25a79c5")),
    H160(hex!("0737a6b837f97f46ebade41b9bc3e1c509c85c53")),
    H160(hex!("97f43a37f595ab5dd318fb46e7a155eae057317a")),
    H160(hex!("52c5317c848ba20c7504cb2c8052abd1fde29d03")),
    H160(hex!("4863226780fe7c0356454236d3b1c8792785748d")),
    H160(hex!("5d2b2e6fcbe3b11d26b525e085ff818dae332479")),
    H160(hex!("5f9f3392e9f62f63b8eac0beb55541fc8627f42c")),
    H160(hex!("057b56736d32b86616a10f619859c6cd6f59092a")),
    H160(hex!("9aa008f65de0b923a2a4f02012ad034a5e2e2192")),
    H160(hex!("304a554a310c7e546dfe434669c62820b7d83490")),
    H160(hex!("914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79")),
    H160(hex!("4deb0033bb26bc534b197e61d19e0733e5679784")),
    H160(hex!("07f5c1e1bc2c93e0402f23341973a0e043f7bf8a")),
    H160(hex!("35a051a0010aba705c9008d7a7eff6fb88f6ea7b")),
    H160(hex!("4fa802324e929786dbda3b8820dc7834e9134a2a")),
    H160(hex!("9da397b9e80755301a3b32173283a91c0ef6c87e")),
    H160(hex!("8d9edb3054ce5c5774a420ac37ebae0ac02343c6")),
    H160(hex!("0101f3be8ebb4bbd39a2e3b9a3639d4259832fd9")),
    H160(hex!("5dc28b15dffed94048d73806ce4b7a4612a1d48f")),
    H160(hex!("bcf899e6c7d9d5a215ab1e3444c86806fa854c76")),
    H160(hex!("12e626b0eebfe86a56d633b9864e389b45dcb260")),
    H160(hex!("a2f1ccba9395d7fcb155bba8bc92db9bafaeade7")),
    H160(hex!("ec8e57756626fdc07c63ad2eafbd28d08e7b0ca5")),
    H160(hex!("d164b088bd9108b60d0ca3751da4bceb207b0782")),
    H160(hex!("6231b6d0d5e77fe001c2a460bd9584fee60d409b")),
    H160(hex!("1cba23d343a983e9b5cfd19496b9a9701ada385f")),
    H160(hex!("a82f360a8d3455c5c41366975bde739c37bfeb8a")),
    H160(hex!("9fcd2deaff372a39cc679d5c5e4de7bafb0b1339")),
    H160(hex!("005f5cee7a43331d5a3d3eec71305925a62f34b6")),
    H160(hex!("0e0da70933f4c7849fc0d203f5d1d43b9ae4532d")),
    H160(hex!("d131637d5275fd1a68a3200f4ad25c71a2a9522e")),
    H160(hex!("bc07118b9ac290e4622f5e77a0853539789effbe")),
    H160(hex!("47e7aa56d6bdf3f36be34619660de61275420af8")),
    H160(hex!("acd87e28b0c9d1254e868b81cba4cc20d9a32225")),
    H160(hex!("adf80daec7ba8dcf15392f1ac611fff65d94f880")),
    H160(hex!("5524c55fb03cf21f549444ccbecb664d0acad706")),
    H160(hex!("40b803a9abce16f50f36a77ba41180eb90023925")),
    H160(hex!("fe24cdd8648121a43a7c86d289be4dd2951ed49f")),
    H160(hex!("17802f43a0137c506ba92291391a8a8f207f487d")),
    H160(hex!("253488078a4edf4d6f42f113d1e62836a942cf1a")),
    H160(hex!("86af3e9626fce1957c82e88cbf04ddf3a2ed7915")),
    H160(hex!("b136707642a4ea12fb4bae820f03d2562ebff487")),
    H160(hex!("dbe9b615a3ae8709af8b93336ce9b477e4ac0940")),
    H160(hex!("f14c14075d6c4ed84b86798af0956deef67365b5")),
    H160(hex!("ca544e5c4687d109611d0f8f928b53a25af72448")),
    H160(hex!("aeeb8ff27288bdabc0fa5ebb731b6f409507516c")),
    H160(hex!("cbb9d3703e651b0d496cdefb8b92c25aeb2171f7")),
    H160(hex!("6d87578288b6cb5549d5076a207456a1f6a63dc0")),
    H160(hex!("b2c6f0dfbb716ac562e2d85d6cb2f8d5ee87603e")),
    H160(hex!("accc230e8a6e5be9160b8cdf2864dd2a001c28b6")),
    H160(hex!("2b3455ec7fedf16e646268bf88846bd7a2319bb2")),
    H160(hex!("4613f3bca5c44ea06337a9e439fbc6d42e501d0a")),
    H160(hex!("d343b217de44030afaa275f54d31a9317c7f441e")),
    H160(hex!("84ef4b2357079cd7a7c69fd7a37cd0609a679106")),
    H160(hex!("da2fef9e4a3230988ff17df2165440f37e8b1708")),
    H160(hex!("f4c64518ea10f995918a454158c6b61407ea345c")),
    H160(hex!("7602b46df5390e432ef1c307d4f2c9ff6d65cc97")),
    H160(hex!("bb9bc244d798123fde783fcc1c72d3bb8c189413")),
    H160(hex!("807640a13483f8ac783c557fcdf27be11ea4ac7a")),
];
|
||||
20
crates/blockchain-tree/src/lib.rs
Normal file
20
crates/blockchain-tree/src/lib.rs
Normal file
@@ -0,0 +1,20 @@
|
||||
#![warn(missing_docs, unreachable_pub)]
|
||||
#![deny(unused_must_use, rust_2018_idioms)]
|
||||
#![doc(test(
|
||||
no_crate_inject,
|
||||
attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
|
||||
))]
|
||||
|
||||
//! Reth blockchain tree implementation.
|
||||
|
||||
pub mod eth_dao_fork;
|
||||
|
||||
/// Execution result types.
|
||||
pub use reth_provider::post_state;
|
||||
|
||||
pub mod blockchain_tree;
|
||||
pub use blockchain_tree::*;
|
||||
|
||||
#[cfg(any(test, feature = "test-utils"))]
|
||||
/// Common test helpers for mocking out executor and executor factory
|
||||
pub mod test_utils;
|
||||
26
crates/blockchain-tree/src/test_utils/executor.rs
Normal file
26
crates/blockchain-tree/src/test_utils/executor.rs
Normal file
@@ -0,0 +1,26 @@
|
||||
use reth_interfaces::executor::Error as ExecutionError;
|
||||
use reth_primitives::{Address, Block, U256};
|
||||
use reth_provider::{post_state::PostState, BlockExecutor, StateProvider};
|
||||
|
||||
/// Test executor with mocked result.
///
/// Wraps the `PostState` that every execution call returns; `None` makes
/// execution fail with a verification error instead.
pub struct TestExecutor(pub Option<PostState>);
|
||||
|
||||
impl<SP: StateProvider> BlockExecutor<SP> for TestExecutor {
|
||||
fn execute(
|
||||
&mut self,
|
||||
_block: &Block,
|
||||
_total_difficulty: U256,
|
||||
_senders: Option<Vec<Address>>,
|
||||
) -> Result<PostState, ExecutionError> {
|
||||
self.0.clone().ok_or(ExecutionError::VerificationFailed)
|
||||
}
|
||||
|
||||
fn execute_and_verify_receipt(
|
||||
&mut self,
|
||||
_block: &Block,
|
||||
_total_difficulty: U256,
|
||||
_senders: Option<Vec<Address>>,
|
||||
) -> Result<PostState, ExecutionError> {
|
||||
self.0.clone().ok_or(ExecutionError::VerificationFailed)
|
||||
}
|
||||
}
|
||||
37
crates/blockchain-tree/src/test_utils/factory.rs
Normal file
37
crates/blockchain-tree/src/test_utils/factory.rs
Normal file
@@ -0,0 +1,37 @@
|
||||
use super::TestExecutor;
|
||||
use parking_lot::Mutex;
|
||||
use reth_primitives::ChainSpec;
|
||||
use reth_provider::{post_state::PostState, ExecutorFactory, StateProvider};
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Executor factory with pre-set execution results.
#[derive(Clone, Debug)]
pub struct TestExecutorFactory {
    // Shared queue of mocked execution results; `with_sp` pops from the back,
    // so results are consumed in LIFO order.
    exec_results: Arc<Mutex<Vec<PostState>>>,
    // Chain spec handed back verbatim by `chain_spec()`.
    chain_spec: Arc<ChainSpec>,
}
|
||||
|
||||
impl TestExecutorFactory {
|
||||
/// Create new instance of test factory.
|
||||
pub fn new(chain_spec: Arc<ChainSpec>) -> Self {
|
||||
Self { exec_results: Arc::new(Mutex::new(Vec::new())), chain_spec }
|
||||
}
|
||||
|
||||
/// Extend the mocked execution results
|
||||
pub fn extend(&self, results: Vec<PostState>) {
|
||||
self.exec_results.lock().extend(results.into_iter());
|
||||
}
|
||||
}
|
||||
|
||||
impl ExecutorFactory for TestExecutorFactory {
|
||||
type Executor<T: StateProvider> = TestExecutor;
|
||||
|
||||
fn with_sp<SP: StateProvider>(&self, _sp: SP) -> Self::Executor<SP> {
|
||||
let exec_res = self.exec_results.lock().pop();
|
||||
TestExecutor(exec_res)
|
||||
}
|
||||
|
||||
fn chain_spec(&self) -> &ChainSpec {
|
||||
self.chain_spec.as_ref()
|
||||
}
|
||||
}
|
||||
5
crates/blockchain-tree/src/test_utils/mod.rs
Normal file
5
crates/blockchain-tree/src/test_utils/mod.rs
Normal file
@@ -0,0 +1,5 @@
|
||||
mod executor;
|
||||
pub use executor::*;
|
||||
|
||||
mod factory;
|
||||
pub use factory::*;
|
||||
Reference in New Issue
Block a user