perf: write friendly ExecutionResult (#1674)

Co-authored-by: Georgios Konstantopoulos <me@gakonst.com>
This commit is contained in:
Bjerg
2023-03-17 00:42:59 +01:00
committed by GitHub
parent 670db0b433
commit 7f5ac990eb
15 changed files with 1715 additions and 1272 deletions

View File

@@ -4,11 +4,12 @@ use super::chain::{BlockChainId, Chain, ForkBlock};
use reth_primitives::{BlockHash, BlockNumber, SealedBlockWithSenders};
use std::collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet};
/// Internal indices of the blocks and chains. This is main connection
/// between blocks, chains and canonical chain.
/// Internal indices of the blocks and chains.
///
/// It contains list of canonical block hashes, forks to childs blocks
/// and block hash to chain id.
/// This is main connection between blocks, chains and canonical chain.
///
/// It contains a list of canonical block hashes, forks to child blocks, and a mapping of block hash
/// to chain ID.
#[derive(Debug)]
pub struct BlockIndices {
/// Last finalized block.

View File

@@ -1,29 +1,39 @@
//! Handles substate and list of blocks.
//! have functions to split, branch and append the chain.
use crate::{
execution_result::ExecutionResult,
substate::{SubStateData, SubStateWithProvider},
};
//! A chain in a [`BlockchainTree`][super::BlockchainTree].
//!
//! A [`Chain`] contains the state of accounts for the chain after execution of its constituent
//! blocks, as well as a list of the blocks the chain is composed of.
use crate::{post_state::PostState, substate::PostStateProvider};
use reth_interfaces::{consensus::Consensus, executor::Error as ExecError, Error};
use reth_primitives::{BlockHash, BlockNumber, SealedBlockWithSenders, SealedHeader, U256};
use reth_provider::{BlockExecutor, ExecutorFactory, StateProvider};
use std::collections::BTreeMap;
/// Internal to BlockchainTree chain identification.
/// The ID of a sidechain internally in a [`BlockchainTree`][super::BlockchainTree].
pub(crate) type BlockChainId = u64;
/// Side chain that contain it state and connect to block found in canonical chain.
/// A side chain.
///
/// The sidechain contains the state of accounts after execution of its blocks,
/// changesets for those blocks (and their transactions), as well as the blocks themselves.
///
/// Each chain in the tree is identified using a unique ID.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct Chain {
/// Chain substate. Updated state after execution all blocks in chain.
substate: SubStateData,
/// Changesets for block and transaction. Will be used to update tables in database.
changesets: Vec<ExecutionResult>,
/// Blocks in this chain
/// The state of accounts after execution of the blocks in this chain.
///
/// This state also contains the individual changes that lead to the current state.
state: PostState,
/// The blocks in this chain.
blocks: BTreeMap<BlockNumber, SealedBlockWithSenders>,
/// A mapping of each block number in the chain to the highest transition ID in the chain's
/// state after execution of the block.
///
/// This is used to revert changes in the state until a certain block number when the chain is
/// split.
block_transitions: BTreeMap<BlockNumber, usize>,
}
/// Contains fork block and hash.
/// Describes a fork block by its number and hash.
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct ForkBlock {
/// Block number of block that chains branches from
@@ -33,75 +43,68 @@ pub struct ForkBlock {
}
impl ForkBlock {
/// Return the number hash tuple.
/// Return the `(block_number, block_hash)` tuple for this fork block.
pub fn num_hash(&self) -> (BlockNumber, BlockHash) {
(self.number, self.hash)
}
}
impl Chain {
/// Return blocks found in chain
/// Get the blocks in this chain.
pub fn blocks(&self) -> &BTreeMap<BlockNumber, SealedBlockWithSenders> {
&self.blocks
}
/// Into inner components
pub fn into_inner(
self,
) -> (BTreeMap<BlockNumber, SealedBlockWithSenders>, Vec<ExecutionResult>, SubStateData) {
(self.blocks, self.changesets, self.substate)
/// Destructure the chain into its inner components, the blocks and the state.
pub fn into_inner(self) -> (BTreeMap<BlockNumber, SealedBlockWithSenders>, PostState) {
(self.blocks, self.state)
}
/// Return execution results of blocks
pub fn changesets(&self) -> &Vec<ExecutionResult> {
&self.changesets
}
/// Return fork block number and hash.
/// Get the block at which this chain forked.
pub fn fork_block(&self) -> ForkBlock {
let tip = self.first();
ForkBlock { number: tip.number.saturating_sub(1), hash: tip.parent_hash }
}
/// Block fork number
/// Get the block number at which this chain forked.
pub fn fork_block_number(&self) -> BlockNumber {
self.first().number.saturating_sub(1)
}
/// Block fork hash
/// Get the block hash at which this chain forked.
pub fn fork_block_hash(&self) -> BlockHash {
self.first().parent_hash
}
/// First block in chain.
/// Get the first block in this chain.
pub fn first(&self) -> &SealedBlockWithSenders {
self.blocks.first_key_value().expect("Chain has at least one block for first").1
}
/// Return tip of the chain. Chain always have at least one block inside
/// Get the tip of the chain.
///
/// # Note
///
/// Chains always have at least one block.
pub fn tip(&self) -> &SealedBlockWithSenders {
self.last()
self.blocks.last_key_value().expect("Chain should have at least one block").1
}
/// Return tip of the chain. Chain always have at least one block inside
pub fn last(&self) -> &SealedBlockWithSenders {
self.blocks.last_key_value().expect("Chain has at least one block for last").1
/// Create new chain with given blocks and post state.
pub fn new(blocks: Vec<(SealedBlockWithSenders, PostState)>) -> Self {
let mut state = PostState::default();
let mut block_transitions = BTreeMap::new();
let mut block_num_hash = BTreeMap::new();
for (block, block_state) in blocks.into_iter() {
state.extend(block_state);
block_transitions.insert(block.number, state.transitions_count());
block_num_hash.insert(block.number, block);
}
Self { state, block_transitions, blocks: block_num_hash }
}
/// Create new chain with given blocks and execution result.
pub fn new(blocks: Vec<(SealedBlockWithSenders, ExecutionResult)>) -> Self {
let (blocks, changesets): (Vec<_>, Vec<_>) = blocks.into_iter().unzip();
let blocks = blocks.into_iter().map(|b| (b.number, b)).collect::<BTreeMap<_, _>>();
let mut substate = SubStateData::default();
substate.apply(&changesets);
Self { substate, changesets, blocks }
}
/// Create new chain that joins canonical block
/// If parent block is the tip mark chain fork.
/// Create a new chain that forks off of the canonical chain.
pub fn new_canonical_fork<SP: StateProvider, C: Consensus, EF: ExecutorFactory>(
block: &SealedBlockWithSenders,
parent_header: &SealedHeader,
@@ -110,17 +113,16 @@ impl Chain {
consensus: &C,
factory: &EF,
) -> Result<Self, Error> {
// substate
let substate = SubStateData::default();
let state = PostState::default();
let empty = BTreeMap::new();
let substate_with_sp =
SubStateWithProvider::new(&substate, provider, &empty, canonical_block_hashes);
let state_provider =
PostStateProvider::new(&state, provider, &empty, canonical_block_hashes);
let changeset = Self::validate_and_execute(
block.clone(),
parent_header,
substate_with_sp,
state_provider,
consensus,
factory,
)?;
@@ -128,7 +130,7 @@ impl Chain {
Ok(Self::new(vec![(block.clone(), changeset)]))
}
/// Create new chain that branches out from existing side chain.
/// Create a new chain that forks off of an existing sidechain.
pub fn new_chain_fork<SP: StateProvider, C: Consensus, EF: ExecutorFactory>(
&self,
block: SealedBlockWithSenders,
@@ -144,61 +146,58 @@ impl Chain {
.get(&parent_number)
.ok_or(ExecError::BlockNumberNotFoundInChain { block_number: parent_number })?;
// revert changesets
let revert_from = self.changesets.len() - (self.tip().number - parent.number) as usize;
let mut substate = self.substate.clone();
let revert_to_transition_id = self
.block_transitions
.get(&parent.number)
.expect("Should have the transition ID for the parent block");
let mut state = self.state.clone();
// Revert state to the state after execution of the parent block
state.revert_to(*revert_to_transition_id);
// Revert changesets to get the state of the parent that we need to apply the change.
substate.revert(&self.changesets[revert_from..]);
let substate_with_sp = SubStateWithProvider::new(
&substate,
let state_provider = PostStateProvider::new(
&state,
provider,
&side_chain_block_hashes,
canonical_block_hashes,
);
let changeset = Self::validate_and_execute(
block.clone(),
parent,
substate_with_sp,
consensus,
factory,
)?;
substate.apply_one(&changeset);
let block_state =
Self::validate_and_execute(block.clone(), parent, state_provider, consensus, factory)?;
state.extend(block_state);
let chain = Self {
substate,
changesets: vec![changeset],
block_transitions: BTreeMap::from([(block.number, state.transitions_count())]),
state,
blocks: BTreeMap::from([(block.number, block)]),
};
// if all is okay, return new chain back. Present chain is not modified.
// If all is okay, return new chain back. Present chain is not modified.
Ok(chain)
}
/// Validate and execute block and return execution result or error.
/// Validate and execute the given block.
fn validate_and_execute<SP: StateProvider, C: Consensus, EF: ExecutorFactory>(
block: SealedBlockWithSenders,
parent_block: &SealedHeader,
substate: SubStateWithProvider<'_, SP>,
state_provider: PostStateProvider<'_, SP>,
consensus: &C,
factory: &EF,
) -> Result<ExecutionResult, Error> {
) -> Result<PostState, Error> {
consensus.validate_header(&block, U256::MAX)?;
consensus.pre_validate_header(&block, parent_block)?;
consensus.pre_validate_block(&block)?;
let (unseal, senders) = block.into_components();
let unseal = unseal.unseal();
let res = factory.with_sp(substate).execute_and_verify_receipt(
&unseal,
U256::MAX,
Some(senders),
)?;
Ok(res)
factory
.with_sp(state_provider)
.execute_and_verify_receipt(&unseal, U256::MAX, Some(senders))
.map_err(Into::into)
}
/// Append block to this chain
/// Validate and execute the given block, and append it to this chain.
pub fn append_block<SP: StateProvider, C: Consensus, EF: ExecutorFactory>(
&mut self,
block: SealedBlockWithSenders,
@@ -210,11 +209,11 @@ impl Chain {
) -> Result<(), Error> {
let (_, parent_block) = self.blocks.last_key_value().expect("Chain has at least one block");
let changeset = Self::validate_and_execute(
let block_state = Self::validate_and_execute(
block.clone(),
parent_block,
SubStateWithProvider::new(
&self.substate,
PostStateProvider::new(
&self.state,
provider,
&side_chain_block_hashes,
canonical_block_hashes,
@@ -222,14 +221,15 @@ impl Chain {
consensus,
factory,
)?;
self.substate.apply_one(&changeset);
self.changesets.push(changeset);
self.state.extend(block_state);
self.block_transitions.insert(block.number, self.state.transitions_count());
self.blocks.insert(block.number, block);
Ok(())
}
/// Merge two chains into one by appending received chain to the current one.
/// Take substate from newest one.
/// Merge two chains by appending the given chain into the current one.
///
/// The state of accounts for this chain is set to the state of the newest chain.
pub fn append_chain(&mut self, chain: Chain) -> Result<(), Error> {
let chain_tip = self.tip();
if chain_tip.hash != chain.fork_block_hash() {
@@ -239,19 +239,36 @@ impl Chain {
}
.into())
}
// Insert blocks from other chain
self.blocks.extend(chain.blocks.into_iter());
self.changesets.extend(chain.changesets.into_iter());
self.substate = chain.substate;
let current_transition_count = self.state.transitions_count();
self.state.extend(chain.state);
// Update the block transition mapping, shifting the transition ID by the current number of
// transitions in *this* chain
for (block_number, transition_id) in chain.block_transitions.iter() {
self.block_transitions.insert(*block_number, transition_id + current_transition_count);
}
Ok(())
}
/// Split this chain at the given block.
///
/// The given block will be the first block in the first returned chain.
///
/// If the given block is not found, [`ChainSplit::NoSplitPending`] is returned.
/// Split chain at the number or hash, block with given number will be included at first chain.
/// If any chain is empty (Does not have blocks) None will be returned.
///
/// If block hash is not found ChainSplit::NoSplitPending is returned.
/// # Note
///
/// Subtate state will be only found in second chain. First change substate will be
/// invalid.
/// The block number to transition ID mapping is only found in the second chain, making it
/// impossible to perform any state reverts on the first chain.
///
/// The second chain only contains the changes that were reverted on the first chain; however,
/// it retains the up-to-date state as if the chains were one, i.e. the second chain is an
/// extension of the first.
pub fn split(mut self, split_at: SplitAt) -> ChainSplit {
let chain_tip = *self.blocks.last_entry().expect("chain is never empty").key();
let block_number = match split_at {
@@ -282,17 +299,22 @@ impl Chain {
};
let higher_number_blocks = self.blocks.split_off(&(block_number + 1));
let (first_changesets, second_changeset) = self.changesets.split_at(self.blocks.len());
let mut canonical_state = std::mem::take(&mut self.state);
let new_state = canonical_state.split_at(
*self.block_transitions.get(&(block_number)).expect("Unknown block transition ID"),
);
self.state = new_state;
ChainSplit::Split {
canonical: Chain {
substate: SubStateData::default(),
changesets: first_changesets.to_vec(),
state: canonical_state,
block_transitions: BTreeMap::new(),
blocks: self.blocks,
},
pending: Chain {
substate: self.substate,
changesets: second_changeset.to_vec(),
state: self.state,
block_transitions: self.block_transitions,
blocks: higher_number_blocks,
},
}
@@ -333,12 +355,10 @@ pub enum ChainSplit {
#[cfg(test)]
mod tests {
use super::*;
use crate::substate::AccountSubState;
use reth_primitives::{H160, H256};
use reth_provider::execution_result::AccountInfoChangeSet;
use reth_primitives::{Account, H160, H256};
#[test]
fn chain_apend() {
fn chain_append() {
let block = SealedBlockWithSenders::default();
let block1_hash = H256([0x01; 32]);
let block2_hash = H256([0x02; 32]);
@@ -357,17 +377,11 @@ mod tests {
block3.block.header.header.parent_hash = block2_hash;
let mut chain1 = Chain {
substate: Default::default(),
changesets: vec![],
blocks: BTreeMap::from([(1, block1), (2, block2)]),
};
let mut chain1 =
Chain { blocks: BTreeMap::from([(1, block1), (2, block2)]), ..Default::default() };
let chain2 = Chain {
substate: Default::default(),
changesets: vec![],
blocks: BTreeMap::from([(3, block3), (4, block4)]),
};
let chain2 =
Chain { blocks: BTreeMap::from([(3, block3), (4, block4)]), ..Default::default() };
assert_eq!(chain1.append_chain(chain2.clone()), Ok(()));
@@ -377,41 +391,49 @@ mod tests {
#[test]
fn test_number_split() {
let mut substate = SubStateData::default();
let mut account = AccountSubState::default();
account.info.nonce = 10;
substate.accounts.insert(H160([1; 20]), account);
let mut base_state = PostState::default();
let mut account = Account::default();
account.nonce = 10;
base_state.create_account(H160([1; 20]), account);
base_state.finish_transition();
let mut exec1 = ExecutionResult::default();
exec1.block_changesets.insert(H160([2; 20]), AccountInfoChangeSet::default());
let mut exec2 = ExecutionResult::default();
exec2.block_changesets.insert(H160([3; 20]), AccountInfoChangeSet::default());
let mut block_state1 = PostState::default();
block_state1.create_account(H160([2; 20]), Account::default());
block_state1.finish_transition();
let mut block_state2 = PostState::default();
block_state2.create_account(H160([3; 20]), Account::default());
block_state2.finish_transition();
let mut block1 = SealedBlockWithSenders::default();
let block1_hash = H256([15; 32]);
block1.number = 1;
block1.hash = block1_hash;
block1.senders.push(H160([4; 20]));
let mut block2 = SealedBlockWithSenders::default();
let block2_hash = H256([16; 32]);
block2.number = 2;
block2.hash = block2_hash;
block2.senders.push(H160([4; 20]));
let chain = Chain {
substate: substate.clone(),
changesets: vec![exec1.clone(), exec2.clone()],
blocks: BTreeMap::from([(1, block1.clone()), (2, block2.clone())]),
};
let chain = Chain::new(vec![
(block1.clone(), block_state1.clone()),
(block2.clone(), block_state2.clone()),
]);
let mut split1_state = chain.state.clone();
let split2_state = split1_state.split_at(*chain.block_transitions.get(&1).unwrap());
let chain_split1 = Chain {
substate: SubStateData::default(),
changesets: vec![exec1],
state: split1_state,
block_transitions: BTreeMap::new(),
blocks: BTreeMap::from([(1, block1.clone())]),
};
let chain_split2 = Chain {
substate,
changesets: vec![exec2.clone()],
state: split2_state,
block_transitions: chain.block_transitions.clone(),
blocks: BTreeMap::from([(2, block2.clone())]),
};
@@ -432,6 +454,7 @@ mod tests {
chain.clone().split(SplitAt::Number(10)),
ChainSplit::NoSplitCanonical(chain.clone())
);
// split at lower number
assert_eq!(chain.clone().split(SplitAt::Number(0)), ChainSplit::NoSplitPending(chain));
}

View File

@@ -3,11 +3,12 @@
/// The configuration for the blockchain tree.
#[derive(Clone, Debug)]
pub struct BlockchainTreeConfig {
/// Finalization windows. Number of blocks that can be reorged
max_reorg_depth: u64,
/// Number of block after finalized block that we are storing. It should be more then
/// finalization window
/// Number of blocks after the last finalized block that we are storing.
///
/// It should be more than the finalization window for the canonical chain.
max_blocks_in_chain: u64,
/// The number of blocks that can be re-orged (finalization windows)
max_reorg_depth: u64,
/// For EVM's "BLOCKHASH" opcode we require last 256 block hashes. So we need to specify
/// at least `additional_canonical_block_hashes`+`max_reorg_depth`, for eth that would be
/// 256+64.

View File

@@ -5,15 +5,24 @@ use reth_primitives::ChainSpec;
use reth_provider::ShareableDatabase;
use std::sync::Arc;
/// Container for external abstractions.
/// A container for external components.
///
/// This is a simple container for external components used throughout the blockchain tree
/// implementation:
///
/// - A handle to the database
/// - A handle to the consensus engine
/// - The executor factory to execute blocks with
/// - The chain spec
#[derive(Debug)]
pub struct TreeExternals<DB, C, EF> {
/// Save sidechain, do reorgs and push new block to canonical chain that is inside db.
/// The database, used to commit the canonical chain, or unwind it.
pub db: DB,
/// Consensus checks
/// The consensus engine.
pub consensus: C,
/// Create executor to execute blocks.
/// The executor factory to execute blocks with.
pub executor_factory: EF,
/// Chain spec
/// The chain spec.
pub chain_spec: Arc<ChainSpec>,
}

View File

@@ -6,7 +6,10 @@ use reth_primitives::{BlockHash, BlockNumber, SealedBlock, SealedBlockWithSender
use reth_provider::{
providers::ChainState, ExecutorFactory, HeaderProvider, StateProviderFactory, Transaction,
};
use std::collections::{BTreeMap, HashMap};
use std::{
collections::{BTreeMap, HashMap},
ops::DerefMut,
};
pub mod block_indices;
use block_indices::BlockIndices;
@@ -60,17 +63,18 @@ use externals::TreeExternals;
/// * [BlockchainTree::make_canonical]: Check if we have the hash of block that we want to finalize
/// and commit it to db. If we don't have the block, pipeline syncing should start to fetch the
/// blocks from p2p. Do reorg in tables of canonical chain if needed.
#[derive(Debug)]
pub struct BlockchainTree<DB: Database, C: Consensus, EF: ExecutorFactory> {
/// chains and present data
/// The tracked chains and their current data.
chains: HashMap<BlockChainId, Chain>,
/// Static blockchain id generator
/// Static blockchain ID generator
block_chain_id_generator: u64,
/// Indices to block and their connection.
/// Indices to block and their connection to the canonical chain.
block_indices: BlockIndices,
/// Tree configuration.
config: BlockchainTreeConfig,
/// Externals
/// External components (the database, consensus engine etc.)
externals: TreeExternals<DB, C, EF>,
/// Tree configuration
config: BlockchainTreeConfig,
}
/// From Engine API spec, block inclusion can be valid, accepted or invalid.
@@ -90,16 +94,17 @@ pub enum BlockStatus {
Disconnected,
}
/// Helper structure that wraps chains and indices to search for block hash accross the chains.
/// A container that wraps chains and block indices to allow searching for block hashes across all
/// sidechains.
pub struct BlockHashes<'a> {
/// Chains
/// The current tracked chains.
pub chains: &'a mut HashMap<BlockChainId, Chain>,
/// Indices
/// The block indices for all chains.
pub indices: &'a BlockIndices,
}
impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTree<DB, C, EF> {
/// New blockchain tree
/// Create a new blockchain tree.
pub fn new(
externals: TreeExternals<DB, C, EF>,
config: BlockchainTreeConfig,
@@ -137,7 +142,8 @@ impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTree<DB, C, EF>
})
}
/// Fork side chain or append the block if parent is the top of the chain
/// Create a new sidechain by forking the given chain, or append the block if the parent block
/// is the top of the given chain.
fn fork_side_chain(
&mut self,
block: SealedBlockWithSenders,
@@ -200,8 +206,9 @@ impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTree<DB, C, EF>
}
}
/// Fork canonical chain by creating new chain
fn fork_canonical_chain(
/// Create a new sidechain by forking the canonical chain.
// TODO(onbjerg): Is this not a specialized case of [`fork_side_chain`]? If so, can we merge?
pub fn fork_canonical_chain(
&mut self,
block: SealedBlockWithSenders,
) -> Result<BlockStatus, Error> {
@@ -238,8 +245,13 @@ impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTree<DB, C, EF>
Ok(block_status)
}
/// Get all block hashes from chain that are not canonical. This is one time operation per
/// block. Reason why this is not caches is to save memory.
/// Get all block hashes from a sidechain that are not part of the canonical chain.
///
/// This is a one time operation per block.
///
/// # Note
///
/// This is not cached in order to save memory.
fn all_chain_hashes(&self, chain_id: BlockChainId) -> BTreeMap<BlockNumber, BlockHash> {
// find chain and iterate over it,
let mut chain_id = chain_id;
@@ -260,9 +272,12 @@ impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTree<DB, C, EF>
hashes
}
/// Getting the canonical fork would tell use what kind of Provider we should execute block on.
/// If it is latest state provider or history state provider
/// Return None if chain_id is not known.
/// Get the block at which the given chain forked from the current canonical chain.
///
/// This is used to figure out what kind of state provider the executor should use to execute
/// the block.
///
/// Returns `None` if the chain is not known.
fn canonical_fork(&self, chain_id: BlockChainId) -> Option<ForkBlock> {
let mut chain_id = chain_id;
let mut fork;
@@ -283,8 +298,9 @@ impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTree<DB, C, EF>
}
}
/// Insert chain to tree and ties the blocks to it.
/// Helper function that handles indexing and inserting.
/// Insert a chain into the tree.
///
/// Inserts a chain into the tree and builds the block indices.
fn insert_chain(&mut self, chain: Chain) -> BlockChainId {
let chain_id = self.block_chain_id_generator;
self.block_chain_id_generator += 1;
@@ -294,22 +310,35 @@ impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTree<DB, C, EF>
chain_id
}
/// Insert block inside tree. recover transaction signers and
/// internaly call [`BlockchainTree::insert_block_with_senders`] fn.
/// Insert a new block in the tree.
///
/// # Note
///
/// This recovers transaction signers (unlike [`BlockchainTree::insert_block_with_senders`]).
pub fn insert_block(&mut self, block: SealedBlock) -> Result<BlockStatus, Error> {
let block = block.seal_with_senders().ok_or(ExecError::SenderRecoveryError)?;
self.insert_block_with_senders(&block)
}
/// Insert block with senders inside tree.
/// Insert a block (with senders recovered) in the tree.
///
/// Returns `true` if:
/// 1. It is part of the blockchain tree
/// 2. It is part of the canonical chain
/// 3. Its parent is part of the blockchain tree and we can fork at the parent
/// 4. Its parent is part of the canonical chain and we can fork at the parent
/// Otherwise will return `false`, indicating that neither the block nor its parent
/// is part of the chain or any sidechains. This means that if block becomes canonical
/// we need to fetch the missing blocks over p2p.
///
/// - The block is already part of a sidechain in the tree, or
/// - The block is already part of the canonical chain, or
/// - The parent is part of a sidechain in the tree, and we can fork at this block, or
/// - The parent is part of the canonical chain, and we can fork at this block
///
/// Otherwise `false` is returned, indicating that neither the block nor its parent is part of
/// the chain or any sidechains.
///
/// This means that if the block becomes canonical, we need to fetch the missing blocks over
/// P2P.
///
/// # Note
///
/// If the senders have not already been recovered, call [`BlockchainTree::insert_block`]
/// instead.
pub fn insert_block_with_senders(
&mut self,
block: &SealedBlockWithSenders,
@@ -372,7 +401,7 @@ impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTree<DB, C, EF>
Ok(BlockStatus::Disconnected)
}
/// Do finalization of blocks. Remove them from tree
/// Finalize blocks up until and including `finalized_block`, and remove them from the tree.
pub fn finalize_block(&mut self, finalized_block: BlockNumber) {
let mut remove_chains = self.block_indices.finalize_canonical_blocks(
finalized_block,
@@ -386,7 +415,16 @@ impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTree<DB, C, EF>
}
}
/// Update canonical hashes. Reads last N canonical blocks from database and update all indices.
/// Reads the last `N` canonical hashes from the database and updates the block indices of the
/// tree.
///
/// `N` is the `max_reorg_depth` plus the number of block hashes needed to satisfy the
/// `BLOCKHASH` opcode in the EVM.
///
/// # Note
///
/// This finalizes `last_finalized_block` prior to reading the canonical hashes (using
/// [`BlockchainTree::finalize_block`]).
pub fn restore_canonical_hashes(
&mut self,
last_finalized_block: BlockNumber,
@@ -417,8 +455,9 @@ impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTree<DB, C, EF>
Ok(())
}
/// Split chain and return canonical part of it. Pending part is reinserted inside tree
/// with same chain_id.
/// Split a sidechain at the given point, and return the canonical part of it.
///
/// The pending part of the chain is reinserted into the tree with the same `chain_id`.
fn split_chain(&mut self, chain_id: BlockChainId, chain: Chain, split_at: SplitAt) -> Chain {
match chain.split(split_at) {
ChainSplit::Split { canonical, pending } => {
@@ -434,9 +473,16 @@ impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTree<DB, C, EF>
}
}
/// Make block and its parent canonical. Unwind chains to database if necessary.
/// Make a block and its parent part of the canonical chain.
///
/// If block is already part of canonical chain return Ok.
/// # Note
///
/// This unwinds the database if necessary, i.e. if parts of the canonical chain have been
/// re-orged.
///
/// # Returns
///
/// Returns `Ok` if the blocks were canonicalized, or if the blocks were already canonical.
pub fn make_canonical(&mut self, block_hash: &BlockHash) -> Result<(), Error> {
let chain_id = if let Some(chain_id) = self.block_indices.get_blocks_chain_id(block_hash) {
chain_id
@@ -498,19 +544,41 @@ impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTree<DB, C, EF>
Ok(())
}
/// Commit chain for it to become canonical. Assume we are doing pending operation to db.
/// Canonicalize the given chain and commit it to the database.
fn commit_canonical(&mut self, chain: Chain) -> Result<(), Error> {
let mut tx = Transaction::new(&self.externals.db)?;
let new_tip_number = chain.tip().number;
let new_tip_hash = chain.tip().hash;
let first_transition_id =
tx.get_block_transition(chain.first().number.saturating_sub(1))
.map_err(|e| ExecError::CanonicalCommit { inner: e.to_string() })?;
let expected_state_root = chain.tip().state_root;
let fork_block = chain.fork_block_number();
let (blocks, state) = chain.into_inner();
let num_transitions = state.transitions_count();
let new_tip = chain.tip().number;
let (blocks, changesets, _) = chain.into_inner();
for item in blocks.into_iter().zip(changesets.into_iter()) {
let ((_, block), changeset) = item;
tx.insert_block(block, self.externals.chain_spec.as_ref(), changeset)
// Write state and changesets to the database
state
.write_to_db(tx.deref_mut(), first_transition_id)
.map_err(|e| ExecError::CanonicalCommit { inner: e.to_string() })?;
// Insert the blocks
for block in blocks.into_values() {
tx.insert_block(block)
.map_err(|e| ExecError::CanonicalCommit { inner: e.to_string() })?;
}
// update pipeline progress.
tx.update_pipeline_stages(new_tip)
tx.insert_hashes(
fork_block,
first_transition_id,
first_transition_id + num_transitions as u64,
new_tip_number,
new_tip_hash,
expected_state_root,
)
.map_err(|e| ExecError::CanonicalCommit { inner: e.to_string() })?;
// Update pipeline progress
tx.update_pipeline_stages(new_tip_number)
.map_err(|e| ExecError::PipelineStatusUpdate { inner: e.to_string() })?;
tx.commit()?;
@@ -538,9 +606,9 @@ impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTree<DB, C, EF>
Ok(())
}
/// Revert canonical blocks from database and insert them to pending table
/// Revert should be non inclusive, and revert_until should stay in db.
/// Return the chain that represent reverted canonical blocks.
/// Revert canonical blocks from the database and return them.
///
/// The block, `revert_until`, is non-inclusive, i.e. `revert_until` stays in the database.
fn revert_canonical(&mut self, revert_until: BlockNumber) -> Result<Chain, Error> {
// read data that is needed for new sidechain
@@ -560,9 +628,7 @@ impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTree<DB, C, EF>
tx.commit()?;
let chain = Chain::new(blocks_and_execution);
Ok(chain)
Ok(Chain::new(blocks_and_execution))
}
}
@@ -575,18 +641,16 @@ mod tests {
transaction::DbTxMut,
};
use reth_interfaces::test_utils::TestConsensus;
use reth_primitives::{
hex_literal::hex, proofs::EMPTY_ROOT, ChainSpec, ChainSpecBuilder, H256, MAINNET,
};
use reth_primitives::{proofs::EMPTY_ROOT, ChainSpec, ChainSpecBuilder, H256, MAINNET};
use reth_provider::{
execution_result::ExecutionResult, insert_block, test_utils::blocks::BlockChainTestData,
BlockExecutor, StateProvider,
insert_block, post_state::PostState, test_utils::blocks::BlockChainTestData, BlockExecutor,
StateProvider,
};
use std::{collections::HashSet, sync::Arc};
#[derive(Clone)]
#[derive(Clone, Debug)]
struct TestFactory {
exec_result: Arc<Mutex<Vec<ExecutionResult>>>,
exec_result: Arc<Mutex<Vec<PostState>>>,
chain_spec: Arc<ChainSpec>,
}
@@ -595,12 +659,12 @@ mod tests {
Self { exec_result: Arc::new(Mutex::new(Vec::new())), chain_spec }
}
fn extend(&self, exec_res: Vec<ExecutionResult>) {
fn extend(&self, exec_res: Vec<PostState>) {
self.exec_result.lock().extend(exec_res.into_iter());
}
}
struct TestExecutor(Option<ExecutionResult>);
struct TestExecutor(Option<PostState>);
impl<SP: StateProvider> BlockExecutor<SP> for TestExecutor {
fn execute(
@@ -608,7 +672,7 @@ mod tests {
_block: &reth_primitives::Block,
_total_difficulty: reth_primitives::U256,
_senders: Option<Vec<reth_primitives::Address>>,
) -> Result<ExecutionResult, ExecError> {
) -> Result<PostState, ExecError> {
self.0.clone().ok_or(ExecError::VerificationFailed)
}
@@ -617,7 +681,7 @@ mod tests {
_block: &reth_primitives::Block,
_total_difficulty: reth_primitives::U256,
_senders: Option<Vec<reth_primitives::Address>>,
) -> Result<ExecutionResult, ExecError> {
) -> Result<PostState, ExecError> {
self.0.clone().ok_or(ExecError::VerificationFailed)
}
}
@@ -636,7 +700,7 @@ mod tests {
}
fn setup_externals(
exec_res: Vec<ExecutionResult>,
exec_res: Vec<PostState>,
) -> TreeExternals<Arc<Env<WriteMap>>, Arc<TestConsensus>, TestFactory> {
let db = create_test_rw_db();
let consensus = Arc::new(TestConsensus::default());
@@ -718,12 +782,8 @@ mod tests {
let data = BlockChainTestData::default();
let (mut block1, exec1) = data.blocks[0].clone();
block1.number = 11;
block1.state_root =
H256(hex!("5d035ccb3e75a9057452ff060b773b213ec1fc353426174068edfc3971a0b6bd"));
let (mut block2, exec2) = data.blocks[1].clone();
block2.number = 12;
block2.state_root =
H256(hex!("90101a13dd059fa5cca99ed93d1dc23657f63626c5b8f993a2ccbdf7446b64f8"));
// test pops execution results from vector, so order is from last to first.ß
let externals = setup_externals(vec![exec2.clone(), exec1.clone(), exec2, exec1]);

View File

@@ -1,10 +1,8 @@
use crate::execution_result::{
AccountChangeSet, AccountInfoChangeSet, ExecutionResult, TransactionChangeSet,
};
use crate::post_state::PostState;
use reth_interfaces::executor::Error;
use reth_primitives::{
bloom::logs_bloom, Account, Address, Block, Bloom, ChainSpec, Hardfork, Header, Log, Receipt,
TransactionSigned, H256, U256,
bloom::logs_bloom, Account, Address, Block, Bloom, Bytecode, ChainSpec, Hardfork, Header, Log,
Receipt, TransactionSigned, H256, U256,
};
use reth_provider::{BlockExecutor, StateProvider};
use reth_revm::{
@@ -18,7 +16,7 @@ use revm::{
db::AccountState,
primitives::{
hash_map::{self, Entry},
Account as RevmAccount, AccountInfo, Bytecode, ResultAndState,
Account as RevmAccount, AccountInfo, ResultAndState,
},
EVM,
};
@@ -100,22 +98,16 @@ where
);
}
/// Commit change to database and return change diff that is used to update state and create
/// history index
///
/// ChangeDiff consists of:
/// address->AccountChangeSet (It contains old and new account info,storage wipe flag, and
/// old/new storage) bytecode_hash->bytecodes mapping
///
/// BTreeMap is used to have sorted values
/// Commit change to the run-time database, and update the given [PostState] with the changes
/// made in the transaction, which can be persisted to the database.
fn commit_changes(
&mut self,
changes: hash_map::HashMap<Address, RevmAccount>,
) -> (BTreeMap<Address, AccountChangeSet>, BTreeMap<H256, Bytecode>) {
has_state_clear_eip: bool,
post_state: &mut PostState,
) {
let db = self.db();
let mut change = BTreeMap::new();
let mut new_bytecodes = BTreeMap::new();
// iterate over all changed accounts
for (address, account) in changes {
if account.is_destroyed {
@@ -128,16 +120,8 @@ where
};
// Insert into `change` a old account and None for new account
// and mark storage to be mapped
change.insert(
address,
AccountChangeSet {
account: AccountInfoChangeSet::Destroyed {
old: to_reth_acc(&db_account.info),
},
storage: BTreeMap::new(),
wipe_storage: true,
},
);
post_state.destroy_account(address, to_reth_acc(&db_account.info));
// clear cached DB and mark account as not existing
db_account.storage.clear();
db_account.account_state = AccountState::NotExisting;
@@ -149,53 +133,54 @@ where
// does it exist inside cached contracts if it doesn't it is new bytecode that
// we are inserting inside `change`
if let Some(ref code) = account.info.code {
if !code.is_empty() {
match db.contracts.entry(account.info.code_hash) {
Entry::Vacant(entry) => {
entry.insert(code.clone());
new_bytecodes.insert(H256(account.info.code_hash.0), code.clone());
}
Entry::Occupied(mut entry) => {
entry.insert(code.clone());
}
}
if !code.is_empty() && !db.contracts.contains_key(&account.info.code_hash) {
db.contracts.insert(account.info.code_hash, code.clone());
post_state.add_bytecode(account.info.code_hash, Bytecode(code.clone()));
}
}
// get old account that is going to be overwritten or none if it does not exist
// and get new account that was just inserted. new account mut ref is used for
// inserting storage
let (account_info_changeset, new_account) = match db.accounts.entry(address) {
let cached_account = match db.accounts.entry(address) {
Entry::Vacant(entry) => {
let entry = entry.insert(Default::default());
entry.info = account.info.clone();
// account was not existing, so this means new account is created
(AccountInfoChangeSet::Created { new: to_reth_acc(&entry.info) }, entry)
let account = to_reth_acc(&entry.info);
if !(has_state_clear_eip && account.is_empty()) {
post_state.create_account(address, account);
}
entry
}
Entry::Occupied(entry) => {
let entry = entry.into_mut();
// account is present inside cache but is marked as NotExisting.
let account_changeset =
if matches!(entry.account_state, AccountState::NotExisting) {
AccountInfoChangeSet::Created { new: to_reth_acc(&account.info) }
} else if entry.info != account.info {
AccountInfoChangeSet::Changed {
old: to_reth_acc(&entry.info),
new: to_reth_acc(&account.info),
}
} else {
AccountInfoChangeSet::NoChange { is_empty: account.is_empty() }
};
if matches!(entry.account_state, AccountState::NotExisting) {
let account = to_reth_acc(&account.info);
if !(has_state_clear_eip && account.is_empty()) {
post_state.create_account(address, account);
}
} else if entry.info != account.info {
post_state.change_account(
address,
to_reth_acc(&entry.info),
to_reth_acc(&account.info),
);
} else if has_state_clear_eip && account.is_empty() {
// The account was touched, but it is empty, so it should be deleted.
post_state.destroy_account(address, to_reth_acc(&account.info));
}
entry.info = account.info.clone();
(account_changeset, entry)
entry
}
};
new_account.account_state = if account.storage_cleared {
new_account.storage.clear();
cached_account.account_state = if account.storage_cleared {
cached_account.storage.clear();
AccountState::StorageCleared
} else if new_account.account_state.is_storage_cleared() {
} else if cached_account.account_state.is_storage_cleared() {
// the account already exists and its storage was cleared, preserve its previous
// state
AccountState::StorageCleared
@@ -204,32 +189,28 @@ where
};
// Insert storage.
let mut storage = BTreeMap::new();
let mut storage_changeset = BTreeMap::new();
// insert storage into new db account.
new_account.storage.extend(account.storage.into_iter().map(|(key, value)| {
storage.insert(key, (value.original_value(), value.present_value()));
cached_account.storage.extend(account.storage.into_iter().map(|(key, value)| {
storage_changeset.insert(key, (value.original_value(), value.present_value()));
(key, value.present_value())
}));
// Insert into change.
change.insert(
address,
AccountChangeSet {
account: account_info_changeset,
storage,
wipe_storage: false,
},
);
if !storage_changeset.is_empty() {
post_state.change_storage(address, storage_changeset);
}
}
}
(change, new_bytecodes)
}
/// Collect all balance changes at the end of the block. Balance changes might include block
/// reward, uncle rewards, withdrawals or irregular state changes (DAO fork).
/// Collect all balance changes at the end of the block.
///
/// Balance changes might include the block reward, uncle rewards, withdrawals, or irregular
/// state changes (DAO fork).
fn post_block_balance_increments(
&mut self,
&self,
block: &Block,
td: U256,
) -> Result<HashMap<Address, U256>, Error> {
@@ -291,48 +272,36 @@ where
}
/// Irregular state change at Ethereum DAO hardfork
fn dao_fork_changeset(&mut self) -> Result<BTreeMap<Address, AccountInfoChangeSet>, Error> {
fn apply_dao_fork_changes(&mut self, post_state: &mut PostState) -> Result<(), Error> {
let db = self.db();
let mut drained_balance = U256::ZERO;
// drain all accounts ether
let mut changesets = crate::eth_dao_fork::DAO_HARDKFORK_ACCOUNTS
.iter()
.map(|&address| {
let db_account = db.load_account(address).map_err(|_| Error::ProviderError)?;
let old = to_reth_acc(&db_account.info);
// drain balance
drained_balance += core::mem::take(&mut db_account.info.balance);
let new = to_reth_acc(&db_account.info);
// assume it is changeset as it is irregular state change
Ok((address, AccountInfoChangeSet::Changed { new, old }))
})
.collect::<Result<BTreeMap<Address, AccountInfoChangeSet>, _>>()?;
for address in crate::eth_dao_fork::DAO_HARDKFORK_ACCOUNTS {
let db_account = db.load_account(address).map_err(|_| Error::ProviderError)?;
let old = to_reth_acc(&db_account.info);
// drain balance
drained_balance += core::mem::take(&mut db_account.info.balance);
let new = to_reth_acc(&db_account.info);
// assume it is changeset as it is irregular state change
post_state.change_account(address, old, new);
}
// add drained ether to beneficiary.
let beneficiary = crate::eth_dao_fork::DAO_HARDFORK_BENEFICIARY;
self.increment_account_balance(beneficiary, drained_balance, post_state)?;
let beneficiary_db_account =
db.load_account(beneficiary).map_err(|_| Error::ProviderError)?;
let old = to_reth_acc(&beneficiary_db_account.info);
beneficiary_db_account.info.balance += drained_balance;
let new = to_reth_acc(&beneficiary_db_account.info);
let beneficiary_changeset = AccountInfoChangeSet::Changed { new, old };
// insert changeset
changesets.insert(beneficiary, beneficiary_changeset);
Ok(changesets)
Ok(())
}
/// Generate balance increment account changeset and mutate account database entry in place.
fn account_balance_increment_changeset(
/// Increment the balance for the given account in the [PostState].
fn increment_account_balance(
&mut self,
address: Address,
increment: U256,
) -> Result<AccountInfoChangeSet, Error> {
post_state: &mut PostState,
) -> Result<(), Error> {
let db = self.db();
let beneficiary = db.load_account(address).map_err(|_| Error::ProviderError)?;
let old = to_reth_acc(&beneficiary.info);
@@ -346,9 +315,10 @@ where
beneficiary.account_state = AccountState::StorageCleared;
// if account was not present append `Created` changeset
Ok(AccountInfoChangeSet::Created {
new: Account { nonce: 0, balance: new.balance, bytecode_hash: None },
})
post_state.create_account(
address,
Account { nonce: 0, balance: new.balance, bytecode_hash: None },
)
}
AccountState::StorageCleared | AccountState::Touched | AccountState::None => {
@@ -359,9 +329,11 @@ where
beneficiary.account_state = AccountState::Touched;
}
// if account was present, append changed changeset.
Ok(AccountInfoChangeSet::Changed { new, old })
post_state.change_account(address, old, new);
}
}
Ok(())
}
/// Runs a single transaction in the configured environment and proceeds
@@ -393,23 +365,26 @@ where
out.map_err(|e| Error::EVM { hash, message: format!("{e:?}") })
}
/// Runs the provided transactions and commits their state. Will proceed
/// to return the total gas used by this batch of transaction as well as the
/// changesets generated by each tx.
/// Runs the provided transactions and commits their state to the run-time database.
///
/// The returned [PostState] can be used to persist the changes to disk, and contains the
/// changes made by each transaction.
///
/// The changes in [PostState] have a transition ID associated with them: there is one
/// transition ID for each transaction (with the first executed tx having transition ID 0, and
/// so on).
pub fn execute_transactions(
&mut self,
block: &Block,
total_difficulty: U256,
senders: Option<Vec<Address>>,
) -> Result<(Vec<TransactionChangeSet>, u64), Error> {
) -> Result<(PostState, u64), Error> {
let senders = self.recover_senders(&block.body, senders)?;
self.init_env(&block.header, total_difficulty);
let mut cumulative_gas_used = 0;
// output of execution
let mut tx_changesets = Vec::with_capacity(block.body.len());
let mut post_state = PostState::with_tx_capacity(block.body.len());
for (transaction, sender) in block.body.iter().zip(senders.into_iter()) {
// The sum of the transactions gas limit, Tg, and the gas utilised in this block prior,
// must be no greater than the blocks gasLimit.
@@ -424,7 +399,11 @@ where
let ResultAndState { result, state } = self.transact(transaction, sender)?;
// commit changes
let (changeset, new_bytecodes) = self.commit_changes(state);
self.commit_changes(
state,
self.chain_spec.fork(Hardfork::SpuriousDragon).active_at_block(block.number),
&mut post_state,
);
// append gas used
cumulative_gas_used += result.gas_used();
@@ -433,22 +412,19 @@ where
let logs: Vec<Log> = result.logs().into_iter().map(into_reth_log).collect();
// Push transaction changeset and calculate header bloom filter for receipt.
tx_changesets.push(TransactionChangeSet {
receipt: Receipt {
tx_type: transaction.tx_type(),
// Success flag was added in `EIP-658: Embedding transaction status code in
// receipts`.
success: result.is_success(),
cumulative_gas_used,
bloom: logs_bloom(logs.iter()),
logs,
},
changeset,
new_bytecodes,
post_state.add_receipt(Receipt {
tx_type: transaction.tx_type(),
// Success flag was added in `EIP-658: Embedding transaction status code in
// receipts`.
success: result.is_success(),
cumulative_gas_used,
bloom: logs_bloom(logs.iter()),
logs,
});
post_state.finish_transition();
}
Ok((tx_changesets, cumulative_gas_used))
Ok((post_state, cumulative_gas_used))
}
}
@@ -461,8 +437,8 @@ where
block: &Block,
total_difficulty: U256,
senders: Option<Vec<Address>>,
) -> Result<ExecutionResult, Error> {
let (tx_changesets, cumulative_gas_used) =
) -> Result<PostState, Error> {
let (mut post_state, cumulative_gas_used) =
self.execute_transactions(block, total_difficulty, senders)?;
// Check if gas used matches the value set in header.
@@ -470,21 +446,22 @@ where
return Err(Error::BlockGasUsed { got: cumulative_gas_used, expected: block.gas_used })
}
let mut block_changesets = BTreeMap::default();
let balance_increments = self.post_block_balance_increments(block, total_difficulty)?;
for (address, increment) in balance_increments {
let changeset = self.account_balance_increment_changeset(address, increment)?;
block_changesets.insert(address, changeset);
let mut includes_block_transition = !balance_increments.is_empty();
for (address, increment) in balance_increments.into_iter() {
self.increment_account_balance(address, increment, &mut post_state)?;
}
if self.chain_spec.fork(Hardfork::Dao).transitions_at_block(block.number) {
for (address, changeset) in self.dao_fork_changeset()? {
// No account collision between rewarded accounts and DAO fork related accounts.
block_changesets.insert(address, changeset);
}
includes_block_transition = true;
self.apply_dao_fork_changes(&mut post_state)?;
}
Ok(ExecutionResult { tx_changesets, block_changesets })
if includes_block_transition {
post_state.finish_transition();
}
Ok(post_state)
}
fn execute_and_verify_receipt(
@@ -492,14 +469,15 @@ where
block: &Block,
total_difficulty: U256,
senders: Option<Vec<Address>>,
) -> Result<ExecutionResult, Error> {
let execution_result = self.execute(block, total_difficulty, senders)?;
let receipts_iter =
execution_result.tx_changesets.iter().map(|changeset| &changeset.receipt);
) -> Result<PostState, Error> {
let post_state = self.execute(block, total_difficulty, senders)?;
if self.chain_spec.fork(Hardfork::Byzantium).active_at_block(block.header.number) {
verify_receipt(block.header.receipts_root, block.header.logs_bloom, receipts_iter)?;
verify_receipt(
block.header.receipts_root,
block.header.logs_bloom,
post_state.receipts().iter(),
)?;
}
// TODO Before Byzantium, receipts contained state root that would mean that expensive
@@ -507,7 +485,7 @@ where
// transaction This was replaced with is_success flag.
// See more about EIP here: https://eips.ethereum.org/EIPS/eip-658
Ok(execution_result)
Ok(post_state)
}
}
@@ -541,7 +519,10 @@ mod tests {
hex_literal::hex, keccak256, Account, Address, Bytecode, Bytes, ChainSpecBuilder,
ForkCondition, StorageKey, H256, MAINNET, U256,
};
use reth_provider::{AccountProvider, BlockHashProvider, StateProvider};
use reth_provider::{
post_state::{Change, Storage},
AccountProvider, BlockHashProvider, StateProvider,
};
use reth_revm::database::State;
use reth_rlp::Decodable;
use std::{collections::HashMap, str::FromStr};
@@ -661,17 +642,21 @@ mod tests {
// execute chain and verify receipts
let mut executor = Executor::new(chain_spec, db);
let out = executor.execute_and_verify_receipt(&block, U256::ZERO, None).unwrap();
let post_state = executor.execute_and_verify_receipt(&block, U256::ZERO, None).unwrap();
assert_eq!(out.tx_changesets.len(), 1, "Should executed one transaction");
assert_eq!(post_state.transitions_count(), 2, "Should executed one transaction");
let changesets = out.tx_changesets[0].clone();
assert_eq!(changesets.new_bytecodes.len(), 0, "Should have zero new bytecodes");
let block_reward = U256::from(WEI_2ETH + (WEI_2ETH >> 5));
let account1_info = Account { balance: U256::ZERO, nonce: 0x00, bytecode_hash: None };
let account2_info = Account {
balance: U256::from(0x1bc16d674ece94bau128 - 0x1bc16d674ec80000u128), /* decrease for
* block reward */
// Block reward decrease
balance: U256::from(0x1bc16d674ece94bau128 - 0x1bc16d674ec80000u128),
nonce: 0x00,
bytecode_hash: None,
};
let account2_info_with_block_reward = Account {
balance: account2_info.balance + block_reward,
nonce: 0x00,
bytecode_hash: None,
};
@@ -680,8 +665,8 @@ mod tests {
nonce: 0x01,
bytecode_hash: None,
};
let block_reward = U256::from(WEI_2ETH + (WEI_2ETH >> 5));
let ommer_beneficiary_info =
Account { nonce: 0, balance: U256::from((8 * WEI_2ETH) >> 3), bytecode_hash: None };
// Check if cache is set
// account1
@@ -707,60 +692,88 @@ mod tests {
assert_eq!(cached_acc3.account_state, AccountState::Touched);
assert_eq!(cached_acc3.storage.len(), 0);
assert_eq!(
changesets.changeset.get(&account1).unwrap().account,
AccountInfoChangeSet::NoChange { is_empty: false },
"No change to account"
);
assert_eq!(
changesets.changeset.get(&account2).unwrap().account,
AccountInfoChangeSet::Created { new: account2_info },
"New account"
);
assert_eq!(
changesets.changeset.get(&account3).unwrap().account,
AccountInfoChangeSet::Changed { old: account3_old_info, new: account3_info },
"Change to account state"
assert!(
post_state.accounts().get(&account1).is_none(),
"Account should not be present in post-state since it was not changed"
);
// check block rewards changeset.
let mut block_rewarded_acc_info = account2_info;
// add Blocks 2 eth reward and 2>>5 for one ommer
block_rewarded_acc_info.balance += block_reward;
// Check changes
const TX_TRANSITION_ID: u64 = 0;
const BLOCK_TRANSITION_ID: u64 = 1;
// check block reward changeset
// Clone and sort to make the test deterministic
let mut changes = post_state.changes().to_vec();
changes.sort_by_key(|change| (change.transition_id(), change.address()));
assert_eq!(
out.block_changesets,
BTreeMap::from([
(
account2,
AccountInfoChangeSet::Changed {
new: block_rewarded_acc_info,
old: account2_info
}
),
(
ommer_beneficiary,
AccountInfoChangeSet::Created {
new: Account {
nonce: 0,
balance: U256::from((8 * WEI_2ETH) >> 3),
bytecode_hash: None
}
}
)
])
changes,
&[
// Storage changes on account 1
Change::StorageChanged {
id: TX_TRANSITION_ID,
address: account1,
changeset: [(U256::from(1), (U256::ZERO, U256::from(2)))].into()
},
// New account
Change::AccountCreated {
id: TX_TRANSITION_ID,
address: account2,
account: account2_info
},
// Changed account
Change::AccountChanged {
id: TX_TRANSITION_ID,
address: account3,
old: account3_old_info,
new: account3_info
},
// Block reward
Change::AccountChanged {
id: BLOCK_TRANSITION_ID,
address: account2,
old: account2_info,
new: account2_info_with_block_reward
},
// Ommer reward
Change::AccountCreated {
id: BLOCK_TRANSITION_ID,
address: ommer_beneficiary,
account: ommer_beneficiary_info
},
],
"Changeset did not match"
);
assert_eq!(changesets.new_bytecodes.len(), 0, "No new bytecodes");
// check storage
let storage = &changesets.changeset.get(&account1).unwrap().storage;
assert_eq!(storage.len(), 1, "Only one storage change");
// Check final post-state
assert_eq!(
storage.get(&U256::from(1)),
Some(&(U256::ZERO, U256::from(2))),
"Storage change from 0 to 2 on slot 1"
post_state.storage(),
&BTreeMap::from([(
account1,
Storage { wiped: false, storage: BTreeMap::from([(U256::from(1), U256::from(2))]) }
)]),
"Should have changed 1 storage slot"
);
assert_eq!(post_state.bytecodes().len(), 0, "Should have zero new bytecodes");
let accounts = post_state.accounts();
assert_eq!(
accounts.len(),
3,
"Should have 4 accounts (account 2, 3 and the ommer beneficiary)"
);
assert_eq!(
accounts.get(&account2).unwrap(),
&Some(account2_info_with_block_reward),
"Account 2 state is wrong"
);
assert_eq!(
accounts.get(&account3).unwrap(),
&Some(account3_info),
"Account 3 state is wrong"
);
assert_eq!(
accounts.get(&ommer_beneficiary).unwrap(),
&Some(ommer_beneficiary_info),
"Ommer beneficiary state is wrong"
);
}
@@ -798,7 +811,11 @@ mod tests {
None,
)
.unwrap();
assert_eq!(out.tx_changesets.len(), 0, "No tx");
assert_eq!(
out.transitions_count(),
1,
"Should only have 1 transition (the block transition)"
);
// Check if cache is set
// beneficiary
@@ -813,24 +830,15 @@ mod tests {
}
// check changesets
let change_set =
out.block_changesets.get(&crate::eth_dao_fork::DAO_HARDFORK_BENEFICIARY).unwrap();
let beneficiary_state =
out.accounts().get(&crate::eth_dao_fork::DAO_HARDFORK_BENEFICIARY).unwrap().unwrap();
assert_eq!(
*change_set,
AccountInfoChangeSet::Changed {
new: Account { balance: U256::from(beneficiary_balance), ..Default::default() },
old: Account { balance: U256::ZERO, ..Default::default() }
}
beneficiary_state,
Account { balance: U256::from(beneficiary_balance), ..Default::default() },
);
for (i, address) in crate::eth_dao_fork::DAO_HARDKFORK_ACCOUNTS.iter().enumerate() {
let change_set = out.block_changesets.get(address).unwrap();
assert_eq!(
*change_set,
AccountInfoChangeSet::Changed {
new: Account { balance: U256::ZERO, ..Default::default() },
old: Account { balance: U256::from(i), ..Default::default() }
}
);
for address in crate::eth_dao_fork::DAO_HARDKFORK_ACCOUNTS.iter() {
let updated_account = out.accounts().get(address).unwrap().unwrap();
assert_eq!(updated_account, Account { balance: U256::ZERO, ..Default::default() });
}
}
@@ -885,10 +893,12 @@ mod tests {
let mut executor = Executor::new(chain_spec, db);
let out = executor.execute_and_verify_receipt(&block, U256::ZERO, None).unwrap();
assert_eq!(out.tx_changesets.len(), 1, "Should executed one transaction");
let changesets = out.tx_changesets[0].clone();
assert_eq!(changesets.new_bytecodes.len(), 0, "Should have zero new bytecodes");
assert_eq!(
out.transitions_count(),
2,
"Should only have two transitions (the transaction and the block)"
);
assert_eq!(out.bytecodes().len(), 0, "Should have zero new bytecodes");
let post_account_caller = Account {
balance: U256::from(0x0de0b6b3a761cf60u64),
@@ -897,21 +907,20 @@ mod tests {
};
assert_eq!(
changesets.changeset.get(&address_caller).unwrap().account,
AccountInfoChangeSet::Changed { new: post_account_caller, old: pre_account_caller },
out.accounts().get(&address_caller).unwrap().unwrap(),
post_account_caller,
"Caller account has changed and fee is deduced"
);
let selfdestroyer_changeset = changesets.changeset.get(&address_selfdestruct).unwrap();
// check account
assert_eq!(
selfdestroyer_changeset.account,
AccountInfoChangeSet::Destroyed { old: pre_account_selfdestroyed },
"Selfdestroyed account"
out.accounts().get(&address_selfdestruct).unwrap(),
&None,
"Selfdestructed account should have been deleted"
);
assert!(
out.storage().get(&address_selfdestruct).unwrap().wiped,
"Selfdestructed account should have its storage wiped"
);
assert!(selfdestroyer_changeset.wipe_storage);
}
// Test vector from https://github.com/ethereum/tests/blob/3156db5389921125bb9e04142d18e0e7b0cf8d64/BlockchainTests/EIPTests/bc4895-withdrawals/twoIdenticalIndexDifferentValidator.json
@@ -933,7 +942,7 @@ mod tests {
// execute chain and verify receipts
let mut executor = Executor::new(chain_spec, db);
let out = executor.execute_and_verify_receipt(&block, U256::ZERO, None).unwrap();
assert_eq!(out.tx_changesets.len(), 0, "No tx");
assert_eq!(out.transitions_count(), 1, "Only one transition (the block transition)");
let withdrawal_sum = withdrawals.iter().fold(U256::ZERO, |sum, w| sum + w.amount_wei());
let beneficiary_account = executor.db().accounts.get(&withdrawal_beneficiary).unwrap();
@@ -941,29 +950,28 @@ mod tests {
assert_eq!(beneficiary_account.info.nonce, 0);
assert_eq!(beneficiary_account.account_state, AccountState::StorageCleared);
assert_eq!(out.block_changesets.len(), 1);
assert_eq!(
out.block_changesets.get(&withdrawal_beneficiary),
Some(&AccountInfoChangeSet::Created {
new: Account { nonce: 0, balance: withdrawal_sum, bytecode_hash: None },
})
out.accounts().get(&withdrawal_beneficiary).unwrap(),
&Some(Account { nonce: 0, balance: withdrawal_sum, bytecode_hash: None }),
"Withdrawal account should have gotten its balance set"
);
// Execute same block again
let out = executor.execute_and_verify_receipt(&block, U256::ZERO, None).unwrap();
assert_eq!(out.tx_changesets.len(), 0, "No tx");
assert_eq!(out.block_changesets.len(), 1);
assert_eq!(
out.block_changesets.get(&withdrawal_beneficiary),
Some(&AccountInfoChangeSet::Changed {
old: Account { nonce: 0, balance: withdrawal_sum, bytecode_hash: None },
new: Account {
nonce: 0,
balance: withdrawal_sum + withdrawal_sum,
bytecode_hash: None
},
})
out.transitions_count(),
1,
"Should only have one transition (the block transition)"
);
assert_eq!(
out.accounts().get(&withdrawal_beneficiary).unwrap(),
&Some(Account {
nonce: 0,
balance: withdrawal_sum + withdrawal_sum,
bytecode_hash: None
}),
"Withdrawal account should have gotten its balance set"
);
}
@@ -987,23 +995,35 @@ mod tests {
};
let mut executor = Executor::new(chain_spec, db);
// touch account
executor.commit_changes(hash_map::HashMap::from([(
account,
RevmAccount { ..default_acc.clone() },
)]));
executor.commit_changes(
hash_map::HashMap::from([(account, RevmAccount { ..default_acc.clone() })]),
true,
&mut PostState::default(),
);
// destroy account
executor.commit_changes(hash_map::HashMap::from([(
account,
RevmAccount { is_destroyed: true, is_touched: true, ..default_acc.clone() },
)]));
executor.commit_changes(
hash_map::HashMap::from([(
account,
RevmAccount { is_destroyed: true, is_touched: true, ..default_acc.clone() },
)]),
true,
&mut PostState::default(),
);
// re-create account
executor.commit_changes(hash_map::HashMap::from([(
account,
RevmAccount { is_touched: true, storage_cleared: true, ..default_acc.clone() },
)]));
executor.commit_changes(
hash_map::HashMap::from([(
account,
RevmAccount { is_touched: true, storage_cleared: true, ..default_acc.clone() },
)]),
true,
&mut PostState::default(),
);
// touch account
executor
.commit_changes(hash_map::HashMap::from([(account, RevmAccount { ..default_acc })]));
executor.commit_changes(
hash_map::HashMap::from([(account, RevmAccount { ..default_acc })]),
true,
&mut PostState::default(),
);
let db = executor.db();

View File

@@ -11,8 +11,10 @@ pub mod eth_dao_fork;
pub mod substate;
/// Execution result types.
pub use reth_provider::execution_result;
pub use reth_provider::post_state;
pub mod blockchain_tree;
/// Executor
pub mod executor;

View File

@@ -2,249 +2,35 @@
use reth_interfaces::{provider::ProviderError, Result};
use reth_primitives::{Account, Address, BlockHash, BlockNumber, Bytecode, Bytes, H256, U256};
use reth_provider::{AccountProvider, BlockHashProvider, StateProvider};
use std::collections::{hash_map::Entry, BTreeMap, HashMap};
use reth_provider::{post_state::PostState, AccountProvider, BlockHashProvider, StateProvider};
use std::collections::BTreeMap;
use crate::execution_result::{AccountInfoChangeSet, ExecutionResult};
/// Memory backend, storing all state values in a `Map` in memory.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct SubStateData {
/// Account info where None means it is not existing. Not existing state is needed for Pre
/// TANGERINE forks. `code` is always `None`, and bytecode can be found in `contracts`.
pub accounts: HashMap<Address, AccountSubState>,
/// New bytecodes
pub bytecodes: HashMap<H256, (u32, Bytecode)>,
}
impl SubStateData {
/// Apply changesets to substate.
pub fn apply(&mut self, changesets: &[ExecutionResult]) {
for changeset in changesets {
self.apply_one(changeset)
}
}
/// Apply one changeset to substate.
pub fn apply_one(&mut self, changeset: &ExecutionResult) {
for tx_changeset in changeset.tx_changesets.iter() {
// apply accounts
for (address, account_change) in tx_changeset.changeset.iter() {
// revert account
self.apply_account(address, &account_change.account);
// revert its storage
self.apply_storage(address, &account_change.storage);
}
// apply bytecodes
for (hash, bytecode) in tx_changeset.new_bytecodes.iter() {
self.bytecodes.entry(*hash).or_insert((0, Bytecode(bytecode.clone()))).0 += 1;
}
}
// apply block reward
for (address, change) in changeset.block_changesets.iter() {
self.apply_account(address, change)
}
}
/// Apply account changeset to substate
fn apply_account(&mut self, address: &Address, change: &AccountInfoChangeSet) {
match change {
AccountInfoChangeSet::Created { new } => match self.accounts.entry(*address) {
Entry::Vacant(entry) => {
entry.insert(AccountSubState::created_account(*new));
}
Entry::Occupied(mut entry) => {
let account = entry.get_mut();
// increment counter
account.inc_storage_counter();
account.info = *new;
}
},
AccountInfoChangeSet::Destroyed { .. } => {
// set selfdestructed account
let account = self.accounts.entry(*address).or_default();
account.inc_storage_counter();
account.info = Default::default();
account.storage.clear();
}
AccountInfoChangeSet::Changed { old, .. } => {
self.accounts.entry(*address).or_default().info = *old;
}
AccountInfoChangeSet::NoChange { is_empty } => {
if *is_empty {
self.accounts.entry(*address).or_default();
}
}
}
}
/// Apply storage changeset to substate
fn apply_storage(&mut self, address: &Address, storage: &BTreeMap<U256, (U256, U256)>) {
if let Entry::Occupied(mut entry) = self.accounts.entry(*address) {
let account_storage = &mut entry.get_mut().storage;
for (key, (_, new_value)) in storage {
let key = H256(key.to_be_bytes());
account_storage.insert(key, *new_value);
}
}
}
/// Revert to old state in substate. Changesets will be reverted in reverse order,
pub fn revert(&mut self, changesets: &[ExecutionResult]) {
for changeset in changesets.iter().rev() {
// revert block changeset
for (address, change) in changeset.block_changesets.iter() {
self.revert_account(address, change)
}
for tx_changeset in changeset.tx_changesets.iter() {
// revert bytecodes
for (hash, _) in tx_changeset.new_bytecodes.iter() {
match self.bytecodes.entry(*hash) {
Entry::Vacant(_) => panic!("Bytecode should be present"),
Entry::Occupied(mut entry) => {
let (cnt, _) = entry.get_mut();
*cnt -= 1;
if *cnt == 0 {
entry.remove_entry();
}
}
}
}
// revert accounts
for (address, account_change) in tx_changeset.changeset.iter() {
// revert account
self.revert_account(address, &account_change.account);
// revert its storage
self.revert_storage(address, &account_change.storage);
}
}
}
}
/// Revert storage
fn revert_storage(&mut self, address: &Address, storage: &BTreeMap<U256, (U256, U256)>) {
if let Entry::Occupied(mut entry) = self.accounts.entry(*address) {
let account_storage = &mut entry.get_mut().storage;
for (key, (old_value, _)) in storage {
let key = H256(key.to_be_bytes());
account_storage.insert(key, *old_value);
}
}
}
/// Revert account
///
/// Applies the inverse of a single [`AccountInfoChangeSet`] to the cached account state.
fn revert_account(&mut self, address: &Address, change: &AccountInfoChangeSet) {
    match change {
        AccountInfoChangeSet::Created { .. } => {
            match self.accounts.entry(*address) {
                Entry::Vacant(_) => {
                    // We inserted this account in apply fn.
                    panic!("It should be present, something is broken");
                }
                Entry::Occupied(mut entry) => {
                    let val = entry.get_mut();
                    if val.decr_storage_counter() {
                        // remove account that we didn't change from substate
                        entry.remove_entry();
                        return
                    }
                    // The account is still referenced by an earlier storage-clearing
                    // changeset: reset it to an empty default instead of removing it.
                    val.info = Account::default();
                    val.storage.clear();
                }
            };
        }
        AccountInfoChangeSet::Destroyed { old } => match self.accounts.entry(*address) {
            Entry::Vacant(_) => {
                // We inserted this account in apply fn.
                panic!("It should be present, something is broken");
            }
            Entry::Occupied(mut entry) => {
                let val = entry.get_mut();
                // Contrary to Created we are not removing this account as we don't know if
                // this account was changed or not by a `Changed` changeset.
                val.decr_storage_counter();
                val.info = *old;
            }
        },
        AccountInfoChangeSet::Changed { old, .. } => {
            // Simply restore the pre-change account info.
            self.accounts.entry(*address).or_default().info = *old;
        }
        AccountInfoChangeSet::NoChange { is_empty: _ } => {
            // do nothing
        }
    }
}
}
/// Account changes in substate
#[derive(Debug, Clone, Default, Eq, PartialEq)]
pub struct AccountSubState {
    /// New account state
    pub info: Account,
    /// If the account was selfdestructed or newly created its storage is cleared, and we
    /// don't need to ask the provider for storage data.
    ///
    /// The counter tracks how many changesets cleared the storage so that reverts can undo
    /// them one at a time (see `inc_storage_counter` / `decr_storage_counter`); `None` means
    /// the storage was never cleared.
    pub storage_is_clear: Option<u32>,
    /// storage slots
    pub storage: HashMap<H256, U256>,
}
impl AccountSubState {
    /// Increment the storage-clear counter to mark that a changeset cleared this account's
    /// storage.
    pub fn inc_storage_counter(&mut self) {
        self.storage_is_clear = Some(self.storage_is_clear.unwrap_or_default() + 1);
    }
    /// Decrement the storage-clear counter to represent that a changeset that cleared storage
    /// was reverted.
    ///
    /// Returns `true` if the last remaining storage-clearing changeset was reverted, i.e. the
    /// counter dropped to zero.
    pub fn decr_storage_counter(&mut self) -> bool {
        match self.storage_is_clear {
            // Storage was never cleared; nothing to revert.
            None => false,
            // The last clearing changeset was reverted: the counter is exhausted.
            Some(1) => {
                self.storage_is_clear = None;
                true
            }
            // Fix: previously the counter was left unchanged for values > 1, so it could
            // never reach 1 again and subsequent reverts would under-count the clears.
            Some(cnt) => {
                self.storage_is_clear = Some(cnt - 1);
                false
            }
        }
    }
    /// Create the substate entry for a newly created account.
    ///
    /// The clear counter starts at 1 because creation wipes any pre-existing storage.
    pub fn created_account(info: Account) -> Self {
        Self { info, storage_is_clear: Some(1), storage: HashMap::new() }
    }
    /// Should we ask the provider for storage data.
    ///
    /// Returns `true` when storage was never cleared, i.e. the provider may still hold slot
    /// values that are not cached here.
    pub fn ask_provider(&self) -> bool {
        self.storage_is_clear.is_none()
    }
}
/// Wrapper around substate and provider, it decouples the database that can be Latest or historical
/// with substate changes that happened previously.
pub struct SubStateWithProvider<'a, SP: StateProvider> {
/// Substate
substate: &'a SubStateData,
/// Provider
/// A state provider that either resolves to data in a wrapped [`PostState`], or an underlying state
/// provider.
pub struct PostStateProvider<'a, SP: StateProvider> {
/// The wrapped state after execution of one or more transactions and/or blocks.
state: &'a PostState,
/// The inner state provider.
provider: SP,
/// side chain block hashes
/// The blocks in the sidechain.
sidechain_block_hashes: &'a BTreeMap<BlockNumber, BlockHash>,
/// Last N canonical hashes,
/// The blocks in the canonical chain.
canonical_block_hashes: &'a BTreeMap<BlockNumber, BlockHash>,
}
impl<'a, SP: StateProvider> SubStateWithProvider<'a, SP> {
/// Create new substate with provider
impl<'a, SP: StateProvider> PostStateProvider<'a, SP> {
/// Create new post-state provider
pub fn new(
substate: &'a SubStateData,
state: &'a PostState,
provider: SP,
sidechain_block_hashes: &'a BTreeMap<BlockNumber, BlockHash>,
canonical_block_hashes: &'a BTreeMap<BlockNumber, BlockHash>,
) -> Self {
Self { substate, provider, sidechain_block_hashes, canonical_block_hashes }
Self { state, provider, sidechain_block_hashes, canonical_block_hashes }
}
}
/* Implement StateProvider traits */
impl<'a, SP: StateProvider> BlockHashProvider for SubStateWithProvider<'a, SP> {
impl<'a, SP: StateProvider> BlockHashProvider for PostStateProvider<'a, SP> {
fn block_hash(&self, number: U256) -> Result<Option<H256>> {
// All block numbers fit inside u64 and revm checks if it is last 256 block numbers.
let block_number = number.as_limbs()[0];
@@ -262,33 +48,45 @@ impl<'a, SP: StateProvider> BlockHashProvider for SubStateWithProvider<'a, SP> {
}
}
impl<'a, SP: StateProvider> AccountProvider for SubStateWithProvider<'a, SP> {
impl<'a, SP: StateProvider> AccountProvider for PostStateProvider<'a, SP> {
fn basic_account(&self, address: Address) -> Result<Option<Account>> {
if let Some(account) = self.substate.accounts.get(&address).map(|acc| acc.info) {
return Ok(Some(account))
if let Some(account) = self.state.account(&address) {
Ok(*account)
} else {
self.provider.basic_account(address)
}
self.provider.basic_account(address)
}
}
impl<'a, SP: StateProvider> StateProvider for SubStateWithProvider<'a, SP> {
impl<'a, SP: StateProvider> StateProvider for PostStateProvider<'a, SP> {
fn storage(
&self,
account: Address,
storage_key: reth_primitives::StorageKey,
) -> Result<Option<reth_primitives::StorageValue>> {
if let Some(substate_account) = self.substate.accounts.get(&account) {
if let Some(storage) = substate_account.storage.get(&storage_key) {
return Ok(Some(*storage))
}
if !substate_account.ask_provider() {
if let Some(storage) = self.state.account_storage(&account) {
if storage.wiped {
return Ok(Some(U256::ZERO))
}
if let Some(value) =
storage.storage.get(&U256::from_be_bytes(storage_key.to_fixed_bytes()))
{
return Ok(Some(*value))
}
}
self.provider.storage(account, storage_key)
}
/// Get account and storage proofs.
fn bytecode_by_hash(&self, code_hash: H256) -> Result<Option<Bytecode>> {
if let Some(bytecode) = self.state.bytecode(&code_hash).cloned() {
return Ok(Some(bytecode))
}
self.provider.bytecode_by_hash(code_hash)
}
fn proof(
&self,
_address: Address,
@@ -296,11 +94,4 @@ impl<'a, SP: StateProvider> StateProvider for SubStateWithProvider<'a, SP> {
) -> Result<(Vec<Bytes>, H256, Vec<Vec<Bytes>>)> {
Err(ProviderError::HistoryStateRoot.into())
}
fn bytecode_by_hash(&self, code_hash: H256) -> Result<Option<Bytecode>> {
if let Some((_, bytecode)) = self.substate.bytecodes.get(&code_hash).cloned() {
return Ok(Some(bytecode))
}
self.provider.bytecode_by_hash(code_hash)
}
}

View File

@@ -11,7 +11,9 @@ use reth_db::{
};
use reth_interfaces::provider::ProviderError;
use reth_primitives::{Address, Block, U256};
use reth_provider::{BlockExecutor, ExecutorFactory, LatestStateProviderRef, Transaction};
use reth_provider::{
post_state::PostState, BlockExecutor, ExecutorFactory, LatestStateProviderRef, Transaction,
};
use tracing::*;
/// The [`StageId`] of the execution stage.
@@ -112,7 +114,7 @@ impl<EF: ExecutorFactory> ExecutionStage<EF> {
let mut executor = self.executor_factory.with_sp(LatestStateProviderRef::new(&**tx));
// Fetch transactions, execute them and generate results
let mut changesets = Vec::with_capacity(block_batch.len());
let mut changesets = PostState::default();
for (header, td, body, ommers, withdrawals) in block_batch.into_iter() {
let block_number = header.number;
tracing::trace!(target: "sync::stages::execution", ?block_number, "Execute block.");
@@ -154,11 +156,12 @@ impl<EF: ExecutorFactory> ExecutionStage<EF> {
Some(signers),
)
.map_err(|error| StageError::ExecutionError { block: block_number, error })?;
changesets.push(changeset);
changesets.extend(changeset);
}
// put execution results to database
tx.insert_execution_result(changesets, self.executor_factory.chain_spec(), last_block)?;
let first_transition_id = tx.get_block_transition(last_block)?;
changesets.write_to_db(&**tx, first_transition_id)?;
let done = !capped;
info!(target: "sync::stages::execution", stage_progress = end_block, done, "Sync iteration finished");

View File

@@ -1,206 +0,0 @@
//! Output of execution.
use reth_db::{models::AccountBeforeTx, tables, transaction::DbTxMut, Error as DbError};
use reth_primitives::{Account, Address, Receipt, H256, U256};
use revm_primitives::Bytecode;
use std::collections::BTreeMap;
/// Execution Result containing a vector of per-transaction changesets
/// and the post-block changes (e.g. block reward) if present.
#[derive(Debug, Default, Eq, PartialEq, Clone)]
pub struct ExecutionResult {
    /// Transaction changeset containing [Receipt], changed [Accounts][Account] and Storages.
    pub tx_changesets: Vec<TransactionChangeSet>,
    /// Post block account changesets. This might include block reward, uncle rewards, withdrawals
    /// or irregular state changes (DAO fork).
    pub block_changesets: BTreeMap<Address, AccountInfoChangeSet>,
}
/// After a transaction is executed this structure contains the
/// transaction [Receipt], every change to state ([Account], Storage, [Bytecode])
/// that this transaction made, and the corresponding old values,
/// so that the history account table can be updated.
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct TransactionChangeSet {
    /// Transaction receipt
    pub receipt: Receipt,
    /// State change that this transaction made on state.
    pub changeset: BTreeMap<Address, AccountChangeSet>,
    /// New bytecode created as a result of transaction execution, keyed by code hash.
    pub new_bytecodes: BTreeMap<H256, Bytecode>,
}
/// Contains old/new account changes
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum AccountInfoChangeSet {
    /// The account is newly created. An account can be created just by receiving balance.
    ///
    /// The revert of this changeset is an empty account.
    Created {
        /// The newly created account.
        new: Account,
    },
    /// An account was deleted (selfdestructed) or we have touched
    /// an empty account and we need to remove/destroy it.
    /// (Look at state clearing [EIP-158](https://eips.ethereum.org/EIPS/eip-158))
    ///
    /// The revert of this changeset is the old account.
    Destroyed {
        /// The account that was destroyed.
        old: Account,
    },
    /// The account was changed.
    ///
    /// The revert of this changeset is the old account.
    Changed {
        /// The account after the change.
        new: Account,
        /// The account prior to the change.
        old: Account,
    },
    /// Nothing was changed for the account (nonce/balance).
    NoChange {
        /// Used to clear existing empty accounts pre-EIP-161.
        is_empty: bool,
    },
}
impl Default for AccountInfoChangeSet {
fn default() -> Self {
AccountInfoChangeSet::NoChange { is_empty: false }
}
}
impl AccountInfoChangeSet {
    /// Derive the changeset variant from the account state before (`old`) and after (`new`)
    /// the transition.
    pub fn new(old: Option<Account>, new: Option<Account>) -> Self {
        match (old, new) {
            (Some(old), Some(new)) => {
                if new != old {
                    Self::Changed { new, old }
                } else {
                    // Fix: `is_empty` was hard-coded to `true` (with a dead
                    // `if new.is_empty() {}` statement above it), which would let
                    // `apply_to_db` delete unchanged *non-empty* accounts when the
                    // state-clear EIP is active. Record the actual emptiness instead.
                    Self::NoChange { is_empty: new.is_empty() }
                }
            }
            (None, Some(new)) => Self::Created { new },
            (Some(old), None) => Self::Destroyed { old },
            (None, None) => Self::NoChange { is_empty: false },
        }
    }
    /// Apply the changes from the changeset to a database transaction.
    ///
    /// `tx_index` keys the changeset entry, and `has_state_clear_eip` controls whether empty
    /// accounts are pruned (EIP-158/161).
    pub fn apply_to_db<'a, TX: DbTxMut<'a>>(
        self,
        tx: &TX,
        address: Address,
        tx_index: u64,
        has_state_clear_eip: bool,
    ) -> Result<(), DbError> {
        match self {
            AccountInfoChangeSet::Changed { old, new } => {
                // insert old account in AccountChangeSet
                // check for old != new was already done
                tx.put::<tables::AccountChangeSet>(
                    tx_index,
                    AccountBeforeTx { address, info: Some(old) },
                )?;
                tx.put::<tables::PlainAccountState>(address, new)?;
            }
            AccountInfoChangeSet::Created { new } => {
                // Ignore accounts that are created empty when the state clear (SpuriousDragon)
                // hardfork is activated.
                if has_state_clear_eip && new.is_empty() {
                    return Ok(())
                }
                // A created account has no previous info.
                tx.put::<tables::AccountChangeSet>(
                    tx_index,
                    AccountBeforeTx { address, info: None },
                )?;
                tx.put::<tables::PlainAccountState>(address, new)?;
            }
            AccountInfoChangeSet::Destroyed { old } => {
                // Remove the account from plain state and record its pre-destruction info.
                tx.delete::<tables::PlainAccountState>(address, None)?;
                tx.put::<tables::AccountChangeSet>(
                    tx_index,
                    AccountBeforeTx { address, info: Some(old) },
                )?;
            }
            AccountInfoChangeSet::NoChange { is_empty } => {
                // Prune accounts that are (still) empty once the state-clear EIP is active.
                if has_state_clear_eip && is_empty {
                    tx.delete::<tables::PlainAccountState>(address, None)?;
                }
            }
        }
        Ok(())
    }
}
/// Diff change set that is needed for creating history index and updating current world state.
#[derive(Debug, Default, Eq, PartialEq, Clone)]
pub struct AccountChangeSet {
    /// Old and new account info change.
    pub account: AccountInfoChangeSet,
    /// Storage changes, mapping slot key -> (old_value, new_value). A previously (or newly)
    /// non-existent value is represented as `U256::ZERO`.
    pub storage: BTreeMap<U256, (U256, U256)>,
    /// Marks that the account's storage was wiped (selfdestruct cleanup). This is a separate
    /// flag because storage can change while the account info itself is untouched, so `account`
    /// alone cannot be used to detect a selfdestruct.
    pub wipe_storage: bool,
}
#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use reth_db::{
        database::Database,
        mdbx::{test_utils, Env, EnvKind, WriteMap},
        transaction::DbTx,
    };
    use reth_primitives::H160;
    use super::*;
    /// Applies each [AccountInfoChangeSet] variant to a fresh test database and verifies both
    /// the plain account state and the changeset table entries.
    #[test]
    fn apply_account_info_changeset() {
        let db: Arc<Env<WriteMap>> = test_utils::create_test_db(EnvKind::RW);
        let address = H160::zero();
        let tx_num = 0;
        let acc1 = Account { balance: U256::from(1), nonce: 2, bytecode_hash: Some(H256::zero()) };
        let acc2 = Account { balance: U256::from(3), nonce: 4, bytecode_hash: Some(H256::zero()) };
        let tx = db.tx_mut().unwrap();
        // check Changed changeset
        AccountInfoChangeSet::Changed { new: acc1, old: acc2 }
            .apply_to_db(&tx, address, tx_num, true)
            .unwrap();
        assert_eq!(
            tx.get::<tables::AccountChangeSet>(tx_num),
            Ok(Some(AccountBeforeTx { address, info: Some(acc2) }))
        );
        assert_eq!(tx.get::<tables::PlainAccountState>(address), Ok(Some(acc1)));
        // check Created changeset: no previous info is recorded
        AccountInfoChangeSet::Created { new: acc1 }
            .apply_to_db(&tx, address, tx_num, true)
            .unwrap();
        assert_eq!(
            tx.get::<tables::AccountChangeSet>(tx_num),
            Ok(Some(AccountBeforeTx { address, info: None }))
        );
        assert_eq!(tx.get::<tables::PlainAccountState>(address), Ok(Some(acc1)));
        // delete old value, as it is dupsorted
        tx.delete::<tables::AccountChangeSet>(tx_num, None).unwrap();
        // check Destroyed changeset: plain state is removed, old info is recorded
        AccountInfoChangeSet::Destroyed { old: acc2 }
            .apply_to_db(&tx, address, tx_num, true)
            .unwrap();
        assert_eq!(tx.get::<tables::PlainAccountState>(address), Ok(None));
        assert_eq!(
            tx.get::<tables::AccountChangeSet>(tx_num),
            Ok(Some(AccountBeforeTx { address, info: Some(acc2) }))
        );
    }
}

View File

@@ -27,7 +27,7 @@ pub use providers::{
pub mod trie;
/// Execution result
pub mod execution_result;
pub mod post_state;
/// Helper types for interacting with the database
mod transaction;

View File

@@ -0,0 +1,828 @@
//! Output of execution.
use reth_db::{
cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW},
models::{AccountBeforeTx, TransitionIdAddress},
tables,
transaction::{DbTx, DbTxMut},
Error as DbError,
};
use reth_primitives::{
Account, Address, Bytecode, Receipt, StorageEntry, TransitionId, H256, U256,
};
use std::collections::BTreeMap;
/// Storage for an account.
///
/// # Wiped Storage
///
/// The field `wiped` denotes whether any of the values contained in storage are valid or not; if
/// `wiped` is `true`, the storage should be considered empty.
///
/// Note that the map may still hold stale values while `wiped` is set — consumers must check
/// `wiped` before reading `storage`.
#[derive(Debug, Default, Clone, Eq, PartialEq)]
pub struct Storage {
    /// Whether the storage was wiped or not.
    pub wiped: bool,
    /// The storage slots.
    pub storage: BTreeMap<U256, U256>,
}
/// Storage for an account with the old and new values for each slot, i.e. a mapping of
/// slot key to `(old_value, new_value)`.
///
/// TODO: Do we actually need (old, new) anymore, or is (old) sufficient? (Check the writes)
/// If we don't, we can unify this and [Storage].
pub type StorageChangeset = BTreeMap<U256, (U256, U256)>;
/// A change to the state of accounts or storage.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum Change {
    /// A new account was created.
    AccountCreated {
        /// The ID of the transition this change is a part of.
        id: TransitionId,
        /// The address of the account that was created.
        address: Address,
        /// The account.
        account: Account,
    },
    /// An existing account was changed.
    AccountChanged {
        /// The ID of the transition this change is a part of.
        id: TransitionId,
        /// The address of the account that was changed.
        address: Address,
        /// The account before the change.
        old: Account,
        /// The account after the change.
        new: Account,
    },
    /// Storage slots for an account were changed.
    StorageChanged {
        /// The ID of the transition this change is a part of.
        id: TransitionId,
        /// The address of the account associated with the storage slots.
        address: Address,
        /// The storage changeset, mapping slot key to `(old_value, new_value)`.
        changeset: StorageChangeset,
    },
    /// Storage was wiped
    StorageWiped {
        /// The ID of the transition this change is a part of.
        id: TransitionId,
        /// The address of the account whose storage was wiped.
        address: Address,
    },
    /// An account was destroyed.
    ///
    /// This removes all of the information associated with the account. An accompanying
    /// [Change::StorageWiped] will also be present to mark the deletion of storage.
    ///
    /// If a change to an account satisfies the conditions for EIP-158, this change variant is also
    /// applied instead of the change that would otherwise have happened.
    AccountDestroyed {
        /// The ID of the transition this change is a part of.
        id: TransitionId,
        /// The address of the destroyed account.
        address: Address,
        /// The account before it was destroyed.
        old: Account,
    },
}
impl Change {
    /// Returns the ID of the transition this change belongs to.
    pub fn transition_id(&self) -> TransitionId {
        match self {
            Change::AccountDestroyed { id, .. } |
            Change::StorageWiped { id, .. } |
            Change::StorageChanged { id, .. } |
            Change::AccountCreated { id, .. } |
            Change::AccountChanged { id, .. } => *id,
        }
    }
    /// Returns the address of the account this change operates on.
    pub fn address(&self) -> Address {
        match self {
            Change::AccountDestroyed { address, .. } |
            Change::StorageWiped { address, .. } |
            Change::StorageChanged { address, .. } |
            Change::AccountCreated { address, .. } |
            Change::AccountChanged { address, .. } => *address,
        }
    }
    /// Overwrites the transition ID of this change.
    pub fn set_transition_id(&mut self, transition_id: TransitionId) {
        match self {
            Change::AccountDestroyed { id, .. } |
            Change::StorageWiped { id, .. } |
            Change::StorageChanged { id, .. } |
            Change::AccountCreated { id, .. } |
            Change::AccountChanged { id, .. } => *id = transition_id,
        }
    }
}
/// The state of accounts after execution of one or more transactions, including receipts and new
/// bytecode.
///
/// The latest state can be found in `accounts`, `storage`, and `bytecode`. The receipts for the
/// transactions that lead to these changes can be found in `receipts`, and each change leading to
/// this state can be found in `changes`.
///
/// # Wiped Storage
///
/// The [Storage] type has a field, `wiped`, which denotes whether any of the values contained
/// in storage are valid or not; if `wiped` is `true`, the storage for the account should be
/// considered empty.
///
/// # Transitions
///
/// Each [Change] has an `id` field that marks what transition it is part of. Each transaction is
/// its own transition, but there may be 0 or 1 transitions associated with the block.
///
/// The block level transition includes:
///
/// - Block rewards
/// - Ommer rewards
/// - Withdrawals
/// - The irregular state change for the DAO hardfork
///
/// [PostState::finish_transition] should be called after every transaction, and after every block.
///
/// The first transaction executed and added to the [PostState] has a transition ID of 0, the next
/// one a transition ID of 1, and so on. If the [PostState] is for a single block, and the number of
/// transitions ([PostState::transitions_count]) is greater than the number of transactions in the
/// block, then the last transition is the block transition.
///
/// For multi-block [PostState]s it is not possible to figure out what transition ID maps on to a
/// transaction or a block.
///
/// # Shaving Allocations
///
/// Since most [PostState]s in reth are for multiple blocks it is better to pre-allocate capacity
/// for receipts and changes, which [PostState::new] does, and thus it (or
/// [PostState::with_tx_capacity]) should be preferred to using the [Default] implementation.
#[derive(Debug, Default, Clone, Eq, PartialEq)]
pub struct PostState {
    /// The ID of the current transition.
    ///
    /// New changes are tagged with this ID; it is advanced by [PostState::finish_transition].
    current_transition_id: TransitionId,
    /// The state of all modified accounts after execution.
    ///
    /// If the value contained is `None`, then the account should be deleted.
    accounts: BTreeMap<Address, Option<Account>>,
    /// The state of all modified storage after execution
    ///
    /// If the contained [Storage] is marked as wiped, then all storage values should be cleared
    /// from the database.
    storage: BTreeMap<Address, Storage>,
    /// The changes to state that happened during execution
    changes: Vec<Change>,
    /// New code created during the execution
    bytecode: BTreeMap<H256, Bytecode>,
    /// The receipt(s) of the executed transaction(s).
    receipts: Vec<Receipt>,
}
/// Used to determine preallocation sizes of [PostState]'s internal [Vec]s. It denotes the number of
/// best-guess changes each transaction causes to state.
const BEST_GUESS_CHANGES_PER_TX: usize = 8;
/// How many [Change]s to preallocate for in [PostState].
///
/// Used by [PostState::new]. This is just a guesstimate based on:
///
/// - Each block having ~200-300 transactions
/// - Each transaction having some amount of changes
const PREALLOC_CHANGES_SIZE: usize = 256 * BEST_GUESS_CHANGES_PER_TX;
impl PostState {
    /// Create an empty [PostState].
    pub fn new() -> Self {
        Self { changes: Vec::with_capacity(PREALLOC_CHANGES_SIZE), ..Default::default() }
    }
    /// Create an empty [PostState] with pre-allocated space for a certain amount of transactions.
    pub fn with_tx_capacity(txs: usize) -> Self {
        Self {
            changes: Vec::with_capacity(txs * BEST_GUESS_CHANGES_PER_TX),
            receipts: Vec::with_capacity(txs),
            ..Default::default()
        }
    }
    /// Get the latest state of accounts.
    pub fn accounts(&self) -> &BTreeMap<Address, Option<Account>> {
        &self.accounts
    }
    /// Get the latest state for a specific account.
    ///
    /// # Returns
    ///
    /// - `None` if the account does not exist
    /// - `Some(&None)` if the account existed, but has since been deleted.
    /// - `Some(..)` if the account currently exists
    pub fn account(&self, address: &Address) -> Option<&Option<Account>> {
        self.accounts.get(address)
    }
    /// Get the latest state of storage.
    pub fn storage(&self) -> &BTreeMap<Address, Storage> {
        &self.storage
    }
    /// Get the storage for an account.
    pub fn account_storage(&self, address: &Address) -> Option<&Storage> {
        self.storage.get(address)
    }
    /// Get the changes causing this [PostState].
    pub fn changes(&self) -> &[Change] {
        &self.changes
    }
    /// Get the newly created bytecodes
    pub fn bytecodes(&self) -> &BTreeMap<H256, Bytecode> {
        &self.bytecode
    }
    /// Get a bytecode in the post-state.
    pub fn bytecode(&self, code_hash: &H256) -> Option<&Bytecode> {
        self.bytecode.get(code_hash)
    }
    /// Get the receipts for the transactions executed to form this [PostState].
    pub fn receipts(&self) -> &[Receipt] {
        &self.receipts
    }
    /// Get the number of transitions causing this [PostState]
    pub fn transitions_count(&self) -> usize {
        self.current_transition_id as usize
    }
    /// Extend this [PostState] with the changes in another [PostState].
    pub fn extend(&mut self, other: PostState) {
        self.changes.reserve(other.changes.len());
        // Rebase the other state's transition IDs on top of ours while re-applying its
        // changes to our aggregate state.
        let mut next_transition_id = self.current_transition_id;
        for mut change in other.changes.into_iter() {
            next_transition_id = self.current_transition_id + change.transition_id();
            change.set_transition_id(next_transition_id);
            self.add_and_apply(change);
        }
        self.receipts.extend(other.receipts);
        self.bytecode.extend(other.bytecode);
        // NOTE(review): if `other` has no changes this still bumps the transition counter by
        // one, and transitions in `other` that carry no changes (e.g. trailing empty
        // transitions) are not counted — confirm this is intended.
        self.current_transition_id = next_transition_id + 1;
    }
    /// Reverts each change up to and including any change that is part of `transition_id`.
    ///
    /// The reverted changes are removed from this post-state, and their effects are reverted.
    ///
    /// The reverted changes are returned.
    pub fn revert_to(&mut self, transition_id: usize) -> Vec<Change> {
        let mut changes_to_revert = Vec::new();
        // Remove every change belonging to `transition_id` or later, keeping a copy so its
        // effects can be undone and the change handed back to the caller.
        self.changes.retain(|change| {
            if change.transition_id() >= transition_id as u64 {
                changes_to_revert.push(change.clone());
                false
            } else {
                true
            }
        });
        // Undo the removed changes newest-first, renumbering the returned changes so their
        // IDs are relative to the revert point.
        for change in changes_to_revert.iter_mut().rev() {
            change.set_transition_id(change.transition_id() - transition_id as TransitionId);
            self.revert(change.clone());
        }
        self.current_transition_id = transition_id as TransitionId;
        changes_to_revert
    }
    /// Reverts each change up to and including any change that is part of `transition_id`.
    ///
    /// The reverted changes are removed from this post-state, and their effects are reverted.
    ///
    /// A new post-state containing the pre-revert state, as well as the reverted changes *only* is
    /// returned.
    ///
    /// This effectively splits the post state in two:
    ///
    /// 1. This post-state has the changes reverted
    /// 2. The returned post-state does *not* have the changes reverted, but only contains the
    /// descriptions of the changes that were reverted in the first post-state.
    pub fn split_at(&mut self, transition_id: usize) -> Self {
        // Clone ourselves
        let mut non_reverted_state = self.clone();
        // Revert the desired changes
        let reverted_changes = self.revert_to(transition_id);
        // Compute the new `current_transition_id` for `non_reverted_state`.
        // The reverted changes were renumbered by `revert_to`, so the last one carries the
        // highest (relative) transition ID.
        let new_transition_id =
            reverted_changes.last().map(|c| c.transition_id()).unwrap_or_default();
        non_reverted_state.changes = reverted_changes;
        non_reverted_state.current_transition_id = new_transition_id + 1;
        non_reverted_state
    }
    /// Add a newly created account to the post-state.
    pub fn create_account(&mut self, address: Address, account: Account) {
        self.add_and_apply(Change::AccountCreated {
            id: self.current_transition_id,
            address,
            account,
        });
    }
    /// Add a changed account to the post-state.
    ///
    /// If the account also has changed storage values, [PostState::change_storage] should also be
    /// called.
    pub fn change_account(&mut self, address: Address, old: Account, new: Account) {
        self.add_and_apply(Change::AccountChanged {
            id: self.current_transition_id,
            address,
            old,
            new,
        });
    }
    /// Mark an account as destroyed.
    pub fn destroy_account(&mut self, address: Address, account: Account) {
        // Destruction is recorded as two changes: the account removal and the storage wipe.
        self.add_and_apply(Change::AccountDestroyed {
            id: self.current_transition_id,
            address,
            old: account,
        });
        self.add_and_apply(Change::StorageWiped { id: self.current_transition_id, address });
    }
    /// Add changed storage values to the post-state.
    pub fn change_storage(&mut self, address: Address, changeset: StorageChangeset) {
        self.add_and_apply(Change::StorageChanged {
            id: self.current_transition_id,
            address,
            changeset,
        });
    }
    /// Add new bytecode to the post-state.
    pub fn add_bytecode(&mut self, code_hash: H256, bytecode: Bytecode) {
        // TODO: Is this faster than just doing `.insert`?
        // Assumption: `insert` will override the value if present, but since the code hash for a
        // given bytecode will always be the same, we are overriding with the same value.
        //
        // In other words: if this entry already exists, replacing the bytecode will replace with
        // the same value, which is wasteful.
        self.bytecode.entry(code_hash).or_insert(bytecode);
    }
    /// Add a transaction receipt to the post-state.
    ///
    /// Transactions should always include their receipts in the post-state.
    pub fn add_receipt(&mut self, receipt: Receipt) {
        self.receipts.push(receipt);
    }
    /// Mark all prior changes as being part of one transition, and start a new one.
    pub fn finish_transition(&mut self) {
        self.current_transition_id += 1;
    }
    /// Add a new change, and apply its transformations to the current state
    pub fn add_and_apply(&mut self, change: Change) {
        match &change {
            Change::AccountCreated { address, account, .. } |
            Change::AccountChanged { address, new: account, .. } => {
                self.accounts.insert(*address, Some(*account));
            }
            Change::AccountDestroyed { address, .. } => {
                // `None` marks the account for deletion.
                self.accounts.insert(*address, None);
            }
            Change::StorageChanged { address, changeset, .. } => {
                let storage = self.storage.entry(*address).or_default();
                // Writing new values supersedes a previous wipe for this account.
                storage.wiped = false;
                for (slot, (_, current_value)) in changeset {
                    storage.storage.insert(*slot, *current_value);
                }
            }
            Change::StorageWiped { address, .. } => {
                // Only the flag is set; any retained slot values are ignored while `wiped`
                // is true (see the type-level docs on [Storage]).
                let storage = self.storage.entry(*address).or_default();
                storage.wiped = true;
            }
        }
        self.changes.push(change);
    }
    /// Revert a change, applying the inverse of its transformations to the current state.
    fn revert(&mut self, change: Change) {
        match &change {
            Change::AccountCreated { address, .. } => {
                self.accounts.remove(address);
            }
            Change::AccountChanged { address, old, .. } => {
                self.accounts.insert(*address, Some(*old));
            }
            Change::AccountDestroyed { address, old, .. } => {
                self.accounts.insert(*address, Some(*old));
            }
            Change::StorageChanged { address, changeset, .. } => {
                let storage = self.storage.entry(*address).or_default();
                storage.wiped = false;
                for (slot, (old_value, _)) in changeset {
                    storage.storage.insert(*slot, *old_value);
                }
            }
            Change::StorageWiped { address, .. } => {
                // Un-wiping re-exposes slot values retained in `storage.storage`, since
                // wiping only sets the flag (see `add_and_apply`).
                let storage = self.storage.entry(*address).or_default();
                storage.wiped = false;
            }
        }
    }
    /// Write the post state to the database.
    ///
    /// Changesets are written keyed by `first_transition_id + change id`, followed by the
    /// plain (latest) state and finally any new bytecode.
    pub fn write_to_db<'a, TX: DbTxMut<'a> + DbTx<'a>>(
        mut self,
        tx: &TX,
        first_transition_id: TransitionId,
    ) -> Result<(), DbError> {
        // Collect and sort changesets by their key to improve write performance
        let mut changesets = std::mem::take(&mut self.changes);
        changesets
            .sort_unstable_by_key(|changeset| (changeset.transition_id(), changeset.address()));
        // Partition changesets into account and storage changes
        let (account_changes, storage_changes): (Vec<Change>, Vec<Change>) =
            changesets.into_iter().partition(|changeset| {
                matches!(
                    changeset,
                    Change::AccountChanged { .. } |
                    Change::AccountCreated { .. } |
                    Change::AccountDestroyed { .. }
                )
            });
        // Write account changes
        // NOTE(review): `append_dup` presumably requires keys in ascending order, which the
        // sort above provides — confirm against the reth_db cursor contract.
        let mut account_changeset_cursor = tx.cursor_dup_write::<tables::AccountChangeSet>()?;
        for changeset in account_changes.into_iter() {
            match changeset {
                // For destroyed/changed accounts the pre-change info is recorded.
                Change::AccountDestroyed { id, address, old } |
                Change::AccountChanged { id, address, old, .. } => {
                    account_changeset_cursor.append_dup(
                        first_transition_id + id,
                        AccountBeforeTx { address, info: Some(old) },
                    )?;
                }
                // Created accounts have no previous info.
                Change::AccountCreated { id, address, .. } => {
                    account_changeset_cursor.append_dup(
                        first_transition_id + id,
                        AccountBeforeTx { address, info: None },
                    )?;
                }
                // Storage changes were partitioned out above.
                _ => unreachable!(),
            }
        }
        // Write storage changes
        let mut storages_cursor = tx.cursor_dup_write::<tables::PlainStorageState>()?;
        let mut storage_changeset_cursor = tx.cursor_dup_write::<tables::StorageChangeSet>()?;
        for changeset in storage_changes.into_iter() {
            match changeset {
                Change::StorageChanged { id, address, changeset } => {
                    let storage_id = TransitionIdAddress((first_transition_id + id, address));
                    for (key, (old_value, _)) in changeset {
                        storage_changeset_cursor.append_dup(
                            storage_id,
                            StorageEntry { key: H256(key.to_be_bytes()), value: old_value },
                        )?;
                    }
                }
                Change::StorageWiped { id, address } => {
                    let storage_id = TransitionIdAddress((first_transition_id + id, address));
                    // Record every pre-wipe entry currently in the plain state so the wipe
                    // can be unwound from the changeset.
                    if let Some((_, entry)) = storages_cursor.seek_exact(address)? {
                        storage_changeset_cursor.append_dup(storage_id, entry)?;
                        while let Some(entry) = storages_cursor.next_dup_val()? {
                            storage_changeset_cursor.append_dup(storage_id, entry)?;
                        }
                    }
                }
                // Account changes were partitioned out above.
                _ => unreachable!(),
            }
        }
        // Write new storage state
        for (address, storage) in self.storage.into_iter() {
            if storage.wiped {
                if storages_cursor.seek_exact(address)?.is_some() {
                    storages_cursor.delete_current_duplicates()?;
                }
                // If the storage is marked as wiped, it might still contain values. This is to
                // avoid deallocating where possible, but these values should not be written to the
                // database.
                continue
            }
            for (key, value) in storage.storage {
                let key = H256(key.to_be_bytes());
                // NOTE(review): the existing entry is deleted before (re-)inserting —
                // presumably because `upsert` on this dupsort table would otherwise add a
                // duplicate entry for the slot; confirm against reth_db semantics.
                if let Some(entry) = storages_cursor.seek_by_key_subkey(address, key)? {
                    if entry.key == key {
                        storages_cursor.delete_current()?;
                    }
                }
                // Zero-valued slots are not re-inserted, i.e. they stay deleted.
                if value != U256::ZERO {
                    storages_cursor.upsert(address, StorageEntry { key, value })?;
                }
            }
        }
        // Write new account state
        let mut accounts_cursor = tx.cursor_write::<tables::PlainAccountState>()?;
        for (address, account) in self.accounts.into_iter() {
            if let Some(account) = account {
                accounts_cursor.upsert(address, account)?;
            } else if accounts_cursor.seek_exact(address)?.is_some() {
                // `None` means the account was destroyed; remove it from plain state.
                accounts_cursor.delete_current()?;
            }
        }
        // Write bytecode
        let mut bytecodes_cursor = tx.cursor_write::<tables::Bytecodes>()?;
        for (hash, bytecode) in self.bytecode.into_iter() {
            bytecodes_cursor.upsert(hash, bytecode)?;
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
use super::*;
use reth_db::{
database::Database,
mdbx::{test_utils, Env, EnvKind, WriteMap},
transaction::DbTx,
};
use std::sync::Arc;
#[test]
fn extend() {
    // State `a`: a single transition that creates and then destroys the same account.
    // This should produce three changes (create, change-on-destroy bookkeeping, destroy).
    let mut a = PostState::new();
    a.create_account(Address::zero(), Account::default());
    a.destroy_account(Address::zero(), Account::default());
    a.finish_transition();
    assert_eq!(a.transitions_count(), 1);
    assert_eq!(a.changes().len(), 3);

    // State `b`: a single transition creating a different account (one change).
    let mut b = PostState::new();
    b.create_account(Address::repeat_byte(0xff), Account::default());
    b.finish_transition();
    assert_eq!(b.transitions_count(), 1);
    // NOTE: use the `changes()` accessor consistently instead of mixing it with direct
    // field access (`.changes`), as the rest of this test does.
    assert_eq!(b.changes().len(), 1);

    // Extending `a` with `b` must concatenate both the transitions and the changes.
    let mut c = a.clone();
    c.extend(b.clone());
    assert_eq!(c.transitions_count(), 2);
    assert_eq!(c.changes().len(), a.changes().len() + b.changes().len());
}
#[test]
// Verifies that writing a `PostState` persists account info to the plain state table
// and records the pre-change values in the `AccountChangeSet` table, including deletion.
fn write_to_db_account_info() {
let db: Arc<Env<WriteMap>> = test_utils::create_test_db(EnvKind::RW);
let tx = db.tx_mut().expect("Could not get database tx");
let mut post_state = PostState::new();
let address_a = Address::zero();
let address_b = Address::repeat_byte(0xff);
let account_a = Account { balance: U256::from(1), nonce: 1, bytecode_hash: None };
let account_b = Account { balance: U256::from(2), nonce: 2, bytecode_hash: None };
let account_b_changed = Account { balance: U256::from(3), nonce: 3, bytecode_hash: None };
// 0x00.. is created
post_state.create_account(address_a, account_a);
// 0xff.. is changed (balance + 1, nonce + 1)
post_state.change_account(address_b, account_b, account_b_changed);
// Write at first transition id (0).
post_state.write_to_db(&tx, 0).expect("Could not write post state to DB");
// Check plain state: both accounts should now hold their post-change values.
assert_eq!(
tx.get::<tables::PlainAccountState>(address_a).expect("Could not read account state"),
Some(account_a),
"Account A state is wrong"
);
assert_eq!(
tx.get::<tables::PlainAccountState>(address_b).expect("Could not read account state"),
Some(account_b_changed),
"Account B state is wrong"
);
// Check change set: the changeset stores the *old* value per transition id.
// A newly created account has `info: None`; a changed account stores its prior info.
let mut changeset_cursor = tx
.cursor_dup_read::<tables::AccountChangeSet>()
.expect("Could not open changeset cursor");
assert_eq!(
changeset_cursor.seek_exact(0).expect("Could not read account change set"),
Some((0, AccountBeforeTx { address: address_a, info: None })),
"Account A changeset is wrong"
);
assert_eq!(
changeset_cursor.next_dup().expect("Changeset table is malformed"),
Some((0, AccountBeforeTx { address: address_b, info: Some(account_b) })),
"Account B changeset is wrong"
);
// Second post state: destroy account B at transition id 1.
let mut post_state = PostState::new();
// 0xff.. is destroyed
post_state.destroy_account(address_b, account_b_changed);
post_state.write_to_db(&tx, 1).expect("Could not write second post state to DB");
// Check new plain state for account B: deletion must remove the plain-state entry.
assert_eq!(
tx.get::<tables::PlainAccountState>(address_b).expect("Could not read account state"),
None,
"Account B should be deleted"
);
// Check change set: the destroyed account's last known info is stored as the old value.
assert_eq!(
changeset_cursor.seek_exact(1).expect("Could not read account change set"),
Some((1, AccountBeforeTx { address: address_b, info: Some(account_b_changed) })),
"Account B changeset is wrong after deletion"
);
}
#[test]
// Verifies that writing a `PostState` persists storage slots to the plain storage table
// and records the pre-change slot values in the `StorageChangeSet` table, keyed by
// `(transition id, address)`. Also checks that destroying an account wipes its storage
// while still recording the wiped values in the changeset.
fn write_to_db_storage() {
let db: Arc<Env<WriteMap>> = test_utils::create_test_db(EnvKind::RW);
let tx = db.tx_mut().expect("Could not get database tx");
let mut post_state = PostState::new();
let address_a = Address::zero();
let address_b = Address::repeat_byte(0xff);
// Changesets map slot => (old value, new value):
// 0x00 => 0 => 1
// 0x01 => 0 => 2
let storage_a_changeset = BTreeMap::from([
(U256::from(0), (U256::from(0), U256::from(1))),
(U256::from(1), (U256::from(0), U256::from(2))),
]);
// 0x01 => 1 => 2
let storage_b_changeset = BTreeMap::from([(U256::from(1), (U256::from(1), U256::from(2)))]);
post_state.change_storage(address_a, storage_a_changeset);
post_state.change_storage(address_b, storage_b_changeset);
// Write at first transition id (0).
post_state.write_to_db(&tx, 0).expect("Could not write post state to DB");
// Check plain storage state: slots should now hold their *new* values.
let mut storage_cursor = tx
.cursor_dup_read::<tables::PlainStorageState>()
.expect("Could not open plain storage state cursor");
assert_eq!(
storage_cursor.seek_exact(address_a).unwrap(),
Some((address_a, StorageEntry { key: H256::zero(), value: U256::from(1) })),
"Slot 0 for account A should be 1"
);
assert_eq!(
storage_cursor.next_dup().unwrap(),
Some((
address_a,
StorageEntry { key: H256::from(U256::from(1).to_be_bytes()), value: U256::from(2) }
)),
"Slot 1 for account A should be 2"
);
assert_eq!(
storage_cursor.next_dup().unwrap(),
None,
"Account A should only have 2 storage slots"
);
assert_eq!(
storage_cursor.seek_exact(address_b).unwrap(),
Some((
address_b,
StorageEntry { key: H256::from(U256::from(1).to_be_bytes()), value: U256::from(2) }
)),
"Slot 1 for account B should be 2"
);
assert_eq!(
storage_cursor.next_dup().unwrap(),
None,
"Account B should only have 1 storage slot"
);
// Check change set: entries are keyed by (transition id, address) and store the
// *old* slot values.
let mut changeset_cursor = tx
.cursor_dup_read::<tables::StorageChangeSet>()
.expect("Could not open storage changeset cursor");
assert_eq!(
changeset_cursor.seek_exact(TransitionIdAddress((0, address_a))).unwrap(),
Some((
TransitionIdAddress((0, address_a)),
StorageEntry { key: H256::zero(), value: U256::from(0) }
)),
"Slot 0 for account A should have changed from 0"
);
assert_eq!(
changeset_cursor.next_dup().unwrap(),
Some((
TransitionIdAddress((0, address_a)),
StorageEntry { key: H256::from(U256::from(1).to_be_bytes()), value: U256::from(0) }
)),
"Slot 1 for account A should have changed from 0"
);
assert_eq!(
changeset_cursor.next_dup().unwrap(),
None,
"Account A should only be in the changeset 2 times"
);
assert_eq!(
changeset_cursor.seek_exact(TransitionIdAddress((0, address_b))).unwrap(),
Some((
TransitionIdAddress((0, address_b)),
StorageEntry { key: H256::from(U256::from(1).to_be_bytes()), value: U256::from(1) }
)),
"Slot 1 for account B should have changed from 1"
);
assert_eq!(
changeset_cursor.next_dup().unwrap(),
None,
"Account B should only be in the changeset 1 time"
);
// Delete account A at transition id 1: its plain storage must be wiped, and the
// wiped (pre-deletion) values must still land in the changeset.
let mut post_state = PostState::new();
post_state.destroy_account(address_a, Account::default());
post_state.write_to_db(&tx, 1).expect("Could not write post state to DB");
assert_eq!(
storage_cursor.seek_exact(address_a).unwrap(),
None,
"Account A should have no storage slots after deletion"
);
assert_eq!(
changeset_cursor.seek_exact(TransitionIdAddress((1, address_a))).unwrap(),
Some((
TransitionIdAddress((1, address_a)),
StorageEntry { key: H256::zero(), value: U256::from(1) }
)),
"Slot 0 for account A should have changed from 1 on deletion"
);
assert_eq!(
changeset_cursor.next_dup().unwrap(),
Some((
TransitionIdAddress((1, address_a)),
StorageEntry { key: H256::from(U256::from(1).to_be_bytes()), value: U256::from(2) }
)),
"Slot 1 for account A should have changed from 2 on deletion"
);
assert_eq!(
changeset_cursor.next_dup().unwrap(),
None,
"Account A should only be in the changeset 2 times on deletion"
);
}
#[test]
// Reverting to a previously recorded transition id should drop exactly the
// transitions (and their account changes) that came after the checkpoint,
// and return the changes that were undone.
fn revert_to() {
    // First transition: create account 0x00...
    let mut state = PostState::new();
    state.create_account(
        Address::repeat_byte(0),
        Account { nonce: 1, balance: U256::from(1), bytecode_hash: None },
    );
    state.finish_transition();

    // Snapshot the transition id to revert back to later.
    let checkpoint = state.current_transition_id;

    // Second transition: create account 0xff...
    state.create_account(
        Address::repeat_byte(0xff),
        Account { nonce: 2, balance: U256::from(2), bytecode_hash: None },
    );
    state.finish_transition();
    assert_eq!(state.accounts().len(), 2);
    assert_eq!(state.transitions_count(), 2);

    // Revert: only the post-checkpoint transition (one account creation) is undone.
    let undone = state.revert_to(checkpoint as usize);
    assert_eq!(state.transitions_count(), 1);
    assert_eq!(state.accounts().len(), 1);
    assert_eq!(undone.len(), 1);
}
}

View File

@@ -1,15 +1,10 @@
//! Dummy blocks and data for tests
use crate::{
execution_result::{
AccountChangeSet, AccountInfoChangeSet, ExecutionResult, TransactionChangeSet,
},
Transaction,
};
use crate::{post_state::PostState, Transaction};
use reth_db::{database::Database, models::StoredBlockBody, tables};
use reth_primitives::{
hex_literal::hex, proofs::EMPTY_ROOT, Account, Header, Receipt, SealedBlock,
SealedBlockWithSenders, Withdrawal, H160, H256, U256,
hex_literal::hex, proofs::EMPTY_ROOT, Account, Header, SealedBlock, SealedBlockWithSenders,
Withdrawal, H160, H256, U256,
};
use reth_rlp::Decodable;
use std::collections::BTreeMap;
@@ -54,7 +49,7 @@ pub struct BlockChainTestData {
/// Genesis
pub genesis: SealedBlock,
/// Blocks with its execution result
pub blocks: Vec<(SealedBlockWithSenders, ExecutionResult)>,
pub blocks: Vec<(SealedBlockWithSenders, PostState)>,
}
impl Default for BlockChainTestData {
@@ -75,7 +70,7 @@ pub fn genesis() -> SealedBlock {
}
/// Block one that points to genesis
fn block1() -> (SealedBlockWithSenders, ExecutionResult) {
fn block1() -> (SealedBlockWithSenders, PostState) {
let mut block_rlp = hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice();
let mut block = SealedBlock::decode(&mut block_rlp).unwrap();
block.withdrawals = Some(vec![Withdrawal::default()]);
@@ -86,28 +81,29 @@ fn block1() -> (SealedBlockWithSenders, ExecutionResult) {
header.parent_hash = H256::zero();
block.header = header.seal_slow();
let mut account_changeset = AccountChangeSet {
account: AccountInfoChangeSet::Created {
new: Account { nonce: 1, balance: U256::from(10), bytecode_hash: None },
},
..Default::default()
};
account_changeset.storage.insert(U256::from(5), (U256::ZERO, U256::from(10)));
let mut post_state = PostState::default();
// Transaction changes
post_state.create_account(
H160([0x60; 20]),
Account { nonce: 1, balance: U256::from(10), bytecode_hash: None },
);
post_state.change_storage(
H160([0x60; 20]),
BTreeMap::from([(U256::from(5), (U256::ZERO, U256::from(10)))]),
);
post_state.finish_transition();
// Block changes
post_state.create_account(
H160([0x61; 20]),
Account { nonce: 1, balance: U256::from(10), bytecode_hash: None },
);
post_state.finish_transition();
let exec_res = ExecutionResult {
tx_changesets: vec![TransactionChangeSet {
receipt: Receipt::default(), /* receipts are not saved. */
changeset: BTreeMap::from([(H160([0x60; 20]), account_changeset.clone())]),
new_bytecodes: BTreeMap::from([]),
}],
block_changesets: BTreeMap::from([(H160([0x61; 20]), account_changeset.account)]),
};
(SealedBlockWithSenders { block, senders: vec![H160([0x30; 20])] }, exec_res)
(SealedBlockWithSenders { block, senders: vec![H160([0x30; 20])] }, post_state)
}
/// Block two that points to block 1
fn block2() -> (SealedBlockWithSenders, ExecutionResult) {
fn block2() -> (SealedBlockWithSenders, PostState) {
let mut block_rlp = hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice();
let mut block = SealedBlock::decode(&mut block_rlp).unwrap();
block.withdrawals = Some(vec![Withdrawal::default()]);
@@ -120,27 +116,25 @@ fn block2() -> (SealedBlockWithSenders, ExecutionResult) {
H256(hex!("d846db2ab174c492cfe985c18fa75b154e20572bc33bb1c67cf5d2995791bdb7"));
block.header = header.seal_slow();
let mut account_changeset = AccountChangeSet::default();
// storage will be moved
let info_changeset = AccountInfoChangeSet::Changed {
old: Account { nonce: 1, balance: U256::from(10), bytecode_hash: None },
new: Account { nonce: 2, balance: U256::from(15), bytecode_hash: None },
};
account_changeset.account = info_changeset;
account_changeset.storage.insert(U256::from(5), (U256::from(10), U256::from(15)));
let mut post_state = PostState::default();
// Transaction changes
post_state.change_account(
H160([0x60; 20]),
Account { nonce: 1, balance: U256::from(10), bytecode_hash: None },
Account { nonce: 2, balance: U256::from(15), bytecode_hash: None },
);
post_state.change_storage(
H160([0x60; 20]),
BTreeMap::from([(U256::from(5), (U256::from(10), U256::from(15)))]),
);
post_state.finish_transition();
// Block changes
post_state.change_account(
H160([0x60; 20]),
Account { nonce: 2, balance: U256::from(15), bytecode_hash: None },
Account { nonce: 3, balance: U256::from(20), bytecode_hash: None },
);
post_state.finish_transition();
let block_changeset = AccountInfoChangeSet::Changed {
old: Account { nonce: 2, balance: U256::from(15), bytecode_hash: None },
new: Account { nonce: 3, balance: U256::from(20), bytecode_hash: None },
};
let exec_res = ExecutionResult {
tx_changesets: vec![TransactionChangeSet {
receipt: Receipt::default(), /* receipts are not saved. */
changeset: BTreeMap::from([(H160([0x60; 20]), account_changeset.clone())]),
new_bytecodes: BTreeMap::from([]),
}],
block_changesets: BTreeMap::from([(H160([0x60; 20]), block_changeset)]),
};
(SealedBlockWithSenders { block, senders: vec![H160([0x31; 20])] }, exec_res)
(SealedBlockWithSenders { block, senders: vec![H160([0x31; 20])] }, post_state)
}

View File

@@ -1,6 +1,6 @@
//! Executor Factory
use crate::{execution_result::ExecutionResult, StateProvider};
use crate::{post_state::PostState, StateProvider};
use reth_interfaces::executor::Error;
use reth_primitives::{Address, Block, ChainSpec, U256};
@@ -33,7 +33,7 @@ pub trait BlockExecutor<SP: StateProvider> {
block: &Block,
total_difficulty: U256,
senders: Option<Vec<Address>>,
) -> Result<ExecutionResult, Error>;
) -> Result<PostState, Error>;
/// Executes the block and checks receipts
fn execute_and_verify_receipt(
@@ -41,5 +41,5 @@ pub trait BlockExecutor<SP: StateProvider> {
block: &Block,
total_difficulty: U256,
senders: Option<Vec<Address>>,
) -> Result<ExecutionResult, Error>;
) -> Result<PostState, Error>;
}

View File

@@ -1,3 +1,8 @@
use crate::{
insert_canonical_block,
post_state::{Change, PostState, StorageChangeset},
trie::{DBTrieLoader, TrieError},
};
use itertools::{izip, Itertools};
use reth_db::{
common::KeyValue,
@@ -6,7 +11,7 @@ use reth_db::{
models::{
sharded_key,
storage_sharded_key::{self, StorageShardedKey},
ShardedKey, StoredBlockBody, TransitionIdAddress,
AccountBeforeTx, ShardedKey, StoredBlockBody, TransitionIdAddress,
},
table::Table,
tables,
@@ -15,25 +20,16 @@ use reth_db::{
};
use reth_interfaces::{db::Error as DbError, provider::ProviderError};
use reth_primitives::{
keccak256, proofs::EMPTY_ROOT, Account, Address, BlockHash, BlockNumber, Bytecode, ChainSpec,
Hardfork, Header, Receipt, SealedBlock, SealedBlockWithSenders, StorageEntry,
TransactionSignedEcRecovered, TransitionId, TxNumber, H256, U256,
keccak256, proofs::EMPTY_ROOT, Account, Address, BlockHash, BlockNumber, ChainSpec, Hardfork,
Header, SealedBlock, SealedBlockWithSenders, StorageEntry, TransactionSignedEcRecovered,
TransitionId, TxNumber, H256, U256,
};
use reth_tracing::tracing::{info, trace};
use std::{
collections::{btree_map::Entry, BTreeMap, BTreeSet},
fmt::Debug,
ops::{Bound, Deref, DerefMut, Range, RangeBounds},
};
use crate::{
execution_result::{AccountInfoChangeSet, TransactionChangeSet},
insert_canonical_block,
trie::{DBTrieLoader, TrieError},
};
use crate::execution_result::{AccountChangeSet, ExecutionResult};
/// A container for any DB transaction that will open a new inner transaction when the current
/// one is committed.
// NOTE: This container is needed since `Transaction::commit` takes `mut self`, so methods in
@@ -317,7 +313,7 @@ where
pub fn get_block_execution_result_range(
&self,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<ExecutionResult>, TransactionError> {
) -> Result<Vec<PostState>, TransactionError> {
self.get_take_block_execution_result_range::<false>(range)
}
@@ -327,7 +323,7 @@ where
pub fn take_block_execution_result_range(
&self,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<ExecutionResult>, TransactionError> {
) -> Result<Vec<PostState>, TransactionError> {
self.get_take_block_execution_result_range::<true>(range)
}
@@ -336,7 +332,7 @@ where
&self,
chain_spec: &ChainSpec,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<(SealedBlockWithSenders, ExecutionResult)>, TransactionError> {
) -> Result<Vec<(SealedBlockWithSenders, PostState)>, TransactionError> {
self.get_take_block_and_execution_range::<false>(chain_spec, range)
}
@@ -345,7 +341,7 @@ where
&self,
chain_spec: &ChainSpec,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<(SealedBlockWithSenders, ExecutionResult)>, TransactionError> {
) -> Result<Vec<(SealedBlockWithSenders, PostState)>, TransactionError> {
self.get_take_block_and_execution_range::<true>(chain_spec, range)
}
@@ -514,57 +510,23 @@ where
Ok(())
}
/// Insert full block and make it canonical
/// Insert full block and make it canonical.
///
/// This is atomic operation and transaction will do one commit at the end of the function.
pub fn insert_block(
&mut self,
block: SealedBlockWithSenders,
chain_spec: &ChainSpec,
changeset: ExecutionResult,
) -> Result<(), TransactionError> {
/// This inserts the block and builds history related indexes. Once all blocks in a chain have
/// been committed, the state root needs to be inserted separately with
/// [`Transaction::insert_hashes`].
///
/// # Note
///
/// This assumes that we are using beacon consensus and that the block is post-merge, which
/// means that the block will have no block reward.
pub fn insert_block(&mut self, block: SealedBlockWithSenders) -> Result<(), TransactionError> {
// Header, Body, SenderRecovery, TD, TxLookup stages
let (block, senders) = block.into_components();
let block_number = block.number;
let block_state_root = block.state_root;
let block_hash = block.hash();
let parent_block_number = block.number.saturating_sub(1);
let (from, to) =
insert_canonical_block(self.deref_mut(), block, Some(senders), false).unwrap();
// execution stage
self.insert_execution_result(vec![changeset], chain_spec, parent_block_number)?;
// storage hashing stage
{
let lists = self.get_addresses_and_keys_of_changed_storages(from, to)?;
let storages = self.get_plainstate_storages(lists.into_iter())?;
self.insert_storage_for_hashing(storages.into_iter())?;
}
// account hashing stage
{
let lists = self.get_addresses_of_changed_accounts(from, to)?;
let accounts = self.get_plainstate_accounts(lists.into_iter())?;
self.insert_account_for_hashing(accounts.into_iter())?;
}
// merkle tree
{
let current_root = self.get_header(parent_block_number)?.state_root;
let mut loader = DBTrieLoader::new(self.deref_mut());
let root = loader.update_root(current_root, from..to).and_then(|e| e.root())?;
if root != block_state_root {
return Err(TransactionError::StateTrieRootMismatch {
got: root,
expected: block_state_root,
block_number,
block_hash,
})
}
}
// account history stage
{
let indices = self.get_account_transition_ids_from_changeset(from, to)?;
@@ -580,6 +542,58 @@ where
Ok(())
}
/// Calculate the hashes of all changed accounts and storages, and finally calculate the state
/// root.
///
/// The chain goes from `fork_block_number + 1` to `current_block_number`, and hashes are
/// calculated from `from_transition_id` to `to_transition_id`.
///
/// The resulting state root is compared with `expected_state_root`.
pub fn insert_hashes(
&mut self,
fork_block_number: BlockNumber,
from_transition_id: TransitionId,
to_transition_id: TransitionId,
current_block_number: BlockNumber,
current_block_hash: H256,
expected_state_root: H256,
) -> Result<(), TransactionError> {
// storage hashing stage
{
let lists = self
.get_addresses_and_keys_of_changed_storages(from_transition_id, to_transition_id)?;
let storages = self.get_plainstate_storages(lists.into_iter())?;
self.insert_storage_for_hashing(storages.into_iter())?;
}
// account hashing stage
{
let lists =
self.get_addresses_of_changed_accounts(from_transition_id, to_transition_id)?;
let accounts = self.get_plainstate_accounts(lists.into_iter())?;
self.insert_account_for_hashing(accounts.into_iter())?;
}
// merkle tree
{
let current_root = self.get_header(fork_block_number)?.state_root;
let mut loader = DBTrieLoader::new(self.deref_mut());
let root = loader
.update_root(current_root, from_transition_id..to_transition_id)
.and_then(|e| e.root())?;
if root != expected_state_root {
return Err(TransactionError::StateTrieRootMismatch {
got: root,
expected: expected_state_root,
block_number: current_block_number,
block_hash: current_block_hash,
})
}
}
Ok(())
}
/// Return list of entries from table
///
/// If TAKE is true, opened cursor would be write and it would delete all values from db.
@@ -764,27 +778,32 @@ where
Ok(blocks)
}
/// Traverse over changesets and plain state and recreate the execution results.
/// Traverse over changesets and plain state and recreate the [`PostState`]s for the given range
/// of blocks.
///
/// Iterate over [tables::BlockTransitionIndex] and take all transitions.
/// Then iterate over all [tables::StorageChangeSet] and [tables::AccountChangeSet] in reverse
/// order and populate all changesets. To be able to populate changesets correctly and to
/// have both, new and old value of account/storage, we needs to have local state and access
/// to [tables::PlainAccountState] [tables::PlainStorageState].
/// While iterating over account/storage changesets:
/// At first instance of account/storage we are taking old value from changeset,
/// new value from plain state and saving old value to local state.
/// On the second encounter of the same account/storage, we again take the old value from the changeset,
/// but new value is taken from local state and old value is again updated to local state.
/// 1. Iterate over the [BlockTransitionIndex][tables::BlockTransitionIndex] table to get all
/// the transitions
/// 2. Iterate over the [StorageChangeSet][tables::StorageChangeSet] table
/// and the [AccountChangeSet][tables::AccountChangeSet] tables in reverse order to reconstruct
/// the changesets.
/// - In order to have both the old and new values in the changesets, we also access the
/// plain state tables.
/// 3. While iterating over the changeset tables, if we encounter a new account or storage slot,
/// we:
/// 1. Take the old value from the changeset
/// 2. Take the new value from the plain state
/// 3. Save the old value to the local state
/// 4. While iterating over the changeset tables, if we encounter an account/storage slot we
/// have seen before we:
/// 1. Take the old value from the changeset
/// 2. Take the new value from the local state
/// 3. Set the local state to the value in the changeset
///
/// Now if TAKE is true we will use local state and update all old values to plain state tables.
///
/// After that, iterate over [`tables::BlockBodies`] and pack created changesets in block chunks
/// taking care if block has block changesets or not.
/// If `TAKE` is `true`, the local state will be written to the plain state tables.
fn get_take_block_execution_result_range<const TAKE: bool>(
&self,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<ExecutionResult>, TransactionError> {
) -> Result<Vec<PostState>, TransactionError> {
let block_transition =
self.get_or_take::<tables::BlockTransitionIndex, TAKE>(range.clone())?;
@@ -815,10 +834,7 @@ where
// Double option around Account represent if Account state is know (first option) and
// account is removed (Second Option)
type LocalPlainState = BTreeMap<Address, (Option<Option<Account>>, BTreeMap<H256, U256>)>;
type Changesets = BTreeMap<
TransitionId,
BTreeMap<Address, (AccountInfoChangeSet, BTreeMap<H256, (U256, U256)>)>,
>;
type Changesets = BTreeMap<TransitionId, Vec<Change>>;
let mut local_plain_state: LocalPlainState = BTreeMap::new();
@@ -832,33 +848,51 @@ where
// add account changeset changes
for (transition_id, account_before) in account_changeset.into_iter().rev() {
let new_info = match local_plain_state.entry(account_before.address) {
let AccountBeforeTx { info: old_info, address } = account_before;
let new_info = match local_plain_state.entry(address) {
Entry::Vacant(entry) => {
let new_account =
plain_accounts_cursor.seek(account_before.address)?.map(|(_s, i)| i);
entry.insert((Some(account_before.info), BTreeMap::new()));
let new_account = plain_accounts_cursor.seek_exact(address)?.map(|kv| kv.1);
entry.insert((Some(old_info), BTreeMap::new()));
new_account
}
Entry::Occupied(mut entry) => {
let new_account =
std::mem::replace(&mut entry.get_mut().0, Some(account_before.info));
let new_account = std::mem::replace(&mut entry.get_mut().0, Some(old_info));
new_account.expect("As we are stacking account first, account would always be Some(Some) or Some(None)")
}
};
let account_info_changeset = AccountInfoChangeSet::new(account_before.info, new_info);
// insert changeset to transition id. Multiple account for same transition Id are not
// possible.
all_changesets
.entry(transition_id)
.or_default()
.entry(account_before.address)
.or_default()
.0 = account_info_changeset
let change = match (old_info, new_info) {
(Some(old), Some(new)) => {
if new != old {
Change::AccountChanged {
id: transition_id,
address,
old,
new,
}
} else {
unreachable!("Junk data in database: an account changeset did not represent any change");
}
}
(None, Some(account)) => Change::AccountCreated {
id: transition_id,
address,
account
},
(Some(old), None) => Change::AccountDestroyed {
id: transition_id,
address,
old
},
(None, None) => unreachable!("Junk data in database: an account changeset transitioned from no account to no account"),
};
all_changesets.entry(transition_id).or_default().push(change);
}
// add storage changeset changes
let mut storage_changes: BTreeMap<TransitionIdAddress, StorageChangeset> = BTreeMap::new();
for (transition_and_address, storage_entry) in storage_changeset.into_iter().rev() {
let TransitionIdAddress((transition_id, address)) = transition_and_address;
let TransitionIdAddress((_, address)) = transition_and_address;
let new_storage =
match local_plain_state.entry(address).or_default().1.entry(storage_entry.key) {
Entry::Vacant(entry) => {
@@ -873,13 +907,20 @@ where
std::mem::replace(entry.get_mut(), storage_entry.value)
}
};
all_changesets
.entry(transition_id)
.or_default()
.entry(address)
.or_default()
.1
.insert(storage_entry.key, (storage_entry.value, new_storage));
storage_changes.entry(transition_and_address).or_default().insert(
U256::from_be_bytes(storage_entry.key.0),
(storage_entry.value, new_storage),
);
}
for (TransitionIdAddress((transition_id, address)), storage_changeset) in
storage_changes.into_iter()
{
all_changesets.entry(transition_id).or_default().push(Change::StorageChanged {
id: transition_id,
address,
changeset: storage_changeset,
});
}
if TAKE {
@@ -894,10 +935,12 @@ where
plain_accounts_cursor.delete_current()?;
}
}
// revert storages
for (storage_key, storage_value) in storage.into_iter() {
let storage_entry = StorageEntry { key: storage_key, value: storage_value };
// delete previous value
// TODO: This does not use dupsort features
if plain_storage_cursor
.seek_by_key_subkey(address, storage_key)?
.filter(|s| s.key == storage_key)
@@ -905,6 +948,8 @@ where
{
plain_storage_cursor.delete_current()?
}
// TODO: This does not use dupsort features
// insert value if needed
if storage_value != U256::ZERO {
plain_storage_cursor.insert(address, storage_entry)?;
@@ -913,73 +958,39 @@ where
}
}
// NOTE: Some storage changesets can be empty,
// all account changeset have at least beneficiary fee transfer.
// iterate over block body and create ExecutionResult
let mut block_exec_results = Vec::new();
let mut changeset_iter = all_changesets.into_iter();
let mut block_transition_iter = block_transition.into_iter();
let mut next_transition_id = from;
let mut next_changeset = changeset_iter.next().unwrap_or_default();
// loop break if we are at the end of the blocks.
for (_, block_body) in block_bodies.into_iter() {
let mut block_exec_res = ExecutionResult::default();
let mut block_exec_res = PostState::new();
for _ in 0..block_body.tx_count {
// only if next_changeset
let changeset = if next_transition_id == next_changeset.0 {
let changeset = next_changeset
.1
.into_iter()
.map(|(address, (account, storage))| {
(
address,
AccountChangeSet {
account,
storage: storage
.into_iter()
.map(|(key, val)| (U256::from_be_bytes(key.0), val))
.collect(),
wipe_storage: false, /* it is always false as all storage
* changesets for selfdestruct are
* already accounted. */
},
)
})
.collect();
next_changeset = changeset_iter.next().unwrap_or_default();
changeset
} else {
BTreeMap::new()
};
if let Some(changes) = all_changesets.remove(&next_transition_id) {
for mut change in changes.into_iter() {
change
.set_transition_id(block_exec_res.transitions_count() as TransitionId);
block_exec_res.add_and_apply(change);
}
}
block_exec_res.finish_transition();
next_transition_id += 1;
block_exec_res.tx_changesets.push(TransactionChangeSet {
receipt: Receipt::default(), /* TODO(receipt) when they are saved, load them
* from db */
changeset,
new_bytecodes: Default::default(), /* TODO(bytecode), bytecode is not cleared
* so it is same sa previous. */
});
}
let Some((_,block_transition)) = block_transition_iter.next() else { break};
// if block transition points to 1+next transition id it means that there is block
// changeset.
if block_transition == next_transition_id + 1 {
// assert last_transition_id == block_transition
if next_transition_id == next_changeset.0 {
// take block changeset
block_exec_res.block_changesets = next_changeset
.1
.into_iter()
.map(|(address, (account, _))| (address, account))
.collect();
next_changeset = changeset_iter.next().unwrap_or_default();
if let Some(changes) = all_changesets.remove(&next_transition_id) {
for mut change in changes.into_iter() {
change
.set_transition_id(block_exec_res.transitions_count() as TransitionId);
block_exec_res.add_and_apply(change);
}
block_exec_res.finish_transition();
next_transition_id += 1;
}
next_transition_id += 1;
}
block_exec_results.push(block_exec_res)
}
@@ -991,7 +1002,7 @@ where
&self,
chain_spec: &ChainSpec,
range: impl RangeBounds<BlockNumber> + Clone,
) -> Result<Vec<(SealedBlockWithSenders, ExecutionResult)>, TransactionError> {
) -> Result<Vec<(SealedBlockWithSenders, PostState)>, TransactionError> {
if TAKE {
let (from_transition, parent_number, parent_state_root) = match range.start_bound() {
Bound::Included(n) => {
@@ -1355,134 +1366,6 @@ where
Ok(())
}
/// Used inside execution stage to commit created account storage changesets for transaction or
/// block state change.
///
/// Each entry in `changesets` corresponds to one block directly following `parent_block_number`.
/// For every block this walks its per-transaction changesets (updating plain state and changeset
/// tables), inserts any new bytecodes, and finally applies the block-level changesets (e.g. block
/// rewards), advancing the transition id as it goes.
pub fn insert_execution_result(
    &self,
    changesets: Vec<ExecutionResult>,
    chain_spec: &ChainSpec,
    parent_block_number: u64,
) -> Result<(), TransactionError> {
    // Get last tx count so that we can know amount of transaction in the block.
    // The stored index for the parent block is the first transition id of the next block.
    let mut current_transition_id = self
        .get::<tables::BlockTransitionIndex>(parent_block_number)?
        .ok_or(ProviderError::BlockTransition { block_number: parent_block_number })?;

    info!(target: "sync::stages::execution", current_transition_id, blocks = changesets.len(), "Inserting execution results");

    // apply changes to plain database.
    let mut block_number = parent_block_number;
    for results in changesets.into_iter() {
        block_number += 1;
        // Spurious Dragon changes account-clearing semantics, so `apply_to_db` must know
        // whether the fork is active for this block.
        let spurious_dragon_active =
            chain_spec.fork(Hardfork::SpuriousDragon).active_at_block(block_number);
        // insert state change set: one transition per transaction in the block.
        for result in results.tx_changesets.into_iter() {
            for (address, account_change_set) in result.changeset.into_iter() {
                let AccountChangeSet { account, wipe_storage, storage } = account_change_set;
                // apply account change to db. Updates AccountChangeSet and PlainAccountState
                // tables.
                trace!(target: "sync::stages::execution", ?address, current_transition_id, ?account, wipe_storage, "Applying account changeset");
                account.apply_to_db(
                    &**self,
                    address,
                    current_transition_id,
                    spurious_dragon_active,
                )?;

                // Storage changesets are keyed by (transition id, address).
                let storage_id = TransitionIdAddress((current_transition_id, address));

                // cast key to H256 and trace the change
                let storage = storage
                    .into_iter()
                    .map(|(key, (old_value, new_value))| {
                        let hkey = H256(key.to_be_bytes());
                        trace!(target: "sync::stages::execution", ?address, current_transition_id, ?hkey, ?old_value, ?new_value, "Applying storage changeset");
                        (hkey, old_value, new_value)
                    })
                    .collect::<Vec<_>>();

                let mut cursor_storage_changeset =
                    self.cursor_write::<tables::StorageChangeSet>()?;
                cursor_storage_changeset.seek_exact(storage_id)?;

                if wipe_storage {
                    // The whole storage of the account is being cleared (e.g. selfdestruct):
                    // iterate over storage and save them before entry is deleted.
                    self.cursor_read::<tables::PlainStorageState>()?
                        .walk(Some(address))?
                        .take_while(|res| {
                            res.as_ref().map(|(k, _)| *k == address).unwrap_or_default()
                        })
                        .try_for_each(|entry| {
                            let (_, old_value) = entry?;
                            cursor_storage_changeset.append(storage_id, old_value)
                        })?;

                    // delete all entries
                    self.delete::<tables::PlainStorageState>(address, None)?;

                    // insert storage changeset
                    for (key, _, new_value) in storage {
                        // old values are already cleared.
                        if new_value != U256::ZERO {
                            self.put::<tables::PlainStorageState>(
                                address,
                                StorageEntry { key, value: new_value },
                            )?;
                        }
                    }
                } else {
                    // insert storage changeset
                    for (key, old_value, new_value) in storage {
                        let old_entry = StorageEntry { key, value: old_value };
                        let new_entry = StorageEntry { key, value: new_value };
                        // insert into StorageChangeSet
                        cursor_storage_changeset.append(storage_id, old_entry)?;

                        // Always delete old value as duplicate table, put will not override it
                        self.delete::<tables::PlainStorageState>(address, Some(old_entry))?;
                        // Zero values are represented by absence of the slot in plain state.
                        if new_value != U256::ZERO {
                            self.put::<tables::PlainStorageState>(address, new_entry)?;
                        }
                    }
                }
            }
            // insert bytecode
            for (hash, bytecode) in result.new_bytecodes.into_iter() {
                // make different types of bytecode. Checked and maybe even analyzed (needs to
                // be packed). Currently save only raw bytes.
                let bytes = bytecode.bytes();
                trace!(target: "sync::stages::execution", ?hash, ?bytes, len = bytes.len(), "Inserting bytecode");
                self.put::<tables::Bytecodes>(hash, Bytecode(bytecode))?;

                // NOTE: bytecode bytes are not inserted in change set and can be found in
                // separate table
            }
            current_transition_id += 1;
        }

        let have_block_changeset = !results.block_changesets.is_empty();
        // If there are any post block changes, we will add account changesets to db.
        for (address, changeset) in results.block_changesets.into_iter() {
            trace!(target: "sync::stages::execution", ?address, current_transition_id, "Applying block reward");
            changeset.apply_to_db(
                &**self,
                address,
                current_transition_id,
                spurious_dragon_active,
            )?;
        }

        // Transition is incremented every time before the Paris hardfork, and after
        // Shanghai only if there are Withdrawals in the block. So it is correct to
        // increment the transition id every time there is a block changeset present.
        if have_block_changeset {
            current_transition_id += 1;
        }
    }
    Ok(())
}
/// Return full table as Vec
pub fn table<T: Table>(&self) -> Result<Vec<KeyValue<T>>, DbError>
where
@@ -1598,7 +1481,7 @@ pub enum TransactionError {
mod test {
use crate::{insert_canonical_block, test_utils::blocks::*, Transaction};
use reth_db::{mdbx::test_utils::create_test_rw_db, tables, transaction::DbTxMut};
use reth_primitives::{proofs::EMPTY_ROOT, ChainSpecBuilder, MAINNET};
use reth_primitives::{proofs::EMPTY_ROOT, ChainSpecBuilder, TransitionId, MAINNET};
use std::ops::DerefMut;
#[test]
@@ -1623,7 +1506,17 @@ mod test {
tx.put::<tables::AccountsTrie>(EMPTY_ROOT, vec![0x80]).unwrap();
assert_genesis_block(&tx, data.genesis);
tx.insert_block(block1.clone(), &chain_spec, exec_res1.clone()).unwrap();
exec_res1.clone().write_to_db(tx.deref_mut(), 0).unwrap();
tx.insert_block(block1.clone()).unwrap();
tx.insert_hashes(
genesis.number,
0,
exec_res1.transitions_count() as TransitionId,
block1.number,
block1.hash,
block1.state_root,
)
.unwrap();
// get one block
let get = tx.get_block_and_execution_range(&chain_spec, 1..=1).unwrap();
@@ -1634,8 +1527,32 @@ mod test {
assert_eq!(take, vec![(block1.clone(), exec_res1.clone())]);
assert_genesis_block(&tx, genesis.clone());
tx.insert_block(block1.clone(), &chain_spec, exec_res1.clone()).unwrap();
tx.insert_block(block2.clone(), &chain_spec, exec_res2.clone()).unwrap();
exec_res1.clone().write_to_db(tx.deref_mut(), 0).unwrap();
tx.insert_block(block1.clone()).unwrap();
tx.insert_hashes(
genesis.number,
0,
exec_res1.transitions_count() as TransitionId,
block1.number,
block1.hash,
block1.state_root,
)
.unwrap();
exec_res2
.clone()
.write_to_db(tx.deref_mut(), exec_res1.transitions_count() as TransitionId)
.unwrap();
tx.insert_block(block2.clone()).unwrap();
tx.insert_hashes(
block1.number,
exec_res1.transitions_count() as TransitionId,
exec_res2.transitions_count() as TransitionId,
2,
block2.hash,
block2.state_root,
)
.unwrap();
// get second block
let get = tx.get_block_and_execution_range(&chain_spec, 2..=2).unwrap();