diff --git a/Cargo.lock b/Cargo.lock index 257a1f8c0c..2c450331fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3516,9 +3516,11 @@ dependencies = [ "reth-bodies-downloaders", "reth-db", "reth-eth-wire", + "reth-executor", "reth-headers-downloaders", "reth-interfaces", "reth-primitives", + "reth-rlp", "tempfile", "thiserror", "tokio", diff --git a/crates/codecs/derive/src/compact/flags.rs b/crates/codecs/derive/src/compact/flags.rs index 2cc1519d88..6bc64a06f5 100644 --- a/crates/codecs/derive/src/compact/flags.rs +++ b/crates/codecs/derive/src/compact/flags.rs @@ -115,7 +115,7 @@ fn build_struct_field_flags( /// Returns the total number of bytes used by the flags struct. fn pad_flag_struct(total_bits: u8, field_flags: &mut Vec) -> u8 { let remaining = 8 - total_bits % 8; - let total_bytes = if remaining != 8 { + if remaining != 8 { let bsize = format_ident!("B{remaining}"); field_flags.push(quote! { #[skip] @@ -124,8 +124,7 @@ fn pad_flag_struct(total_bits: u8, field_flags: &mut Vec) -> u8 { (total_bits + remaining) / 8 } else { total_bits / 8 - }; - total_bytes + } } /// Placeholder struct for when there are no bitfields to be added. diff --git a/crates/db/src/kv/cursor.rs b/crates/db/src/kv/cursor.rs index 33208bdfdd..2cb4a3cf84 100644 --- a/crates/db/src/kv/cursor.rs +++ b/crates/db/src/kv/cursor.rs @@ -1,6 +1,6 @@ //! Cursor wrapper for libmdbx-sys. -use std::marker::PhantomData; +use std::{borrow::Cow, marker::PhantomData}; use crate::utils::*; use reth_interfaces::db::{ @@ -108,11 +108,14 @@ impl<'tx, K: TransactionKind, T: DupSort> DbDupCursorRO<'tx, T> for Cursor<'tx, key: T::Key, subkey: T::SubKey, ) -> Result, Error> { + // encode key and decode it after. + let key = key.encode().as_ref().to_vec(); + let start = self .inner - .get_both_range(key.encode().as_ref(), subkey.encode().as_ref()) + .get_both_range(key.as_ref(), subkey.encode().as_ref()) .map_err(|e| Error::Read(e.into()))? - .map(decode_one::); + .map(|val| decoder::((Cow::Owned(key), val))); Ok(DupWalker::<'cursor, 'tx, T, Self> { cursor: self, start, _tx_phantom: PhantomData {} }) } diff --git a/crates/db/src/kv/mod.rs b/crates/db/src/kv/mod.rs index f92ace32f3..cc49dff941 100644 --- a/crates/db/src/kv/mod.rs +++ b/crates/db/src/kv/mod.rs @@ -356,7 +356,7 @@ mod tests { let mut cursor = tx.cursor_dup::().unwrap(); let mut walker = cursor.walk_dup(key, H256::from_low_u64_be(1)).unwrap(); assert_eq!( - value11, + (key, value11), walker .next() .expect("element should exist.") diff --git a/crates/executor/src/executor.rs b/crates/executor/src/executor.rs index 5e9d4dee5e..276d1fe137 100644 --- a/crates/executor/src/executor.rs +++ b/crates/executor/src/executor.rs @@ -1,12 +1,12 @@ use crate::{ - revm_wrap::{self, SubState}, + revm_wrap::{self, to_reth_acc, SubState}, Config, }; use hashbrown::hash_map::Entry; use reth_interfaces::{executor::Error, provider::StateProvider}; use reth_primitives::{ - bloom::logs_bloom, Account, Address, Bloom, Log, Receipt, SealedHeader, - TransactionSignedEcRecovered, H256, U256, + bloom::logs_bloom, Account, Address, Bloom, Header, Log, Receipt, TransactionSignedEcRecovered, + H256, U256, }; use revm::{ db::AccountState, Account as RevmAccount, AccountInfo, AnalysisKind, Bytecode, ExecutionResult, @@ -20,15 +20,37 @@ pub struct Executor { pub config: Config, } +/// Contains old/new account change +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum AccountInfoChangeSet { + /// Account is newly created. 
+ Created { + /// Newly created acc + new: Account, + }, + /// If account is deleted (selfdestructed) or if we have touched + /// a empty account, we would need to remove/destroy it. + /// (Look at state clearing [EIP-158](https://eips.ethereum.org/EIPS/eip-158)) + Destroyed { + /// Old account. + old: Account, + }, + /// Account is changed. + Changed { + /// New account after transaction change. + new: Account, + /// Old account before transaction. + old: Account, + }, + /// There is not change in account information (nonce/balance). + NoChange, +} + /// Diff change set that is neede for creating history index and updating current world state. #[derive(Debug, Clone)] pub struct AccountChangeSet { - /// Old and New account. - /// If account is deleted (selfdestructed) we would have (Some,None). - /// If account is newly created we would have (None,Some). - /// If it is only a storage change we would have (None,None) - /// If account is changed (balance,nonce) we would have (Some,Some) - pub account: (Option, Option), + /// Old and New account account change. + pub account: AccountInfoChangeSet, /// Storage containing key -> (OldValue,NewValue). in case that old value is not existing /// we can expect to have U256::zero(), same with new value. pub storage: BTreeMap, @@ -67,15 +89,7 @@ pub fn commit_changes( change.insert( address, AccountChangeSet { - account: ( - Some(Account { - nonce: db_account.info.nonce, - balance: db_account.info.balance, - bytecode_hash: Some(db_account.info.code_hash), /* TODO none if - * KECCAK_EMPTY */ - }), - None, - ), + account: AccountInfoChangeSet::Destroyed { old: to_reth_acc(&db_account.info) }, storage: BTreeMap::new(), wipe_storage: true, }, @@ -107,30 +121,33 @@ pub fn commit_changes( // get old account that is going to be overwritten or none if it does not exist // and get new account that was just inserted. new account mut ref is used for // inserting storage - let (old_account, has_change, new_account) = match db.accounts.entry(address) { + let (account_info_changeset, new_account) = match db.accounts.entry(address) { Entry::Vacant(entry) => { let entry = entry.insert(Default::default()); entry.info = account.info.clone(); - (None, true, entry) + // account was not existing, so this means new account is created + (AccountInfoChangeSet::Created { new: to_reth_acc(&entry.info) }, entry) } Entry::Occupied(entry) => { let entry = entry.into_mut(); - let (old_account, has_change) = + + // account is present inside cache but is markes as NotExisting. + let account_changeset = if matches!(entry.account_state, AccountState::NotExisting) { - (None, true) + AccountInfoChangeSet::Created { new: to_reth_acc(&account.info) } + } else if entry.info != account.info { + AccountInfoChangeSet::Changed { + old: to_reth_acc(&entry.info), + new: to_reth_acc(&account.info), + } } else { - (Some(entry.info.clone()), entry.info != account.info) + AccountInfoChangeSet::NoChange }; entry.info = account.info.clone(); - (old_account, has_change, entry) + (account_changeset, entry) } }; - // cast from revm type to reth type - let old_account = old_account.map(|acc| Account { - balance: acc.balance, - nonce: acc.nonce, - bytecode_hash: Some(acc.code_hash), // TODO if KECCAK_EMPTY return None - }); + let mut wipe_storage = false; new_account.account_state = if account.storage_cleared { @@ -153,24 +170,7 @@ pub fn commit_changes( // Insert into change. 
change.insert( address, - AccountChangeSet { - account: if has_change { - ( - old_account, - Some(new_account).map(|acc| { - Account { - balance: acc.info.balance, - nonce: acc.info.nonce, - bytecode_hash: Some(acc.info.code_hash), // TODO if KECCAK_EMPTY return None - } - }), - ) - } else { - (None, None) - }, - storage, - wipe_storage, - }, + AccountChangeSet { account: account_info_changeset, storage, wipe_storage }, ); } } @@ -193,7 +193,7 @@ pub struct TransactionStatePatch { /// Execute and verify block pub fn execute_and_verify_receipt( - header: &SealedHeader, + header: &Header, transactions: &[TransactionSignedEcRecovered], config: &Config, db: &mut SubState, @@ -232,7 +232,7 @@ pub fn verify_receipt<'a>( /// Verify block. Execute all transaction and compare results. /// Return diff is on transaction granularity. We are returning vector of pub fn execute( - header: &SealedHeader, + header: &Header, transactions: &[TransactionSignedEcRecovered], config: &Config, db: &mut SubState, @@ -281,6 +281,8 @@ pub fn execute( revm::Return::SelfDestruct ); + // TODO add handling of other errors + // Add spend gas. cumulative_gas_used += gas_used; @@ -403,13 +405,8 @@ mod tests { HashMap::new(), ); - let account3_old_info = Account { - balance: 0x3635c9adc5dea00000u128.into(), - nonce: 0x00, - bytecode_hash: Some(H256(hex!( - "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - ))), - }; + let account3_old_info = + Account { balance: 0x3635c9adc5dea00000u128.into(), nonce: 0x00, bytecode_hash: None }; db.insert_account( H160(hex!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b")), @@ -436,45 +433,31 @@ mod tests { assert_eq!(patch.new_bytecodes.len(), 0, "Should have zero new bytecodes"); let account1 = H160(hex!("1000000000000000000000000000000000000000")); - let _account1_info = Account { - balance: 0x00.into(), - nonce: 0x00, - bytecode_hash: Some(H256(hex!( - "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - ))), - }; + let _account1_info = Account { balance: 0x00.into(), nonce: 0x00, bytecode_hash: None }; let account2 = H160(hex!("2adc25665018aa1fe0e6bc666dac8fc2697ff9ba")); let account2_info = Account { - balance: (0x1bc16d674ece94bau128 - 0x1bc16d674ec80000u128).into(), /* TODO remove - * 2eth block - * reward */ + // TODO remove 2eth block reward + balance: (0x1bc16d674ece94bau128 - 0x1bc16d674ec80000u128).into(), nonce: 0x00, - bytecode_hash: Some(H256(hex!( - "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - ))), + bytecode_hash: None, }; let account3 = H160(hex!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b")); - let account3_info = Account { - balance: 0x3635c9adc5de996b46u128.into(), - nonce: 0x01, - bytecode_hash: Some(H256(hex!( - "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - ))), - }; + let account3_info = + Account { balance: 0x3635c9adc5de996b46u128.into(), nonce: 0x01, bytecode_hash: None }; assert_eq!( patch.state_diff.get(&account1).unwrap().account, - (None, None), + AccountInfoChangeSet::NoChange, "No change to account" ); assert_eq!( patch.state_diff.get(&account2).unwrap().account, - (None, Some(account2_info)), + AccountInfoChangeSet::Created { new: account2_info }, "New acccount" ); assert_eq!( patch.state_diff.get(&account3).unwrap().account, - (Some(account3_old_info), Some(account3_info)), + AccountInfoChangeSet::Changed { old: account3_old_info, new: account3_info }, "Change to account state" ); diff --git a/crates/executor/src/revm_wrap.rs b/crates/executor/src/revm_wrap.rs index 
e96ad3864e..eb39d42212 100644 --- a/crates/executor/src/revm_wrap.rs +++ b/crates/executor/src/revm_wrap.rs @@ -1,7 +1,7 @@ use reth_interfaces::{provider::StateProvider, Error}; use reth_primitives::{ - Header, Transaction, TransactionKind, TransactionSignedEcRecovered, TxEip1559, TxEip2930, - TxLegacy, H160, H256, KECCAK_EMPTY, U256, + Account, Header, Transaction, TransactionKind, TransactionSignedEcRecovered, TxEip1559, + TxEip2930, TxLegacy, H160, H256, KECCAK_EMPTY, U256, }; use revm::{ db::{CacheDB, DatabaseRef}, @@ -179,3 +179,17 @@ pub fn is_log_equal(revm_log: &revm::Log, reth_log: &reth_primitives::Log) -> bo .zip(reth_log.topics.iter()) .any(|(revm_topic, reth_topic)| revm_topic != reth_topic) } + +/// Create reth primitive [Account] from [revm::AccountInfo]. +/// Check if revm bytecode hash is [KECCAK_EMPTY] and put None to reth [Account] +pub fn to_reth_acc(revm_acc: &revm::AccountInfo) -> Account { + Account { + balance: revm_acc.balance, + nonce: revm_acc.nonce, + bytecode_hash: if revm_acc.code_hash == KECCAK_EMPTY { + None + } else { + Some(revm_acc.code_hash) + }, + } +} diff --git a/crates/interfaces/src/db/mod.rs b/crates/interfaces/src/db/mod.rs index 2f24b56df4..f2fe5783ae 100644 --- a/crates/interfaces/src/db/mod.rs +++ b/crates/interfaces/src/db/mod.rs @@ -251,7 +251,7 @@ pub struct DupWalker<'cursor, 'tx, T: DupSort, CURSOR: DbDupCursorRO<'tx, T>> { /// Cursor to be used to walk through the table. pub cursor: &'cursor mut CURSOR, /// Value where to start the walk. - pub start: Option>, + pub start: IterPairResult, /// Phantom data for 'tx. As it is only used for `DbDupCursorRO`. pub _tx_phantom: PhantomData<&'tx T>, } @@ -259,13 +259,13 @@ pub struct DupWalker<'cursor, 'tx, T: DupSort, CURSOR: DbDupCursorRO<'tx, T>> { impl<'cursor, 'tx, T: DupSort, CURSOR: DbDupCursorRO<'tx, T>> std::iter::Iterator for DupWalker<'cursor, 'tx, T, CURSOR> { - type Item = Result; + type Item = Result<(T::Key, T::Value), Error>; fn next(&mut self) -> Option { let start = self.start.take(); if start.is_some() { return start } - self.cursor.next_dup_val().transpose() + self.cursor.next_dup().transpose() } } diff --git a/crates/interfaces/src/db/models/accounts.rs b/crates/interfaces/src/db/models/accounts.rs index 13eae8fcc5..3fe48aaeeb 100644 --- a/crates/interfaces/src/db/models/accounts.rs +++ b/crates/interfaces/src/db/models/accounts.rs @@ -13,13 +13,15 @@ use reth_primitives::{Account, Address, TxNumber}; use serde::{Deserialize, Serialize}; /// Account as it is saved inside [`AccountChangeSet`]. [`Address`] is the subkey. +/// TODO there should be `not_existing` boolean or Account be made as `Option` to +/// handle scenario where account was not present before transaction. #[main_codec] -#[derive(Debug, Default, Clone, PartialEq)] +#[derive(Debug, Default, Clone, Eq, PartialEq)] pub struct AccountBeforeTx { /// Address for the account. Acts as `DupSort::SubKey`. - address: Address, + pub address: Address, /// Account state before the transaction. - info: Account, + pub info: Account, } /// [`TxNumber`] concatenated with [`Address`]. Used as a key for [`StorageChangeSet`] diff --git a/crates/interfaces/src/db/models/blocks.rs b/crates/interfaces/src/db/models/blocks.rs index 493fd6864a..c4da3f4bed 100644 --- a/crates/interfaces/src/db/models/blocks.rs +++ b/crates/interfaces/src/db/models/blocks.rs @@ -22,7 +22,7 @@ pub type NumTransactions = u64; /// /// The [TxNumber]s for all the transactions in the block are `base_tx_id..(base_tx_id + /// tx_amount)`. 
-#[derive(Debug, Default, PartialEq, Clone)] +#[derive(Debug, Default, Eq, PartialEq, Clone)] #[main_codec] pub struct StoredBlockBody { /// The ID of the first transaction in the block. @@ -33,6 +33,13 @@ pub struct StoredBlockBody { pub ommers: Vec<Header>
, } +impl StoredBlockBody { + /// Return next block tx id. + pub fn next_block_tx_id(&self) -> TxNumber { + self.base_tx_id + self.tx_amount + } +} + /// Hash of the block header. Value for [`CanonicalHeaders`] pub type HeaderHash = H256; diff --git a/crates/interfaces/src/db/tables.rs b/crates/interfaces/src/db/tables.rs index 6a37336b57..af75df324f 100644 --- a/crates/interfaces/src/db/tables.rs +++ b/crates/interfaces/src/db/tables.rs @@ -151,6 +151,9 @@ table!( table!( /// Stores all smart contract bytecodes. + /// There will be multiple accounts that have same bytecode + /// So we would need to introduce reference counter. + /// This will be small optimization on state. Bytecodes => H256 => Bytecode); dupsort!( @@ -208,15 +211,20 @@ table!( dupsort!( /// Stores the state of an account before a certain transaction changed it. + /// Change on state can be: account is created, selfdestructed, touched while empty + /// or changed (balance,nonce). AccountChangeSet => TxNumber => [Address] AccountBeforeTx); dupsort!( /// Stores the state of a storage key before a certain transaction changed it. + /// If [`StorageEntry::value`] is zero, this means storage was not existing + /// and needs to be removed. StorageChangeSet => TxNumberAddress => [H256] StorageEntry); table!( /// Stores the transaction sender for each transaction. - TxSenders => TxNumber => Address); // Is it necessary? if so, inverted index index so we dont repeat addresses? + /// It is needed to speed up execution stage and allows fetching signer without doing transaction signed recovery + TxSenders => TxNumber => Address); table!( /// Configuration values. diff --git a/crates/interfaces/src/provider/block.rs b/crates/interfaces/src/provider/block.rs index e3e422c7b4..853eb4ecdf 100644 --- a/crates/interfaces/src/provider/block.rs +++ b/crates/interfaces/src/provider/block.rs @@ -1,8 +1,15 @@ -use crate::Result; +use crate::{ + db::{ + models::{BlockNumHash, StoredBlockBody}, + tables, DbTx, DbTxMut, + }, + provider::Error as ProviderError, + Result, +}; use auto_impl::auto_impl; use reth_primitives::{ rpc::{BlockId, BlockNumber}, - Block, BlockHash, BlockHashOrNumber, Header, H256, U256, + Block, BlockHash, BlockHashOrNumber, BlockLocked, Header, H256, U256, }; /// Client trait for fetching `Header` related data. @@ -99,3 +106,61 @@ pub struct ChainInfo { /// Safe block pub safe_finalized: Option, } + +/// Get value from [tables::CumulativeTxCount] by block hash +/// as the table is indexed by NumHash key we are obtaining number from +/// [tables::HeaderNumbers] +pub fn get_cumulative_tx_count_by_hash<'a, TX: DbTxMut<'a> + DbTx<'a>>( + tx: &TX, + block_hash: H256, +) -> Result { + let block_number = tx + .get::(block_hash)? + .ok_or(ProviderError::BlockHashNotExist { block_hash })?; + + let block_num_hash = BlockNumHash((block_number, block_hash)); + + tx.get::(block_num_hash)? + .ok_or_else(|| ProviderError::BlockBodyNotExist { block_num_hash }.into()) +} + +/// Fill block to database. Useful for tests. +/// Check parent dependency in [tables::HeaderNumbers] and in [tables::CumulativeTxCount] tables. 
+/// Inserts blocks data to [tables::CanonicalHeaders], [tables::Headers], [tables::HeaderNumbers], +/// and transactions data to [tables::TxSenders], [tables::Transactions], +/// [tables::CumulativeTxCount] and [tables::BlockBodies] +pub fn insert_canonical_block<'a, TX: DbTxMut<'a> + DbTx<'a>>( + tx: &TX, + block: &BlockLocked, +) -> Result<()> { + let block_num_hash = BlockNumHash((block.number, block.hash())); + tx.put::(block.number, block.hash())?; + // Put header with canonical hashes. + tx.put::(block_num_hash, block.header.as_ref().clone())?; + tx.put::(block.hash(), block.number)?; + + let start_tx_number = + if block.number == 0 { 0 } else { get_cumulative_tx_count_by_hash(tx, block.parent_hash)? }; + + // insert body + tx.put::( + block_num_hash, + StoredBlockBody { + base_tx_id: start_tx_number, + tx_amount: block.body.len() as u64, + ommers: block.ommers.iter().map(|h| h.as_ref().clone()).collect(), + }, + )?; + + let mut tx_number = start_tx_number; + for eth_tx in block.body.iter() { + let rec_tx = eth_tx.clone().into_ecrecovered().unwrap(); + tx.put::(tx_number, rec_tx.signer())?; + tx.put::(tx_number, rec_tx.as_ref().clone())?; + tx_number += 1; + } + + tx.put::(block_num_hash, tx_number)?; + + Ok(()) +} diff --git a/crates/interfaces/src/provider/db_provider.rs b/crates/interfaces/src/provider/db_provider.rs index b02b4b93ac..08cd0b3440 100644 --- a/crates/interfaces/src/provider/db_provider.rs +++ b/crates/interfaces/src/provider/db_provider.rs @@ -5,7 +5,10 @@ mod block; mod storage; use std::sync::Arc; -pub use storage::{StateProviderImplHistory, StateProviderImplLatest}; +pub use storage::{ + StateProviderImplHistory, StateProviderImplLatest, StateProviderImplRefHistory, + StateProviderImplRefLatest, +}; use crate::db::Database; diff --git a/crates/interfaces/src/provider/db_provider/storage.rs b/crates/interfaces/src/provider/db_provider/storage.rs index 2dbc59aa22..57c2e299a5 100644 --- a/crates/interfaces/src/provider/db_provider/storage.rs +++ b/crates/interfaces/src/provider/db_provider/storage.rs @@ -50,23 +50,70 @@ impl StateProviderFactory for ProviderImpl { } } -/// State provider with given hash +/// State provider for given transaction number pub struct StateProviderImplHistory<'a, TX: DbTx<'a>> { - /// Transaction - db: TX, + /// Database transaction + tx: TX, /// Transaction number is main indexer of account and storage changes transaction_number: TxNumber, + /// Phantom lifetime `'a` _phantom: PhantomData<&'a TX>, } impl<'a, TX: DbTx<'a>> StateProviderImplHistory<'a, TX> { /// Create new StateProvider from history transaction number - pub fn new(db: TX, transaction_number: TxNumber) -> Self { - Self { db, transaction_number, _phantom: PhantomData {} } + pub fn new(tx: TX, transaction_number: TxNumber) -> Self { + Self { tx, transaction_number, _phantom: PhantomData {} } } } impl<'a, TX: DbTx<'a>> AccountProvider for StateProviderImplHistory<'a, TX> { + /// Get basic account information. 
+ fn basic_account(&self, address: Address) -> Result> { + StateProviderImplRefHistory::new(&self.tx, self.transaction_number).basic_account(address) + } +} + +impl<'a, TX: DbTx<'a>> StateProvider for StateProviderImplHistory<'a, TX> { + fn storage(&self, account: Address, storage_key: StorageKey) -> Result> { + StateProviderImplRefHistory::new(&self.tx, self.transaction_number) + .storage(account, storage_key) + } + + fn bytecode_by_hash(&self, code_hash: H256) -> Result> { + StateProviderImplRefHistory::new(&self.tx, self.transaction_number) + .bytecode_by_hash(code_hash) + } + + fn block_hash(&self, number: U256) -> Result> { + StateProviderImplRefHistory::new(&self.tx, self.transaction_number).block_hash(number) + } +} +/// State provider with given hash +/// +/// It will access: +/// [tables::AccountHistory] +/// [tables::Bytecodes] +/// [tables::StorageHistory] +/// [tables::AccountChangeSet] +/// [tables::StorageChangeSet] +pub struct StateProviderImplRefHistory<'a, 'b, TX: DbTx<'a>> { + /// Transaction + tx: &'b TX, + /// Transaction number is main indexer of account and storage changes + transaction_number: TxNumber, + /// Phantom lifetime `'a` + _phantom: PhantomData<&'a TX>, +} + +impl<'a, 'b, TX: DbTx<'a>> StateProviderImplRefHistory<'a, 'b, TX> { + /// Create new StateProvider from history transaction number + pub fn new(tx: &'b TX, transaction_number: TxNumber) -> Self { + Self { tx, transaction_number, _phantom: PhantomData {} } + } +} + +impl<'a, 'b, TX: DbTx<'a>> AccountProvider for StateProviderImplRefHistory<'a, 'b, TX> { /// Get basic account information. fn basic_account(&self, _address: Address) -> Result> { // TODO add when AccountHistory is defined @@ -74,12 +121,12 @@ impl<'a, TX: DbTx<'a>> AccountProvider for StateProviderImplHistory<'a, TX> { } } -impl<'a, TX: DbTx<'a>> StateProvider for StateProviderImplHistory<'a, TX> { +impl<'a, 'b, TX: DbTx<'a>> StateProvider for StateProviderImplRefHistory<'a, 'b, TX> { /// Get storage. fn storage(&self, account: Address, storage_key: StorageKey) -> Result> { // TODO when StorageHistory is defined let transaction_number = - self.db.get::(Vec::new())?.map(|_integer_list| + self.tx.get::(Vec::new())?.map(|_integer_list| // TODO select integer that is one less from transaction_number self.transaction_number); @@ -87,7 +134,7 @@ impl<'a, TX: DbTx<'a>> StateProvider for StateProviderImplHistory<'a, TX> { return Ok(None) } let num = transaction_number.unwrap(); - let mut cursor = self.db.cursor_dup::()?; + let mut cursor = self.tx.cursor_dup::()?; if let Some((_, entry)) = cursor.seek_exact((num, account).into())? { if entry.key == storage_key { @@ -105,38 +152,74 @@ impl<'a, TX: DbTx<'a>> StateProvider for StateProviderImplHistory<'a, TX> { /// Get account code by its hash fn bytecode_by_hash(&self, code_hash: H256) -> Result> { - self.db.get::(code_hash).map_err(Into::into).map(|r| r.map(Bytes::from)) + self.tx.get::(code_hash).map_err(Into::into).map(|r| r.map(Bytes::from)) } /// Get block hash by number. 
fn block_hash(&self, number: U256) -> Result> { - self.db.get::(number.as_u64()).map_err(Into::into) + self.tx.get::(number.as_u64()).map_err(Into::into) } } -/// State Provider over latest state +/// State provider for latests state pub struct StateProviderImplLatest<'a, TX: DbTx<'a>> { /// database transaction db: TX, - /// Phantom data over lifetime - phantom: PhantomData<&'a TX>, + /// Phantom lifetime `'a` + _phantom: PhantomData<&'a TX>, } impl<'a, TX: DbTx<'a>> StateProviderImplLatest<'a, TX> { /// Create new state provider pub fn new(db: TX) -> Self { - Self { db, phantom: PhantomData {} } + Self { db, _phantom: PhantomData {} } } } impl<'a, TX: DbTx<'a>> AccountProvider for StateProviderImplLatest<'a, TX> { /// Get basic account information. fn basic_account(&self, address: Address) -> Result> { - self.db.get::(address).map_err(Into::into) + StateProviderImplRefLatest::new(&self.db).basic_account(address) } } impl<'a, TX: DbTx<'a>> StateProvider for StateProviderImplLatest<'a, TX> { + fn storage(&self, account: Address, storage_key: StorageKey) -> Result> { + StateProviderImplRefLatest::new(&self.db).storage(account, storage_key) + } + + fn bytecode_by_hash(&self, code_hash: H256) -> Result> { + StateProviderImplRefLatest::new(&self.db).bytecode_by_hash(code_hash) + } + + fn block_hash(&self, number: U256) -> Result> { + StateProviderImplRefLatest::new(&self.db).block_hash(number) + } +} + +/// State Provider over latest state that takes tx reference +pub struct StateProviderImplRefLatest<'a, 'b, TX: DbTx<'a>> { + /// database transaction + db: &'b TX, + /// Phantom data over lifetime + phantom: PhantomData<&'a TX>, +} + +impl<'a, 'b, TX: DbTx<'a>> StateProviderImplRefLatest<'a, 'b, TX> { + /// Create new state provider + pub fn new(db: &'b TX) -> Self { + Self { db, phantom: PhantomData {} } + } +} + +impl<'a, 'b, TX: DbTx<'a>> AccountProvider for StateProviderImplRefLatest<'a, 'b, TX> { + /// Get basic account information. + fn basic_account(&self, address: Address) -> Result> { + self.db.get::(address).map_err(Into::into) + } +} + +impl<'a, 'b, TX: DbTx<'a>> StateProvider for StateProviderImplRefLatest<'a, 'b, TX> { /// Get storage. fn storage(&self, account: Address, storage_key: StorageKey) -> Result> { let mut cursor = self.db.cursor_dup::()?; diff --git a/crates/interfaces/src/provider/error.rs b/crates/interfaces/src/provider/error.rs index 2384c9be47..99a5167adf 100644 --- a/crates/interfaces/src/provider/error.rs +++ b/crates/interfaces/src/provider/error.rs @@ -1,5 +1,7 @@ use reth_primitives::{BlockHash, BlockNumber}; +use crate::db::models::BlockNumHash; + /// KV error type. They are using u32 to represent error code. 
#[allow(missing_docs)] #[derive(Debug, thiserror::Error, PartialEq, Eq, Clone)] @@ -10,4 +12,6 @@ pub enum Error { BlockTxNumberNotExists { block_hash: BlockHash }, #[error("Block hash {block_hash:?} does not exists in Headers table")] BlockHashNotExist { block_hash: BlockHash }, + #[error("Block body not exists {block_num_hash:?}")] + BlockBodyNotExist { block_num_hash: BlockNumHash }, } diff --git a/crates/interfaces/src/provider/mod.rs b/crates/interfaces/src/provider/mod.rs index 3cd6791e40..27d46fc281 100644 --- a/crates/interfaces/src/provider/mod.rs +++ b/crates/interfaces/src/provider/mod.rs @@ -3,7 +3,10 @@ pub mod db_provider; mod error; mod state; -pub use block::{BlockProvider, ChainInfo, HeaderProvider}; +pub use block::{ + get_cumulative_tx_count_by_hash, insert_canonical_block, BlockProvider, ChainInfo, + HeaderProvider, +}; pub use db_provider::{self as db, ProviderImpl}; pub use error::Error; pub use state::{AccountProvider, StateProvider, StateProviderFactory}; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index ffca1fe750..a28d95de4b 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -782,6 +782,14 @@ impl TransactionSignedEcRecovered { self.signer } + /// Create [TransactionSignedEcRecovered] from [TransactionSigned] and author of transaction + pub fn from_signed_transactions_and_signer( + signed_transaction: TransactionSigned, + signer: Address, + ) -> Self { + Self { signed_transaction, signer } + } + /// Transform back to [`TransactionSigned`] pub fn into_signed(self) -> TransactionSigned { self.signed_transaction diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index eebcfb347d..28be13f19f 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -8,14 +8,20 @@ readme = "README.md" description = "Staged syncing primitives used in reth." [dependencies] +# reth libs reth-primitives = { path = "../primitives" } reth-interfaces = { path = "../interfaces" } reth-db = { path = "../db" } +reth-executor = { path = "../executor" } +reth-rlp = { path = "../common/rlp" } + +#async +tokio = { version = "1.21.2", features = ["sync"] } + async-trait = "0.1.57" thiserror = "1.0.37" tracing = "0.1.36" tracing-futures = "0.2.5" -tokio = { version = "1.21.2", features = ["sync"] } aquamarine = "0.1.12" metrics = "0.20.1" futures-util = "0.3.25" diff --git a/crates/stages/src/error.rs b/crates/stages/src/error.rs index 45a5c9133d..6ae8f3c2b8 100644 --- a/crates/stages/src/error.rs +++ b/crates/stages/src/error.rs @@ -1,6 +1,6 @@ use crate::pipeline::PipelineEvent; -use reth_interfaces::{consensus, db::Error as DbError}; -use reth_primitives::{BlockNumber, H256}; +use reth_interfaces::{consensus, db::Error as DbError, executor}; +use reth_primitives::{BlockNumber, TxNumber, H256}; use thiserror::Error; use tokio::sync::mpsc::error::SendError; @@ -19,6 +19,15 @@ pub enum StageError { /// The stage encountered a database error. #[error("An internal database error occurred: {0}")] Database(#[from] DbError), + #[error("Stage encountered a execution error in block {block}: {error}.")] + /// The stage encountered a execution error + ExecutionError { + /// The block that failed execution. + block: BlockNumber, + /// The underlying execution error. + #[source] + error: executor::Error, + }, /// The stage encountered a database integrity error. 
#[error("A database integrity error occurred: {0}")] DatabaseIntegrity(#[from] DatabaseIntegrityError), @@ -34,6 +43,7 @@ pub enum StageError { /// A database integrity error. /// The sender stage error #[derive(Error, Debug)] +#[allow(missing_docs)] pub enum DatabaseIntegrityError { // TODO(onbjerg): What's the difference between this and the one below? /// The canonical hash for a block is missing from the database. @@ -70,6 +80,14 @@ pub enum DatabaseIntegrityError { /// The block number key number: BlockNumber, }, + #[error("Gap in transaction table. Missing tx number #{missing}.")] + TransactionsGap { missing: TxNumber }, + #[error("Gap in transaction signer table. Missing tx number #{missing}.")] + TransactionsSignerGap { missing: TxNumber }, + #[error("Got to the end of transaction table")] + EndOfTransactionTable, + #[error("Got to the end of transaction table")] + EndOfTransactionSenderTable, /// The total difficulty from the block header is missing. #[error("Total difficulty not found for block #{number}")] TotalDifficulty { diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs new file mode 100644 index 0000000000..d6e43f6000 --- /dev/null +++ b/crates/stages/src/stages/execution.rs @@ -0,0 +1,580 @@ +use crate::{ + db::StageDB, DatabaseIntegrityError, ExecInput, ExecOutput, Stage, StageError, StageId, + UnwindInput, UnwindOutput, +}; +use reth_executor::{ + config::SpecUpgrades, + executor::{AccountChangeSet, AccountInfoChangeSet}, + revm_wrap::{State, SubState}, + Config, +}; +use reth_interfaces::{ + db::{ + models::{AccountBeforeTx, BlockNumHash, TxNumberAddress}, + tables, Database, DbCursorRO, DbCursorRW, DbDupCursorRO, DbTx, DbTxMut, + }, + provider::db::StateProviderImplRefLatest, +}; +use reth_primitives::{Address, StorageEntry, TransactionSignedEcRecovered, H256, U256}; +use std::{fmt::Debug, ops::DerefMut}; + +const EXECUTION: StageId = StageId("Execution"); + +/// The execution stage executes all transactions and +/// update history indexes. +/// +/// Input tables: +/// [tables::CanonicalHeaders] get next block to execute. +/// [tables::Headers] get for revm environment variables. 
+/// [tables::CumulativeTxCount] to get tx number +/// [tables::Transactions] to execute +/// +/// For state access [StateProvider] provides us latest state and history state +/// For latest most recent state [StateProvider] would need (Used for execution Stage): +/// [tables::PlainAccountState] +/// [tables::Bytecodes] +/// [tables::PlainStorageState] +/// +/// Tables updated after state finishes execution: +/// [tables::PlainAccountState] +/// [tables::PlainStorageState] +/// [tables::Bytecodes] +/// [tables::AccountChangeSet] +/// [tables::StorageChangeSet] +/// +/// For unwinds we are accessing: +/// [tables::CumulativeTxCount] get tx index to know what needs to be unwinded +/// [tables::AccountHistory] to remove change set and apply old values to +/// [tables::PlainAccountState] [tables::StorageHistory] to remove change set and apply old values +/// to [tables::PlainStorageState] +#[derive(Debug)] +pub struct ExecutionStage; + +/// Specify batch sizes of block in execution +/// TODO make this as config +const BATCH_SIZE: u64 = 1000; + +#[async_trait::async_trait] +impl Stage for ExecutionStage { + /// Return the id of the stage + fn id(&self) -> StageId { + EXECUTION + } + + /// Execute the stage + async fn execute( + &mut self, + db: &mut StageDB<'_, DB>, + input: ExecInput, + ) -> Result { + let db_tx = db.deref_mut(); + // none and zero are same as for genesis block (zeroed block) we are making assumption to + // not have transaction. + let last_block = input.stage_progress.unwrap_or_default(); + let start_block = last_block + 1; + + // Get next canonical block hashes to execute. + let mut canonicals = db_tx.cursor::()?; + // Get header with canonical hashes. + let mut headers = db_tx.cursor::()?; + // Get bodies (to get tx index) with canonical hashes. + let mut cumulative_tx_count = db_tx.cursor::()?; + // Get transaction of the block that we are executing. + let mut tx = db_tx.cursor::()?; + // Skip sender recovery and load signer from database. + let mut tx_sender = db_tx.cursor::()?; + + // get canonical blocks (num,hash) + let canonical_batch = canonicals + .walk(start_block)? + .take(BATCH_SIZE as usize) + .map(|i| i.map(BlockNumHash)) + .collect::, _>>()?; + + // no more canonical blocks, we are done with execution. + if canonical_batch.is_empty() { + return Ok(ExecOutput { done: true, reached_tip: true, stage_progress: last_block }) + } + + // get headers from canonical numbers + let headers_batch = canonical_batch + .iter() + .map(|ch_index| { + // TODO see if walker next has better performance then seek_exact calls. + headers.seek_exact(*ch_index).map_err(StageError::Database).and_then(|res| { + res.ok_or_else(|| { + DatabaseIntegrityError::Header { + number: ch_index.number(), + hash: ch_index.hash(), + } + .into() + }) + .map(|(_, header)| header) + }) + }) + .collect::, _>>()?; + + // get last tx count so that we can know amount of transaction in the block. + let mut last_tx_count = if last_block == 0 { + 0u64 + } else { + // headers_batch is not empty, + let parent_hash = headers_batch[0].parent_hash; + + let (_, tx_cnt) = cumulative_tx_count + .seek_exact(BlockNumHash((last_block, parent_hash)))? + .ok_or(DatabaseIntegrityError::CumulativeTxCount { + number: last_block, + hash: parent_hash, + })?; + tx_cnt + }; + + let cumulative_tx_count_batch = canonical_batch + .iter() + .map(|ch_index| { + // TODO see if walker next has better performance then seek_exact calls. 
+ cumulative_tx_count + .seek_exact(*ch_index) + .map_err(StageError::Database) + .and_then(|res| { + res.ok_or_else(|| { + DatabaseIntegrityError::CumulativeTxCount { + number: ch_index.number(), + hash: ch_index.hash(), + } + .into() + }) + }) + .map(|(_, cumulative_tx_count)| { + let ret = (last_tx_count, cumulative_tx_count); + last_tx_count = cumulative_tx_count; + ret + }) + }) + .collect::, _>>()?; + + // Fetch transactions, execute them and generate results + let mut block_change_patches = Vec::with_capacity(canonical_batch.len()); + for (header, body_range) in headers_batch.iter().zip(cumulative_tx_count_batch.iter()) { + let start_tx_index = body_range.0; + let end_tx_index = body_range.1; + let body_tx_cnt = end_tx_index - start_tx_index; + // iterate over all transactions + let mut tx_walker = tx.walk(start_tx_index)?; + let mut transactions = Vec::with_capacity(body_tx_cnt as usize); + // get next N transactions. + for index in start_tx_index..end_tx_index { + let (tx_index, tx) = + tx_walker.next().ok_or(DatabaseIntegrityError::EndOfTransactionTable)??; + if tx_index != index { + return Err(DatabaseIntegrityError::TransactionsGap { missing: tx_index }.into()) + } + transactions.push(tx); + } + + // take signers + let mut tx_sender_walker = tx_sender.walk(start_tx_index)?; + let mut signers = Vec::with_capacity(body_tx_cnt as usize); + for index in start_tx_index..end_tx_index { + let (tx_index, tx) = tx_sender_walker + .next() + .ok_or(DatabaseIntegrityError::EndOfTransactionSenderTable)??; + if tx_index != index { + return Err( + DatabaseIntegrityError::TransactionsSignerGap { missing: tx_index }.into() + ) + } + signers.push(tx); + } + // create ecRecovered transaction by matching tx and its signer + let recovered_transactions: Vec<_> = transactions + .into_iter() + .zip(signers.into_iter()) + .map(|(tx, signer)| { + TransactionSignedEcRecovered::from_signed_transactions_and_signer(tx, signer) + }) + .collect(); + + // for now use default eth config + let config = Config { chain_id: 1.into(), spec_upgrades: SpecUpgrades::new_ethereum() }; + + let mut state_provider = + SubState::new(State::new(StateProviderImplRefLatest::new(db_tx))); + + // executiong and store output to results + block_change_patches.push(( + reth_executor::executor::execute_and_verify_receipt( + header, + &recovered_transactions, + &config, + &mut state_provider, + ) + .map_err(|error| StageError::ExecutionError { block: header.number, error })?, + start_tx_index, + )); + } + + // apply changes to plain database. 
+ for (results, start_tx_index) in block_change_patches.into_iter() { + for (index, result) in results.into_iter().enumerate() { + let tx_index = start_tx_index + index as u64; + // insert account change set + for (address, AccountChangeSet { account, wipe_storage, storage }) in + result.state_diff.into_iter() + { + match account { + AccountInfoChangeSet::Changed { old, new } => { + // insert old account in AccountChangeSet + // check for old != new was already done + db_tx.put::( + tx_index, + AccountBeforeTx { address, info: old }, + )?; + db_tx.put::(address, new)?; + } + AccountInfoChangeSet::Created { new } => { + // TODO put None accounts inside changeset when `AccountBeforeTx` get + // fixed + db_tx.put::(address, new)?; + } + AccountInfoChangeSet::Destroyed { old } => { + db_tx.delete::(address, None)?; + db_tx.put::( + tx_index, + AccountBeforeTx { address, info: old }, + )?; + } + AccountInfoChangeSet::NoChange => { + // do nothing storage account didn't change + } + } + + // wipe storage + if wipe_storage { + // TODO insert all changes to StorageChangeSet + db_tx.delete::(address, None)?; + } + // insert storage changeset + let storage_id = TxNumberAddress((tx_index, address)); + for (key, (old_value, new_value)) in storage { + let mut hkey = H256::zero(); + key.to_big_endian(&mut hkey.0); + + // insert into StorageChangeSet + db_tx.put::( + storage_id.clone(), + StorageEntry { key: hkey, value: old_value }, + )?; + + if new_value.is_zero() { + db_tx.delete::( + address, + Some(StorageEntry { key: hkey, value: old_value }), + )?; + } else { + db_tx.put::( + address, + StorageEntry { key: hkey, value: new_value }, + )?; + } + } + } + // insert bytecode + for (hash, bytecode) in result.new_bytecodes.into_iter() { + // make different types of bytecode. Checked and maybe even analyzed (needs to + // be packed). Currently save only raw bytes. + db_tx.put::( + hash, + bytecode.bytes()[..bytecode.len()].to_vec(), + )?; + + // NOTE: bytecode bytes are not inserted in change set and it stand in saparate + // table + } + } + } + + let last_block = last_block + canonical_batch.len() as u64; + let is_done = canonical_batch.len() < BATCH_SIZE as usize; + Ok(ExecOutput { done: is_done, reached_tip: true, stage_progress: last_block }) + } + + /// Unwind the stage. + async fn unwind( + &mut self, + db: &mut StageDB<'_, DB>, + input: UnwindInput, + ) -> Result> { + let unwind_from = input.stage_progress; + let unwind_to = input.unwind_to; + let _bad_block = input.bad_block; + + // get block body tx indexes + let db_tx = db.deref_mut(); + + // Get transaction of the block that we are executing. + let mut account_changeset = db_tx.cursor_dup_mut::()?; + // Skip sender recovery and load signer from database. + let mut storage_changeset = db_tx.cursor_dup_mut::()?; + + // get from tx_number + let unwind_from_hash = db_tx + .get::(unwind_from)? + .ok_or(DatabaseIntegrityError::CanonicalHeader { number: unwind_from })?; + + let from_tx_number = db_tx + .get::(BlockNumHash((unwind_from, unwind_from_hash)))? + .ok_or(DatabaseIntegrityError::CumulativeTxCount { + number: unwind_from, + hash: unwind_from_hash, + })?; + + // get to tx_number + let to_tx_number = if unwind_to == 0 { + 0 + } else { + let parent_number = unwind_to - 1; + let parent_hash = db_tx + .get::(parent_number)? + .ok_or(DatabaseIntegrityError::CanonicalHeader { number: parent_number })?; + + db_tx + .get::(BlockNumHash((parent_number, parent_hash)))? 
+ .ok_or(DatabaseIntegrityError::CumulativeTxCount { + number: parent_number, + hash: parent_hash, + })? + }; + + if to_tx_number > from_tx_number { + panic!("Parents CumulativeTxCount {to_tx_number} is higer then present block #{unwind_to} TxCount {from_tx_number}") + } + let num_of_tx = (from_tx_number - to_tx_number) as usize; + + // if there is no transaction ids, this means blocks were empty and block reward change set + // is not present. + if num_of_tx == 0 { + return Ok(UnwindOutput { stage_progress: unwind_to }) + } + + // get all batches for account change + // Check if walk and walk_dup would do the same thing + let account_changeset_batch = account_changeset + .walk_dup(to_tx_number, Address::zero())? + .take(num_of_tx) + .collect::, _>>()?; + + // revert all changes to PlainState + for (_, changeset) in account_changeset_batch.into_iter().rev() { + db_tx.put::(changeset.address, changeset.info)?; + // TODO remove account if none when `AccountBeforeTx` get fixed + // if account.is_none() { + // delete account from plain state. Storage will be cleaned it is own way. + //} + } + + // get all batches for account change + let storage_chageset_batch = storage_changeset + .walk_dup(TxNumberAddress((to_tx_number, Address::zero())), H256::zero())? + .take(num_of_tx) + .collect::, _>>()?; + + // revert all changes to PlainStorage + for (TxNumberAddress((_, address)), storage) in storage_chageset_batch.into_iter().rev() { + db_tx.put::(address, storage.clone())?; + if storage.value == U256::zero() { + // delete value that is zero + db_tx.delete::(address, Some(storage))?; + } + } + + // Discard unwinded changesets + let mut entry = account_changeset.last()?; + while let Some((tx_number, _)) = entry { + if tx_number < to_tx_number { + break + } + account_changeset.delete_current()?; + entry = account_changeset.prev()?; + } + + let mut entry = storage_changeset.last()?; + while let Some((TxNumberAddress((tx_number, _)), _)) = entry { + if tx_number < to_tx_number { + break + } + storage_changeset.delete_current()?; + entry = storage_changeset.prev()?; + } + + Ok(UnwindOutput { stage_progress: input.unwind_to }) + } +} + +#[cfg(test)] +mod tests { + use std::ops::Deref; + + use super::*; + use reth_db::{ + kv::{test_utils::create_test_db, EnvKind}, + mdbx::WriteMap, + }; + use reth_interfaces::provider::insert_canonical_block; + use reth_primitives::{hex_literal::hex, keccak256, Account, BlockLocked, H160, U256}; + use reth_rlp::Decodable; + + #[tokio::test] + async fn sanity_execution_of_block() { + // TODO cleanup the setup after https://github.com/foundry-rs/reth/issues/332 + // is merged as it has similar framework + let state_db = create_test_db::(EnvKind::RW); + let mut db = StageDB::new(state_db.as_ref()).unwrap(); + let input = ExecInput { + previous_stage: None, + /// The progress of this stage the last time it was executed. 
+ stage_progress: None, + }; + let mut genesis_rlp = hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); + let genesis = BlockLocked::decode(&mut genesis_rlp).unwrap(); + let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); + let block = BlockLocked::decode(&mut block_rlp).unwrap(); + insert_canonical_block(db.deref_mut(), &genesis).unwrap(); + insert_canonical_block(db.deref_mut(), &block).unwrap(); + db.commit().unwrap(); + + // insert pre state + let tx = db.deref_mut(); + let acc1 = H160(hex!("1000000000000000000000000000000000000000")); + let acc2 = H160(hex!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b")); + let code = hex!("5a465a905090036002900360015500"); + let balance = U256::from(0x3635c9adc5dea00000u128); + let code_hash = keccak256(code); + tx.put::( + acc1, + Account { nonce: 0, balance: 0.into(), bytecode_hash: Some(code_hash) }, + ) + .unwrap(); + tx.put::( + acc2, + Account { nonce: 0, balance, bytecode_hash: None }, + ) + .unwrap(); + tx.put::(code_hash, code.to_vec()).unwrap(); + db.commit().unwrap(); + + // execute + let output = ExecutionStage.execute(&mut db, input).await.unwrap(); + db.commit().unwrap(); + assert_eq!(output, ExecOutput { stage_progress: 1, done: true, reached_tip: true }); + let tx = db.deref_mut(); + // check post state + let account1 = H160(hex!("1000000000000000000000000000000000000000")); + 
let account1_info = + Account { balance: 0x00.into(), nonce: 0x00, bytecode_hash: Some(code_hash) }; + let account2 = H160(hex!("2adc25665018aa1fe0e6bc666dac8fc2697ff9ba")); + let account2_info = Account { + // TODO remove 2eth block reward + balance: (0x1bc16d674ece94bau128 - 0x1bc16d674ec80000u128).into(), + nonce: 0x00, + bytecode_hash: None, + }; + let account3 = H160(hex!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b")); + let account3_info = + Account { balance: 0x3635c9adc5de996b46u128.into(), nonce: 0x01, bytecode_hash: None }; + + // assert accounts + assert_eq!( + tx.get::(account1), + Ok(Some(account1_info)), + "Post changed of a account" + ); + assert_eq!( + tx.get::(account2), + Ok(Some(account2_info)), + "Post changed of a account" + ); + assert_eq!( + tx.get::(account3), + Ok(Some(account3_info)), + "Post changed of a account" + ); + // assert storage + // Get on dupsort would return only first value. This is good enought for this test. + assert_eq!( + tx.get::(account1), + Ok(Some(StorageEntry { key: H256::from_low_u64_be(1), value: 2.into() })), + "Post changed of a account" + ); + } + + #[tokio::test] + async fn sanity_execute_unwind() { + // TODO cleanup the setup after https://github.com/foundry-rs/reth/issues/332 + // is merged as it has similar framework + + let state_db = create_test_db::(EnvKind::RW); + let mut db = StageDB::new(state_db.as_ref()).unwrap(); + let input = ExecInput { + previous_stage: None, + /// The progress of this stage the last time it was executed. + stage_progress: None, + }; + let mut genesis_rlp = hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); + let genesis = BlockLocked::decode(&mut genesis_rlp).unwrap(); + let mut block_rlp = 
hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); + let block = BlockLocked::decode(&mut block_rlp).unwrap(); + insert_canonical_block(db.deref_mut(), &genesis).unwrap(); + insert_canonical_block(db.deref_mut(), &block).unwrap(); + db.commit().unwrap(); + + // variables + let code = hex!("5a465a905090036002900360015500"); + let balance = U256::from(0x3635c9adc5dea00000u128); + let code_hash = keccak256(code); + // pre state + let tx = db.deref_mut(); + let acc1 = H160(hex!("1000000000000000000000000000000000000000")); + let acc1_info = Account { nonce: 0, balance: 0.into(), bytecode_hash: Some(code_hash) }; + let acc2 = H160(hex!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b")); + let acc2_info = Account { nonce: 0, balance, bytecode_hash: None }; + + tx.put::(acc1, acc1_info).unwrap(); + tx.put::(acc2, acc2_info).unwrap(); + tx.put::(code_hash, code.to_vec()).unwrap(); + db.commit().unwrap(); + + // execute + let _ = ExecutionStage.execute(&mut db, input).await.unwrap(); + db.commit().unwrap(); + + let o = ExecutionStage + .unwind(&mut db, UnwindInput { stage_progress: 1, unwind_to: 0, bad_block: None }) + .await + .unwrap(); + + assert_eq!(o, UnwindOutput { stage_progress: 0 }); + + // assert unwind stage + let tx = db.deref(); + assert_eq!( + tx.get::(acc1), + Ok(Some(acc1_info)), + "Pre changed of a account" + ); + assert_eq!( + tx.get::(acc2), + Ok(Some(acc2_info)), + "Post changed of a account" + ); + + // TODO check this after Option is added to tables:: + // let acc3 = H160(hex!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b")); + // assert_eq!( + // tx.get::(acc3), + // Ok(Some(Account { balance: 0.into(), nonce: 0, bytecode_hash: Some(KECCAK_EMPTY) })), + // "Post changed of a account" + // ); + } +} diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index dd0fd39c3d..a273eb6374 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -1,5 +1,7 @@ /// The bodies stage. pub mod bodies; +/// The execution stage that generates state diff. +pub mod execution; /// The headers stage. pub mod headers; /// The sender recovery stage.