feat: refactor few stages to providers, introduce insert_block (#1474)

Co-authored-by: Georgios Konstantopoulos <me@gakonst.com>
This commit is contained in:
rakita
2023-03-01 21:42:45 +01:00
committed by GitHub
parent 6136e0deb4
commit 42e3f56108
19 changed files with 659 additions and 406 deletions

View File

@@ -0,0 +1,173 @@
//! Output of execution.
use reth_db::{models::AccountBeforeTx, tables, transaction::DbTxMut, Error as DbError};
use reth_primitives::{Account, Address, Receipt, H256, U256};
use revm_primitives::Bytecode;
use std::collections::BTreeMap;
/// Execution result for one block: a changeset per transaction, plus the
/// post-block account changes (e.g. block reward) if present.
#[derive(Debug)]
pub struct ExecutionResult {
    /// Transaction changeset containing [Receipt], changed [Accounts][Account] and Storages.
    pub tx_changesets: Vec<TransactionChangeSet>,
    /// Post block account changesets. This might include block reward, uncle rewards, withdrawals
    /// or irregular state changes (DAO fork).
    pub block_changesets: BTreeMap<Address, AccountInfoChangeSet>,
}
/// After a transaction is executed this structure contains the transaction
/// [Receipt] and every change to state ([Account], Storage, [Bytecode])
/// that the transaction made, together with the old values,
/// so that the history account table can be updated.
#[derive(Debug, Clone)]
pub struct TransactionChangeSet {
    /// Transaction receipt
    pub receipt: Receipt,
    /// State change that this transaction made on state.
    pub changeset: BTreeMap<Address, AccountChangeSet>,
    /// New bytecode (keyed by code hash) created as result of transaction execution.
    pub new_bytecodes: BTreeMap<H256, Bytecode>,
}
/// Contains old/new account changes for a single account.
///
/// The variants carry the values needed to both update the plain state and
/// record the pre-change value in the history changeset table.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum AccountInfoChangeSet {
    /// The account is newly created.
    Created {
        /// The newly created account.
        new: Account,
    },
    /// An account was deleted (selfdestructed) or we have touched
    /// an empty account and we need to remove/destroy it.
    /// (Look at state clearing [EIP-158](https://eips.ethereum.org/EIPS/eip-158))
    Destroyed {
        /// The account that was destroyed.
        old: Account,
    },
    /// The account was changed.
    Changed {
        /// The account after the change.
        new: Account,
        /// The account prior to the change.
        old: Account,
    },
    /// Nothing was changed for the account (nonce/balance).
    NoChange,
}
impl AccountInfoChangeSet {
    /// Apply the changes from the changeset to a database transaction.
    ///
    /// Records the *old* account value in the `AccountChangeSet` history table
    /// (keyed by `tx_index`) and writes the *new* value to `PlainAccountState`.
    /// `has_state_clear_eip` enables the EIP-158 rule of not persisting newly
    /// created empty accounts.
    pub fn apply_to_db<'a, TX: DbTxMut<'a>>(
        self,
        tx: &TX,
        address: Address,
        tx_index: u64,
        has_state_clear_eip: bool,
    ) -> Result<(), DbError> {
        match self {
            AccountInfoChangeSet::Changed { old, new } => {
                // insert old account in AccountChangeSet
                // check for old != new was already done
                tx.put::<tables::AccountChangeSet>(
                    tx_index,
                    AccountBeforeTx { address, info: Some(old) },
                )?;
                tx.put::<tables::PlainAccountState>(address, new)?;
            }
            AccountInfoChangeSet::Created { new } => {
                // Ignore account that are created empty and state clear (SpuriousDragon) hardfork
                // is activated.
                if has_state_clear_eip && new.is_empty() {
                    return Ok(())
                }
                // `info: None` records that the account did not exist before this tx.
                tx.put::<tables::AccountChangeSet>(
                    tx_index,
                    AccountBeforeTx { address, info: None },
                )?;
                tx.put::<tables::PlainAccountState>(address, new)?;
            }
            AccountInfoChangeSet::Destroyed { old } => {
                // Remove the live entry first, then archive the pre-destruction value.
                tx.delete::<tables::PlainAccountState>(address, None)?;
                tx.put::<tables::AccountChangeSet>(
                    tx_index,
                    AccountBeforeTx { address, info: Some(old) },
                )?;
            }
            AccountInfoChangeSet::NoChange => {
                // do nothing, the account didn't change
            }
        }
        Ok(())
    }
}
/// Diff change set that is needed for creating history index and updating current world state.
#[derive(Debug, Clone)]
pub struct AccountChangeSet {
    /// Old and new account state change.
    pub account: AccountInfoChangeSet,
    /// Storage containing key -> (OldValue,NewValue). In case the old value does not exist
    /// we can expect to have U256::ZERO, same with new value.
    pub storage: BTreeMap<U256, (U256, U256)>,
    /// Just to make sure that we are taking selfdestruct cleaning we have this field that wipes
    /// storage. There are instances where storage is changed but account is not touched, so we
    /// can't take into account that if new account is None that it is selfdestruct.
    pub wipe_storage: bool,
}
#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use reth_db::{
        database::Database,
        mdbx::{test_utils, Env, EnvKind, WriteMap},
        transaction::DbTx,
    };
    use reth_primitives::H160;
    use super::*;
    /// Exercises each `AccountInfoChangeSet` variant against an in-memory mdbx env,
    /// checking both the history (`AccountChangeSet`) and live (`PlainAccountState`) tables.
    #[test]
    fn apply_account_info_changeset() {
        let db: Arc<Env<WriteMap>> = test_utils::create_test_db(EnvKind::RW);
        let address = H160::zero();
        let tx_num = 0;
        let acc1 = Account { balance: U256::from(1), nonce: 2, bytecode_hash: Some(H256::zero()) };
        let acc2 = Account { balance: U256::from(3), nonce: 4, bytecode_hash: Some(H256::zero()) };
        let tx = db.tx_mut().unwrap();
        // check Changed changeset: old value archived, new value becomes plain state
        AccountInfoChangeSet::Changed { new: acc1, old: acc2 }
            .apply_to_db(&tx, address, tx_num, true)
            .unwrap();
        assert_eq!(
            tx.get::<tables::AccountChangeSet>(tx_num),
            Ok(Some(AccountBeforeTx { address, info: Some(acc2) }))
        );
        assert_eq!(tx.get::<tables::PlainAccountState>(address), Ok(Some(acc1)));
        // check Created changeset: history entry has `info: None`
        AccountInfoChangeSet::Created { new: acc1 }
            .apply_to_db(&tx, address, tx_num, true)
            .unwrap();
        assert_eq!(
            tx.get::<tables::AccountChangeSet>(tx_num),
            Ok(Some(AccountBeforeTx { address, info: None }))
        );
        assert_eq!(tx.get::<tables::PlainAccountState>(address), Ok(Some(acc1)));
        // delete old value, as it is dupsorted
        tx.delete::<tables::AccountChangeSet>(tx_num, None).unwrap();
        // check Destroyed changeset: plain state removed, old value archived
        AccountInfoChangeSet::Destroyed { old: acc2 }
            .apply_to_db(&tx, address, tx_num, true)
            .unwrap();
        assert_eq!(tx.get::<tables::PlainAccountState>(address), Ok(None));
        assert_eq!(
            tx.get::<tables::AccountChangeSet>(tx_num),
            Ok(Some(AccountBeforeTx { address, info: Some(acc2) }))
        );
    }
}

View File

@@ -22,6 +22,12 @@ pub use providers::{
LatestStateProviderRef, ShareableDatabase,
};
/// Merkle trie
pub mod trie;
/// Execution result
pub mod execution_result;
/// Helper types for interacting with the database
mod transaction;
pub use transaction::{Transaction, TransactionError};

View File

@@ -1,19 +1,36 @@
#![allow(dead_code)]
use itertools::Itertools;
use reth_db::{
cursor::DbCursorRO,
cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO},
database::{Database, DatabaseGAT},
models::StoredBlockBody,
models::{
sharded_key,
storage_sharded_key::{self, StorageShardedKey},
ShardedKey, StoredBlockBody, TransitionIdAddress,
},
table::Table,
tables,
transaction::{DbTx, DbTxMut},
TransitionList,
};
use reth_interfaces::{db::Error as DbError, provider::ProviderError};
use reth_primitives::{BlockHash, BlockNumber, Header, TransitionId, TxNumber, U256};
use reth_primitives::{
keccak256, Account, Address, BlockHash, BlockNumber, ChainSpec, Hardfork, Header, SealedBlock,
StorageEntry, TransitionId, TxNumber, H256, U256,
};
use reth_tracing::tracing::{info, trace};
use std::{
collections::{BTreeMap, BTreeSet},
fmt::Debug,
ops::{Deref, DerefMut},
};
use crate::{
insert_canonical_block,
trie::{DBTrieLoader, TrieError},
};
use crate::execution_result::{AccountChangeSet, ExecutionResult};
/// A container for any DB transaction that will open a new inner transaction when the current
/// one is committed.
// NOTE: This container is needed since `Transaction::commit` takes `mut self`, so methods in
@@ -71,6 +88,11 @@ where
Ok(Self { db, tx: Some(db.tx_mut()?) })
}
/// Creates a new container from the given database handle and an already-open
/// write transaction, without opening a new transaction itself.
pub fn new_raw(db: &'this DB, tx: <DB as DatabaseGAT<'this>>::TXMut) -> Self {
    Self { db, tx: Some(tx) }
}
/// Accessor to the internal Database
pub fn inner(&self) -> &'this DB {
self.db
@@ -213,6 +235,505 @@ where
}
Ok(())
}
/// Load the most recent `AccountHistory` shard for `address`, delete it from the
/// database so an updated shard can be re-inserted, and return its indices.
/// Returns an empty list if no shard exists for the address.
fn take_last_account_shard(&self, address: Address) -> Result<Vec<u64>, TransactionError> {
    let mut cursor = self.cursor_read::<tables::AccountHistory>()?;
    // The open-ended ("latest") shard is keyed with u64::MAX.
    let last = cursor.seek_exact(ShardedKey::new(address, u64::MAX))?;
    if let Some((shard_key, list)) = last {
        // delete old shard so new one can be inserted.
        self.delete::<tables::AccountHistory>(shard_key, None)?;
        let list = list.iter(0).map(|i| i as u64).collect::<Vec<_>>();
        return Ok(list)
    }
    Ok(Vec::new())
}
/// Load the most recent `StorageHistory` shard for `(address, storage_key)`,
/// delete it from the database so an updated shard can be re-inserted, and
/// return its indices. Returns an empty list if no shard exists.
pub fn take_last_storage_shard(
    &self,
    address: Address,
    storage_key: H256,
) -> Result<Vec<u64>, TransactionError> {
    let mut cursor = self.cursor_read::<tables::StorageHistory>()?;
    // The open-ended ("latest") shard is keyed with u64::MAX.
    let last = cursor.seek_exact(StorageShardedKey::new(address, storage_key, u64::MAX))?;
    if let Some((storage_shard_key, list)) = last {
        // delete old shard so new one can be inserted.
        self.delete::<tables::StorageHistory>(storage_shard_key, None)?;
        let list = list.iter(0).map(|i| i as u64).collect::<Vec<_>>();
        return Ok(list)
    }
    Ok(Vec::new())
}
}
/// Stages impl
impl<'this, DB> Transaction<'this, DB>
where
DB: Database,
{
/// Insert full block and make it canonical.
///
/// Runs each post-insertion stage inline (execution results, storage/account
/// hashing, merkle root verification, history indices) and performs a single
/// commit at the end of the function, so the database sees one atomic write.
pub fn insert_block(
    &mut self,
    block: &SealedBlock,
    chain_spec: &ChainSpec,
    changeset: ExecutionResult,
) -> Result<(), TransactionError> {
    // Header, Body, SenderRecovery, TD, TxLookup stages.
    // Propagate the error instead of unwrapping so a database failure surfaces
    // to the caller rather than panicking mid-insert.
    let (from, to) = insert_canonical_block(self.deref_mut(), block, false)?;
    // NOTE(review): underflows for the genesis block (number 0) — presumably only
    // non-genesis blocks reach this path; confirm with callers.
    let parent_block_number = block.number - 1;
    // execution stage
    self.insert_execution_result(vec![changeset], chain_spec, parent_block_number)?;
    // storage hashing stage
    {
        let lists = self.get_addresses_and_keys_of_changed_storages(from, to)?;
        let storages = self.get_plainstate_storages(lists.into_iter())?;
        self.insert_storage_for_hashing(storages.into_iter())?;
    }
    // account hashing stage
    {
        let lists = self.get_addresses_of_changed_accounts(from, to)?;
        let accounts = self.get_plainstate_accounts(lists.into_iter())?;
        self.insert_account_for_hashing(accounts.into_iter())?;
    }
    // merkle tree: incrementally update the state root starting from the parent's
    // root and verify the result against the root in the sealed block header.
    {
        let current_root = self.get_header(parent_block_number)?.state_root;
        let loader = DBTrieLoader::default();
        let root = loader.update_root(self, current_root, from..to)?;
        if root != block.state_root {
            return Err(TransactionError::StateTrieRootMismatch {
                got: root,
                expected: block.state_root,
                block_number: block.number,
                block_hash: block.hash(),
            })
        }
    }
    // account history stage
    {
        let indices = self.get_account_transition_ids_from_changeset(from, to)?;
        self.insert_account_history_index(indices)?;
    }
    // storage history stage
    {
        let indices = self.get_storage_transition_ids_from_changeset(from, to)?;
        self.insert_storage_history_index(indices)?;
    }
    // commit block to database
    self.commit()?;
    Ok(())
}
/// Iterate over the storage changesets in `from..to` and return, for every
/// changed account address, the set of storage keys that were touched.
pub fn get_addresses_and_keys_of_changed_storages(
    &self,
    from: TransitionId,
    to: TransitionId,
) -> Result<BTreeMap<Address, BTreeSet<H256>>, TransactionError> {
    Ok(self
        .cursor_read::<tables::StorageChangeSet>()?
        // range keys are (transition id, address); Address::zero() makes the
        // bounds cover all addresses within the transition range.
        .walk_range(
            TransitionIdAddress((from, Address::zero()))..
            TransitionIdAddress((to, Address::zero())),
        )?
        .collect::<Result<Vec<_>, _>>()?
        .into_iter()
        // fold all storages and save its old state so we can remove it from HashedStorage
        // it is needed as it is dup table.
        .fold(
            BTreeMap::new(),
            |mut accounts: BTreeMap<Address, BTreeSet<H256>>,
             (TransitionIdAddress((_, address)), storage_entry)| {
                accounts.entry(address).or_default().insert(storage_entry.key);
                accounts
            },
        ))
}
/// Look up the current plain-state value for each `(address, storage key)` pair.
/// Missing entries yield the default value (`U256::ZERO`).
#[allow(clippy::type_complexity)]
pub fn get_plainstate_storages(
    &self,
    iter: impl IntoIterator<Item = (Address, impl IntoIterator<Item = H256>)>,
) -> Result<Vec<(Address, Vec<(H256, U256)>)>, TransactionError> {
    let mut plain_storage = self.cursor_dup_read::<tables::PlainStorageState>()?;
    iter.into_iter()
        .map(|(address, storage)| {
            storage
                .into_iter()
                .map(|key| -> Result<_, TransactionError> {
                    // seek_by_key_subkey may land on a later subkey in the dup
                    // table, so filter for an exact key match.
                    let ret = plain_storage
                        .seek_by_key_subkey(address, key)?
                        .filter(|v| v.key == key)
                        .unwrap_or_default();
                    Ok((key, ret.value))
                })
                .collect::<Result<Vec<(_, _)>, _>>()
                .map(|storage| (address, storage))
        })
        .collect::<Result<Vec<(_, _)>, _>>()
}
/// Iterate over storages, hash the addresses and apply the values to the
/// `HashedStorage` table (a zero value removes the entry instead).
pub fn insert_storage_for_hashing(
    &self,
    storages: impl IntoIterator<Item = (Address, impl IntoIterator<Item = (H256, U256)>)>,
) -> Result<(), TransactionError> {
    // hash the account addresses (the storage slot keys are used as-is here)
    let hashed = storages.into_iter().fold(BTreeMap::new(), |mut map, (address, storage)| {
        let storage = storage.into_iter().fold(BTreeMap::new(), |mut map, (key, value)| {
            map.insert(keccak256(key), value);
            map
        });
        map.insert(keccak256(address), storage);
        map
    });
    let mut hashed_storage = self.cursor_dup_write::<tables::HashedStorage>()?;
    // Hash the address and key and apply them to HashedStorage (if Storage is None
    // just remove it);
    hashed.into_iter().try_for_each(|(hashed_address, storage)| {
        storage.into_iter().try_for_each(|(key, value)| -> Result<(), TransactionError> {
            // delete any existing dup entry for this exact subkey first,
            // then re-insert only non-zero values
            if hashed_storage
                .seek_by_key_subkey(hashed_address, key)?
                .filter(|entry| entry.key == key)
                .is_some()
            {
                hashed_storage.delete_current()?;
            }
            if value != U256::ZERO {
                hashed_storage.upsert(hashed_address, StorageEntry { key, value })?;
            }
            Ok(())
        })
    })?;
    Ok(())
}
/// Iterate over the account changesets in `from..to` and return all account
/// addresses that were changed.
pub fn get_addresses_of_changed_accounts(
    &self,
    from: TransitionId,
    to: TransitionId,
) -> Result<BTreeSet<Address>, TransactionError> {
    Ok(self
        .cursor_read::<tables::AccountChangeSet>()?
        .walk_range(from..to)?
        .collect::<Result<Vec<_>, _>>()?
        .into_iter()
        // fold all account to one set of changed accounts
        .fold(BTreeSet::new(), |mut accounts: BTreeSet<Address>, (_, account_before)| {
            accounts.insert(account_before.address);
            accounts
        }))
}
/// Look up the current plain-state account for each address; `None` marks an
/// address with no plain-state entry.
pub fn get_plainstate_accounts(
    &self,
    iter: impl IntoIterator<Item = Address>,
) -> Result<Vec<(Address, Option<Account>)>, TransactionError> {
    let mut plain_accounts = self.cursor_read::<tables::PlainAccountState>()?;
    Ok(iter
        .into_iter()
        .map(|address| plain_accounts.seek_exact(address).map(|a| (address, a.map(|(_, v)| v))))
        .collect::<Result<Vec<_>, _>>()?)
}
/// Iterate over accounts, hash the addresses and apply them to the
/// `HashedAccount` table (a `None` account removes the entry instead).
pub fn insert_account_for_hashing(
    &self,
    accounts: impl IntoIterator<Item = (Address, Option<Account>)>,
) -> Result<(), TransactionError> {
    let mut hashed_accounts = self.cursor_write::<tables::HashedAccount>()?;
    // hash addresses up front; BTreeMap gives sorted insertion order
    let hashes_accounts = accounts.into_iter().fold(
        BTreeMap::new(),
        |mut map: BTreeMap<H256, Option<Account>>, (address, account)| {
            map.insert(keccak256(address), account);
            map
        },
    );
    hashes_accounts.into_iter().try_for_each(
        |(hashed_address, account)| -> Result<(), TransactionError> {
            if let Some(account) = account {
                hashed_accounts.upsert(hashed_address, account)?
            } else if hashed_accounts.seek_exact(hashed_address)?.is_some() {
                // account no longer exists in plain state: drop the hashed entry
                hashed_accounts.delete_current()?;
            }
            Ok(())
        },
    )?;
    Ok(())
}
/// Get, for every `(address, storage key)` pair changed in `from..to`, the list
/// of transition ids at which the slot was changed.
pub fn get_storage_transition_ids_from_changeset(
    &self,
    from: TransitionId,
    to: TransitionId,
) -> Result<BTreeMap<(Address, H256), Vec<u64>>, TransactionError> {
    // walk from `(from, zero address)` and stop once the transition id reaches `to`
    let storage_changeset = self
        .cursor_read::<tables::StorageChangeSet>()?
        .walk(Some((from, Address::zero()).into()))?
        .take_while(|res| res.as_ref().map(|(k, _)| k.transition_id() < to).unwrap_or_default())
        .collect::<Result<Vec<_>, _>>()?;
    // fold all storages to one set of changes
    let storage_changeset_lists = storage_changeset.into_iter().fold(
        BTreeMap::new(),
        |mut storages: BTreeMap<(Address, H256), Vec<u64>>, (index, storage)| {
            storages
                .entry((index.address(), storage.key))
                .or_default()
                .push(index.transition_id());
            storages
        },
    );
    Ok(storage_changeset_lists)
}
/// Get, for every account changed in `from..to`, the list of transition ids at
/// which the account was changed (read from the account changeset table).
pub fn get_account_transition_ids_from_changeset(
    &self,
    from: TransitionId,
    to: TransitionId,
) -> Result<BTreeMap<Address, Vec<u64>>, TransactionError> {
    let mut cursor = self.cursor_read::<tables::AccountChangeSet>()?;
    let mut account_transitions: BTreeMap<Address, Vec<u64>> = BTreeMap::new();
    // Walk the changeset entries starting at `from`, grouping transition ids by
    // the changed account's address.
    for entry in cursor.walk(Some(from))? {
        // NOTE(review): mirrors the original take_while behaviour — iteration
        // stops at the first cursor error instead of propagating it.
        let (transition_id, account_before) = match entry {
            Ok(kv) => kv,
            Err(_) => break,
        };
        if transition_id >= to {
            break
        }
        account_transitions.entry(account_before.address).or_default().push(transition_id);
    }
    Ok(account_transitions)
}
/// Insert storage change index to database. Used inside StorageHistoryIndex stage.
///
/// For each `(address, storage key)` the previous "latest" shard is merged with
/// the new indices; full shards are keyed by their highest transition id and the
/// trailing (possibly partial) shard is keyed with `u64::MAX`.
pub fn insert_storage_history_index(
    &self,
    storage_transitions: BTreeMap<(Address, H256), Vec<u64>>,
) -> Result<(), TransactionError> {
    for ((address, storage_key), mut indices) in storage_transitions {
        // merge the (possibly partial) most recent shard with the new indices
        let mut last_shard = self.take_last_storage_shard(address, storage_key)?;
        last_shard.append(&mut indices);
        // chunk indices and insert them in shards of N size.
        let mut chunks = last_shard
            .iter()
            .chunks(storage_sharded_key::NUM_OF_INDICES_IN_SHARD)
            .into_iter()
            .map(|chunks| chunks.map(|i| *i as usize).collect::<Vec<usize>>())
            .collect::<Vec<_>>();
        let last_chunk = chunks.pop();
        // full shards are keyed by the highest transition id they contain.
        // NOTE(review): "Chuck" in the expect message looks like a typo for "Chunks".
        chunks.into_iter().try_for_each(|list| {
            self.put::<tables::StorageHistory>(
                StorageShardedKey::new(
                    address,
                    storage_key,
                    *list.last().expect("Chuck does not return empty list") as TransitionId,
                ),
                TransitionList::new(list).expect("Indices are presorted and not empty"),
            )
        })?;
        // Insert last list with u64::MAX
        if let Some(last_list) = last_chunk {
            self.put::<tables::StorageHistory>(
                StorageShardedKey::new(address, storage_key, u64::MAX),
                TransitionList::new(last_list).expect("Indices are presorted and not empty"),
            )?;
        }
    }
    Ok(())
}
/// Insert account change index to database. Used inside AccountHistoryIndex stage.
///
/// For each address the previous "latest" shard is merged with the new indices;
/// full shards are keyed by their highest transition id and the trailing
/// (possibly partial) shard is keyed with `u64::MAX`.
pub fn insert_account_history_index(
    &self,
    account_transitions: BTreeMap<Address, Vec<u64>>,
) -> Result<(), TransactionError> {
    // insert indexes to AccountHistory.
    for (address, mut indices) in account_transitions {
        // merge the (possibly partial) most recent shard with the new indices
        let mut last_shard = self.take_last_account_shard(address)?;
        last_shard.append(&mut indices);
        // chunk indices and insert them in shards of N size.
        let mut chunks = last_shard
            .iter()
            .chunks(sharded_key::NUM_OF_INDICES_IN_SHARD)
            .into_iter()
            .map(|chunks| chunks.map(|i| *i as usize).collect::<Vec<usize>>())
            .collect::<Vec<_>>();
        let last_chunk = chunks.pop();
        // full shards are keyed by the highest transition id they contain.
        // NOTE(review): "Chuck" in the expect message looks like a typo for "Chunks".
        chunks.into_iter().try_for_each(|list| {
            self.put::<tables::AccountHistory>(
                ShardedKey::new(
                    address,
                    *list.last().expect("Chuck does not return empty list") as TransitionId,
                ),
                TransitionList::new(list).expect("Indices are presorted and not empty"),
            )
        })?;
        // Insert last list with u64::MAX
        if let Some(last_list) = last_chunk {
            self.put::<tables::AccountHistory>(
                ShardedKey::new(address, u64::MAX),
                TransitionList::new(last_list).expect("Indices are presorted and not empty"),
            )?
        }
    }
    Ok(())
}
/// Used inside execution stage to commit created account storage changesets for transaction or
/// block state change.
///
/// `changesets` contains one [`ExecutionResult`] per block, starting at
/// `parent_block_number + 1`. For each block the per-transaction account and
/// storage changes are applied to the plain-state tables and archived in the
/// changeset (history) tables, keyed by a monotonically increasing transition id.
pub fn insert_execution_result(
    &self,
    changesets: Vec<ExecutionResult>,
    chain_spec: &ChainSpec,
    parent_block_number: u64,
) -> Result<(), TransactionError> {
    // Get last tx count so that we can know amount of transaction in the block.
    let mut current_transition_id = self
        .get::<tables::BlockTransitionIndex>(parent_block_number)?
        .ok_or(ProviderError::BlockTransition { block_number: parent_block_number })?;
    info!(target: "sync::stages::execution", current_transition_id, blocks = changesets.len(), "Inserting execution results");
    // apply changes to plain database.
    let mut block_number = parent_block_number;
    for results in changesets.into_iter() {
        block_number += 1;
        // EIP-158 state clearing applies only once SpuriousDragon is active for this block.
        let spurious_dragon_active =
            chain_spec.fork(Hardfork::SpuriousDragon).active_at_block(block_number);
        // insert state change set
        for result in results.tx_changesets.into_iter() {
            for (address, account_change_set) in result.changeset.into_iter() {
                let AccountChangeSet { account, wipe_storage, storage } = account_change_set;
                // apply account change to db. Updates AccountChangeSet and PlainAccountState
                // tables.
                trace!(target: "sync::stages::execution", ?address, current_transition_id, ?account, wipe_storage, "Applying account changeset");
                account.apply_to_db(
                    &**self,
                    address,
                    current_transition_id,
                    spurious_dragon_active,
                )?;
                let storage_id = TransitionIdAddress((current_transition_id, address));
                // cast key to H256 and trace the change
                let storage = storage
                    .into_iter()
                    .map(|(key, (old_value,new_value))| {
                        let hkey = H256(key.to_be_bytes());
                        trace!(target: "sync::stages::execution", ?address, current_transition_id, ?hkey, ?old_value, ?new_value, "Applying storage changeset");
                        (hkey, old_value,new_value)
                    })
                    .collect::<Vec<_>>();
                let mut cursor_storage_changeset =
                    self.cursor_write::<tables::StorageChangeSet>()?;
                // position the cursor so subsequent `append`s land after this key
                cursor_storage_changeset.seek_exact(storage_id)?;
                if wipe_storage {
                    // iterate over storage and save them before entry is deleted.
                    self.cursor_read::<tables::PlainStorageState>()?
                        .walk(Some(address))?
                        .take_while(|res| {
                            res.as_ref().map(|(k, _)| *k == address).unwrap_or_default()
                        })
                        .try_for_each(|entry| {
                            let (_, old_value) = entry?;
                            cursor_storage_changeset.append(storage_id, old_value)
                        })?;
                    // delete all entries
                    self.delete::<tables::PlainStorageState>(address, None)?;
                    // insert storage changeset
                    for (key, _, new_value) in storage {
                        // old values are already cleared.
                        if new_value != U256::ZERO {
                            self.put::<tables::PlainStorageState>(
                                address,
                                StorageEntry { key, value: new_value },
                            )?;
                        }
                    }
                } else {
                    // insert storage changeset
                    for (key, old_value, new_value) in storage {
                        let old_entry = StorageEntry { key, value: old_value };
                        let new_entry = StorageEntry { key, value: new_value };
                        // insert into StorageChangeSet
                        cursor_storage_changeset.append(storage_id, old_entry)?;
                        // Always delete old value as duplicate table, put will not override it
                        self.delete::<tables::PlainStorageState>(address, Some(old_entry))?;
                        if new_value != U256::ZERO {
                            self.put::<tables::PlainStorageState>(address, new_entry)?;
                        }
                    }
                }
            }
            // insert bytecode
            for (hash, bytecode) in result.new_bytecodes.into_iter() {
                // make different types of bytecode. Checked and maybe even analyzed (needs to
                // be packed). Currently save only raw bytes.
                let bytecode = bytecode.bytes();
                trace!(target: "sync::stages::execution", ?hash, ?bytecode, len = bytecode.len(), "Inserting bytecode");
                self.put::<tables::Bytecodes>(hash, bytecode[..bytecode.len()].to_vec())?;
                // NOTE: bytecode bytes are not inserted in change set and can be found in
                // separate table
            }
            // one transition per transaction
            current_transition_id += 1;
        }
        // If there are any post block changes, we will add account changesets to db.
        for (address, changeset) in results.block_changesets.into_iter() {
            trace!(target: "sync::stages::execution", ?address, current_transition_id, "Applying block reward");
            changeset.apply_to_db(
                &**self,
                address,
                current_transition_id,
                spurious_dragon_active,
            )?;
        }
        // NOTE(review): incremented even when block_changesets is empty —
        // presumably every block reserves one block-level transition; confirm.
        current_transition_id += 1;
    }
    Ok(())
}
/// An error that can occur when using the transaction container
@@ -224,4 +745,19 @@ pub enum TransactionError {
/// The transaction encountered a database integrity error.
#[error("A database integrity error occurred: {0}")]
DatabaseIntegrity(#[from] ProviderError),
/// The transaction encountered merkle trie error.
#[error("Merkle trie calculation error: {0}")]
MerkleTrie(#[from] TrieError),
/// Root mismatch between the calculated state trie root and the one in the block header.
// Fix: the format string previously interpolated `{got:?}` twice and never
// printed the expected root.
#[error("Merkle trie root mismatch on block: #{block_number:?} {block_hash:?}. got: {got:?} expected: {expected:?}")]
StateTrieRootMismatch {
    /// Expected root
    expected: H256,
    /// Calculated root
    got: H256,
    /// Block number
    block_number: BlockNumber,
    /// Block hash
    block_hash: BlockHash,
},
}

View File

@@ -0,0 +1,670 @@
use crate::Transaction;
use cita_trie::{PatriciaTrie, Trie};
use hasher::HasherKeccak;
use reth_db::{
cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO},
database::Database,
models::{AccountBeforeTx, TransitionIdAddress},
tables,
transaction::{DbTx, DbTxMut},
};
use reth_primitives::{
keccak256, proofs::EMPTY_ROOT, Account, Address, StorageEntry, StorageTrieEntry, TransitionId,
H256, KECCAK_EMPTY, U256,
};
use reth_rlp::{
encode_fixed_size, Decodable, DecodeError, Encodable, RlpDecodable, RlpEncodable,
EMPTY_STRING_CODE,
};
use reth_tracing::tracing::*;
use std::{
collections::{BTreeMap, BTreeSet},
ops::Range,
sync::Arc,
};
/// Merkle Trie error types
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
pub enum TrieError {
    // NOTE(review): message is vague — consider something like "internal trie error: {0}".
    #[error("Some error occurred: {0}")]
    InternalError(#[from] cita_trie::TrieError),
    #[error("The root node wasn't found in the DB")]
    MissingRoot(H256),
    #[error("{0:?}")]
    DatabaseError(#[from] reth_db::Error),
    #[error("{0:?}")]
    DecodeError(#[from] DecodeError),
}
/// Database wrapper implementing the [`cita_trie::DB`] trait, backed by the
/// `AccountsTrie` table of the wrapped transaction.
struct HashDatabase<'tx, 'itx, DB: Database> {
    tx: &'tx Transaction<'itx, DB>,
}
impl<'tx, 'itx, DB> cita_trie::DB for HashDatabase<'tx, 'itx, DB>
where
    DB: Database,
{
    type Error = TrieError;
    /// Look up a trie node by its 32-byte hash in the `AccountsTrie` table.
    fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
        Ok(self.tx.get::<tables::AccountsTrie>(H256::from_slice(key))?)
    }
    fn contains(&self, key: &[u8]) -> Result<bool, Self::Error> {
        Ok(<Self as cita_trie::DB>::get(self, key)?.is_some())
    }
    /// Single-key insert is unsupported; writes go through `insert_batch`.
    fn insert(&self, _key: Vec<u8>, _value: Vec<u8>) -> Result<(), Self::Error> {
        unreachable!("Use batch instead.");
    }
    // Insert a batch of data into the cache.
    fn insert_batch(&self, keys: Vec<Vec<u8>>, values: Vec<Vec<u8>>) -> Result<(), Self::Error> {
        let mut cursor = self.tx.cursor_write::<tables::AccountsTrie>()?;
        for (key, value) in keys.into_iter().zip(values.into_iter()) {
            cursor.upsert(H256::from_slice(key.as_slice()), value)?;
        }
        Ok(())
    }
    fn remove_batch(&self, keys: &[Vec<u8>]) -> Result<(), Self::Error> {
        let mut cursor = self.tx.cursor_write::<tables::AccountsTrie>()?;
        for key in keys {
            // only delete if the node is actually present
            if cursor.seek_exact(H256::from_slice(key.as_slice()))?.is_some() {
                cursor.delete_current()?;
            }
        }
        Ok(())
    }
    /// Single-key removal is unsupported; removals go through `remove_batch`.
    fn remove(&self, _key: &[u8]) -> Result<(), Self::Error> {
        unreachable!("Use batch instead.");
    }
    /// No-op: writes are applied directly to the underlying transaction.
    fn flush(&self) -> Result<(), Self::Error> {
        Ok(())
    }
}
impl<'tx, 'itx, DB: Database> HashDatabase<'tx, 'itx, DB> {
    /// Instantiates a new Database for the accounts trie, with an empty root
    fn new(tx: &'tx Transaction<'itx, DB>) -> Result<Self, TrieError> {
        let root = EMPTY_ROOT;
        // Seed the table with the empty-node encoding so a lookup of the empty root succeeds.
        if tx.get::<tables::AccountsTrie>(root)?.is_none() {
            tx.put::<tables::AccountsTrie>(root, [EMPTY_STRING_CODE].to_vec())?;
        }
        Ok(Self { tx })
    }
    /// Instantiates a new Database for the accounts trie, with an existing root
    fn from_root(tx: &'tx Transaction<'itx, DB>, root: H256) -> Result<Self, TrieError> {
        if root == EMPTY_ROOT {
            return Self::new(tx)
        }
        // Fail fast if the claimed root node is not present in the database.
        tx.get::<tables::AccountsTrie>(root)?.ok_or(TrieError::MissingRoot(root))?;
        Ok(Self { tx })
    }
}
/// Database wrapper implementing the [`cita_trie::DB`] trait for a single account's
/// storage trie, backed by the dup-sorted `StoragesTrie` table keyed by `key`.
struct DupHashDatabase<'tx, 'itx, DB: Database> {
    tx: &'tx Transaction<'itx, DB>,
    // hashed account address this storage trie belongs to
    key: H256,
}
impl<'tx, 'itx, DB> cita_trie::DB for DupHashDatabase<'tx, 'itx, DB>
where
    DB: Database,
{
    type Error = TrieError;
    /// Look up a storage-trie node by its hash under this account's key.
    fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
        let mut cursor = self.tx.cursor_dup_read::<tables::StoragesTrie>()?;
        let subkey = H256::from_slice(key);
        // seek_by_key_subkey may land on a later subkey; require an exact hash match
        Ok(cursor
            .seek_by_key_subkey(self.key, subkey)?
            .filter(|entry| entry.hash == subkey)
            .map(|entry| entry.node))
    }
    fn contains(&self, key: &[u8]) -> Result<bool, Self::Error> {
        Ok(<Self as cita_trie::DB>::get(self, key)?.is_some())
    }
    /// Single-key insert is unsupported; writes go through `insert_batch`.
    fn insert(&self, _key: Vec<u8>, _value: Vec<u8>) -> Result<(), Self::Error> {
        unreachable!("Use batch instead.");
    }
    /// Insert a batch of data into the cache.
    fn insert_batch(&self, keys: Vec<Vec<u8>>, values: Vec<Vec<u8>>) -> Result<(), Self::Error> {
        let mut cursor = self.tx.cursor_dup_write::<tables::StoragesTrie>()?;
        for (key, node) in keys.into_iter().zip(values.into_iter()) {
            let hash = H256::from_slice(key.as_slice());
            // dup table: delete the existing entry for this hash before upserting
            if cursor.seek_by_key_subkey(self.key, hash)?.filter(|e| e.hash == hash).is_some() {
                cursor.delete_current()?;
            }
            cursor.upsert(self.key, StorageTrieEntry { hash, node })?;
        }
        Ok(())
    }
    fn remove_batch(&self, keys: &[Vec<u8>]) -> Result<(), Self::Error> {
        let mut cursor = self.tx.cursor_dup_write::<tables::StoragesTrie>()?;
        for key in keys {
            let hash = H256::from_slice(key.as_slice());
            if cursor.seek_by_key_subkey(self.key, hash)?.filter(|e| e.hash == hash).is_some() {
                cursor.delete_current()?;
            }
        }
        Ok(())
    }
    /// Single-key removal is unsupported; removals go through `remove_batch`.
    fn remove(&self, _key: &[u8]) -> Result<(), Self::Error> {
        unreachable!("Use batch instead.");
    }
    /// No-op: writes are applied directly to the underlying transaction.
    fn flush(&self) -> Result<(), Self::Error> {
        Ok(())
    }
}
impl<'tx, 'itx, DB: Database> DupHashDatabase<'tx, 'itx, DB> {
    /// Instantiates a new Database for the storage trie, with an empty root
    fn new(tx: &'tx Transaction<'itx, DB>, key: H256) -> Result<Self, TrieError> {
        let root = EMPTY_ROOT;
        let mut cursor = tx.cursor_dup_write::<tables::StoragesTrie>()?;
        // Seed this account's subtree with the empty-node encoding if it isn't there yet.
        if cursor.seek_by_key_subkey(key, root)?.filter(|entry| entry.hash == root).is_none() {
            tx.put::<tables::StoragesTrie>(
                key,
                StorageTrieEntry { hash: root, node: [EMPTY_STRING_CODE].to_vec() },
            )?;
        }
        Ok(Self { tx, key })
    }
    /// Instantiates a new Database for the storage trie, with an existing root
    fn from_root(tx: &'tx Transaction<'itx, DB>, key: H256, root: H256) -> Result<Self, TrieError> {
        if root == EMPTY_ROOT {
            return Self::new(tx, key)
        }
        // Fail fast if the claimed root node is not present for this account.
        tx.cursor_dup_read::<tables::StoragesTrie>()?
            .seek_by_key_subkey(key, root)?
            .filter(|entry| entry.hash == root)
            .ok_or(TrieError::MissingRoot(root))?;
        Ok(Self { tx, key })
    }
}
/// An Ethereum account, for RLP encoding traits deriving.
///
/// This is the canonical four-field account representation stored as a leaf in
/// the state trie.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Default, RlpEncodable, RlpDecodable)]
pub(crate) struct EthAccount {
    /// Account nonce.
    nonce: u64,
    /// Account balance.
    balance: U256,
    /// Account's storage root.
    storage_root: H256,
    /// Hash of the account's bytecode.
    code_hash: H256,
}
/// Convert a plain [`Account`] into its RLP-encodable trie form.
///
/// The storage root defaults to the empty-trie root; a missing bytecode hash is
/// represented by the hash of empty code.
impl From<Account> for EthAccount {
    fn from(acc: Account) -> Self {
        let code_hash = acc.bytecode_hash.unwrap_or(KECCAK_EMPTY);
        Self { nonce: acc.nonce, balance: acc.balance, storage_root: EMPTY_ROOT, code_hash }
    }
}
impl EthAccount {
    /// Builds an [`EthAccount`] from `acc`, overriding the default empty storage
    /// root with the provided `storage_root`.
    pub(crate) fn from_with_root(acc: Account, storage_root: H256) -> EthAccount {
        Self { storage_root, ..Self::from(acc) }
    }
}
/// Struct for calculating the root of a merkle patricia tree,
/// while populating the database with intermediate hashes.
#[derive(Debug, Default)]
pub struct DBTrieLoader;
impl DBTrieLoader {
/// Calculates the root of the state trie from scratch, saving intermediate
/// hashes in the database. Clears any previously stored trie nodes first.
pub fn calculate_root<DB: Database>(
    &self,
    tx: &Transaction<'_, DB>,
) -> Result<H256, TrieError> {
    // drop any stale trie state before a full rebuild
    tx.clear::<tables::AccountsTrie>()?;
    tx.clear::<tables::StoragesTrie>()?;
    let mut accounts_cursor = tx.cursor_read::<tables::HashedAccount>()?;
    let mut walker = accounts_cursor.walk(None)?;
    let db = Arc::new(HashDatabase::new(tx)?);
    let hasher = Arc::new(HasherKeccak::new());
    let mut trie = PatriciaTrie::new(Arc::clone(&db), Arc::clone(&hasher));
    // insert one leaf per hashed account, with its storage root computed first
    while let Some((hashed_address, account)) = walker.next().transpose()? {
        let value = EthAccount::from_with_root(
            account,
            self.calculate_storage_root(tx, hashed_address)?,
        );
        let mut out = Vec::new();
        Encodable::encode(&value, &mut out);
        trie.insert(hashed_address.as_bytes().to_vec(), out)?;
    }
    let root = H256::from_slice(trie.root()?.as_slice());
    Ok(root)
}
/// Calculates the storage-trie root for the account with hashed address
/// `address` from its `HashedStorage` entries.
fn calculate_storage_root<DB: Database>(
    &self,
    tx: &Transaction<'_, DB>,
    address: H256,
) -> Result<H256, TrieError> {
    let db = Arc::new(DupHashDatabase::new(tx, address)?);
    let hasher = Arc::new(HasherKeccak::new());
    let mut trie = PatriciaTrie::new(Arc::clone(&db), Arc::clone(&hasher));
    let mut storage_cursor = tx.cursor_dup_read::<tables::HashedStorage>()?;
    // Should be able to use walk_dup, but any call to next() causes an assert fail in mdbx.c
    // let mut walker = storage_cursor.walk_dup(address, H256::zero())?;
    let mut current = storage_cursor.seek_by_key_subkey(address, H256::zero())?;
    while let Some(StorageEntry { key: storage_key, value }) = current {
        let out = encode_fixed_size(&value).to_vec();
        trie.insert(storage_key.to_vec(), out)?;
        // advance through the dup entries for this account only
        current = storage_cursor.next_dup()?.map(|(_, v)| v);
    }
    let root = H256::from_slice(trie.root()?.as_slice());
    Ok(root)
}
/// Calculates the root of the state trie by updating an existing trie.
pub fn update_root<DB: Database>(
&self,
tx: &Transaction<'_, DB>,
root: H256,
tid_range: Range<TransitionId>,
) -> Result<H256, TrieError> {
let mut accounts_cursor = tx.cursor_read::<tables::HashedAccount>()?;
let changed_accounts = self.gather_changes(tx, tid_range)?;
let db = Arc::new(HashDatabase::from_root(tx, root)?);
let hasher = Arc::new(HasherKeccak::new());
let mut trie = PatriciaTrie::from(Arc::clone(&db), Arc::clone(&hasher), root.as_bytes())?;
for (address, changed_storages) in changed_accounts {
let storage_root = if let Some(account) = trie.get(address.as_slice())? {
trie.remove(address.as_bytes())?;
let storage_root = EthAccount::decode(&mut account.as_slice())?.storage_root;
self.update_storage_root(tx, storage_root, address, changed_storages)?
} else {
self.calculate_storage_root(tx, address)?
};
if let Some((_, account)) = accounts_cursor.seek_exact(address)? {
let value = EthAccount::from_with_root(account, storage_root);
let mut out = Vec::new();
Encodable::encode(&value, &mut out);
trie.insert(address.as_bytes().to_vec(), out)?;
}
}
let root = H256::from_slice(trie.root()?.as_slice());
Ok(root)
}
fn update_storage_root<DB: Database>(
&self,
tx: &Transaction<'_, DB>,
root: H256,
address: H256,
changed_storages: BTreeSet<H256>,
) -> Result<H256, TrieError> {
let db = Arc::new(DupHashDatabase::from_root(tx, address, root)?);
let hasher = Arc::new(HasherKeccak::new());
let mut trie = PatriciaTrie::from(Arc::clone(&db), Arc::clone(&hasher), root.as_bytes())?;
let mut storage_cursor = tx.cursor_dup_read::<tables::HashedStorage>()?;
for key in changed_storages {
if let Some(StorageEntry { value, .. }) =
storage_cursor.seek_by_key_subkey(address, key)?.filter(|e| e.key == key)
{
let out = encode_fixed_size(&value).to_vec();
trie.insert(key.as_bytes().to_vec(), out)?;
} else {
trie.remove(key.as_bytes())?;
}
}
let root = H256::from_slice(trie.root()?.as_slice());
Ok(root)
}
fn gather_changes<DB: Database>(
&self,
tx: &Transaction<'_, DB>,
tid_range: Range<TransitionId>,
) -> Result<BTreeMap<H256, BTreeSet<H256>>, TrieError> {
let mut account_cursor = tx.cursor_read::<tables::AccountChangeSet>()?;
let mut account_changes: BTreeMap<Address, BTreeSet<H256>> = BTreeMap::new();
let mut walker = account_cursor.walk_range(tid_range.clone())?;
while let Some((_, AccountBeforeTx { address, .. })) = walker.next().transpose()? {
account_changes.insert(address, Default::default());
}
let mut storage_cursor = tx.cursor_dup_read::<tables::StorageChangeSet>()?;
let start = TransitionIdAddress((tid_range.start, Address::zero()));
let end = TransitionIdAddress((tid_range.end, Address::zero()));
let mut walker = storage_cursor.walk_range(start..end)?;
while let Some((TransitionIdAddress((_, address)), StorageEntry { key, .. })) =
walker.next().transpose()?
{
account_changes.entry(address).or_default().insert(key);
}
let hashed_changes = account_changes
.into_iter()
.map(|(address, storage)| {
(keccak256(address), storage.into_iter().map(keccak256).collect())
})
.collect();
Ok(hashed_changes)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;
    use proptest::{prelude::ProptestConfig, proptest};
    use reth_db::{mdbx::test_utils::create_test_rw_db, tables, transaction::DbTxMut};
    use reth_primitives::{
        hex_literal::hex,
        keccak256,
        proofs::{genesis_state_root, KeccakHasher, EMPTY_ROOT},
        Address, ChainSpec, MAINNET,
    };
    use std::{collections::HashMap, str::FromStr};
    use triehash::sec_trie_root;

    /// An empty state must hash to the well-known empty trie root.
    #[test]
    fn empty_trie() {
        let trie = DBTrieLoader::default();
        let db = create_test_rw_db();
        let tx = Transaction::new(db.as_ref()).unwrap();
        assert_matches!(trie.calculate_root(&tx), Ok(got) if got == EMPTY_ROOT);
    }

    /// A single account: compare against a reference root from `sec_trie_root`.
    #[test]
    fn single_account_trie() {
        let trie = DBTrieLoader::default();
        let db = create_test_rw_db();
        let tx = Transaction::new(db.as_ref()).unwrap();
        let address = Address::from_str("9fe4abd71ad081f091bd06dd1c16f7e92927561e").unwrap();
        let account = Account { nonce: 0, balance: U256::ZERO, bytecode_hash: None };
        tx.put::<tables::HashedAccount>(keccak256(address), account).unwrap();
        let mut encoded_account = Vec::new();
        EthAccount::from(account).encode(&mut encoded_account);
        // sec_trie_root hashes the (unhashed) address itself, matching the DB's
        // keccak256(address) keying.
        let expected = H256(sec_trie_root::<KeccakHasher, _, _, _>([(address, encoded_account)]).0);
        assert_matches!(
            trie.calculate_root(&tx),
            Ok(got) if got == expected
        );
    }

    /// Two accounts, no storage: reference root built from both encoded accounts.
    #[test]
    fn two_accounts_trie() {
        let trie = DBTrieLoader::default();
        let db = create_test_rw_db();
        let tx = Transaction::new(db.as_ref()).unwrap();

        let accounts = [
            (
                Address::from(hex!("9fe4abd71ad081f091bd06dd1c16f7e92927561e")),
                Account { nonce: 155, balance: U256::from(414241124), bytecode_hash: None },
            ),
            (
                Address::from(hex!("f8a6edaad4a332e6e550d0915a7fd5300b0b12d1")),
                Account { nonce: 3, balance: U256::from(78978), bytecode_hash: None },
            ),
        ];
        for (address, account) in accounts {
            tx.put::<tables::HashedAccount>(keccak256(address), account).unwrap();
        }
        let encoded_accounts = accounts.iter().map(|(k, v)| {
            let mut out = Vec::new();
            EthAccount::from(*v).encode(&mut out);
            (k, out)
        });
        let expected = H256(sec_trie_root::<KeccakHasher, _, _, _>(encoded_accounts).0);
        assert_matches!(
            trie.calculate_root(&tx),
            Ok(got) if got == expected
        );
    }

    /// Storage-only check: `calculate_storage_root` against a reference storage root.
    #[test]
    fn single_storage_trie() {
        let trie = DBTrieLoader::default();
        let db = create_test_rw_db();
        let tx = Transaction::new(db.as_ref()).unwrap();

        let address = Address::from_str("9fe4abd71ad081f091bd06dd1c16f7e92927561e").unwrap();
        let hashed_address = keccak256(address);

        let storage = Vec::from([(H256::from_low_u64_be(2), U256::from(1))]);
        for (k, v) in storage.clone() {
            // DB stores hashed slot keys; the reference below hashes the raw keys.
            tx.put::<tables::HashedStorage>(
                hashed_address,
                StorageEntry { key: keccak256(k), value: v },
            )
            .unwrap();
        }
        let encoded_storage = storage.iter().map(|(k, v)| {
            let out = encode_fixed_size(v).to_vec();
            (k, out)
        });
        let expected = H256(sec_trie_root::<KeccakHasher, _, _, _>(encoded_storage).0);
        assert_matches!(
            trie.calculate_storage_root(&tx, hashed_address),
            Ok(got) if got == expected
        );
    }

    /// End-to-end: one account with storage; the expected account leaf embeds the
    /// reference storage root.
    #[test]
    fn single_account_with_storage_trie() {
        let trie = DBTrieLoader::default();
        let db = create_test_rw_db();
        let tx = Transaction::new(db.as_ref()).unwrap();

        let address = Address::from_str("9fe4abd71ad081f091bd06dd1c16f7e92927561e").unwrap();
        let hashed_address = keccak256(address);

        let storage = HashMap::from([
            (H256::zero(), U256::from(3)),
            (H256::from_low_u64_be(2), U256::from(1)),
        ]);
        let code = "el buen fla";
        let account = Account {
            nonce: 155,
            balance: U256::from(414241124u32),
            bytecode_hash: Some(keccak256(code)),
        };
        tx.put::<tables::HashedAccount>(hashed_address, account).unwrap();

        for (k, v) in storage.clone() {
            tx.put::<tables::HashedStorage>(
                hashed_address,
                StorageEntry { key: keccak256(k), value: v },
            )
            .unwrap();
        }
        let mut out = Vec::new();

        let encoded_storage = storage.iter().map(|(k, v)| {
            let out = encode_fixed_size(v).to_vec();
            (k, out)
        });

        let eth_account = EthAccount::from_with_root(
            account,
            H256(sec_trie_root::<KeccakHasher, _, _, _>(encoded_storage).0),
        );
        eth_account.encode(&mut out);

        let expected = H256(sec_trie_root::<KeccakHasher, _, _, _>([(address, out)]).0);
        assert_matches!(
            trie.calculate_root(&tx),
            Ok(got) if got == expected
        );
    }

    /// Full mainnet genesis allocation must reproduce the known genesis state root.
    #[test]
    fn verify_genesis() {
        let trie = DBTrieLoader::default();
        let db = create_test_rw_db();
        let mut tx = Transaction::new(db.as_ref()).unwrap();
        let ChainSpec { genesis, .. } = MAINNET.clone();

        // Insert account state
        for (address, account) in &genesis.alloc {
            tx.put::<tables::HashedAccount>(
                keccak256(address),
                Account {
                    nonce: account.nonce.unwrap_or_default(),
                    balance: account.balance,
                    bytecode_hash: None,
                },
            )
            .unwrap();
        }
        // Commit so the subsequent read in calculate_root sees the inserted state.
        tx.commit().unwrap();

        let state_root = genesis_state_root(&genesis.alloc);

        assert_matches!(
            trie.calculate_root(&tx),
            Ok(got) if got == state_root
        );
    }

    /// `gather_changes` must return hashed addresses mapped to hashed storage keys
    /// for the requested transition range only.
    #[test]
    fn gather_changes() {
        let db = create_test_rw_db();
        let tx = Transaction::new(db.as_ref()).unwrap();

        let address = Address::from_str("9fe4abd71ad081f091bd06dd1c16f7e92927561e").unwrap();
        let hashed_address = keccak256(address);

        let storage = HashMap::from([
            (H256::zero(), U256::from(3)),
            (H256::from_low_u64_be(2), U256::from(1)),
        ]);
        let code = "el buen fla";
        let account = Account {
            nonce: 155,
            balance: U256::from(414241124u32),
            bytecode_hash: Some(keccak256(code)),
        };
        tx.put::<tables::HashedAccount>(hashed_address, account).unwrap();
        // Account change at transition 31 lies outside the queried 32..33 range.
        tx.put::<tables::AccountChangeSet>(31, AccountBeforeTx { address, info: None }).unwrap();

        for (k, v) in storage {
            tx.put::<tables::HashedStorage>(
                hashed_address,
                StorageEntry { key: keccak256(k), value: v },
            )
            .unwrap();
            // Storage changes at transition 32 fall inside the range.
            tx.put::<tables::StorageChangeSet>(
                (32, address).into(),
                StorageEntry { key: k, value: U256::ZERO },
            )
            .unwrap();
        }

        let expected = BTreeMap::from([(
            hashed_address,
            BTreeSet::from([keccak256(H256::zero()), keccak256(H256::from_low_u64_be(2))]),
        )]);
        assert_matches!(
            DBTrieLoader::default().gather_changes(&tx, 32..33),
            Ok(got) if got == expected
        );
    }

    /// Shared driver for the property test: insert arbitrary accounts (storage only
    /// for contract accounts) and compare against a `sec_trie_root` reference.
    fn test_with_accounts(accounts: BTreeMap<Address, (Account, BTreeSet<StorageEntry>)>) {
        let trie = DBTrieLoader::default();
        let db = create_test_rw_db();
        let tx = Transaction::new(db.as_ref()).unwrap();

        let encoded_accounts = accounts
            .into_iter()
            .map(|(address, (account, storage))| {
                let hashed_address = keccak256(address);
                tx.put::<tables::HashedAccount>(hashed_address, account).unwrap();
                // This is to mimic real data. Only contract accounts have storage.
                let storage_root = if account.has_bytecode() {
                    let encoded_storage = storage.into_iter().map(|StorageEntry { key, value }| {
                        let hashed_key = keccak256(key);
                        let out = encode_fixed_size(&value).to_vec();
                        tx.put::<tables::HashedStorage>(
                            hashed_address,
                            StorageEntry { key: hashed_key, value },
                        )
                        .unwrap();
                        (key, out)
                    });
                    H256(sec_trie_root::<KeccakHasher, _, _, _>(encoded_storage).0)
                } else {
                    EMPTY_ROOT
                };
                let mut out = Vec::new();
                EthAccount::from_with_root(account, storage_root).encode(&mut out);
                (address, out)
            })
            .collect::<Vec<(Address, Vec<u8>)>>();

        let expected = H256(sec_trie_root::<KeccakHasher, _, _, _>(encoded_accounts).0);
        assert_matches!(
            trie.calculate_root(&tx),
            Ok(got) if got == expected
        , "where expected is {expected:?}");
    }

    /// Property test: `calculate_root` matches the reference for arbitrary states.
    #[test]
    fn arbitrary() {
        proptest!(ProptestConfig::with_cases(10), |(accounts: BTreeMap<Address, (Account, BTreeSet<StorageEntry>)>)| {
            test_with_accounts(accounts);
        });
    }
}

View File

@@ -4,34 +4,39 @@ use reth_db::{
transaction::{DbTx, DbTxMut},
};
use reth_interfaces::{provider::ProviderError, Result};
use reth_primitives::{SealedBlock, U256};
use reth_primitives::{SealedBlock, TransitionId, U256};
/// Insert block data into corresponding tables. Used mainly for testing & internal tooling.
///
///
/// Check parent dependency in [tables::HeaderNumbers] and in [tables::BlockBodies] tables.
/// Inserts blocks data to [tables::CanonicalHeaders], [tables::Headers], [tables::HeaderNumbers],
/// and transactions data to [tables::TxSenders], [tables::Transactions],
/// [tables::BlockBodies] and [tables::BlockBodies]
/// Inserts header data to [tables::CanonicalHeaders], [tables::Headers], [tables::HeaderNumbers].
/// and transactions data to [tables::TxSenders], [tables::Transactions], [tables::TxHashNumber].
/// and transition indexes to [tables::BlockTransitionIndex] and [tables::TxTransitionIndex].
/// And block data [tables::BlockBodies], [tables::BlockBodies] and [tables::BlockWithdrawals].
///
/// Return [TransitionId] `(from,to)`
pub fn insert_block<'a, TX: DbTxMut<'a> + DbTx<'a>>(
tx: &TX,
block: &SealedBlock,
has_block_reward: bool,
parent_tx_num_transition_id: Option<(u64, u64)>,
) -> Result<()> {
) -> Result<(TransitionId, TransitionId)> {
tx.put::<tables::CanonicalHeaders>(block.number, block.hash())?;
// Put header with canonical hashes.
tx.put::<tables::Headers>(block.number, block.header.as_ref().clone())?;
tx.put::<tables::HeaderNumbers>(block.hash(), block.number)?;
tx.put::<tables::HeaderTD>(
block.number,
if has_block_reward {
U256::ZERO
} else {
U256::from(58_750_000_000_000_000_000_000_u128) + block.difficulty
}
.into(),
)?;
// total difficulty
let ttd = if block.number == 0 {
U256::ZERO
} else {
let parent_block_number = block.number - 1;
let parent_ttd = tx.get::<tables::HeaderTD>(parent_block_number)?.unwrap_or_default();
parent_ttd.0 + block.difficulty
};
tx.put::<tables::HeaderTD>(block.number, ttd.into())?;
// insert body ommers data
if !block.ommers.is_empty() {
@@ -56,7 +61,7 @@ pub fn insert_block<'a, TX: DbTxMut<'a> + DbTx<'a>>(
.ok_or(ProviderError::BlockTransition { block_number: prev_block_num })?;
(prev_body.start_tx_id + prev_body.tx_count, last_transition_id)
};
let from_transition = transition_id;
// insert body data
tx.put::<tables::BlockBodies>(
block.number,
@@ -65,9 +70,11 @@ pub fn insert_block<'a, TX: DbTxMut<'a> + DbTx<'a>>(
for transaction in block.body.iter() {
let rec_tx = transaction.clone().into_ecrecovered().unwrap();
let hash = rec_tx.hash();
tx.put::<tables::TxSenders>(current_tx_id, rec_tx.signer())?;
tx.put::<tables::Transactions>(current_tx_id, rec_tx.into())?;
tx.put::<tables::TxTransitionIndex>(current_tx_id, transition_id)?;
tx.put::<tables::TxHashNumber>(hash, current_tx_id)?;
transition_id += 1;
current_tx_id += 1;
}
@@ -88,7 +95,8 @@ pub fn insert_block<'a, TX: DbTxMut<'a> + DbTx<'a>>(
}
tx.put::<tables::BlockTransitionIndex>(block.number, transition_id)?;
Ok(())
let to_transition = transition_id;
Ok((from_transition, to_transition))
}
/// Inserts canonical block in blockchain. Parent tx num and transition id is taken from
@@ -97,6 +105,6 @@ pub fn insert_canonical_block<'a, TX: DbTxMut<'a> + DbTx<'a>>(
tx: &TX,
block: &SealedBlock,
has_block_reward: bool,
) -> Result<()> {
) -> Result<(TransitionId, TransitionId)> {
insert_block(tx, block, has_block_reward, None)
}