feat(trie): state root (#2206)

Co-authored-by: Georgios Konstantopoulos <me@gakonst.com>
This commit is contained in:
Roman Krasiuk
2023-04-12 19:48:33 +03:00
committed by GitHub
parent d99befe818
commit 0759b30404
38 changed files with 2234 additions and 1651 deletions

33
Cargo.lock generated
View File

@@ -649,17 +649,6 @@ dependencies = [
"inout",
]
[[package]]
name = "cita_trie"
version = "4.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "afe7baab47f510f52ca8dc9c0eb9082020c627c7f22285bea30edc3511f7ee29"
dependencies = [
"hasher",
"parking_lot 0.12.1",
"rlp",
]
[[package]]
name = "clang-sys"
version = "1.6.0"
@@ -2395,15 +2384,6 @@ dependencies = [
"serde",
]
[[package]]
name = "hasher"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbbba678b6567f27ce22870d951f4208e1dc2fef64993bd4521b1d497ef8a3aa"
dependencies = [
"tiny-keccak",
]
[[package]]
name = "hashers"
version = "1.0.1"
@@ -5001,24 +4981,18 @@ dependencies = [
name = "reth-provider"
version = "0.1.0"
dependencies = [
"assert_matches",
"auto_impl",
"cita_trie",
"hasher",
"itertools",
"parking_lot 0.12.1",
"proptest",
"reth-codecs",
"reth-db",
"reth-interfaces",
"reth-primitives",
"reth-revm-primitives",
"reth-rlp",
"reth-tracing",
"reth-trie",
"thiserror",
"tokio",
"tracing",
"triehash",
]
[[package]]
@@ -5262,6 +5236,7 @@ dependencies = [
"reth-provider",
"reth-revm",
"reth-rlp",
"reth-trie",
"thiserror",
"tokio",
"tokio-stream",
@@ -5322,8 +5297,12 @@ dependencies = [
"derive_more",
"hex",
"proptest",
"reth-db",
"reth-interfaces",
"reth-primitives",
"reth-provider",
"reth-rlp",
"thiserror",
"tokio",
"tokio-stream",
"tracing",

View File

@@ -67,7 +67,7 @@ pub use peer::{PeerId, WithPeerId};
pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef};
pub use revm_primitives::JumpMap;
pub use serde_helper::JsonU256;
pub use storage::{StorageEntry, StorageTrieEntry};
pub use storage::StorageEntry;
pub use transaction::{
util::secp256k1::sign_message, AccessList, AccessListItem, AccessListWithGasUsed,
FromRecoveredTransaction, IntoRecoveredTransaction, InvalidTransactionError, Signature,

View File

@@ -37,34 +37,3 @@ impl Compact for StorageEntry {
(Self { key, value }, out)
}
}
/// Account storage trie node.
#[derive_arbitrary(compact)]
#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize)]
pub struct StorageTrieEntry {
/// Hashed storage key.
pub hash: H256,
/// Encoded node.
pub node: Vec<u8>,
}
// NOTE: Removing main_codec and manually encode subkey
// and compress second part of the value. If we have compression
// over whole value (Even SubKey) that would mess up fetching of values with seek_by_key_subkey
impl Compact for StorageTrieEntry {
fn to_compact(self, buf: &mut impl bytes::BufMut) -> usize {
// for now put full bytes and later compress it.
buf.put_slice(&self.hash.to_fixed_bytes()[..]);
buf.put_slice(&self.node[..]);
self.node.len() + 32
}
fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8])
where
Self: Sized,
{
let key = H256::from_slice(&buf[..32]);
let node = Vec::from(&buf[32..len]);
(Self { hash: key, node }, &buf[len..])
}
}

View File

@@ -40,12 +40,14 @@ pub struct BranchNodeCompact {
impl BranchNodeCompact {
/// Creates a new [BranchNodeCompact] from the given parameters.
pub fn new(
state_mask: TrieMask,
tree_mask: TrieMask,
hash_mask: TrieMask,
state_mask: impl Into<TrieMask>,
tree_mask: impl Into<TrieMask>,
hash_mask: impl Into<TrieMask>,
hashes: Vec<H256>,
root_hash: Option<H256>,
) -> Self {
let (state_mask, tree_mask, hash_mask) =
(state_mask.into(), tree_mask.into(), hash_mask.into());
assert!(tree_mask.is_subset_of(&state_mask));
assert!(hash_mask.is_subset_of(&state_mask));
assert_eq!(hash_mask.count_ones() as usize, hashes.len());
@@ -129,9 +131,9 @@ mod tests {
#[test]
fn node_encoding() {
let n = BranchNodeCompact::new(
0xf607.into(),
0x0005.into(),
0x4004.into(),
0xf607,
0x0005,
0x4004,
vec![
hex!("90d53cd810cc5d4243766cd4451e7b9d14b736a1148b26b3baac7617f617d321").into(),
hex!("cc35c964dda53ba6c0b87798073a9628dbc9cd26b5cce88eb69655a9c609caf1").into(),

View File

@@ -45,7 +45,7 @@ impl TrieMask {
}
/// Returns `true` if a given bit is set in a mask.
pub fn is_bit_set(&self, index: i32) -> bool {
pub fn is_bit_set(&self, index: u8) -> bool {
self.0 & (1u16 << index) != 0
}

View File

@@ -1,10 +1,13 @@
//! Collection of trie related types.
mod nibbles;
pub use nibbles::{StoredNibbles, StoredNibblesSubKey};
mod branch_node;
pub use branch_node::BranchNodeCompact;
mod mask;
pub use mask::TrieMask;
mod nibbles;
mod storage;
pub use self::{
branch_node::BranchNodeCompact,
mask::TrieMask,
nibbles::{StoredNibbles, StoredNibblesSubKey},
storage::StorageTrieEntry,
};

View File

@@ -0,0 +1,33 @@
use super::{BranchNodeCompact, StoredNibblesSubKey};
use reth_codecs::Compact;
use serde::{Deserialize, Serialize};
/// Account storage trie node.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)]
pub struct StorageTrieEntry {
/// The nibbles of the intermediate node
pub nibbles: StoredNibblesSubKey,
/// Encoded node.
pub node: BranchNodeCompact,
}
// NOTE: Removing main_codec and manually encode subkey
// and compress second part of the value. If we have compression
// over whole value (Even SubKey) that would mess up fetching of values with seek_by_key_subkey
impl Compact for StorageTrieEntry {
fn to_compact(self, buf: &mut impl bytes::BufMut) -> usize {
let nibbles_len = self.nibbles.to_compact(buf);
let node_len = self.node.to_compact(buf);
nibbles_len + node_len
}
fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8])
where
Self: Sized,
{
let (nibbles, buf) = StoredNibblesSubKey::from_compact(buf, 33);
let (node, buf) = BranchNodeCompact::from_compact(buf, len - 33);
let this = Self { nibbles, node };
(this, buf)
}
}

View File

@@ -73,7 +73,6 @@ where
EthApiClient::block_by_number(client, block_number, false).await.unwrap();
EthApiClient::block_transaction_count_by_number(client, block_number).await.unwrap();
EthApiClient::block_transaction_count_by_hash(client, hash).await.unwrap();
EthApiClient::get_proof(client, address, vec![], None).await.unwrap();
EthApiClient::block_uncles_count_by_hash(client, hash).await.unwrap();
EthApiClient::block_uncles_count_by_number(client, block_number).await.unwrap();
EthApiClient::uncle_by_block_hash_and_index(client, hash, index).await.unwrap();
@@ -100,6 +99,9 @@ where
EthApiClient::submit_hashrate(client, U256::default(), H256::default()).await.unwrap();
// Unimplemented
assert!(is_unimplemented(
EthApiClient::get_proof(client, address, vec![], None).await.err().unwrap()
));
assert!(is_unimplemented(EthApiClient::author(client).await.err().unwrap()));
assert!(is_unimplemented(EthApiClient::gas_price(client).await.err().unwrap()));
assert!(is_unimplemented(EthApiClient::max_priority_fee_per_gas(client).await.err().unwrap()));

View File

@@ -422,19 +422,21 @@ where
/// Handler for: `eth_getProof`
async fn get_proof(
&self,
address: Address,
keys: Vec<JsonStorageKey>,
block_number: Option<BlockId>,
_address: Address,
_keys: Vec<JsonStorageKey>,
_block_number: Option<BlockId>,
) -> Result<EIP1186AccountProofResponse> {
trace!(target: "rpc::eth", ?address, ?keys, ?block_number, "Serving eth_getProof");
let res = EthApi::get_proof(self, address, keys, block_number);
// TODO: uncomment when implemented
// trace!(target: "rpc::eth", ?address, ?keys, ?block_number, "Serving eth_getProof");
// let res = EthApi::get_proof(self, address, keys, block_number);
Ok(res.map_err(|e| match e {
EthApiError::InvalidBlockRange => {
internal_rpc_err("eth_getProof is unimplemented for historical blocks")
}
_ => e.into(),
})?)
// Ok(res.map_err(|e| match e {
// EthApiError::InvalidBlockRange => {
// internal_rpc_err("eth_getProof is unimplemented for historical blocks")
// }
// _ => e.into(),
// })?)
Err(internal_rpc_err("unimplemented"))
}
}

View File

@@ -21,6 +21,7 @@ reth-db = { path = "../storage/db" }
reth-codecs = { path = "../storage/codecs" }
reth-provider = { path = "../storage/provider" }
reth-metrics-derive = { path = "../metrics/metrics-derive" }
reth-trie = { path = "../trie" }
# async
tokio = { version = "1.21.2", features = ["sync"] }
@@ -50,6 +51,7 @@ reth-eth-wire = { path = "../net/eth-wire" } # TODO(o
reth-executor = { path = "../executor" }
reth-rlp = { path = "../rlp" }
reth-revm = { path = "../revm" }
reth-trie = { path = "../trie", features = ["test-utils"] }
itertools = "0.10.5"
tokio = { version = "*", features = ["rt", "sync", "macros"] }

View File

@@ -10,12 +10,12 @@ use reth_interfaces::test_utils::generators::{
random_transition_range,
};
use reth_primitives::{Account, Address, SealedBlock, H256};
use reth_provider::trie::DBTrieLoader;
use reth_stages::{
stages::{AccountHashingStage, StorageHashingStage},
test_utils::TestTransaction,
ExecInput, Stage, UnwindInput,
};
use reth_trie::StateRoot;
use std::{
collections::BTreeMap,
ops::Deref,
@@ -70,9 +70,6 @@ pub(crate) fn unwind_hashes<S: Clone + Stage<Env<WriteMap>>>(
StorageHashingStage::default().unwind(&mut db_tx, unwind).await.unwrap();
AccountHashingStage::default().unwind(&mut db_tx, unwind).await.unwrap();
let target_root = db_tx.get_header(unwind.unwind_to).unwrap().state_root;
let _ = db_tx.delete::<tables::AccountsTrie>(target_root, None);
// Clear previous run
stage.unwind(&mut db_tx, unwind).await.unwrap();
@@ -124,8 +121,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> PathBuf {
tx.insert_accounts_and_storages(start_state.clone()).unwrap();
// make first block after genesis have valid state root
let root =
DBTrieLoader::new(tx.inner().deref()).calculate_root().and_then(|e| e.root()).unwrap();
let root = StateRoot::new(tx.inner().deref()).root(None).unwrap();
let second_block = blocks.get_mut(1).unwrap();
let cloned_second = second_block.clone();
let mut updated_header = cloned_second.header.unseal();
@@ -146,18 +142,11 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> PathBuf {
// make last block have valid state root
let root = {
let mut tx_mut = tx.inner();
let root =
DBTrieLoader::new(tx_mut.deref()).calculate_root().and_then(|e| e.root()).unwrap();
let root = StateRoot::new(tx.inner().deref()).root(None).unwrap();
tx_mut.commit().unwrap();
root
};
tx.query(|tx| {
assert!(tx.get::<tables::AccountsTrie>(root)?.is_some());
Ok(())
})
.unwrap();
let last_block = blocks.last_mut().unwrap();
let cloned_last = last_block.clone();
let mut updated_header = cloned_last.header.unseal();

View File

@@ -1,10 +1,9 @@
use crate::{ExecInput, ExecOutput, Stage, StageError, StageId, UnwindInput, UnwindOutput};
use reth_db::{database::Database, tables, transaction::DbTx};
use reth_db::{database::Database, tables, transaction::DbTxMut};
use reth_interfaces::consensus;
use reth_provider::{
trie::{DBTrieLoader, TrieProgress},
Transaction,
};
use reth_primitives::{BlockNumber, H256};
use reth_provider::Transaction;
use reth_trie::StateRoot;
use std::{fmt::Debug, ops::DerefMut};
use tracing::*;
@@ -65,6 +64,24 @@ impl MerkleStage {
pub fn default_unwind() -> Self {
Self::Unwind
}
/// Check that the computed state root matches the expected.
fn validate_state_root(
&self,
got: H256,
expected: H256,
target_block: BlockNumber,
) -> Result<(), StageError> {
if got == expected {
Ok(())
} else {
warn!(target: "sync::stages::merkle", ?target_block, ?got, ?expected, "Block's root state failed verification");
Err(StageError::Validation {
block: target_block,
error: consensus::ConsensusError::BodyStateRootDiff { got, expected },
})
}
}
}
#[async_trait::async_trait]
@@ -108,43 +125,23 @@ impl<DB: Database> Stage<DB> for MerkleStage {
let trie_root = if from_transition == to_transition {
block_root
} else if to_transition - from_transition > threshold || stage_progress == 0 {
// if there are more blocks than threshold it is faster to rebuild the trie
debug!(target: "sync::stages::merkle::exec", current = ?stage_progress, target = ?previous_stage_progress, "Rebuilding trie");
tx.clear::<tables::AccountsTrie>()?;
tx.clear::<tables::StoragesTrie>()?;
StateRoot::new(tx.deref_mut()).root(None).map_err(|e| StageError::Fatal(Box::new(e)))?
} else {
let res = if to_transition - from_transition > threshold || stage_progress == 0 {
debug!(target: "sync::stages::merkle::exec", current = ?stage_progress, target = ?previous_stage_progress, "Rebuilding trie");
// if there are more blocks than threshold it is faster to rebuild the trie
let mut loader = DBTrieLoader::new(tx.deref_mut());
loader.calculate_root().map_err(|e| StageError::Fatal(Box::new(e)))?
} else {
debug!(target: "sync::stages::merkle::exec", current = ?stage_progress, target = ?previous_stage_progress, "Updating trie");
// Iterate over changeset (similar to Hashing stages) and take new values
let current_root = tx.get_header(stage_progress)?.state_root;
let mut loader = DBTrieLoader::new(tx.deref_mut());
loader
.update_root(current_root, from_transition..to_transition)
.map_err(|e| StageError::Fatal(Box::new(e)))?
};
match res {
TrieProgress::Complete(root) => root,
TrieProgress::InProgress(_) => {
return Ok(ExecOutput { stage_progress, done: false })
}
}
debug!(target: "sync::stages::merkle::exec", current = ?stage_progress, target =
?previous_stage_progress, "Updating trie"); // Iterate over
StateRoot::incremental_root(tx.deref_mut(), from_transition..to_transition, None)
.map_err(|e| StageError::Fatal(Box::new(e)))?
};
if block_root != trie_root {
warn!(target: "sync::stages::merkle::exec", ?previous_stage_progress, got = ?trie_root, expected = ?block_root, "Block's root state failed verification");
return Err(StageError::Validation {
block: previous_stage_progress,
error: consensus::ConsensusError::BodyStateRootDiff {
got: trie_root,
expected: block_root,
},
})
}
self.validate_state_root(trie_root, block_root, previous_stage_progress)?;
info!(target: "sync::stages::merkle::exec", "Stage finished");
Ok(ExecOutput { stage_progress: input.previous_stage_progress(), done: true })
Ok(ExecOutput { stage_progress: previous_stage_progress, done: true })
}
/// Unwind the stage.
@@ -158,49 +155,24 @@ impl<DB: Database> Stage<DB> for MerkleStage {
return Ok(UnwindOutput { stage_progress: input.unwind_to })
}
let target_root = tx.get_header(input.unwind_to)?.state_root;
// If the merkle stage fails to execute, the trie changes weren't committed
// and the root stayed the same
if tx.get::<tables::AccountsTrie>(target_root)?.is_some() {
info!(target: "sync::stages::merkle::unwind", "Stage skipped");
if input.unwind_to == 0 {
tx.clear::<tables::AccountsTrie>()?;
tx.clear::<tables::StoragesTrie>()?;
return Ok(UnwindOutput { stage_progress: input.unwind_to })
}
let current_root = tx.get_header(input.stage_progress)?.state_root;
let from_transition = tx.get_block_transition(input.unwind_to)?;
let to_transition = tx.get_block_transition(input.stage_progress)?;
let mut loader = DBTrieLoader::new(tx.deref_mut());
let block_root = loop {
match loader
.update_root(current_root, from_transition..to_transition)
.map_err(|e| StageError::Fatal(Box::new(e)))?
{
TrieProgress::Complete(root) => break root,
TrieProgress::InProgress(_) => {
// Save the loader's progress & drop it to allow committing to the database,
// otherwise we're hitting the borrow checker
let progress = loader.current;
let _ = loader;
tx.commit()?;
// Reinstantiate the loader from where it was left off.
loader = DBTrieLoader::new(tx.deref_mut());
loader.current = progress;
}
}
};
if block_root != target_root {
let unwind_to = input.unwind_to;
warn!(target: "sync::stages::merkle::unwind", ?unwind_to, got = ?block_root, expected = ?target_root, "Block's root state failed verification");
return Err(StageError::Validation {
block: unwind_to,
error: consensus::ConsensusError::BodyStateRootDiff {
got: block_root,
expected: target_root,
},
})
// Unwind trie only if there are transitions
if from_transition < to_transition {
let block_root =
StateRoot::incremental_root(tx.deref_mut(), from_transition..to_transition, None)
.map_err(|e| StageError::Fatal(Box::new(e)))?;
let target_root = tx.get_header(input.unwind_to)?.state_root;
self.validate_state_root(block_root, target_root, input.unwind_to)?;
} else {
info!(target: "sync::stages::merkle::unwind", "Nothing to unwind");
}
info!(target: "sync::stages::merkle::unwind", "Stage finished");
@@ -217,17 +189,16 @@ mod tests {
};
use assert_matches::assert_matches;
use reth_db::{
cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW},
database::DatabaseGAT,
mdbx::{Env, WriteMap},
cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO},
tables,
transaction::{DbTx, DbTxMut},
};
use reth_interfaces::test_utils::generators::{
random_block, random_block_range, random_contract_account_range, random_transition_range,
};
use reth_primitives::{keccak256, Account, Address, SealedBlock, StorageEntry, H256, U256};
use std::{collections::BTreeMap, ops::Deref};
use reth_primitives::{keccak256, SealedBlock, StorageEntry, H256, U256};
use reth_trie::test_utils::{state_root, state_root_prehashed};
use std::collections::BTreeMap;
stage_test_suite_ext!(MerkleTestRunner, merkle);
@@ -288,12 +259,6 @@ mod tests {
assert!(runner.validate_execution(input, result.ok()).is_ok(), "execution validation");
}
fn create_trie_loader<'tx, 'db>(
tx: &'tx Transaction<'db, Env<WriteMap>>,
) -> DBTrieLoader<'tx, <Env<WriteMap> as DatabaseGAT<'db>>::TXMut> {
DBTrieLoader::new(tx.deref())
}
struct MerkleTestRunner {
tx: TestTransaction,
clean_threshold: u64,
@@ -325,24 +290,30 @@ mod tests {
let stage_progress = input.stage_progress.unwrap_or_default();
let end = input.previous_stage_progress() + 1;
let n_accounts = 31;
let accounts = random_contract_account_range(&mut (0..n_accounts))
let num_of_accounts = 31;
let accounts = random_contract_account_range(&mut (0..num_of_accounts))
.into_iter()
.collect::<BTreeMap<_, _>>();
self.tx.insert_accounts_and_storages(
accounts.iter().map(|(addr, acc)| (*addr, (*acc, std::iter::empty()))),
)?;
let SealedBlock { header, body, ommers, withdrawals } =
random_block(stage_progress, None, Some(0), None);
let mut header = header.unseal();
header.state_root =
self.generate_initial_trie(accounts.iter().map(|(k, v)| (*k, *v)))?;
header.state_root = state_root(
accounts
.clone()
.into_iter()
.map(|(address, account)| (address, (account, std::iter::empty()))),
);
let sealed_head = SealedBlock { header: header.seal_slow(), body, ommers, withdrawals };
let head_hash = sealed_head.hash();
let mut blocks = vec![sealed_head];
blocks.extend(random_block_range((stage_progress + 1)..end, head_hash, 0..3));
self.tx.insert_blocks(blocks.iter(), None)?;
let (transitions, final_state) = random_transition_range(
@@ -351,13 +322,30 @@ mod tests {
0..3,
0..256,
);
self.tx.insert_transitions(transitions, None)?;
self.tx.insert_accounts_and_storages(final_state)?;
// Calculate state root
let root = self.tx.query(|tx| {
let mut accounts = BTreeMap::default();
let mut accounts_cursor = tx.cursor_read::<tables::HashedAccount>()?;
let mut storage_cursor = tx.cursor_dup_read::<tables::HashedStorage>()?;
for entry in accounts_cursor.walk_range(..)? {
let (key, account) = entry?;
let storage_entries =
storage_cursor.walk_dup(Some(key), None)?.collect::<Result<Vec<_>, _>>()?;
let storage = storage_entries
.into_iter()
.filter(|(_, v)| v.value != U256::ZERO)
.map(|(_, v)| (v.key, v.value))
.collect::<Vec<_>>();
accounts.insert(key, (account, storage));
}
Ok(state_root_prehashed(accounts.into_iter()))
})?;
let last_block_number = end - 1;
let root = self.state_root()?;
self.tx.commit(|tx| {
let mut last_header = tx.get::<tables::Headers>(last_block_number)?.unwrap();
last_header.state_root = root;
@@ -369,17 +357,11 @@ mod tests {
fn validate_execution(
&self,
input: ExecInput,
output: Option<ExecOutput>,
_input: ExecInput,
_output: Option<ExecOutput>,
) -> Result<(), TestRunnerError> {
if let Some(output) = output {
let start_block = input.stage_progress.unwrap_or_default() + 1;
let end_block = output.stage_progress;
if start_block > end_block {
return Ok(())
}
}
self.check_root(input.previous_stage_progress())
// The execution is validated within the stage
Ok(())
}
}
@@ -394,14 +376,15 @@ mod tests {
self.tx
.commit(|tx| {
let mut changeset_cursor =
let mut storage_changesets_cursor =
tx.cursor_dup_read::<tables::StorageChangeSet>().unwrap();
let mut hash_cursor = tx.cursor_dup_write::<tables::HashedStorage>().unwrap();
let mut rev_changeset_walker = changeset_cursor.walk_back(None).unwrap();
let mut storage_cursor =
tx.cursor_dup_write::<tables::HashedStorage>().unwrap();
let mut tree: BTreeMap<H256, BTreeMap<H256, U256>> = BTreeMap::new();
let mut rev_changeset_walker =
storage_changesets_cursor.walk_back(None).unwrap();
while let Some((tid_address, entry)) =
rev_changeset_walker.next().transpose().unwrap()
{
@@ -413,15 +396,18 @@ mod tests {
.or_default()
.insert(keccak256(entry.key), entry.value);
}
for (key, val) in tree.into_iter() {
for (entry_key, entry_val) in val.into_iter() {
hash_cursor.seek_by_key_subkey(key, entry_key).unwrap();
hash_cursor.delete_current().unwrap();
for (hashed_address, storage) in tree.into_iter() {
for (hashed_slot, value) in storage.into_iter() {
let storage_entry = storage_cursor
.seek_by_key_subkey(hashed_address, hashed_slot)
.unwrap();
if storage_entry.map(|v| v.key == hashed_slot).unwrap_or_default() {
storage_cursor.delete_current().unwrap();
}
if entry_val != U256::ZERO {
let storage_entry =
StorageEntry { key: entry_key, value: entry_val };
hash_cursor.append_dup(key, storage_entry).unwrap();
if value != U256::ZERO {
let storage_entry = StorageEntry { key: hashed_slot, value };
storage_cursor.upsert(hashed_address, storage_entry).unwrap();
}
}
}
@@ -437,28 +423,18 @@ mod tests {
break
}
match account_before_tx.info {
Some(acc) => {
tx.put::<tables::PlainAccountState>(account_before_tx.address, acc)
.unwrap();
tx.put::<tables::HashedAccount>(
keccak256(account_before_tx.address),
acc,
)
.unwrap();
}
None => {
tx.delete::<tables::PlainAccountState>(
account_before_tx.address,
None,
)
.unwrap();
tx.delete::<tables::HashedAccount>(
keccak256(account_before_tx.address),
None,
)
.unwrap();
}
if let Some(acc) = account_before_tx.info {
tx.put::<tables::HashedAccount>(
keccak256(account_before_tx.address),
acc,
)
.unwrap();
} else {
tx.delete::<tables::HashedAccount>(
keccak256(account_before_tx.address),
None,
)
.unwrap();
}
}
Ok(())
@@ -467,48 +443,8 @@ mod tests {
Ok(())
}
fn validate_unwind(&self, input: UnwindInput) -> Result<(), TestRunnerError> {
self.check_root(input.unwind_to)
}
}
impl MerkleTestRunner {
fn state_root(&self) -> Result<H256, TestRunnerError> {
Ok(create_trie_loader(&self.tx.inner())
.calculate_root()
.and_then(|e| e.root())
.unwrap())
}
pub(crate) fn generate_initial_trie(
&self,
accounts: impl IntoIterator<Item = (Address, Account)>,
) -> Result<H256, TestRunnerError> {
self.tx.insert_accounts_and_storages(
accounts.into_iter().map(|(addr, acc)| (addr, (acc, std::iter::empty()))),
)?;
let mut tx = self.tx.inner();
let root = create_trie_loader(&tx)
.calculate_root()
.and_then(|e| e.root())
.expect("couldn't create initial trie");
tx.commit()?;
Ok(root)
}
fn check_root(&self, previous_stage_progress: u64) -> Result<(), TestRunnerError> {
if previous_stage_progress != 0 {
let block_root =
self.tx.inner().get_header(previous_stage_progress).unwrap().state_root;
let root = create_trie_loader(&self.tx().inner())
.calculate_root()
.and_then(|e| e.root())
.unwrap();
assert_eq!(block_root, root);
}
fn validate_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> {
// The unwind is validated within the stage
Ok(())
}
}

View File

@@ -14,7 +14,7 @@ mod headers;
mod index_account_history;
/// Index history of storage changes
mod index_storage_history;
/// Intermediate hashes and creating merkle root
/// Stage for computing state root.
mod merkle;
/// The sender recovery stage.
mod sender_recovery;

View File

@@ -4,10 +4,7 @@ use crate::{
Error,
};
use reth_codecs::{main_codec, Compact};
use reth_primitives::{
trie::{StoredNibbles, StoredNibblesSubKey},
*,
};
use reth_primitives::{trie::*, *};
/// Implements compression for Compact type.
macro_rules! impl_compression_for_compact {
@@ -44,6 +41,7 @@ impl_compression_for_compact!(
TxType,
StorageEntry,
StoredNibbles,
BranchNodeCompact,
StoredNibblesSubKey,
StorageTrieEntry,
StoredBlockBodyIndices,

View File

@@ -33,8 +33,9 @@ use crate::{
},
};
use reth_primitives::{
trie::{BranchNodeCompact, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey},
Account, Address, BlockHash, BlockNumber, Bytecode, Header, IntegerList, Receipt, StorageEntry,
StorageTrieEntry, TransactionSigned, TransitionId, TxHash, TxNumber, H256,
TransactionSigned, TransitionId, TxHash, TxNumber, H256,
};
/// Enum for the types of tables present in libmdbx.
@@ -280,12 +281,12 @@ dupsort!(
table!(
/// Stores the current state's Merkle Patricia Tree.
( AccountsTrie ) H256 | Vec<u8>
( AccountsTrie ) StoredNibbles | BranchNodeCompact
);
dupsort!(
/// Stores the Merkle Patricia Trees of each [`Account`]'s storage.
( StoragesTrie ) H256 | [H256] StorageTrieEntry
/// From HashedAddress => NibblesSubKey => Intermediate value
( StoragesTrie ) H256 | [StoredNibblesSubKey] StorageTrieEntry
);
table!(

View File

@@ -13,17 +13,11 @@ reth-primitives = { path = "../../primitives" }
reth-interfaces = { path = "../../interfaces" }
reth-revm-primitives = { path = "../../revm/revm-primitives" }
reth-db = { path = "../db" }
reth-codecs = { path = "../codecs" }
reth-tracing = { path = "../../tracing" }
reth-rlp = { path = "../../rlp" }
reth-trie = { path = "../../trie" }
# async
tokio = { version = "1.21", features = ["sync", "macros", "rt-multi-thread"] }
# trie
cita_trie = "4.0.0"
hasher = "0.1.4"
# tracing
tracing = "0.1"
@@ -31,18 +25,16 @@ tracing = "0.1"
thiserror = "1.0.37"
auto_impl = "1.0"
itertools = "0.10"
parking_lot = "0.12"
# test-utils
reth-rlp = { path = "../../rlp", optional = true }
parking_lot = { version = "0.12", optional = true }
[dev-dependencies]
reth-db = { path = "../db", features = ["test-utils"] }
reth-primitives = { path = "../../primitives", features = ["arbitrary", "test-utils"] }
parking_lot = "0.12"
proptest = { version = "1.0" }
assert_matches = "1.5"
# trie
triehash = "0.8"
[features]
bench = []
test-utils = []
test-utils = ["reth-rlp", "parking_lot"]

View File

@@ -25,9 +25,6 @@ pub use providers::{
LatestStateProviderRef, ShareableDatabase,
};
/// Helper type for loading Merkle Patricia Trees from the database
pub mod trie;
/// Execution result
pub mod post_state;
pub use post_state::PostState;

View File

@@ -1,6 +1,6 @@
use crate::{
providers::state::macros::delegate_provider_impls, trie::DBTrieLoader, AccountProvider,
BlockHashProvider, StateProvider,
providers::state::macros::delegate_provider_impls, AccountProvider, BlockHashProvider,
StateProvider,
};
use reth_db::{
cursor::{DbCursorRO, DbDupCursorRO},
@@ -10,7 +10,6 @@ use reth_db::{
use reth_interfaces::{provider::ProviderError, Result};
use reth_primitives::{
keccak256, Account, Address, BlockNumber, Bytecode, Bytes, StorageKey, StorageValue, H256,
KECCAK_EMPTY,
};
use std::marker::PhantomData;
@@ -76,11 +75,10 @@ impl<'a, 'b, TX: DbTx<'a>> StateProvider for LatestStateProviderRef<'a, 'b, TX>
fn proof(
&self,
address: Address,
keys: &[H256],
_keys: &[H256],
) -> Result<(Vec<Bytes>, H256, Vec<Vec<Bytes>>)> {
let hashed_address = keccak256(address);
let loader = DBTrieLoader::new(self.db);
let root = self
let _hashed_address = keccak256(address);
let _root = self
.db
.cursor_read::<tables::Headers>()?
.last()?
@@ -88,25 +86,7 @@ impl<'a, 'b, TX: DbTx<'a>> StateProvider for LatestStateProviderRef<'a, 'b, TX>
.1
.state_root;
let (account_proof, storage_root) = loader
.generate_account_proof(root, hashed_address)
.map_err(|_| ProviderError::StateTrie)?;
let account_proof = account_proof.into_iter().map(Bytes::from).collect();
let storage_proof = if storage_root == KECCAK_EMPTY {
// if there isn't storage, we return empty storage proofs
(0..keys.len()).map(|_| Vec::new()).collect()
} else {
let hashed_keys: Vec<H256> = keys.iter().map(keccak256).collect();
loader
.generate_storage_proofs(storage_root, hashed_address, &hashed_keys)
.map_err(|_| ProviderError::StateTrie)?
.into_iter()
.map(|v| v.into_iter().map(Bytes::from).collect())
.collect()
};
Ok((account_proof, storage_root, storage_proof))
unimplemented!()
}
}

View File

@@ -3,8 +3,8 @@
use crate::{post_state::PostState, Transaction};
use reth_db::{database::Database, models::StoredBlockBodyIndices, tables};
use reth_primitives::{
hex_literal::hex, proofs::EMPTY_ROOT, Account, Header, SealedBlock, SealedBlockWithSenders,
Withdrawal, H160, H256, U256,
hex_literal::hex, Account, Header, SealedBlock, SealedBlockWithSenders, Withdrawal, H160, H256,
U256,
};
use reth_rlp::Decodable;
use std::collections::BTreeMap;
@@ -39,7 +39,7 @@ pub fn assert_genesis_block<DB: Database>(tx: &Transaction<'_, DB>, g: SealedBlo
assert_eq!(tx.table::<tables::StorageChangeSet>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::HashedAccount>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::HashedStorage>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::AccountsTrie>().unwrap(), vec![(EMPTY_ROOT, vec![0x80])]);
assert_eq!(tx.table::<tables::AccountsTrie>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::StoragesTrie>().unwrap(), vec![]);
assert_eq!(tx.table::<tables::TxSenders>().unwrap(), vec![]);
// SyncStage is not updated in tests

View File

@@ -1,7 +1,6 @@
use crate::{
insert_canonical_block,
post_state::{Change, PostState, StorageChangeset},
trie::{DBTrieLoader, TrieError},
};
use itertools::{izip, Itertools};
use reth_db::{
@@ -24,6 +23,7 @@ use reth_primitives::{
Header, SealedBlock, SealedBlockWithSenders, StorageEntry, TransactionSignedEcRecovered,
TransitionId, TxNumber, H256, U256,
};
use reth_trie::{StateRoot, StateRootError};
use std::{
collections::{btree_map::Entry, BTreeMap, BTreeSet},
fmt::Debug,
@@ -541,7 +541,6 @@ where
self.insert_block(block)?;
}
self.insert_hashes(
fork_block_number,
first_transition_id,
first_transition_id + num_transitions,
new_tip_number,
@@ -598,7 +597,6 @@ where
/// The resulting state root is compared with `expected_state_root`.
pub fn insert_hashes(
&mut self,
fork_block_number: BlockNumber,
from_transition_id: TransitionId,
to_transition_id: TransitionId,
current_block_number: BlockNumber,
@@ -623,14 +621,14 @@ where
// merkle tree
{
let current_root = self.get_header(fork_block_number)?.state_root;
let mut loader = DBTrieLoader::new(self.deref_mut());
let root = loader
.update_root(current_root, from_transition_id..to_transition_id)
.and_then(|e| e.root())?;
if root != expected_state_root {
let state_root = StateRoot::incremental_root(
self.deref_mut(),
from_transition_id..to_transition_id,
None,
)?;
if state_root != expected_state_root {
return Err(TransactionError::StateTrieRootMismatch {
got: root,
got: state_root,
expected: expected_state_root,
block_number: current_block_number,
block_hash: current_block_hash,
@@ -1111,15 +1109,7 @@ where
self.unwind_storage_history_indices(transition_storage_range)?;
// merkle tree
let new_state_root;
{
let (tip_number, _) =
self.cursor_read::<tables::CanonicalHeaders>()?.last()?.unwrap_or_default();
let current_root = self.get_header(tip_number)?.state_root;
let mut loader = DBTrieLoader::new(self.deref());
new_state_root =
loader.update_root(current_root, transition_range).and_then(|e| e.root())?;
}
let new_state_root = StateRoot::incremental_root(self.deref(), transition_range, None)?;
// state root should be always correct as we are reverting state.
// but for sake of double verification we will check it again.
if new_state_root != parent_state_root {
@@ -1537,14 +1527,14 @@ fn unwind_storage_history_shards<DB: Database>(
#[derive(Debug, thiserror::Error)]
pub enum TransactionError {
/// The transaction encountered a database error.
#[error("Database error: {0}")]
#[error(transparent)]
Database(#[from] DbError),
/// The transaction encountered a database integrity error.
#[error("A database integrity error occurred: {0}")]
#[error(transparent)]
DatabaseIntegrity(#[from] ProviderError),
/// The transaction encountered merkle trie error.
#[error("Merkle trie calculation error: {0}")]
MerkleTrie(#[from] TrieError),
/// The trie error.
#[error(transparent)]
TrieError(#[from] StateRootError),
/// Root mismatch
#[error("Merkle trie root mismatch on block: #{block_number:?} {block_hash:?}. got: {got:?} expected:{expected:?}")]
StateTrieRootMismatch {
@@ -1565,8 +1555,8 @@ mod test {
insert_canonical_block, test_utils::blocks::*, ShareableDatabase, Transaction,
TransactionsProvider,
};
use reth_db::{mdbx::test_utils::create_test_rw_db, tables, transaction::DbTxMut};
use reth_primitives::{proofs::EMPTY_ROOT, ChainSpecBuilder, TransitionId, MAINNET};
use reth_db::mdbx::test_utils::create_test_rw_db;
use reth_primitives::{ChainSpecBuilder, TransitionId, MAINNET};
use std::{ops::DerefMut, sync::Arc};
#[test]
@@ -1587,14 +1577,11 @@ mod test {
let (block2, exec_res2) = data.blocks[1].clone();
insert_canonical_block(tx.deref_mut(), data.genesis.clone(), None, false).unwrap();
tx.put::<tables::AccountsTrie>(EMPTY_ROOT, vec![0x80]).unwrap();
assert_genesis_block(&tx, data.genesis);
exec_res1.clone().write_to_db(tx.deref_mut(), 0).unwrap();
tx.insert_block(block1.clone()).unwrap();
tx.insert_hashes(
genesis.number,
0,
exec_res1.transitions_count() as TransitionId,
block1.number,
@@ -1615,7 +1602,6 @@ mod test {
exec_res1.clone().write_to_db(tx.deref_mut(), 0).unwrap();
tx.insert_block(block1.clone()).unwrap();
tx.insert_hashes(
genesis.number,
0,
exec_res1.transitions_count() as TransitionId,
block1.number,
@@ -1630,7 +1616,6 @@ mod test {
.unwrap();
tx.insert_block(block2.clone()).unwrap();
tx.insert_hashes(
block1.number,
exec_res1.transitions_count() as TransitionId,
(exec_res1.transitions_count() + exec_res2.transitions_count()) as TransitionId,
2,
@@ -1698,8 +1683,6 @@ mod test {
let (block2, exec_res2) = data.blocks[1].clone();
insert_canonical_block(tx.deref_mut(), data.genesis.clone(), None, false).unwrap();
tx.put::<tables::AccountsTrie>(EMPTY_ROOT, vec![0x80]).unwrap();
assert_genesis_block(&tx, data.genesis);
tx.append_blocks_with_post_state(vec![block1.clone()], exec_res1.clone()).unwrap();

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,9 @@ Merkle trie implementation
[dependencies]
# reth
reth-primitives = { path = "../primitives" }
reth-interfaces = { path = "../interfaces" }
reth-rlp = { path = "../rlp" }
reth-db = { path = "../storage/db" }
# tokio
tokio = { version = "1.21.2", default-features = false, features = ["sync"] }
@@ -22,11 +24,17 @@ tracing = "0.1"
# misc
hex = "0.4"
thiserror = "1.0"
derive_more = "0.99"
# test-utils
triehash = { version = "0.8", optional = true }
[dev-dependencies]
# reth
reth-primitives = { path = "../primitives", features = ["test-utils", "arbitrary"] }
reth-db = { path = "../storage/db", features = ["test-utils"] }
reth-provider = { path = "../storage/provider" }
# trie
triehash = "0.8"
@@ -35,3 +43,6 @@ triehash = "0.8"
proptest = "1.0"
tokio = { version = "1.21.2", default-features = false, features = ["sync", "rt", "macros"] }
tokio-stream = "0.1.10"
[features]
test-utils = ["triehash"]

View File

@@ -0,0 +1,94 @@
use super::TrieCursor;
use reth_db::{
cursor::{DbCursorRO, DbCursorRW},
tables, Error,
};
use reth_primitives::trie::{BranchNodeCompact, StoredNibbles};
/// A cursor over the account trie.
///
/// Wraps a database cursor over the `AccountsTrie` table and adapts it to the
/// [`TrieCursor`] abstraction, so the trie walker can navigate account trie
/// nodes without knowing about the underlying table layout.
pub struct AccountTrieCursor<C>(C);
impl<C> AccountTrieCursor<C> {
    /// Create a new account trie cursor wrapping the given database cursor.
    pub fn new(cursor: C) -> Self {
        Self(cursor)
    }
}
impl<'a, C> TrieCursor<StoredNibbles> for AccountTrieCursor<C>
where
    C: DbCursorRO<'a, tables::AccountsTrie> + DbCursorRW<'a, tables::AccountsTrie>,
{
    // Position the cursor at exactly `key`. Returns the packed nibble path and
    // the stored branch node only on an exact hit.
    fn seek_exact(
        &mut self,
        key: StoredNibbles,
    ) -> Result<Option<(Vec<u8>, BranchNodeCompact)>, Error> {
        let entry = self.0.seek_exact(key)?;
        Ok(entry.map(|(nibbles, node)| (nibbles.inner.to_vec(), node)))
    }

    // Position the cursor at `key`, or at the next greater entry if `key`
    // itself is absent.
    fn seek(&mut self, key: StoredNibbles) -> Result<Option<(Vec<u8>, BranchNodeCompact)>, Error> {
        let entry = self.0.seek(key)?;
        Ok(entry.map(|(nibbles, node)| (nibbles.inner.to_vec(), node)))
    }

    // Insert or overwrite the branch node stored at `key`.
    fn upsert(&mut self, key: StoredNibbles, value: BranchNodeCompact) -> Result<(), Error> {
        self.0.upsert(key, value)
    }

    // Remove the entry the cursor currently points at.
    fn delete_current(&mut self) -> Result<(), Error> {
        self.0.delete_current()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use reth_db::{
        cursor::{DbCursorRO, DbCursorRW},
        mdbx::test_utils::create_test_rw_db,
        tables,
        transaction::DbTxMut,
    };
    use reth_primitives::hex_literal::hex;
    use reth_provider::Transaction;
    // Keys inserted out of order must come back from a table walk in
    // lexicographic nibble order, and `seek` must land on the first entry
    // greater than or equal to the requested key.
    #[test]
    fn test_account_trie_order() {
        let db = create_test_rw_db();
        let tx = Transaction::new(db.as_ref()).unwrap();
        let mut cursor = tx.cursor_write::<tables::AccountsTrie>().unwrap();
        // Note: already listed in the expected sorted order.
        let data = vec![
            hex!("0303040e").to_vec(),
            hex!("030305").to_vec(),
            hex!("03030500").to_vec(),
            hex!("0303050a").to_vec(),
        ];
        for key in data.clone() {
            cursor
                .upsert(
                    key.into(),
                    // Arbitrary branch node payload; this test only checks key ordering.
                    BranchNodeCompact::new(
                        0b0000_0010_0000_0001,
                        0b0000_0010_0000_0001,
                        0,
                        Vec::default(),
                        None,
                    ),
                )
                .unwrap();
        }
        let db_data =
            cursor.walk_range(..).unwrap().collect::<std::result::Result<Vec<_>, _>>().unwrap();
        assert_eq!(db_data[0].0.inner.to_vec(), data[0]);
        assert_eq!(db_data[1].0.inner.to_vec(), data[1]);
        assert_eq!(db_data[2].0.inner.to_vec(), data[2]);
        assert_eq!(db_data[3].0.inner.to_vec(), data[3]);
        // Seeking a key that falls between data[0] and data[1] resolves to data[1].
        assert_eq!(
            cursor.seek(hex!("0303040f").to_vec().into()).unwrap().map(|(k, _)| k.inner.to_vec()),
            Some(data[1].clone())
        );
    }
}

View File

@@ -0,0 +1,9 @@
mod account_cursor;
mod storage_cursor;
mod subnode;
mod trie_cursor;
pub use self::{
account_cursor::AccountTrieCursor, storage_cursor::StorageTrieCursor, subnode::CursorSubNode,
trie_cursor::TrieCursor,
};

View File

@@ -0,0 +1,92 @@
use super::TrieCursor;
use reth_db::{
cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW},
tables, Error,
};
use reth_primitives::{
trie::{BranchNodeCompact, StorageTrieEntry, StoredNibblesSubKey},
H256,
};
/// A cursor over the storage trie.
///
/// Wraps a dup-sorted database cursor over the `StoragesTrie` table, scoped to
/// the storage trie of a single hashed account address.
pub struct StorageTrieCursor<C> {
    /// The underlying cursor.
    pub cursor: C,
    // The hashed address of the account whose storage trie entries this cursor
    // is scoped to; used as the table key for every operation.
    hashed_address: H256,
}
impl<C> StorageTrieCursor<C> {
    /// Create a new storage trie cursor scoped to `hashed_address`.
    pub fn new(cursor: C, hashed_address: H256) -> Self {
        Self { cursor, hashed_address }
    }
}
impl<'a, C> TrieCursor<StoredNibblesSubKey> for StorageTrieCursor<C>
where
    C: DbDupCursorRO<'a, tables::StoragesTrie>
        + DbDupCursorRW<'a, tables::StoragesTrie>
        + DbCursorRO<'a, tables::StoragesTrie>
        + DbCursorRW<'a, tables::StoragesTrie>,
{
    // Seek the subkey under this account, keeping the entry only on an exact
    // subkey match (the dup cursor may land on a greater subkey).
    fn seek_exact(
        &mut self,
        key: StoredNibblesSubKey,
    ) -> Result<Option<(Vec<u8>, BranchNodeCompact)>, Error> {
        let entry = self.cursor.seek_by_key_subkey(self.hashed_address, key.clone())?;
        Ok(entry.filter(|e| e.nibbles == key).map(|e| (e.nibbles.inner.to_vec(), e.node)))
    }

    // Seek the subkey under this account, accepting the next greater subkey.
    fn seek(
        &mut self,
        key: StoredNibblesSubKey,
    ) -> Result<Option<(Vec<u8>, BranchNodeCompact)>, Error> {
        let entry = self.cursor.seek_by_key_subkey(self.hashed_address, key)?;
        Ok(entry.map(|e| (e.nibbles.inner.to_vec(), e.node)))
    }

    // Dup-sorted tables append duplicate subkeys instead of overwriting them,
    // so an existing entry with the same subkey must be deleted first.
    fn upsert(&mut self, key: StoredNibblesSubKey, value: BranchNodeCompact) -> Result<(), Error> {
        let existing = self.cursor.seek_by_key_subkey(self.hashed_address, key.clone())?;
        if existing.map_or(false, |entry| entry.nibbles == key) {
            self.cursor.delete_current()?;
        }
        self.cursor.upsert(self.hashed_address, StorageTrieEntry { nibbles: key, node: value })?;
        Ok(())
    }

    // Remove the entry the cursor currently points at.
    fn delete_current(&mut self) -> Result<(), Error> {
        self.cursor.delete_current()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use reth_db::{mdbx::test_utils::create_test_rw_db, tables, transaction::DbTxMut};
    use reth_primitives::trie::BranchNodeCompact;
    use reth_provider::Transaction;
    // Round-trip: a node stored via `upsert` must be returned by a subsequent
    // `seek` for the same subkey on the storage trie cursor.
    #[test]
    fn test_storage_cursor_abstraction() {
        let db = create_test_rw_db();
        let tx = Transaction::new(db.as_ref()).unwrap();
        let cursor = tx.cursor_dup_write::<tables::StoragesTrie>().unwrap();
        let mut cursor = StorageTrieCursor::new(cursor, H256::random());
        let key = vec![0x2, 0x3];
        let value = BranchNodeCompact::new(1, 1, 1, vec![H256::random()], None);
        cursor.upsert(key.clone().into(), value.clone()).unwrap();
        assert_eq!(cursor.seek(key.clone().into()).unwrap().unwrap().1, value);
    }
}
}

View File

@@ -0,0 +1,101 @@
use crate::{nodes::CHILD_INDEX_RANGE, Nibbles};
use reth_primitives::{trie::BranchNodeCompact, H256};
/// Cursor for iterating over a subtrie.
///
/// Tracks one position on the trie walker's stack: a node key, the node's data
/// (if resolved), and which child is currently selected.
#[derive(Clone)]
pub struct CursorSubNode {
    /// The key of the current node.
    pub key: Nibbles,
    /// The index of the next child to visit.
    /// `-1` designates the node itself, i.e. no child has been selected yet.
    pub nibble: i8,
    /// The node itself. `None` when only the key position is known.
    pub node: Option<BranchNodeCompact>,
}
impl Default for CursorSubNode {
fn default() -> Self {
Self::new(Nibbles::default(), None)
}
}
impl std::fmt::Debug for CursorSubNode {
    // Manual impl: surfaces the derived flag accessors (state/tree/hash) next
    // to the raw fields, which `#[derive(Debug)]` could not show.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("CursorSubNode")
            .field("key", &self.key)
            .field("nibble", &self.nibble)
            .field("state_flag", &self.state_flag())
            .field("tree_flag", &self.tree_flag())
            .field("hash_flag", &self.hash_flag())
            .field("hash", &self.hash())
            .finish()
    }
}
impl CursorSubNode {
    /// Creates a new `CursorSubNode` from a key and an optional node.
    ///
    /// When a node without a root hash is supplied, the child index starts at
    /// the lowest set bit of its state mask; otherwise it is `-1`, denoting
    /// the node itself.
    pub fn new(key: Nibbles, node: Option<BranchNodeCompact>) -> Self {
        let mut nibble = -1;
        if let Some(n) = &node {
            if n.root_hash.is_none() {
                let first_set =
                    CHILD_INDEX_RANGE.clone().find(|i| n.state_mask.is_bit_set(*i)).unwrap();
                nibble = first_set as i8;
            }
        }
        CursorSubNode { key, node, nibble }
    }

    /// Returns the full key of the current position: the node key, extended by
    /// the selected child nibble when one is selected.
    pub fn full_key(&self) -> Nibbles {
        let mut full = self.key.clone();
        if self.nibble >= 0 {
            full.extend([self.nibble as u8]);
        }
        full
    }

    /// Returns `true` if the state flag is set for the current nibble.
    /// Defaults to `true` when no node is resolved or no child is selected.
    pub fn state_flag(&self) -> bool {
        match &self.node {
            Some(node) if self.nibble >= 0 => node.state_mask.is_bit_set(self.nibble as u8),
            _ => true,
        }
    }

    /// Returns `true` if the tree flag is set for the current nibble.
    /// Defaults to `true` when no node is resolved or no child is selected.
    pub fn tree_flag(&self) -> bool {
        match &self.node {
            Some(node) if self.nibble >= 0 => node.tree_mask.is_bit_set(self.nibble as u8),
            _ => true,
        }
    }

    /// Returns `true` if the current position carries a hash.
    pub fn hash_flag(&self) -> bool {
        let Some(node) = &self.node else { return false };
        match self.nibble {
            // The node itself: only a stored root hash counts.
            -1 => node.root_hash.is_some(),
            // A selected child: consult the hash mask.
            _ => node.hash_mask.is_bit_set(self.nibble as u8),
        }
    }

    /// Returns the hash at the current position, if any.
    pub fn hash(&self) -> Option<H256> {
        if !self.hash_flag() {
            return None
        }
        // `hash_flag` returned true, so a node must be present.
        let node = self.node.as_ref().unwrap();
        match self.nibble {
            -1 => node.root_hash,
            _ => Some(node.hash_for_nibble(self.nibble as u8)),
        }
    }
}

View File

@@ -0,0 +1,17 @@
use reth_db::{table::Key, Error};
use reth_primitives::trie::BranchNodeCompact;
/// A cursor for navigating a trie that works with both Tables and DupSort tables.
///
/// Every method returns the key as a packed byte vector alongside the stored
/// branch node, or a database error.
pub trait TrieCursor<K: Key> {
    /// Move the cursor to the key and return if it is an exact match.
    fn seek_exact(&mut self, key: K) -> Result<Option<(Vec<u8>, BranchNodeCompact)>, Error>;
    /// Move the cursor to the key and return a value matching or greater than the key.
    fn seek(&mut self, key: K) -> Result<Option<(Vec<u8>, BranchNodeCompact)>, Error>;
    /// Upsert the key/value pair.
    fn upsert(&mut self, key: K, value: BranchNodeCompact) -> Result<(), Error>;
    /// Delete the key/value pair at the current cursor position.
    fn delete_current(&mut self) -> Result<(), Error>;
}

20
crates/trie/src/errors.rs Normal file
View File

@@ -0,0 +1,20 @@
use thiserror::Error;
/// State root error.
///
/// Errors that can occur while computing the state root.
#[derive(Error, Debug)]
pub enum StateRootError {
    /// Internal database error.
    #[error(transparent)]
    DB(#[from] reth_db::Error),
    /// Storage root error.
    #[error(transparent)]
    StorageRootError(#[from] StorageRootError),
}
/// Storage root error.
///
/// Errors that can occur while computing an account's storage root.
#[derive(Error, Debug)]
pub enum StorageRootError {
    /// Internal database error.
    #[error(transparent)]
    DB(#[from] reth_db::Error),
}

View File

@@ -8,14 +8,14 @@ use reth_primitives::{
trie::{BranchNodeCompact, TrieMask},
H256,
};
use std::fmt::Debug;
use tokio::sync::mpsc;
use std::{fmt::Debug, sync::mpsc};
mod value;
use value::HashBuilderValue;
/// A type alias for a sender of branch nodes.
pub type BranchNodeSender = mpsc::UnboundedSender<(Nibbles, BranchNodeCompact)>;
/// Branch nodes are sent by the Hash Builder to be stored in the database.
pub type BranchNodeSender = mpsc::Sender<(Nibbles, BranchNodeCompact)>;
/// A component used to construct the root hash of the trie. The primary purpose of a Hash Builder
/// is to build the Merkle proof that is essential for verifying the integrity and authenticity of
@@ -363,8 +363,6 @@ mod tests {
use proptest::prelude::*;
use reth_primitives::{hex_literal::hex, proofs::KeccakHasher, H256, U256};
use std::collections::{BTreeMap, HashMap};
use tokio::sync::mpsc::unbounded_channel;
use tokio_stream::{wrappers::UnboundedReceiverStream, StreamExt};
fn trie_root<I, K, V>(iter: I) -> H256
where
@@ -429,9 +427,9 @@ mod tests {
});
}
#[tokio::test]
async fn test_generates_branch_node() {
let (sender, recv) = unbounded_channel();
#[test]
fn test_generates_branch_node() {
let (sender, recv) = mpsc::channel();
let mut hb = HashBuilder::new(Some(sender));
// We have 1 branch node update to be stored at 0x01, indicated by the first nibble.
@@ -481,8 +479,7 @@ mod tests {
let root = hb.root();
drop(hb);
let receiver = UnboundedReceiverStream::new(recv);
let updates = receiver.collect::<Vec<_>>().await;
let updates = recv.iter().collect::<Vec<_>>();
let updates = updates.iter().cloned().collect::<BTreeMap<_, _>>();
let update = updates.get(&Nibbles::from(hex!("01").as_slice())).unwrap();

View File

@@ -24,3 +24,20 @@ pub mod hash_builder;
/// The implementation of a container for storing intermediate changes to a trie.
/// The container indicates when the trie has been modified.
pub mod prefix_set;
/// The cursor implementations for navigating account and storage tries.
pub mod cursor;
/// The trie walker for iterating over the trie nodes.
pub mod walker;
mod errors;
pub use errors::{StateRootError, StorageRootError};
/// The implementation of the Merkle Patricia Trie.
mod trie;
pub use trie::{BranchNodeUpdate, BranchNodeUpdateSender, StateRoot, StorageRoot};
/// Collection of trie-related test utilities.
#[cfg(any(test, feature = "test-utils"))]
pub mod test_utils;

View File

@@ -217,8 +217,8 @@ impl Nibbles {
}
/// Extend the current nibbles with another nibbles.
pub fn extend(&mut self, b: &Nibbles) {
self.hex_data.extend_from_slice(b);
pub fn extend(&mut self, b: impl AsRef<[u8]>) {
self.hex_data.extend_from_slice(b.as_ref());
}
/// Truncate the current nibbles to the given length.

View File

@@ -1,4 +1,4 @@
use super::rlp_node;
use super::{rlp_node, CHILD_INDEX_RANGE};
use reth_primitives::{bytes::BytesMut, trie::TrieMask, H256};
use reth_rlp::{BufMut, EMPTY_STRING_CODE};
@@ -25,7 +25,7 @@ impl<'a> BranchNode<'a> {
hash_mask: TrieMask,
) -> impl Iterator<Item = H256> + '_ {
let mut index = self.stack.len() - state_mask.count_ones() as usize;
(0..16).filter_map(move |digit| {
CHILD_INDEX_RANGE.filter_map(move |digit| {
let mut child = None;
if state_mask.is_bit_set(digit) {
if hash_mask.is_bit_set(digit) {
@@ -44,7 +44,7 @@ impl<'a> BranchNode<'a> {
// Create the RLP header from the mask elements present.
let mut i = first_child_idx;
let header = (0..16).fold(
let header = CHILD_INDEX_RANGE.fold(
reth_rlp::Header { list: true, payload_length: 1 },
|mut header, digit| {
if state_mask.is_bit_set(digit) {
@@ -60,7 +60,7 @@ impl<'a> BranchNode<'a> {
// Extend the RLP buffer with the present children
let mut i = first_child_idx;
(0..16).for_each(|idx| {
CHILD_INDEX_RANGE.for_each(|idx| {
if state_mask.is_bit_set(idx) {
buf.extend_from_slice(&self.stack[i]);
i += 1;

View File

@@ -1,5 +1,6 @@
use reth_primitives::{keccak256, H256};
use reth_rlp::EMPTY_STRING_CODE;
use std::ops::Range;
mod branch;
mod extension;
@@ -7,6 +8,9 @@ mod leaf;
pub use self::{branch::BranchNode, extension::ExtensionNode, leaf::LeafNode};
/// The range of valid child indexes.
pub const CHILD_INDEX_RANGE: Range<u8> = 0..16;
/// Given an RLP encoded node, returns either RLP(Node) or RLP(keccak(RLP(node)))
fn rlp_node(rlp: &[u8]) -> Vec<u8> {
if rlp.len() < H256::len_bytes() {

View File

@@ -0,0 +1,62 @@
use super::PrefixSet;
use crate::Nibbles;
use derive_more::Deref;
use reth_db::{
cursor::DbCursorRO,
models::{AccountBeforeTx, TransitionIdAddress},
tables,
transaction::DbTx,
Error,
};
use reth_primitives::{keccak256, Address, StorageEntry, TransitionId, H256};
use std::{collections::HashMap, ops::Range};
/// A wrapper around a database transaction that loads prefix sets within a given transition range.
///
/// Derefs to the underlying transaction so cursor helpers can be called on it
/// directly (see `load`).
#[derive(Deref)]
pub struct PrefixSetLoader<'a, TX>(&'a TX);
impl<'a, TX> PrefixSetLoader<'a, TX> {
    /// Create a new loader over the given transaction.
    pub fn new(tx: &'a TX) -> Self {
        Self(tx)
    }
}
impl<'a, 'b, TX> PrefixSetLoader<'a, TX>
where
TX: DbTx<'b>,
{
/// Load all account and storage changes for the given transition id range.
pub fn load(
self,
tid_range: Range<TransitionId>,
) -> Result<(PrefixSet, HashMap<H256, PrefixSet>), Error> {
// Initialize prefix sets.
let mut account_prefix_set = PrefixSet::default();
let mut storage_prefix_set: HashMap<H256, PrefixSet> = HashMap::default();
// Walk account changeset and insert account prefixes.
let mut account_cursor = self.cursor_read::<tables::AccountChangeSet>()?;
for account_entry in account_cursor.walk_range(tid_range.clone())? {
let (_, AccountBeforeTx { address, .. }) = account_entry?;
account_prefix_set.insert(Nibbles::unpack(keccak256(address)));
}
// Walk storage changeset and insert storage prefixes as well as account prefixes if missing
// from the account prefix set.
let mut storage_cursor = self.cursor_dup_read::<tables::StorageChangeSet>()?;
let start = TransitionIdAddress((tid_range.start, Address::zero()));
let end = TransitionIdAddress((tid_range.end, Address::zero()));
for storage_entry in storage_cursor.walk_range(start..end)? {
let (TransitionIdAddress((_, address)), StorageEntry { key, .. }) = storage_entry?;
let hashed_address = keccak256(address);
account_prefix_set.insert(Nibbles::unpack(hashed_address));
storage_prefix_set
.entry(hashed_address)
.or_default()
.insert(Nibbles::unpack(keccak256(key)));
}
Ok((account_prefix_set, storage_prefix_set))
}
}

View File

@@ -1,6 +1,9 @@
use crate::Nibbles;
use std::collections::BTreeSet;
mod loader;
pub use loader::PrefixSetLoader;
/// A container for efficiently storing and checking for the presence of key prefixes.
///
/// This data structure stores a set of `Nibbles` and provides methods to insert

View File

@@ -0,0 +1,51 @@
use crate::account::EthAccount;
use reth_primitives::{proofs::KeccakHasher, Account, Address, H256, U256};
use reth_rlp::{encode_fixed_size, Encodable};
/// Re-export of [triehash].
pub use triehash;
/// Compute the state root of a given set of accounts using [triehash::sec_trie_root].
///
/// Each account's storage root is computed first, then the RLP-encoded account
/// is keyed by its (to-be-hashed) address.
pub fn state_root<I, S>(accounts: I) -> H256
where
    I: Iterator<Item = (Address, (Account, S))>,
    S: IntoIterator<Item = (H256, U256)>,
{
    let encoded = accounts.map(|(address, (account, storage))| {
        let storage_root = storage_root(storage.into_iter());
        let mut rlp = Vec::new();
        EthAccount::from(account).with_storage_root(storage_root).encode(&mut rlp);
        (address, rlp)
    });
    triehash::sec_trie_root::<KeccakHasher, _, _, _>(encoded)
}
/// Compute the storage root for a given account using [triehash::sec_trie_root].
///
/// Values are RLP-encoded with [encode_fixed_size]; keys are hashed by the trie.
pub fn storage_root<I: Iterator<Item = (H256, U256)>>(storage: I) -> H256 {
    triehash::sec_trie_root::<KeccakHasher, _, _, _>(
        storage.map(|(slot, value)| (slot, encode_fixed_size(&value).to_vec())),
    )
}
/// Compute the state root of a given set of accounts with prehashed keys using
/// [triehash::trie_root].
///
/// Identical to [state_root] except the account keys are assumed to already be
/// hashed, so the plain (non-secure) trie root is used.
pub fn state_root_prehashed<I, S>(accounts: I) -> H256
where
    I: Iterator<Item = (H256, (Account, S))>,
    S: IntoIterator<Item = (H256, U256)>,
{
    let encoded = accounts.map(|(hashed_address, (account, storage))| {
        let storage_root = storage_root_prehashed(storage.into_iter());
        let mut rlp = Vec::new();
        EthAccount::from(account).with_storage_root(storage_root).encode(&mut rlp);
        (hashed_address, rlp)
    });
    triehash::trie_root::<KeccakHasher, _, _, _>(encoded)
}
/// Compute the storage root for a given account with prehashed slots using [triehash::trie_root].
///
/// Identical to [storage_root] except the slot keys are assumed to already be
/// hashed, so the plain (non-secure) trie root is used.
pub fn storage_root_prehashed<I: Iterator<Item = (H256, U256)>>(storage: I) -> H256 {
    triehash::trie_root::<KeccakHasher, _, _, _>(
        storage.map(|(slot, value)| (slot, encode_fixed_size(&value).to_vec())),
    )
}

1167
crates/trie/src/trie.rs Normal file

File diff suppressed because it is too large Load Diff

339
crates/trie/src/walker.rs Normal file
View File

@@ -0,0 +1,339 @@
use crate::{
cursor::{CursorSubNode, TrieCursor},
prefix_set::PrefixSet,
Nibbles,
};
use reth_db::{table::Key, Error};
use reth_primitives::{trie::BranchNodeCompact, H256};
use std::marker::PhantomData;
/// `TrieWalker` is a structure that enables traversal of a Merkle trie.
/// It allows moving through the trie in a depth-first manner, skipping certain branches:
/// a node is skipped when its hash is already stored and its key is not covered by the
/// prefix set of changes (see `can_skip_current_node`).
pub struct TrieWalker<'a, K, C> {
    /// A mutable reference to a trie cursor instance used for navigating the trie.
    pub cursor: &'a mut C,
    /// A vector containing the trie nodes that have been visited.
    pub stack: Vec<CursorSubNode>,
    /// A flag indicating whether the current node can be skipped when traversing the trie. This
    /// is determined by whether the current key's prefix is included in the prefix set and if the
    /// hash flag is set.
    pub can_skip_current_node: bool,
    /// A `PrefixSet` representing the changes to be applied to the trie.
    pub changes: PrefixSet,
    // Ties the otherwise-unused key type parameter `K` to the walker.
    __phantom: PhantomData<K>,
}
impl<'a, K: Key + From<Vec<u8>>, C: TrieCursor<K>> TrieWalker<'a, K, C> {
    /// Constructs a new TrieWalker, setting up the initial state of the stack and cursor.
    ///
    /// NOTE(review): a database error while seeking the root node panics here
    /// via `unwrap` — confirm this is intended rather than a fallible `new`.
    pub fn new(cursor: &'a mut C, changes: PrefixSet) -> Self {
        // Initialize the walker with a single empty stack element.
        let mut this = Self {
            cursor,
            changes,
            can_skip_current_node: false,
            stack: vec![CursorSubNode::default()],
            __phantom: PhantomData::default(),
        };
        // Set up the root node of the trie in the stack, if it exists.
        if let Some((key, value)) = this.node(true).unwrap() {
            this.stack[0] = CursorSubNode::new(key, Some(value));
        }
        // Update the skip state for the root node.
        this.update_skip_node();
        this
    }
    /// Prints the current stack of trie nodes.
    ///
    /// Debugging aid only; writes directly to stdout.
    pub fn print_stack(&self) {
        println!("====================== STACK ======================");
        for node in &self.stack {
            println!("{node:?}");
        }
        println!("====================== END STACK ======================\n");
    }
    /// Advances the walker to the next trie node and updates the skip node flag.
    ///
    /// # Returns
    ///
    /// * `Result<Option<Nibbles>, Error>` - The next key in the trie or an error.
    ///   `None` indicates the traversal is exhausted (empty stack).
    pub fn advance(&mut self) -> Result<Option<Nibbles>, Error> {
        if let Some(last) = self.stack.last() {
            if !self.can_skip_current_node && self.children_are_in_trie() {
                // If we can't skip the current node and the children are in the trie,
                // either consume the next node or move to the next sibling.
                match last.nibble {
                    // nibble == -1: the node itself has not been consumed yet.
                    -1 => self.move_to_next_sibling(true)?,
                    _ => self.consume_node()?,
                }
            } else {
                // If we can skip the current node, move to the next sibling.
                self.move_to_next_sibling(false)?;
            }
            // Update the skip node flag based on the new position in the trie.
            self.update_skip_node();
        }
        // Return the current key.
        Ok(self.key())
    }
    /// Retrieves the current root node from the DB, seeking either the exact node or the next one.
    ///
    /// # Panics
    ///
    /// Panics if the stack is empty (no current key) or if a stored node has an
    /// empty state mask — both indicate corrupted walker or database state.
    fn node(&mut self, exact: bool) -> Result<Option<(Nibbles, BranchNodeCompact)>, Error> {
        let key = self.key().expect("key must exist");
        let entry = if exact {
            self.cursor.seek_exact(key.hex_data.into())?
        } else {
            self.cursor.seek(key.hex_data.into())?
        };
        // Sanity check: stored branch nodes must have at least one child.
        if let Some((_, node)) = &entry {
            assert!(!node.state_mask.is_empty());
        }
        Ok(entry.map(|(k, v)| (Nibbles::from(k), v)))
    }
    /// Consumes the next node in the trie, updating the stack.
    fn consume_node(&mut self) -> Result<(), Error> {
        let Some((key, node)) = self.node(false)? else {
            // If no next node is found, clear the stack.
            self.stack.clear();
            return Ok(());
        };
        // Overwrite the root node's first nibble
        // We need to sync the stack with the trie structure when consuming a new node. This is
        // necessary for proper traversal and accurately representing the trie in the stack.
        if !key.is_empty() && !self.stack.is_empty() {
            self.stack[0].nibble = key[0] as i8;
        }
        // Create a new CursorSubNode and push it to the stack.
        let subnode = CursorSubNode::new(key, Some(node));
        let nibble = subnode.nibble;
        self.stack.push(subnode);
        self.update_skip_node();
        // Delete the current node if it's included in the prefix set or it doesn't contain the root
        // hash.
        if !self.can_skip_current_node || nibble != -1 {
            self.cursor.delete_current()?;
        }
        Ok(())
    }
    /// Moves to the next sibling node in the trie, updating the stack.
    ///
    /// Recurses up the stack (popping exhausted nodes) until a sibling with
    /// state is found or the stack is empty.
    fn move_to_next_sibling(&mut self, allow_root_to_child_nibble: bool) -> Result<(), Error> {
        let Some(subnode) = self.stack.last_mut() else {
            return Ok(());
        };
        // Check if the walker needs to backtrack to the previous level in the trie during its
        // traversal.
        if subnode.nibble >= 15 || (subnode.nibble < 0 && !allow_root_to_child_nibble) {
            self.stack.pop();
            self.move_to_next_sibling(false)?;
            return Ok(())
        }
        subnode.nibble += 1;
        // Unresolved node: fetch it from the database before continuing.
        if subnode.node.is_none() {
            return self.consume_node()
        }
        // Find the next sibling with state.
        while subnode.nibble < 16 {
            if subnode.state_flag() {
                return Ok(())
            }
            subnode.nibble += 1;
        }
        // Pop the current node and move to the next sibling.
        self.stack.pop();
        self.move_to_next_sibling(false)?;
        Ok(())
    }
    /// Returns the current key in the trie.
    pub fn key(&self) -> Option<Nibbles> {
        self.stack.last().map(|n| n.full_key())
    }
    /// Returns the current hash in the trie if any.
    pub fn hash(&self) -> Option<H256> {
        self.stack.last().and_then(|n| n.hash())
    }
    /// Indicates whether the children of the current node are present in the trie.
    pub fn children_are_in_trie(&self) -> bool {
        self.stack.last().map_or(false, |n| n.tree_flag())
    }
    /// Returns the next unprocessed key in the trie.
    ///
    /// When the current node is skippable, the key is first incremented past
    /// the skipped subtree; the packed result is zero-padded to 32 bytes.
    pub fn next_unprocessed_key(&self) -> Option<H256> {
        self.key()
            .as_ref()
            .and_then(|key| {
                if self.can_skip_current_node {
                    key.increment().map(|inc| inc.pack())
                } else {
                    Some(key.pack())
                }
            })
            .map(|mut key| {
                key.resize(32, 0);
                H256::from_slice(key.as_slice())
            })
    }
    // A node can be skipped iff its key is not in the change set AND its hash
    // is already known (hash flag set). An empty stack is never skippable.
    fn update_skip_node(&mut self) {
        self.can_skip_current_node = if let Some(key) = self.key() {
            let contains_prefix = self.changes.contains(key);
            let hash_flag = self.stack.last().unwrap().hash_flag();
            !contains_prefix && hash_flag
        } else {
            false
        };
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::cursor::{AccountTrieCursor, StorageTrieCursor};
    use reth_db::{mdbx::test_utils::create_test_rw_db, tables, transaction::DbTxMut};
    use reth_provider::Transaction;
    #[test]
    fn walk_nodes_with_common_prefix() {
        let inputs = vec![
            (vec![0x5u8], BranchNodeCompact::new(0b1_0000_0101, 0b1_0000_0100, 0, vec![], None)),
            (vec![0x5u8, 0x2, 0xC], BranchNodeCompact::new(0b1000_0111, 0, 0, vec![], None)),
            (vec![0x5u8, 0x8], BranchNodeCompact::new(0b0110, 0b0100, 0, vec![], None)),
        ];
        let expected = vec![
            vec![0x5, 0x0],
            // The [0x5, 0x2] prefix is shared by the first 2 nodes, however:
            // 1. 0x2 for the first node points to the child node path
            // 2. 0x2 for the second node is a key.
            // So to proceed to add 1 and 3, we need to push the sibling first (0xC).
            vec![0x5, 0x2],
            vec![0x5, 0x2, 0xC, 0x0],
            vec![0x5, 0x2, 0xC, 0x1],
            vec![0x5, 0x2, 0xC, 0x2],
            vec![0x5, 0x2, 0xC, 0x7],
            vec![0x5, 0x8],
            vec![0x5, 0x8, 0x1],
            vec![0x5, 0x8, 0x2],
        ];
        let db = create_test_rw_db();
        let tx = Transaction::new(db.as_ref()).unwrap();
        // The same traversal must hold for both account and storage trie cursors.
        let account_trie =
            AccountTrieCursor::new(tx.cursor_write::<tables::AccountsTrie>().unwrap());
        test_cursor(account_trie, &inputs, &expected);
        let storage_trie = StorageTrieCursor::new(
            tx.cursor_dup_write::<tables::StoragesTrie>().unwrap(),
            H256::random(),
        );
        test_cursor(storage_trie, &inputs, &expected);
    }
    // Inserts `inputs` through the cursor, then checks the walker visits exactly
    // the `expected` paths, in order, before yielding `None`.
    fn test_cursor<K, T>(mut trie: T, inputs: &[(Vec<u8>, BranchNodeCompact)], expected: &[Vec<u8>])
    where
        K: Key + From<Vec<u8>>,
        T: TrieCursor<K>,
    {
        for (k, v) in inputs {
            trie.upsert(k.clone().into(), v.clone()).unwrap();
        }
        let mut walker = TrieWalker::new(&mut trie, Default::default());
        assert!(walker.key().unwrap().is_empty());
        // We're traversing the paths in lexicographic order.
        for expected in expected {
            let got = walker.advance().unwrap();
            assert_eq!(got.unwrap(), Nibbles::from(&expected[..]));
        }
        // Once all expected paths are consumed, the walker is exhausted.
        let got = walker.advance().unwrap();
        assert!(got.is_none());
    }
    #[test]
    fn cursor_rootnode_with_changesets() {
        let db = create_test_rw_db();
        let tx = Transaction::new(db.as_ref()).unwrap();
        let mut trie = StorageTrieCursor::new(
            tx.cursor_dup_write::<tables::StoragesTrie>().unwrap(),
            H256::random(),
        );
        let nodes = vec![
            (
                vec![],
                BranchNodeCompact::new(
                    // 2 and 4 are set
                    0b10100,
                    0b00100,
                    0,
                    vec![],
                    Some(H256::random()),
                ),
            ),
            (
                vec![0x2],
                BranchNodeCompact::new(
                    // 1 is set
                    0b00010,
                    0,
                    0b00010,
                    vec![H256::random()],
                    None,
                ),
            ),
        ];
        for (k, v) in nodes {
            trie.upsert(k.into(), v).unwrap();
        }
        // No changes
        let mut cursor = TrieWalker::new(&mut trie, Default::default());
        assert_eq!(cursor.key(), Some(Nibbles::from(vec![]))); // root
        assert!(cursor.can_skip_current_node); // due to root_hash
        cursor.advance().unwrap(); // skips to the end of trie
        assert_eq!(cursor.key(), None);
        // We insert something that's not part of the existing trie/prefix.
        let mut changed = PrefixSet::default();
        changed.insert(&[0xF, 0x1]);
        let mut cursor = TrieWalker::new(&mut trie, changed);
        // Root node
        assert_eq!(cursor.key(), Some(Nibbles::from(vec![])));
        // Should not be able to skip state due to the changed values
        assert!(!cursor.can_skip_current_node);
        cursor.advance().unwrap();
        assert_eq!(cursor.key(), Some(Nibbles::from(vec![0x2])));
        cursor.advance().unwrap();
        assert_eq!(cursor.key(), Some(Nibbles::from(vec![0x2, 0x1])));
        cursor.advance().unwrap();
        assert_eq!(cursor.key(), Some(Nibbles::from(vec![0x4])));
        cursor.advance().unwrap();
        assert_eq!(cursor.key(), None); // the end of trie
    }
}