From df43f5de50b55ec1abf65a1831fb46a9e576c813 Mon Sep 17 00:00:00 2001
From: yongkangc
Date: Tue, 6 Jan 2026 02:33:01 +0000
Subject: [PATCH] bench: add trie write batching micro-benchmark

Adds a benchmark to measure the performance improvement from the
locality-aware storage trie write batching optimization.
---
 Cargo.lock                                    |   1 +
 crates/trie/db/Cargo.toml                     |   5 +
 crates/trie/db/benches/trie_write_batching.rs | 165 ++++++++++++++++++
 3 files changed, 171 insertions(+)
 create mode 100644 crates/trie/db/benches/trie_write_batching.rs

diff --git a/Cargo.lock b/Cargo.lock
index adf941b50e..e5abd02a54 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -11098,6 +11098,7 @@ dependencies = [
  "alloy-consensus",
  "alloy-primitives",
  "alloy-rlp",
+ "codspeed-criterion-compat",
  "proptest",
  "proptest-arbitrary-interop",
  "reth-chainspec",
diff --git a/crates/trie/db/Cargo.toml b/crates/trie/db/Cargo.toml
index 09ccd30119..432ef7d039 100644
--- a/crates/trie/db/Cargo.toml
+++ b/crates/trie/db/Cargo.toml
@@ -46,6 +46,11 @@ proptest.workspace = true
 proptest-arbitrary-interop.workspace = true
 serde_json.workspace = true
 similar-asserts.workspace = true
+criterion.workspace = true
+
+[[bench]]
+name = "trie_write_batching"
+harness = false
 
 [features]
 metrics = ["reth-trie/metrics"]
diff --git a/crates/trie/db/benches/trie_write_batching.rs b/crates/trie/db/benches/trie_write_batching.rs
new file mode 100644
index 0000000000..e84a746a3a
--- /dev/null
+++ b/crates/trie/db/benches/trie_write_batching.rs
@@ -0,0 +1,165 @@
#![allow(missing_docs)]

//! Benchmark for the storage trie write batching optimization.
//!
//! This benchmark compares write performance using:
//! 1. The optimized implementation (locality-aware batched writes)
//! 2. A baseline that seeks for every update
//!
//! The optimization exploits the fact that storage trie updates are sorted,
//! so O(1) `next_dup` operations can replace O(log N) seek operations.
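//!
//! For intuition, the two access patterns differ roughly as sketched below.
//! This is an illustrative sketch, not the actual implementation: `cursor`
//! stands for a duplicate-sort cursor over `tables::StoragesTrie` and the
//! merge logic is elided.
//!
//! ```ignore
//! // Baseline: an O(log N) B-tree seek for every single update.
//! for (nibbles, _) in &updates.storage_nodes {
//!     cursor.seek_by_key_subkey(hashed_address, StoredNibblesSubKey(*nibbles))?;
//! }
//!
//! // Batched: seek once, then advance with O(1) `next_dup` steps, relying on
//! // the updates and the stored duplicates sharing the same sort order.
//! cursor.seek_exact(hashed_address)?;
//! while cursor.next_dup()?.is_some() {
//!     // compare the current entry with the next sorted update, then
//!     // delete/upsert as needed
//! }
//! ```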

use alloy_primitives::B256;
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use reth_db_api::{
    cursor::{DbCursorRW, DbDupCursorRO},
    tables,
    transaction::DbTxMut,
};
use reth_provider::test_utils::create_test_provider_factory;
use reth_trie::{
    updates::StorageTrieUpdatesSorted, BranchNodeCompact, Nibbles, StorageTrieEntry,
    StoredNibblesSubKey,
};
use reth_trie_db::DatabaseStorageTrieCursor;
use std::hint::black_box;

/// Numbers of entries to update per benchmark case.
const UPDATE_COUNTS: &[usize] = &[100, 500, 1000];

/// Generate sorted nibbles keys for the `StoragesTrie` table.
fn generate_sorted_nibbles_keys(count: usize) -> Vec<Nibbles> {
    let mut keys: Vec<Nibbles> =
        (0..count as u64).map(|i| Nibbles::unpack(&i.to_be_bytes())).collect();
    // Big-endian encoding already yields lexicographically ordered keys; the
    // sort just makes the invariant explicit.
    keys.sort();
    keys
}

/// Create storage trie updates for benchmarking.
fn create_storage_updates(keys: &[Nibbles]) -> StorageTrieUpdatesSorted {
    let storage_nodes: Vec<(Nibbles, Option<BranchNodeCompact>)> = keys
        .iter()
        .map(|key| {
            let node = BranchNodeCompact::new(0b1111, 0b0011, 0, vec![], None);
            (*key, Some(node))
        })
        .collect();
    StorageTrieUpdatesSorted { is_deleted: false, storage_nodes }
}

/// Benchmark storage trie write batching optimization.
fn bench_storage_trie_writes(c: &mut Criterion) {
    let mut group = c.benchmark_group("StoragesTrie Write Batching");

    for &count in UPDATE_COUNTS {
        let keys = generate_sorted_nibbles_keys(count);
        let updates = create_storage_updates(&keys);

        // Benchmark: optimized batched writes with the locality optimization.
        // Provider setup and pre-population run inside `iter` and are measured
        // too, but the identical setup runs in the baseline below, so the
        // comparison stays apples-to-apples.
        group.bench_with_input(
            BenchmarkId::new("optimized_batched_write", count),
            &count,
            |b, _| {
                b.iter(|| {
                    let factory = create_test_provider_factory();
                    let provider = factory.provider_rw().unwrap();
                    let hashed_address = B256::random();

                    // Pre-populate with some existing entries.
                    {
                        let mut cursor = provider
                            .tx_ref()
                            .cursor_dup_write::<tables::StoragesTrie>()
                            .unwrap();
                        for key in keys.iter().step_by(2) {
                            let node = BranchNodeCompact::new(0b1010, 0b1010, 0, vec![], None);
                            cursor
                                .upsert(
                                    hashed_address,
                                    &StorageTrieEntry {
                                        nibbles: StoredNibblesSubKey(*key),
                                        node,
                                    },
                                )
                                .unwrap();
                        }
                    }

                    // Perform the optimized write. The transaction is dropped
                    // without committing; only the write path is measured.
                    let mut cursor = DatabaseStorageTrieCursor::new(
                        provider.tx_ref().cursor_dup_write::<tables::StoragesTrie>().unwrap(),
                        hashed_address,
                    );
                    let _ = black_box(cursor.write_storage_trie_updates_sorted(black_box(&updates)));
                });
            },
        );

        // Benchmark: baseline that seeks for every update.
        group.bench_with_input(
            BenchmarkId::new("baseline_seek_per_update", count),
            &count,
            |b, _| {
                b.iter(|| {
                    let factory = create_test_provider_factory();
                    let provider = factory.provider_rw().unwrap();
                    let hashed_address = B256::random();

                    // Pre-populate with some existing entries.
                    {
                        let mut cursor = provider
                            .tx_ref()
                            .cursor_dup_write::<tables::StoragesTrie>()
                            .unwrap();
                        for key in keys.iter().step_by(2) {
                            let node = BranchNodeCompact::new(0b1010, 0b1010, 0, vec![], None);
                            cursor
                                .upsert(
                                    hashed_address,
                                    &StorageTrieEntry {
                                        nibbles: StoredNibblesSubKey(*key),
                                        node,
                                    },
                                )
                                .unwrap();
                        }
                    }

                    // Perform writes with a seek per update (baseline). The empty
                    // root nibbles are skipped, mirroring the optimized path.
                    let mut cursor = provider
                        .tx_ref()
                        .cursor_dup_write::<tables::StoragesTrie>()
                        .unwrap();

                    for (nibbles, maybe_updated) in
                        updates.storage_nodes.iter().filter(|(n, _)| !n.is_empty())
                    {
                        let target = StoredNibblesSubKey(*nibbles);
                        // Baseline: always seek.
                        if cursor
                            .seek_by_key_subkey(hashed_address, target.clone())
                            .unwrap()
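                            // `seek_by_key_subkey` positions the cursor at the first
                            // duplicate whose subkey is >= `target`, so an exact-match
                            // filter is required before deleting the current entry.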
+ .filter(|e| e.nibbles == target) + .is_some() + { + cursor.delete_current().unwrap(); + } + + if let Some(node) = maybe_updated { + cursor + .upsert( + hashed_address, + &StorageTrieEntry { nibbles: target, node: node.clone() }, + ) + .unwrap(); + } + } + }); + }, + ); + } + + group.finish(); +} + +criterion_group!(benches, bench_storage_trie_writes); +criterion_main!(benches);
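
// To run this benchmark on its own (the package name is assumed from the
// crate path; the bench name comes from the `[[bench]]` entry in Cargo.toml):
//
//     cargo bench -p reth-trie-db --bench trie_write_batching
//
// Criterion's own harness (enabled via `harness = false`) also accepts an ID
// filter, e.g. append `-- optimized` to run only the batched variant.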