test(storage): add parametrized MDBX/RocksDB history lookup equivalence tests (#20871)

This commit is contained in:
YK
2026-01-15 19:16:40 +08:00
committed by GitHub
parent b25f32a977
commit 9bcd3712c8
4 changed files with 515 additions and 180 deletions

View File

@@ -10,7 +10,7 @@ use std::{
#[cfg(all(unix, feature = "rocksdb"))]
use crate::providers::rocksdb::RocksDBBatch;
use crate::{
providers::{StaticFileProvider, StaticFileProviderRWRefMut},
providers::{history_info, HistoryInfo, StaticFileProvider, StaticFileProviderRWRefMut},
StaticFileProviderFactory,
};
use alloy_primitives::{map::HashMap, Address, BlockNumber, TxHash, TxNumber};
@@ -708,7 +708,7 @@ impl<CURSOR, N: NodePrimitives> EitherReader<'_, CURSOR, N>
where
CURSOR: DbCursorRO<tables::StoragesHistory>,
{
/// Gets a storage history entry.
/// Gets a storage history shard entry for the given [`StorageShardedKey`], if present.
pub fn get_storage_history(
&mut self,
key: StorageShardedKey,
@@ -720,13 +720,43 @@ where
Self::RocksDB(tx) => tx.get::<tables::StoragesHistory>(key),
}
}
/// Lookup storage history and return [`HistoryInfo`] for the given slot at `block_number`.
///
/// `lowest_available_block_number` is the pruning boundary: when set, a missing shard
/// resolves to a plain-state fallback instead of `NotYetWritten`.
pub fn storage_history_info(
&mut self,
address: Address,
storage_key: alloy_primitives::B256,
block_number: BlockNumber,
lowest_available_block_number: Option<BlockNumber>,
) -> ProviderResult<HistoryInfo> {
match self {
Self::Database(cursor, _) => {
// MDBX path: build the sharded key and run the shared lookup over the cursor.
let key = StorageShardedKey::new(address, storage_key, block_number);
history_info::<tables::StoragesHistory, _, _>(
cursor,
key,
block_number,
// Only accept shards belonging to this (address, slot) pair.
|k| k.address == address && k.sharded_key.key == storage_key,
lowest_available_block_number,
)
}
// Static files do not index storage history.
Self::StaticFile(_, _) => Err(ProviderError::UnsupportedProvider),
#[cfg(all(unix, feature = "rocksdb"))]
Self::RocksDB(tx) => tx.storage_history_info(
address,
storage_key,
block_number,
lowest_available_block_number,
),
}
}
}
impl<CURSOR, N: NodePrimitives> EitherReader<'_, CURSOR, N>
where
CURSOR: DbCursorRO<tables::AccountsHistory>,
{
/// Gets an account history entry.
/// Gets an account history shard entry for the given [`ShardedKey`], if present.
pub fn get_account_history(
&mut self,
key: ShardedKey<Address>,
@@ -738,6 +768,32 @@ where
Self::RocksDB(tx) => tx.get::<tables::AccountsHistory>(key),
}
}
/// Lookup account history and return [`HistoryInfo`] for `address` at `block_number`.
///
/// `lowest_available_block_number` is the pruning boundary: when set, a missing shard
/// resolves to a plain-state fallback instead of `NotYetWritten`.
pub fn account_history_info(
&mut self,
address: Address,
block_number: BlockNumber,
lowest_available_block_number: Option<BlockNumber>,
) -> ProviderResult<HistoryInfo> {
match self {
Self::Database(cursor, _) => {
// MDBX path: build the sharded key and run the shared lookup over the cursor.
let key = ShardedKey::new(address, block_number);
history_info::<tables::AccountsHistory, _, _>(
cursor,
key,
block_number,
// Only accept shards belonging to this address.
|k| k.key == address,
lowest_available_block_number,
)
}
// Static files do not index account history.
Self::StaticFile(_, _) => Err(ProviderError::UnsupportedProvider),
#[cfg(all(unix, feature = "rocksdb"))]
Self::RocksDB(tx) => {
tx.account_history_info(address, block_number, lowest_available_block_number)
}
}
}
}
impl<CURSOR, N: NodePrimitives> EitherReader<'_, CURSOR, N>
@@ -894,8 +950,11 @@ mod rocksdb_tests {
use reth_db_api::{
models::{storage_sharded_key::StorageShardedKey, IntegerList, ShardedKey},
tables,
transaction::DbTxMut,
};
use reth_ethereum_primitives::EthPrimitives;
use reth_storage_api::{DatabaseProviderFactory, StorageSettings};
use std::marker::PhantomData;
use tempfile::TempDir;
fn create_rocksdb_provider() -> (TempDir, RocksDBProvider) {
@@ -1125,10 +1184,391 @@ mod rocksdb_tests {
assert_eq!(provider.get::<tables::AccountsHistory>(key).unwrap(), None);
}
/// Test that `RocksDB` commits happen at `provider.commit()` level, not at writer level.
// ==================== Parametrized Backend Equivalence Tests ====================
//
// These tests verify that MDBX and RocksDB produce identical results for history lookups.
// Each scenario sets up the same data in both backends and asserts identical HistoryInfo.
/// Query parameters for a history lookup test case.
struct HistoryQuery {
// Block number to query history at.
block_number: BlockNumber,
// Optional pruning boundary (`lowest_available_block_number`) passed to the lookup.
lowest_available: Option<BlockNumber>,
// The `HistoryInfo` both backends are expected to return.
expected: HistoryInfo,
}
// Type aliases for cursor types (needed for EitherWriter/EitherReader type inference).
// RW cursors back the `EitherWriter::Database` variant; RO cursors back the reader.
type AccountsHistoryWriteCursor =
reth_db::mdbx::cursor::Cursor<reth_db::mdbx::RW, tables::AccountsHistory>;
type StoragesHistoryWriteCursor =
reth_db::mdbx::cursor::Cursor<reth_db::mdbx::RW, tables::StoragesHistory>;
type AccountsHistoryReadCursor =
reth_db::mdbx::cursor::Cursor<reth_db::mdbx::RO, tables::AccountsHistory>;
type StoragesHistoryReadCursor =
reth_db::mdbx::cursor::Cursor<reth_db::mdbx::RO, tables::StoragesHistory>;
/// Runs the same account history queries against both MDBX and `RocksDB` backends,
/// asserting they produce identical results.
///
/// # Arguments
/// * `scenario_name` - label included in assertion messages for easier diagnosis
/// * `address` - account whose history shards are written and queried
/// * `shards` - `(shard_highest_block, blocks_in_shard)` pairs written to both backends
/// * `queries` - lookups to execute, each with its expected [`HistoryInfo`]
fn run_account_history_scenario(
scenario_name: &str,
address: Address,
shards: &[(BlockNumber, Vec<BlockNumber>)], // (shard_highest_block, blocks_in_shard)
queries: &[HistoryQuery],
) {
// Setup MDBX and RocksDB with identical data using EitherWriter
let factory = create_test_provider_factory();
let mdbx_provider = factory.database_provider_rw().unwrap();
let (temp_dir, rocks_provider) = create_rocksdb_provider();
// Create writers for both backends
let mut mdbx_writer: EitherWriter<'_, AccountsHistoryWriteCursor, EthPrimitives> =
EitherWriter::Database(
mdbx_provider.tx_ref().cursor_write::<tables::AccountsHistory>().unwrap(),
);
let mut rocks_writer: EitherWriter<'_, AccountsHistoryWriteCursor, EthPrimitives> =
EitherWriter::RocksDB(rocks_provider.batch());
// Write identical data to both backends in a single loop
for (highest_block, blocks) in shards {
let key = ShardedKey::new(address, *highest_block);
let value = IntegerList::new(blocks.clone()).unwrap();
mdbx_writer.put_account_history(key.clone(), &value).unwrap();
rocks_writer.put_account_history(key, &value).unwrap();
}
// Commit both backends.
// Drop the MDBX writer first to release its cursor borrow on the transaction.
drop(mdbx_writer);
mdbx_provider.commit().unwrap();
if let EitherWriter::RocksDB(batch) = rocks_writer {
batch.commit().unwrap();
}
// Run queries against both backends using EitherReader
let mdbx_ro = factory.database_provider_ro().unwrap();
let rocks_tx = rocks_provider.tx();
for (i, query) in queries.iter().enumerate() {
// MDBX query via EitherReader
let mut mdbx_reader: EitherReader<'_, AccountsHistoryReadCursor, EthPrimitives> =
EitherReader::Database(
mdbx_ro.tx_ref().cursor_read::<tables::AccountsHistory>().unwrap(),
PhantomData,
);
let mdbx_result = mdbx_reader
.account_history_info(address, query.block_number, query.lowest_available)
.unwrap();
// RocksDB query via EitherReader
let mut rocks_reader: EitherReader<'_, AccountsHistoryReadCursor, EthPrimitives> =
EitherReader::RocksDB(&rocks_tx);
let rocks_result = rocks_reader
.account_history_info(address, query.block_number, query.lowest_available)
.unwrap();
// Assert both backends produce identical results
assert_eq!(
mdbx_result,
rocks_result,
"Backend mismatch in scenario '{}' query {}: block={}, lowest={:?}\n\
MDBX: {:?}, RocksDB: {:?}",
scenario_name,
i,
query.block_number,
query.lowest_available,
mdbx_result,
rocks_result
);
// Also verify against expected result
assert_eq!(
mdbx_result,
query.expected,
"Unexpected result in scenario '{}' query {}: block={}, lowest={:?}\n\
Got: {:?}, Expected: {:?}",
scenario_name,
i,
query.block_number,
query.lowest_available,
mdbx_result,
query.expected
);
}
// Release the RocksDB read transaction and tempdir once all queries have run.
rocks_tx.rollback().unwrap();
drop(temp_dir);
}
/// Runs the same storage history queries against both MDBX and `RocksDB` backends,
/// asserting they produce identical results.
///
/// # Arguments
/// * `scenario_name` - label included in assertion messages for easier diagnosis
/// * `address` - account owning the storage slot
/// * `storage_key` - storage slot whose history shards are written and queried
/// * `shards` - `(shard_highest_block, blocks_in_shard)` pairs written to both backends
/// * `queries` - lookups to execute, each with its expected [`HistoryInfo`]
fn run_storage_history_scenario(
scenario_name: &str,
address: Address,
storage_key: B256,
shards: &[(BlockNumber, Vec<BlockNumber>)], // (shard_highest_block, blocks_in_shard)
queries: &[HistoryQuery],
) {
// Setup MDBX and RocksDB with identical data using EitherWriter
let factory = create_test_provider_factory();
let mdbx_provider = factory.database_provider_rw().unwrap();
let (temp_dir, rocks_provider) = create_rocksdb_provider();
// Create writers for both backends
let mut mdbx_writer: EitherWriter<'_, StoragesHistoryWriteCursor, EthPrimitives> =
EitherWriter::Database(
mdbx_provider.tx_ref().cursor_write::<tables::StoragesHistory>().unwrap(),
);
let mut rocks_writer: EitherWriter<'_, StoragesHistoryWriteCursor, EthPrimitives> =
EitherWriter::RocksDB(rocks_provider.batch());
// Write identical data to both backends in a single loop
for (highest_block, blocks) in shards {
let key = StorageShardedKey::new(address, storage_key, *highest_block);
let value = IntegerList::new(blocks.clone()).unwrap();
mdbx_writer.put_storage_history(key.clone(), &value).unwrap();
rocks_writer.put_storage_history(key, &value).unwrap();
}
// Commit both backends.
// Drop the MDBX writer first to release its cursor borrow on the transaction.
drop(mdbx_writer);
mdbx_provider.commit().unwrap();
if let EitherWriter::RocksDB(batch) = rocks_writer {
batch.commit().unwrap();
}
// Run queries against both backends using EitherReader
let mdbx_ro = factory.database_provider_ro().unwrap();
let rocks_tx = rocks_provider.tx();
for (i, query) in queries.iter().enumerate() {
// MDBX query via EitherReader
let mut mdbx_reader: EitherReader<'_, StoragesHistoryReadCursor, EthPrimitives> =
EitherReader::Database(
mdbx_ro.tx_ref().cursor_read::<tables::StoragesHistory>().unwrap(),
PhantomData,
);
let mdbx_result = mdbx_reader
.storage_history_info(
address,
storage_key,
query.block_number,
query.lowest_available,
)
.unwrap();
// RocksDB query via EitherReader
let mut rocks_reader: EitherReader<'_, StoragesHistoryReadCursor, EthPrimitives> =
EitherReader::RocksDB(&rocks_tx);
let rocks_result = rocks_reader
.storage_history_info(
address,
storage_key,
query.block_number,
query.lowest_available,
)
.unwrap();
// Assert both backends produce identical results
assert_eq!(
mdbx_result,
rocks_result,
"Backend mismatch in scenario '{}' query {}: block={}, lowest={:?}\n\
MDBX: {:?}, RocksDB: {:?}",
scenario_name,
i,
query.block_number,
query.lowest_available,
mdbx_result,
rocks_result
);
// Also verify against expected result
assert_eq!(
mdbx_result,
query.expected,
"Unexpected result in scenario '{}' query {}: block={}, lowest={:?}\n\
Got: {:?}, Expected: {:?}",
scenario_name,
i,
query.block_number,
query.lowest_available,
mdbx_result,
query.expected
);
}
// Release the RocksDB read transaction and tempdir once all queries have run.
rocks_tx.rollback().unwrap();
drop(temp_dir);
}
/// Tests account history lookups across both MDBX and `RocksDB` backends.
///
/// Each scenario writes identical shard data to both backends and asserts that
/// `account_history_info` returns the same [`HistoryInfo`] from each, as well as
/// the expected value.
/// Covers the following scenarios from PR2's `RocksDB`-only tests:
/// 1. Single shard - basic lookups within one shard
/// 2. Multiple shards - `prev()` shard detection and transitions
/// 3. No history - query address with no entries
/// 4. Pruning boundary - `lowest_available` boundary behavior (block at/after boundary)
#[test]
fn test_account_history_info_both_backends() {
let address = Address::from([0x42; 20]);
// Scenario 1: Single shard with blocks [100, 200, 300]
run_account_history_scenario(
"single_shard",
address,
&[(u64::MAX, vec![100, 200, 300])],
&[
// Before first entry -> NotYetWritten
HistoryQuery {
block_number: 50,
lowest_available: None,
expected: HistoryInfo::NotYetWritten,
},
// Between entries -> InChangeset(next_write)
HistoryQuery {
block_number: 150,
lowest_available: None,
expected: HistoryInfo::InChangeset(200),
},
// Exact match on entry -> InChangeset(same_block)
HistoryQuery {
block_number: 300,
lowest_available: None,
expected: HistoryInfo::InChangeset(300),
},
// After last entry in last shard -> InPlainState
HistoryQuery {
block_number: 500,
lowest_available: None,
expected: HistoryInfo::InPlainState,
},
],
);
// Scenario 2: Multiple shards - tests prev() shard detection
run_account_history_scenario(
"multiple_shards",
address,
&[
(500, vec![100, 200, 300, 400, 500]), // First shard ends at 500
(u64::MAX, vec![600, 700, 800]),      // Last shard
],
&[
// Before first shard, no prev -> NotYetWritten
HistoryQuery {
block_number: 50,
lowest_available: None,
expected: HistoryInfo::NotYetWritten,
},
// Within first shard
HistoryQuery {
block_number: 150,
lowest_available: None,
expected: HistoryInfo::InChangeset(200),
},
// Between shards - prev() should find first shard
HistoryQuery {
block_number: 550,
lowest_available: None,
expected: HistoryInfo::InChangeset(600),
},
// After all entries
HistoryQuery {
block_number: 900,
lowest_available: None,
expected: HistoryInfo::InPlainState,
},
],
);
// Scenario 3: No history for address
let address_without_history = Address::from([0x43; 20]);
run_account_history_scenario(
"no_history",
address_without_history,
&[], // No shards for this address
&[HistoryQuery {
block_number: 150,
lowest_available: None,
expected: HistoryInfo::NotYetWritten,
}],
);
// Scenario 4: Query at pruning boundary
// Note: We test block >= lowest_available because HistoricalStateProviderRef
// errors on blocks below the pruning boundary before doing the lookup.
// The RocksDB implementation doesn't have this check at the same level.
// This tests that when pruning IS available, both backends agree.
run_account_history_scenario(
"with_pruning_boundary",
address,
&[(u64::MAX, vec![100, 200, 300])],
&[
// At pruning boundary -> InChangeset(first entry after block)
HistoryQuery {
block_number: 100,
lowest_available: Some(100),
expected: HistoryInfo::InChangeset(100),
},
// After pruning boundary, between entries
HistoryQuery {
block_number: 150,
lowest_available: Some(100),
expected: HistoryInfo::InChangeset(200),
},
],
);
}
/// Tests storage history lookups across both MDBX and `RocksDB` backends.
///
/// Covers a single-shard lookup scenario and a storage key with no history,
/// asserting both backends agree and match the expected [`HistoryInfo`].
#[test]
fn test_storage_history_info_both_backends() {
let address = Address::from([0x42; 20]);
let storage_key = B256::from([0x01; 32]);
let other_storage_key = B256::from([0x02; 32]);
// Single shard with blocks [100, 200, 300]
run_storage_history_scenario(
"storage_single_shard",
address,
storage_key,
&[(u64::MAX, vec![100, 200, 300])],
&[
// Before first entry -> NotYetWritten
HistoryQuery {
block_number: 50,
lowest_available: None,
expected: HistoryInfo::NotYetWritten,
},
// Between entries -> InChangeset(next_write)
HistoryQuery {
block_number: 150,
lowest_available: None,
expected: HistoryInfo::InChangeset(200),
},
// After last entry -> InPlainState
HistoryQuery {
block_number: 500,
lowest_available: None,
expected: HistoryInfo::InPlainState,
},
],
);
// No history for different storage key
run_storage_history_scenario(
"storage_no_history",
address,
other_storage_key,
&[], // No shards for this storage key
&[HistoryQuery {
block_number: 150,
lowest_available: None,
expected: HistoryInfo::NotYetWritten,
}],
);
}
/// Test that `RocksDB` batches created via `EitherWriter` are only made visible when
/// `provider.commit()` is called, not when the writer is dropped.
#[test]
fn test_rocksdb_commits_at_provider_level() {
let factory = create_test_provider_factory();

View File

@@ -16,8 +16,8 @@ pub use static_file::{
mod state;
pub use state::{
historical::{
needs_prev_shard_check, HistoricalStateProvider, HistoricalStateProviderRef, HistoryInfo,
LowestAvailableBlocks,
history_info, needs_prev_shard_check, HistoricalStateProvider, HistoricalStateProviderRef,
HistoryInfo, LowestAvailableBlocks,
},
latest::{LatestStateProvider, LatestStateProviderRef},
overlay::{OverlayStateProvider, OverlayStateProviderFactory},

View File

@@ -1272,101 +1272,9 @@ mod tests {
assert_eq!(last, Some((20, b"value_20".to_vec())));
}
/// Verifies account history lookups against a single "last shard"
/// (`highest_block == u64::MAX`) on a `RocksDB`-only provider.
#[test]
fn test_account_history_info_single_shard() {
let temp_dir = TempDir::new().unwrap();
let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
let address = Address::from([0x42; 20]);
// Create a single shard with blocks [100, 200, 300] and highest_block = u64::MAX
// This is the "last shard" invariant
let chunk = IntegerList::new([100, 200, 300]).unwrap();
let shard_key = ShardedKey::new(address, u64::MAX);
provider.put::<tables::AccountsHistory>(shard_key, &chunk).unwrap();
let tx = provider.tx();
// Query for block 150: should find block 200 in changeset
let result = tx.account_history_info(address, 150, None).unwrap();
assert_eq!(result, HistoryInfo::InChangeset(200));
// Query for block 50: should return NotYetWritten (before first entry, no prev shard)
let result = tx.account_history_info(address, 50, None).unwrap();
assert_eq!(result, HistoryInfo::NotYetWritten);
// Query for block 300: should return InChangeset(300) - exact match means look at
// changeset at that block for the previous value
let result = tx.account_history_info(address, 300, None).unwrap();
assert_eq!(result, HistoryInfo::InChangeset(300));
// Query for block 500: should return InPlainState (after last entry in last shard)
let result = tx.account_history_info(address, 500, None).unwrap();
assert_eq!(result, HistoryInfo::InPlainState);
tx.rollback().unwrap();
}
/// Verifies account history lookups spanning two shards, exercising the
/// `prev()` shard transition on a `RocksDB`-only provider.
#[test]
fn test_account_history_info_multiple_shards() {
let temp_dir = TempDir::new().unwrap();
let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap();
let address = Address::from([0x42; 20]);
// Create two shards: first shard ends at block 500, second is the last shard
let chunk1 = IntegerList::new([100, 200, 300, 400, 500]).unwrap();
let shard_key1 = ShardedKey::new(address, 500);
provider.put::<tables::AccountsHistory>(shard_key1, &chunk1).unwrap();
let chunk2 = IntegerList::new([600, 700, 800]).unwrap();
let shard_key2 = ShardedKey::new(address, u64::MAX);
provider.put::<tables::AccountsHistory>(shard_key2, &chunk2).unwrap();
let tx = provider.tx();
// Query for block 50: should return NotYetWritten (before first shard, no prev)
let result = tx.account_history_info(address, 50, None).unwrap();
assert_eq!(result, HistoryInfo::NotYetWritten);
// Query for block 150: should find block 200 in first shard's changeset
let result = tx.account_history_info(address, 150, None).unwrap();
assert_eq!(result, HistoryInfo::InChangeset(200));
// Query for block 550: should find block 600 in second shard's changeset
// prev() should detect first shard exists
let result = tx.account_history_info(address, 550, None).unwrap();
assert_eq!(result, HistoryInfo::InChangeset(600));
// Query for block 900: should return InPlainState (after last entry in last shard)
let result = tx.account_history_info(address, 900, None).unwrap();
assert_eq!(result, HistoryInfo::InPlainState);
tx.rollback().unwrap();
}
/// Verifies that querying an address with no history shards reports
/// `NotYetWritten` even when another address has history in the table.
#[test]
fn test_account_history_info_no_history() {
let dir = TempDir::new().unwrap();
let db = RocksDBBuilder::new(dir.path()).with_default_tables().build().unwrap();
let tracked = Address::from([0x42; 20]);
let untracked = Address::from([0x43; 20]);
// Seed history for `tracked` only, leaving `untracked` without any shards.
let blocks = IntegerList::new([100, 200, 300]).unwrap();
db.put::<tables::AccountsHistory>(ShardedKey::new(tracked, u64::MAX), &blocks).unwrap();
let read_tx = db.tx();
// An address with no index entries must report NotYetWritten.
assert_eq!(
read_tx.account_history_info(untracked, 150, None).unwrap(),
HistoryInfo::NotYetWritten
);
read_tx.rollback().unwrap();
}
/// Tests the edge case where block < `lowest_available_block_number`.
/// This case cannot be tested via `HistoricalStateProviderRef` (which errors before lookup),
/// so we keep this RocksDB-specific test to verify the low-level behavior.
#[test]
fn test_account_history_info_pruned_before_first_entry() {
let temp_dir = TempDir::new().unwrap();
@@ -1390,39 +1298,4 @@ mod tests {
tx.rollback().unwrap();
}
/// Verifies storage history lookups against a single "last shard" on a
/// `RocksDB`-only provider, including a slot with no history at all.
#[test]
fn test_storage_history_info() {
let dir = TempDir::new().unwrap();
let db = RocksDBBuilder::new(dir.path()).with_default_tables().build().unwrap();
let account = Address::from([0x42; 20]);
let slot = B256::from([0x01; 32]);
// One shard (the "last shard", highest_block == u64::MAX) holding writes at 100/200/300.
let writes = IntegerList::new([100, 200, 300]).unwrap();
db.put::<tables::StoragesHistory>(StorageShardedKey::new(account, slot, u64::MAX), &writes)
.unwrap();
let read_tx = db.tx();
// Querying between entries resolves to the next write in the changeset.
assert_eq!(
read_tx.storage_history_info(account, slot, 150, None).unwrap(),
HistoryInfo::InChangeset(200)
);
// Before the first entry, with no previous shard, nothing was written yet.
assert_eq!(
read_tx.storage_history_info(account, slot, 50, None).unwrap(),
HistoryInfo::NotYetWritten
);
// Past the last entry of the last shard, the value lives in plain state.
assert_eq!(
read_tx.storage_history_info(account, slot, 500, None).unwrap(),
HistoryInfo::InPlainState
);
// A slot with no history at all reports NotYetWritten.
let unused_slot = B256::from([0x02; 32]);
assert_eq!(
read_tx.storage_history_info(account, unused_slot, 150, None).unwrap(),
HistoryInfo::NotYetWritten
);
read_tx.rollback().unwrap();
}
}

View File

@@ -135,7 +135,7 @@ impl<'b, Provider: DBProvider + ChangeSetReader + BlockNumReader>
// history key to search IntegerList of block number changesets.
let history_key = ShardedKey::new(address, self.block_number);
self.history_info::<tables::AccountsHistory, _>(
self.history_info_lookup::<tables::AccountsHistory, _>(
history_key,
|key| key.key == address,
self.lowest_available_blocks.account_history_block_number,
@@ -154,7 +154,7 @@ impl<'b, Provider: DBProvider + ChangeSetReader + BlockNumReader>
// history key to search IntegerList of block number changesets.
let history_key = StorageShardedKey::new(address, storage_key, self.block_number);
self.history_info::<tables::StoragesHistory, _>(
self.history_info_lookup::<tables::StoragesHistory, _>(
history_key,
|key| key.address == address && key.sharded_key.key == storage_key,
self.lowest_available_blocks.storage_history_block_number,
@@ -204,7 +204,7 @@ impl<'b, Provider: DBProvider + ChangeSetReader + BlockNumReader>
Ok(HashedStorage::from_reverts(self.tx(), address, self.block_number)?)
}
fn history_info<T, K>(
fn history_info_lookup<T, K>(
&self,
key: K,
key_filter: impl Fn(&K) -> bool,
@@ -214,45 +214,13 @@ impl<'b, Provider: DBProvider + ChangeSetReader + BlockNumReader>
T: Table<Key = K, Value = BlockNumberList>,
{
let mut cursor = self.tx().cursor_read::<T>()?;
// Lookup the history chunk in the history index. If the key does not appear in the
// index, the first chunk for the next key will be returned so we filter out chunks that
// have a different key.
if let Some(chunk) = cursor.seek(key)?.filter(|(key, _)| key_filter(key)).map(|x| x.1) {
// Get the rank of the first entry before or equal to our block.
let mut rank = chunk.rank(self.block_number);
// Adjust the rank, so that we have the rank of the first entry strictly before our
// block (not equal to it).
if rank.checked_sub(1).and_then(|r| chunk.select(r)) == Some(self.block_number) {
rank -= 1;
}
let found_block = chunk.select(rank);
// If our block is before the first entry in the index chunk and this first entry
// doesn't equal to our block, it might be before the first write ever. To check, we
// look at the previous entry and check if the key is the same.
// This check is worth it, the `cursor.prev()` check is rarely triggered (the if will
// short-circuit) and when it passes we save a full seek into the changeset/plain state
// table.
let is_before_first_write =
needs_prev_shard_check(rank, found_block, self.block_number) &&
!cursor.prev()?.is_some_and(|(key, _)| key_filter(&key));
Ok(HistoryInfo::from_lookup(
found_block,
is_before_first_write,
lowest_available_block_number,
))
} else if lowest_available_block_number.is_some() {
// The key may have been written, but due to pruning we may not have changesets and
// history, so we need to make a plain state lookup.
Ok(HistoryInfo::MaybeInPlainState)
} else {
// The key has not been written to at all.
Ok(HistoryInfo::NotYetWritten)
}
history_info::<T, K, _>(
&mut cursor,
key,
self.block_number,
key_filter,
lowest_available_block_number,
)
}
/// Set the lowest block number at which the account history is available.
@@ -570,6 +538,60 @@ pub fn needs_prev_shard_check(
rank == 0 && found_block != Some(block_number)
}
/// Generic history lookup for sharded history tables.
///
/// Seeks to the shard containing `block_number`, verifies the key via `key_filter`,
/// and checks previous shard to detect if we're before the first write.
///
/// When no matching shard exists, returns [`HistoryInfo::MaybeInPlainState`] if
/// `lowest_available_block_number` is set (history may have been pruned), otherwise
/// [`HistoryInfo::NotYetWritten`].
pub fn history_info<T, K, C>(
cursor: &mut C,
key: K,
block_number: BlockNumber,
key_filter: impl Fn(&K) -> bool,
lowest_available_block_number: Option<BlockNumber>,
) -> ProviderResult<HistoryInfo>
where
T: Table<Key = K, Value = BlockNumberList>,
C: DbCursorRO<T>,
{
// Lookup the history chunk in the history index. If the key does not appear in the
// index, the first chunk for the next key will be returned so we filter out chunks that
// have a different key.
if let Some(chunk) = cursor.seek(key)?.filter(|(k, _)| key_filter(k)).map(|x| x.1) {
// Get the rank of the first entry before or equal to our block.
let mut rank = chunk.rank(block_number);
// Adjust the rank, so that we have the rank of the first entry strictly before our
// block (not equal to it).
if rank.checked_sub(1).and_then(|r| chunk.select(r)) == Some(block_number) {
rank -= 1;
}
let found_block = chunk.select(rank);
// If our block is before the first entry in the index chunk and this first entry
// doesn't equal to our block, it might be before the first write ever. To check, we
// look at the previous entry and check if the key is the same.
// This check is worth it, the `cursor.prev()` check is rarely triggered (the if will
// short-circuit) and when it passes we save a full seek into the changeset/plain state
// table.
let is_before_first_write = needs_prev_shard_check(rank, found_block, block_number) &&
!cursor.prev()?.is_some_and(|(k, _)| key_filter(&k));
Ok(HistoryInfo::from_lookup(
found_block,
is_before_first_write,
lowest_available_block_number,
))
} else if lowest_available_block_number.is_some() {
// The key may have been written, but due to pruning we may not have changesets and
// history, so we need to make a plain state lookup.
Ok(HistoryInfo::MaybeInPlainState)
} else {
// The key has not been written to at all.
Ok(HistoryInfo::NotYetWritten)
}
}
#[cfg(test)]
mod tests {
use super::needs_prev_shard_check;