feat(storage): add with_default_tables() to register RocksDB column families at initialization (#20416)

This commit is contained in:
Federico Gimenez
2025-12-16 13:59:58 +01:00
committed by GitHub
parent b6772370d7
commit 49057b1c0c
5 changed files with 52 additions and 5 deletions

View File

@@ -112,6 +112,7 @@ impl<C: ChainSpecParser> EnvironmentArgs<C> {
};
// TransactionDB only support read-write mode
let rocksdb_provider = RocksDBProvider::builder(data_dir.rocksdb())
.with_default_tables()
.with_database_log_level(self.db.log_level)
.build()?;

View File

@@ -485,8 +485,9 @@ where
.with_blocks_per_file_for_segments(static_files_config.as_blocks_per_file_map())
.build()?;
// Initialize RocksDB provider with metrics and statistics enabled
// Initialize RocksDB provider with metrics, statistics, and default tables
let rocksdb_provider = RocksDBProvider::builder(self.data_dir().rocksdb())
.with_default_tables()
.with_metrics()
.with_statistics()
.build()?;

View File

@@ -109,7 +109,7 @@ impl<N> ProviderFactoryBuilder<N> {
self.db(Arc::new(open_db_read_only(db_dir, db_args)?))
.chainspec(chainspec)
.static_file(StaticFileProvider::read_only(static_files_dir, watch_static_files)?)
.rocksdb_provider(RocksDBProvider::builder(&rocksdb_dir).build()?)
.rocksdb_provider(RocksDBProvider::builder(&rocksdb_dir).with_default_tables().build()?)
.build_provider_factory()
.map_err(Into::into)
}

View File

@@ -1,7 +1,7 @@
use super::metrics::{RocksDBMetrics, RocksDBOperation};
use reth_db_api::{
table::{Compress, Decompress, Encode, Table},
DatabaseError,
tables, DatabaseError,
};
use reth_storage_errors::{
db::{DatabaseErrorInfo, DatabaseWriteError, DatabaseWriteOperation, LogLevel},
@@ -143,6 +143,18 @@ impl RocksDBBuilder {
self
}
/// Registers the default tables used by reth for `RocksDB` storage.
///
/// The registered column families are:
/// - [`tables::TransactionHashNumbers`] - Transaction hash to number mapping
/// - [`tables::AccountsHistory`] - Account history index
/// - [`tables::StoragesHistory`] - Storage history index
pub fn with_default_tables(self) -> Self {
    // Register each default table in turn; every call returns the updated builder.
    let builder = self.with_table::<tables::TransactionHashNumbers>();
    let builder = builder.with_table::<tables::AccountsHistory>();
    builder.with_table::<tables::StoragesHistory>()
}
/// Enables metrics.
pub const fn with_metrics(mut self) -> Self {
self.enable_metrics = true;
@@ -630,10 +642,38 @@ const fn convert_log_level(level: LogLevel) -> rocksdb::LogLevel {
#[cfg(test)]
mod tests {
use super::*;
use alloy_primitives::{TxHash, B256};
use reth_db_api::{table::Table, tables};
use alloy_primitives::{Address, TxHash, B256};
use reth_db_api::{
models::{sharded_key::ShardedKey, storage_sharded_key::StorageShardedKey, IntegerList},
table::Table,
tables,
};
use tempfile::TempDir;
#[test]
fn test_with_default_tables_registers_required_column_families() {
    // Open a fresh database in a scratch directory with the default tables registered.
    let dir = TempDir::new().unwrap();
    let db = RocksDBBuilder::new(dir.path()).with_default_tables().build().unwrap();

    // TransactionHashNumbers must round-trip a put/get.
    let hash = TxHash::from(B256::from([1u8; 32]));
    db.put::<tables::TransactionHashNumbers>(hash, &100).unwrap();
    assert_eq!(db.get::<tables::TransactionHashNumbers>(hash).unwrap(), Some(100));

    // AccountsHistory must round-trip a put/get.
    let shard = IntegerList::default();
    let account_key = ShardedKey::new(Address::ZERO, 100);
    db.put::<tables::AccountsHistory>(account_key.clone(), &shard).unwrap();
    assert!(db.get::<tables::AccountsHistory>(account_key).unwrap().is_some());

    // StoragesHistory must round-trip a put/get.
    let storage_key = StorageShardedKey::new(Address::ZERO, B256::ZERO, 100);
    db.put::<tables::StoragesHistory>(storage_key.clone(), &shard).unwrap();
    assert!(db.get::<tables::StoragesHistory>(storage_key).unwrap().is_some());
}
// Test-only marker type; presumably given a `Table` impl further below in this
// module (impl not visible in this view) so the builder can be exercised with a
// custom table — TODO confirm against the rest of the test module.
#[derive(Debug)]
struct TestTable;

View File

@@ -119,6 +119,11 @@ impl RocksDBBuilder {
self
}
/// Registers the default tables used by reth for `RocksDB` storage (stub implementation).
///
/// No-op that returns the builder unchanged; it exists so this stub builder
/// keeps the same chainable API surface as the real implementation.
pub const fn with_default_tables(self) -> Self {
    self
}
/// Enables metrics (stub implementation).
pub const fn with_metrics(self) -> Self {
self