feat: add Metadata table and StorageSettings to ProviderFactory (#19384)

This commit is contained in:
joshieDo
2025-11-06 00:39:49 +00:00
committed by GitHub
parent e3b38b2de5
commit e20e56b75e
23 changed files with 265 additions and 68 deletions
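Most of the call-site churn below is mechanical fallout from `ProviderFactory::new` becoming fallible: the factory now opens a read transaction at construction time to load persisted `StorageSettings` from the new `Metadata` table. A minimal sketch of the new construction path, assuming the re-exports shown later in this diff (`db`, `chain_spec`, and `static_files` are placeholder arguments):

use std::sync::Arc;
use reth_provider::{
    providers::{ProviderNodeTypes, StaticFileProvider},
    ProviderFactory,
};
use reth_storage_errors::provider::ProviderResult;

// Previously infallible; now returns a ProviderResult because the factory reads
// persisted StorageSettings from the Metadata table when it is created.
fn build_factory<N: ProviderNodeTypes>(
    db: N::DB,
    chain_spec: Arc<N::ChainSpec>,
    static_files: StaticFileProvider<N::Primitives>,
) -> ProviderResult<ProviderFactory<N>> {
    ProviderFactory::new(db, chain_spec, static_files)
}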

Cargo.lock generated
View File

@@ -10567,6 +10567,7 @@ dependencies = [
"reth-storage-errors",
"reth-trie-common",
"revm-database",
"serde_json",
]
[[package]]

View File

@@ -132,7 +132,7 @@ impl<C: ChainSpecParser> EnvironmentArgs<C> {
db,
self.chain.clone(),
static_file_provider,
)
)?
.with_prune_modes(prune_modes.clone());
// Check for consistency between database and static files.

View File

@@ -42,7 +42,7 @@ where
Arc::new(output_db),
db_tool.chain(),
StaticFileProvider::read_write(output_datadir.static_files())?,
),
)?,
to,
from,
evm_config,

View File

@@ -39,7 +39,7 @@ pub(crate) async fn dump_hashing_account_stage<N: ProviderNodeTypes<DB = Arc<Dat
Arc::new(output_db),
db_tool.chain(),
StaticFileProvider::read_write(output_datadir.static_files())?,
),
)?,
to,
from,
)?;

View File

@@ -29,7 +29,7 @@ pub(crate) async fn dump_hashing_storage_stage<N: ProviderNodeTypes<DB = Arc<Dat
Arc::new(output_db),
db_tool.chain(),
StaticFileProvider::read_write(output_datadir.static_files())?,
),
)?,
to,
from,
)?;

View File

@@ -62,7 +62,7 @@ where
Arc::new(output_db),
db_tool.chain(),
StaticFileProvider::read_write(output_datadir.static_files())?,
),
)?,
to,
from,
)?;

View File

@@ -125,7 +125,7 @@ pub async fn setup_engine_with_chain_import(
db.clone(),
chain_spec.clone(),
reth_provider::providers::StaticFileProvider::read_write(static_files_path.clone())?,
);
)?;
// Initialize genesis if needed
reth_db_common::init::init_genesis(&provider_factory)?;
@@ -324,7 +324,8 @@ mod tests {
chain_spec.clone(),
reth_provider::providers::StaticFileProvider::read_write(static_files_path.clone())
.unwrap(),
);
)
.expect("failed to create provider factory");
// Initialize genesis
reth_db_common::init::init_genesis(&provider_factory).unwrap();
@@ -384,7 +385,8 @@ mod tests {
chain_spec.clone(),
reth_provider::providers::StaticFileProvider::read_only(static_files_path, false)
.unwrap(),
);
)
.expect("failed to create provider factory");
let provider = provider_factory.database_provider_ro().unwrap();
@@ -475,7 +477,8 @@ mod tests {
db.clone(),
chain_spec.clone(),
reth_provider::providers::StaticFileProvider::read_write(static_files_path).unwrap(),
);
)
.expect("failed to create provider factory");
// Initialize genesis
reth_db_common::init::init_genesis(&provider_factory).unwrap();

View File

@@ -244,7 +244,7 @@ pub async fn test_exex_context_with_chain_spec(
db,
chain_spec.clone(),
StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"),
);
)?;
let genesis_hash = init_genesis(&provider_factory)?;
let provider = BlockchainProvider::new(provider_factory.clone())?;

View File

@@ -469,7 +469,7 @@ where
self.right().clone(),
self.chain_spec(),
StaticFileProvider::read_write(self.data_dir().static_files())?,
)
)?
.with_prune_modes(self.prune_modes())
.with_static_files_metrics();

View File

@@ -44,7 +44,8 @@ impl Default for TestStageDB {
create_test_rw_db(),
MAINNET.clone(),
StaticFileProvider::read_write(static_dir_path).unwrap(),
),
)
.expect("failed to create test provider factory"),
}
}
}
@@ -59,7 +60,8 @@ impl TestStageDB {
create_test_rw_db_with_path(path),
MAINNET.clone(),
StaticFileProvider::read_write(static_dir_path).unwrap(),
),
)
.expect("failed to create test provider factory"),
}
}

View File

@@ -0,0 +1,39 @@
//! Storage metadata models.
use reth_codecs::{add_arbitrary_tests, Compact};
use serde::{Deserialize, Serialize};
/// Storage configuration settings for this node.
///
/// These should be set during `init_genesis` or `init_db`, depending on whether we want to dictate
/// the behaviour of new or old nodes respectively.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize, Compact)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(compact)]
pub struct StorageSettings {
/// Whether this node always writes receipts to static files.
///
/// If this is set to `false` and receipt pruning is enabled, all receipts should be written to the
/// DB; otherwise, they should be written to static files. This ensures that older nodes do not need
/// to migrate their existing DB tables to static files. For more, read:
/// <https://github.com/paradigmxyz/reth/issues/18890#issuecomment-3457760097>
pub receipts_in_static_files: bool,
}
impl StorageSettings {
/// Creates a new `StorageSettings` with default values.
pub const fn new() -> Self {
Self { receipts_in_static_files: false }
}
/// Creates `StorageSettings` for legacy nodes.
///
/// This explicitly sets `receipts_in_static_files` to `false`, ensuring older nodes
/// continue writing receipts to the database when receipt pruning is enabled.
pub const fn legacy() -> Self {
Self { receipts_in_static_files: false }
}
/// Sets the `receipts_in_static_files` flag to true.
pub const fn with_receipts_in_static_files(mut self) -> Self {
self.receipts_in_static_files = true;
self
}
}
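A small sketch of how the constructors above compose; the assertions just make the defaults explicit and are not part of the commit:

use reth_db_api::models::StorageSettings;

fn main() {
    // New nodes (settings written during `init_genesis`) currently start with
    // the same defaults as legacy nodes; the builder flips the flag explicitly.
    assert_eq!(StorageSettings::new(), StorageSettings::default());
    assert!(!StorageSettings::legacy().receipts_in_static_files);
    assert!(StorageSettings::new().with_receipts_in_static_files().receipts_in_static_files);
}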

View File

@@ -20,12 +20,14 @@ use serde::{Deserialize, Serialize};
pub mod accounts;
pub mod blocks;
pub mod integer_list;
pub mod metadata;
pub mod sharded_key;
pub mod storage_sharded_key;
pub use accounts::*;
pub use blocks::*;
pub use integer_list::IntegerList;
pub use metadata::*;
pub use reth_db_models::{
AccountBeforeTx, ClientVersion, StaticFileBlockWithdrawals, StoredBlockBodyIndices,
StoredBlockWithdrawals,

View File

@@ -540,6 +540,13 @@ tables! {
type Key = ChainStateKey;
type Value = BlockNumber;
}
/// Stores generic node metadata as key-value pairs.
/// Can store feature flags, configuration markers, and other node-specific data.
table Metadata {
type Key = String;
type Value = Vec<u8>;
}
}
/// Keys for the `ChainState` table.
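The new table is deliberately schema-free: `String` keys map to raw `Vec<u8>` values, and structured values such as `StorageSettings` are JSON-encoded before being written (see the `MetadataWriter` default methods later in this diff). A minimal sketch of that encoding, using the `storage_settings` key introduced below; the helper itself is illustrative:

use reth_db_api::models::StorageSettings;

/// Hypothetical helper mirroring how a settings value is prepared for the
/// Metadata table: a string key plus JSON-encoded bytes.
fn encode_storage_settings(settings: &StorageSettings) -> serde_json::Result<(String, Vec<u8>)> {
    Ok(("storage_settings".to_string(), serde_json::to_vec(settings)?))
}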

View File

@@ -15,9 +15,9 @@ use reth_primitives_traits::{
use reth_provider::{
errors::provider::ProviderResult, providers::StaticFileWriter, BlockHashReader, BlockNumReader,
BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome,
HashingWriter, HeaderProvider, HistoryWriter, OriginalValuesKnown, ProviderError, RevertsInit,
StageCheckpointReader, StageCheckpointWriter, StateWriter, StaticFileProviderFactory,
TrieWriter,
HashingWriter, HeaderProvider, HistoryWriter, MetadataWriter, OriginalValuesKnown,
ProviderError, RevertsInit, StageCheckpointReader, StageCheckpointWriter, StateWriter,
StaticFileProviderFactory, StorageSettings, StorageSettingsCache, TrieWriter,
};
use reth_stages_types::{StageCheckpoint, StageId};
use reth_static_file_types::StaticFileSegment;
@@ -90,7 +90,8 @@ where
+ StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>
+ ChainSpecProvider
+ StageCheckpointReader
+ BlockHashReader,
+ BlockHashReader
+ StorageSettingsCache,
PF::ProviderRW: StaticFileProviderFactory<Primitives = PF::Primitives>
+ StageCheckpointWriter
+ HistoryWriter
@@ -98,6 +99,7 @@ where
+ HashingWriter
+ StateWriter
+ TrieWriter
+ MetadataWriter
+ AsRef<PF::ProviderRW>,
PF::ChainSpec: EthChainSpec<Header = <PF::Primitives as NodePrimitives>::BlockHeader>,
{
@@ -161,9 +163,14 @@ where
static_file_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(0)?;
static_file_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(0)?;
// Behaviour reserved only for new nodes should be set here.
let storage_settings = StorageSettings::new();
provider_rw.write_storage_settings(storage_settings)?;
// `commit_unwind` will first commit the DB and then the static file provider, which is
// necessary on `init_genesis`.
provider_rw.commit()?;
factory.set_storage_settings_cache(storage_settings);
Ok(hash)
}
@@ -726,11 +733,14 @@ mod tests {
init_genesis(&factory).unwrap();
// Try to init db with a different genesis block
let genesis_hash = init_genesis(&ProviderFactory::<MockNodeTypesWithDB>::new(
factory.into_db(),
MAINNET.clone(),
static_file_provider,
));
let genesis_hash = init_genesis(
&ProviderFactory::<MockNodeTypesWithDB>::new(
factory.into_db(),
MAINNET.clone(),
static_file_provider,
)
.unwrap(),
);
assert!(matches!(
genesis_hash.unwrap_err(),

View File

@@ -49,7 +49,10 @@ pub use reth_chain_state::{
};
// reexport traits to avoid breaking changes
pub use reth_storage_api::{HistoryWriter, StatsReader};
pub use reth_storage_api::{
HistoryWriter, MetadataProvider, MetadataWriter, StatsReader, StorageSettings,
StorageSettingsCache,
};
pub(crate) fn to_range<R: std::ops::RangeBounds<u64>>(bounds: R) -> std::ops::Range<u64> {
let start = match bounds.start_bound() {

View File

@@ -3,13 +3,17 @@
//! This also includes general purpose staging types that provide builder style functions that lead
//! up to the intended build target.
use crate::{providers::StaticFileProvider, ProviderFactory};
use crate::{
providers::{NodeTypesForProvider, StaticFileProvider},
ProviderFactory,
};
use reth_db::{
mdbx::{DatabaseArguments, MaxReadTransactionDuration},
open_db_read_only, DatabaseEnv,
};
use reth_db_api::{database_metrics::DatabaseMetrics, Database};
use reth_node_types::{NodeTypes, NodeTypesWithDBAdapter};
use reth_storage_errors::provider::ProviderResult;
use std::{
marker::PhantomData,
path::{Path, PathBuf},
@@ -48,10 +52,9 @@ impl<N> ProviderFactoryBuilder<N> {
///
/// ```no_run
/// use reth_chainspec::MAINNET;
/// use reth_node_types::NodeTypes;
/// use reth_provider::providers::ProviderFactoryBuilder;
/// use reth_provider::providers::{NodeTypesForProvider, ProviderFactoryBuilder};
///
/// fn demo<N: NodeTypes<ChainSpec = reth_chainspec::ChainSpec>>() {
/// fn demo<N: NodeTypesForProvider<ChainSpec = reth_chainspec::ChainSpec>>() {
/// let provider_factory = ProviderFactoryBuilder::<N>::default()
/// .open_read_only(MAINNET.clone(), "datadir")
/// .unwrap();
@@ -64,11 +67,9 @@ impl<N> ProviderFactoryBuilder<N> {
///
/// ```no_run
/// use reth_chainspec::MAINNET;
/// use reth_node_types::NodeTypes;
/// use reth_provider::providers::{NodeTypesForProvider, ProviderFactoryBuilder, ReadOnlyConfig};
///
/// use reth_provider::providers::{ProviderFactoryBuilder, ReadOnlyConfig};
///
/// fn demo<N: NodeTypes<ChainSpec = reth_chainspec::ChainSpec>>() {
/// fn demo<N: NodeTypesForProvider<ChainSpec = reth_chainspec::ChainSpec>>() {
/// let provider_factory = ProviderFactoryBuilder::<N>::default()
/// .open_read_only(MAINNET.clone(), ReadOnlyConfig::from_datadir("datadir").no_watch())
/// .unwrap();
@@ -84,11 +85,9 @@ impl<N> ProviderFactoryBuilder<N> {
///
/// ```no_run
/// use reth_chainspec::MAINNET;
/// use reth_node_types::NodeTypes;
/// use reth_provider::providers::{NodeTypesForProvider, ProviderFactoryBuilder, ReadOnlyConfig};
///
/// use reth_provider::providers::{ProviderFactoryBuilder, ReadOnlyConfig};
///
/// fn demo<N: NodeTypes<ChainSpec = reth_chainspec::ChainSpec>>() {
/// fn demo<N: NodeTypesForProvider<ChainSpec = reth_chainspec::ChainSpec>>() {
/// let provider_factory = ProviderFactoryBuilder::<N>::default()
/// .open_read_only(
/// MAINNET.clone(),
@@ -103,15 +102,15 @@ impl<N> ProviderFactoryBuilder<N> {
config: impl Into<ReadOnlyConfig>,
) -> eyre::Result<ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>>
where
N: NodeTypes,
N: NodeTypesForProvider,
{
let ReadOnlyConfig { db_dir, db_args, static_files_dir, watch_static_files } =
config.into();
Ok(self
.db(Arc::new(open_db_read_only(db_dir, db_args)?))
self.db(Arc::new(open_db_read_only(db_dir, db_args)?))
.chainspec(chainspec)
.static_file(StaticFileProvider::read_only(static_files_dir, watch_static_files)?)
.build_provider_factory())
.build_provider_factory()
.map_err(Into::into)
}
}
@@ -320,11 +319,13 @@ impl<N, Val1, Val2, Val3> TypesAnd3<N, Val1, Val2, Val3> {
impl<N, DB> TypesAnd3<N, DB, Arc<N::ChainSpec>, StaticFileProvider<N::Primitives>>
where
N: NodeTypes,
N: NodeTypesForProvider,
DB: Database + DatabaseMetrics + Clone + Unpin + 'static,
{
/// Creates the [`ProviderFactory`].
pub fn build_provider_factory(self) -> ProviderFactory<NodeTypesWithDBAdapter<N, DB>> {
pub fn build_provider_factory(
self,
) -> ProviderResult<ProviderFactory<NodeTypesWithDBAdapter<N, DB>>> {
let Self { _types, val_1, val_2, val_3 } = self;
ProviderFactory::new(val_1, val_2, val_3)
}

View File

@@ -1,29 +1,31 @@
use crate::{
providers::{state::latest::LatestStateProvider, StaticFileProvider},
providers::{state::latest::LatestStateProvider, NodeTypesForProvider, StaticFileProvider},
to_range,
traits::{BlockSource, ReceiptProvider},
BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory,
HashedPostStateProvider, HeaderProvider, HeaderSyncGapProvider, ProviderError,
PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StaticFileProviderFactory,
TransactionVariant, TransactionsProvider,
HashedPostStateProvider, HeaderProvider, HeaderSyncGapProvider, MetadataProvider,
ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProviderBox,
StaticFileProviderFactory, TransactionVariant, TransactionsProvider,
};
use alloy_consensus::transaction::TransactionMeta;
use alloy_eips::BlockHashOrNumber;
use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256};
use core::fmt;
use parking_lot::RwLock;
use reth_chainspec::ChainInfo;
use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv};
use reth_db_api::{database::Database, models::StoredBlockBodyIndices};
use reth_errors::{RethError, RethResult};
use reth_node_types::{
BlockTy, HeaderTy, NodeTypes, NodeTypesWithDB, NodeTypesWithDBAdapter, ReceiptTy, TxTy,
BlockTy, HeaderTy, NodeTypesWithDB, NodeTypesWithDBAdapter, ReceiptTy, TxTy,
};
use reth_primitives_traits::{RecoveredBlock, SealedHeader};
use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment};
use reth_stages_types::{StageCheckpoint, StageId};
use reth_static_file_types::StaticFileSegment;
use reth_storage_api::{
BlockBodyIndicesProvider, NodePrimitivesProvider, TryIntoHistoricalStateProvider,
BlockBodyIndicesProvider, NodePrimitivesProvider, StorageSettings, StorageSettingsCache,
TryIntoHistoricalStateProvider,
};
use reth_storage_errors::provider::ProviderResult;
use reth_trie::HashedPostState;
@@ -64,31 +66,52 @@ pub struct ProviderFactory<N: NodeTypesWithDB> {
prune_modes: PruneModes,
/// The node storage handler.
storage: Arc<N::Storage>,
/// Storage configuration settings for this node
storage_settings: Arc<RwLock<StorageSettings>>,
}
impl<N: NodeTypes> ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>> {
impl<N: NodeTypesForProvider> ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>> {
/// Instantiates the builder for this type
pub fn builder() -> ProviderFactoryBuilder<N> {
ProviderFactoryBuilder::default()
}
}
impl<N: NodeTypesWithDB> ProviderFactory<N> {
impl<N: ProviderNodeTypes> ProviderFactory<N> {
/// Create new database provider factory.
pub fn new(
db: N::DB,
chain_spec: Arc<N::ChainSpec>,
static_file_provider: StaticFileProvider<N::Primitives>,
) -> Self {
Self {
) -> ProviderResult<Self> {
// Load storage settings from the database at init time. Creates a temporary provider
// to read persisted settings, falling back to legacy defaults if none exist.
//
// Both the factory and all providers it creates should share these cached settings.
let legacy_settings = StorageSettings::legacy();
let storage_settings = DatabaseProvider::<_, N>::new(
db.tx()?,
chain_spec.clone(),
static_file_provider.clone(),
Default::default(),
Default::default(),
Arc::new(RwLock::new(legacy_settings)),
)
.storage_settings()?
.unwrap_or(legacy_settings);
Ok(Self {
db,
chain_spec,
static_file_provider,
prune_modes: PruneModes::default(),
storage: Default::default(),
}
storage_settings: Arc::new(RwLock::new(storage_settings)),
})
}
}
impl<N: NodeTypesWithDB> ProviderFactory<N> {
/// Enables metrics on the static file provider.
pub fn with_static_files_metrics(mut self) -> Self {
self.static_file_provider = self.static_file_provider.with_metrics();
@@ -113,7 +136,17 @@ impl<N: NodeTypesWithDB> ProviderFactory<N> {
}
}
impl<N: NodeTypesWithDB<DB = Arc<DatabaseEnv>>> ProviderFactory<N> {
impl<N: NodeTypesWithDB> StorageSettingsCache for ProviderFactory<N> {
fn cached_storage_settings(&self) -> StorageSettings {
*self.storage_settings.read()
}
fn set_storage_settings_cache(&self, settings: StorageSettings) {
*self.storage_settings.write() = settings;
}
}
impl<N: ProviderNodeTypes<DB = Arc<DatabaseEnv>>> ProviderFactory<N> {
/// Create new database provider by passing a path. [`ProviderFactory`] will own the database
/// instance.
pub fn new_with_database_path<P: AsRef<Path>>(
@@ -122,13 +155,12 @@ impl<N: NodeTypesWithDB<DB = Arc<DatabaseEnv>>> ProviderFactory<N> {
args: DatabaseArguments,
static_file_provider: StaticFileProvider<N::Primitives>,
) -> RethResult<Self> {
Ok(Self {
db: Arc::new(init_db(path, args).map_err(RethError::msg)?),
Self::new(
Arc::new(init_db(path, args).map_err(RethError::msg)?),
chain_spec,
static_file_provider,
prune_modes: PruneModes::default(),
storage: Default::default(),
})
)
.map_err(RethError::Provider)
}
}
@@ -147,6 +179,7 @@ impl<N: ProviderNodeTypes> ProviderFactory<N> {
self.static_file_provider.clone(),
self.prune_modes.clone(),
self.storage.clone(),
self.storage_settings.clone(),
))
}
@@ -162,6 +195,7 @@ impl<N: ProviderNodeTypes> ProviderFactory<N> {
self.static_file_provider.clone(),
self.prune_modes.clone(),
self.storage.clone(),
self.storage_settings.clone(),
)))
}
@@ -545,13 +579,15 @@ where
N: NodeTypesWithDB<DB: fmt::Debug, ChainSpec: fmt::Debug, Storage: fmt::Debug>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self { db, chain_spec, static_file_provider, prune_modes, storage } = self;
let Self { db, chain_spec, static_file_provider, prune_modes, storage, storage_settings } =
self;
f.debug_struct("ProviderFactory")
.field("db", &db)
.field("chain_spec", &chain_spec)
.field("static_file_provider", &static_file_provider)
.field("prune_modes", &prune_modes)
.field("storage", &storage)
.field("storage_settings", &*storage_settings.read())
.finish()
}
}
@@ -564,6 +600,7 @@ impl<N: NodeTypesWithDB> Clone for ProviderFactory<N> {
static_file_provider: self.static_file_provider.clone(),
prune_modes: self.prune_modes.clone(),
storage: self.storage.clone(),
storage_settings: self.storage_settings.clone(),
}
}
}
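The settings loaded in `ProviderFactory::new` live behind a shared `Arc<RwLock<StorageSettings>>` that is cloned into every `DatabaseProvider`, so a cheap cache read is enough to branch on storage behaviour anywhere a `StorageSettingsCache` is available. A minimal sketch, with a hypothetical helper name:

use reth_provider::StorageSettingsCache;

/// Works for the factory and for any provider it created, since both share the
/// same underlying cached settings.
fn receipts_go_to_static_files<C: StorageSettingsCache>(cache: &C) -> bool {
    cache.cached_storage_settings().receipts_in_static_files
}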

View File

@@ -31,6 +31,7 @@ use alloy_primitives::{
Address, BlockHash, BlockNumber, TxHash, TxNumber, B256,
};
use itertools::Itertools;
use parking_lot::RwLock;
use rayon::slice::ParallelSliceMut;
use reth_chain_state::ExecutedBlock;
use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec};
@@ -39,7 +40,7 @@ use reth_db_api::{
database::Database,
models::{
sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress,
BlockNumberHashedAddress, ShardedKey, StoredBlockBodyIndices,
BlockNumberHashedAddress, ShardedKey, StorageSettings, StoredBlockBodyIndices,
},
table::Table,
tables,
@@ -57,8 +58,9 @@ use reth_prune_types::{
use reth_stages_types::{StageCheckpoint, StageId};
use reth_static_file_types::StaticFileSegment;
use reth_storage_api::{
BlockBodyIndicesProvider, BlockBodyReader, NodePrimitivesProvider, StateProvider,
StorageChangeSetReader, TryIntoHistoricalStateProvider,
BlockBodyIndicesProvider, BlockBodyReader, MetadataProvider, MetadataWriter,
NodePrimitivesProvider, StateProvider, StorageChangeSetReader, StorageSettingsCache,
TryIntoHistoricalStateProvider,
};
use reth_storage_errors::provider::ProviderResult;
use reth_trie::{
@@ -153,6 +155,8 @@ pub struct DatabaseProvider<TX, N: NodeTypes> {
prune_modes: PruneModes,
/// Node storage handler.
storage: Arc<N::Storage>,
/// Storage configuration settings for this node
storage_settings: Arc<RwLock<StorageSettings>>,
}
impl<TX, N: NodeTypes> DatabaseProvider<TX, N> {
@@ -248,8 +252,9 @@ impl<TX: DbTxMut, N: NodeTypes> DatabaseProvider<TX, N> {
static_file_provider: StaticFileProvider<N::Primitives>,
prune_modes: PruneModes,
storage: Arc<N::Storage>,
storage_settings: Arc<RwLock<StorageSettings>>,
) -> Self {
Self { tx, chain_spec, static_file_provider, prune_modes, storage }
Self { tx, chain_spec, static_file_provider, prune_modes, storage, storage_settings }
}
}
@@ -494,8 +499,9 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> DatabaseProvider<TX, N> {
static_file_provider: StaticFileProvider<N::Primitives>,
prune_modes: PruneModes,
storage: Arc<N::Storage>,
storage_settings: Arc<RwLock<StorageSettings>>,
) -> Self {
Self { tx, chain_spec, static_file_provider, prune_modes, storage }
Self { tx, chain_spec, static_file_provider, prune_modes, storage, storage_settings }
}
/// Consume `DbTx` or `DbTxMut`.
@@ -3133,6 +3139,28 @@ impl<TX: DbTx + 'static, N: NodeTypes + 'static> DBProvider for DatabaseProvider
}
}
impl<TX: DbTx, N: NodeTypes> MetadataProvider for DatabaseProvider<TX, N> {
fn get_metadata(&self, key: &str) -> ProviderResult<Option<Vec<u8>>> {
self.tx.get::<tables::Metadata>(key.to_string()).map_err(Into::into)
}
}
impl<TX: DbTxMut, N: NodeTypes> MetadataWriter for DatabaseProvider<TX, N> {
fn write_metadata(&self, key: &str, value: Vec<u8>) -> ProviderResult<()> {
self.tx.put::<tables::Metadata>(key.to_string(), value).map_err(Into::into)
}
}
impl<TX: Send + Sync, N: NodeTypes> StorageSettingsCache for DatabaseProvider<TX, N> {
fn cached_storage_settings(&self) -> StorageSettings {
*self.storage_settings.read()
}
fn set_storage_settings_cache(&self, settings: StorageSettings) {
*self.storage_settings.write() = settings;
}
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -1,5 +1,5 @@
use crate::{
providers::{ProviderNodeTypes, StaticFileProvider},
providers::{NodeTypesForProvider, ProviderNodeTypes, StaticFileProvider},
HashingWriter, ProviderFactory, TrieWriter,
};
use alloy_primitives::B256;
@@ -10,7 +10,7 @@ use reth_db::{
};
use reth_errors::ProviderResult;
use reth_ethereum_engine_primitives::EthEngineTypes;
use reth_node_types::{NodeTypes, NodeTypesWithDBAdapter};
use reth_node_types::NodeTypesWithDBAdapter;
use reth_primitives_traits::{Account, StorageEntry};
use reth_trie::StateRoot;
use reth_trie_db::DatabaseStateRoot;
@@ -50,7 +50,7 @@ pub fn create_test_provider_factory_with_chain_spec(
}
/// Creates test provider factory with provided chain spec.
pub fn create_test_provider_factory_with_node_types<N: NodeTypes>(
pub fn create_test_provider_factory_with_node_types<N: NodeTypesForProvider>(
chain_spec: Arc<N::ChainSpec>,
) -> ProviderFactory<NodeTypesWithDBAdapter<N, Arc<TempDatabase<DatabaseEnv>>>> {
let (static_dir, _) = create_test_static_files_dir();
@@ -60,6 +60,7 @@ pub fn create_test_provider_factory_with_node_types<N: NodeTypes>(
chain_spec,
StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"),
)
.expect("failed to create test provider factory")
}
/// Inserts the genesis alloc from the provided chain spec into the trie.

View File

@@ -32,6 +32,7 @@ alloy-consensus.workspace = true
alloy-rpc-types-engine.workspace = true
auto_impl.workspace = true
serde_json = { workspace = true, optional = true }
[features]
default = ["std"]
@@ -50,10 +51,12 @@ std = [
"reth-storage-errors/std",
"reth-db-models/std",
"reth-trie-common/std",
"serde_json?/std",
]
db-api = [
"dep:reth-db-api",
"dep:serde_json",
]
serde = [

View File

@@ -94,5 +94,12 @@ pub use state_writer::*;
mod header_sync_gap;
pub use header_sync_gap::HeaderSyncGapProvider;
#[cfg(feature = "db-api")]
pub mod metadata;
#[cfg(feature = "db-api")]
pub use metadata::{MetadataProvider, MetadataWriter, StorageSettingsCache};
#[cfg(feature = "db-api")]
pub use reth_db_api::models::StorageSettings;
mod full;
pub use full::*;

View File

@@ -0,0 +1,53 @@
//! Metadata provider trait for reading and writing node metadata.
use reth_db_api::models::StorageSettings;
use reth_storage_errors::provider::{ProviderError, ProviderResult};
/// Metadata keys.
pub mod keys {
/// Storage configuration settings for this node.
pub const STORAGE_SETTINGS: &str = "storage_settings";
}
/// Client trait for reading node metadata from the database.
#[auto_impl::auto_impl(&, Arc)]
pub trait MetadataProvider: Send + Sync {
/// Get a metadata value by key
fn get_metadata(&self, key: &str) -> ProviderResult<Option<Vec<u8>>>;
/// Get storage settings for this node
fn storage_settings(&self) -> ProviderResult<Option<StorageSettings>> {
self.get_metadata(keys::STORAGE_SETTINGS)?
.map(|bytes| serde_json::from_slice(&bytes).map_err(ProviderError::other))
.transpose()
}
}
/// Client trait for writing node metadata to the database.
pub trait MetadataWriter: Send + Sync {
/// Write a metadata value
fn write_metadata(&self, key: &str, value: Vec<u8>) -> ProviderResult<()>;
/// Write storage settings for this node
///
/// Be sure to update the provider factory cache with
/// [`StorageSettingsCache::set_storage_settings_cache`].
fn write_storage_settings(&self, settings: StorageSettings) -> ProviderResult<()> {
self.write_metadata(
keys::STORAGE_SETTINGS,
serde_json::to_vec(&settings).map_err(ProviderError::other)?,
)
}
}
/// Trait for caching storage settings on a provider factory.
pub trait StorageSettingsCache: Send + Sync {
/// Gets the cached storage settings.
fn cached_storage_settings(&self) -> StorageSettings;
/// Sets the storage settings of this `ProviderFactory`.
///
/// IMPORTANT: This does not persist the settings to storage; that should be done via
/// [`MetadataWriter::write_storage_settings`].
fn set_storage_settings_cache(&self, settings: StorageSettings);
}
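A minimal sketch of the intended write path, combining the two traits above as the `write_storage_settings` doc comment suggests; the helper is illustrative and not part of the commit:

use reth_db_api::models::StorageSettings;
use reth_storage_errors::provider::ProviderResult;

/// Persist new settings, then refresh the factory-level cache so subsequent
/// `cached_storage_settings()` calls observe the change without a DB read.
fn update_storage_settings<W, C>(
    provider_rw: &W,
    factory: &C,
    settings: StorageSettings,
) -> ProviderResult<()>
where
    W: MetadataWriter,
    C: StorageSettingsCache,
{
    provider_rw.write_storage_settings(settings)?;
    factory.set_storage_settings_cache(settings);
    Ok(())
}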

View File

@@ -53,7 +53,7 @@ async fn main() -> eyre::Result<()> {
db.clone(),
spec.clone(),
StaticFileProvider::read_only(db_path.join("static_files"), true)?,
);
)?;
// 2. Set up the blockchain provider using only the database provider and a noop for the tree to
// satisfy trait bounds. Tree is not used in this example since we are only operating on the