feat(op): nonce replay (#7781)

This commit is contained in:
Emilia Hane
2024-05-01 15:44:50 +02:00
committed by GitHub
parent f832b66f99
commit 99db2b352f
8 changed files with 455 additions and 126 deletions

View File

@@ -6,7 +6,8 @@ use crate::{
LogArgs,
},
commands::{
config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, init_state, node, node::NoArgs,
config_cmd, db, debug_cmd, dump_genesis, import, import_op, init_cmd, init_state,
node::{self, NoArgs},
p2p, recover, stage, test_vectors,
},
version::{LONG_VERSION, SHORT_VERSION},
@@ -147,6 +148,7 @@ impl<Ext: clap::Args + fmt::Debug> Cli<Ext> {
Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute()),
Commands::InitState(command) => runner.run_blocking_until_ctrl_c(command.execute()),
Commands::Import(command) => runner.run_blocking_until_ctrl_c(command.execute()),
Commands::ImportOp(command) => runner.run_blocking_until_ctrl_c(command.execute()),
Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()),
Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()),
Commands::Stage(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)),
@@ -183,6 +185,9 @@ pub enum Commands<Ext: clap::Args + fmt::Debug = NoArgs> {
/// This syncs RLP encoded blocks from a file.
#[command(name = "import")]
Import(import::ImportCommand),
/// This syncs RLP encoded OP blocks below Bedrock from a file, without executing.
#[command(name = "import-op")]
ImportOp(import_op::ImportOpCommand),
/// Dumps genesis block JSON configuration to stdout.
DumpGenesis(dump_genesis::DumpGenesisCommand),
/// Database debugging utilities

View File

@@ -14,7 +14,7 @@ use futures::{Stream, StreamExt};
use reth_beacon_consensus::BeaconConsensus;
use reth_config::{config::EtlConfig, Config};
use reth_consensus::Consensus;
use reth_db::{database::Database, init_db};
use reth_db::{database::Database, init_db, tables, transaction::DbTx};
use reth_downloaders::{
bodies::bodies::BodiesDownloaderBuilder,
file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE},
@@ -30,8 +30,8 @@ use reth_node_ethereum::EthEvmConfig;
use reth_node_events::node::NodeEvent;
use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256};
use reth_provider::{
BlockNumReader, HeaderProvider, HeaderSyncMode, ProviderError, ProviderFactory,
StageCheckpointReader, StaticFileProviderFactory,
BlockNumReader, ChainSpecProvider, HeaderProvider, HeaderSyncMode, ProviderError,
ProviderFactory, StageCheckpointReader, StaticFileProviderFactory,
};
use reth_stages::{
prelude::*,
@@ -41,7 +41,7 @@ use reth_stages::{
use reth_static_file::StaticFileProducer;
use std::{path::PathBuf, sync::Arc};
use tokio::sync::watch;
use tracing::{debug, info};
use tracing::{debug, error, info};
/// Stages that require state.
const STATE_STAGES: &[StageId] = &[
@@ -87,11 +87,6 @@ pub struct ImportCommand {
#[arg(long, verbatim_doc_comment)]
no_state: bool,
/// Import OP Mainnet chain below Bedrock. Caution! Flag must be set as env var, since the env
/// var is read by another process too, in order to make below Bedrock import work.
#[arg(long, verbatim_doc_comment, env = "OP_RETH_MAINNET_BELOW_BEDROCK")]
op_mainnet_below_bedrock: bool,
/// Chunk byte length.
#[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)]
chunk_len: Option<u64>,
@@ -109,27 +104,23 @@ pub struct ImportCommand {
impl ImportCommand {
/// Execute `import` command
pub async fn execute(mut self) -> eyre::Result<()> {
pub async fn execute(self) -> eyre::Result<()> {
info!(target: "reth::cli", "reth {} starting", SHORT_VERSION);
if self.op_mainnet_below_bedrock {
self.no_state = true;
debug!(target: "reth::cli", "Importing OP mainnet below bedrock");
}
if self.no_state {
debug!(target: "reth::cli", "Stages requiring state disabled");
info!(target: "reth::cli", "Disabled stages requiring state");
}
debug!(target: "reth::cli",
chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), "Chunking chain import"
chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE),
"Chunking chain import"
);
// add network name to data dir
let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path());
let mut config: Config = self.load_config(config_path.clone())?;
let mut config: Config = load_config(config_path.clone())?;
info!(target: "reth::cli", path = ?config_path, "Configuration loaded");
// Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to
@@ -155,6 +146,9 @@ impl ImportCommand {
// open file
let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?;
let mut total_decoded_blocks = 0;
let mut total_decoded_txns = 0;
while let Some(file_client) = reader.next_chunk().await? {
// create a new FileClient from chunk read from file
info!(target: "reth::cli",
@@ -164,20 +158,22 @@ impl ImportCommand {
let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?;
info!(target: "reth::cli", "Chain file chunk read");
let (mut pipeline, events) = self
.build_import_pipeline(
&config,
total_decoded_blocks += file_client.headers_len();
total_decoded_txns += file_client.total_transactions();
let (mut pipeline, events) = build_import_pipeline(
&config,
provider_factory.clone(),
&consensus,
Arc::new(file_client),
StaticFileProducer::new(
provider_factory.clone(),
&consensus,
Arc::new(file_client),
StaticFileProducer::new(
provider_factory.clone(),
provider_factory.static_file_provider(),
PruneModes::default(),
),
self.no_state,
)
.await?;
provider_factory.static_file_provider(),
PruneModes::default(),
),
true,
)
.await?;
// override the tip
pipeline.set_tip(tip);
@@ -202,104 +198,129 @@ impl ImportCommand {
}
}
info!(target: "reth::cli", "Chain file imported");
Ok(())
}
let provider = provider_factory.provider()?;
async fn build_import_pipeline<DB, C>(
&self,
config: &Config,
provider_factory: ProviderFactory<DB>,
consensus: &Arc<C>,
file_client: Arc<FileClient>,
static_file_producer: StaticFileProducer<DB>,
no_state: bool,
) -> eyre::Result<(Pipeline<DB>, impl Stream<Item = NodeEvent>)>
where
DB: Database + Clone + Unpin + 'static,
C: Consensus + 'static,
{
if !file_client.has_canonical_blocks() {
eyre::bail!("unable to import non canonical blocks");
let total_imported_blocks = provider.tx_ref().entries::<tables::HeaderNumbers>()?;
let total_imported_txns = provider.tx_ref().entries::<tables::TransactionHashNumbers>()?;
if total_decoded_blocks != total_imported_blocks ||
total_decoded_txns != total_imported_txns
{
error!(target: "reth::cli",
total_decoded_blocks,
total_imported_blocks,
total_decoded_txns,
total_imported_txns,
"Chain was partially imported"
);
}
// Retrieve latest header found in the database.
let last_block_number = provider_factory.last_block_number()?;
let local_head = provider_factory
.sealed_header(last_block_number)?
.ok_or(ProviderError::HeaderNotFound(last_block_number.into()))?;
info!(target: "reth::cli",
total_imported_blocks,
total_imported_txns,
"Chain file imported"
);
let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers)
.build(file_client.clone(), consensus.clone())
.into_task();
// TODO: The pipeline should correctly configure the downloader on its own.
// Find the possibility to remove unnecessary pre-configuration.
header_downloader.update_local_head(local_head);
header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap()));
Ok(())
}
}
let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies)
.build(file_client.clone(), consensus.clone(), provider_factory.clone())
.into_task();
// TODO: The pipeline should correctly configure the downloader on its own.
// Find the possibility to remove unnecessary pre-configuration.
body_downloader
.set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap())
.expect("failed to set download range");
/// Builds import pipeline.
///
/// If configured to execute, all stages will run. Otherwise, only stages that don't require state
/// will run.
pub async fn build_import_pipeline<DB, C>(
config: &Config,
provider_factory: ProviderFactory<DB>,
consensus: &Arc<C>,
file_client: Arc<FileClient>,
static_file_producer: StaticFileProducer<DB>,
should_exec: bool,
) -> eyre::Result<(Pipeline<DB>, impl Stream<Item = NodeEvent>)>
where
DB: Database + Clone + Unpin + 'static,
C: Consensus + 'static,
{
if !file_client.has_canonical_blocks() {
eyre::bail!("unable to import non canonical blocks");
}
let (tip_tx, tip_rx) = watch::channel(B256::ZERO);
let factory =
reth_revm::EvmProcessorFactory::new(self.chain.clone(), EthEvmConfig::default());
// Retrieve latest header found in the database.
let last_block_number = provider_factory.last_block_number()?;
let local_head = provider_factory
.sealed_header(last_block_number)?
.ok_or(ProviderError::HeaderNotFound(last_block_number.into()))?;
let max_block = file_client.max_block().unwrap_or(0);
let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers)
.build(file_client.clone(), consensus.clone())
.into_task();
// TODO: The pipeline should correctly configure the downloader on its own.
// Find the possibility to remove unnecessary pre-configuration.
header_downloader.update_local_head(local_head);
header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap()));
let mut pipeline = Pipeline::builder()
.with_tip_sender(tip_tx)
// we want to sync all blocks the file client provides or 0 if empty
.with_max_block(max_block)
.add_stages(
DefaultStages::new(
provider_factory.clone(),
HeaderSyncMode::Tip(tip_rx),
consensus.clone(),
header_downloader,
body_downloader,
factory.clone(),
config.stages.etl.clone(),
)
.set(SenderRecoveryStage {
commit_threshold: config.stages.sender_recovery.commit_threshold,
})
.set(ExecutionStage::new(
factory,
ExecutionStageThresholds {
max_blocks: config.stages.execution.max_blocks,
max_changes: config.stages.execution.max_changes,
max_cumulative_gas: config.stages.execution.max_cumulative_gas,
max_duration: config.stages.execution.max_duration,
},
config
.stages
.merkle
.clean_threshold
.max(config.stages.account_hashing.clean_threshold)
.max(config.stages.storage_hashing.clean_threshold),
config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(),
ExExManagerHandle::empty(),
))
.disable_all_if(STATE_STAGES, || no_state),
let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies)
.build(file_client.clone(), consensus.clone(), provider_factory.clone())
.into_task();
// TODO: The pipeline should correctly configure the downloader on its own.
// Find the possibility to remove unnecessary pre-configuration.
body_downloader
.set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap())
.expect("failed to set download range");
let (tip_tx, tip_rx) = watch::channel(B256::ZERO);
let factory =
reth_revm::EvmProcessorFactory::new(provider_factory.chain_spec(), EthEvmConfig::default());
let max_block = file_client.max_block().unwrap_or(0);
let mut pipeline = Pipeline::builder()
.with_tip_sender(tip_tx)
// we want to sync all blocks the file client provides or 0 if empty
.with_max_block(max_block)
.add_stages(
DefaultStages::new(
provider_factory.clone(),
HeaderSyncMode::Tip(tip_rx),
consensus.clone(),
header_downloader,
body_downloader,
factory.clone(),
config.stages.etl.clone(),
)
.build(provider_factory, static_file_producer);
.set(SenderRecoveryStage {
commit_threshold: config.stages.sender_recovery.commit_threshold,
})
.set(ExecutionStage::new(
factory,
ExecutionStageThresholds {
max_blocks: config.stages.execution.max_blocks,
max_changes: config.stages.execution.max_changes,
max_cumulative_gas: config.stages.execution.max_cumulative_gas,
max_duration: config.stages.execution.max_duration,
},
config
.stages
.merkle
.clean_threshold
.max(config.stages.account_hashing.clean_threshold)
.max(config.stages.storage_hashing.clean_threshold),
config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(),
ExExManagerHandle::empty(),
))
.disable_all_if(STATE_STAGES, || should_exec),
)
.build(provider_factory, static_file_producer);
let events = pipeline.events().map(Into::into);
let events = pipeline.events().map(Into::into);
Ok((pipeline, events))
}
Ok((pipeline, events))
}
/// Loads the reth config
fn load_config(&self, config_path: PathBuf) -> eyre::Result<Config> {
confy::load_path::<Config>(config_path.clone())
.wrap_err_with(|| format!("Could not load config file {config_path:?}"))
}
/// Loads the reth config
pub fn load_config(config_path: PathBuf) -> eyre::Result<Config> {
confy::load_path::<Config>(config_path.clone())
.wrap_err_with(|| format!("Could not load config file {config_path:?}"))
}
#[cfg(test)]

View File

@@ -0,0 +1,274 @@
//! Command that initializes the node by importing a chain from a file.
use crate::{
args::{
utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS},
DatabaseArgs,
},
commands::import::{build_import_pipeline, load_config},
dirs::{DataDirPath, MaybePlatformPath},
version::SHORT_VERSION,
};
use clap::Parser;
use reth_beacon_consensus::BeaconConsensus;
use reth_config::{config::EtlConfig, Config};
use reth_db::{init_db, tables, transaction::DbTx};
use reth_downloaders::file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE};
use reth_node_core::init::init_genesis;
use reth_primitives::{hex, stage::StageId, ChainSpec, PruneModes, TxHash};
use reth_provider::{ProviderFactory, StageCheckpointReader, StaticFileProviderFactory};
use reth_static_file::StaticFileProducer;
use std::{path::PathBuf, sync::Arc};
use tracing::{debug, error, info};
/// Syncs RLP encoded OP Mainnet blocks below Bedrock from a file, without executing them.
#[derive(Debug, Parser)]
pub struct ImportOpCommand {
/// The path to the configuration file to use.
#[arg(long, value_name = "FILE", verbatim_doc_comment)]
config: Option<PathBuf>,
/// The path to the data dir for all reth files and subdirectories.
///
/// Defaults to the OS-specific data directory:
///
/// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/`
/// - Windows: `{FOLDERID_RoamingAppData}/reth/`
/// - macOS: `$HOME/Library/Application Support/reth/`
#[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)]
datadir: MaybePlatformPath<DataDirPath>,
/// The chain this node is running.
///
/// Possible values are either a built-in chain or the path to a chain specification file.
#[arg(
long,
value_name = "CHAIN_OR_PATH",
long_help = chain_help(),
default_value = SUPPORTED_CHAINS[0],
value_parser = genesis_value_parser
)]
chain: Arc<ChainSpec>,
/// Chunk byte length.
///
/// Defaults to `DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE` when not set.
#[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)]
chunk_len: Option<u64>,
#[command(flatten)]
db: DatabaseArgs,
/// The path to a block file for import.
///
/// The online stages (headers and bodies) are replaced by a file import, after which the
/// remaining stages are executed.
#[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)]
path: PathBuf,
}
impl ImportOpCommand {
    /// Execute `import-op` command.
    ///
    /// Imports the OP Mainnet chain below Bedrock from a chain file, filtering out the known
    /// replayed (duplicate) transactions, and verifies afterwards that the database holds as
    /// many blocks and transactions as were decoded from the file.
    pub async fn execute(self) -> eyre::Result<()> {
        info!(target: "reth::cli", "reth {} starting", SHORT_VERSION);

        // OVM state changes cannot be executed by reth, so stages requiring state are
        // disabled unconditionally for this command (`should_exec = false` below).
        info!(target: "reth::cli",
            "Disabled stages requiring state, since cannot execute OVM state changes"
        );

        debug!(target: "reth::cli",
            chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE),
            "Chunking chain import"
        );

        // add network name to data dir
        let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path());

        let mut config: Config = load_config(config_path.clone())?;
        info!(target: "reth::cli", path = ?config_path, "Configuration loaded");

        // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to
        if config.stages.etl.dir.is_none() {
            config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path()));
        }

        let db_path = data_dir.db_path();

        info!(target: "reth::cli", path = ?db_path, "Opening database");
        let db = Arc::new(init_db(db_path, self.db.database_args())?);
        info!(target: "reth::cli", "Database opened");
        let provider_factory =
            ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?;

        debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis");

        init_genesis(provider_factory.clone())?;

        let consensus = Arc::new(BeaconConsensus::new(self.chain.clone()));
        info!(target: "reth::cli", "Consensus engine initialized");

        // open file
        let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?;

        // Running totals across all chunks, compared against the database after import.
        let mut total_decoded_blocks = 0;
        let mut total_decoded_txns = 0;
        let mut total_filtered_out_dup_txns = 0;

        while let Some(mut file_client) = reader.next_chunk().await? {
            // create a new FileClient from chunk read from file
            info!(target: "reth::cli",
                "Importing chain file chunk"
            );

            let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?;
            info!(target: "reth::cli", "Chain file chunk read");

            total_decoded_blocks += file_client.headers_len();
            // Count decoded transactions, not bodies: `bodies_len` is the number of blocks
            // in the chunk, which would make the decoded-vs-imported check below meaningless.
            total_decoded_txns += file_client.total_transactions();

            // Filter out replayed transactions, keeping only the original occurrence of each
            // duplicate hash. Replays would otherwise collide in the transaction lookup table.
            for (block_number, body) in file_client.bodies_iter_mut() {
                body.transactions.retain(|tx| {
                    if is_duplicate(tx.hash, *block_number) {
                        total_filtered_out_dup_txns += 1;
                        return false
                    }
                    true
                })
            }

            let (mut pipeline, events) = build_import_pipeline(
                &config,
                provider_factory.clone(),
                &consensus,
                Arc::new(file_client),
                StaticFileProducer::new(
                    provider_factory.clone(),
                    provider_factory.static_file_provider(),
                    PruneModes::default(),
                ),
                // never execute: OVM state changes cannot be executed by reth
                false,
            )
            .await?;

            // override the tip
            pipeline.set_tip(tip);
            debug!(target: "reth::cli", ?tip, "Tip manually set");

            let provider = provider_factory.provider()?;

            let latest_block_number =
                provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number);
            tokio::spawn(reth_node_events::node::handle_events(
                None,
                latest_block_number,
                events,
                db.clone(),
            ));

            // Run pipeline
            info!(target: "reth::cli", "Starting sync pipeline");
            tokio::select! {
                res = pipeline.run() => res?,
                _ = tokio::signal::ctrl_c() => {},
            }
        }

        let provider = provider_factory.provider()?;

        // Use `HeaderNumbers` for the block count — the same table the regular `import`
        // command uses for this check — keeping both commands' accounting consistent.
        let total_imported_blocks = provider.tx_ref().entries::<tables::HeaderNumbers>()?;
        let total_imported_txns = provider.tx_ref().entries::<tables::TransactionHashNumbers>()?;

        // Duplicates were deliberately dropped before import, so the imported transaction
        // count is expected to be short by exactly the number of filtered transactions.
        if total_decoded_blocks != total_imported_blocks ||
            total_decoded_txns != total_imported_txns + total_filtered_out_dup_txns
        {
            error!(target: "reth::cli",
                total_decoded_blocks,
                total_imported_blocks,
                total_decoded_txns,
                total_filtered_out_dup_txns,
                total_imported_txns,
                "Chain was partially imported"
            );
        }

        info!(target: "reth::cli",
            total_imported_blocks,
            total_imported_txns,
            total_filtered_out_dup_txns,
            "Chain file imported"
        );

        Ok(())
    }
}
/// A transaction that has been replayed in chain below Bedrock.
#[derive(Debug)]
pub struct ReplayedTx {
// Hash shared by all occurrences of the replayed transaction.
tx_hash: TxHash,
// Block number of the first (original) occurrence; later occurrences are duplicates.
original_block: u64,
}
impl ReplayedTx {
/// Returns a new instance pairing a replayed transaction hash with the block number of its
/// original occurrence.
pub const fn new(tx_hash: TxHash, original_block: u64) -> Self {
Self { tx_hash, original_block }
}
}
/// Transaction 0x9ed8..9cb9, first seen in block 985.
pub const TX_BLOCK_985: ReplayedTx = ReplayedTx::new(
TxHash::new(hex!("9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9")),
985,
);
/// Transaction 0xc033..6cb6, first seen in block 123 322.
pub const TX_BLOCK_123_322: ReplayedTx = ReplayedTx::new(
TxHash::new(hex!("c033250c5a45f9d104fc28640071a776d146d48403cf5e95ed0015c712e26cb6")),
123_322,
);
/// Transaction 0x86f8..76e5, first seen in block 1 133 328.
pub const TX_BLOCK_1_133_328: ReplayedTx = ReplayedTx::new(
TxHash::new(hex!("86f8c77cfa2b439e9b4e92a10f6c17b99fce1220edf4001e4158b57f41c576e5")),
1_133_328,
);
/// Transaction 0x3cc2..cd4e, first seen in block 1 244 152.
pub const TX_BLOCK_1_244_152: ReplayedTx = ReplayedTx::new(
TxHash::new(hex!("3cc27e7cc8b7a9380b2b2f6c224ea5ef06ade62a6af564a9dd0bcca92131cd4e")),
1_244_152,
);
/// List of original occurrences of all duplicate transactions below Bedrock.
pub const TX_DUP_ORIGINALS: [ReplayedTx; 4] =
[TX_BLOCK_985, TX_BLOCK_123_322, TX_BLOCK_1_133_328, TX_BLOCK_1_244_152];
/// Returns `true` if the transaction with the given hash appears at a block other than its
/// original occurrence, i.e. it is a known below-Bedrock replay of the transaction.
pub fn is_duplicate(tx_hash: TxHash, block_number: u64) -> bool {
    TX_DUP_ORIGINALS
        .iter()
        .any(|original| tx_hash == original.tx_hash && block_number != original.original_block)
}
#[cfg(test)]
mod tests {
use super::*;
// Ensures `--chain` parses for every built-in chain when combined with a path argument,
// mirroring the equivalent test for the regular `import` command.
#[test]
fn parse_common_import_command_chain_args() {
for chain in SUPPORTED_CHAINS {
let args: ImportOpCommand =
ImportOpCommand::parse_from(["reth", "--chain", chain, "."]);
assert_eq!(
Ok(args.chain.chain),
chain.parse::<reth_primitives::Chain>(),
"failed to parse chain {chain}"
);
}
}
}

View File

@@ -5,6 +5,7 @@ pub mod db;
pub mod debug_cmd;
pub mod dump_genesis;
pub mod import;
pub mod import_op;
pub mod init_cmd;
pub mod init_state;

View File

@@ -222,6 +222,23 @@ impl FileClient {
pub fn bodies_len(&self) -> usize {
self.bodies.len()
}
/// Returns an iterator over headers in the client.
///
/// Takes `&self`: iteration only reads the headers map, so no exclusive borrow is needed,
/// and callers holding a shared reference can use it too (loosening is backward compatible).
pub fn headers_iter(&self) -> impl Iterator<Item = &Header> {
    self.headers.values()
}
/// Returns a mutable iterator over bodies in the client.
///
/// NOTE(review): pairs `headers.keys()` with `bodies.values_mut()` positionally — this is
/// only correct if both maps contain exactly the same block numbers in the same iteration
/// order. Presumably that invariant holds for a client decoded from a chain file; confirm.
pub fn bodies_iter_mut(&mut self) -> impl Iterator<Item = (&u64, &mut BlockBody)> {
let bodies = &mut self.bodies;
let headers = &self.headers;
headers.keys().zip(bodies.values_mut())
}
/// Returns the current number of transactions in the client.
pub fn total_transactions(&self) -> usize {
    // Sum per-body lengths instead of flat-mapping over every transaction: `Vec::len` is
    // O(1), so this is O(#bodies) rather than O(#transactions), with identical result.
    self.bodies.values().map(|body| body.transactions.len()).sum()
}
}
impl HeadersClient for FileClient {

View File

@@ -79,6 +79,11 @@ impl StageId {
matches!(self, StageId::Headers | StageId::Bodies)
}
/// Returns `true` if it's [TransactionLookup](StageId::TransactionLookup) stage.
pub fn is_tx_lookup(&self) -> bool {
    match self {
        StageId::TransactionLookup => true,
        _ => false,
    }
}
/// Returns true indicating if it's the finish stage [StageId::Finish]
pub fn is_finish(&self) -> bool {
matches!(self, StageId::Finish)

View File

@@ -153,18 +153,19 @@ impl<DB: Database> Stage<DB> for TransactionLookupStage {
);
}
let key = RawKey::<TxHash>::from_vec(hash);
if append_only {
txhash_cursor.append(
RawKey::<TxHash>::from_vec(hash),
RawValue::<TxNumber>::from_vec(number),
)?;
txhash_cursor.append(key, RawValue::<TxNumber>::from_vec(number))?
} else {
txhash_cursor.insert(
RawKey::<TxHash>::from_vec(hash),
RawValue::<TxNumber>::from_vec(number),
)?;
txhash_cursor.insert(key, RawValue::<TxNumber>::from_vec(number))?
}
}
trace!(target: "sync::stages::transaction_lookup",
total_hashes,
"Transaction hashes inserted"
);
break
}
}

View File

@@ -354,6 +354,11 @@ impl<TX: DbTx> DatabaseProvider<TX> {
|_| true,
)
}
/// Returns a reference to the [`ChainSpec`] this provider was constructed with.
pub fn chain_spec(&self) -> &ChainSpec {
&self.chain_spec
}
}
impl<TX: DbTxMut + DbTx> DatabaseProvider<TX> {