feat: move optimism commands to a crate and remove from bin (#9242)

Author: Luca Provini
Co-authored-by: Matthias Seitz <matthias.seitz@outlook.de>
Date: 2024-07-03 16:39:31 +02:00 (committed by GitHub)
Parent: 1998f44b1b
Commit: 08fc298e55
10 changed files with 192 additions and 6 deletions

View File: Cargo.toml

@@ -8,3 +8,49 @@ homepage.workspace = true
repository.workspace = true
[lints]
workspace = true
[dependencies]
reth-static-file-types = { workspace = true, features = ["clap"] }
clap = { workspace = true, features = ["derive", "env"] }
reth-cli-commands.workspace = true
reth-consensus.workspace = true
reth-db = { workspace = true, features = ["mdbx"] }
reth-db-api.workspace = true
reth-downloaders.workspace = true
reth-optimism-primitives.workspace = true
reth-provider.workspace = true
reth-prune.workspace = true
reth-stages.workspace = true
reth-static-file.workspace = true
reth-execution-types.workspace = true
reth-node-core.workspace = true
reth-primitives.workspace = true
reth-stages-types.workspace = true
reth-node-events.workspace = true
reth-network-p2p.workspace = true
reth-errors.workspace = true
reth-config.workspace = true
alloy-primitives.workspace = true
futures-util.workspace = true
reth-evm-optimism.workspace = true
tokio = { workspace = true, features = [
"sync",
"macros",
"time",
"rt-multi-thread",
] }
tracing.workspace = true
eyre.workspace = true
[features]
optimism = [
"reth-primitives/optimism",
"reth-evm-optimism/optimism",
]

View File: src/commands/build_pipeline.rs (new file)

@@ -0,0 +1,96 @@
use alloy_primitives::B256;
use futures_util::{Stream, StreamExt};
use reth_config::Config;
use reth_consensus::Consensus;
use reth_db_api::database::Database;
use reth_downloaders::{
bodies::bodies::BodiesDownloaderBuilder, file_client::FileClient,
headers::reverse_headers::ReverseHeadersDownloaderBuilder,
};
use reth_errors::ProviderError;
use reth_evm_optimism::OpExecutorProvider;
use reth_network_p2p::{
bodies::downloader::BodyDownloader,
headers::downloader::{HeaderDownloader, SyncTarget},
};
use reth_node_events::node::NodeEvent;
use reth_provider::{BlockNumReader, ChainSpecProvider, HeaderProvider, ProviderFactory};
use reth_prune::PruneModes;
use reth_stages::{sets::DefaultStages, Pipeline, StageSet};
use reth_stages_types::StageId;
use reth_static_file::StaticFileProducer;
use std::sync::Arc;
use tokio::sync::watch;
/// Builds import pipeline.
///
/// If configured to execute, all stages will run. Otherwise, only stages that don't require state
/// will run.
pub async fn build_import_pipeline<DB, C>(
config: &Config,
provider_factory: ProviderFactory<DB>,
consensus: &Arc<C>,
file_client: Arc<FileClient>,
static_file_producer: StaticFileProducer<DB>,
disable_exec: bool,
) -> eyre::Result<(Pipeline<DB>, impl Stream<Item = NodeEvent>)>
where
DB: Database + Clone + Unpin + 'static,
C: Consensus + 'static,
{
if !file_client.has_canonical_blocks() {
eyre::bail!("unable to import non canonical blocks");
}
// Retrieve latest header found in the database.
let last_block_number = provider_factory.last_block_number()?;
let local_head = provider_factory
.sealed_header(last_block_number)?
.ok_or(ProviderError::HeaderNotFound(last_block_number.into()))?;
let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers)
.build(file_client.clone(), consensus.clone())
.into_task();
// TODO: The pipeline should correctly configure the downloader on its own.
// Find the possibility to remove unnecessary pre-configuration.
header_downloader.update_local_head(local_head);
header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap()));
let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies)
.build(file_client.clone(), consensus.clone(), provider_factory.clone())
.into_task();
// TODO: The pipeline should correctly configure the downloader on its own.
// Find the possibility to remove unnecessary pre-configuration.
body_downloader
.set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap())
.expect("failed to set download range");
let (tip_tx, tip_rx) = watch::channel(B256::ZERO);
let executor = OpExecutorProvider::optimism(provider_factory.chain_spec());
let max_block = file_client.max_block().unwrap_or(0);
let pipeline = Pipeline::builder()
.with_tip_sender(tip_tx)
// we want to sync all blocks the file client provides or 0 if empty
.with_max_block(max_block)
.add_stages(
DefaultStages::new(
provider_factory.clone(),
tip_rx,
consensus.clone(),
header_downloader,
body_downloader,
executor,
config.stages.clone(),
PruneModes::default(),
)
.builder()
.disable_all_if(&StageId::STATE_REQUIRED, || disable_exec),
)
.build(provider_factory, static_file_producer);
let events = pipeline.events().map(Into::into);
Ok((pipeline, events))
}
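For orientation, here is a minimal sketch of how this helper is driven, mirroring the call that `ImportOpCommand` makes further down in this diff. It assumes the function lives alongside `build_import_pipeline` in the same module and that a `FileClient` has already been read from disk; it is illustrative, not part of the commit:

// Sketch only: assumes the same module as build_import_pipeline above, and a
// `file_client` produced by a ChunkedFileReader as shown in import.rs below.
use std::sync::Arc;

use reth_config::Config;
use reth_consensus::noop::NoopConsensus;
use reth_db_api::database::Database;
use reth_downloaders::file_client::FileClient;
use reth_provider::ProviderFactory;
use reth_prune::PruneModes;
use reth_static_file::StaticFileProducer;

async fn run_file_import<DB>(
    config: &Config,
    provider_factory: ProviderFactory<DB>,
    file_client: FileClient,
) -> eyre::Result<()>
where
    DB: Database + Clone + Unpin + 'static,
{
    // inputs come from a trusted export, so consensus checks are skipped
    let consensus = Arc::new(NoopConsensus::default());
    let tip = file_client.tip().ok_or_else(|| eyre::eyre!("file client has no tip"))?;
    let (mut pipeline, _events) = build_import_pipeline(
        config,
        provider_factory.clone(),
        &consensus,
        Arc::new(file_client),
        StaticFileProducer::new(provider_factory, PruneModes::default()),
        true, // disable execution stages: OVM state changes cannot be re-executed
    )
    .await?;
    // sync up to the tip of the imported chain segment
    pipeline.set_tip(tip);
    pipeline.run().await?;
    Ok(())
}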

View File: src/commands/import.rs (new file)

@@ -0,0 +1,150 @@
//! Command that initializes the node by importing the OP Mainnet chain segment below Bedrock from
//! a file.
use clap::Parser;
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_consensus::noop::NoopConsensus;
use reth_db::tables;
use reth_db_api::transaction::DbTx;
use reth_downloaders::file_client::{
ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE,
};
use reth_node_core::version::SHORT_VERSION;
use reth_optimism_primitives::bedrock_import::is_dup_tx;
use reth_provider::StageCheckpointReader;
use reth_prune::PruneModes;
use reth_stages::StageId;
use reth_static_file::StaticFileProducer;
use std::{path::PathBuf, sync::Arc};
use tracing::{debug, error, info};
use crate::commands::build_pipeline::build_import_pipeline;
/// Syncs RLP encoded blocks from a file.
#[derive(Debug, Parser)]
pub struct ImportOpCommand {
#[command(flatten)]
env: EnvironmentArgs,
/// Chunk byte length to read from file.
#[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)]
chunk_len: Option<u64>,
/// The path to a block file for import.
///
/// The online stages (headers and bodies) are replaced by a file import, after which the
/// remaining stages are executed.
#[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)]
path: PathBuf,
}
impl ImportOpCommand {
/// Execute `import` command
pub async fn execute(self) -> eyre::Result<()> {
info!(target: "reth::cli", "reth {} starting", SHORT_VERSION);
info!(target: "reth::cli",
"Disabled stages requiring state, since cannot execute OVM state changes"
);
debug!(target: "reth::cli",
chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE),
"Chunking chain import"
);
let Environment { provider_factory, config, .. } = self.env.init(AccessRights::RW)?;
// we use noop here because we expect the inputs to be valid
let consensus = Arc::new(NoopConsensus::default());
// open file
let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?;
let mut total_decoded_blocks = 0;
let mut total_decoded_txns = 0;
let mut total_filtered_out_dup_txns = 0;
while let Some(mut file_client) = reader.next_chunk::<FileClient>().await? {
// create a new FileClient from chunk read from file
info!(target: "reth::cli",
"Importing chain file chunk"
);
let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?;
info!(target: "reth::cli", "Chain file chunk read");
total_decoded_blocks += file_client.headers_len();
total_decoded_txns += file_client.total_transactions();
for (block_number, body) in file_client.bodies_iter_mut() {
body.transactions.retain(|_| {
if is_dup_tx(block_number) {
total_filtered_out_dup_txns += 1;
return false
}
true
})
}
let (mut pipeline, events) = build_import_pipeline(
&config,
provider_factory.clone(),
&consensus,
Arc::new(file_client),
StaticFileProducer::new(provider_factory.clone(), PruneModes::default()),
true,
)
.await?;
// override the tip
pipeline.set_tip(tip);
debug!(target: "reth::cli", ?tip, "Tip manually set");
let provider = provider_factory.provider()?;
let latest_block_number =
provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number);
tokio::spawn(reth_node_events::node::handle_events(
None,
latest_block_number,
events,
provider_factory.db_ref().clone(),
));
// Run pipeline
info!(target: "reth::cli", "Starting sync pipeline");
tokio::select! {
res = pipeline.run() => res?,
_ = tokio::signal::ctrl_c() => {},
}
}
let provider = provider_factory.provider()?;
let total_imported_blocks = provider.tx_ref().entries::<tables::HeaderNumbers>()?;
let total_imported_txns = provider.tx_ref().entries::<tables::TransactionHashNumbers>()?;
if total_decoded_blocks != total_imported_blocks ||
total_decoded_txns != total_imported_txns + total_filtered_out_dup_txns
{
error!(target: "reth::cli",
total_decoded_blocks,
total_imported_blocks,
total_decoded_txns,
total_filtered_out_dup_txns,
total_imported_txns,
"Chain was partially imported"
);
}
info!(target: "reth::cli",
total_imported_blocks,
total_imported_txns,
total_decoded_blocks,
total_decoded_txns,
total_filtered_out_dup_txns,
"Chain file imported"
);
Ok(())
}
}
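A hedged sketch of how this relocated command could be exercised on its own follows; the crate name `reth_optimism_cli` and the standalone `main` are assumptions for illustration, since the real binary dispatches the command through its own CLI:

// Hypothetical standalone wiring; crate name and runtime setup are assumptions.
use clap::Parser;
use reth_optimism_cli::ImportOpCommand;

#[tokio::main]
async fn main() -> eyre::Result<()> {
    // expects the positional IMPORT_PATH and optional --chunk-len declared above
    ImportOpCommand::parse().execute().await
}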

View File: src/commands/import_receipts.rs (new file)

@@ -0,0 +1,210 @@
//! Command that imports OP mainnet receipts from Bedrock datadir, exported via
//! <https://github.com/testinprod-io/op-geth/pull/1>.
use clap::Parser;
use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs};
use reth_db::tables;
use reth_db_api::{database::Database, transaction::DbTx};
use reth_downloaders::{
file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE},
file_codec_ovm_receipt::HackReceiptFileCodec,
receipt_file_client::ReceiptFileClient,
};
use reth_execution_types::ExecutionOutcome;
use reth_node_core::version::SHORT_VERSION;
use reth_optimism_primitives::bedrock_import::is_dup_tx;
use reth_primitives::Receipts;
use reth_provider::{
OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StateWriter,
StaticFileProviderFactory, StaticFileWriter, StatsReader,
};
use reth_stages::StageId;
use reth_static_file_types::StaticFileSegment;
use std::path::{Path, PathBuf};
use tracing::{debug, error, info, trace};
/// Imports OP Mainnet receipts below Bedrock from a file into static files.
#[derive(Debug, Parser)]
pub struct ImportReceiptsOpCommand {
#[command(flatten)]
env: EnvironmentArgs,
/// Chunk byte length to read from file.
#[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)]
chunk_len: Option<u64>,
/// The path to a receipts file for import. File must use `HackReceiptFileCodec` (used for
/// exporting OP chain segment below Bedrock block via testinprod/op-geth).
///
/// <https://github.com/testinprod-io/op-geth/pull/1>
#[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)]
path: PathBuf,
}
impl ImportReceiptsOpCommand {
/// Execute `import` command
pub async fn execute(self) -> eyre::Result<()> {
info!(target: "reth::cli", "reth {} starting", SHORT_VERSION);
debug!(target: "reth::cli",
chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE),
"Chunking receipts import"
);
let Environment { provider_factory, .. } = self.env.init(AccessRights::RW)?;
import_receipts_from_file(
provider_factory,
self.path,
self.chunk_len,
|first_block, receipts: &mut Receipts| {
let mut total_filtered_out_dup_txns = 0;
for (index, receipts_for_block) in receipts.iter_mut().enumerate() {
if is_dup_tx(first_block + index as u64) {
receipts_for_block.clear();
total_filtered_out_dup_txns += 1;
}
}
total_filtered_out_dup_txns
},
)
.await
}
}
/// Imports receipts into static files. Takes a filter callback as a parameter that returns the
/// total number of filtered-out receipts.
///
/// Caution! The filter callback must replace a block's completely filtered-out receipts with an
/// empty vector, rather than `vec![None]`, because the code that writes to static files expects
/// the indices of the [`Receipts`] list to map to sequential block numbers.
pub async fn import_receipts_from_file<DB, P, F>(
provider_factory: ProviderFactory<DB>,
path: P,
chunk_len: Option<u64>,
mut filter: F,
) -> eyre::Result<()>
where
DB: Database,
P: AsRef<Path>,
F: FnMut(u64, &mut Receipts) -> usize,
{
let provider = provider_factory.provider_rw()?;
let static_file_provider = provider_factory.static_file_provider();
let total_imported_txns = static_file_provider
.count_entries::<tables::Transactions>()
.expect("transaction static files must exist before importing receipts");
let highest_block_transactions = static_file_provider
.get_highest_static_file_block(StaticFileSegment::Transactions)
.expect("transaction static files must exist before importing receipts");
for stage in StageId::ALL {
let checkpoint = provider.get_stage_checkpoint(stage)?;
trace!(target: "reth::cli",
?stage,
?checkpoint,
"Read stage checkpoints from db"
);
}
// prepare the tx for `write_to_storage`
let tx = provider.into_tx();
let mut total_decoded_receipts = 0;
let mut total_filtered_out_dup_txns = 0;
// open file
let mut reader = ChunkedFileReader::new(path, chunk_len).await?;
while let Some(file_client) =
reader.next_chunk::<ReceiptFileClient<HackReceiptFileCodec>>().await?
{
// create a new file client from chunk read from file
let ReceiptFileClient {
mut receipts,
first_block,
total_receipts: total_receipts_chunk,
..
} = file_client;
// mark these as decoded
total_decoded_receipts += total_receipts_chunk;
total_filtered_out_dup_txns += filter(first_block, &mut receipts);
info!(target: "reth::cli",
first_receipts_block=?first_block,
total_receipts_chunk,
"Importing receipt file chunk"
);
// We're reusing receipt writing code internal to
// `ExecutionOutcome::write_to_storage`, so we just use a default empty
// `BundleState`.
let execution_outcome =
ExecutionOutcome::new(Default::default(), receipts, first_block, Default::default());
let static_file_producer =
static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)?;
// finally, write the receipts
execution_outcome.write_to_storage::<DB::TXMut>(
&tx,
Some(static_file_producer),
OriginalValuesKnown::Yes,
)?;
}
tx.commit()?;
// as static files work in file ranges, they commit internally when the next file range is
// created, so we only need to commit explicitly at the end.
static_file_provider.commit()?;
if total_decoded_receipts == 0 {
error!(target: "reth::cli", "No receipts were imported, ensure the receipt file is valid and not empty");
return Ok(())
}
let total_imported_receipts = static_file_provider
.count_entries::<tables::Receipts>()
.expect("static files must exist after ensuring we decoded more than zero");
if total_imported_receipts + total_filtered_out_dup_txns != total_decoded_receipts {
error!(target: "reth::cli",
total_decoded_receipts,
total_imported_receipts,
total_filtered_out_dup_txns,
"Receipts were partially imported"
);
}
if total_imported_receipts != total_imported_txns {
error!(target: "reth::cli",
total_imported_receipts,
total_imported_txns,
"Receipts inconsistent with transactions"
);
}
let highest_block_receipts = static_file_provider
.get_highest_static_file_block(StaticFileSegment::Receipts)
.expect("static files must exist after ensuring we decoded more than zero");
if highest_block_receipts != highest_block_transactions {
error!(target: "reth::cli",
highest_block_receipts,
highest_block_transactions,
"Height of receipts inconsistent with transactions"
);
}
info!(target: "reth::cli",
total_imported_receipts,
total_decoded_receipts,
total_filtered_out_dup_txns,
"Receipt file imported"
);
Ok(())
}
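To make the filter contract above concrete, here is a minimal sketch of a callback suitable for `import_receipts_from_file`; the `should_drop_block` predicate is hypothetical and the crate path is assumed:

// Sketch of a filter callback: a fully dropped block keeps an *empty* Vec at
// its index (never `vec![None]`), so indices still map to sequential blocks.
use reth_primitives::Receipts;

// hypothetical predicate deciding which blocks to drop entirely
fn should_drop_block(_block_number: u64) -> bool {
    false
}

fn filter_receipts(first_block: u64, receipts: &mut Receipts) -> usize {
    let mut filtered_out = 0;
    for (offset, block_receipts) in receipts.iter_mut().enumerate() {
        if should_drop_block(first_block + offset as u64) {
            block_receipts.clear(); // empty vector, not placeholder `None`s
            filtered_out += 1;
        }
    }
    filtered_out
}

Such a callback could be passed in place of the dedup closure used by `ImportReceiptsOpCommand` above.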

View File: src/commands/mod.rs (new file)

@@ -0,0 +1,4 @@
/// Helpers for building an import pipeline.
pub mod build_pipeline;
pub mod import;
pub mod import_receipts;

View File: src/lib.rs

@@ -5,5 +5,11 @@
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(all(not(test), feature = "optimism"), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
// The `optimism` feature must be enabled to use this crate.
#![cfg(feature = "optimism")]
/// Optimism CLI commands.
pub mod commands;
pub use commands::{import::ImportOpCommand, import_receipts::ImportReceiptsOpCommand};
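Finally, a hedged sketch of how a binary could group these re-exported commands into subcommands, which is the kind of wiring this PR moves out of the reth bin; the enum, crate name, and subcommand names are illustrative assumptions, not part of this diff:

// Illustrative only: none of these names are defined by this commit.
use clap::{Parser, Subcommand};
use reth_optimism_cli::{ImportOpCommand, ImportReceiptsOpCommand};

#[derive(Debug, Parser)]
struct Cli {
    #[command(subcommand)]
    command: Commands,
}

#[derive(Debug, Subcommand)]
enum Commands {
    /// Import RLP-encoded OP Mainnet blocks below Bedrock from a file.
    ImportOp(ImportOpCommand),
    /// Import OP Mainnet receipts below Bedrock from a file.
    ImportReceipts(ImportReceiptsOpCommand),
}

#[tokio::main]
async fn main() -> eyre::Result<()> {
    match Cli::parse().command {
        Commands::ImportOp(cmd) => cmd.execute().await,
        Commands::ImportReceipts(cmd) => cmd.execute().await,
    }
}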