feat(download): modular snapshot downloads with interactive TUI and config generation (#22246)

Co-authored-by: Amp <amp@ampcode.com>
Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com>
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
Co-authored-by: Derek Cofausper <256792747+decofe@users.noreply.github.com>
This commit is contained in:
Georgios Konstantopoulos
2026-03-04 13:32:45 -08:00
committed by GitHub
parent 016c445dfa
commit 26f4aab2a9
16 changed files with 4540 additions and 648 deletions

22
Cargo.lock generated
View File

@@ -1711,6 +1711,20 @@ dependencies = [
"wyz",
]
[[package]]
name = "blake3"
version = "1.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2468ef7d57b3fb7e16b576e8377cdbde2320c60e1491e961d11da40fc4f02a2d"
dependencies = [
"arrayref",
"arrayvec",
"cc",
"cfg-if",
"constant_time_eq",
"cpufeatures",
]
[[package]]
name = "block-buffer"
version = "0.10.4"
@@ -2540,6 +2554,12 @@ dependencies = [
"unicode-xid",
]
[[package]]
name = "constant_time_eq"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d52eff69cd5e647efe296129160853a42795992097e8af39800e1060caeea9b"
[[package]]
name = "convert_case"
version = "0.10.0"
@@ -7732,6 +7752,7 @@ dependencies = [
"alloy-rlp",
"arbitrary",
"backon",
"blake3",
"clap",
"comfy-table",
"crossterm",
@@ -7747,6 +7768,7 @@ dependencies = [
"proptest",
"proptest-arbitrary-interop",
"ratatui",
"rayon",
"reqwest",
"reth-chainspec",
"reth-cli",

View File

@@ -504,6 +504,7 @@ bincode = "1.3"
bitflags = "2.4"
boyer-moore-magiclen = "0.2.16"
bytes = { version = "1.11.1", default-features = false }
blake3 = "1.8"
brotli = "8"
cfg-if = "1.0"
clap = "4"

View File

@@ -46,7 +46,7 @@ reth-prune.workspace = true
reth-prune-types.workspace = true
reth-revm.workspace = true
reth-stages.workspace = true
reth-stages-types = { workspace = true, optional = true }
reth-stages-types.workspace = true
reth-static-file-types = { workspace = true, features = ["clap"] }
reth-static-file.workspace = true
reth-tasks.workspace = true
@@ -87,6 +87,8 @@ tokio-stream.workspace = true
reqwest.workspace = true
url.workspace = true
metrics.workspace = true
blake3.workspace = true
rayon.workspace = true
# io
fdlimit.workspace = true
@@ -127,7 +129,7 @@ arbitrary = [
"reth-trie-common?/test-utils",
"reth-codecs/arbitrary",
"reth-prune-types/arbitrary",
"reth-stages-types?/arbitrary",
"reth-stages-types/arbitrary",
"reth-trie-common?/arbitrary",
"alloy-consensus/arbitrary",
"reth-primitives-traits/arbitrary",

View File

@@ -1,630 +0,0 @@
use crate::common::EnvironmentArgs;
use clap::Parser;
use eyre::Result;
use lz4::Decoder;
use reqwest::{blocking::Client as BlockingClient, header::RANGE, Client, StatusCode};
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_fs_util as fs;
use std::{
borrow::Cow,
fs::OpenOptions,
io::{self, BufWriter, Read, Write},
path::{Path, PathBuf},
sync::{Arc, OnceLock},
time::{Duration, Instant},
};
use tar::Archive;
use tokio::task;
use tracing::info;
use url::Url;
use zstd::stream::read::Decoder as ZstdDecoder;
/// Byte-size unit suffixes used by `DownloadProgress::format_size`.
const BYTE_UNITS: [&str; 4] = ["B", "KB", "MB", "GB"];
/// Default snapshot host used when no custom defaults are installed.
const MERKLE_BASE_URL: &str = "https://downloads.merkle.io";
/// Archive extension for LZ4-compressed tarballs.
const EXTENSION_TAR_LZ4: &str = ".tar.lz4";
/// Archive extension for Zstandard-compressed tarballs.
const EXTENSION_TAR_ZSTD: &str = ".tar.zst";
/// Global static download defaults
///
/// Set once via [`DownloadDefaults::try_init`]; read via [`DownloadDefaults::get_global`].
static DOWNLOAD_DEFAULTS: OnceLock<DownloadDefaults> = OnceLock::new();
/// Download configuration defaults
///
/// Global defaults can be set via [`DownloadDefaults::try_init`] and read back via
/// [`DownloadDefaults::get_global`]. Individual fields can be customized with the
/// builder-style `with_*` methods.
#[derive(Debug, Clone)]
pub struct DownloadDefaults {
    /// List of available snapshot sources
    pub available_snapshots: Vec<Cow<'static, str>>,
    /// Default base URL for snapshots
    pub default_base_url: Cow<'static, str>,
    /// Default base URL for chain-aware snapshots.
    ///
    /// When set, the chain ID is appended to form the full URL: `{base_url}/{chain_id}`.
    /// For example, given a base URL of `https://snapshots.example.com` and chain ID `1`,
    /// the resulting URL would be `https://snapshots.example.com/1`.
    ///
    /// Falls back to [`default_base_url`](Self::default_base_url) when `None`.
    pub default_chain_aware_base_url: Option<Cow<'static, str>>,
    /// Optional custom long help text that overrides the generated help
    pub long_help: Option<String>,
}
impl DownloadDefaults {
/// Initialize the global download defaults with this configuration
pub fn try_init(self) -> Result<(), Self> {
DOWNLOAD_DEFAULTS.set(self)
}
/// Get a reference to the global download defaults
pub fn get_global() -> &'static DownloadDefaults {
DOWNLOAD_DEFAULTS.get_or_init(DownloadDefaults::default_download_defaults)
}
/// Default download configuration with defaults from merkle.io and publicnode
pub fn default_download_defaults() -> Self {
Self {
available_snapshots: vec![
Cow::Borrowed("https://www.merkle.io/snapshots (default, mainnet archive)"),
Cow::Borrowed("https://publicnode.com/snapshots (full nodes & testnets)"),
],
default_base_url: Cow::Borrowed(MERKLE_BASE_URL),
default_chain_aware_base_url: None,
long_help: None,
}
}
/// Generates the long help text for the download URL argument using these defaults.
///
/// If a custom long_help is set, it will be returned. Otherwise, help text is generated
/// from the available_snapshots list.
pub fn long_help(&self) -> String {
if let Some(ref custom_help) = self.long_help {
return custom_help.clone();
}
let mut help = String::from(
"Specify a snapshot URL or let the command propose a default one.\n\nAvailable snapshot sources:\n",
);
for source in &self.available_snapshots {
help.push_str("- ");
help.push_str(source);
help.push('\n');
}
help.push_str(
"\nIf no URL is provided, the latest archive snapshot for the selected chain\nwill be proposed for download from ",
);
help.push_str(
self.default_chain_aware_base_url.as_deref().unwrap_or(&self.default_base_url),
);
help.push_str(
".\n\nLocal file:// URLs are also supported for extracting snapshots from disk.",
);
help
}
/// Add a snapshot source to the list
pub fn with_snapshot(mut self, source: impl Into<Cow<'static, str>>) -> Self {
self.available_snapshots.push(source.into());
self
}
/// Replace all snapshot sources
pub fn with_snapshots(mut self, sources: Vec<Cow<'static, str>>) -> Self {
self.available_snapshots = sources;
self
}
/// Set the default base URL, e.g. `https://downloads.merkle.io`.
pub fn with_base_url(mut self, url: impl Into<Cow<'static, str>>) -> Self {
self.default_base_url = url.into();
self
}
/// Set the default chain-aware base URL.
pub fn with_chain_aware_base_url(mut self, url: impl Into<Cow<'static, str>>) -> Self {
self.default_chain_aware_base_url = Some(url.into());
self
}
/// Builder: Set custom long help text, overriding the generated help
pub fn with_long_help(mut self, help: impl Into<String>) -> Self {
self.long_help = Some(help.into());
self
}
}
impl Default for DownloadDefaults {
    /// Delegates to [`DownloadDefaults::default_download_defaults`].
    fn default() -> Self {
        Self::default_download_defaults()
    }
}
/// CLI command that downloads a node snapshot and extracts it into the datadir.
#[derive(Debug, Parser)]
pub struct DownloadCommand<C: ChainSpecParser> {
    /// Shared environment arguments (datadir, chain spec, ...).
    #[command(flatten)]
    env: EnvironmentArgs<C>,
    /// Custom URL to download the snapshot from
    #[arg(long, short, long_help = DownloadDefaults::get_global().long_help())]
    url: Option<String>,
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> DownloadCommand<C> {
    /// Downloads a snapshot and extracts it into the chain's resolved data directory.
    ///
    /// When `--url` was not supplied, the latest snapshot URL for the configured
    /// chain is resolved via [`get_latest_snapshot_url`].
    ///
    /// NOTE(review): the `N` type parameter is unused in this body — presumably
    /// kept for signature parity with other CLI commands; confirm before removing.
    pub async fn execute<N>(self) -> Result<()> {
        let data_dir = self.env.datadir.resolve_datadir(self.env.chain.chain());
        // Make sure the datadir exists before extraction writes into it.
        fs::create_dir_all(&data_dir)?;
        let url = match self.url {
            Some(url) => url,
            None => {
                let url = get_latest_snapshot_url(self.env.chain.chain().id()).await?;
                info!(target: "reth::cli", "Using default snapshot URL: {}", url);
                url
            }
        };
        info!(target: "reth::cli",
            chain = %self.env.chain.chain(),
            dir = ?data_dir.data_dir(),
            url = %url,
            "Starting snapshot download and extraction"
        );
        stream_and_extract(&url, data_dir.data_dir()).await?;
        info!(target: "reth::cli", "Snapshot downloaded and extracted successfully");
        Ok(())
    }
}
impl<C: ChainSpecParser> DownloadCommand<C> {
    /// Returns the underlying chain being used to run this command
    ///
    /// Always returns `Some`, since the environment carries a chain spec.
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        Some(&self.env.chain)
    }
}
// Monitor process status and display progress every 100ms
// to avoid overwhelming stdout
struct DownloadProgress {
    /// Bytes processed so far.
    downloaded: u64,
    /// Expected total size in bytes.
    total_size: u64,
    /// Last time the progress line was printed (throttles redraws).
    last_displayed: Instant,
    /// When tracking started; used for ETA calculation.
    started_at: Instant,
}
impl DownloadProgress {
    /// Creates new progress tracker with given total size
    fn new(total_size: u64) -> Self {
        let now = Instant::now();
        Self { downloaded: 0, total_size, last_displayed: now, started_at: now }
    }

    /// Converts bytes to human readable format (B, KB, MB, GB)
    fn format_size(size: u64) -> String {
        let mut size = size as f64;
        let mut unit_index = 0;
        // Divide by 1024 until the value fits the largest applicable unit.
        while size >= 1024.0 && unit_index < BYTE_UNITS.len() - 1 {
            size /= 1024.0;
            unit_index += 1;
        }
        format!("{:.2} {}", size, BYTE_UNITS[unit_index])
    }

    /// Format duration as human readable string
    ///
    /// Seconds below a minute, then `Xm Ys`, then `Xh Ym`.
    fn format_duration(duration: Duration) -> String {
        let secs = duration.as_secs();
        if secs < 60 {
            format!("{secs}s")
        } else if secs < 3600 {
            format!("{}m {}s", secs / 60, secs % 60)
        } else {
            format!("{}h {}m", secs / 3600, (secs % 3600) / 60)
        }
    }

    /// Updates progress bar
    ///
    /// Adds `chunk_size` to the running total and, at most every 100ms,
    /// redraws the single-line progress display (percentage, sizes, ETA).
    fn update(&mut self, chunk_size: u64) -> Result<()> {
        self.downloaded += chunk_size;
        // Only update display at most 10 times per second for efficiency
        if self.last_displayed.elapsed() >= Duration::from_millis(100) {
            let formatted_downloaded = Self::format_size(self.downloaded);
            let formatted_total = Self::format_size(self.total_size);
            let progress = (self.downloaded as f64 / self.total_size as f64) * 100.0;
            // Calculate ETA based on current speed
            let elapsed = self.started_at.elapsed();
            let eta = if self.downloaded > 0 {
                let remaining = self.total_size.saturating_sub(self.downloaded);
                let speed = self.downloaded as f64 / elapsed.as_secs_f64();
                if speed > 0.0 {
                    Duration::from_secs_f64(remaining as f64 / speed)
                } else {
                    // No measurable throughput yet.
                    Duration::ZERO
                }
            } else {
                Duration::ZERO
            };
            let eta_str = Self::format_duration(eta);
            // Pad with spaces to clear any previous longer line
            print!(
                "\rDownloading and extracting... {progress:.2}% ({formatted_downloaded} / {formatted_total}) ETA: {eta_str} ",
            );
            io::stdout().flush()?;
            self.last_displayed = Instant::now();
        }
        Ok(())
    }
}
/// Adapter to track progress while reading
struct ProgressReader<R> {
    /// Underlying data source being read.
    reader: R,
    /// Progress display state, advanced on every successful read.
    progress: DownloadProgress,
}
impl<R: Read> ProgressReader<R> {
    /// Wraps `reader`, tracking progress against an expected `total_size` bytes.
    fn new(reader: R, total_size: u64) -> Self {
        let progress = DownloadProgress::new(total_size);
        Self { reader, progress }
    }
}
impl<R: Read> Read for ProgressReader<R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let bytes = self.reader.read(buf)?;
if bytes > 0 &&
let Err(e) = self.progress.update(bytes as u64)
{
return Err(io::Error::other(e));
}
Ok(bytes)
}
}
/// Supported compression formats for snapshots
#[derive(Debug, Clone, Copy)]
enum CompressionFormat {
    /// LZ4-compressed tarball (`.tar.lz4`).
    Lz4,
    /// Zstandard-compressed tarball (`.tar.zst`).
    Zstd,
}
impl CompressionFormat {
    /// Detect compression format from file extension
    fn from_url(url: &str) -> Result<Self> {
        // Use the parsed URL path when possible so query strings don't confuse
        // the extension check; fall back to the raw string for bare paths.
        let path = match Url::parse(url) {
            Ok(parsed) => parsed.path().to_string(),
            Err(_) => url.to_string(),
        };
        if path.ends_with(EXTENSION_TAR_LZ4) {
            Ok(Self::Lz4)
        } else if path.ends_with(EXTENSION_TAR_ZSTD) {
            Ok(Self::Zstd)
        } else {
            Err(eyre::eyre!(
                "Unsupported file format. Expected .tar.lz4 or .tar.zst, got: {}",
                path
            ))
        }
    }
}
/// Extracts a compressed tar archive to the target directory with progress tracking.
fn extract_archive<R: Read>(
    reader: R,
    total_size: u64,
    format: CompressionFormat,
    target_dir: &Path,
) -> Result<()> {
    let tracked = ProgressReader::new(reader, total_size);
    // Decoder types differ per format, so unpack inside each match arm.
    match format {
        CompressionFormat::Lz4 => Archive::new(Decoder::new(tracked)?).unpack(target_dir)?,
        CompressionFormat::Zstd => Archive::new(ZstdDecoder::new(tracked)?).unpack(target_dir)?,
    }
    info!(target: "reth::cli", "Extraction complete.");
    Ok(())
}
/// Extracts a snapshot from a local file.
fn extract_from_file(path: &Path, format: CompressionFormat, target_dir: &Path) -> Result<()> {
    let archive = std::fs::File::open(path)?;
    // The on-disk size drives the progress display.
    let size = archive.metadata()?.len();
    extract_archive(archive, size, format, target_dir)
}
/// Maximum number of download attempts before giving up.
const MAX_DOWNLOAD_RETRIES: u32 = 10;
/// Seconds to wait between retry attempts.
const RETRY_BACKOFF_SECS: u64 = 5;
/// Wrapper that tracks download progress while writing data.
/// Used with [`io::copy`] to display progress during downloads.
struct ProgressWriter<W> {
    /// Destination for downloaded bytes (a buffered file writer in practice).
    inner: W,
    /// Progress display state, advanced after every successful write.
    progress: DownloadProgress,
}
impl<W: Write> Write for ProgressWriter<W> {
    /// Forwards to the inner writer, then records the written byte count.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let written = self.inner.write(buf)?;
        // Best effort: a failure to render progress must not abort the download.
        let _ = self.progress.update(written as u64);
        Ok(written)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}
/// Downloads a file with resume support using HTTP Range requests.
/// Automatically retries on failure, resuming from where it left off.
/// Returns the path to the downloaded file and its total size.
fn resumable_download(url: &str, target_dir: &Path) -> Result<(PathBuf, u64)> {
    // Derive the local file name from the URL's last path segment.
    let file_name = Url::parse(url)
        .ok()
        .and_then(|u| u.path_segments()?.next_back().map(|s| s.to_string()))
        .unwrap_or_else(|| "snapshot.tar".to_string());
    let final_path = target_dir.join(&file_name);
    // In-progress data goes to a `.part` file; it is renamed only once complete.
    let part_path = target_dir.join(format!("{file_name}.part"));
    let client = BlockingClient::builder().timeout(Duration::from_secs(30)).build()?;
    // Total size is learned from the first successful response and reused across retries.
    let mut total_size: Option<u64> = None;
    let mut last_error: Option<eyre::Error> = None;
    // Promotes the `.part` file to its final name once all bytes are on disk.
    let finalize_download = |size: u64| -> Result<(PathBuf, u64)> {
        fs::rename(&part_path, &final_path)?;
        info!(target: "reth::cli", "Download complete: {}", final_path.display());
        Ok((final_path.clone(), size))
    };
    for attempt in 1..=MAX_DOWNLOAD_RETRIES {
        let existing_size = fs::metadata(&part_path).map(|m| m.len()).unwrap_or(0);
        // All bytes already present from a previous attempt — just finalize.
        if let Some(total) = total_size &&
            existing_size >= total
        {
            return finalize_download(total);
        }
        if attempt > 1 {
            info!(target: "reth::cli",
                "Retry attempt {}/{} - resuming from {} bytes",
                attempt, MAX_DOWNLOAD_RETRIES, existing_size
            );
        }
        let mut request = client.get(url);
        // Ask the server for only the bytes we don't have yet.
        if existing_size > 0 {
            request = request.header(RANGE, format!("bytes={existing_size}-"));
            if attempt == 1 {
                info!(target: "reth::cli", "Resuming download from {} bytes", existing_size);
            }
        }
        let response = match request.send().and_then(|r| r.error_for_status()) {
            Ok(r) => r,
            Err(e) => {
                // Remember the error, back off, and retry.
                last_error = Some(e.into());
                if attempt < MAX_DOWNLOAD_RETRIES {
                    info!(target: "reth::cli",
                        "Download failed, retrying in {} seconds...", RETRY_BACKOFF_SECS
                    );
                    std::thread::sleep(Duration::from_secs(RETRY_BACKOFF_SECS));
                }
                continue;
            }
        };
        // 206 means the server honored the Range header; anything else restarts from scratch.
        let is_partial = response.status() == StatusCode::PARTIAL_CONTENT;
        let size = if is_partial {
            // Content-Range is `bytes start-end/total` — take the total after '/'.
            response
                .headers()
                .get("Content-Range")
                .and_then(|v| v.to_str().ok())
                .and_then(|v| v.split('/').next_back())
                .and_then(|v| v.parse().ok())
        } else {
            response.content_length()
        };
        if total_size.is_none() {
            total_size = size;
        }
        let current_total = total_size.ok_or_else(|| {
            eyre::eyre!("Server did not provide Content-Length or Content-Range header")
        })?;
        // Append on resume; otherwise (re)create the part file.
        let file = if is_partial && existing_size > 0 {
            OpenOptions::new()
                .append(true)
                .open(&part_path)
                .map_err(|e| fs::FsPathError::open(e, &part_path))?
        } else {
            fs::create_file(&part_path)?
        };
        let start_offset = if is_partial { existing_size } else { 0 };
        // Seed the tracker with already-downloaded bytes so the percentage
        // and ETA account for resumed data.
        let mut progress = DownloadProgress::new(current_total);
        progress.downloaded = start_offset;
        let mut writer = ProgressWriter { inner: BufWriter::new(file), progress };
        let mut reader = response;
        let copy_result = io::copy(&mut reader, &mut writer);
        let flush_result = writer.inner.flush();
        // Terminate the single-line progress display.
        println!();
        if let Err(e) = copy_result.and(flush_result) {
            last_error = Some(e.into());
            if attempt < MAX_DOWNLOAD_RETRIES {
                info!(target: "reth::cli",
                    "Download interrupted, retrying in {} seconds...", RETRY_BACKOFF_SECS
                );
                std::thread::sleep(Duration::from_secs(RETRY_BACKOFF_SECS));
            }
            continue;
        }
        return finalize_download(current_total);
    }
    Err(last_error
        .unwrap_or_else(|| eyre::eyre!("Download failed after {} attempts", MAX_DOWNLOAD_RETRIES)))
}
/// Fetches the snapshot from a remote URL with resume support, then extracts it.
fn download_and_extract(url: &str, format: CompressionFormat, target_dir: &Path) -> Result<()> {
    let (archive_path, total_size) = resumable_download(url, target_dir)?;
    info!(target: "reth::cli", "Extracting snapshot...");
    let archive = fs::open(&archive_path)?;
    extract_archive(archive, total_size, format, target_dir)?;
    // The archive is no longer needed once its contents are on disk.
    fs::remove_file(&archive_path)?;
    info!(target: "reth::cli", "Removed downloaded archive");
    Ok(())
}
/// Downloads and extracts a snapshot, blocking until finished.
///
/// Supports both `file://` URLs for local files and HTTP(S) URLs for remote downloads.
fn blocking_download_and_extract(url: &str, target_dir: &Path) -> Result<()> {
    let format = CompressionFormat::from_url(url)?;
    match Url::parse(url) {
        // Local archives are extracted straight from disk.
        Ok(parsed) if parsed.scheme() == "file" => {
            let local_path = parsed
                .to_file_path()
                .map_err(|_| eyre::eyre!("Invalid file:// URL path: {}", url))?;
            extract_from_file(&local_path, format, target_dir)
        }
        // Everything else is treated as a remote URL.
        _ => download_and_extract(url, format, target_dir),
    }
}
/// Runs [`blocking_download_and_extract`] on the blocking thread pool so the
/// async runtime is not stalled by synchronous I/O.
async fn stream_and_extract(url: &str, target_dir: &Path) -> Result<()> {
    let dir = target_dir.to_path_buf();
    let url = url.to_owned();
    task::spawn_blocking(move || blocking_download_and_extract(&url, &dir)).await??;
    Ok(())
}
// Builds the default URL for the latest snapshot of the given chain using the
// configured defaults.
async fn get_latest_snapshot_url(chain_id: u64) -> Result<String> {
    let defaults = DownloadDefaults::get_global();
    // Chain-aware base URLs get the chain id appended; otherwise use the plain base.
    let base_url = match &defaults.default_chain_aware_base_url {
        Some(url) => format!("{url}/{chain_id}"),
        None => defaults.default_base_url.to_string(),
    };
    // `latest.txt` on the snapshot host contains the filename of the most
    // recent archive.
    let latest_url = format!("{base_url}/latest.txt");
    let filename = Client::new()
        .get(latest_url)
        .send()
        .await?
        .error_for_status()?
        .text()
        .await?
        .trim()
        .to_string();
    // Fix: return the fetched filename — the previous code discarded it and
    // returned a literal placeholder, producing an invalid download URL.
    Ok(format!("{base_url}/{filename}"))
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Builder methods append to the default snapshot list and replace the base URL.
    #[test]
    fn test_download_defaults_builder() {
        let defaults = DownloadDefaults::default()
            .with_snapshot("https://example.com/snapshots (example)")
            .with_base_url("https://example.com");
        assert_eq!(defaults.default_base_url, "https://example.com");
        assert_eq!(defaults.available_snapshots.len(), 3); // 2 defaults + 1 added
    }

    /// `with_snapshots` replaces the default list entirely.
    #[test]
    fn test_download_defaults_replace_snapshots() {
        let defaults = DownloadDefaults::default().with_snapshots(vec![
            Cow::Borrowed("https://custom1.com"),
            Cow::Borrowed("https://custom2.com"),
        ]);
        assert_eq!(defaults.available_snapshots.len(), 2);
        assert_eq!(defaults.available_snapshots[0], "https://custom1.com");
    }

    /// Generated help mentions every default source plus file:// support.
    #[test]
    fn test_long_help_generation() {
        let defaults = DownloadDefaults::default();
        let help = defaults.long_help();
        assert!(help.contains("Available snapshot sources:"));
        assert!(help.contains("merkle.io"));
        assert!(help.contains("publicnode.com"));
        assert!(help.contains("file://"));
    }

    /// A custom long help fully replaces the generated text.
    #[test]
    fn test_long_help_override() {
        let custom_help = "This is custom help text for downloading snapshots.";
        let defaults = DownloadDefaults::default().with_long_help(custom_help);
        let help = defaults.long_help();
        assert_eq!(help, custom_help);
        assert!(!help.contains("Available snapshot sources:"));
    }

    /// Builder calls can be chained in any combination.
    #[test]
    fn test_builder_chaining() {
        let defaults = DownloadDefaults::default()
            .with_base_url("https://custom.example.com")
            .with_snapshot("https://snapshot1.com")
            .with_snapshot("https://snapshot2.com")
            .with_long_help("Custom help for snapshots");
        assert_eq!(defaults.default_base_url, "https://custom.example.com");
        assert_eq!(defaults.available_snapshots.len(), 4); // 2 defaults + 2 added
        assert_eq!(defaults.long_help, Some("Custom help for snapshots".to_string()));
    }

    /// Format detection works for http(s) and file URLs; unknown extensions fail.
    #[test]
    fn test_compression_format_detection() {
        assert!(matches!(
            CompressionFormat::from_url("https://example.com/snapshot.tar.lz4"),
            Ok(CompressionFormat::Lz4)
        ));
        assert!(matches!(
            CompressionFormat::from_url("https://example.com/snapshot.tar.zst"),
            Ok(CompressionFormat::Zstd)
        ));
        assert!(matches!(
            CompressionFormat::from_url("file:///path/to/snapshot.tar.lz4"),
            Ok(CompressionFormat::Lz4)
        ));
        assert!(matches!(
            CompressionFormat::from_url("file:///path/to/snapshot.tar.zst"),
            Ok(CompressionFormat::Zstd)
        ));
        assert!(CompressionFormat::from_url("https://example.com/snapshot.tar.gz").is_err());
    }
}

View File

@@ -0,0 +1,609 @@
use crate::download::{
manifest::{ComponentManifest, ComponentSelection, SnapshotComponentType, SnapshotManifest},
SelectionPreset,
};
use reth_chainspec::{EthereumHardfork, EthereumHardforks};
use reth_config::config::{BlocksPerFileConfig, Config, PruneConfig, StaticFilesConfig};
use reth_db::tables;
use reth_db_api::transaction::{DbTx, DbTxMut};
use reth_node_core::args::DefaultPruningValues;
use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment};
use reth_stages_types::StageCheckpoint;
use std::{collections::BTreeMap, path::Path};
use tracing::info;
/// Minimum blocks to keep for receipts, matching `--minimal` prune settings.
/// Used by `selection_to_prune_mode` to clamp user selections.
const MINIMUM_RECEIPTS_DISTANCE: u64 = 64;
/// Minimum blocks to keep for history/bodies, matching `--minimal` prune settings
/// (`MINIMUM_UNWIND_SAFE_DISTANCE`).
/// Used by `selection_to_prune_mode` to clamp user selections.
const MINIMUM_HISTORY_DISTANCE: u64 = 10064;
/// Writes a [`Config`] as TOML to `<data_dir>/reth.toml`.
///
/// If the file already exists, it is not overwritten. Returns `true` if the file was written.
pub fn write_config(config: &Config, data_dir: &Path) -> eyre::Result<bool> {
    let config_path = data_dir.join("reth.toml");
    // Never clobber a user's existing configuration.
    if config_path.exists() {
        info!(target: "reth::cli",
            path = ?config_path,
            "reth.toml already exists, skipping config generation"
        );
        return Ok(false);
    }
    let serialized = toml::to_string_pretty(config)?;
    reth_fs_util::write(&config_path, serialized)?;
    info!(target: "reth::cli",
        path = ?config_path,
        "Generated reth.toml based on downloaded components"
    );
    Ok(true)
}
/// Writes prune checkpoints to the provided write transaction.
///
/// Stores a checkpoint at `snapshot_block` for every prune segment configured
/// in `config`. The checkpoint's `tx_number` is resolved from the block's
/// `BlockBodyIndices` entry when present.
pub(crate) fn write_prune_checkpoints_tx<Tx>(
    tx: &Tx,
    config: &Config,
    snapshot_block: u64,
) -> eyre::Result<()>
where
    Tx: DbTx + DbTxMut,
{
    let segments = &config.prune.segments;
    // Collect (segment, mode) pairs for all configured prune segments
    let checkpoints: Vec<(PruneSegment, PruneMode)> = [
        (PruneSegment::SenderRecovery, segments.sender_recovery),
        (PruneSegment::TransactionLookup, segments.transaction_lookup),
        (PruneSegment::Receipts, segments.receipts),
        (PruneSegment::AccountHistory, segments.account_history),
        (PruneSegment::StorageHistory, segments.storage_history),
        (PruneSegment::Bodies, segments.bodies_history),
    ]
    .into_iter()
    .filter_map(|(segment, mode)| mode.map(|m| (segment, m)))
    .collect();
    // Nothing configured (e.g. archive setup) — nothing to write.
    if checkpoints.is_empty() {
        return Ok(());
    }
    // Look up the last tx number for the snapshot block from BlockBodyIndices
    let tx_number =
        tx.get::<tables::BlockBodyIndices>(snapshot_block)?.map(|indices| indices.last_tx_num());
    for (segment, prune_mode) in &checkpoints {
        let checkpoint = PruneCheckpoint {
            block_number: Some(snapshot_block),
            tx_number,
            prune_mode: *prune_mode,
        };
        tx.put::<tables::PruneCheckpoints>(*segment, checkpoint)?;
        info!(target: "reth::cli",
            segment = %segment,
            block = snapshot_block,
            tx = ?tx_number,
            mode = ?prune_mode,
            "Set prune checkpoint"
        );
    }
    Ok(())
}
/// Stage IDs for index stages whose output is stored in RocksDB and is never
/// distributed in snapshots.
///
/// Must stay in sync with [`INDEX_PRUNE_SEGMENTS`] below.
const INDEX_STAGE_IDS: [&str; 3] =
    ["TransactionLookup", "IndexAccountHistory", "IndexStorageHistory"];
/// Prune segments that correspond to the index stages.
const INDEX_PRUNE_SEGMENTS: [PruneSegment; 3] =
    [PruneSegment::TransactionLookup, PruneSegment::AccountHistory, PruneSegment::StorageHistory];
/// Resets stage and prune checkpoints for stages whose output is not included
/// in the snapshot inside an existing write transaction.
///
/// A snapshot's mdbx comes from a fully synced node, so it has stage checkpoints
/// at the tip for `TransactionLookup`, `IndexAccountHistory`, and
/// `IndexStorageHistory`. Since we don't distribute the rocksdb indices those
/// stages produced, we must reset their checkpoints to block 0. Otherwise the
/// pipeline would see "already done" and skip rebuilding entirely.
///
/// We intentionally do not reset `SenderRecovery`: sender static files are
/// distributed for archive downloads, and non-archive downloads rely on the
/// configured prune checkpoints for this segment.
pub(crate) fn reset_index_stage_checkpoints_tx<Tx>(tx: &Tx) -> eyre::Result<()>
where
    Tx: DbTx + DbTxMut,
{
    for stage_id in INDEX_STAGE_IDS {
        // Overwrite with the default checkpoint (block 0) so the pipeline re-runs the stage.
        tx.put::<tables::StageCheckpoints>(stage_id.to_string(), StageCheckpoint::default())?;
        // Also clear any stage-specific progress data
        tx.delete::<tables::StageCheckpointProgresses>(stage_id.to_string(), None)?;
        info!(target: "reth::cli", stage = stage_id, "Reset stage checkpoint to block 0");
    }
    // Clear corresponding prune checkpoints so the pruner doesn't inherit
    // state from the source node
    for segment in INDEX_PRUNE_SEGMENTS {
        tx.delete::<tables::PruneCheckpoints>(segment, None)?;
    }
    Ok(())
}
/// Generates a [`Config`] from per-component range selections.
///
/// When all data components are selected as `All`, no pruning is configured (archive node).
/// Otherwise, `--minimal` style pruning is applied for missing/partial components.
pub(crate) fn config_for_selections(
    selections: &BTreeMap<SnapshotComponentType, ComponentSelection>,
    manifest: &SnapshotManifest,
    preset: Option<SelectionPreset>,
    chain_spec: Option<&impl EthereumHardforks>,
) -> Config {
    // Components absent from the map are treated as not selected.
    let selection_for = |ty| selections.get(&ty).copied().unwrap_or(ComponentSelection::None);
    let tx_sel = selection_for(SnapshotComponentType::Transactions);
    let senders_sel = selection_for(SnapshotComponentType::TransactionSenders);
    let receipt_sel = selection_for(SnapshotComponentType::Receipts);
    let account_cs_sel = selection_for(SnapshotComponentType::AccountChangesets);
    let storage_cs_sel = selection_for(SnapshotComponentType::StorageChangesets);
    // Archive node — all data components present, no pruning
    let is_archive = [tx_sel, senders_sel, receipt_sel, account_cs_sel, storage_cs_sel]
        .iter()
        .all(|s| *s == ComponentSelection::All);
    // Extract blocks_per_file from manifest for all component types
    let blocks_per_file = |ty: SnapshotComponentType| -> Option<u64> {
        match manifest.component(ty)? {
            ComponentManifest::Chunked(c) => Some(c.blocks_per_file),
            // Single-file components carry no per-file block count.
            ComponentManifest::Single(_) => None,
        }
    };
    let static_files = StaticFilesConfig {
        blocks_per_file: BlocksPerFileConfig {
            headers: blocks_per_file(SnapshotComponentType::Headers),
            transactions: blocks_per_file(SnapshotComponentType::Transactions),
            receipts: blocks_per_file(SnapshotComponentType::Receipts),
            transaction_senders: blocks_per_file(SnapshotComponentType::TransactionSenders),
            account_change_sets: blocks_per_file(SnapshotComponentType::AccountChangesets),
            storage_change_sets: blocks_per_file(SnapshotComponentType::StorageChangesets),
        },
    };
    if is_archive || matches!(preset, Some(SelectionPreset::Archive)) {
        return Config { static_files, ..Default::default() };
    }
    // Full-node preset: use the globally configured full-node prune modes.
    if matches!(preset, Some(SelectionPreset::Full)) {
        let defaults = DefaultPruningValues::get_global();
        let mut segments = defaults.full_prune_modes.clone();
        if defaults.full_bodies_history_use_pre_merge {
            // Prune bodies before the Paris (merge) activation block when the
            // chain spec provides one.
            segments.bodies_history = chain_spec.and_then(|chain_spec| {
                chain_spec
                    .ethereum_fork_activation(EthereumHardfork::Paris)
                    .block_number()
                    .map(PruneMode::Before)
            });
        }
        return Config {
            prune: PruneConfig { block_interval: PruneConfig::default().block_interval, segments },
            static_files,
            ..Default::default()
        };
    }
    // Custom selection: derive prune modes from what was (not) downloaded.
    let mut config = Config::default();
    let mut prune = PruneConfig::default();
    if senders_sel != ComponentSelection::All {
        prune.segments.sender_recovery = Some(PruneMode::Full);
    }
    // Transaction lookup indices are never distributed in snapshots (see INDEX_STAGE_IDS).
    prune.segments.transaction_lookup = Some(PruneMode::Full);
    if let Some(mode) = selection_to_prune_mode(tx_sel, Some(MINIMUM_HISTORY_DISTANCE)) {
        prune.segments.bodies_history = Some(mode);
    }
    if let Some(mode) = selection_to_prune_mode(receipt_sel, Some(MINIMUM_RECEIPTS_DISTANCE)) {
        prune.segments.receipts = Some(mode);
    }
    if let Some(mode) = selection_to_prune_mode(account_cs_sel, Some(MINIMUM_HISTORY_DISTANCE)) {
        prune.segments.account_history = Some(mode);
    }
    if let Some(mode) = selection_to_prune_mode(storage_cs_sel, Some(MINIMUM_HISTORY_DISTANCE)) {
        prune.segments.storage_history = Some(mode);
    }
    config.prune = prune;
    config.static_files = static_files;
    config
}
/// Converts a [`ComponentSelection`] to an optional [`PruneMode`].
///
/// `min_distance` enforces the minimum blocks required for this segment.
/// When set, `None` and distances below the minimum are clamped to it
/// instead of producing `PruneMode::Full` which reth would reject.
fn selection_to_prune_mode(
    sel: ComponentSelection,
    min_distance: Option<u64>,
) -> Option<PruneMode> {
    match sel {
        // Everything kept — nothing to prune for this segment.
        ComponentSelection::All => None,
        // Clamp the requested distance up to the segment minimum when one exists.
        ComponentSelection::Distance(blocks) => {
            let clamped = min_distance.map_or(blocks, |min| blocks.max(min));
            Some(PruneMode::Distance(clamped))
        }
        // Nothing kept: prune fully unless a minimum distance forces retention.
        ComponentSelection::None => {
            Some(min_distance.map_or(PruneMode::Full, PruneMode::Distance))
        }
    }
}
/// Human-readable prune config summary.
pub(crate) fn describe_prune_config(config: &Config) -> Vec<String> {
    let s = &config.prune.segments;
    let entries = [
        ("sender_recovery", s.sender_recovery),
        ("transaction_lookup", s.transaction_lookup),
        ("bodies_history", s.bodies_history),
        ("receipts", s.receipts),
        ("account_history", s.account_history),
        ("storage_history", s.storage_history),
    ];
    // Only configured segments appear in the summary.
    let mut lines = Vec::new();
    for (name, mode) in entries {
        if let Some(m) = mode {
            lines.push(format!("{name}={}", format_mode(&m)));
        }
    }
    lines
}
/// Renders a [`PruneMode`] in TOML-like notation for display.
fn format_mode(mode: &PruneMode) -> String {
    match mode {
        // Quoted so the output matches TOML string syntax.
        PruneMode::Full => String::from("\"full\""),
        PruneMode::Distance(d) => format!("{{ distance = {d} }}"),
        PruneMode::Before(b) => format!("{{ before = {b} }}"),
    }
}
#[cfg(test)]
mod tests {
use super::*;
use reth_db::Database;
/// Empty manifest for tests that only care about prune config.
fn empty_manifest() -> SnapshotManifest {
SnapshotManifest {
block: 0,
chain_id: 1,
storage_version: 2,
timestamp: 0,
base_url: None,
components: BTreeMap::new(),
}
}
#[test]
fn write_prune_checkpoints_sets_all_segments() {
let dir = tempfile::tempdir().unwrap();
let db = reth_db::init_db(dir.path(), reth_db::mdbx::DatabaseArguments::default()).unwrap();
let mut selections = BTreeMap::new();
selections.insert(SnapshotComponentType::State, ComponentSelection::All);
selections.insert(SnapshotComponentType::Headers, ComponentSelection::All);
let config = config_for_selections(
&selections,
&empty_manifest(),
None,
None::<&reth_chainspec::ChainSpec>,
);
let snapshot_block = 21_000_000;
{
let tx = db.tx_mut().unwrap();
write_prune_checkpoints_tx(&tx, &config, snapshot_block).unwrap();
tx.commit().unwrap();
}
// Verify all expected segments have checkpoints
let tx = db.tx().unwrap();
for segment in [
PruneSegment::SenderRecovery,
PruneSegment::TransactionLookup,
PruneSegment::Receipts,
PruneSegment::AccountHistory,
PruneSegment::StorageHistory,
PruneSegment::Bodies,
] {
let checkpoint = tx
.get::<tables::PruneCheckpoints>(segment)
.unwrap()
.unwrap_or_else(|| panic!("expected checkpoint for {segment}"));
assert_eq!(checkpoint.block_number, Some(snapshot_block));
// No BlockBodyIndices in empty DB, so tx_number should be None
assert_eq!(checkpoint.tx_number, None);
}
}
#[test]
fn write_prune_checkpoints_archive_no_checkpoints() {
let dir = tempfile::tempdir().unwrap();
let db = reth_db::init_db(dir.path(), reth_db::mdbx::DatabaseArguments::default()).unwrap();
// Archive node — no pruning configured, so no checkpoints written
let mut selections = BTreeMap::new();
for ty in SnapshotComponentType::ALL {
selections.insert(ty, ComponentSelection::All);
}
let config = config_for_selections(
&selections,
&empty_manifest(),
None,
None::<&reth_chainspec::ChainSpec>,
);
{
let tx = db.tx_mut().unwrap();
write_prune_checkpoints_tx(&tx, &config, 21_000_000).unwrap();
tx.commit().unwrap();
}
let tx = db.tx().unwrap();
for segment in [PruneSegment::SenderRecovery, PruneSegment::TransactionLookup] {
assert!(
tx.get::<tables::PruneCheckpoints>(segment).unwrap().is_none(),
"expected no checkpoint for {segment} on archive node"
);
}
}
#[test]
fn selections_all_no_pruning() {
    // Selecting every component corresponds to an archive node: the generated
    // config must not prune any segment.
    let selections: BTreeMap<_, _> =
        SnapshotComponentType::ALL.into_iter().map(|ty| (ty, ComponentSelection::All)).collect();
    let config = config_for_selections(
        &selections,
        &empty_manifest(),
        None,
        None::<&reth_chainspec::ChainSpec>,
    );
    let segments = &config.prune.segments;
    assert_eq!(segments.transaction_lookup, None);
    assert_eq!(segments.sender_recovery, None);
    assert_eq!(segments.bodies_history, None);
    assert_eq!(segments.receipts, None);
    assert_eq!(segments.account_history, None);
    assert_eq!(segments.storage_history, None);
}
#[test]
fn selections_none_clamps_to_minimum_distance() {
    // Only state + headers selected: everything else is pruned, but segments with
    // minimum-retention requirements are clamped to their minimum distances.
    let selections = BTreeMap::from([
        (SnapshotComponentType::State, ComponentSelection::All),
        (SnapshotComponentType::Headers, ComponentSelection::All),
    ]);
    let config = config_for_selections(
        &selections,
        &empty_manifest(),
        None,
        None::<&reth_chainspec::ChainSpec>,
    );
    let segments = &config.prune.segments;
    assert_eq!(segments.transaction_lookup, Some(PruneMode::Full));
    assert_eq!(segments.sender_recovery, Some(PruneMode::Full));
    // All remaining segments clamp to their minimum distances.
    assert_eq!(segments.bodies_history, Some(PruneMode::Distance(MINIMUM_HISTORY_DISTANCE)));
    assert_eq!(segments.receipts, Some(PruneMode::Distance(MINIMUM_RECEIPTS_DISTANCE)));
    assert_eq!(segments.account_history, Some(PruneMode::Distance(MINIMUM_HISTORY_DISTANCE)));
    assert_eq!(segments.storage_history, Some(PruneMode::Distance(MINIMUM_HISTORY_DISTANCE)));
}
#[test]
fn selections_distance_maps_bodies_history() {
    let selections = BTreeMap::from([
        (SnapshotComponentType::State, ComponentSelection::All),
        (SnapshotComponentType::Headers, ComponentSelection::All),
        (SnapshotComponentType::Transactions, ComponentSelection::Distance(10_064)),
        (SnapshotComponentType::Receipts, ComponentSelection::None),
        (SnapshotComponentType::AccountChangesets, ComponentSelection::Distance(10_064)),
        (SnapshotComponentType::StorageChangesets, ComponentSelection::Distance(10_064)),
    ]);
    let config = config_for_selections(
        &selections,
        &empty_manifest(),
        None,
        None::<&reth_chainspec::ChainSpec>,
    );
    let segments = &config.prune.segments;
    assert_eq!(segments.transaction_lookup, Some(PruneMode::Full));
    assert_eq!(segments.sender_recovery, Some(PruneMode::Full));
    // Bodies pruning follows the transactions selection.
    assert_eq!(segments.bodies_history, Some(PruneMode::Distance(10_064)));
    // Receipts were deselected, so they clamp to the minimum receipts distance.
    assert_eq!(segments.receipts, Some(PruneMode::Distance(MINIMUM_RECEIPTS_DISTANCE)));
    assert_eq!(segments.account_history, Some(PruneMode::Distance(10_064)));
    assert_eq!(segments.storage_history, Some(PruneMode::Distance(10_064)));
}
#[test]
fn full_preset_matches_default_full_prune_config() {
    let selections = BTreeMap::from([
        (SnapshotComponentType::State, ComponentSelection::All),
        (SnapshotComponentType::Headers, ComponentSelection::All),
        (SnapshotComponentType::Transactions, ComponentSelection::Distance(500_000)),
        (SnapshotComponentType::Receipts, ComponentSelection::Distance(10_064)),
    ]);
    let chain_spec = reth_chainspec::MAINNET.clone();
    let config = config_for_selections(
        &selections,
        &empty_manifest(),
        Some(SelectionPreset::Full),
        Some(chain_spec.as_ref()),
    );
    let segments = &config.prune.segments;
    assert_eq!(segments.sender_recovery, Some(PruneMode::Full));
    assert_eq!(segments.transaction_lookup, None);
    assert_eq!(segments.receipts, Some(PruneMode::Distance(MINIMUM_HISTORY_DISTANCE)));
    assert_eq!(segments.account_history, Some(PruneMode::Distance(MINIMUM_HISTORY_DISTANCE)));
    assert_eq!(segments.storage_history, Some(PruneMode::Distance(MINIMUM_HISTORY_DISTANCE)));
    // The full preset prunes bodies before the merge boundary.
    let paris_block = chain_spec
        .ethereum_fork_activation(EthereumHardfork::Paris)
        .block_number()
        .expect("mainnet Paris block should be known");
    assert_eq!(segments.bodies_history, Some(PruneMode::Before(paris_block)));
}
#[test]
fn describe_selections_all_no_pruning() {
    // A full archive selection yields an empty prune description.
    let selections: BTreeMap<_, _> =
        SnapshotComponentType::ALL.into_iter().map(|ty| (ty, ComponentSelection::All)).collect();
    let config = config_for_selections(
        &selections,
        &empty_manifest(),
        None,
        None::<&reth_chainspec::ChainSpec>,
    );
    assert!(describe_prune_config(&config).is_empty());
}
#[test]
fn describe_selections_with_distances() {
    let selections = BTreeMap::from([
        (SnapshotComponentType::State, ComponentSelection::All),
        (SnapshotComponentType::Headers, ComponentSelection::All),
        (SnapshotComponentType::Transactions, ComponentSelection::Distance(10_064)),
        (SnapshotComponentType::Receipts, ComponentSelection::None),
    ]);
    let config = config_for_selections(
        &selections,
        &empty_manifest(),
        None,
        None::<&reth_chainspec::ChainSpec>,
    );
    let desc = describe_prune_config(&config);
    assert!(desc.contains(&"sender_recovery=\"full\"".to_string()));
    // Bodies follow the transactions selection; receipts clamp to the 64-block minimum.
    assert!(desc.contains(&"bodies_history={ distance = 10064 }".to_string()));
    assert!(desc.contains(&"receipts={ distance = 64 }".to_string()));
}
#[test]
fn reset_index_stage_checkpoints_clears_only_rocksdb_index_stages() {
    let dir = tempfile::tempdir().unwrap();
    let db = reth_db::init_db(dir.path(), reth_db::mdbx::DatabaseArguments::default()).unwrap();
    // Simulate a fully synced node: set stage checkpoints at tip
    let tip_checkpoint = StageCheckpoint::new(24_500_000);
    {
        let tx = db.tx_mut().unwrap();
        // Seed every RocksDB index stage and its prune segment at the tip.
        for stage_id in INDEX_STAGE_IDS {
            tx.put::<tables::StageCheckpoints>(stage_id.to_string(), tip_checkpoint).unwrap();
        }
        for segment in INDEX_PRUNE_SEGMENTS {
            tx.put::<tables::PruneCheckpoints>(
                segment,
                PruneCheckpoint {
                    block_number: Some(24_500_000),
                    tx_number: None,
                    prune_mode: PruneMode::Full,
                },
            )
            .unwrap();
        }
        // Sender recovery checkpoints should be preserved by reset.
        tx.put::<tables::StageCheckpoints>("SenderRecovery".to_string(), tip_checkpoint)
            .unwrap();
        tx.put::<tables::PruneCheckpoints>(
            PruneSegment::SenderRecovery,
            PruneCheckpoint {
                block_number: Some(24_500_000),
                tx_number: None,
                prune_mode: PruneMode::Full,
            },
        )
        .unwrap();
        tx.commit().unwrap();
    }
    // Reset
    {
        let tx = db.tx_mut().unwrap();
        reset_index_stage_checkpoints_tx(&tx).unwrap();
        tx.commit().unwrap();
    }
    // Verify stage checkpoints are at block 0
    let tx = db.tx().unwrap();
    for stage_id in INDEX_STAGE_IDS {
        let checkpoint = tx
            .get::<tables::StageCheckpoints>(stage_id.to_string())
            .unwrap()
            .expect("checkpoint should exist");
        assert_eq!(checkpoint.block_number, 0, "stage {stage_id} should be reset to block 0");
    }
    // Verify prune checkpoints are deleted
    for segment in INDEX_PRUNE_SEGMENTS {
        assert!(
            tx.get::<tables::PruneCheckpoints>(segment).unwrap().is_none(),
            "prune checkpoint for {segment} should be deleted"
        );
    }
    // Verify sender checkpoints are left untouched.
    let sender_stage_checkpoint = tx
        .get::<tables::StageCheckpoints>("SenderRecovery".to_string())
        .unwrap()
        .expect("sender checkpoint should exist");
    assert_eq!(sender_stage_checkpoint.block_number, tip_checkpoint.block_number);
    let sender_prune_checkpoint = tx
        .get::<tables::PruneCheckpoints>(PruneSegment::SenderRecovery)
        .unwrap()
        .expect("sender prune checkpoint should exist");
    assert_eq!(sender_prune_checkpoint.block_number, Some(tip_checkpoint.block_number));
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,232 @@
use crate::download::manifest::generate_manifest;
use clap::Parser;
use eyre::{Result, WrapErr};
use reth_db::{mdbx::DatabaseArguments, open_db_read_only, tables, Database};
use reth_db_api::transaction::DbTx;
use reth_stages_types::StageId;
use reth_static_file_types::DEFAULT_BLOCKS_PER_STATIC_FILE;
use std::{path::PathBuf, time::Instant};
use tracing::{info, warn};
/// Generate modular chunk archives and a snapshot manifest from a source datadir.
///
/// Archive naming convention:
/// - Chunked: `{component}-{start}-{end}.tar.zst` (e.g. `transactions-0-499999.tar.zst`)
#[derive(Debug, Parser)]
pub struct SnapshotManifestCommand {
    // NOTE: the `///` doc comments on the fields below double as clap `--help`
    // text; keep them short and user-facing.
    /// Source datadir containing static files.
    #[arg(long, short = 'd')]
    source_datadir: PathBuf,
    /// Optional base URL where archives will be hosted.
    // Forwarded to `generate_manifest`; presumably embedded into the archive
    // URLs recorded in the manifest — confirm in `generate_manifest`.
    #[arg(long)]
    base_url: Option<String>,
    /// Output directory where chunk archives and manifest.json are written.
    #[arg(long, short = 'o')]
    output_dir: PathBuf,
    /// Block number this snapshot was taken at.
    ///
    /// If omitted, this is inferred from the source datadir's `Finish` stage checkpoint.
    // See `infer_snapshot_block`: falls back to the header static-file tip when
    // the DB checkpoint is unreadable.
    #[arg(long)]
    block: Option<u64>,
    /// Chain ID.
    #[arg(long, default_value = "1")]
    chain_id: u64,
    /// Blocks per archive file for chunked components.
    ///
    /// If omitted, this is inferred from header static file ranges in the source datadir.
    #[arg(long)]
    blocks_per_file: Option<u64>,
}
impl SnapshotManifestCommand {
    /// Packages chunk archives from the source datadir and writes
    /// `manifest.json` into the output directory.
    ///
    /// Missing `--block` / `--blocks-per-file` values are inferred from the
    /// source datadir before packaging starts.
    pub fn execute(self) -> Result<()> {
        let block = self.block.map_or_else(|| infer_snapshot_block(&self.source_datadir), Ok)?;
        let blocks_per_file =
            self.blocks_per_file.map_or_else(|| infer_blocks_per_file(&self.source_datadir), Ok)?;
        info!(target: "reth::cli",
            dir = ?self.source_datadir,
            output = ?self.output_dir,
            block,
            blocks_per_file,
            "Packaging modular snapshot archives"
        );
        let started = Instant::now();
        let manifest = generate_manifest(
            &self.source_datadir,
            &self.output_dir,
            self.base_url.as_deref(),
            block,
            self.chain_id,
            blocks_per_file,
        )?;
        let component_count = manifest.components.len();
        let json = serde_json::to_string_pretty(&manifest)?;
        let manifest_path = self.output_dir.join("manifest.json");
        reth_fs_util::write(&manifest_path, &json)?;
        info!(target: "reth::cli",
            path = ?manifest_path,
            components = component_count,
            block = manifest.block,
            elapsed = ?started.elapsed(),
            "Manifest written"
        );
        Ok(())
    }
}
/// Infers the snapshot block, preferring the source DB's `Finish` stage
/// checkpoint and falling back to the highest header static-file range.
fn infer_snapshot_block(source_datadir: &std::path::Path) -> Result<u64> {
    match infer_snapshot_block_from_db(source_datadir) {
        Ok(block) => Ok(block),
        Err(_) => {
            let block = infer_snapshot_block_from_headers(source_datadir)?;
            warn!(
                target: "reth::cli",
                block,
                "Could not read Finish stage checkpoint from source DB, using header static-file tip"
            );
            Ok(block)
        }
    }
}
/// Reads the `Finish` stage checkpoint from the source database, trying both
/// `{datadir}/db` and the datadir itself as DB locations.
fn infer_snapshot_block_from_db(source_datadir: &std::path::Path) -> Result<u64> {
    for db_path in [source_datadir.join("db"), source_datadir.to_path_buf()] {
        if !db_path.exists() {
            continue;
        }
        // Unopenable candidates are skipped; transaction errors propagate.
        let Ok(db) = open_db_read_only(&db_path, DatabaseArguments::default()) else { continue };
        let tx = db.tx()?;
        if let Some(checkpoint) = tx.get::<tables::StageCheckpoints>(StageId::Finish.to_string())? {
            return Ok(checkpoint.block_number);
        }
    }
    eyre::bail!(
        "Could not infer --block from source DB (Finish checkpoint missing); pass --block manually"
    )
}
/// Infers the snapshot tip as the highest end bound across all header
/// static-file ranges.
fn infer_snapshot_block_from_headers(source_datadir: &std::path::Path) -> Result<u64> {
    header_ranges(source_datadir)?
        .iter()
        .map(|&(_, end)| end)
        .max()
        .ok_or_else(|| eyre::eyre!("No header static files found to infer --block"))
}
/// Infers the blocks-per-file chunk size from header static-file ranges,
/// requiring every range to span the same number of blocks.
fn infer_blocks_per_file(source_datadir: &std::path::Path) -> Result<u64> {
    let mut spans = header_ranges(source_datadir)?
        .into_iter()
        .map(|(start, end)| end.saturating_sub(start).saturating_add(1))
        // Defensive: zero-length spans are ignored rather than compared.
        .filter(|span| *span != 0);
    let Some(first) = spans.next() else {
        eyre::bail!(
            "Could not infer --blocks-per-file from header static files; pass it manually (default is {DEFAULT_BLOCKS_PER_STATIC_FILE})"
        )
    };
    if spans.any(|span| span != first) {
        eyre::bail!("Inconsistent header static file ranges; pass --blocks-per-file manually");
    }
    Ok(first)
}
/// Collects `(start, end)` block ranges from header static-file names.
///
/// Accepts either a datadir containing a `static_files/` subdirectory or the
/// static-files directory itself.
fn header_ranges(source_datadir: &std::path::Path) -> Result<Vec<(u64, u64)>> {
    let candidate = source_datadir.join("static_files");
    let static_files_dir =
        if candidate.exists() { candidate } else { source_datadir.to_path_buf() };
    let entries = std::fs::read_dir(&static_files_dir).wrap_err_with(|| {
        format!("Failed to read static files directory: {}", static_files_dir.display())
    })?;
    let mut ranges = Vec::new();
    for entry in entries {
        let name = entry?.file_name();
        if let Some(range) = parse_headers_range(&name.to_string_lossy()) {
            ranges.push(range);
        }
    }
    Ok(ranges)
}
/// Parses a header static-file name of the form
/// `static_file_headers_{start}_{end}[.<suffix>]` into an inclusive block range.
///
/// Returns `None` for other components or malformed names.
fn parse_headers_range(file_name: &str) -> Option<(u64, u64)> {
    let rest = file_name.strip_prefix("static_file_headers_")?;
    let (start_str, tail) = rest.split_once('_')?;
    // The end bound may carry an extension (e.g. `.jar`); only the leading
    // ASCII digits belong to the range.
    let digits_len = tail.find(|c: char| !c.is_ascii_digit()).unwrap_or(tail.len());
    let start = start_str.parse().ok()?;
    let end = tail[..digits_len].parse().ok()?;
    Some((start, end))
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    /// Creates an empty file named `name` inside `dir`.
    fn touch(dir: &std::path::Path, name: &str) {
        std::fs::write(dir.join(name), []).unwrap();
    }

    #[test]
    fn parse_headers_range_works_with_suffixes() {
        assert_eq!(parse_headers_range("static_file_headers_0_499999"), Some((0, 499_999)));
        assert_eq!(
            parse_headers_range("static_file_headers_500000_999999.jar"),
            Some((500_000, 999_999))
        );
        // Other components must not match.
        assert_eq!(parse_headers_range("static_file_transactions_0_499999"), None);
    }

    #[test]
    fn infer_blocks_per_file_from_header_ranges() {
        let dir = tempdir().unwrap();
        let sf = dir.path().join("static_files");
        std::fs::create_dir_all(&sf).unwrap();
        touch(&sf, "static_file_headers_0_499999");
        touch(&sf, "static_file_headers_500000_999999.jar");
        assert_eq!(infer_blocks_per_file(dir.path()).unwrap(), 500_000);
    }

    #[test]
    fn infer_snapshot_block_from_headers_uses_max_end() {
        let dir = tempdir().unwrap();
        let sf = dir.path().join("static_files");
        std::fs::create_dir_all(&sf).unwrap();
        touch(&sf, "static_file_headers_0_499999");
        touch(&sf, "static_file_headers_500000_999999");
        assert_eq!(infer_snapshot_block_from_headers(dir.path()).unwrap(), 999_999);
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,437 @@
use crate::download::{
manifest::{ComponentSelection, SnapshotComponentType, SnapshotManifest},
DownloadProgress, SelectionPreset,
};
use crossterm::{
event::{self, Event, KeyCode},
execute,
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
};
use ratatui::{
backend::CrosstermBackend,
layout::{Constraint, Direction, Layout},
style::{Color, Modifier, Style},
text::{Line, Span},
widgets::{Block, Borders, List, ListItem, ListState, Paragraph},
Frame, Terminal,
};
use std::{
collections::BTreeMap,
io,
time::{Duration, Instant},
};
/// Result of the interactive component selector.
pub struct SelectorOutput {
    /// User-confirmed selections with per-component ranges.
    pub selections: BTreeMap<SnapshotComponentType, ComponentSelection>,
    /// Last preset action used in the TUI, if any.
    ///
    /// `None` when the user manually adjusted a group after the last preset
    /// was applied.
    pub preset: Option<SelectionPreset>,
}
/// All distance presets. Groups filter this to only valid options.
// Ordering matters: the TUI cycles through each preset array in order,
// smallest retention first.
const DISTANCE_PRESETS: [ComponentSelection; 6] = [
    ComponentSelection::None,
    ComponentSelection::Distance(64),
    ComponentSelection::Distance(10_064),
    ComponentSelection::Distance(100_000),
    ComponentSelection::Distance(1_000_000),
    ComponentSelection::All,
];
/// Presets for components that require at least 64 blocks (receipts).
const RECEIPTS_PRESETS: [ComponentSelection; 5] = [
    ComponentSelection::Distance(64),
    ComponentSelection::Distance(10_064),
    ComponentSelection::Distance(100_000),
    ComponentSelection::Distance(1_000_000),
    ComponentSelection::All,
];
/// Presets for components that require at least 10064 blocks
/// (transactions and account/storage history groups).
const HISTORY_PRESETS: [ComponentSelection; 4] = [
    ComponentSelection::Distance(10_064),
    ComponentSelection::Distance(100_000),
    ComponentSelection::Distance(1_000_000),
    ComponentSelection::All,
];
/// A display group bundles one or more component types into a single TUI row.
struct DisplayGroup {
    /// Display name shown in the TUI.
    name: &'static str,
    /// Underlying component types this group controls.
    ///
    /// All types in a group share one selection (e.g. account + storage
    /// changesets are driven by the single "State History" row).
    types: Vec<SnapshotComponentType>,
    /// Whether this group is required and locked to All.
    required: bool,
    /// Valid presets for this group. Components with minimum distance requirements
    /// exclude presets that would produce invalid prune configs.
    presets: &'static [ComponentSelection],
}
/// Build the display groups from available components in the manifest.
fn build_groups(manifest: &SnapshotManifest) -> Vec<DisplayGroup> {
let has = |ty: SnapshotComponentType| manifest.component(ty).is_some();
let mut groups = Vec::new();
if has(SnapshotComponentType::State) {
groups.push(DisplayGroup {
name: "State (mdbx)",
types: vec![SnapshotComponentType::State],
required: true,
presets: &DISTANCE_PRESETS,
});
}
if has(SnapshotComponentType::Headers) {
groups.push(DisplayGroup {
name: "Headers",
types: vec![SnapshotComponentType::Headers],
required: true,
presets: &DISTANCE_PRESETS,
});
}
if has(SnapshotComponentType::Transactions) {
groups.push(DisplayGroup {
name: "Transactions",
types: vec![SnapshotComponentType::Transactions],
required: false,
presets: &HISTORY_PRESETS,
});
}
if has(SnapshotComponentType::Receipts) {
groups.push(DisplayGroup {
name: "Receipts",
types: vec![SnapshotComponentType::Receipts],
required: false,
presets: &RECEIPTS_PRESETS,
});
}
// Bundle account + storage changesets as "State History"
let has_acc = has(SnapshotComponentType::AccountChangesets);
let has_stor = has(SnapshotComponentType::StorageChangesets);
if has_acc || has_stor {
let mut types = Vec::new();
if has_acc {
types.push(SnapshotComponentType::AccountChangesets);
}
if has_stor {
types.push(SnapshotComponentType::StorageChangesets);
}
groups.push(DisplayGroup {
name: "State History",
types,
required: false,
presets: &HISTORY_PRESETS,
});
}
groups
}
struct SelectorApp {
    /// Manifest driving component availability and size estimates.
    manifest: SnapshotManifest,
    /// Selections applied by the full-node preset (`f` key).
    full_preset: BTreeMap<SnapshotComponentType, ComponentSelection>,
    /// Display groups shown in the TUI.
    groups: Vec<DisplayGroup>,
    /// Current selection for each group.
    ///
    /// Indexed in parallel with `groups`.
    selections: Vec<ComponentSelection>,
    /// Last preset action invoked by user.
    preset: Option<SelectionPreset>,
    /// Current cursor position.
    cursor: usize,
    /// List state for ratatui.
    list_state: ListState,
}
impl SelectorApp {
    /// Creates the app with the minimal preset applied and the cursor on row 0.
    fn new(
        manifest: SnapshotManifest,
        full_preset: BTreeMap<SnapshotComponentType, ComponentSelection>,
    ) -> Self {
        let groups = build_groups(&manifest);
        // Default to the minimal preset (matches --minimal prune config)
        let selections = groups.iter().map(|g| g.types[0].minimal_selection()).collect();
        let mut list_state = ListState::default();
        list_state.select(Some(0));
        Self {
            manifest,
            full_preset,
            groups,
            selections,
            preset: Some(SelectionPreset::Minimal),
            cursor: 0,
            list_state,
        }
    }
    /// Cycles the highlighted group forward to its next valid preset (wrapping).
    ///
    /// No-op for required groups; clears the active preset marker because the
    /// selection no longer matches a named preset.
    fn cycle_right(&mut self) {
        if let Some(group) = self.groups.get(self.cursor) {
            if group.required {
                return;
            }
            let presets = group.presets;
            let current = self.selections[self.cursor];
            // A selection not present in the preset list falls back to index 0.
            let idx = presets.iter().position(|p| *p == current).unwrap_or(0);
            self.selections[self.cursor] = presets[(idx + 1) % presets.len()];
            self.preset = None;
        }
    }
    /// Cycles the highlighted group backward to its previous valid preset (wrapping).
    fn cycle_left(&mut self) {
        if let Some(group) = self.groups.get(self.cursor) {
            if group.required {
                return;
            }
            let presets = group.presets;
            let current = self.selections[self.cursor];
            let idx = presets.iter().position(|p| *p == current).unwrap_or(0);
            // `+ len - 1` steps backward without underflowing.
            self.selections[self.cursor] = presets[(idx + presets.len() - 1) % presets.len()];
            self.preset = None;
        }
    }
    /// Applies the archive preset: every group (including required ones) set to `All`.
    fn select_all(&mut self) {
        for sel in &mut self.selections {
            *sel = ComponentSelection::All;
        }
        self.preset = Some(SelectionPreset::Archive);
    }
    /// Applies the minimal preset: every group reset to its minimal selection.
    fn select_minimal(&mut self) {
        for (i, group) in self.groups.iter().enumerate() {
            self.selections[i] = group.types[0].minimal_selection();
        }
        self.preset = Some(SelectionPreset::Minimal);
    }
    /// Applies the full-node preset. Each group takes the first preset entry
    /// found for any of its component types, falling back to the group's
    /// minimal selection when the preset covers none of them.
    fn select_full(&mut self) {
        for (i, group) in self.groups.iter().enumerate() {
            let mut selection = group.types[0].minimal_selection();
            for ty in &group.types {
                if let Some(sel) = self.full_preset.get(ty).copied() {
                    selection = sel;
                    break;
                }
            }
            self.selections[i] = selection;
        }
        self.preset = Some(SelectionPreset::Full);
    }
    /// Moves the cursor up one row, wrapping to the last row.
    fn move_up(&mut self) {
        if self.cursor > 0 {
            self.cursor -= 1;
        } else {
            self.cursor = self.groups.len().saturating_sub(1);
        }
        self.list_state.select(Some(self.cursor));
    }
    /// Moves the cursor down one row, wrapping to the first row.
    fn move_down(&mut self) {
        // Compare against `cursor + 1` instead of `groups.len() - 1` so an
        // empty group list cannot underflow and panic.
        if self.cursor + 1 < self.groups.len() {
            self.cursor += 1;
        } else {
            self.cursor = 0;
        }
        self.list_state.select(Some(self.cursor));
    }
    /// Build the flat component→selection map from grouped selections.
    fn selection_map(&self) -> BTreeMap<SnapshotComponentType, ComponentSelection> {
        let mut map = BTreeMap::new();
        for (group, sel) in self.groups.iter().zip(&self.selections) {
            // Every type in a group shares the group's single selection.
            for ty in &group.types {
                map.insert(*ty, *sel);
            }
        }
        map
    }
    /// Size for a single group, summing all component types in the group.
    ///
    /// `None` selections contribute zero; `All` queries the full component size.
    fn group_size(&self, group_idx: usize) -> u64 {
        let sel = self.selections[group_idx];
        let distance = match sel {
            ComponentSelection::None => return 0,
            ComponentSelection::All => None,
            ComponentSelection::Distance(d) => Some(d),
        };
        self.groups[group_idx]
            .types
            .iter()
            .map(|ty| self.manifest.size_for_distance(*ty, distance))
            .sum()
    }
    /// Total download size across all groups at their current selections.
    fn total_selected_size(&self) -> u64 {
        (0..self.groups.len()).map(|i| self.group_size(i)).sum()
    }
}
/// Runs the interactive component selector TUI.
///
/// # Errors
///
/// Returns an error if terminal setup fails or the user cancels the selection
/// (`q` / `Esc`).
pub fn run_selector(
    manifest: SnapshotManifest,
    full_preset: &BTreeMap<SnapshotComponentType, ComponentSelection>,
) -> eyre::Result<SelectorOutput> {
    // Enter raw mode + alternate screen for the duration of the TUI.
    enable_raw_mode()?;
    let mut stdout = io::stdout();
    execute!(stdout, EnterAlternateScreen)?;
    let backend = CrosstermBackend::new(stdout);
    let mut terminal = Terminal::new(backend)?;
    let mut app = SelectorApp::new(manifest, full_preset.clone());
    // Capture the loop result first so the terminal is always restored before
    // any error (e.g. user cancellation) is propagated.
    let result = event_loop(&mut terminal, &mut app);
    disable_raw_mode()?;
    execute!(terminal.backend_mut(), LeaveAlternateScreen)?;
    terminal.show_cursor()?;
    result
}
/// Drives the selector: redraws each frame, handles key presses, and returns
/// the confirmed selections (or an error when the user cancels).
fn event_loop(
    terminal: &mut Terminal<CrosstermBackend<io::Stdout>>,
    app: &mut SelectorApp,
) -> eyre::Result<SelectorOutput> {
    let tick_rate = Duration::from_millis(100);
    let mut last_tick = Instant::now();
    loop {
        terminal.draw(|f| render(f, app))?;
        // Wait for input at most until the next tick boundary.
        let timeout = tick_rate.saturating_sub(last_tick.elapsed());
        if crossterm::event::poll(timeout)? {
            if let Event::Key(key) = event::read()? {
                if key.kind == event::KeyEventKind::Press {
                    match key.code {
                        KeyCode::Char('q') | KeyCode::Esc => {
                            eyre::bail!("Download cancelled by user");
                        }
                        KeyCode::Enter => {
                            return Ok(SelectorOutput {
                                selections: app.selection_map(),
                                preset: app.preset,
                            });
                        }
                        KeyCode::Right | KeyCode::Char('l') | KeyCode::Char(' ') => {
                            app.cycle_right()
                        }
                        KeyCode::Left | KeyCode::Char('h') => app.cycle_left(),
                        KeyCode::Char('a') => app.select_all(),
                        KeyCode::Char('f') => app.select_full(),
                        KeyCode::Char('m') => app.select_minimal(),
                        KeyCode::Up | KeyCode::Char('k') => app.move_up(),
                        KeyCode::Down | KeyCode::Char('j') => app.move_down(),
                        _ => {}
                    }
                }
            }
        }
        if last_tick.elapsed() >= tick_rate {
            last_tick = Instant::now();
        }
    }
}
/// Human-readable label for a component selection shown in the TUI rows.
fn format_selection(sel: &ComponentSelection) -> String {
    match sel {
        ComponentSelection::All => String::from("All"),
        ComponentSelection::None => String::from("None"),
        ComponentSelection::Distance(d) => format!("Last {d} blocks"),
    }
}
/// Draws the selector UI: header, component list, and footer key hints.
fn render(f: &mut Frame<'_>, app: &mut SelectorApp) {
    let chunks = Layout::default()
        .direction(Direction::Vertical)
        .constraints([
            Constraint::Length(3), // Header
            Constraint::Min(8),    // Component list
            Constraint::Length(3), // Footer
        ])
        .split(f.area());
    // Header
    let block_info = if app.manifest.block > 0 {
        format!(" (block {})", app.manifest.block)
    } else {
        String::new()
    };
    let header = Paragraph::new(format!(" Select snapshot components to download{}", block_info))
        .style(Style::default().fg(Color::Cyan).add_modifier(Modifier::BOLD))
        .block(Block::default().borders(Borders::ALL).title("reth download"));
    f.render_widget(header, chunks[0]);
    // Component list
    let items: Vec<ListItem<'_>> = app
        .groups
        .iter()
        .enumerate()
        .map(|(i, group)| {
            let sel = &app.selections[i];
            let sel_str = format_selection(sel);
            let size = app.group_size(i);
            // Hide the size column entirely when nothing is selected.
            let size_str =
                if size > 0 { DownloadProgress::format_size(size) } else { String::new() };
            let required = if group.required { " (required)" } else { "" };
            // Arrow hints show whether the selection can still move in each direction.
            let at_max = *sel == *group.presets.last().unwrap_or(&ComponentSelection::All);
            let at_min = *sel == group.presets[0];
            let arrows = if group.required {
                " "
            } else if at_max {
                ""
            } else if at_min {
                ""
            } else {
                "◂ ▸"
            };
            // Required rows are dimmed; deselected rows white; active rows green.
            let style = if group.required {
                Style::default().fg(Color::DarkGray)
            } else if matches!(sel, ComponentSelection::None) {
                Style::default().fg(Color::White)
            } else {
                Style::default().fg(Color::Green)
            };
            ListItem::new(Line::from(vec![
                Span::styled(format!(" {:<22}", group.name), style),
                Span::styled(
                    format!("{arrows} {:<12}", sel_str),
                    style.add_modifier(Modifier::BOLD),
                ),
                Span::styled(format!("{:>10}", size_str), style.add_modifier(Modifier::DIM)),
                Span::styled(required.to_string(), Style::default().fg(Color::DarkGray)),
            ]))
        })
        .collect();
    let total_str = DownloadProgress::format_size(app.total_selected_size());
    let list = List::new(items)
        .block(
            Block::default()
                .borders(Borders::ALL)
                .title(format!("Components — Total: {total_str}")),
        )
        .highlight_style(Style::default().add_modifier(Modifier::BOLD).bg(Color::DarkGray))
        .highlight_symbol("");
    f.render_stateful_widget(list, chunks[1], &mut app.list_state);
    // Footer
    let footer = Paragraph::new(
        " [←/→] adjust [m] minimal [f] full [a] archive [Enter] confirm [Esc] cancel",
    )
    .style(Style::default().fg(Color::Cyan).add_modifier(Modifier::BOLD))
    .block(Block::default().borders(Borders::ALL));
    f.render_widget(footer, chunks[2]);
}

View File

@@ -194,6 +194,7 @@ where
runner.run_blocking_command_until_exit(|ctx| command.execute::<N>(ctx))
}
Commands::Download(command) => runner.run_blocking_until_ctrl_c(command.execute::<N>()),
Commands::SnapshotManifest(command) => command.execute(),
Commands::Stage(command) => {
runner.run_command_until_exit(|ctx| command.execute::<N, _>(ctx, components))
}

View File

@@ -6,7 +6,9 @@ use reth_chainspec::{ChainSpec, Hardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_commands::{
common::{CliComponentsBuilder, CliNodeTypes, HeaderMut},
config_cmd, db, download, dump_genesis, export_era, import, import_era, init_cmd, init_state,
config_cmd, db, download,
download::manifest_cmd,
dump_genesis, export_era, import, import_era, init_cmd, init_state,
launcher::FnLauncher,
node::{self, NoArgs},
p2p, prune, re_execute, stage,
@@ -281,6 +283,9 @@ pub enum Commands<
/// Download public node snapshots
#[command(name = "download")]
Download(download::DownloadCommand<C>),
/// Generate a snapshot manifest from local archive files.
#[command(name = "snapshot-manifest")]
SnapshotManifest(manifest_cmd::SnapshotManifestCommand),
/// Manipulate individual stages.
#[command(name = "stage")]
Stage(stage::Command<C>),
@@ -333,6 +338,7 @@ impl<C: ChainSpecParser, Ext: clap::Args + fmt::Debug, SubCmd: Subcommand + fmt:
Self::DumpGenesis(cmd) => cmd.chain_spec(),
Self::Db(cmd) => cmd.chain_spec(),
Self::Download(cmd) => cmd.chain_spec(),
Self::SnapshotManifest(_) => None,
Self::Stage(cmd) => cmd.chain_spec(),
Self::P2P(cmd) => cmd.chain_spec(),
#[cfg(feature = "dev")]

View File

@@ -41,6 +41,7 @@
- [`reth db account-storage`](./reth/db/account-storage.mdx)
- [`reth db state`](./reth/db/state.mdx)
- [`reth download`](./reth/download.mdx)
- [`reth snapshot-manifest`](./reth/snapshot-manifest.mdx)
- [`reth stage`](./reth/stage.mdx)
- [`reth stage run`](./reth/stage/run.mdx)
- [`reth stage drop`](./reth/stage/drop.mdx)

View File

@@ -9,21 +9,22 @@ $ reth --help
Usage: reth [OPTIONS] <COMMAND>
Commands:
node Start the node
init Initialize the database from a genesis file
init-state Initialize the database from a state dump file
import This syncs RLP encoded blocks from a file or files
import-era This syncs ERA encoded blocks from a directory
export-era Exports block to era1 files in a specified directory
dump-genesis Dumps genesis block JSON configuration to stdout
db Database debugging utilities
download Download public node snapshots
stage Manipulate individual stages
p2p P2P Debugging utilities
config Write config to stdout
prune Prune according to the configuration without any limits
re-execute Re-execute blocks in parallel to verify historical sync correctness
help Print this message or the help of the given subcommand(s)
node Start the node
init Initialize the database from a genesis file
init-state Initialize the database from a state dump file
import This syncs RLP encoded blocks from a file or files
import-era This syncs ERA encoded blocks from a directory
export-era Exports block to era1 files in a specified directory
dump-genesis Dumps genesis block JSON configuration to stdout
db Database debugging utilities
download Download public node snapshots
snapshot-manifest Generate a snapshot manifest from local archive files
stage Manipulate individual stages
p2p P2P Debugging utilities
config Write config to stdout
prune Prune according to the configuration without any limits
re-execute Re-execute blocks in parallel to verify historical sync correctness
help Print this message or the help of the given subcommand(s)
Options:
-h, --help

View File

@@ -133,6 +133,50 @@ Storage:
Local file:// URLs are also supported for extracting snapshots from disk.
--manifest-url <URL>
URL to a snapshot manifest.json for modular component downloads.
When provided, fetches this manifest instead of discovering it from the default base URL. Useful for testing with custom or local manifests.
--manifest-path <PATH>
Local path to a snapshot manifest.json for modular component downloads
--with-txs
Include transaction static files
--with-receipts
Include receipt static files
--with-state-history
Include account and storage history static files
--archive
Download all available components (archive node, no pruning)
--minimal
Download the minimal component set (same default as --non-interactive)
--full
Download the full node component set (matches default full prune settings)
--without-rocksdb
Skip optional RocksDB indices even when archive components are selected.
This affects `--archive`/`--all` and TUI archive preset (`a`).
-y, --non-interactive
Skip interactive component selection. Downloads the minimal set (state + headers + transactions + changesets) unless explicit --with-* flags narrow it
--resumable
Use resumable two-phase downloads (download to disk first, then extract).
Archives are downloaded to a .part file with HTTP Range resume support before extraction. Slower but tolerates network interruptions without restarting. By default, archives stream directly into the extractor.
--download-concurrency <DOWNLOAD_CONCURRENCY>
Maximum number of concurrent modular archive workers
[default: 8]
Logging:
--log.stdout.format <FORMAT>
The format to use for logs written to stdout

View File

@@ -0,0 +1,180 @@
# reth snapshot-manifest
Generate a snapshot manifest from local archive files
```bash
$ reth snapshot-manifest --help
```
```txt
Usage: reth snapshot-manifest [OPTIONS] --source-datadir <SOURCE_DATADIR> --output-dir <OUTPUT_DIR>
Options:
-d, --source-datadir <SOURCE_DATADIR>
Source datadir containing static files
--base-url <BASE_URL>
Optional base URL where archives will be hosted
-o, --output-dir <OUTPUT_DIR>
Output directory where chunk archives and manifest.json are written
--block <BLOCK>
Block number this snapshot was taken at.
If omitted, this is inferred from the source datadir's `Finish` stage checkpoint.
--chain-id <CHAIN_ID>
Chain ID
[default: 1]
--blocks-per-file <BLOCKS_PER_FILE>
Blocks per archive file for chunked components.
If omitted, this is inferred from header static file ranges in the source datadir.
-h, --help
Print help (see a summary with '-h')
Logging:
--log.stdout.format <FORMAT>
The format to use for logs written to stdout
Possible values:
- json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- terminal: Represents terminal-friendly formatting for logs
[default: terminal]
--log.stdout.filter <FILTER>
The filter to use for logs written to stdout
[default: ]
--log.file.format <FORMAT>
The format to use for logs written to the log file
Possible values:
- json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- terminal: Represents terminal-friendly formatting for logs
[default: terminal]
--log.file.filter <FILTER>
The filter to use for logs written to the log file
[default: debug]
--log.file.directory <PATH>
The path to put log files in
[default: <CACHE_DIR>/logs]
--log.file.name <NAME>
The prefix name of the log files
[default: reth.log]
--log.file.max-size <SIZE>
The maximum size (in MB) of one log file
[default: 200]
--log.file.max-files <COUNT>
The maximum amount of log files that will be stored. If set to 0, background file logging is disabled.
Default: 5 for `node` command, 0 for non-node utility subcommands.
--log.journald
Write logs to journald
--log.journald.filter <FILTER>
The filter to use for logs written to journald
[default: error]
--color <COLOR>
Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting
Possible values:
- always: Colors on
- auto: Auto-detect
- never: Colors off
[default: always]
--logs-otlp[=<URL>]
Enable `Opentelemetry` logs export to an OTLP endpoint.
If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317`
Example: --logs-otlp=http://collector:4318/v1/logs
[env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=]
--logs-otlp.filter <FILTER>
Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.
Example: --logs-otlp.filter=info,reth=debug
Defaults to INFO if not specified.
[default: info]
Display:
-v, --verbosity...
Set the minimum log level.
-v Errors
-vv Warnings
-vvv Info
-vvvv Debug
-vvvvv Traces (warning: very verbose!)
-q, --quiet
Silence all log output
Tracing:
--tracing-otlp[=<URL>]
Enable `Opentelemetry` tracing export to an OTLP endpoint.
If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317`
Example: --tracing-otlp=http://collector:4318/v1/traces
[env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=]
--tracing-otlp-protocol <PROTOCOL>
OTLP transport protocol to use for exporting traces and logs.
- `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path
Defaults to HTTP if not specified.
Possible values:
- http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path
- grpc: gRPC transport, port 4317
[env: OTEL_EXPORTER_OTLP_PROTOCOL=]
[default: http]
--tracing-otlp.filter <FILTER>
Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.
Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off
Defaults to DEBUG if not specified.
[default: debug]
--tracing-otlp.sample-ratio <RATIO>
Trace sampling ratio to control the percentage of traces to export.
Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling
Example: --tracing-otlp.sample-ratio=0.0.
[env: OTEL_TRACES_SAMPLER_ARG=]
```

View File

@@ -200,6 +200,10 @@ export const rethCliSidebar: SidebarItem = {
text: "reth download",
link: "/cli/reth/download"
},
{
text: "reth snapshot-manifest",
link: "/cli/reth/snapshot-manifest"
},
{
text: "reth stage",
link: "/cli/reth/stage",