feat(cli): add reth db state command for historical contract storage (#21570)

Co-authored-by: Amp <amp@ampcode.com>
Author: Matthias Seitz
Date: 2026-01-30 15:03:19 +01:00
Committed by: GitHub
Parent: b3d532ce9d
Commit: faf64c712e
14 changed files with 825 additions and 4 deletions
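
For reference, hypothetical invocations of the new subcommand (the address and block number below are examples only; the flags match the generated help text later in this commit):

```bash
# Current state of an account, first 100 non-zero slots as a table
reth db state 0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045

# Historical storage at block 1000000, emitted as JSON
reth db state 0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045 --block 1000000 --format json
```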

Cargo.lock (generated, 1 line changed)
View File

@@ -7814,6 +7814,7 @@ dependencies = [
"itertools 0.14.0",
"lz4",
"metrics",
"parking_lot",
"proptest",
"proptest-arbitrary-interop",
"ratatui",

View File

@@ -78,6 +78,7 @@ lz4.workspace = true
zstd.workspace = true
serde.workspace = true
serde_json.workspace = true
parking_lot.workspace = true
tar.workspace = true
tracing.workspace = true
backon.workspace = true

View File

@@ -17,6 +17,7 @@ mod get;
mod list;
mod repair_trie;
mod settings;
mod state;
mod static_file_header;
mod stats;
/// DB List TUI
@@ -65,6 +66,8 @@ pub enum Subcommands {
    Settings(settings::Command),
    /// Gets storage size information for an account
    AccountStorage(account_storage::Command),
    /// Gets account state and storage at a specific block
    State(state::Command),
}
/// Initializes a provider factory with specified access rights, and then executes with the provided
@@ -198,6 +201,11 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C>
                    command.execute(&tool)?;
                });
            }
            Subcommands::State(command) => {
                db_exec!(self.env, tool, N, AccessRights::RO, {
                    command.execute(&tool)?;
                });
            }
        }
        Ok(())

View File

@@ -0,0 +1,412 @@
use alloy_primitives::{Address, BlockNumber, B256, U256};
use clap::Parser;
use parking_lot::Mutex;
use reth_db_api::{
    cursor::{DbCursorRO, DbDupCursorRO},
    database::Database,
    tables,
    transaction::DbTx,
};
use reth_db_common::DbTool;
use reth_node_builder::NodeTypesWithDB;
use reth_provider::providers::ProviderNodeTypes;
use reth_storage_api::{BlockNumReader, StateProvider, StorageSettingsCache};
use std::{
    collections::BTreeSet,
    thread,
    time::{Duration, Instant},
};
use tracing::{error, info};

/// Log progress every 30 seconds
const LOG_INTERVAL: Duration = Duration::from_secs(30);

/// The arguments for the `reth db state` command
#[derive(Parser, Debug)]
pub struct Command {
    /// The account address to get state for
    address: Address,

    /// Block number to query state at (uses current state if not provided)
    #[arg(long, short)]
    block: Option<BlockNumber>,

    /// Maximum number of storage slots to display
    #[arg(long, short, default_value = "100")]
    limit: usize,

    /// Output format (table, json, csv)
    #[arg(long, short, default_value = "table")]
    format: OutputFormat,
}

impl Command {
    /// Execute `db state` command
    pub fn execute<N: NodeTypesWithDB + ProviderNodeTypes>(
        self,
        tool: &DbTool<N>,
    ) -> eyre::Result<()> {
        let address = self.address;
        let limit = self.limit;

        if let Some(block) = self.block {
            self.execute_historical(tool, address, block, limit)
        } else {
            self.execute_current(tool, address, limit)
        }
    }

    fn execute_current<N: NodeTypesWithDB + ProviderNodeTypes>(
        &self,
        tool: &DbTool<N>,
        address: Address,
        limit: usize,
    ) -> eyre::Result<()> {
        let entries = tool.provider_factory.db_ref().view(|tx| {
            // Get account info
            let account = tx.get::<tables::PlainAccountState>(address)?;

            // Get storage entries
            let mut cursor = tx.cursor_dup_read::<tables::PlainStorageState>()?;
            let mut entries = Vec::new();
            let mut last_log = Instant::now();
            let walker = cursor.walk_dup(Some(address), None)?;
            for (idx, entry) in walker.enumerate() {
                let (_, storage_entry) = entry?;
                if storage_entry.value != U256::ZERO {
                    entries.push((storage_entry.key, storage_entry.value));
                }
                if entries.len() >= limit {
                    break;
                }
                if last_log.elapsed() >= LOG_INTERVAL {
                    info!(
                        target: "reth::cli",
                        address = %address,
                        slots_scanned = idx,
                        "Scanning storage slots"
                    );
                    last_log = Instant::now();
                }
            }
            Ok::<_, eyre::Report>((account, entries))
        })??;

        let (account, storage_entries) = entries;
        self.print_results(address, None, account, &storage_entries);
        Ok(())
    }
    fn execute_historical<N: NodeTypesWithDB + ProviderNodeTypes>(
        &self,
        tool: &DbTool<N>,
        address: Address,
        block: BlockNumber,
        limit: usize,
    ) -> eyre::Result<()> {
        let provider = tool.provider_factory.history_by_block_number(block)?;

        // Get account info at that block
        let account = provider.basic_account(&address)?;

        // Check storage settings to determine where history is stored
        let storage_settings = tool.provider_factory.cached_storage_settings();
        let history_in_rocksdb = storage_settings.storages_history_in_rocksdb;

        // For historical queries, enumerate keys from history indices only
        // (not PlainStorageState, which reflects current state)
        let mut storage_keys = BTreeSet::new();

        if history_in_rocksdb {
            error!(
                target: "reth::cli",
                "Historical storage queries with RocksDB backend are not yet supported. \
                 Use MDBX for storage history or query current state without --block."
            );
            return Ok(());
        }

        // Collect keys from MDBX StorageChangeSets using parallel scanning
        self.collect_mdbx_storage_keys_parallel(tool, address, &mut storage_keys)?;

        info!(
            target: "reth::cli",
            address = %address,
            block = block,
            total_keys = storage_keys.len(),
            "Found storage keys to query"
        );

        // Now query each key at the historical block using the StateProvider.
        // This handles both MDBX and RocksDB backends transparently.
        let mut entries = Vec::new();
        let mut last_log = Instant::now();
        for (idx, key) in storage_keys.iter().enumerate() {
            match provider.storage(address, *key) {
                Ok(Some(value)) if value != U256::ZERO => {
                    entries.push((*key, value));
                }
                _ => {}
            }
            if entries.len() >= limit {
                break;
            }
            if last_log.elapsed() >= LOG_INTERVAL {
                info!(
                    target: "reth::cli",
                    address = %address,
                    block = block,
                    keys_total = storage_keys.len(),
                    slots_scanned = idx,
                    slots_found = entries.len(),
                    "Scanning historical storage slots"
                );
                last_log = Instant::now();
            }
        }

        self.print_results(address, Some(block), account, &entries);
        Ok(())
    }
    /// Collects storage keys from MDBX `StorageChangeSets` using parallel block range scanning.
    fn collect_mdbx_storage_keys_parallel<N: NodeTypesWithDB + ProviderNodeTypes>(
        &self,
        tool: &DbTool<N>,
        address: Address,
        keys: &mut BTreeSet<B256>,
    ) -> eyre::Result<()> {
        const CHUNK_SIZE: u64 = 500_000; // 500k blocks per chunk

        let num_threads = std::thread::available_parallelism()
            .map(|p| p.get().saturating_sub(1).max(1))
            .unwrap_or(4);

        // Get the current tip block
        let tip = tool.provider_factory.provider()?.best_block_number()?;
        if tip == 0 {
            return Ok(());
        }

        info!(
            target: "reth::cli",
            address = %address,
            tip,
            chunk_size = CHUNK_SIZE,
            num_threads,
            "Starting parallel MDBX changeset scan"
        );

        // Shared state for collecting keys
        let collected_keys: Mutex<BTreeSet<B256>> = Mutex::new(BTreeSet::new());
        let total_entries_scanned = Mutex::new(0usize);

        // Create chunk ranges
        let mut chunks: Vec<(u64, u64)> = Vec::new();
        let mut start = 0u64;
        while start <= tip {
            let end = (start + CHUNK_SIZE - 1).min(tip);
            chunks.push((start, end));
            start = end + 1;
        }

        let chunks_ref = &chunks;
        let next_chunk = Mutex::new(0usize);
        let next_chunk_ref = &next_chunk;
        let collected_keys_ref = &collected_keys;
        let total_entries_ref = &total_entries_scanned;

        thread::scope(|s| {
            let handles: Vec<_> = (0..num_threads)
                .map(|thread_id| {
                    s.spawn(move || {
                        loop {
                            // Get next chunk to process
                            let chunk_idx = {
                                let mut idx = next_chunk_ref.lock();
                                if *idx >= chunks_ref.len() {
                                    return Ok::<_, eyre::Report>(());
                                }
                                let current = *idx;
                                *idx += 1;
                                current
                            };
                            let (chunk_start, chunk_end) = chunks_ref[chunk_idx];

                            // Open a new read transaction for this chunk
                            tool.provider_factory.db_ref().view(|tx| {
                                tx.disable_long_read_transaction_safety();
                                let mut changeset_cursor =
                                    tx.cursor_read::<tables::StorageChangeSets>()?;
                                let start_key =
                                    reth_db_api::models::BlockNumberAddress((chunk_start, address));
                                let end_key =
                                    reth_db_api::models::BlockNumberAddress((chunk_end, address));

                                let mut local_keys = BTreeSet::new();
                                let mut entries_in_chunk = 0usize;
                                if let Ok(walker) = changeset_cursor.walk_range(start_key..=end_key)
                                {
                                    for (block_addr, storage_entry) in walker.flatten() {
                                        if block_addr.address() == address {
                                            local_keys.insert(storage_entry.key);
                                        }
                                        entries_in_chunk += 1;
                                    }
                                }

                                // Merge into global state
                                collected_keys_ref.lock().extend(local_keys);
                                *total_entries_ref.lock() += entries_in_chunk;

                                info!(
                                    target: "reth::cli",
                                    thread_id,
                                    chunk_start,
                                    chunk_end,
                                    entries_in_chunk,
                                    "Thread completed chunk"
                                );
                                Ok::<_, eyre::Report>(())
                            })??;
                        }
                    })
                })
                .collect();

            for handle in handles {
                handle.join().map_err(|_| eyre::eyre!("Thread panicked"))??;
            }
            Ok::<_, eyre::Report>(())
        })?;

        let final_keys = collected_keys.into_inner();
        let total = *total_entries_scanned.lock();
        info!(
            target: "reth::cli",
            address = %address,
            total_entries = total,
            unique_keys = final_keys.len(),
            "Finished parallel MDBX changeset scan"
        );
        keys.extend(final_keys);
        Ok(())
    }
    fn print_results(
        &self,
        address: Address,
        block: Option<BlockNumber>,
        account: Option<reth_primitives_traits::Account>,
        storage: &[(alloy_primitives::B256, U256)],
    ) {
        match self.format {
            OutputFormat::Table => {
                println!("Account: {address}");
                if let Some(b) = block {
                    println!("Block: {b}");
                } else {
                    println!("Block: latest");
                }
                println!();
                if let Some(acc) = account {
                    println!("Nonce: {}", acc.nonce);
                    println!("Balance: {} wei", acc.balance);
                    if let Some(code_hash) = acc.bytecode_hash {
                        println!("Code hash: {code_hash}");
                    }
                } else {
                    println!("Account not found");
                }
                println!();
                println!("Storage ({} slots):", storage.len());
                println!("{:-<130}", "");
                println!("{:<66} | {:<64}", "Slot", "Value");
                println!("{:-<130}", "");
                for (key, value) in storage {
                    println!("{key} | {value:#066x}");
                }
            }
            OutputFormat::Json => {
                let output = serde_json::json!({
                    "address": address.to_string(),
                    "block": block,
                    "account": account.map(|a| serde_json::json!({
                        "nonce": a.nonce,
                        "balance": a.balance.to_string(),
                        "code_hash": a.bytecode_hash.map(|h| h.to_string()),
                    })),
                    "storage": storage.iter().map(|(k, v)| {
                        serde_json::json!({
                            "key": k.to_string(),
                            "value": format!("{v:#066x}"),
                        })
                    }).collect::<Vec<_>>(),
                });
                println!("{}", serde_json::to_string_pretty(&output).unwrap());
            }
            OutputFormat::Csv => {
                println!("slot,value");
                for (key, value) in storage {
                    println!("{key},{value:#066x}");
                }
            }
        }
    }
}

#[derive(Debug, Clone, Default, clap::ValueEnum)]
pub enum OutputFormat {
    #[default]
    Table,
    Json,
    Csv,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parse_state_args() {
        let cmd = Command::try_parse_from([
            "state",
            "0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045",
            "--block",
            "1000000",
        ])
        .unwrap();
        assert_eq!(
            cmd.address,
            "0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045".parse::<Address>().unwrap()
        );
        assert_eq!(cmd.block, Some(1000000));
    }

    #[test]
    fn parse_state_args_no_block() {
        let cmd = Command::try_parse_from(["state", "0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045"])
            .unwrap();
        assert_eq!(cmd.block, None);
    }
}
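
The historical path above is a two-phase lookup; below is a condensed sketch, not part of the diff, under stated assumptions (a `ProviderFactory` handle named `factory`, an `address: Address`, and a candidate `slot: B256` gathered in phase 1; error handling elided):

```rust
// Phase 1 (parallelized above): walk tables::StorageChangeSets and gather
// every slot key that was ever written for `address`.
//
// Phase 2: read each candidate slot through the historical state provider,
// which resolves values as of the requested block.
let provider = factory.history_by_block_number(1_000_000)?;
let account = provider.basic_account(&address)?;
println!("account present: {}", account.is_some());
if let Some(value) = provider.storage(address, slot)? {
    // The command additionally filters out zero values before printing.
    println!("{slot}: {value:#066x}");
}
```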

View File

@@ -39,8 +39,8 @@ pub use consistent::ConsistentProvider;
pub(crate) mod rocksdb;
pub use rocksdb::{
-    RocksDBBatch, RocksDBBuilder, RocksDBProvider, RocksDBRawIter, RocksDBStats, RocksDBTableStats,
-    RocksTx,
+    RocksDBBatch, RocksDBBuilder, RocksDBIter, RocksDBProvider, RocksDBRawIter, RocksDBStats,
+    RocksDBTableStats, RocksTx,
};
/// Helper trait to bound [`NodeTypes`] so that combined with database they satisfy

View File

@@ -6,6 +6,6 @@ mod provider;
pub(crate) use provider::{PendingRocksDBBatches, RocksDBWriteCtx};
pub use provider::{
-    RocksDBBatch, RocksDBBuilder, RocksDBProvider, RocksDBRawIter, RocksDBStats, RocksDBTableStats,
-    RocksTx,
+    RocksDBBatch, RocksDBBuilder, RocksDBIter, RocksDBProvider, RocksDBRawIter, RocksDBStats,
+    RocksDBTableStats, RocksTx,
};

View File

@@ -119,6 +119,13 @@ impl RocksDBProvider {
    pub const fn flush(&self, _tables: &[&'static str]) -> ProviderResult<()> {
        Ok(())
    }

    /// Creates an iterator over all entries in the specified table (stub implementation).
    ///
    /// Returns an empty iterator since there is no `RocksDB` when the feature is disabled.
    pub const fn iter<T: reth_db_api::table::Table>(&self) -> ProviderResult<RocksDBIter<T>> {
        Ok(RocksDBIter(std::marker::PhantomData))
    }
}
impl DatabaseMetrics for RocksDBProvider {
@@ -189,3 +196,15 @@ pub struct RocksTx;
/// A stub raw iterator for `RocksDB`.
#[derive(Debug)]
pub struct RocksDBRawIter;

/// A stub typed iterator for `RocksDB`.
#[derive(Debug)]
pub struct RocksDBIter<T: reth_db_api::table::Table>(std::marker::PhantomData<T>);

impl<T: reth_db_api::table::Table> Iterator for RocksDBIter<T> {
    type Item = ProviderResult<(T::Key, T::Value)>;

    fn next(&mut self) -> Option<Self::Item> {
        None
    }
}
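
A brief consumption sketch (hypothetical caller code; the `provider` handle and table type `T` are assumptions): callers can iterate unconditionally, because with the rocksdb feature disabled the stub immediately yields `None`.

```rust
// Sketch only: this loop body never executes when the stub is in play,
// since RocksDBIter::next() returns None on the first call.
for entry in provider.iter::<T>()? {
    let (key, value) = entry?; // items are ProviderResult<(T::Key, T::Value)>
    // ... process key/value ...
}
```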

View File

@@ -37,6 +37,7 @@
- [`reth db settings set account_history`](./reth/db/settings/set/account_history.mdx)
- [`reth db settings set storage_changesets`](./reth/db/settings/set/storage_changesets.mdx)
- [`reth db account-storage`](./reth/db/account-storage.mdx)
- [`reth db state`](./reth/db/state.mdx)
- [`reth download`](./reth/download.mdx)
- [`reth stage`](./reth/stage.mdx)
- [`reth stage run`](./reth/stage/run.mdx)
@@ -97,6 +98,7 @@
- [`op-reth db settings set account_history`](./op-reth/db/settings/set/account_history.mdx)
- [`op-reth db settings set storage_changesets`](./op-reth/db/settings/set/storage_changesets.mdx)
- [`op-reth db account-storage`](./op-reth/db/account-storage.mdx)
- [`op-reth db state`](./op-reth/db/state.mdx)
- [`op-reth stage`](./op-reth/stage.mdx)
- [`op-reth stage run`](./op-reth/stage/run.mdx)
- [`op-reth stage drop`](./op-reth/stage/drop.mdx)

View File

@@ -22,6 +22,7 @@ Commands:
  path             Returns the full database path
  settings         Manage storage settings
  account-storage  Gets storage size information for an account
  state            Gets account state and storage at a specific block
  help             Print this message or the help of the given subcommand(s)
Options:

View File

@@ -0,0 +1,184 @@
# op-reth db state

Gets account state and storage at a specific block

```bash
$ op-reth db state --help
```

```txt
Usage: op-reth db state [OPTIONS] <ADDRESS>

Arguments:
  <ADDRESS>
          The account address to get state for

Options:
  -b, --block <BLOCK>
          Block number to query state at (uses current state if not provided)

  -l, --limit <LIMIT>
          Maximum number of storage slots to display

          [default: 100]

  -f, --format <FORMAT>
          Output format (table, json, csv)

          [default: table]
          [possible values: table, json, csv]

  -h, --help
          Print help (see a summary with '-h')

Datadir:
      --chain <CHAIN_OR_PATH>
          The chain this node is running.

          Possible values are either a built-in chain or the path to a chain specification file.

          Built-in chains:
              optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev

          [default: optimism]

Logging:
      --log.stdout.format <FORMAT>
          The format to use for logs written to stdout

          Possible values:
          - json:     Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
          - log-fmt:  Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
          - terminal: Represents terminal-friendly formatting for logs

          [default: terminal]

      --log.stdout.filter <FILTER>
          The filter to use for logs written to stdout

          [default: ]

      --log.file.format <FORMAT>
          The format to use for logs written to the log file

          Possible values:
          - json:     Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
          - log-fmt:  Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
          - terminal: Represents terminal-friendly formatting for logs

          [default: terminal]

      --log.file.filter <FILTER>
          The filter to use for logs written to the log file

          [default: debug]

      --log.file.directory <PATH>
          The path to put log files in

          [default: <CACHE_DIR>/logs]

      --log.file.name <NAME>
          The prefix name of the log files

          [default: reth.log]

      --log.file.max-size <SIZE>
          The maximum size (in MB) of one log file

          [default: 200]

      --log.file.max-files <COUNT>
          The maximum amount of log files that will be stored. If set to 0, background file logging is disabled

          [default: 5]

      --log.journald
          Write logs to journald

      --log.journald.filter <FILTER>
          The filter to use for logs written to journald

          [default: error]

      --color <COLOR>
          Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting

          Possible values:
          - always: Colors on
          - auto:   Auto-detect
          - never:  Colors off

          [default: always]

      --logs-otlp[=<URL>]
          Enable `Opentelemetry` logs export to an OTLP endpoint.

          If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317`

          Example: --logs-otlp=http://collector:4318/v1/logs

          [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=]

      --logs-otlp.filter <FILTER>
          Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.

          Example: --logs-otlp.filter=info,reth=debug

          Defaults to INFO if not specified.

          [default: info]

Display:
  -v, --verbosity...
          Set the minimum log level.

          -v      Errors
          -vv     Warnings
          -vvv    Info
          -vvvv   Debug
          -vvvvv  Traces (warning: very verbose!)

  -q, --quiet
          Silence all log output

Tracing:
      --tracing-otlp[=<URL>]
          Enable `Opentelemetry` tracing export to an OTLP endpoint.

          If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317`

          Example: --tracing-otlp=http://collector:4318/v1/traces

          [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=]

      --tracing-otlp-protocol <PROTOCOL>
          OTLP transport protocol to use for exporting traces and logs.

          - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path

          Defaults to HTTP if not specified.

          Possible values:
          - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path
          - grpc: gRPC transport, port 4317

          [env: OTEL_EXPORTER_OTLP_PROTOCOL=]
          [default: http]

      --tracing-otlp.filter <FILTER>
          Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.

          Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off

          Defaults to TRACE if not specified.

          [default: debug]

      --tracing-otlp.sample-ratio <RATIO>
          Trace sampling ratio to control the percentage of traces to export.

          Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling

          Example: --tracing-otlp.sample-ratio=0.0.

          [env: OTEL_TRACES_SAMPLER_ARG=]
```
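
An example invocation (hypothetical address; the output shape follows `--format`):

```bash
op-reth db state 0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045 --limit 10 --format csv
```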

View File

@@ -22,6 +22,7 @@ Commands:
  path             Returns the full database path
  settings         Manage storage settings
  account-storage  Gets storage size information for an account
  state            Gets account state and storage at a specific block
  help             Print this message or the help of the given subcommand(s)
Options:

View File

@@ -0,0 +1,184 @@
# reth db state

Gets account state and storage at a specific block

```bash
$ reth db state --help
```

```txt
Usage: reth db state [OPTIONS] <ADDRESS>

Arguments:
  <ADDRESS>
          The account address to get state for

Options:
  -b, --block <BLOCK>
          Block number to query state at (uses current state if not provided)

  -l, --limit <LIMIT>
          Maximum number of storage slots to display

          [default: 100]

  -f, --format <FORMAT>
          Output format (table, json, csv)

          [default: table]
          [possible values: table, json, csv]

  -h, --help
          Print help (see a summary with '-h')

Datadir:
      --chain <CHAIN_OR_PATH>
          The chain this node is running.

          Possible values are either a built-in chain or the path to a chain specification file.

          Built-in chains:
              mainnet, sepolia, holesky, hoodi, dev

          [default: mainnet]

Logging:
      --log.stdout.format <FORMAT>
          The format to use for logs written to stdout

          Possible values:
          - json:     Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
          - log-fmt:  Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
          - terminal: Represents terminal-friendly formatting for logs

          [default: terminal]

      --log.stdout.filter <FILTER>
          The filter to use for logs written to stdout

          [default: ]

      --log.file.format <FORMAT>
          The format to use for logs written to the log file

          Possible values:
          - json:     Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
          - log-fmt:  Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
          - terminal: Represents terminal-friendly formatting for logs

          [default: terminal]

      --log.file.filter <FILTER>
          The filter to use for logs written to the log file

          [default: debug]

      --log.file.directory <PATH>
          The path to put log files in

          [default: <CACHE_DIR>/logs]

      --log.file.name <NAME>
          The prefix name of the log files

          [default: reth.log]

      --log.file.max-size <SIZE>
          The maximum size (in MB) of one log file

          [default: 200]

      --log.file.max-files <COUNT>
          The maximum amount of log files that will be stored. If set to 0, background file logging is disabled

          [default: 5]

      --log.journald
          Write logs to journald

      --log.journald.filter <FILTER>
          The filter to use for logs written to journald

          [default: error]

      --color <COLOR>
          Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting

          Possible values:
          - always: Colors on
          - auto:   Auto-detect
          - never:  Colors off

          [default: always]

      --logs-otlp[=<URL>]
          Enable `Opentelemetry` logs export to an OTLP endpoint.

          If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317`

          Example: --logs-otlp=http://collector:4318/v1/logs

          [env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=]

      --logs-otlp.filter <FILTER>
          Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.

          Example: --logs-otlp.filter=info,reth=debug

          Defaults to INFO if not specified.

          [default: info]

Display:
  -v, --verbosity...
          Set the minimum log level.

          -v      Errors
          -vv     Warnings
          -vvv    Info
          -vvvv   Debug
          -vvvvv  Traces (warning: very verbose!)

  -q, --quiet
          Silence all log output

Tracing:
      --tracing-otlp[=<URL>]
          Enable `Opentelemetry` tracing export to an OTLP endpoint.

          If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317`

          Example: --tracing-otlp=http://collector:4318/v1/traces

          [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=]

      --tracing-otlp-protocol <PROTOCOL>
          OTLP transport protocol to use for exporting traces and logs.

          - `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path

          Defaults to HTTP if not specified.

          Possible values:
          - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path
          - grpc: gRPC transport, port 4317

          [env: OTEL_EXPORTER_OTLP_PROTOCOL=]
          [default: http]

      --tracing-otlp.filter <FILTER>
          Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.

          Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off

          Defaults to TRACE if not specified.

          [default: debug]

      --tracing-otlp.sample-ratio <RATIO>
          Trace sampling ratio to control the percentage of traces to export.

          Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling

          Example: --tracing-otlp.sample-ratio=0.0.

          [env: OTEL_TRACES_SAMPLER_ARG=]
```
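
An example invocation (hypothetical address and block number):

```bash
reth db state 0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045 --block 1000000
```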

View File

@@ -171,6 +171,10 @@ export const opRethCliSidebar: SidebarItem = {
      {
        text: "op-reth db account-storage",
        link: "/cli/op-reth/db/account-storage"
      },
      {
        text: "op-reth db state",
        link: "/cli/op-reth/db/state"
      }
    ]
  },

View File

@@ -175,6 +175,10 @@ export const rethCliSidebar: SidebarItem = {
      {
        text: "reth db account-storage",
        link: "/cli/reth/db/account-storage"
      },
      {
        text: "reth db state",
        link: "/cli/reth/db/state"
      }
    ]
  },