feat(cli): add db stage-checkpoints command (#22579)

Co-authored-by: Amp <amp@ampcode.com>
This commit is contained in:
Dan Cline
2026-02-25 14:58:59 -05:00
committed by GitHub
parent 6dcab51c97
commit ce2a194fb7
8 changed files with 843 additions and 0 deletions

View File

@@ -19,6 +19,7 @@ mod list;
mod prune_checkpoints;
mod repair_trie;
mod settings;
mod stage_checkpoints;
mod state;
mod static_file_header;
mod stats;
@@ -70,6 +71,8 @@ pub enum Subcommands {
Settings(settings::Command),
/// View or set prune checkpoints
PruneCheckpoints(prune_checkpoints::Command),
/// View or set stage checkpoints
StageCheckpoints(stage_checkpoints::Command),
/// Gets storage size information for an account
AccountStorage(account_storage::Command),
/// Gets account state and storage at a specific block
@@ -213,6 +216,11 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C>
command.execute(&tool)?;
});
}
Subcommands::StageCheckpoints(command) => {
db_exec!(self.env, tool, N, command.access_rights(), {
command.execute(&tool)?;
});
}
Subcommands::AccountStorage(command) => {
db_exec!(self.env, tool, N, AccessRights::RO, {
command.execute(&tool)?;

View File

@@ -0,0 +1,297 @@
//! `reth db stage-checkpoints` command for viewing and setting stage checkpoint values.
use clap::{Args, Parser, Subcommand, ValueEnum};
use reth_db_common::DbTool;
use reth_provider::{
providers::ProviderNodeTypes, DBProvider, DatabaseProviderFactory, StageCheckpointReader,
StageCheckpointWriter,
};
use reth_stages::StageId;
use crate::common::AccessRights;
/// `reth db stage-checkpoints` subcommand
///
/// Views (`get`) or overwrites (`set`) the per-stage sync checkpoints stored in
/// the database.
#[derive(Debug, Parser)]
pub struct Command {
    // Which operation to run; determines the required database access rights.
    #[command(subcommand)]
    command: Subcommands,
}
impl Command {
/// Returns database access rights required for the command.
pub fn access_rights(&self) -> AccessRights {
match &self.command {
Subcommands::Get { .. } => AccessRights::RO,
Subcommands::Set(_) => AccessRights::RW,
}
}
/// Execute the command
pub fn execute<N: ProviderNodeTypes>(self, tool: &DbTool<N>) -> eyre::Result<()> {
match self.command {
Subcommands::Get { stage } => Self::get(tool, stage),
Subcommands::Set(args) => Self::set(tool, args),
}
}
fn get<N: ProviderNodeTypes>(tool: &DbTool<N>, stage: Option<StageArg>) -> eyre::Result<()> {
let provider = tool.provider_factory.provider()?;
match stage {
Some(stage) => {
let stage_id = stage.into();
let checkpoint = provider.get_stage_checkpoint(stage_id)?;
println!("{stage_id}: {checkpoint:?}");
}
None => {
let mut checkpoints = provider.get_all_checkpoints()?;
checkpoints.sort_by(|a, b| a.0.cmp(&b.0));
for (stage, checkpoint) in checkpoints {
println!("{stage}: {checkpoint:?}");
}
}
}
Ok(())
}
fn set<N: ProviderNodeTypes>(tool: &DbTool<N>, args: SetArgs) -> eyre::Result<()> {
let stage_id: StageId = args.stage.into();
let provider_rw = tool.provider_factory.database_provider_rw()?;
let previous = provider_rw.get_stage_checkpoint(stage_id)?;
let mut checkpoint = previous.unwrap_or_default();
checkpoint.block_number = args.block_number;
if args.clear_stage_unit {
checkpoint.stage_checkpoint = None;
}
provider_rw.save_stage_checkpoint(stage_id, checkpoint)?;
provider_rw.commit()?;
println!("Updated checkpoint for {stage_id}: {checkpoint:?}");
Ok(())
}
}
/// Operations supported by `reth db stage-checkpoints`.
#[derive(Debug, Subcommand)]
enum Subcommands {
    /// Get stage checkpoint(s) from database.
    Get {
        /// Specific stage to query. If omitted, shows all stages.
        #[arg(long, value_enum)]
        stage: Option<StageArg>,
    },
    /// Set a stage checkpoint.
    Set(SetArgs),
}
/// Arguments for the `set` subcommand.
#[derive(Debug, Args)]
pub struct SetArgs {
    /// Stage to update.
    #[arg(long, value_enum)]
    stage: StageArg,
    /// Block number to set as stage checkpoint.
    #[arg(long)]
    block_number: u64,
    /// Clear stage-specific unit checkpoint payload.
    ///
    /// When set, the `stage_checkpoint` field is wiped; otherwise any existing
    /// unit payload is preserved.
    #[arg(long)]
    clear_stage_unit: bool,
}
/// CLI-friendly stage names.
///
/// Each variant maps one-to-one onto a [`StageId`] variant (see the `From`
/// impl below); `kebab-case` renaming gives the CLI values such as
/// `sender-recovery` and `merkle-execute`.
#[derive(Debug, Clone, Copy, ValueEnum)]
#[clap(rename_all = "kebab-case")]
pub enum StageArg {
    Era,
    Headers,
    Bodies,
    SenderRecovery,
    Execution,
    PruneSenderRecovery,
    MerkleUnwind,
    AccountHashing,
    StorageHashing,
    MerkleExecute,
    TransactionLookup,
    IndexStorageHistory,
    IndexAccountHistory,
    Prune,
    Finish,
}
/// Converts the CLI-facing stage name into the database-facing [`StageId`].
///
/// The match is exhaustive, so adding a `StageArg` variant without a mapping
/// is a compile error.
impl From<StageArg> for StageId {
    fn from(arg: StageArg) -> Self {
        match arg {
            StageArg::Era => Self::Era,
            StageArg::Headers => Self::Headers,
            StageArg::Bodies => Self::Bodies,
            StageArg::SenderRecovery => Self::SenderRecovery,
            StageArg::Execution => Self::Execution,
            StageArg::PruneSenderRecovery => Self::PruneSenderRecovery,
            StageArg::MerkleUnwind => Self::MerkleUnwind,
            StageArg::AccountHashing => Self::AccountHashing,
            StageArg::StorageHashing => Self::StorageHashing,
            StageArg::MerkleExecute => Self::MerkleExecute,
            StageArg::TransactionLookup => Self::TransactionLookup,
            StageArg::IndexStorageHistory => Self::IndexStorageHistory,
            StageArg::IndexAccountHistory => Self::IndexAccountHistory,
            StageArg::Prune => Self::Prune,
            StageArg::Finish => Self::Finish,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;
    use reth_provider::{
        test_utils::create_test_provider_factory, DBProvider, DatabaseProviderFactory,
        StageCheckpointReader, StageCheckpointWriter,
    };
    use reth_stages::StageCheckpoint;

    /// `set --stage headers --block-number 123` parses into `SetArgs`, with
    /// `clear_stage_unit` defaulting to `false` when the flag is absent.
    #[test]
    fn parse_set_args() {
        let command = Command::parse_from([
            "stage-checkpoints",
            "set",
            "--stage",
            "headers",
            "--block-number",
            "123",
        ]);
        assert!(matches!(
            command.command,
            Subcommands::Set(SetArgs {
                stage: StageArg::Headers,
                block_number: 123,
                clear_stage_unit: false,
            })
        ));
    }

    /// `set` replaces the block number of an existing checkpoint (10 -> 42).
    #[test]
    fn set_overwrites_block_number() {
        let provider_factory = create_test_provider_factory();
        let tool = DbTool::new(provider_factory.clone()).expect("db tool");
        // Seed a committed checkpoint at block 10 before running the command.
        {
            let provider_rw = provider_factory.database_provider_rw().expect("rw provider");
            provider_rw
                .save_stage_checkpoint(StageId::Headers, StageCheckpoint::new(10))
                .expect("save checkpoint");
            provider_rw.commit().expect("commit initial checkpoint");
        }
        let command = Command {
            command: Subcommands::Set(SetArgs {
                stage: StageArg::Headers,
                block_number: 42,
                clear_stage_unit: false,
            }),
        };
        command.execute(&tool).expect("execute command");
        let provider = provider_factory.provider().expect("provider");
        let checkpoint = provider
            .get_stage_checkpoint(StageId::Headers)
            .expect("get stage checkpoint")
            .expect("missing stage checkpoint");
        assert_eq!(checkpoint.block_number, 42);
    }

    /// The stage-specific unit payload survives a plain `set` and is removed
    /// only when `clear_stage_unit` is requested.
    #[test]
    fn set_preserves_stage_unit_checkpoint_unless_cleared() {
        let provider_factory = create_test_provider_factory();
        let tool = DbTool::new(provider_factory.clone()).expect("db tool");
        // Seed a checkpoint that carries a unit payload (a block range).
        {
            let provider_rw = provider_factory.database_provider_rw().expect("rw provider");
            let checkpoint = StageCheckpoint::new(10).with_block_range(&StageId::Execution, 5, 10);
            provider_rw
                .save_stage_checkpoint(StageId::Execution, checkpoint)
                .expect("save checkpoint");
            provider_rw.commit().expect("commit initial checkpoint");
        }
        // First run without the clear flag: payload must be preserved.
        Command {
            command: Subcommands::Set(SetArgs {
                stage: StageArg::Execution,
                block_number: 11,
                clear_stage_unit: false,
            }),
        }
        .execute(&tool)
        .expect("execute command");
        let provider = provider_factory.provider().expect("provider");
        let checkpoint = provider
            .get_stage_checkpoint(StageId::Execution)
            .expect("get stage checkpoint")
            .expect("missing stage checkpoint");
        assert!(checkpoint.stage_checkpoint.is_some());
        // Second run with the clear flag: payload must be wiped.
        Command {
            command: Subcommands::Set(SetArgs {
                stage: StageArg::Execution,
                block_number: 12,
                clear_stage_unit: true,
            }),
        }
        .execute(&tool)
        .expect("execute command");
        let checkpoint = provider_factory
            .provider()
            .expect("provider")
            .get_stage_checkpoint(StageId::Execution)
            .expect("get stage checkpoint")
            .expect("missing stage checkpoint");
        assert!(checkpoint.stage_checkpoint.is_none());
    }

    /// `set` leaves the separately-stored checkpoint *progress* blob untouched;
    /// only the checkpoint entry itself is rewritten.
    #[test]
    fn set_preserves_checkpoint_progress() {
        let provider_factory = create_test_provider_factory();
        let tool = DbTool::new(provider_factory.clone()).expect("db tool");
        // Seed both a checkpoint and a progress blob for the same stage.
        {
            let provider_rw = provider_factory.database_provider_rw().expect("rw provider");
            provider_rw
                .save_stage_checkpoint(StageId::MerkleExecute, StageCheckpoint::new(10))
                .expect("save checkpoint");
            provider_rw
                .save_stage_checkpoint_progress(StageId::MerkleExecute, vec![1, 2, 3])
                .expect("save progress");
            provider_rw.commit().expect("commit initial checkpoint");
        }
        Command {
            command: Subcommands::Set(SetArgs {
                stage: StageArg::MerkleExecute,
                block_number: 20,
                clear_stage_unit: false,
            }),
        }
        .execute(&tool)
        .expect("execute command");
        let provider = provider_factory.provider().expect("provider");
        let progress = provider
            .get_stage_checkpoint_progress(StageId::MerkleExecute)
            .expect("get stage checkpoint progress");
        assert_eq!(progress, Some(vec![1, 2, 3]));
    }
}

View File

@@ -35,6 +35,9 @@
- [`reth db prune-checkpoints`](./reth/db/prune-checkpoints.mdx)
- [`reth db prune-checkpoints get`](./reth/db/prune-checkpoints/get.mdx)
- [`reth db prune-checkpoints set`](./reth/db/prune-checkpoints/set.mdx)
- [`reth db stage-checkpoints`](./reth/db/stage-checkpoints.mdx)
- [`reth db stage-checkpoints get`](./reth/db/stage-checkpoints/get.mdx)
- [`reth db stage-checkpoints set`](./reth/db/stage-checkpoints/set.mdx)
- [`reth db account-storage`](./reth/db/account-storage.mdx)
- [`reth db state`](./reth/db/state.mdx)
- [`reth download`](./reth/download.mdx)

View File

@@ -23,6 +23,7 @@ Commands:
path Returns the full database path
settings Manage storage settings
prune-checkpoints View or set prune checkpoints
stage-checkpoints View or set stage checkpoints
account-storage Gets storage size information for an account
state Gets account state and storage at a specific block
help Print this message or the help of the given subcommand(s)

View File

@@ -0,0 +1,171 @@
# reth db stage-checkpoints
`reth db stage-checkpoints` subcommand
```bash
$ reth db stage-checkpoints --help
```
```txt
Usage: reth db stage-checkpoints [OPTIONS] <COMMAND>
Commands:
get Get stage checkpoint(s) from database
set Set a stage checkpoint
help Print this message or the help of the given subcommand(s)
Options:
-h, --help
Print help (see a summary with '-h')
Datadir:
--chain <CHAIN_OR_PATH>
The chain this node is running.
Possible values are either a built-in chain or the path to a chain specification file.
Built-in chains:
mainnet, sepolia, holesky, hoodi, dev
[default: mainnet]
Logging:
--log.stdout.format <FORMAT>
The format to use for logs written to stdout
Possible values:
- json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- terminal: Represents terminal-friendly formatting for logs
[default: terminal]
--log.stdout.filter <FILTER>
The filter to use for logs written to stdout
[default: ]
--log.file.format <FORMAT>
The format to use for logs written to the log file
Possible values:
- json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- terminal: Represents terminal-friendly formatting for logs
[default: terminal]
--log.file.filter <FILTER>
The filter to use for logs written to the log file
[default: debug]
--log.file.directory <PATH>
The path to put log files in
[default: <CACHE_DIR>/logs]
--log.file.name <NAME>
The prefix name of the log files
[default: reth.log]
--log.file.max-size <SIZE>
The maximum size (in MB) of one log file
[default: 200]
--log.file.max-files <COUNT>
The maximum amount of log files that will be stored. If set to 0, background file logging is disabled.
Default: 5 for `node` command, 0 for non-node utility subcommands.
--log.journald
Write logs to journald
--log.journald.filter <FILTER>
The filter to use for logs written to journald
[default: error]
--color <COLOR>
Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting
Possible values:
- always: Colors on
- auto: Auto-detect
- never: Colors off
[default: always]
--logs-otlp[=<URL>]
Enable `Opentelemetry` logs export to an OTLP endpoint.
If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317`
Example: --logs-otlp=http://collector:4318/v1/logs
[env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=]
--logs-otlp.filter <FILTER>
Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.
Example: --logs-otlp.filter=info,reth=debug
Defaults to INFO if not specified.
[default: info]
Display:
-v, --verbosity...
Set the minimum log level.
-v Errors
-vv Warnings
-vvv Info
-vvvv Debug
-vvvvv Traces (warning: very verbose!)
-q, --quiet
Silence all log output
Tracing:
--tracing-otlp[=<URL>]
Enable `Opentelemetry` tracing export to an OTLP endpoint.
If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317`
Example: --tracing-otlp=http://collector:4318/v1/traces
[env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=]
--tracing-otlp-protocol <PROTOCOL>
OTLP transport protocol to use for exporting traces and logs.
- `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path
Defaults to HTTP if not specified.
Possible values:
- http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path
- grpc: gRPC transport, port 4317
[env: OTEL_EXPORTER_OTLP_PROTOCOL=]
[default: http]
--tracing-otlp.filter <FILTER>
Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.
Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off
Defaults to TRACE if not specified.
[default: debug]
--tracing-otlp.sample-ratio <RATIO>
Trace sampling ratio to control the percentage of traces to export.
Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling
Example: --tracing-otlp.sample-ratio=0.0.
[env: OTEL_TRACES_SAMPLER_ARG=]
```

View File

@@ -0,0 +1,171 @@
# reth db stage-checkpoints get
Get stage checkpoint(s) from database
```bash
$ reth db stage-checkpoints get --help
```
```txt
Usage: reth db stage-checkpoints get [OPTIONS]
Options:
--stage <STAGE>
Specific stage to query. If omitted, shows all stages
[possible values: era, headers, bodies, sender-recovery, execution, prune-sender-recovery, merkle-unwind, account-hashing, storage-hashing, merkle-execute, transaction-lookup, index-storage-history, index-account-history, prune, finish]
-h, --help
Print help (see a summary with '-h')
Datadir:
--chain <CHAIN_OR_PATH>
The chain this node is running.
Possible values are either a built-in chain or the path to a chain specification file.
Built-in chains:
mainnet, sepolia, holesky, hoodi, dev
[default: mainnet]
Logging:
--log.stdout.format <FORMAT>
The format to use for logs written to stdout
Possible values:
- json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- terminal: Represents terminal-friendly formatting for logs
[default: terminal]
--log.stdout.filter <FILTER>
The filter to use for logs written to stdout
[default: ]
--log.file.format <FORMAT>
The format to use for logs written to the log file
Possible values:
- json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- terminal: Represents terminal-friendly formatting for logs
[default: terminal]
--log.file.filter <FILTER>
The filter to use for logs written to the log file
[default: debug]
--log.file.directory <PATH>
The path to put log files in
[default: <CACHE_DIR>/logs]
--log.file.name <NAME>
The prefix name of the log files
[default: reth.log]
--log.file.max-size <SIZE>
The maximum size (in MB) of one log file
[default: 200]
--log.file.max-files <COUNT>
The maximum amount of log files that will be stored. If set to 0, background file logging is disabled.
Default: 5 for `node` command, 0 for non-node utility subcommands.
--log.journald
Write logs to journald
--log.journald.filter <FILTER>
The filter to use for logs written to journald
[default: error]
--color <COLOR>
Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting
Possible values:
- always: Colors on
- auto: Auto-detect
- never: Colors off
[default: always]
--logs-otlp[=<URL>]
Enable `Opentelemetry` logs export to an OTLP endpoint.
If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317`
Example: --logs-otlp=http://collector:4318/v1/logs
[env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=]
--logs-otlp.filter <FILTER>
Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.
Example: --logs-otlp.filter=info,reth=debug
Defaults to INFO if not specified.
[default: info]
Display:
-v, --verbosity...
Set the minimum log level.
-v Errors
-vv Warnings
-vvv Info
-vvvv Debug
-vvvvv Traces (warning: very verbose!)
-q, --quiet
Silence all log output
Tracing:
--tracing-otlp[=<URL>]
Enable `Opentelemetry` tracing export to an OTLP endpoint.
If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317`
Example: --tracing-otlp=http://collector:4318/v1/traces
[env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=]
--tracing-otlp-protocol <PROTOCOL>
OTLP transport protocol to use for exporting traces and logs.
- `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path
Defaults to HTTP if not specified.
Possible values:
- http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path
- grpc: gRPC transport, port 4317
[env: OTEL_EXPORTER_OTLP_PROTOCOL=]
[default: http]
--tracing-otlp.filter <FILTER>
Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.
Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off
Defaults to TRACE if not specified.
[default: debug]
--tracing-otlp.sample-ratio <RATIO>
Trace sampling ratio to control the percentage of traces to export.
Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling
Example: --tracing-otlp.sample-ratio=0.0.
[env: OTEL_TRACES_SAMPLER_ARG=]
```

View File

@@ -0,0 +1,177 @@
# reth db stage-checkpoints set
Set a stage checkpoint
```bash
$ reth db stage-checkpoints set --help
```
```txt
Usage: reth db stage-checkpoints set [OPTIONS] --stage <STAGE> --block-number <BLOCK_NUMBER>
Options:
--stage <STAGE>
Stage to update
[possible values: era, headers, bodies, sender-recovery, execution, prune-sender-recovery, merkle-unwind, account-hashing, storage-hashing, merkle-execute, transaction-lookup, index-storage-history, index-account-history, prune, finish]
--block-number <BLOCK_NUMBER>
Block number to set as stage checkpoint
--clear-stage-unit
Clear stage-specific unit checkpoint payload
-h, --help
Print help (see a summary with '-h')
Datadir:
--chain <CHAIN_OR_PATH>
The chain this node is running.
Possible values are either a built-in chain or the path to a chain specification file.
Built-in chains:
mainnet, sepolia, holesky, hoodi, dev
[default: mainnet]
Logging:
--log.stdout.format <FORMAT>
The format to use for logs written to stdout
Possible values:
- json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- terminal: Represents terminal-friendly formatting for logs
[default: terminal]
--log.stdout.filter <FILTER>
The filter to use for logs written to stdout
[default: ]
--log.file.format <FORMAT>
The format to use for logs written to the log file
Possible values:
- json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- terminal: Represents terminal-friendly formatting for logs
[default: terminal]
--log.file.filter <FILTER>
The filter to use for logs written to the log file
[default: debug]
--log.file.directory <PATH>
The path to put log files in
[default: <CACHE_DIR>/logs]
--log.file.name <NAME>
The prefix name of the log files
[default: reth.log]
--log.file.max-size <SIZE>
The maximum size (in MB) of one log file
[default: 200]
--log.file.max-files <COUNT>
The maximum amount of log files that will be stored. If set to 0, background file logging is disabled.
Default: 5 for `node` command, 0 for non-node utility subcommands.
--log.journald
Write logs to journald
--log.journald.filter <FILTER>
The filter to use for logs written to journald
[default: error]
--color <COLOR>
Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting
Possible values:
- always: Colors on
- auto: Auto-detect
- never: Colors off
[default: always]
--logs-otlp[=<URL>]
Enable `Opentelemetry` logs export to an OTLP endpoint.
If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/logs` - gRPC: `http://localhost:4317`
Example: --logs-otlp=http://collector:4318/v1/logs
[env: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=]
--logs-otlp.filter <FILTER>
Set a filter directive for the OTLP logs exporter. This controls the verbosity of logs sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.
Example: --logs-otlp.filter=info,reth=debug
Defaults to INFO if not specified.
[default: info]
Display:
-v, --verbosity...
Set the minimum log level.
-v Errors
-vv Warnings
-vvv Info
-vvvv Debug
-vvvvv Traces (warning: very verbose!)
-q, --quiet
Silence all log output
Tracing:
--tracing-otlp[=<URL>]
Enable `Opentelemetry` tracing export to an OTLP endpoint.
If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317`
Example: --tracing-otlp=http://collector:4318/v1/traces
[env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=]
--tracing-otlp-protocol <PROTOCOL>
OTLP transport protocol to use for exporting traces and logs.
- `http`: expects endpoint path to end with `/v1/traces` or `/v1/logs` - `grpc`: expects endpoint without a path
Defaults to HTTP if not specified.
Possible values:
- http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path
- grpc: gRPC transport, port 4317
[env: OTEL_EXPORTER_OTLP_PROTOCOL=]
[default: http]
--tracing-otlp.filter <FILTER>
Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable.
Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off
Defaults to TRACE if not specified.
[default: debug]
--tracing-otlp.sample-ratio <RATIO>
Trace sampling ratio to control the percentage of traces to export.
Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling
Example: --tracing-otlp.sample-ratio=0.0.
[env: OTEL_TRACES_SAMPLER_ARG=]
```

View File

@@ -171,6 +171,21 @@ export const rethCliSidebar: SidebarItem = {
}
]
},
{
text: "reth db stage-checkpoints",
link: "/cli/reth/db/stage-checkpoints",
collapsed: true,
items: [
{
text: "reth db stage-checkpoints get",
link: "/cli/reth/db/stage-checkpoints/get"
},
{
text: "reth db stage-checkpoints set",
link: "/cli/reth/db/stage-checkpoints/set"
}
]
},
{
text: "reth db account-storage",
link: "/cli/reth/db/account-storage"