Mirror of https://github.com/paradigmxyz/reth.git (synced 2026-02-10 23:15:34 -05:00)
chore: finally move node-core to node folder (#9191)
crates/node/core/src/args/benchmark_args.rs (new file, 62 lines)
@@ -0,0 +1,62 @@
//! clap [Args](clap::Args) for benchmark configuration

use clap::Args;
use std::path::PathBuf;

/// Parameters for benchmark configuration
#[derive(Debug, Args, PartialEq, Eq, Default, Clone)]
#[command(next_help_heading = "Benchmark")]
pub struct BenchmarkArgs {
    /// Run the benchmark from a specific block.
    #[arg(long, verbatim_doc_comment)]
    pub from: Option<u64>,

    /// Run the benchmark to a specific block.
    #[arg(long, verbatim_doc_comment)]
    pub to: Option<u64>,

    /// Path to a JWT secret to use for the authenticated engine-API RPC server.
    ///
    /// This will perform JWT authentication for all requests to the given engine RPC url.
    ///
    /// If no path is provided, a secret will be generated and stored in the datadir under
    /// `<DIR>/<CHAIN_ID>/jwt.hex`. For mainnet this would be `~/.reth/mainnet/jwt.hex` by default.
    #[arg(long = "jwtsecret", value_name = "PATH", global = true, required = false)]
    pub auth_jwtsecret: Option<PathBuf>,

    /// The RPC url to use for sending engine requests.
    #[arg(
        long,
        value_name = "ENGINE_RPC_URL",
        verbatim_doc_comment,
        default_value = "http://localhost:8551"
    )]
    pub engine_rpc_url: String,

    /// The path to the output directory for granular benchmark results.
    #[arg(long, short, value_name = "BENCHMARK_OUTPUT", verbatim_doc_comment)]
    pub output: Option<PathBuf>,
}

#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn test_parse_benchmark_args() {
        let default_args = BenchmarkArgs {
            engine_rpc_url: "http://localhost:8551".to_string(),
            ..Default::default()
        };
        let args = CommandParser::<BenchmarkArgs>::parse_from(["reth-bench"]).args;
        assert_eq!(args, default_args);
    }
}
crates/node/core/src/args/database.rs (new file, 140 lines)
@@ -0,0 +1,140 @@
//! clap [Args](clap::Args) for database configuration

use crate::version::default_client_version;
use clap::{
    builder::{PossibleValue, TypedValueParser},
    error::ErrorKind,
    Arg, Args, Command, Error,
};
use reth_storage_errors::db::LogLevel;

/// Parameters for database configuration
#[derive(Debug, Args, PartialEq, Eq, Default, Clone, Copy)]
#[command(next_help_heading = "Database")]
pub struct DatabaseArgs {
    /// Database logging level. Levels higher than "notice" require a debug build.
    #[arg(long = "db.log-level", value_parser = LogLevelValueParser::default())]
    pub log_level: Option<LogLevel>,
    /// Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an
    /// NFS volume.
    #[arg(long = "db.exclusive")]
    pub exclusive: Option<bool>,
}

impl DatabaseArgs {
    /// Returns default database arguments with configured log level and client version.
    pub fn database_args(&self) -> reth_db::mdbx::DatabaseArguments {
        reth_db::mdbx::DatabaseArguments::new(default_client_version())
            .with_log_level(self.log_level)
            .with_exclusive(self.exclusive)
    }
}

/// clap value parser for [`LogLevel`].
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
struct LogLevelValueParser;

impl TypedValueParser for LogLevelValueParser {
    type Value = LogLevel;

    fn parse_ref(
        &self,
        _cmd: &Command,
        arg: Option<&Arg>,
        value: &std::ffi::OsStr,
    ) -> Result<Self::Value, Error> {
        let val =
            value.to_str().ok_or_else(|| Error::raw(ErrorKind::InvalidUtf8, "Invalid UTF-8"))?;

        val.parse::<LogLevel>().map_err(|err| {
            let arg = arg.map(|a| a.to_string()).unwrap_or_else(|| "...".to_owned());
            let possible_values = LogLevel::value_variants()
                .iter()
                .map(|v| format!("- {:?}: {}", v, v.help_message()))
                .collect::<Vec<_>>()
                .join("\n");
            let msg = format!(
                "Invalid value '{val}' for {arg}: {err}.\n Possible values:\n{possible_values}"
            );
            clap::Error::raw(clap::error::ErrorKind::InvalidValue, msg)
        })
    }

    fn possible_values(&self) -> Option<Box<dyn Iterator<Item = PossibleValue> + '_>> {
        let values = LogLevel::value_variants()
            .iter()
            .map(|v| PossibleValue::new(v.variant_name()).help(v.help_message()));
        Some(Box::new(values))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn test_default_database_args() {
        let default_args = DatabaseArgs::default();
        let args = CommandParser::<DatabaseArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }

    #[test]
    fn test_possible_values() {
        // Initialize the LogLevelValueParser
        let parser = LogLevelValueParser;

        // Call the possible_values method
        let possible_values: Vec<PossibleValue> = parser.possible_values().unwrap().collect();

        // Expected possible values
        let expected_values = vec![
            PossibleValue::new("fatal")
                .help("Enables logging for critical conditions, i.e. assertion failures"),
            PossibleValue::new("error").help("Enables logging for error conditions"),
            PossibleValue::new("warn").help("Enables logging for warning conditions"),
            PossibleValue::new("notice")
                .help("Enables logging for normal but significant condition"),
            PossibleValue::new("verbose").help("Enables logging for verbose informational"),
            PossibleValue::new("debug").help("Enables logging for debug-level messages"),
            PossibleValue::new("trace").help("Enables logging for trace debug-level messages"),
            PossibleValue::new("extra").help("Enables logging for extra debug-level messages"),
        ];

        // Check that the possible values match the expected values
        assert_eq!(possible_values.len(), expected_values.len());
        for (actual, expected) in possible_values.iter().zip(expected_values.iter()) {
            assert_eq!(actual.get_name(), expected.get_name());
            assert_eq!(actual.get_help(), expected.get_help());
        }
    }

    #[test]
    fn test_command_parser_with_valid_log_level() {
        let cmd =
            CommandParser::<DatabaseArgs>::try_parse_from(["reth", "--db.log-level", "Debug"])
                .unwrap();
        assert_eq!(cmd.args.log_level, Some(LogLevel::Debug));
    }

    #[test]
    fn test_command_parser_with_invalid_log_level() {
        let result =
            CommandParser::<DatabaseArgs>::try_parse_from(["reth", "--db.log-level", "invalid"]);
        assert!(result.is_err());
    }

    #[test]
    fn test_command_parser_without_log_level() {
        let cmd = CommandParser::<DatabaseArgs>::try_parse_from(["reth"]).unwrap();
        assert_eq!(cmd.args.log_level, None);
    }
}
crates/node/core/src/args/datadir_args.rs (new file, 53 lines)
@@ -0,0 +1,53 @@
//! clap [Args](clap::Args) for datadir config

use crate::dirs::{ChainPath, DataDirPath, MaybePlatformPath};
use clap::Args;
use reth_chainspec::Chain;
use std::path::PathBuf;

/// Parameters for datadir configuration
#[derive(Debug, Args, PartialEq, Eq, Default, Clone)]
#[command(next_help_heading = "Datadir")]
pub struct DatadirArgs {
    /// The path to the data dir for all reth files and subdirectories.
    ///
    /// Defaults to the OS-specific data directory:
    ///
    /// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/`
    /// - Windows: `{FOLDERID_RoamingAppData}/reth/`
    /// - macOS: `$HOME/Library/Application Support/reth/`
    #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)]
    pub datadir: MaybePlatformPath<DataDirPath>,

    /// The absolute path to store static files in.
    #[arg(long = "datadir.static_files", verbatim_doc_comment, value_name = "PATH")]
    pub static_files_path: Option<PathBuf>,
}

impl DatadirArgs {
    /// Resolves the final datadir path.
    pub fn resolve_datadir(self, chain: Chain) -> ChainPath<DataDirPath> {
        let datadir = self.datadir.clone();
        datadir.unwrap_or_chain_default(chain, self)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn test_parse_datadir_args() {
        let default_args = DatadirArgs::default();
        let args = CommandParser::<DatadirArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
}
crates/node/core/src/args/debug.rs (new file, 78 lines)
@@ -0,0 +1,78 @@
//! clap [Args](clap::Args) for debugging purposes

use clap::Args;
use reth_primitives::B256;
use std::path::PathBuf;

/// Parameters for debugging purposes
#[derive(Debug, Clone, Args, PartialEq, Eq, Default)]
#[command(next_help_heading = "Debug")]
pub struct DebugArgs {
    /// Flag indicating whether the node should be terminated after the pipeline sync.
    #[arg(long = "debug.terminate", help_heading = "Debug")]
    pub terminate: bool,

    /// Set the chain tip manually for testing purposes.
    ///
    /// NOTE: This is a temporary flag
    #[arg(long = "debug.tip", help_heading = "Debug")]
    pub tip: Option<B256>,

    /// Runs the sync only up to the specified block.
    #[arg(long = "debug.max-block", help_heading = "Debug")]
    pub max_block: Option<u64>,

    /// Runs a fake consensus client that advances the chain using recent block hashes
    /// on Etherscan. If specified, requires an `ETHERSCAN_API_KEY` environment variable.
    #[arg(
        long = "debug.etherscan",
        help_heading = "Debug",
        conflicts_with = "tip",
        conflicts_with = "rpc_consensus_ws",
        value_name = "ETHERSCAN_API_URL"
    )]
    pub etherscan: Option<Option<String>>,

    /// Runs a fake consensus client using blocks fetched from an RPC `WebSocket` endpoint.
    #[arg(
        long = "debug.rpc-consensus-ws",
        help_heading = "Debug",
        conflicts_with = "tip",
        conflicts_with = "etherscan"
    )]
    pub rpc_consensus_ws: Option<String>,

    /// If provided, the engine will skip `n` consecutive FCUs.
    #[arg(long = "debug.skip-fcu", help_heading = "Debug")]
    pub skip_fcu: Option<usize>,

    /// If provided, the engine will skip `n` consecutive new payloads.
    #[arg(long = "debug.skip-new-payload", help_heading = "Debug")]
    pub skip_new_payload: Option<usize>,

    /// The path to store engine API messages at.
    /// If specified, all of the intercepted engine API messages
    /// will be written to the specified location.
    #[arg(long = "debug.engine-api-store", help_heading = "Debug", value_name = "PATH")]
    pub engine_api_store: Option<PathBuf>,
}

#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn test_parse_database_args() {
        let default_args = DebugArgs::default();
        let args = CommandParser::<DebugArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
}
crates/node/core/src/args/dev.rs (new file, 107 lines)
@@ -0,0 +1,107 @@
//! clap [Args](clap::Args) for Dev testnet configuration

use std::time::Duration;

use clap::Args;
use humantime::parse_duration;

/// Parameters for Dev testnet configuration
#[derive(Debug, Args, PartialEq, Eq, Default, Clone, Copy)]
#[command(next_help_heading = "Dev testnet")]
pub struct DevArgs {
    /// Start the node in dev mode
    ///
    /// This mode uses a local proof-of-authority consensus engine with either fixed block times
    /// or automatically mined blocks.
    /// Disables network discovery and enables local http server.
    /// Prefunds 20 accounts derived by mnemonic "test test test test test test test test test test
    /// test junk" with 10 000 ETH each.
    #[arg(long = "dev", alias = "auto-mine", help_heading = "Dev testnet", verbatim_doc_comment)]
    pub dev: bool,

    /// How many transactions to mine per block.
    #[arg(
        long = "dev.block-max-transactions",
        help_heading = "Dev testnet",
        conflicts_with = "block_time"
    )]
    pub block_max_transactions: Option<usize>,

    /// Interval between blocks.
    ///
    /// Parses strings using [`humantime::parse_duration`]
    /// --dev.block-time 12s
    #[arg(
        long = "dev.block-time",
        help_heading = "Dev testnet",
        conflicts_with = "block_max_transactions",
        value_parser = parse_duration,
        verbatim_doc_comment
    )]
    pub block_time: Option<Duration>,
}

#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn test_parse_dev_args() {
        let args = CommandParser::<DevArgs>::parse_from(["reth"]).args;
        assert_eq!(args, DevArgs { dev: false, block_max_transactions: None, block_time: None });

        let args = CommandParser::<DevArgs>::parse_from(["reth", "--dev"]).args;
        assert_eq!(args, DevArgs { dev: true, block_max_transactions: None, block_time: None });

        let args = CommandParser::<DevArgs>::parse_from(["reth", "--auto-mine"]).args;
        assert_eq!(args, DevArgs { dev: true, block_max_transactions: None, block_time: None });

        let args = CommandParser::<DevArgs>::parse_from([
            "reth",
            "--dev",
            "--dev.block-max-transactions",
            "2",
        ])
        .args;
        assert_eq!(args, DevArgs { dev: true, block_max_transactions: Some(2), block_time: None });

        let args =
            CommandParser::<DevArgs>::parse_from(["reth", "--dev", "--dev.block-time", "1s"]).args;
        assert_eq!(
            args,
            DevArgs {
                dev: true,
                block_max_transactions: None,
                block_time: Some(std::time::Duration::from_secs(1))
            }
        );
    }

    #[test]
    fn test_parse_dev_args_conflicts() {
        let args = CommandParser::<DevArgs>::try_parse_from([
            "reth",
            "--dev",
            "--dev.block-max-transactions",
            "2",
            "--dev.block-time",
            "1s",
        ]);
        assert!(args.is_err());
    }

    #[test]
    fn dev_args_default_sanity_check() {
        let default_args = DevArgs::default();
        let args = CommandParser::<DevArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
}
crates/node/core/src/args/gas_price_oracle.rs (new file, 86 lines)
@@ -0,0 +1,86 @@
use crate::primitives::U256;
use clap::Args;
use reth_rpc_eth_types::GasPriceOracleConfig;
use reth_rpc_server_types::constants::gas_oracle::{
    DEFAULT_GAS_PRICE_BLOCKS, DEFAULT_GAS_PRICE_PERCENTILE, DEFAULT_IGNORE_GAS_PRICE,
    DEFAULT_MAX_GAS_PRICE,
};

/// Parameters to configure Gas Price Oracle
#[derive(Debug, Clone, Copy, Args, PartialEq, Eq)]
#[command(next_help_heading = "Gas Price Oracle")]
pub struct GasPriceOracleArgs {
    /// Number of recent blocks to check for gas price
    #[arg(long = "gpo.blocks", default_value_t = DEFAULT_GAS_PRICE_BLOCKS)]
    pub blocks: u32,

    /// Gas Price below which gpo will ignore transactions
    #[arg(long = "gpo.ignoreprice", default_value_t = DEFAULT_IGNORE_GAS_PRICE.to())]
    pub ignore_price: u64,

    /// Maximum transaction priority fee (or gasprice before London Fork) to be recommended by gpo
    #[arg(long = "gpo.maxprice", default_value_t = DEFAULT_MAX_GAS_PRICE.to())]
    pub max_price: u64,

    /// The percentile of gas prices to use for the estimate
    #[arg(long = "gpo.percentile", default_value_t = DEFAULT_GAS_PRICE_PERCENTILE)]
    pub percentile: u32,
}

impl GasPriceOracleArgs {
    /// Returns a [`GasPriceOracleConfig`] from the arguments.
    pub fn gas_price_oracle_config(&self) -> GasPriceOracleConfig {
        let Self { blocks, ignore_price, max_price, percentile } = self;
        GasPriceOracleConfig {
            max_price: Some(U256::from(*max_price)),
            ignore_price: Some(U256::from(*ignore_price)),
            percentile: *percentile,
            blocks: *blocks,
            ..Default::default()
        }
    }
}

impl Default for GasPriceOracleArgs {
    fn default() -> Self {
        Self {
            blocks: DEFAULT_GAS_PRICE_BLOCKS,
            ignore_price: DEFAULT_IGNORE_GAS_PRICE.to(),
            max_price: DEFAULT_MAX_GAS_PRICE.to(),
            percentile: DEFAULT_GAS_PRICE_PERCENTILE,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;
    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn test_parse_gpo_args() {
        let args = CommandParser::<GasPriceOracleArgs>::parse_from(["reth"]).args;
        assert_eq!(
            args,
            GasPriceOracleArgs {
                blocks: DEFAULT_GAS_PRICE_BLOCKS,
                ignore_price: DEFAULT_IGNORE_GAS_PRICE.to(),
                max_price: DEFAULT_MAX_GAS_PRICE.to(),
                percentile: DEFAULT_GAS_PRICE_PERCENTILE,
            }
        );
    }

    #[test]
    fn gpo_args_default_sanity_test() {
        let default_args = GasPriceOracleArgs::default();
        let args = CommandParser::<GasPriceOracleArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
}
crates/node/core/src/args/log.rs (new file, 176 lines)
@@ -0,0 +1,176 @@
//! clap [Args](clap::Args) for logging configuration.

use crate::dirs::{LogsDir, PlatformPath};
use clap::{ArgAction, Args, ValueEnum};
use reth_tracing::{
    tracing_subscriber::filter::Directive, FileInfo, FileWorkerGuard, LayerInfo, LogFormat,
    RethTracer, Tracer,
};
use std::{fmt, fmt::Display};
use tracing::{level_filters::LevelFilter, Level};
/// Constant to convert megabytes to bytes
const MB_TO_BYTES: u64 = 1024 * 1024;

/// The log configuration.
#[derive(Debug, Args)]
#[command(next_help_heading = "Logging")]
pub struct LogArgs {
    /// The format to use for logs written to stdout.
    #[arg(long = "log.stdout.format", value_name = "FORMAT", global = true, default_value_t = LogFormat::Terminal)]
    pub log_stdout_format: LogFormat,

    /// The filter to use for logs written to stdout.
    #[arg(long = "log.stdout.filter", value_name = "FILTER", global = true, default_value = "")]
    pub log_stdout_filter: String,

    /// The format to use for logs written to the log file.
    #[arg(long = "log.file.format", value_name = "FORMAT", global = true, default_value_t = LogFormat::Terminal)]
    pub log_file_format: LogFormat,

    /// The filter to use for logs written to the log file.
    #[arg(long = "log.file.filter", value_name = "FILTER", global = true, default_value = "debug")]
    pub log_file_filter: String,

    /// The path to put log files in.
    #[arg(long = "log.file.directory", value_name = "PATH", global = true, default_value_t)]
    pub log_file_directory: PlatformPath<LogsDir>,

    /// The maximum size (in MB) of one log file.
    #[arg(long = "log.file.max-size", value_name = "SIZE", global = true, default_value_t = 200)]
    pub log_file_max_size: u64,

    /// The maximum amount of log files that will be stored. If set to 0, background file logging
    /// is disabled.
    #[arg(long = "log.file.max-files", value_name = "COUNT", global = true, default_value_t = 5)]
    pub log_file_max_files: usize,

    /// Write logs to journald.
    #[arg(long = "log.journald", global = true)]
    pub journald: bool,

    /// The filter to use for logs written to journald.
    #[arg(
        long = "log.journald.filter",
        value_name = "FILTER",
        global = true,
        default_value = "error"
    )]
    pub journald_filter: String,

    /// Sets whether or not the formatter emits ANSI terminal escape codes for colors and other
    /// text formatting.
    #[arg(
        long,
        value_name = "COLOR",
        global = true,
        default_value_t = ColorMode::Always
    )]
    pub color: ColorMode,
    /// The verbosity settings for the tracer.
    #[command(flatten)]
    pub verbosity: Verbosity,
}

impl LogArgs {
    /// Creates a [`LayerInfo`] instance.
    fn layer(&self, format: LogFormat, filter: String, use_color: bool) -> LayerInfo {
        LayerInfo::new(
            format,
            self.verbosity.directive().to_string(),
            filter,
            if use_color { Some(self.color.to_string()) } else { None },
        )
    }

    /// File info from the current log options.
    fn file_info(&self) -> FileInfo {
        FileInfo::new(
            self.log_file_directory.clone().into(),
            self.log_file_max_size * MB_TO_BYTES,
            self.log_file_max_files,
        )
    }

    /// Initializes tracing with the configured options from cli args.
    ///
    /// Returns the file worker guard if a file worker was configured.
    pub fn init_tracing(&self) -> eyre::Result<Option<FileWorkerGuard>> {
        let mut tracer = RethTracer::new();

        let stdout = self.layer(self.log_stdout_format, self.log_stdout_filter.clone(), true);
        tracer = tracer.with_stdout(stdout);

        if self.journald {
            tracer = tracer.with_journald(self.journald_filter.clone());
        }

        if self.log_file_max_files > 0 {
            let info = self.file_info();
            let file = self.layer(self.log_file_format, self.log_file_filter.clone(), false);
            tracer = tracer.with_file(file, info);
        }

        let guard = tracer.init()?;
        Ok(guard)
    }
}

/// The color mode for the cli.
#[derive(Debug, Copy, Clone, ValueEnum, Eq, PartialEq)]
pub enum ColorMode {
    /// Colors on
    Always,
    /// Colors automatically
    Auto,
    /// Colors off
    Never,
}

impl Display for ColorMode {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Always => write!(f, "always"),
            Self::Auto => write!(f, "auto"),
            Self::Never => write!(f, "never"),
        }
    }
}

/// The verbosity settings for the cli.
#[derive(Debug, Copy, Clone, Args)]
#[command(next_help_heading = "Display")]
pub struct Verbosity {
    /// Set the minimum log level.
    ///
    /// -v      Errors
    /// -vv     Warnings
    /// -vvv    Info
    /// -vvvv   Debug
    /// -vvvvv  Traces (warning: very verbose!)
    #[arg(short, long, action = ArgAction::Count, global = true, default_value_t = 3, verbatim_doc_comment, help_heading = "Display")]
    verbosity: u8,

    /// Silence all log output.
    #[arg(long, alias = "silent", short = 'q', global = true, help_heading = "Display")]
    quiet: bool,
}

impl Verbosity {
    /// Get the corresponding [Directive] for the given verbosity, or none if the verbosity
    /// corresponds to silent.
    pub fn directive(&self) -> Directive {
        if self.quiet {
            LevelFilter::OFF.into()
        } else {
            let level = match self.verbosity - 1 {
                0 => Level::ERROR,
                1 => Level::WARN,
                2 => Level::INFO,
                3 => Level::DEBUG,
                _ => Level::TRACE,
            };

            level.into()
        }
    }
}
crates/node/core/src/args/mod.rs (new file, 64 lines)
@@ -0,0 +1,64 @@
//! Parameters for configuring the RPC with more granularity via CLI

/// NetworkArgs struct for configuring the network
mod network;
pub use network::{DiscoveryArgs, NetworkArgs};

/// RpcServerArgs struct for configuring the RPC
mod rpc_server;
pub use rpc_server::RpcServerArgs;

/// `RpcStateCacheArgs` struct for configuring RPC state cache
mod rpc_state_cache;
pub use rpc_state_cache::RpcStateCacheArgs;

/// DebugArgs struct for debugging purposes
mod debug;
pub use debug::DebugArgs;

/// DatabaseArgs struct for configuring the database
mod database;
pub use database::DatabaseArgs;

/// LogArgs struct for configuring the logger
mod log;
pub use log::{ColorMode, LogArgs};

mod secret_key;
pub use secret_key::{get_secret_key, SecretKeyError};

/// `PayloadBuilderArgs` struct for configuring the payload builder
mod payload_builder;
pub use payload_builder::PayloadBuilderArgs;

/// Stage related arguments
mod stage;
pub use stage::StageEnum;

/// Gas price oracle related arguments
mod gas_price_oracle;
pub use gas_price_oracle::GasPriceOracleArgs;

/// TxPoolArgs for configuring the transaction pool
mod txpool;
pub use txpool::TxPoolArgs;

/// DevArgs for configuring the dev testnet
mod dev;
pub use dev::DevArgs;

/// PruningArgs for configuring the pruning and full node
mod pruning;
pub use pruning::PruningArgs;

/// DatadirArgs for configuring data storage paths
mod datadir_args;
pub use datadir_args::DatadirArgs;

/// BenchmarkArgs struct for configuring the benchmark to run
mod benchmark_args;
pub use benchmark_args::BenchmarkArgs;

pub mod utils;

pub mod types;
crates/node/core/src/args/network.rs (new file, 499 lines)
@@ -0,0 +1,499 @@
//! clap [Args](clap::Args) for network related arguments.

use crate::version::P2P_CLIENT_VERSION;
use clap::Args;
use reth_chainspec::ChainSpec;
use reth_config::Config;
use reth_discv4::{NodeRecord, DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT};
use reth_discv5::{
    discv5::ListenConfig, DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_PORT,
    DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL,
};
use reth_net_nat::NatResolver;
use reth_network::{
    transactions::{
        TransactionFetcherConfig, TransactionsManagerConfig,
        DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ,
        SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE,
    },
    HelloMessageWithProtocols, NetworkConfigBuilder, SessionsConfig,
};
use reth_network_peers::{mainnet_nodes, TrustedPeer};
use secp256k1::SecretKey;
use std::{
    net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6},
    ops::Not,
    path::PathBuf,
    sync::Arc,
};

/// Parameters for configuring the network with more granularity via CLI
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "Networking")]
pub struct NetworkArgs {
    /// Arguments to setup discovery service.
    #[command(flatten)]
    pub discovery: DiscoveryArgs,

    #[allow(clippy::doc_markdown)]
    /// Comma separated enode URLs of trusted peers for P2P connections.
    ///
    /// --trusted-peers enode://abcd@192.168.0.1:30303
    #[arg(long, value_delimiter = ',')]
    pub trusted_peers: Vec<TrustedPeer>,

    /// Connect to or accept from trusted peers only
    #[arg(long)]
    pub trusted_only: bool,

    /// Comma separated enode URLs for P2P discovery bootstrap.
    ///
    /// Will fall back to a network-specific default if not specified.
    #[arg(long, value_delimiter = ',')]
    pub bootnodes: Option<Vec<TrustedPeer>>,

    /// Number of DNS resolution request retries to perform when peering.
    #[arg(long, default_value_t = 0)]
    pub dns_retries: usize,

    /// The path to the known peers file. Connected peers are dumped to this file on node
    /// shutdown, and read on startup. Cannot be used with `--no-persist-peers`.
    #[arg(long, value_name = "FILE", verbatim_doc_comment, conflicts_with = "no_persist_peers")]
    pub peers_file: Option<PathBuf>,

    /// Custom node identity
    #[arg(long, value_name = "IDENTITY", default_value = P2P_CLIENT_VERSION)]
    pub identity: String,

    /// Secret key to use for this node.
    ///
    /// This will also deterministically set the peer ID. If not specified, it will be set in the
    /// data dir for the chain being used.
    #[arg(long, value_name = "PATH")]
    pub p2p_secret_key: Option<PathBuf>,

    /// Do not persist peers.
    #[arg(long, verbatim_doc_comment)]
    pub no_persist_peers: bool,

    /// NAT resolution method (any|none|upnp|publicip|extip:\<IP\>)
    #[arg(long, default_value = "any")]
    pub nat: NatResolver,

    /// Network listening address
    #[arg(long = "addr", value_name = "ADDR", default_value_t = DEFAULT_DISCOVERY_ADDR)]
    pub addr: IpAddr,

    /// Network listening port
    #[arg(long = "port", value_name = "PORT", default_value_t = DEFAULT_DISCOVERY_PORT)]
    pub port: u16,

    /// Maximum number of outbound requests. default: 100
    #[arg(long)]
    pub max_outbound_peers: Option<usize>,

    /// Maximum number of inbound requests. default: 30
    #[arg(long)]
    pub max_inbound_peers: Option<usize>,

    /// Experimental, for usage in research. Sets the max accumulated byte size of transactions
    /// to pack in one response.
    /// Spec'd at 2MiB.
    #[arg(long = "pooled-tx-response-soft-limit", value_name = "BYTES", default_value_t = SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, verbatim_doc_comment)]
    pub soft_limit_byte_size_pooled_transactions_response: usize,

    /// Experimental, for usage in research. Sets the max accumulated byte size of transactions to
    /// request in one request.
    ///
    /// Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a
    /// transaction announcement (see `RLPx` specs). This allows a node to request a specific size
    /// response.
    ///
    /// By default, nodes request only 128 KiB worth of transactions, but should a peer request
    /// more, up to 2 MiB, a node will answer with more than 128 KiB.
    ///
    /// Default is 128 KiB.
    #[arg(long = "pooled-tx-pack-soft-limit", value_name = "BYTES", default_value_t = DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, verbatim_doc_comment)]
    pub soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize,
}

impl NetworkArgs {
    /// Build a [`NetworkConfigBuilder`] from a [`Config`] and a [`ChainSpec`], in addition to the
    /// values in this option struct.
    ///
    /// The `default_peers_file` will be used as the default location to store the persistent peers
    /// file if `no_persist_peers` is false, and there is no provided `peers_file`.
    pub fn network_config(
        &self,
        config: &Config,
        chain_spec: Arc<ChainSpec>,
        secret_key: SecretKey,
        default_peers_file: PathBuf,
    ) -> NetworkConfigBuilder {
        let chain_bootnodes = chain_spec.bootnodes().unwrap_or_else(mainnet_nodes);
        let peers_file = self.peers_file.clone().unwrap_or(default_peers_file);

        // Configure peer connections
        let peers_config = config
            .peers
            .clone()
            .with_max_inbound_opt(self.max_inbound_peers)
            .with_max_outbound_opt(self.max_outbound_peers);

        // Configure transactions manager
        let transactions_manager_config = TransactionsManagerConfig {
            transaction_fetcher_config: TransactionFetcherConfig::new(
                self.soft_limit_byte_size_pooled_transactions_response,
                self.soft_limit_byte_size_pooled_transactions_response_on_pack_request,
            ),
        };

        // Configure basic network stack
        NetworkConfigBuilder::new(secret_key)
            .peer_config(config.peers_config_with_basic_nodes_from_file(
                self.persistent_peers_file(peers_file).as_deref(),
            ))
            .external_ip_resolver(self.nat)
            .sessions_config(
                SessionsConfig::default().with_upscaled_event_buffer(peers_config.max_peers()),
            )
            .peer_config(peers_config)
            .boot_nodes(chain_bootnodes.clone())
            .chain_spec(chain_spec)
            .transactions_manager_config(transactions_manager_config)
            // Configure node identity
            .apply(|builder| {
                let peer_id = builder.get_peer_id();
                builder.hello_message(
                    HelloMessageWithProtocols::builder(peer_id)
                        .client_version(&self.identity)
                        .build(),
                )
            })
            // apply discovery settings
            .apply(|builder| {
                let rlpx_socket = (self.addr, self.port).into();
                self.discovery.apply_to_builder(builder, rlpx_socket, chain_bootnodes)
            })
            .listener_addr(SocketAddr::new(
                self.addr, // set discovery port based on instance number
                self.port,
            ))
            .discovery_addr(SocketAddr::new(
                self.discovery.addr,
                // set discovery port based on instance number
                self.discovery.port,
            ))
    }

    /// If `no_persist_peers` is false, this returns the path to the persistent peers file.
    pub fn persistent_peers_file(&self, peers_file: PathBuf) -> Option<PathBuf> {
        self.no_persist_peers.not().then_some(peers_file)
    }

    /// Sets the p2p port to zero, to allow the OS to assign a random unused port when
    /// the network components bind to a socket.
    pub const fn with_unused_p2p_port(mut self) -> Self {
        self.port = 0;
        self
    }

    /// Sets the p2p and discovery ports to zero, allowing the OS to assign a random unused port
    /// when network components bind to sockets.
    pub const fn with_unused_ports(mut self) -> Self {
        self = self.with_unused_p2p_port();
        self.discovery = self.discovery.with_unused_discovery_port();
        self
    }

    /// Change networking port numbers based on the instance number.
    /// Ports are updated to `previous_value + instance - 1`
    ///
    /// # Panics
    /// Warning: if `instance` is zero in debug mode, this will panic.
    pub fn adjust_instance_ports(&mut self, instance: u16) {
        debug_assert_ne!(instance, 0, "instance must be non-zero");
        self.port += instance - 1;
        self.discovery.adjust_instance_ports(instance);
    }
}

impl Default for NetworkArgs {
    fn default() -> Self {
        Self {
            discovery: DiscoveryArgs::default(),
            trusted_peers: vec![],
            trusted_only: false,
            bootnodes: None,
            dns_retries: 0,
            peers_file: None,
            identity: P2P_CLIENT_VERSION.to_string(),
            p2p_secret_key: None,
            no_persist_peers: false,
            nat: NatResolver::Any,
            addr: DEFAULT_DISCOVERY_ADDR,
            port: DEFAULT_DISCOVERY_PORT,
            max_outbound_peers: None,
            max_inbound_peers: None,
            soft_limit_byte_size_pooled_transactions_response:
                SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE,
            soft_limit_byte_size_pooled_transactions_response_on_pack_request: DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ,
        }
    }
}

/// Arguments to setup discovery
#[derive(Debug, Clone, Args, PartialEq, Eq)]
pub struct DiscoveryArgs {
    /// Disable the discovery service.
    #[arg(short, long, default_value_if("dev", "true", "true"))]
    pub disable_discovery: bool,

    /// Disable the DNS discovery.
    #[arg(long, conflicts_with = "disable_discovery")]
    pub disable_dns_discovery: bool,

    /// Disable Discv4 discovery.
    #[arg(long, conflicts_with = "disable_discovery")]
    pub disable_discv4_discovery: bool,

    /// Enable Discv5 discovery.
    #[arg(long, conflicts_with = "disable_discovery")]
    pub enable_discv5_discovery: bool,

    /// The UDP address to use for devp2p peer discovery version 4.
    #[arg(id = "discovery.addr", long = "discovery.addr", value_name = "DISCOVERY_ADDR", default_value_t = DEFAULT_DISCOVERY_ADDR)]
    pub addr: IpAddr,

    /// The UDP port to use for devp2p peer discovery version 4.
    #[arg(id = "discovery.port", long = "discovery.port", value_name = "DISCOVERY_PORT", default_value_t = DEFAULT_DISCOVERY_PORT)]
    pub port: u16,

    /// The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx`
    /// address, if it's also IPv4.
    #[arg(id = "discovery.v5.addr", long = "discovery.v5.addr", value_name = "DISCOVERY_V5_ADDR", default_value = None)]
    pub discv5_addr: Option<Ipv4Addr>,

    /// The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx`
    /// address, if it's also IPv6.
    #[arg(id = "discovery.v5.addr.ipv6", long = "discovery.v5.addr.ipv6", value_name = "DISCOVERY_V5_ADDR_IPV6", default_value = None)]
    pub discv5_addr_ipv6: Option<Ipv6Addr>,

    /// The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is
    /// IPv4, or `--discv5.addr` is set.
    #[arg(id = "discovery.v5.port", long = "discovery.v5.port", value_name = "DISCOVERY_V5_PORT",
    default_value_t = DEFAULT_DISCOVERY_V5_PORT)]
    pub discv5_port: u16,

    /// The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is
    /// IPv6, or `--discv5.addr.ipv6` is set.
    #[arg(id = "discovery.v5.port.ipv6", long = "discovery.v5.port.ipv6", value_name = "DISCOVERY_V5_PORT_IPV6",
    default_value = None, default_value_t = DEFAULT_DISCOVERY_V5_PORT)]
    pub discv5_port_ipv6: u16,

    /// The interval in seconds at which to carry out periodic lookup queries, for the whole
    /// run of the program.
    #[arg(id = "discovery.v5.lookup-interval", long = "discovery.v5.lookup-interval", value_name = "DISCOVERY_V5_LOOKUP_INTERVAL", default_value_t = DEFAULT_SECONDS_LOOKUP_INTERVAL)]
    pub discv5_lookup_interval: u64,

    /// The interval in seconds at which to carry out boost lookup queries, for a fixed number of
    /// times, at bootstrap.
    #[arg(id = "discovery.v5.bootstrap.lookup-interval", long = "discovery.v5.bootstrap.lookup-interval", value_name = "DISCOVERY_V5_bootstrap_lookup_interval",
    default_value_t = DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL)]
    pub discv5_bootstrap_lookup_interval: u64,

    /// The number of times to carry out boost lookup queries at bootstrap.
    #[arg(id = "discovery.v5.bootstrap.lookup-countdown", long = "discovery.v5.bootstrap.lookup-countdown", value_name = "DISCOVERY_V5_bootstrap_lookup_countdown",
    default_value_t = DEFAULT_COUNT_BOOTSTRAP_LOOKUPS)]
    pub discv5_bootstrap_lookup_countdown: u64,
}

impl DiscoveryArgs {
    /// Apply the discovery settings to the given [`NetworkConfigBuilder`]
    pub fn apply_to_builder(
        &self,
        mut network_config_builder: NetworkConfigBuilder,
        rlpx_tcp_socket: SocketAddr,
        boot_nodes: impl IntoIterator<Item = NodeRecord>,
    ) -> NetworkConfigBuilder {
        if self.disable_discovery || self.disable_dns_discovery {
            network_config_builder = network_config_builder.disable_dns_discovery();
        }

        if self.disable_discovery || self.disable_discv4_discovery {
            network_config_builder = network_config_builder.disable_discv4_discovery();
        }

        if !self.disable_discovery && self.enable_discv5_discovery {
            network_config_builder = network_config_builder
                .discovery_v5(self.discovery_v5_builder(rlpx_tcp_socket, boot_nodes));
        }

        network_config_builder
    }

    /// Creates a [`reth_discv5::ConfigBuilder`] filling it with the values from this struct.
    pub fn discovery_v5_builder(
        &self,
        rlpx_tcp_socket: SocketAddr,
        boot_nodes: impl IntoIterator<Item = NodeRecord>,
    ) -> reth_discv5::ConfigBuilder {
        let Self {
            discv5_addr,
            discv5_addr_ipv6,
            discv5_port,
            discv5_port_ipv6,
            discv5_lookup_interval,
            discv5_bootstrap_lookup_interval,
            discv5_bootstrap_lookup_countdown,
            ..
        } = self;

        // Use rlpx address if none given
        let discv5_addr_ipv4 = discv5_addr.or(match rlpx_tcp_socket {
            SocketAddr::V4(addr) => Some(*addr.ip()),
            SocketAddr::V6(_) => None,
        });
        let discv5_addr_ipv6 = discv5_addr_ipv6.or(match rlpx_tcp_socket {
            SocketAddr::V4(_) => None,
            SocketAddr::V6(addr) => Some(*addr.ip()),
        });

        reth_discv5::Config::builder(rlpx_tcp_socket)
            .discv5_config(
                reth_discv5::discv5::ConfigBuilder::new(ListenConfig::from_two_sockets(
                    discv5_addr_ipv4.map(|addr| SocketAddrV4::new(addr, *discv5_port)),
                    discv5_addr_ipv6.map(|addr| SocketAddrV6::new(addr, *discv5_port_ipv6, 0, 0)),
                ))
                .build(),
            )
            .add_unsigned_boot_nodes(boot_nodes)
            .lookup_interval(*discv5_lookup_interval)
            .bootstrap_lookup_interval(*discv5_bootstrap_lookup_interval)
            .bootstrap_lookup_countdown(*discv5_bootstrap_lookup_countdown)
    }

    /// Set the discovery port to zero, to allow the OS to assign a random unused port when
    /// discovery binds to the socket.
    pub const fn with_unused_discovery_port(mut self) -> Self {
        self.port = 0;
        self
    }

    /// Change networking port numbers based on the instance number.
    /// Ports are updated to `previous_value + instance - 1`
    ///
    /// # Panics
    /// Warning: if `instance` is zero in debug mode, this will panic.
    pub fn adjust_instance_ports(&mut self, instance: u16) {
        debug_assert_ne!(instance, 0, "instance must be non-zero");
        self.port += instance - 1;
        self.discv5_port += instance - 1;
        self.discv5_port_ipv6 += instance - 1;
    }
}

impl Default for DiscoveryArgs {
    fn default() -> Self {
        Self {
            disable_discovery: false,
            disable_dns_discovery: false,
            disable_discv4_discovery: false,
            enable_discv5_discovery: false,
            addr: DEFAULT_DISCOVERY_ADDR,
            port: DEFAULT_DISCOVERY_PORT,
            discv5_addr: None,
            discv5_addr_ipv6: None,
            discv5_port: DEFAULT_DISCOVERY_V5_PORT,
            discv5_port_ipv6: DEFAULT_DISCOVERY_V5_PORT,
            discv5_lookup_interval: DEFAULT_SECONDS_LOOKUP_INTERVAL,
            discv5_bootstrap_lookup_interval: DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL,
            discv5_bootstrap_lookup_countdown: DEFAULT_COUNT_BOOTSTRAP_LOOKUPS,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;
    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn parse_nat_args() {
        let args = CommandParser::<NetworkArgs>::parse_from(["reth", "--nat", "none"]).args;
        assert_eq!(args.nat, NatResolver::None);

        let args =
            CommandParser::<NetworkArgs>::parse_from(["reth", "--nat", "extip:0.0.0.0"]).args;
        assert_eq!(args.nat, NatResolver::ExternalIp("0.0.0.0".parse().unwrap()));
    }

    #[test]
    fn parse_peer_args() {
        let args =
            CommandParser::<NetworkArgs>::parse_from(["reth", "--max-outbound-peers", "50"]).args;
        assert_eq!(args.max_outbound_peers, Some(50));
        assert_eq!(args.max_inbound_peers, None);

        let args = CommandParser::<NetworkArgs>::parse_from([
            "reth",
            "--max-outbound-peers",
            "75",
            "--max-inbound-peers",
            "15",
        ])
        .args;
        assert_eq!(args.max_outbound_peers, Some(75));
        assert_eq!(args.max_inbound_peers, Some(15));
    }

    #[test]
    fn parse_trusted_peer_args() {
        let args =
            CommandParser::<NetworkArgs>::parse_from([
                "reth",
                "--trusted-peers",
                "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303,enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303"
            ])
            .args;

        assert_eq!(
            args.trusted_peers,
            vec![
                "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303".parse().unwrap(),
                "enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303".parse().unwrap()
            ]
        );
    }

    #[test]
    fn parse_retry_strategy_args() {
        let tests = vec![0, 10];

        for retries in tests {
            let args = CommandParser::<NetworkArgs>::parse_from([
                "reth",
                "--dns-retries",
                retries.to_string().as_str(),
            ])
            .args;

            assert_eq!(args.dns_retries, retries);
        }
    }

    #[cfg(not(feature = "optimism"))]
    #[test]
    fn network_args_default_sanity_test() {
        let default_args = NetworkArgs::default();
        let args = CommandParser::<NetworkArgs>::parse_from(["reth"]).args;

        assert_eq!(args, default_args);
    }
}
crates/node/core/src/args/payload_builder.rs (new file, 159 lines)
@@ -0,0 +1,159 @@
use crate::{
    args::utils::parse_duration_from_secs, cli::config::PayloadBuilderConfig,
    version::default_extradata,
};
use clap::{
    builder::{RangedU64ValueParser, TypedValueParser},
    Arg, Args, Command,
};
use reth_primitives::constants::{
    ETHEREUM_BLOCK_GAS_LIMIT, MAXIMUM_EXTRA_DATA_SIZE, SLOT_DURATION,
};
use std::{borrow::Cow, ffi::OsStr, time::Duration};

/// Parameters for configuring the Payload Builder
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "Builder")]
pub struct PayloadBuilderArgs {
    /// Block extra data set by the payload builder.
    #[arg(long = "builder.extradata", value_parser = ExtradataValueParser::default(), default_value_t = default_extradata())]
    pub extradata: String,

    /// Target gas ceiling for built blocks.
    #[arg(long = "builder.gaslimit", default_value = "30000000", value_name = "GAS_LIMIT")]
    pub max_gas_limit: u64,

    /// The interval at which the job should build a new payload after the last (in seconds).
    #[arg(long = "builder.interval", value_parser = parse_duration_from_secs, default_value = "1", value_name = "SECONDS")]
    pub interval: Duration,

    /// The deadline for when the payload builder job should resolve.
    #[arg(long = "builder.deadline", value_parser = parse_duration_from_secs, default_value = "12", value_name = "SECONDS")]
    pub deadline: Duration,

    /// Maximum number of tasks to spawn for building a payload.
    #[arg(long = "builder.max-tasks", default_value = "3", value_parser = RangedU64ValueParser::<usize>::new().range(1..))]
    pub max_payload_tasks: usize,
}

impl Default for PayloadBuilderArgs {
    fn default() -> Self {
        Self {
            extradata: default_extradata(),
            max_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT,
            interval: Duration::from_secs(1),
            deadline: SLOT_DURATION,
            max_payload_tasks: 3,
        }
    }
}

impl PayloadBuilderConfig for PayloadBuilderArgs {
    fn extradata(&self) -> Cow<'_, str> {
        self.extradata.as_str().into()
    }

    fn interval(&self) -> Duration {
        self.interval
    }

    fn deadline(&self) -> Duration {
        self.deadline
    }

    fn max_gas_limit(&self) -> u64 {
        self.max_gas_limit
    }

    fn max_payload_tasks(&self) -> usize {
        self.max_payload_tasks
    }
}

#[derive(Clone, Debug, Default)]
#[non_exhaustive]
struct ExtradataValueParser;

impl TypedValueParser for ExtradataValueParser {
    type Value = String;

    fn parse_ref(
        &self,
        _cmd: &Command,
        _arg: Option<&Arg>,
        value: &OsStr,
    ) -> Result<Self::Value, clap::Error> {
        let val =
            value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?;
        if val.as_bytes().len() > MAXIMUM_EXTRA_DATA_SIZE {
            return Err(clap::Error::raw(
                clap::error::ErrorKind::InvalidValue,
                format!(
                    "Payload builder extradata size exceeds {MAXIMUM_EXTRA_DATA_SIZE}-byte limit"
                ),
            ))
        }
        Ok(val.to_string())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn test_args_with_valid_max_tasks() {
        let args =
            CommandParser::<PayloadBuilderArgs>::parse_from(["reth", "--builder.max-tasks", "1"])
                .args;
        assert_eq!(args.max_payload_tasks, 1)
    }

    #[test]
    fn test_args_with_invalid_max_tasks() {
        assert!(CommandParser::<PayloadBuilderArgs>::try_parse_from([
            "reth",
            "--builder.max-tasks",
            "0"
        ])
        .is_err());
    }

    #[test]
    fn test_default_extradata() {
        let extradata = default_extradata();
        let args = CommandParser::<PayloadBuilderArgs>::parse_from([
            "reth",
            "--builder.extradata",
            extradata.as_str(),
        ])
        .args;
        assert_eq!(args.extradata, extradata);
    }

    #[test]
    fn test_invalid_extradata() {
        let extradata = "x".repeat(MAXIMUM_EXTRA_DATA_SIZE + 1);
        let args = CommandParser::<PayloadBuilderArgs>::try_parse_from([
            "reth",
            "--builder.extradata",
            extradata.as_str(),
        ]);
        assert!(args.is_err());
    }

    #[test]
    fn payload_builder_args_default_sanity_check() {
        let default_args = PayloadBuilderArgs::default();
        let args = CommandParser::<PayloadBuilderArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
}
crates/node/core/src/args/pruning.rs (new file, 66 lines)
@@ -0,0 +1,66 @@
//! Pruning and full node arguments

use clap::Args;
use reth_chainspec::ChainSpec;
use reth_config::config::PruneConfig;
use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE};

/// Parameters for pruning and full node
#[derive(Debug, Clone, Args, PartialEq, Eq, Default)]
#[command(next_help_heading = "Pruning")]
pub struct PruningArgs {
    /// Run full node. Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored.
    /// This flag takes priority over pruning configuration in reth.toml.
    #[arg(long, default_value_t = false)]
    pub full: bool,
}

impl PruningArgs {
    /// Returns pruning configuration.
    pub fn prune_config(&self, chain_spec: &ChainSpec) -> Option<PruneConfig> {
        if !self.full {
            return None
        }
        Some(PruneConfig {
            block_interval: 5,
            segments: PruneModes {
                sender_recovery: Some(PruneMode::Full),
                transaction_lookup: None,
                receipts: chain_spec
                    .deposit_contract
                    .as_ref()
                    .map(|contract| PruneMode::Before(contract.block)),
                account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)),
                storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)),
                receipts_log_filter: ReceiptsLogPruneConfig(
                    chain_spec
                        .deposit_contract
                        .as_ref()
                        .map(|contract| (contract.address, PruneMode::Before(contract.block)))
                        .into_iter()
                        .collect(),
                ),
            },
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn pruning_args_sanity_check() {
        let default_args = PruningArgs::default();
        let args = CommandParser::<PruningArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
}
383
crates/node/core/src/args/rpc_server.rs
Normal file
383
crates/node/core/src/args/rpc_server.rs
Normal file
@@ -0,0 +1,383 @@
|
||||
//! clap [Args](clap::Args) for RPC related arguments.

use std::{
    ffi::OsStr,
    net::{IpAddr, Ipv4Addr},
    path::PathBuf,
};

use alloy_rpc_types_engine::JwtSecret;
use clap::{
    builder::{PossibleValue, RangedU64ValueParser, TypedValueParser},
    Arg, Args, Command,
};
use rand::Rng;
use reth_rpc_server_types::{constants, RethRpcModule, RpcModuleSelection};

use crate::args::{
    types::{MaxU32, ZeroAsNoneU64},
    GasPriceOracleArgs, RpcStateCacheArgs,
};

/// Default max number of subscriptions per connection.
pub(crate) const RPC_DEFAULT_MAX_SUBS_PER_CONN: u32 = 1024;

/// Default max request size in MB.
pub(crate) const RPC_DEFAULT_MAX_REQUEST_SIZE_MB: u32 = 15;

/// Default max response size in MB.
///
/// This is only relevant for very large trace responses.
pub(crate) const RPC_DEFAULT_MAX_RESPONSE_SIZE_MB: u32 = 160;

/// Default number of incoming connections.
pub(crate) const RPC_DEFAULT_MAX_CONNECTIONS: u32 = 500;

/// Parameters for configuring the RPC server with more granularity via the CLI
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "RPC")]
pub struct RpcServerArgs {
    /// Enable the HTTP-RPC server
    #[arg(long, default_value_if("dev", "true", "true"))]
    pub http: bool,

    /// Http server address to listen on
    #[arg(long = "http.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
    pub http_addr: IpAddr,

    /// Http server port to listen on
    #[arg(long = "http.port", default_value_t = constants::DEFAULT_HTTP_RPC_PORT)]
    pub http_port: u16,

    /// Rpc Modules to be configured for the HTTP server
    #[arg(long = "http.api", value_parser = RpcModuleSelectionValueParser::default())]
    pub http_api: Option<RpcModuleSelection>,

    /// Http Corsdomain to allow request from
    #[arg(long = "http.corsdomain")]
    pub http_corsdomain: Option<String>,

    /// Enable the WS-RPC server
    #[arg(long)]
    pub ws: bool,

    /// Ws server address to listen on
    #[arg(long = "ws.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
    pub ws_addr: IpAddr,

    /// Ws server port to listen on
    #[arg(long = "ws.port", default_value_t = constants::DEFAULT_WS_RPC_PORT)]
    pub ws_port: u16,

    /// Origins from which to accept `WebSocket` requests
    #[arg(id = "ws.origins", long = "ws.origins")]
    pub ws_allowed_origins: Option<String>,

    /// Rpc Modules to be configured for the WS server
    #[arg(long = "ws.api", value_parser = RpcModuleSelectionValueParser::default())]
    pub ws_api: Option<RpcModuleSelection>,

    /// Disable the IPC-RPC server
    #[arg(long)]
    pub ipcdisable: bool,

    /// Filename for IPC socket/pipe within the datadir
    #[arg(long, default_value_t = constants::DEFAULT_IPC_ENDPOINT.to_string())]
    pub ipcpath: String,

    /// Auth server address to listen on
    #[arg(long = "authrpc.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
    pub auth_addr: IpAddr,

    /// Auth server port to listen on
    #[arg(long = "authrpc.port", default_value_t = constants::DEFAULT_AUTH_PORT)]
    pub auth_port: u16,

    /// Path to a JWT secret to use for the authenticated engine-API RPC server.
    ///
    /// This will enforce JWT authentication for all requests coming from the consensus layer.
    ///
    /// If no path is provided, a secret will be generated and stored in the datadir under
    /// `<DIR>/<CHAIN_ID>/jwt.hex`. For mainnet this would be `~/.reth/mainnet/jwt.hex` by default.
    #[arg(long = "authrpc.jwtsecret", value_name = "PATH", global = true, required = false)]
    pub auth_jwtsecret: Option<PathBuf>,

    /// Enable auth engine API over IPC
    #[arg(long)]
    pub auth_ipc: bool,

    /// Filename for auth IPC socket/pipe within the datadir
    #[arg(long = "auth-ipc.path", default_value_t = constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string())]
    pub auth_ipc_path: String,

    /// Hex encoded JWT secret to authenticate the regular RPC server(s), see `--http.api` and
    /// `--ws.api`.
    ///
    /// This is __not__ used for the authenticated engine-API RPC server, see
    /// `--authrpc.jwtsecret`.
    #[arg(long = "rpc.jwtsecret", value_name = "HEX", global = true, required = false)]
    pub rpc_jwtsecret: Option<JwtSecret>,

    /// Set the maximum RPC request payload size for both HTTP and WS in megabytes.
    #[arg(long = "rpc.max-request-size", alias = "rpc-max-request-size", default_value_t = RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into())]
    pub rpc_max_request_size: MaxU32,

    /// Set the maximum RPC response payload size for both HTTP and WS in megabytes.
    #[arg(long = "rpc.max-response-size", alias = "rpc-max-response-size", visible_alias = "rpc.returndata.limit", default_value_t = RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into())]
    pub rpc_max_response_size: MaxU32,

    /// Set the maximum concurrent subscriptions per connection.
    #[arg(long = "rpc.max-subscriptions-per-connection", alias = "rpc-max-subscriptions-per-connection", default_value_t = RPC_DEFAULT_MAX_SUBS_PER_CONN.into())]
    pub rpc_max_subscriptions_per_connection: MaxU32,

    /// Maximum number of RPC server connections.
    #[arg(long = "rpc.max-connections", alias = "rpc-max-connections", value_name = "COUNT", default_value_t = RPC_DEFAULT_MAX_CONNECTIONS.into())]
    pub rpc_max_connections: MaxU32,

    /// Maximum number of concurrent tracing requests.
    #[arg(long = "rpc.max-tracing-requests", alias = "rpc-max-tracing-requests", value_name = "COUNT", default_value_t = constants::default_max_tracing_requests())]
    pub rpc_max_tracing_requests: usize,

    /// Maximum number of blocks that could be scanned per filter request. (0 = entire chain)
    #[arg(long = "rpc.max-blocks-per-filter", alias = "rpc-max-blocks-per-filter", value_name = "COUNT", default_value_t = ZeroAsNoneU64::new(constants::DEFAULT_MAX_BLOCKS_PER_FILTER))]
    pub rpc_max_blocks_per_filter: ZeroAsNoneU64,

    /// Maximum number of logs that can be returned in a single response. (0 = no limit)
    #[arg(long = "rpc.max-logs-per-response", alias = "rpc-max-logs-per-response", value_name = "COUNT", default_value_t = ZeroAsNoneU64::new(constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64))]
    pub rpc_max_logs_per_response: ZeroAsNoneU64,

    /// Maximum gas limit for `eth_call` and call tracing RPC methods.
    #[arg(
        long = "rpc.gascap",
        alias = "rpc-gascap",
        value_name = "GAS_CAP",
        value_parser = RangedU64ValueParser::<u64>::new().range(1..),
        default_value_t = constants::gas_oracle::RPC_DEFAULT_GAS_CAP
    )]
    pub rpc_gas_cap: u64,

    /// State cache configuration.
    #[command(flatten)]
    pub rpc_state_cache: RpcStateCacheArgs,

    /// Gas price oracle configuration.
    #[command(flatten)]
    pub gas_price_oracle: GasPriceOracleArgs,
}

impl RpcServerArgs {
    /// Enables the HTTP-RPC server.
    pub const fn with_http(mut self) -> Self {
        self.http = true;
        self
    }

    /// Enables the WS-RPC server.
    pub const fn with_ws(mut self) -> Self {
        self.ws = true;
        self
    }

    /// Enables the Auth IPC
    pub const fn with_auth_ipc(mut self) -> Self {
        self.auth_ipc = true;
        self
    }

    /// Change rpc port numbers based on the instance number.
    /// * The `auth_port` is scaled by a factor of `instance * 100`
    /// * The `http_port` is scaled by a factor of `-instance`
    /// * The `ws_port` is scaled by a factor of `instance * 2`
    /// * The `ipcpath` is appended with the instance number: `/tmp/reth.ipc-<instance>`
    ///
    /// # Panics
    /// Warning: if `instance` is zero in debug mode, this will panic.
    ///
    /// This will also panic in debug mode if either:
    /// * `instance` is greater than `655` (scaling would overflow `u16`)
    /// * `self.auth_port / 100 + (instance - 1)` would overflow `u16`
    ///
    /// In release mode, this will silently wrap around.
    pub fn adjust_instance_ports(&mut self, instance: u16) {
        debug_assert_ne!(instance, 0, "instance must be non-zero");
        // auth port is scaled by a factor of instance * 100
        self.auth_port += instance * 100 - 100;
        // http port is scaled by a factor of -instance
        self.http_port -= instance - 1;
        // ws port is scaled by a factor of instance * 2
        self.ws_port += instance * 2 - 2;

        // if multiple instances are being run, append the instance number to the ipc path
        if instance > 1 {
            self.ipcpath = format!("{}-{}", self.ipcpath, instance);
        }
    }

    /// Set the http port to zero, to allow the OS to assign a random unused port when the rpc
    /// server binds to a socket.
    pub const fn with_http_unused_port(mut self) -> Self {
        self.http_port = 0;
        self
    }

    /// Set the ws port to zero, to allow the OS to assign a random unused port when the rpc
    /// server binds to a socket.
    pub const fn with_ws_unused_port(mut self) -> Self {
        self.ws_port = 0;
        self
    }

    /// Set the auth port to zero, to allow the OS to assign a random unused port when the rpc
    /// server binds to a socket.
    pub const fn with_auth_unused_port(mut self) -> Self {
        self.auth_port = 0;
        self
    }

    /// Append a random string to the ipc path, to prevent possible collisions when multiple nodes
    /// are being run on the same machine.
    pub fn with_ipc_random_path(mut self) -> Self {
        let random_string: String = rand::thread_rng()
            .sample_iter(rand::distributions::Alphanumeric)
            .take(8)
            .map(char::from)
            .collect();
        self.ipcpath = format!("{}-{}", self.ipcpath, random_string);
        self
    }

    /// Configure all ports to be set to a random unused port when bound, and set the IPC path to a
    /// random path.
    pub fn with_unused_ports(mut self) -> Self {
        self = self.with_http_unused_port();
        self = self.with_ws_unused_port();
        self = self.with_auth_unused_port();
        self = self.with_ipc_random_path();
        self
    }
}

impl Default for RpcServerArgs {
    fn default() -> Self {
        Self {
            http: false,
            http_addr: Ipv4Addr::LOCALHOST.into(),
            http_port: constants::DEFAULT_HTTP_RPC_PORT,
            http_api: None,
            http_corsdomain: None,
            ws: false,
            ws_addr: Ipv4Addr::LOCALHOST.into(),
            ws_port: constants::DEFAULT_WS_RPC_PORT,
            ws_allowed_origins: None,
            ws_api: None,
            ipcdisable: false,
            ipcpath: constants::DEFAULT_IPC_ENDPOINT.to_string(),
            auth_addr: Ipv4Addr::LOCALHOST.into(),
            auth_port: constants::DEFAULT_AUTH_PORT,
            auth_jwtsecret: None,
            auth_ipc: false,
            auth_ipc_path: constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string(),
            rpc_jwtsecret: None,
            rpc_max_request_size: RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into(),
            rpc_max_response_size: RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into(),
            rpc_max_subscriptions_per_connection: RPC_DEFAULT_MAX_SUBS_PER_CONN.into(),
            rpc_max_connections: RPC_DEFAULT_MAX_CONNECTIONS.into(),
            rpc_max_tracing_requests: constants::default_max_tracing_requests(),
            rpc_max_blocks_per_filter: constants::DEFAULT_MAX_BLOCKS_PER_FILTER.into(),
            rpc_max_logs_per_response: (constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64).into(),
            rpc_gas_cap: constants::gas_oracle::RPC_DEFAULT_GAS_CAP,
            gas_price_oracle: GasPriceOracleArgs::default(),
            rpc_state_cache: RpcStateCacheArgs::default(),
        }
    }
}

/// clap value parser for [`RpcModuleSelection`].
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
struct RpcModuleSelectionValueParser;

impl TypedValueParser for RpcModuleSelectionValueParser {
    type Value = RpcModuleSelection;

    fn parse_ref(
        &self,
        _cmd: &Command,
        arg: Option<&Arg>,
        value: &OsStr,
    ) -> Result<Self::Value, clap::Error> {
        let val =
            value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?;
        val.parse::<RpcModuleSelection>().map_err(|err| {
            let arg = arg.map(|a| a.to_string()).unwrap_or_else(|| "...".to_owned());
            let possible_values = RethRpcModule::all_variant_names().to_vec().join(",");
            let msg = format!(
                "Invalid value '{val}' for {arg}: {err}.\n [possible values: {possible_values}]"
            );
            clap::Error::raw(clap::error::ErrorKind::InvalidValue, msg)
        })
    }

    fn possible_values(&self) -> Option<Box<dyn Iterator<Item = PossibleValue> + '_>> {
        let values = RethRpcModule::all_variant_names().iter().map(PossibleValue::new);
        Some(Box::new(values))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use clap::{Args, Parser};

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn test_rpc_server_args_parser() {
        let args =
            CommandParser::<RpcServerArgs>::parse_from(["reth", "--http.api", "eth,admin,debug"])
                .args;

        let apis = args.http_api.unwrap();
        let expected = RpcModuleSelection::try_from_selection(["eth", "admin", "debug"]).unwrap();

        assert_eq!(apis, expected);
    }

    #[test]
    fn test_rpc_server_eth_call_bundle_args() {
        let args = CommandParser::<RpcServerArgs>::parse_from([
            "reth",
            "--http.api",
            "eth,admin,debug,eth-call-bundle",
        ])
        .args;

        let apis = args.http_api.unwrap();
        let expected =
            RpcModuleSelection::try_from_selection(["eth", "admin", "debug", "eth-call-bundle"])
                .unwrap();

        assert_eq!(apis, expected);
    }

    #[test]
    fn test_rpc_server_args_parser_none() {
        let args = CommandParser::<RpcServerArgs>::parse_from(["reth", "--http.api", "none"]).args;
        let apis = args.http_api.unwrap();
        let expected = RpcModuleSelection::Selection(Default::default());
        assert_eq!(apis, expected);
    }

    #[test]
    fn rpc_server_args_default_sanity_test() {
        let default_args = RpcServerArgs::default();
        let args = CommandParser::<RpcServerArgs>::parse_from(["reth"]).args;

        assert_eq!(args, default_args);
    }
}
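As a worked example of the port arithmetic in `adjust_instance_ports`, here is a sketch for a second node instance, assuming `RpcServerArgs` is reachable as `reth_node_core::args::RpcServerArgs` (assumed path) and the usual defaults of HTTP 8545, WS 8546 and auth 8551:

// Illustrative only, not part of this commit.
use reth_node_core::args::RpcServerArgs; // assumed re-export path

fn main() {
    let mut args = RpcServerArgs::default();
    args.adjust_instance_ports(2);
    assert_eq!(args.auth_port, 8551 + 100); // auth_port + instance * 100 - 100
    assert_eq!(args.http_port, 8545 - 1);   // http_port - (instance - 1)
    assert_eq!(args.ws_port, 8546 + 2);     // ws_port + instance * 2 - 2
    assert!(args.ipcpath.ends_with("-2"));  // "<ipcpath>-<instance>" for instance > 1
}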
49
crates/node/core/src/args/rpc_state_cache.rs
Normal file
@@ -0,0 +1,49 @@
use clap::Args;
use reth_rpc_server_types::constants::cache::{
    DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_ENV_CACHE_MAX_LEN,
    DEFAULT_RECEIPT_CACHE_MAX_LEN,
};

/// Parameters to configure RPC state cache.
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "RPC State Cache")]
pub struct RpcStateCacheArgs {
    /// Max number of blocks in cache.
    #[arg(
        long = "rpc-cache.max-blocks",
        default_value_t = DEFAULT_BLOCK_CACHE_MAX_LEN,
    )]
    pub max_blocks: u32,

    /// Max number of receipts in cache.
    #[arg(
        long = "rpc-cache.max-receipts",
        default_value_t = DEFAULT_RECEIPT_CACHE_MAX_LEN,
    )]
    pub max_receipts: u32,

    /// Max number of bytes for cached env data.
    #[arg(
        long = "rpc-cache.max-envs",
        default_value_t = DEFAULT_ENV_CACHE_MAX_LEN,
    )]
    pub max_envs: u32,

    /// Max number of concurrent database requests.
    #[arg(
        long = "rpc-cache.max-concurrent-db-requests",
        default_value_t = DEFAULT_CONCURRENT_DB_REQUESTS,
    )]
    pub max_concurrent_db_requests: usize,
}

impl Default for RpcStateCacheArgs {
    fn default() -> Self {
        Self {
            max_blocks: DEFAULT_BLOCK_CACHE_MAX_LEN,
            max_receipts: DEFAULT_RECEIPT_CACHE_MAX_LEN,
            max_envs: DEFAULT_ENV_CACHE_MAX_LEN,
            max_concurrent_db_requests: DEFAULT_CONCURRENT_DB_REQUESTS,
        }
    }
}
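A short sketch of overriding a single cache limit from the CLI while the rest keep their defaults, assuming `RpcStateCacheArgs` is reachable as `reth_node_core::args::RpcStateCacheArgs` (assumed path):

// Illustrative only, not part of this commit.
use clap::Parser;
use reth_node_core::args::RpcStateCacheArgs; // assumed re-export path

#[derive(Parser)]
struct Cli {
    #[command(flatten)]
    cache: RpcStateCacheArgs,
}

fn main() {
    let cli = Cli::parse_from(["reth", "--rpc-cache.max-blocks", "10000"]);
    assert_eq!(cli.cache.max_blocks, 10_000);
    // Everything not overridden keeps its default value.
    assert_eq!(
        cli.cache.max_concurrent_db_requests,
        RpcStateCacheArgs::default().max_concurrent_db_requests
    );
}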
62
crates/node/core/src/args/secret_key.rs
Normal file
@@ -0,0 +1,62 @@
use reth_fs_util::{self as fs, FsPathError};
use reth_network::config::rng_secret_key;
use reth_primitives::hex::encode as hex_encode;
use secp256k1::{Error as SecretKeyBaseError, SecretKey};
use std::{
    io,
    path::{Path, PathBuf},
};
use thiserror::Error;

/// Errors returned by loading a [`SecretKey`], including IO errors.
#[derive(Error, Debug)]
pub enum SecretKeyError {
    /// Error encountered during decoding of the secret key.
    #[error(transparent)]
    SecretKeyDecodeError(#[from] SecretKeyBaseError),

    /// Error related to file system path operations.
    #[error(transparent)]
    SecretKeyFsPathError(#[from] FsPathError),

    /// Represents a failure to access the key file.
    #[error("failed to access key file {secret_file:?}: {error}")]
    FailedToAccessKeyFile {
        /// The encountered IO error.
        error: io::Error,
        /// Path to the secret key file.
        secret_file: PathBuf,
    },
}

/// Attempts to load a [`SecretKey`] from a specified path. If no file exists there, then it
/// generates a secret key and stores it in the provided path. I/O errors might occur during write
/// operations in the form of a [`SecretKeyError`].
pub fn get_secret_key(secret_key_path: &Path) -> Result<SecretKey, SecretKeyError> {
    let exists = secret_key_path.try_exists();

    match exists {
        Ok(true) => {
            let contents = fs::read_to_string(secret_key_path)?;
            Ok(contents
                .as_str()
                .parse::<SecretKey>()
                .map_err(SecretKeyError::SecretKeyDecodeError)?)
        }
        Ok(false) => {
            if let Some(dir) = secret_key_path.parent() {
                // Create parent directory
                fs::create_dir_all(dir)?;
            }

            let secret = rng_secret_key();
            let hex = hex_encode(secret.as_ref());
            fs::write(secret_key_path, hex)?;
            Ok(secret)
        }
        Err(error) => Err(SecretKeyError::FailedToAccessKeyFile {
            error,
            secret_file: secret_key_path.to_path_buf(),
        }),
    }
}
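A usage sketch for `get_secret_key`, with a throwaway path chosen purely for illustration and the items assumed to be reachable under `reth_node_core::args` (assumed path):

// Illustrative only, not part of this commit.
use reth_node_core::args::{get_secret_key, SecretKeyError}; // assumed re-export path
use std::path::Path;

fn main() -> Result<(), SecretKeyError> {
    let path = Path::new("/tmp/reth-example/discovery-secret"); // example path only
    // The first call creates the parent directory, generates a key and writes it as hex;
    // later calls read the same key back.
    let first = get_secret_key(path)?;
    let second = get_secret_key(path)?;
    assert_eq!(first, second);
    Ok(())
}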
53
crates/node/core/src/args/stage.rs
Normal file
@@ -0,0 +1,53 @@
//! Shared arguments related to stages
use derive_more::Display;

/// Represents a specific stage within the data pipeline.
///
/// Different stages within the pipeline have dedicated functionalities and operations.
#[derive(Debug, Clone, Copy, Eq, PartialEq, PartialOrd, Ord, clap::ValueEnum, Display)]
pub enum StageEnum {
    /// The headers stage within the pipeline.
    ///
    /// This stage handles operations related to block headers.
    Headers,
    /// The bodies stage within the pipeline.
    ///
    /// This stage deals with block bodies and their associated data.
    Bodies,
    /// The senders stage within the pipeline.
    ///
    /// Responsible for sender-related processes and data recovery.
    Senders,
    /// The execution stage within the pipeline.
    ///
    /// Handles the execution of transactions and contracts.
    Execution,
    /// The account hashing stage within the pipeline.
    ///
    /// Manages operations related to hashing account data.
    AccountHashing,
    /// The storage hashing stage within the pipeline.
    ///
    /// Manages operations related to hashing storage data.
    StorageHashing,
    /// The account and storage hashing stages within the pipeline.
    ///
    /// Covers general data hashing operations.
    Hashing,
    /// The merkle stage within the pipeline.
    ///
    /// Handles Merkle tree-related computations and data processing.
    Merkle,
    /// The transaction lookup stage within the pipeline.
    ///
    /// Deals with the retrieval and processing of transactions.
    TxLookup,
    /// The account history stage within the pipeline.
    ///
    /// Manages historical data related to accounts.
    AccountHistory,
    /// The storage history stage within the pipeline.
    ///
    /// Manages historical data related to storage.
    StorageHistory,
}
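Since `StageEnum` derives `clap::ValueEnum`, it can back a CLI flag directly; the `--stage` flag below is only a hypothetical example (and the `reth_node_core::args::StageEnum` path an assumption), not something this commit adds:

// Illustrative only, not part of this commit.
use clap::Parser;
use reth_node_core::args::StageEnum; // assumed re-export path

#[derive(Parser)]
struct Cli {
    /// Hypothetical flag for selecting a pipeline stage.
    #[arg(long, value_enum)]
    stage: StageEnum,
}

fn main() {
    // ValueEnum derives kebab-case names, so `TxLookup` parses from "tx-lookup".
    let cli = Cli::parse_from(["reth", "--stage", "tx-lookup"]);
    assert_eq!(cli.stage, StageEnum::TxLookup);
}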
141
crates/node/core/src/args/txpool.rs
Normal file
@@ -0,0 +1,141 @@
//! Transaction pool arguments

use crate::cli::config::RethTransactionPoolConfig;
use clap::Args;
use reth_primitives::Address;
use reth_transaction_pool::{
    blobstore::disk::DEFAULT_MAX_CACHED_BLOBS, validate::DEFAULT_MAX_TX_INPUT_BYTES,
    LocalTransactionConfig, PoolConfig, PriceBumpConfig, SubPoolLimit, DEFAULT_PRICE_BUMP,
    REPLACE_BLOB_PRICE_BUMP, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER,
    TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
};
/// Parameters for transaction pool configuration
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "TxPool")]
pub struct TxPoolArgs {
    /// Max number of transactions in the pending sub-pool.
    #[arg(long = "txpool.pending-max-count", alias = "txpool.pending_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)]
    pub pending_max_count: usize,
    /// Max size of the pending sub-pool in megabytes.
    #[arg(long = "txpool.pending-max-size", alias = "txpool.pending_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)]
    pub pending_max_size: usize,

    /// Max number of transactions in the basefee sub-pool
    #[arg(long = "txpool.basefee-max-count", alias = "txpool.basefee_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)]
    pub basefee_max_count: usize,
    /// Max size of the basefee sub-pool in megabytes.
    #[arg(long = "txpool.basefee-max-size", alias = "txpool.basefee_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)]
    pub basefee_max_size: usize,

    /// Max number of transactions in the queued sub-pool
    #[arg(long = "txpool.queued-max-count", alias = "txpool.queued_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)]
    pub queued_max_count: usize,
    /// Max size of the queued sub-pool in megabytes.
    #[arg(long = "txpool.queued-max-size", alias = "txpool.queued_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)]
    pub queued_max_size: usize,

    /// Max number of executable transaction slots guaranteed per account
    #[arg(long = "txpool.max-account-slots", alias = "txpool.max_account_slots", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)]
    pub max_account_slots: usize,

    /// Price bump (in %) for the transaction pool underpriced check.
    #[arg(long = "txpool.pricebump", default_value_t = DEFAULT_PRICE_BUMP)]
    pub price_bump: u128,

    /// Price bump percentage to replace an already existing blob transaction
    #[arg(long = "blobpool.pricebump", default_value_t = REPLACE_BLOB_PRICE_BUMP)]
    pub blob_transaction_price_bump: u128,

    /// Max size in bytes of a single transaction allowed to enter the pool
    #[arg(long = "txpool.max-tx-input-bytes", alias = "txpool.max_tx_input_bytes", default_value_t = DEFAULT_MAX_TX_INPUT_BYTES)]
    pub max_tx_input_bytes: usize,

    /// The maximum number of blobs to keep in the in memory blob cache.
    #[arg(long = "txpool.max-cached-entries", alias = "txpool.max_cached_entries", default_value_t = DEFAULT_MAX_CACHED_BLOBS)]
    pub max_cached_entries: u32,

    /// Flag to disable local transaction exemptions.
    #[arg(long = "txpool.nolocals")]
    pub no_locals: bool,
    /// Flag to allow certain addresses as local.
    #[arg(long = "txpool.locals")]
    pub locals: Vec<Address>,
    /// Flag to toggle local transaction propagation.
    #[arg(long = "txpool.no-local-transactions-propagation")]
    pub no_local_transactions_propagation: bool,
}

impl Default for TxPoolArgs {
    fn default() -> Self {
        Self {
            pending_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
            pending_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT,
            basefee_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
            basefee_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT,
            queued_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
            queued_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT,
            max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER,
            price_bump: DEFAULT_PRICE_BUMP,
            blob_transaction_price_bump: REPLACE_BLOB_PRICE_BUMP,
            max_tx_input_bytes: DEFAULT_MAX_TX_INPUT_BYTES,
            max_cached_entries: DEFAULT_MAX_CACHED_BLOBS,
            no_locals: false,
            locals: Default::default(),
            no_local_transactions_propagation: false,
        }
    }
}

impl RethTransactionPoolConfig for TxPoolArgs {
    /// Returns transaction pool configuration.
    fn pool_config(&self) -> PoolConfig {
        PoolConfig {
            local_transactions_config: LocalTransactionConfig {
                no_exemptions: self.no_locals,
                local_addresses: self.locals.clone().into_iter().collect(),
                propagate_local_transactions: !self.no_local_transactions_propagation,
            },
            pending_limit: SubPoolLimit {
                max_txs: self.pending_max_count,
                max_size: self.pending_max_size * 1024 * 1024,
            },
            basefee_limit: SubPoolLimit {
                max_txs: self.basefee_max_count,
                max_size: self.basefee_max_size * 1024 * 1024,
            },
            queued_limit: SubPoolLimit {
                max_txs: self.queued_max_count,
                max_size: self.queued_max_size * 1024 * 1024,
            },
            blob_limit: SubPoolLimit {
                max_txs: self.queued_max_count,
                max_size: self.queued_max_size * 1024 * 1024,
            },
            max_account_slots: self.max_account_slots,
            price_bumps: PriceBumpConfig {
                default_price_bump: self.price_bump,
                replace_blob_tx_price_bump: self.blob_transaction_price_bump,
            },
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn txpool_args_default_sanity_test() {
        let default_args = TxPoolArgs::default();
        let args = CommandParser::<TxPoolArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
}
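A sketch of the megabyte-to-byte conversion performed by `pool_config`, assuming the paths `reth_node_core::args::TxPoolArgs` and `reth_node_core::cli::config::RethTransactionPoolConfig` (both assumptions):

// Illustrative only, not part of this commit.
use clap::Parser;
use reth_node_core::args::TxPoolArgs; // assumed re-export path
use reth_node_core::cli::config::RethTransactionPoolConfig; // assumed trait path

#[derive(Parser)]
struct Cli {
    #[command(flatten)]
    txpool: TxPoolArgs,
}

fn main() {
    let cli = Cli::parse_from(["reth", "--txpool.pending-max-size", "10"]);
    let pool_config = cli.txpool.pool_config();
    // CLI sizes are given in megabytes; the pool limits are in bytes.
    assert_eq!(pool_config.pending_limit.max_size, 10 * 1024 * 1024);
}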
119
crates/node/core/src/args/types.rs
Normal file
@@ -0,0 +1,119 @@
//! Additional helper types for CLI parsing.

use std::{fmt, num::ParseIntError, str::FromStr};

/// A macro that generates types that map "0" to "None" when parsing CLI arguments.
macro_rules! zero_as_none {
    ($type_name:ident, $inner_type:ty) => {
        #[derive(Debug, Clone, Copy, PartialEq, Eq)]
        /// A helper type that maps `0` to `None` when parsing CLI arguments.
        pub struct $type_name(pub Option<$inner_type>);

        impl $type_name {
            /// Creates a new instance wrapping the given value.
            pub const fn new(value: $inner_type) -> Self {
                Self(Some(value))
            }

            /// Returns the inner value or `$inner_type::MAX` if `None`.
            pub fn unwrap_or_max(self) -> $inner_type {
                self.0.unwrap_or(<$inner_type>::MAX)
            }
        }

        impl std::fmt::Display for $type_name {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                match self.0 {
                    Some(value) => write!(f, "{}", value),
                    None => write!(f, "0"),
                }
            }
        }

        impl From<$inner_type> for $type_name {
            #[inline]
            fn from(value: $inner_type) -> Self {
                Self(if value == 0 { None } else { Some(value) })
            }
        }

        impl std::str::FromStr for $type_name {
            type Err = std::num::ParseIntError;

            fn from_str(s: &str) -> Result<Self, Self::Err> {
                let value = s.parse::<$inner_type>()?;
                Ok(Self::from(value))
            }
        }
    };
}

zero_as_none!(ZeroAsNoneU64, u64);
zero_as_none!(ZeroAsNoneU32, u32);

/// A macro that generates types that map "max" to "MAX" when parsing CLI arguments.
macro_rules! max_values {
    ($name:ident, $ty:ident) => {
        #[derive(Debug, Clone, Copy, PartialEq, Eq)]
        /// A helper type for parsing "max" as the maximum value of the specified type.
        pub struct $name(pub $ty);

        impl $name {
            /// Returns the inner value.
            pub const fn get(&self) -> $ty {
                self.0
            }
        }

        impl fmt::Display for $name {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                write!(f, "{}", self.0)
            }
        }

        impl From<$ty> for $name {
            #[inline]
            fn from(value: $ty) -> Self {
                Self(value)
            }
        }

        impl FromStr for $name {
            type Err = ParseIntError;

            fn from_str(s: &str) -> Result<Self, Self::Err> {
                if s.eq_ignore_ascii_case("max") {
                    Ok($name(<$ty>::MAX))
                } else {
                    s.parse::<$ty>().map($name)
                }
            }
        }
    };
}
max_values!(MaxU32, u32);
max_values!(MaxU64, u64);

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_zero_parse() {
        let val = "0".parse::<ZeroAsNoneU64>().unwrap();
        assert_eq!(val, ZeroAsNoneU64(None));
        assert_eq!(val.unwrap_or_max(), u64::MAX);
    }

    #[test]
    fn test_from_u64() {
        let original = 1u64;
        let expected = ZeroAsNoneU64(Some(1u64));
        assert_eq!(ZeroAsNoneU64::from(original), expected);

        let original = 0u64;
        let expected = ZeroAsNoneU64(None);
        assert_eq!(ZeroAsNoneU64::from(original), expected);
    }
}
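A short sketch of how the two helpers behave when parsed, assuming they are reachable as `reth_node_core::args::types::{MaxU32, ZeroAsNoneU64}` (assumed path):

// Illustrative only, not part of this commit.
use reth_node_core::args::types::{MaxU32, ZeroAsNoneU64}; // assumed re-export path

fn main() {
    // "0" means "no limit" and parses to None; any other value is kept.
    assert_eq!("0".parse::<ZeroAsNoneU64>().unwrap(), ZeroAsNoneU64(None));
    assert_eq!("25".parse::<ZeroAsNoneU64>().unwrap(), ZeroAsNoneU64(Some(25)));
    assert_eq!(ZeroAsNoneU64(None).unwrap_or_max(), u64::MAX);

    // "max" (case-insensitive) parses to the type's maximum value.
    assert_eq!("max".parse::<MaxU32>().unwrap(), MaxU32(u32::MAX));
    assert_eq!("500".parse::<MaxU32>().unwrap().get(), 500);
}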
175
crates/node/core/src/args/utils.rs
Normal file
@@ -0,0 +1,175 @@
//! Clap parser utilities

use alloy_genesis::Genesis;
use reth_chainspec::ChainSpec;
use reth_fs_util as fs;
use reth_primitives::{BlockHashOrNumber, B256};
use std::{
    net::{IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs},
    path::PathBuf,
    str::FromStr,
    sync::Arc,
    time::Duration,
};

use reth_chainspec::DEV;

#[cfg(feature = "optimism")]
use reth_chainspec::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA};

#[cfg(not(feature = "optimism"))]
use reth_chainspec::{GOERLI, HOLESKY, MAINNET, SEPOLIA};

#[cfg(feature = "optimism")]
/// Chains supported by op-reth. First value should be used as the default.
pub const SUPPORTED_CHAINS: &[&str] = &["optimism", "optimism-sepolia", "base", "base-sepolia"];
#[cfg(not(feature = "optimism"))]
/// Chains supported by reth. First value should be used as the default.
pub const SUPPORTED_CHAINS: &[&str] = &["mainnet", "sepolia", "goerli", "holesky", "dev"];

/// Helper to parse a [Duration] from seconds
pub fn parse_duration_from_secs(arg: &str) -> eyre::Result<Duration, std::num::ParseIntError> {
    let seconds = arg.parse()?;
    Ok(Duration::from_secs(seconds))
}

/// The help info for the --chain flag
pub fn chain_help() -> String {
    format!("The chain this node is running.\nPossible values are either a built-in chain or the path to a chain specification file.\n\nBuilt-in chains:\n {}", SUPPORTED_CHAINS.join(", "))
}

/// Clap value parser for [`ChainSpec`]s.
///
/// The value parser matches either a known chain, the path
/// to a json file, or a json formatted string in-memory. The json needs to be a Genesis struct.
pub fn chain_value_parser(s: &str) -> eyre::Result<Arc<ChainSpec>, eyre::Error> {
    Ok(match s {
        #[cfg(not(feature = "optimism"))]
        "mainnet" => MAINNET.clone(),
        #[cfg(not(feature = "optimism"))]
        "goerli" => GOERLI.clone(),
        #[cfg(not(feature = "optimism"))]
        "sepolia" => SEPOLIA.clone(),
        #[cfg(not(feature = "optimism"))]
        "holesky" => HOLESKY.clone(),
        "dev" => DEV.clone(),
        #[cfg(feature = "optimism")]
        "optimism" => OP_MAINNET.clone(),
        #[cfg(feature = "optimism")]
        "optimism_sepolia" | "optimism-sepolia" => OP_SEPOLIA.clone(),
        #[cfg(feature = "optimism")]
        "base" => BASE_MAINNET.clone(),
        #[cfg(feature = "optimism")]
        "base_sepolia" | "base-sepolia" => BASE_SEPOLIA.clone(),
        _ => {
            // try to read json from path first
            let raw = match fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned())) {
                Ok(raw) => raw,
                Err(io_err) => {
                    // valid json may start with "\n", but must contain "{"
                    if s.contains('{') {
                        s.to_string()
                    } else {
                        return Err(io_err.into()) // assume invalid path
                    }
                }
            };

            // both serialized Genesis and ChainSpec structs supported
            let genesis: Genesis = serde_json::from_str(&raw)?;

            Arc::new(genesis.into())
        }
    })
}

/// Parse [`BlockHashOrNumber`]
pub fn hash_or_num_value_parser(value: &str) -> eyre::Result<BlockHashOrNumber, eyre::Error> {
    match B256::from_str(value) {
        Ok(hash) => Ok(BlockHashOrNumber::Hash(hash)),
        Err(_) => Ok(BlockHashOrNumber::Number(value.parse()?)),
    }
}

/// Error thrown while parsing a socket address.
#[derive(thiserror::Error, Debug)]
pub enum SocketAddressParsingError {
    /// Failed to convert the string into a socket addr
    #[error("could not parse socket address: {0}")]
    Io(#[from] std::io::Error),
    /// Input must not be empty
    #[error("cannot parse socket address from empty string")]
    Empty,
    /// Failed to parse the address
    #[error("could not parse socket address from {0}")]
    Parse(String),
    /// Failed to parse port
    #[error("could not parse port: {0}")]
    Port(#[from] std::num::ParseIntError),
}

/// Parse a [`SocketAddr`] from a `str`.
///
/// The following formats are checked:
///
/// - If the value can be parsed as a `u16` or starts with `:` it is considered a port, and the
///   hostname is set to `localhost`.
/// - If the value contains `:` it is assumed to be the format `<host>:<port>`
/// - Otherwise it is assumed to be a hostname
///
/// An error is returned if the value is empty.
pub fn parse_socket_address(value: &str) -> eyre::Result<SocketAddr, SocketAddressParsingError> {
    if value.is_empty() {
        return Err(SocketAddressParsingError::Empty)
    }

    if let Some(port) = value.strip_prefix(':').or_else(|| value.strip_prefix("localhost:")) {
        let port: u16 = port.parse()?;
        return Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port))
    }
    if let Ok(port) = value.parse::<u16>() {
        return Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port))
    }
    value
        .to_socket_addrs()?
        .next()
        .ok_or_else(|| SocketAddressParsingError::Parse(value.to_string()))
}

#[cfg(test)]
mod tests {
    use super::*;
    use proptest::prelude::Rng;
    use secp256k1::rand::thread_rng;

    #[test]
    fn parse_known_chain_spec() {
        for chain in SUPPORTED_CHAINS {
            chain_value_parser(chain).unwrap();
        }
    }

    #[test]
    fn parse_socket_addresses() {
        for value in ["localhost:9000", ":9000", "9000"] {
            let socket_addr = parse_socket_address(value)
                .unwrap_or_else(|_| panic!("could not parse socket address: {value}"));

            assert!(socket_addr.ip().is_loopback());
            assert_eq!(socket_addr.port(), 9000);
        }
    }

    #[test]
    fn parse_socket_address_random() {
        let port: u16 = thread_rng().gen();

        for value in [format!("localhost:{port}"), format!(":{port}"), port.to_string()] {
            let socket_addr = parse_socket_address(&value)
                .unwrap_or_else(|_| panic!("could not parse socket address: {value}"));

            assert!(socket_addr.ip().is_loopback());
            assert_eq!(socket_addr.port(), port);
        }
    }
}
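A sketch of the three address formats accepted by `parse_socket_address`, assuming the helpers are reachable as `reth_node_core::args::utils::{parse_socket_address, SocketAddressParsingError}` (assumed path):

// Illustrative only, not part of this commit.
use reth_node_core::args::utils::{parse_socket_address, SocketAddressParsingError}; // assumed path

fn main() -> Result<(), SocketAddressParsingError> {
    // A bare port or ":port" binds to localhost.
    assert_eq!(parse_socket_address("8545")?.port(), 8545);
    assert_eq!(parse_socket_address(":8545")?.port(), 8545);
    // "host:port" is resolved through ToSocketAddrs.
    assert!(parse_socket_address("localhost:8545")?.ip().is_loopback());
    Ok(())
}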