mirror of
https://github.com/paradigmxyz/reth.git
synced 2026-04-30 03:01:58 -04:00
chore: finally move node-core to node folder (#9191)
This commit is contained in:
62
crates/node/core/src/args/benchmark_args.rs
Normal file
62
crates/node/core/src/args/benchmark_args.rs
Normal file
@@ -0,0 +1,62 @@
|
||||
//! clap [Args](clap::Args) for benchmark configuration
|
||||
|
||||
use clap::Args;
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// Parameters for benchmark configuration
#[derive(Debug, Args, PartialEq, Eq, Default, Clone)]
#[command(next_help_heading = "Benchmark")]
pub struct BenchmarkArgs {
    /// Run the benchmark from a specific block.
    // NOTE(review): whether `from`/`to` are inclusive is decided by the
    // benchmark runner, not here — confirm at the call site.
    #[arg(long, verbatim_doc_comment)]
    pub from: Option<u64>,

    /// Run the benchmark to a specific block.
    #[arg(long, verbatim_doc_comment)]
    pub to: Option<u64>,

    /// Path to a JWT secret to use for the authenticated engine-API RPC server.
    ///
    /// This will perform JWT authentication for all requests to the given engine RPC url.
    ///
    /// If no path is provided, a secret will be generated and stored in the datadir under
    /// `<DIR>/<CHAIN_ID>/jwt.hex`. For mainnet this would be `~/.reth/mainnet/jwt.hex` by default.
    #[arg(long = "jwtsecret", value_name = "PATH", global = true, required = false)]
    pub auth_jwtsecret: Option<PathBuf>,

    /// The RPC url to use for sending engine requests.
    // Defaults to the conventional local engine-API endpoint.
    #[arg(
        long,
        value_name = "ENGINE_RPC_URL",
        verbatim_doc_comment,
        default_value = "http://localhost:8551"
    )]
    pub engine_rpc_url: String,

    /// The path to the output directory for granular benchmark results.
    #[arg(long, short, value_name = "BENCHMARK_OUTPUT", verbatim_doc_comment)]
    pub output: Option<PathBuf>,
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn test_parse_benchmark_args() {
        // Parsing an empty command line must produce the defaults, with the
        // engine RPC url falling back to its declared clap default.
        let parsed = CommandParser::<BenchmarkArgs>::parse_from(["reth-bench"]).args;
        let expected = BenchmarkArgs {
            engine_rpc_url: "http://localhost:8551".to_string(),
            ..Default::default()
        };
        assert_eq!(parsed, expected);
    }
}
|
||||
140
crates/node/core/src/args/database.rs
Normal file
140
crates/node/core/src/args/database.rs
Normal file
@@ -0,0 +1,140 @@
|
||||
//! clap [Args](clap::Args) for database configuration
|
||||
|
||||
use crate::version::default_client_version;
|
||||
use clap::{
|
||||
builder::{PossibleValue, TypedValueParser},
|
||||
error::ErrorKind,
|
||||
Arg, Args, Command, Error,
|
||||
};
|
||||
use reth_storage_errors::db::LogLevel;
|
||||
|
||||
/// Parameters for database configuration
#[derive(Debug, Args, PartialEq, Eq, Default, Clone, Copy)]
#[command(next_help_heading = "Database")]
pub struct DatabaseArgs {
    /// Database logging level. Levels higher than "notice" require a debug build.
    // Uses the custom `LogLevelValueParser` below so invalid values produce a
    // helpful error listing every variant with its help message.
    #[arg(long = "db.log-level", value_parser = LogLevelValueParser::default())]
    pub log_level: Option<LogLevel>,
    /// Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an
    /// NFS volume.
    #[arg(long = "db.exclusive")]
    pub exclusive: Option<bool>,
}
|
||||
|
||||
impl DatabaseArgs {
|
||||
/// Returns default database arguments with configured log level and client version.
|
||||
pub fn database_args(&self) -> reth_db::mdbx::DatabaseArguments {
|
||||
reth_db::mdbx::DatabaseArguments::new(default_client_version())
|
||||
.with_log_level(self.log_level)
|
||||
.with_exclusive(self.exclusive)
|
||||
}
|
||||
}
|
||||
|
||||
/// clap value parser for [`LogLevel`].
// Unit struct: all state lives in the `TypedValueParser` impl below.
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
struct LogLevelValueParser;
|
||||
|
||||
impl TypedValueParser for LogLevelValueParser {
    type Value = LogLevel;

    /// Parses a CLI-supplied string into a [`LogLevel`], producing a rich
    /// error that lists every valid variant on failure.
    fn parse_ref(
        &self,
        _cmd: &Command,
        arg: Option<&Arg>,
        value: &std::ffi::OsStr,
    ) -> Result<Self::Value, Error> {
        // Reject non-UTF-8 input up front with a clap-native error.
        let val =
            value.to_str().ok_or_else(|| Error::raw(ErrorKind::InvalidUtf8, "Invalid UTF-8"))?;

        val.parse::<LogLevel>().map_err(|err| {
            // `arg` is None when clap can't attribute the value to a flag;
            // fall back to a placeholder in the error message.
            let arg = arg.map(|a| a.to_string()).unwrap_or_else(|| "...".to_owned());
            // Build a one-variant-per-line listing of all accepted values.
            let possible_values = LogLevel::value_variants()
                .iter()
                .map(|v| format!("- {:?}: {}", v, v.help_message()))
                .collect::<Vec<_>>()
                .join("\n");
            let msg = format!(
                "Invalid value '{val}' for {arg}: {err}.\n Possible values:\n{possible_values}"
            );
            clap::Error::raw(clap::error::ErrorKind::InvalidValue, msg)
        })
    }

    /// Advertises the accepted values (with help text) for `--help` and
    /// shell completion.
    fn possible_values(&self) -> Option<Box<dyn Iterator<Item = PossibleValue> + '_>> {
        let values = LogLevel::value_variants()
            .iter()
            .map(|v| PossibleValue::new(v.variant_name()).help(v.help_message()));
        Some(Box::new(values))
    }
}
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    // An empty command line must parse to the derived defaults.
    #[test]
    fn test_default_database_args() {
        let default_args = DatabaseArgs::default();
        let args = CommandParser::<DatabaseArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }

    // Pins the exact name/help pairs the custom value parser advertises,
    // so a change to `LogLevel`'s variants or help text is caught here.
    #[test]
    fn test_possible_values() {
        // Initialize the LogLevelValueParser
        let parser = LogLevelValueParser;

        // Call the possible_values method
        let possible_values: Vec<PossibleValue> = parser.possible_values().unwrap().collect();

        // Expected possible values
        let expected_values = vec![
            PossibleValue::new("fatal")
                .help("Enables logging for critical conditions, i.e. assertion failures"),
            PossibleValue::new("error").help("Enables logging for error conditions"),
            PossibleValue::new("warn").help("Enables logging for warning conditions"),
            PossibleValue::new("notice")
                .help("Enables logging for normal but significant condition"),
            PossibleValue::new("verbose").help("Enables logging for verbose informational"),
            PossibleValue::new("debug").help("Enables logging for debug-level messages"),
            PossibleValue::new("trace").help("Enables logging for trace debug-level messages"),
            PossibleValue::new("extra").help("Enables logging for extra debug-level messages"),
        ];

        // Check that the possible values match the expected values
        assert_eq!(possible_values.len(), expected_values.len());
        for (actual, expected) in possible_values.iter().zip(expected_values.iter()) {
            assert_eq!(actual.get_name(), expected.get_name());
            assert_eq!(actual.get_help(), expected.get_help());
        }
    }

    // "Debug" (capitalized) parses — the value parser is case-tolerant via
    // `LogLevel::from_str`.
    #[test]
    fn test_command_parser_with_valid_log_level() {
        let cmd =
            CommandParser::<DatabaseArgs>::try_parse_from(["reth", "--db.log-level", "Debug"])
                .unwrap();
        assert_eq!(cmd.args.log_level, Some(LogLevel::Debug));
    }

    // An unknown level must be rejected rather than silently defaulted.
    #[test]
    fn test_command_parser_with_invalid_log_level() {
        let result =
            CommandParser::<DatabaseArgs>::try_parse_from(["reth", "--db.log-level", "invalid"]);
        assert!(result.is_err());
    }

    // Omitting the flag leaves the level unset (None), not a default level.
    #[test]
    fn test_command_parser_without_log_level() {
        let cmd = CommandParser::<DatabaseArgs>::try_parse_from(["reth"]).unwrap();
        assert_eq!(cmd.args.log_level, None);
    }
}
|
||||
53
crates/node/core/src/args/datadir_args.rs
Normal file
53
crates/node/core/src/args/datadir_args.rs
Normal file
@@ -0,0 +1,53 @@
|
||||
//! clap [Args](clap::Args) for datadir config
|
||||
|
||||
use crate::dirs::{ChainPath, DataDirPath, MaybePlatformPath};
|
||||
use clap::Args;
|
||||
use reth_chainspec::Chain;
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// Parameters for datadir configuration
#[derive(Debug, Args, PartialEq, Eq, Default, Clone)]
#[command(next_help_heading = "Datadir")]
pub struct DatadirArgs {
    /// The path to the data dir for all reth files and subdirectories.
    ///
    /// Defaults to the OS-specific data directory:
    ///
    /// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/`
    /// - Windows: `{FOLDERID_RoamingAppData}/reth/`
    /// - macOS: `$HOME/Library/Application Support/reth/`
    // `MaybePlatformPath` keeps "not specified" distinct from an explicit
    // path so `resolve_datadir` can apply the chain-specific default.
    #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)]
    pub datadir: MaybePlatformPath<DataDirPath>,

    /// The absolute path to store static files in.
    #[arg(long = "datadir.static_files", verbatim_doc_comment, value_name = "PATH")]
    pub static_files_path: Option<PathBuf>,
}
|
||||
|
||||
impl DatadirArgs {
|
||||
/// Resolves the final datadir path.
|
||||
pub fn resolve_datadir(self, chain: Chain) -> ChainPath<DataDirPath> {
|
||||
let datadir = self.datadir.clone();
|
||||
datadir.unwrap_or_chain_default(chain, self)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    /// Parsing an empty command line must yield the default `DatadirArgs`.
    #[test]
    fn test_parse_datadir_args() {
        let parsed = CommandParser::<DatadirArgs>::parse_from(["reth"]).args;
        assert_eq!(parsed, DatadirArgs::default());
    }
}
|
||||
78
crates/node/core/src/args/debug.rs
Normal file
78
crates/node/core/src/args/debug.rs
Normal file
@@ -0,0 +1,78 @@
|
||||
//! clap [Args](clap::Args) for debugging purposes
|
||||
|
||||
use clap::Args;
|
||||
use reth_primitives::B256;
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// Parameters for debugging purposes
#[derive(Debug, Clone, Args, PartialEq, Eq, Default)]
#[command(next_help_heading = "Debug")]
pub struct DebugArgs {
    /// Flag indicating whether the node should be terminated after the pipeline sync.
    #[arg(long = "debug.terminate", help_heading = "Debug")]
    pub terminate: bool,

    /// Set the chain tip manually for testing purposes.
    ///
    /// NOTE: This is a temporary flag
    #[arg(long = "debug.tip", help_heading = "Debug")]
    pub tip: Option<B256>,

    /// Runs the sync only up to the specified block.
    #[arg(long = "debug.max-block", help_heading = "Debug")]
    pub max_block: Option<u64>,

    /// Runs a fake consensus client that advances the chain using recent block hashes
    /// on Etherscan. If specified, requires an `ETHERSCAN_API_KEY` environment variable.
    // Nested Option: outer Some = flag was passed, inner Some = an explicit
    // API url was given (value_name suggests the inner value is the url —
    // confirm against the consumer).
    #[arg(
        long = "debug.etherscan",
        help_heading = "Debug",
        conflicts_with = "tip",
        conflicts_with = "rpc_consensus_ws",
        value_name = "ETHERSCAN_API_URL"
    )]
    pub etherscan: Option<Option<String>>,

    /// Runs a fake consensus client using blocks fetched from an RPC `WebSocket` endpoint.
    #[arg(
        long = "debug.rpc-consensus-ws",
        help_heading = "Debug",
        conflicts_with = "tip",
        conflicts_with = "etherscan"
    )]
    pub rpc_consensus_ws: Option<String>,

    /// If provided, the engine will skip `n` consecutive FCUs.
    #[arg(long = "debug.skip-fcu", help_heading = "Debug")]
    pub skip_fcu: Option<usize>,

    /// If provided, the engine will skip `n` consecutive new payloads.
    #[arg(long = "debug.skip-new-payload", help_heading = "Debug")]
    pub skip_new_payload: Option<usize>,

    /// The path to store engine API messages at.
    /// If specified, all of the intercepted engine API messages
    /// will be written to specified location.
    #[arg(long = "debug.engine-api-store", help_heading = "Debug", value_name = "PATH")]
    pub engine_api_store: Option<PathBuf>,
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    /// Parsing an empty command line must yield the default `DebugArgs`.
    // Renamed from `test_parse_database_args`: the old name was copy-pasted
    // from the database args module and misidentified what is under test.
    #[test]
    fn test_parse_debug_args() {
        let default_args = DebugArgs::default();
        let args = CommandParser::<DebugArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
}
|
||||
107
crates/node/core/src/args/dev.rs
Normal file
107
crates/node/core/src/args/dev.rs
Normal file
@@ -0,0 +1,107 @@
|
||||
//! clap [Args](clap::Args) for Dev testnet configuration
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use clap::Args;
|
||||
use humantime::parse_duration;
|
||||
|
||||
/// Parameters for Dev testnet configuration
#[derive(Debug, Args, PartialEq, Eq, Default, Clone, Copy)]
#[command(next_help_heading = "Dev testnet")]
pub struct DevArgs {
    /// Start the node in dev mode
    ///
    /// This mode uses a local proof-of-authority consensus engine with either fixed block times
    /// or automatically mined blocks.
    /// Disables network discovery and enables local http server.
    /// Prefunds 20 accounts derived by mnemonic "test test test test test test test test test test
    /// test junk" with 10 000 ETH each.
    // `--auto-mine` is kept as an alias for compatibility (see the tests).
    #[arg(long = "dev", alias = "auto-mine", help_heading = "Dev testnet", verbatim_doc_comment)]
    pub dev: bool,

    /// How many transactions to mine per block.
    // Mutually exclusive with `--dev.block-time` — clap enforces the conflict.
    #[arg(
        long = "dev.block-max-transactions",
        help_heading = "Dev testnet",
        conflicts_with = "block_time"
    )]
    pub block_max_transactions: Option<usize>,

    /// Interval between blocks.
    ///
    /// Parses strings using [`humantime::parse_duration`]
    /// --dev.block-time 12s
    #[arg(
        long = "dev.block-time",
        help_heading = "Dev testnet",
        conflicts_with = "block_max_transactions",
        value_parser = parse_duration,
        verbatim_doc_comment
    )]
    pub block_time: Option<Duration>,
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    // Covers: defaults, the `--dev` flag, its `--auto-mine` alias, and each
    // of the two mutually-exclusive block-production options on its own.
    #[test]
    fn test_parse_dev_args() {
        let args = CommandParser::<DevArgs>::parse_from(["reth"]).args;
        assert_eq!(args, DevArgs { dev: false, block_max_transactions: None, block_time: None });

        let args = CommandParser::<DevArgs>::parse_from(["reth", "--dev"]).args;
        assert_eq!(args, DevArgs { dev: true, block_max_transactions: None, block_time: None });

        // `--auto-mine` must behave identically to `--dev` (declared alias).
        let args = CommandParser::<DevArgs>::parse_from(["reth", "--auto-mine"]).args;
        assert_eq!(args, DevArgs { dev: true, block_max_transactions: None, block_time: None });

        let args = CommandParser::<DevArgs>::parse_from([
            "reth",
            "--dev",
            "--dev.block-max-transactions",
            "2",
        ])
        .args;
        assert_eq!(args, DevArgs { dev: true, block_max_transactions: Some(2), block_time: None });

        // "1s" goes through `humantime::parse_duration`.
        let args =
            CommandParser::<DevArgs>::parse_from(["reth", "--dev", "--dev.block-time", "1s"]).args;
        assert_eq!(
            args,
            DevArgs {
                dev: true,
                block_max_transactions: None,
                block_time: Some(std::time::Duration::from_secs(1))
            }
        );
    }

    // Passing both block-production options must be rejected by clap's
    // `conflicts_with` declarations.
    #[test]
    fn test_parse_dev_args_conflicts() {
        let args = CommandParser::<DevArgs>::try_parse_from([
            "reth",
            "--dev",
            "--dev.block-max-transactions",
            "2",
            "--dev.block-time",
            "1s",
        ]);
        assert!(args.is_err());
    }

    // The derived `Default` must agree with what clap produces for an empty
    // command line.
    #[test]
    fn dev_args_default_sanity_check() {
        let default_args = DevArgs::default();
        let args = CommandParser::<DevArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
}
|
||||
86
crates/node/core/src/args/gas_price_oracle.rs
Normal file
86
crates/node/core/src/args/gas_price_oracle.rs
Normal file
@@ -0,0 +1,86 @@
|
||||
use crate::primitives::U256;
|
||||
use clap::Args;
|
||||
use reth_rpc_eth_types::GasPriceOracleConfig;
|
||||
use reth_rpc_server_types::constants::gas_oracle::{
|
||||
DEFAULT_GAS_PRICE_BLOCKS, DEFAULT_GAS_PRICE_PERCENTILE, DEFAULT_IGNORE_GAS_PRICE,
|
||||
DEFAULT_MAX_GAS_PRICE,
|
||||
};
|
||||
|
||||
/// Parameters to configure Gas Price Oracle
|
||||
#[derive(Debug, Clone, Copy, Args, PartialEq, Eq)]
|
||||
#[command(next_help_heading = "Gas Price Oracle")]
|
||||
pub struct GasPriceOracleArgs {
|
||||
/// Number of recent blocks to check for gas price
|
||||
#[arg(long = "gpo.blocks", default_value_t = DEFAULT_GAS_PRICE_BLOCKS)]
|
||||
pub blocks: u32,
|
||||
|
||||
/// Gas Price below which gpo will ignore transactions
|
||||
#[arg(long = "gpo.ignoreprice", default_value_t = DEFAULT_IGNORE_GAS_PRICE.to())]
|
||||
pub ignore_price: u64,
|
||||
|
||||
/// Maximum transaction priority fee(or gasprice before London Fork) to be recommended by gpo
|
||||
#[arg(long = "gpo.maxprice", default_value_t = DEFAULT_MAX_GAS_PRICE.to())]
|
||||
pub max_price: u64,
|
||||
|
||||
/// The percentile of gas prices to use for the estimate
|
||||
#[arg(long = "gpo.percentile", default_value_t = DEFAULT_GAS_PRICE_PERCENTILE)]
|
||||
pub percentile: u32,
|
||||
}
|
||||
|
||||
impl GasPriceOracleArgs {
|
||||
/// Returns a [`GasPriceOracleConfig`] from the arguments.
|
||||
pub fn gas_price_oracle_config(&self) -> GasPriceOracleConfig {
|
||||
let Self { blocks, ignore_price, max_price, percentile } = self;
|
||||
GasPriceOracleConfig {
|
||||
max_price: Some(U256::from(*max_price)),
|
||||
ignore_price: Some(U256::from(*ignore_price)),
|
||||
percentile: *percentile,
|
||||
blocks: *blocks,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for GasPriceOracleArgs {
    // Hand-written (not derived) so the Rust defaults match the clap
    // `default_value_t` constants declared on the struct's fields.
    fn default() -> Self {
        Self {
            blocks: DEFAULT_GAS_PRICE_BLOCKS,
            ignore_price: DEFAULT_IGNORE_GAS_PRICE.to(),
            max_price: DEFAULT_MAX_GAS_PRICE.to(),
            percentile: DEFAULT_GAS_PRICE_PERCENTILE,
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;
    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    // An empty command line must produce the documented default constants.
    #[test]
    fn test_parse_gpo_args() {
        let args = CommandParser::<GasPriceOracleArgs>::parse_from(["reth"]).args;
        assert_eq!(
            args,
            GasPriceOracleArgs {
                blocks: DEFAULT_GAS_PRICE_BLOCKS,
                ignore_price: DEFAULT_IGNORE_GAS_PRICE.to(),
                max_price: DEFAULT_MAX_GAS_PRICE.to(),
                percentile: DEFAULT_GAS_PRICE_PERCENTILE,
            }
        );
    }

    // The hand-written `Default` impl must agree with the clap defaults.
    #[test]
    fn gpo_args_default_sanity_test() {
        let default_args = GasPriceOracleArgs::default();
        let args = CommandParser::<GasPriceOracleArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
}
|
||||
176
crates/node/core/src/args/log.rs
Normal file
176
crates/node/core/src/args/log.rs
Normal file
@@ -0,0 +1,176 @@
|
||||
//! clap [Args](clap::Args) for logging configuration.
|
||||
|
||||
use crate::dirs::{LogsDir, PlatformPath};
|
||||
use clap::{ArgAction, Args, ValueEnum};
|
||||
use reth_tracing::{
|
||||
tracing_subscriber::filter::Directive, FileInfo, FileWorkerGuard, LayerInfo, LogFormat,
|
||||
RethTracer, Tracer,
|
||||
};
|
||||
use std::{fmt, fmt::Display};
|
||||
use tracing::{level_filters::LevelFilter, Level};
|
||||
/// Constant to convert megabytes to bytes
|
||||
const MB_TO_BYTES: u64 = 1024 * 1024;
|
||||
|
||||
/// The log configuration.
#[derive(Debug, Args)]
#[command(next_help_heading = "Logging")]
pub struct LogArgs {
    /// The format to use for logs written to stdout.
    #[arg(long = "log.stdout.format", value_name = "FORMAT", global = true, default_value_t = LogFormat::Terminal)]
    pub log_stdout_format: LogFormat,

    /// The filter to use for logs written to stdout.
    // Empty default: stdout filtering is governed by the verbosity directive.
    #[arg(long = "log.stdout.filter", value_name = "FILTER", global = true, default_value = "")]
    pub log_stdout_filter: String,

    /// The format to use for logs written to the log file.
    #[arg(long = "log.file.format", value_name = "FORMAT", global = true, default_value_t = LogFormat::Terminal)]
    pub log_file_format: LogFormat,

    /// The filter to use for logs written to the log file.
    #[arg(long = "log.file.filter", value_name = "FILTER", global = true, default_value = "debug")]
    pub log_file_filter: String,

    /// The path to put log files in.
    #[arg(long = "log.file.directory", value_name = "PATH", global = true, default_value_t)]
    pub log_file_directory: PlatformPath<LogsDir>,

    /// The maximum size (in MB) of one log file.
    // Converted to bytes via MB_TO_BYTES in `file_info`.
    #[arg(long = "log.file.max-size", value_name = "SIZE", global = true, default_value_t = 200)]
    pub log_file_max_size: u64,

    /// The maximum amount of log files that will be stored. If set to 0, background file logging
    /// is disabled.
    #[arg(long = "log.file.max-files", value_name = "COUNT", global = true, default_value_t = 5)]
    pub log_file_max_files: usize,

    /// Write logs to journald.
    #[arg(long = "log.journald", global = true)]
    pub journald: bool,

    /// The filter to use for logs written to journald.
    #[arg(
        long = "log.journald.filter",
        value_name = "FILTER",
        global = true,
        default_value = "error"
    )]
    pub journald_filter: String,

    /// Sets whether or not the formatter emits ANSI terminal escape codes for colors and other
    /// text formatting.
    #[arg(
        long,
        value_name = "COLOR",
        global = true,
        default_value_t = ColorMode::Always
    )]
    pub color: ColorMode,
    /// The verbosity settings for the tracer.
    #[command(flatten)]
    pub verbosity: Verbosity,
}
|
||||
|
||||
impl LogArgs {
    /// Creates a [`LayerInfo`] instance.
    ///
    /// `use_color` is false for file output, where ANSI escape codes would
    /// corrupt the log file.
    fn layer(&self, format: LogFormat, filter: String, use_color: bool) -> LayerInfo {
        LayerInfo::new(
            format,
            self.verbosity.directive().to_string(),
            filter,
            if use_color { Some(self.color.to_string()) } else { None },
        )
    }

    /// File info from the current log options.
    fn file_info(&self) -> FileInfo {
        FileInfo::new(
            self.log_file_directory.clone().into(),
            // CLI value is in MB; FileInfo expects bytes.
            self.log_file_max_size * MB_TO_BYTES,
            self.log_file_max_files,
        )
    }

    /// Initializes tracing with the configured options from cli args.
    ///
    /// Returns the file worker guard, and the file name, if a file worker was configured.
    pub fn init_tracing(&self) -> eyre::Result<Option<FileWorkerGuard>> {
        let mut tracer = RethTracer::new();

        // stdout layer is always installed.
        let stdout = self.layer(self.log_stdout_format, self.log_stdout_filter.clone(), true);
        tracer = tracer.with_stdout(stdout);

        if self.journald {
            tracer = tracer.with_journald(self.journald_filter.clone());
        }

        // A max-files count of 0 disables file logging entirely (see the
        // `log_file_max_files` field docs).
        if self.log_file_max_files > 0 {
            let info = self.file_info();
            let file = self.layer(self.log_file_format, self.log_file_filter.clone(), false);
            tracer = tracer.with_file(file, info);
        }

        let guard = tracer.init()?;
        Ok(guard)
    }
}
|
||||
|
||||
/// The color mode for the cli.
|
||||
#[derive(Debug, Copy, Clone, ValueEnum, Eq, PartialEq)]
|
||||
pub enum ColorMode {
|
||||
/// Colors on
|
||||
Always,
|
||||
/// Colors on
|
||||
Auto,
|
||||
/// Colors off
|
||||
Never,
|
||||
}
|
||||
|
||||
impl Display for ColorMode {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Self::Always => write!(f, "always"),
|
||||
Self::Auto => write!(f, "auto"),
|
||||
Self::Never => write!(f, "never"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The verbosity settings for the cli.
#[derive(Debug, Copy, Clone, Args)]
#[command(next_help_heading = "Display")]
pub struct Verbosity {
    /// Set the minimum log level.
    ///
    /// -v Errors
    /// -vv Warnings
    /// -vvv Info
    /// -vvvv Debug
    /// -vvvvv Traces (warning: very verbose!)
    // Count of `-v` occurrences; defaults to 3 (Info) when the flag is absent.
    #[arg(short, long, action = ArgAction::Count, global = true, default_value_t = 3, verbatim_doc_comment, help_heading = "Display")]
    verbosity: u8,

    /// Silence all log output.
    // Overrides `verbosity` entirely — see `directive()`.
    #[arg(long, alias = "silent", short = 'q', global = true, help_heading = "Display")]
    quiet: bool,
}
|
||||
|
||||
impl Verbosity {
|
||||
/// Get the corresponding [Directive] for the given verbosity, or none if the verbosity
|
||||
/// corresponds to silent.
|
||||
pub fn directive(&self) -> Directive {
|
||||
if self.quiet {
|
||||
LevelFilter::OFF.into()
|
||||
} else {
|
||||
let level = match self.verbosity - 1 {
|
||||
0 => Level::ERROR,
|
||||
1 => Level::WARN,
|
||||
2 => Level::INFO,
|
||||
3 => Level::DEBUG,
|
||||
_ => Level::TRACE,
|
||||
};
|
||||
|
||||
level.into()
|
||||
}
|
||||
}
|
||||
}
|
||||
64
crates/node/core/src/args/mod.rs
Normal file
64
crates/node/core/src/args/mod.rs
Normal file
@@ -0,0 +1,64 @@
|
||||
//! Parameters for configuring the node with more granularity via CLI

/// `NetworkArgs` struct for configuring the network
mod network;
pub use network::{DiscoveryArgs, NetworkArgs};

/// `RpcServerArgs` struct for configuring the RPC
mod rpc_server;
pub use rpc_server::RpcServerArgs;

/// `RpcStateCacheArgs` struct for configuring RPC state cache
mod rpc_state_cache;
pub use rpc_state_cache::RpcStateCacheArgs;

/// `DebugArgs` struct for debugging purposes
mod debug;
pub use debug::DebugArgs;

/// `DatabaseArgs` struct for configuring the database
mod database;
pub use database::DatabaseArgs;

/// `LogArgs` struct for configuring the logger
mod log;
pub use log::{ColorMode, LogArgs};

mod secret_key;
pub use secret_key::{get_secret_key, SecretKeyError};

/// `PayloadBuilderArgs` struct for configuring the payload builder
mod payload_builder;
pub use payload_builder::PayloadBuilderArgs;

/// Stage related arguments
mod stage;
pub use stage::StageEnum;

/// Gas price oracle related arguments
mod gas_price_oracle;
pub use gas_price_oracle::GasPriceOracleArgs;

/// `TxPoolArgs` for configuring the transaction pool
mod txpool;
pub use txpool::TxPoolArgs;

/// `DevArgs` for configuring the dev testnet
mod dev;
pub use dev::DevArgs;

/// `PruneArgs` for configuring the pruning and full node
mod pruning;
pub use pruning::PruningArgs;

/// `DatadirArgs` for configuring data storage paths
mod datadir_args;
pub use datadir_args::DatadirArgs;

/// `BenchmarkArgs` struct for configuring the benchmark to run
mod benchmark_args;
pub use benchmark_args::BenchmarkArgs;

pub mod utils;

pub mod types;
|
||||
499
crates/node/core/src/args/network.rs
Normal file
499
crates/node/core/src/args/network.rs
Normal file
@@ -0,0 +1,499 @@
|
||||
//! clap [Args](clap::Args) for network related arguments.
|
||||
|
||||
use crate::version::P2P_CLIENT_VERSION;
|
||||
use clap::Args;
|
||||
use reth_chainspec::ChainSpec;
|
||||
use reth_config::Config;
|
||||
use reth_discv4::{NodeRecord, DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT};
|
||||
use reth_discv5::{
|
||||
discv5::ListenConfig, DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_PORT,
|
||||
DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL,
|
||||
};
|
||||
use reth_net_nat::NatResolver;
|
||||
use reth_network::{
|
||||
transactions::{
|
||||
TransactionFetcherConfig, TransactionsManagerConfig,
|
||||
DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ,
|
||||
SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE,
|
||||
},
|
||||
HelloMessageWithProtocols, NetworkConfigBuilder, SessionsConfig,
|
||||
};
|
||||
use reth_network_peers::{mainnet_nodes, TrustedPeer};
|
||||
use secp256k1::SecretKey;
|
||||
use std::{
|
||||
net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6},
|
||||
ops::Not,
|
||||
path::PathBuf,
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
/// Parameters for configuring the network more granularity via CLI
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "Networking")]
pub struct NetworkArgs {
    /// Arguments to setup discovery service.
    #[command(flatten)]
    pub discovery: DiscoveryArgs,

    #[allow(clippy::doc_markdown)]
    /// Comma separated enode URLs of trusted peers for P2P connections.
    ///
    /// --trusted-peers enode://abcd@192.168.0.1:30303
    #[arg(long, value_delimiter = ',')]
    pub trusted_peers: Vec<TrustedPeer>,

    /// Connect to or accept from trusted peers only
    #[arg(long)]
    pub trusted_only: bool,

    /// Comma separated enode URLs for P2P discovery bootstrap.
    ///
    /// Will fall back to a network-specific default if not specified.
    #[arg(long, value_delimiter = ',')]
    pub bootnodes: Option<Vec<TrustedPeer>>,

    /// Amount of DNS resolution requests retries to perform when peering.
    #[arg(long, default_value_t = 0)]
    pub dns_retries: usize,

    /// The path to the known peers file. Connected peers are dumped to this file on nodes
    /// shutdown, and read on startup. Cannot be used with `--no-persist-peers`.
    #[arg(long, value_name = "FILE", verbatim_doc_comment, conflicts_with = "no_persist_peers")]
    pub peers_file: Option<PathBuf>,

    /// Custom node identity
    #[arg(long, value_name = "IDENTITY", default_value = P2P_CLIENT_VERSION)]
    pub identity: String,

    /// Secret key to use for this node.
    ///
    /// This will also deterministically set the peer ID. If not specified, it will be set in the
    /// data dir for the chain being used.
    #[arg(long, value_name = "PATH")]
    pub p2p_secret_key: Option<PathBuf>,

    /// Do not persist peers.
    #[arg(long, verbatim_doc_comment)]
    pub no_persist_peers: bool,

    /// NAT resolution method (any|none|upnp|publicip|extip:\<IP\>)
    #[arg(long, default_value = "any")]
    pub nat: NatResolver,

    /// Network listening address
    #[arg(long = "addr", value_name = "ADDR", default_value_t = DEFAULT_DISCOVERY_ADDR)]
    pub addr: IpAddr,

    /// Network listening port
    #[arg(long = "port", value_name = "PORT", default_value_t = DEFAULT_DISCOVERY_PORT)]
    pub port: u16,

    /// Maximum number of outbound requests. default: 100
    // NOTE(review): the "default: 100" in the help refers to downstream
    // behavior when this is None — confirm against the network config.
    #[arg(long)]
    pub max_outbound_peers: Option<usize>,

    /// Maximum number of inbound requests. default: 30
    #[arg(long)]
    pub max_inbound_peers: Option<usize>,

    /// Experimental, for usage in research. Sets the max accumulated byte size of transactions
    /// to pack in one response.
    /// Spec'd at 2MiB.
    #[arg(long = "pooled-tx-response-soft-limit", value_name = "BYTES", default_value_t = SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, verbatim_doc_comment)]
    pub soft_limit_byte_size_pooled_transactions_response: usize,

    /// Experimental, for usage in research. Sets the max accumulated byte size of transactions to
    /// request in one request.
    ///
    /// Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a
    /// transaction announcement (see `RLPx` specs). This allows a node to request a specific size
    /// response.
    ///
    /// By default, nodes request only 128 KiB worth of transactions, but should a peer request
    /// more, up to 2 MiB, a node will answer with more than 128 KiB.
    ///
    /// Default is 128 KiB.
    #[arg(long = "pooled-tx-pack-soft-limit", value_name = "BYTES", default_value_t = DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, verbatim_doc_comment)]
    pub soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize,
}
|
||||
|
||||
impl NetworkArgs {
    /// Build a [`NetworkConfigBuilder`] from a [`Config`] and a [`ChainSpec`], in addition to the
    /// values in this option struct.
    ///
    /// The `default_peers_file` will be used as the default location to store the persistent peers
    /// file if `no_persist_peers` is false, and there is no provided `peers_file`.
    pub fn network_config(
        &self,
        config: &Config,
        chain_spec: Arc<ChainSpec>,
        secret_key: SecretKey,
        default_peers_file: PathBuf,
    ) -> NetworkConfigBuilder {
        // Fall back to the mainnet boot nodes if the chain spec does not define any.
        let chain_bootnodes = chain_spec.bootnodes().unwrap_or_else(mainnet_nodes);
        let peers_file = self.peers_file.clone().unwrap_or(default_peers_file);

        // Configure peer connections: CLI-provided limits (if any) override the config file.
        let peers_config = config
            .peers
            .clone()
            .with_max_inbound_opt(self.max_inbound_peers)
            .with_max_outbound_opt(self.max_outbound_peers);

        // Configure transactions manager with the experimental soft byte-size limits.
        let transactions_manager_config = TransactionsManagerConfig {
            transaction_fetcher_config: TransactionFetcherConfig::new(
                self.soft_limit_byte_size_pooled_transactions_response,
                self.soft_limit_byte_size_pooled_transactions_response_on_pack_request,
            ),
        };

        // Configure basic network stack
        NetworkConfigBuilder::new(secret_key)
            // Seed the peer config with peers read from the persisted peers file, if enabled.
            .peer_config(config.peers_config_with_basic_nodes_from_file(
                self.persistent_peers_file(peers_file).as_deref(),
            ))
            .external_ip_resolver(self.nat)
            .sessions_config(
                SessionsConfig::default().with_upscaled_event_buffer(peers_config.max_peers()),
            )
            .peer_config(peers_config)
            // `chain_bootnodes` is needed again below for the discovery settings, hence the clone.
            .boot_nodes(chain_bootnodes.clone())
            .chain_spec(chain_spec)
            .transactions_manager_config(transactions_manager_config)
            // Configure node identity
            .apply(|builder| {
                let peer_id = builder.get_peer_id();
                builder.hello_message(
                    HelloMessageWithProtocols::builder(peer_id)
                        .client_version(&self.identity)
                        .build(),
                )
            })
            // apply discovery settings
            .apply(|builder| {
                let rlpx_socket = (self.addr, self.port).into();
                self.discovery.apply_to_builder(builder, rlpx_socket, chain_bootnodes)
            })
            // RLPx listener socket, from `--addr` / `--port`.
            .listener_addr(SocketAddr::new(
                self.addr,
                self.port,
            ))
            // Discv4 discovery socket, from `--discovery.addr` / `--discovery.port`.
            .discovery_addr(SocketAddr::new(
                self.discovery.addr,
                self.discovery.port,
            ))
    }

    /// If `no_persist_peers` is false then this returns the path to the persistent peers file path.
    pub fn persistent_peers_file(&self, peers_file: PathBuf) -> Option<PathBuf> {
        // `--no-persist-peers` suppresses the file entirely.
        self.no_persist_peers.not().then_some(peers_file)
    }

    /// Sets the p2p port to zero, to allow the OS to assign a random unused port when
    /// the network components bind to a socket.
    pub const fn with_unused_p2p_port(mut self) -> Self {
        self.port = 0;
        self
    }

    /// Sets the p2p and discovery ports to zero, allowing the OS to assign a random unused port
    /// when network components bind to sockets.
    pub const fn with_unused_ports(mut self) -> Self {
        self = self.with_unused_p2p_port();
        self.discovery = self.discovery.with_unused_discovery_port();
        self
    }

    /// Change networking port numbers based on the instance number.
    /// Ports are updated to `previous_value + instance - 1`
    ///
    /// # Panics
    /// Warning: if `instance` is zero in debug mode, this will panic.
    pub fn adjust_instance_ports(&mut self, instance: u16) {
        debug_assert_ne!(instance, 0, "instance must be non-zero");
        // Instance 1 keeps the default ports; each further instance shifts them up by one.
        self.port += instance - 1;
        self.discovery.adjust_instance_ports(instance);
    }
}
|
||||
|
||||
impl Default for NetworkArgs {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
discovery: DiscoveryArgs::default(),
|
||||
trusted_peers: vec![],
|
||||
trusted_only: false,
|
||||
bootnodes: None,
|
||||
dns_retries: 0,
|
||||
peers_file: None,
|
||||
identity: P2P_CLIENT_VERSION.to_string(),
|
||||
p2p_secret_key: None,
|
||||
no_persist_peers: false,
|
||||
nat: NatResolver::Any,
|
||||
addr: DEFAULT_DISCOVERY_ADDR,
|
||||
port: DEFAULT_DISCOVERY_PORT,
|
||||
max_outbound_peers: None,
|
||||
max_inbound_peers: None,
|
||||
soft_limit_byte_size_pooled_transactions_response:
|
||||
SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE,
|
||||
soft_limit_byte_size_pooled_transactions_response_on_pack_request: DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Arguments to setup discovery
|
||||
#[derive(Debug, Clone, Args, PartialEq, Eq)]
|
||||
pub struct DiscoveryArgs {
|
||||
/// Disable the discovery service.
|
||||
#[arg(short, long, default_value_if("dev", "true", "true"))]
|
||||
pub disable_discovery: bool,
|
||||
|
||||
/// Disable the DNS discovery.
|
||||
#[arg(long, conflicts_with = "disable_discovery")]
|
||||
pub disable_dns_discovery: bool,
|
||||
|
||||
/// Disable Discv4 discovery.
|
||||
#[arg(long, conflicts_with = "disable_discovery")]
|
||||
pub disable_discv4_discovery: bool,
|
||||
|
||||
/// Enable Discv5 discovery.
|
||||
#[arg(long, conflicts_with = "disable_discovery")]
|
||||
pub enable_discv5_discovery: bool,
|
||||
|
||||
/// The UDP address to use for devp2p peer discovery version 4.
|
||||
#[arg(id = "discovery.addr", long = "discovery.addr", value_name = "DISCOVERY_ADDR", default_value_t = DEFAULT_DISCOVERY_ADDR)]
|
||||
pub addr: IpAddr,
|
||||
|
||||
/// The UDP port to use for devp2p peer discovery version 4.
|
||||
#[arg(id = "discovery.port", long = "discovery.port", value_name = "DISCOVERY_PORT", default_value_t = DEFAULT_DISCOVERY_PORT)]
|
||||
pub port: u16,
|
||||
|
||||
/// The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx`
|
||||
/// address, if it's also IPv4.
|
||||
#[arg(id = "discovery.v5.addr", long = "discovery.v5.addr", value_name = "DISCOVERY_V5_ADDR", default_value = None)]
|
||||
pub discv5_addr: Option<Ipv4Addr>,
|
||||
|
||||
/// The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx`
|
||||
/// address, if it's also IPv6.
|
||||
#[arg(id = "discovery.v5.addr.ipv6", long = "discovery.v5.addr.ipv6", value_name = "DISCOVERY_V5_ADDR_IPV6", default_value = None)]
|
||||
pub discv5_addr_ipv6: Option<Ipv6Addr>,
|
||||
|
||||
/// The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is
|
||||
/// IPv4, or `--discv5.addr` is set.
|
||||
#[arg(id = "discovery.v5.port", long = "discovery.v5.port", value_name = "DISCOVERY_V5_PORT",
|
||||
default_value_t = DEFAULT_DISCOVERY_V5_PORT)]
|
||||
pub discv5_port: u16,
|
||||
|
||||
/// The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is
|
||||
/// IPv6, or `--discv5.addr.ipv6` is set.
|
||||
#[arg(id = "discovery.v5.port.ipv6", long = "discovery.v5.port.ipv6", value_name = "DISCOVERY_V5_PORT_IPV6",
|
||||
default_value = None, default_value_t = DEFAULT_DISCOVERY_V5_PORT)]
|
||||
pub discv5_port_ipv6: u16,
|
||||
|
||||
/// The interval in seconds at which to carry out periodic lookup queries, for the whole
|
||||
/// run of the program.
|
||||
#[arg(id = "discovery.v5.lookup-interval", long = "discovery.v5.lookup-interval", value_name = "DISCOVERY_V5_LOOKUP_INTERVAL", default_value_t = DEFAULT_SECONDS_LOOKUP_INTERVAL)]
|
||||
pub discv5_lookup_interval: u64,
|
||||
|
||||
/// The interval in seconds at which to carry out boost lookup queries, for a fixed number of
|
||||
/// times, at bootstrap.
|
||||
#[arg(id = "discovery.v5.bootstrap.lookup-interval", long = "discovery.v5.bootstrap.lookup-interval", value_name = "DISCOVERY_V5_bootstrap_lookup_interval",
|
||||
default_value_t = DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL)]
|
||||
pub discv5_bootstrap_lookup_interval: u64,
|
||||
|
||||
/// The number of times to carry out boost lookup queries at bootstrap.
|
||||
#[arg(id = "discovery.v5.bootstrap.lookup-countdown", long = "discovery.v5.bootstrap.lookup-countdown", value_name = "DISCOVERY_V5_bootstrap_lookup_countdown",
|
||||
default_value_t = DEFAULT_COUNT_BOOTSTRAP_LOOKUPS)]
|
||||
pub discv5_bootstrap_lookup_countdown: u64,
|
||||
}
|
||||
|
||||
impl DiscoveryArgs {
    /// Apply the discovery settings to the given [`NetworkConfigBuilder`]
    pub fn apply_to_builder(
        &self,
        mut network_config_builder: NetworkConfigBuilder,
        rlpx_tcp_socket: SocketAddr,
        boot_nodes: impl IntoIterator<Item = NodeRecord>,
    ) -> NetworkConfigBuilder {
        // `--disable-discovery` disables every discovery flavor at once.
        if self.disable_discovery || self.disable_dns_discovery {
            network_config_builder = network_config_builder.disable_dns_discovery();
        }

        if self.disable_discovery || self.disable_discv4_discovery {
            network_config_builder = network_config_builder.disable_discv4_discovery();
        }

        // Discv5 is opt-in and still gated by the global disable flag.
        if !self.disable_discovery && self.enable_discv5_discovery {
            network_config_builder = network_config_builder
                .discovery_v5(self.discovery_v5_builder(rlpx_tcp_socket, boot_nodes));
        }

        network_config_builder
    }

    /// Creates a [`reth_discv5::ConfigBuilder`] filling it with the values from this struct.
    pub fn discovery_v5_builder(
        &self,
        rlpx_tcp_socket: SocketAddr,
        boot_nodes: impl IntoIterator<Item = NodeRecord>,
    ) -> reth_discv5::ConfigBuilder {
        // Destructure so the remaining code cannot accidentally read a non-discv5 field.
        let Self {
            discv5_addr,
            discv5_addr_ipv6,
            discv5_port,
            discv5_port_ipv6,
            discv5_lookup_interval,
            discv5_bootstrap_lookup_interval,
            discv5_bootstrap_lookup_countdown,
            ..
        } = self;

        // Use rlpx address if none given; the fallback only applies for the matching IP family.
        let discv5_addr_ipv4 = discv5_addr.or(match rlpx_tcp_socket {
            SocketAddr::V4(addr) => Some(*addr.ip()),
            SocketAddr::V6(_) => None,
        });
        let discv5_addr_ipv6 = discv5_addr_ipv6.or(match rlpx_tcp_socket {
            SocketAddr::V4(_) => None,
            SocketAddr::V6(addr) => Some(*addr.ip()),
        });

        // Listen on whichever of the IPv4/IPv6 sockets ended up with an address.
        reth_discv5::Config::builder(rlpx_tcp_socket)
            .discv5_config(
                reth_discv5::discv5::ConfigBuilder::new(ListenConfig::from_two_sockets(
                    discv5_addr_ipv4.map(|addr| SocketAddrV4::new(addr, *discv5_port)),
                    discv5_addr_ipv6.map(|addr| SocketAddrV6::new(addr, *discv5_port_ipv6, 0, 0)),
                ))
                .build(),
            )
            .add_unsigned_boot_nodes(boot_nodes)
            .lookup_interval(*discv5_lookup_interval)
            .bootstrap_lookup_interval(*discv5_bootstrap_lookup_interval)
            .bootstrap_lookup_countdown(*discv5_bootstrap_lookup_countdown)
    }

    /// Set the discovery port to zero, to allow the OS to assign a random unused port when
    /// discovery binds to the socket.
    pub const fn with_unused_discovery_port(mut self) -> Self {
        self.port = 0;
        self
    }

    /// Change networking port numbers based on the instance number.
    /// Ports are updated to `previous_value + instance - 1`
    ///
    /// # Panics
    /// Warning: if `instance` is zero in debug mode, this will panic.
    pub fn adjust_instance_ports(&mut self, instance: u16) {
        debug_assert_ne!(instance, 0, "instance must be non-zero");
        // Instance 1 keeps the defaults; discv4 and both discv5 ports shift together.
        self.port += instance - 1;
        self.discv5_port += instance - 1;
        self.discv5_port_ipv6 += instance - 1;
    }
}
|
||||
|
||||
impl Default for DiscoveryArgs {
    fn default() -> Self {
        // Keep in sync with the clap defaults on `DiscoveryArgs` so a flag-less parse equals
        // `Self::default()`.
        Self {
            // DNS and discv4 discovery are on by default; discv5 is opt-in.
            disable_discovery: false,
            disable_dns_discovery: false,
            disable_discv4_discovery: false,
            enable_discv5_discovery: false,
            // Discv4 socket.
            addr: DEFAULT_DISCOVERY_ADDR,
            port: DEFAULT_DISCOVERY_PORT,
            // Discv5 addresses default to `None` and fall back to the RLPx address at build time
            // (see `discovery_v5_builder`).
            discv5_addr: None,
            discv5_addr_ipv6: None,
            discv5_port: DEFAULT_DISCOVERY_V5_PORT,
            discv5_port_ipv6: DEFAULT_DISCOVERY_V5_PORT,
            // Discv5 lookup cadence.
            discv5_lookup_interval: DEFAULT_SECONDS_LOOKUP_INTERVAL,
            discv5_bootstrap_lookup_interval: DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL,
            discv5_bootstrap_lookup_countdown: DEFAULT_COUNT_BOOTSTRAP_LOOKUPS,
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;
    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    /// `--nat` accepts both the plain variants and the `extip:<IP>` form.
    #[test]
    fn parse_nat_args() {
        let args = CommandParser::<NetworkArgs>::parse_from(["reth", "--nat", "none"]).args;
        assert_eq!(args.nat, NatResolver::None);

        let args =
            CommandParser::<NetworkArgs>::parse_from(["reth", "--nat", "extip:0.0.0.0"]).args;
        assert_eq!(args.nat, NatResolver::ExternalIp("0.0.0.0".parse().unwrap()));
    }

    /// Peer limits are independent: an unset limit stays `None`.
    #[test]
    fn parse_peer_args() {
        let args =
            CommandParser::<NetworkArgs>::parse_from(["reth", "--max-outbound-peers", "50"]).args;
        assert_eq!(args.max_outbound_peers, Some(50));
        assert_eq!(args.max_inbound_peers, None);

        let args = CommandParser::<NetworkArgs>::parse_from([
            "reth",
            "--max-outbound-peers",
            "75",
            "--max-inbound-peers",
            "15",
        ])
        .args;
        assert_eq!(args.max_outbound_peers, Some(75));
        assert_eq!(args.max_inbound_peers, Some(15));
    }

    /// `--trusted-peers` splits a comma-separated list of enode URLs
    /// (`value_delimiter = ','` on the field).
    #[test]
    fn parse_trusted_peer_args() {
        let args =
            CommandParser::<NetworkArgs>::parse_from([
                "reth",
                "--trusted-peers",
                "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303,enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303"
            ])
            .args;

        assert_eq!(
            args.trusted_peers,
            vec![
                "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303".parse().unwrap(),
                "enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303".parse().unwrap()
            ]
        );
    }

    /// `--dns-retries` round-trips, including the default-equal value `0`.
    #[test]
    fn parse_retry_strategy_args() {
        let tests = vec![0, 10];

        for retries in tests {
            let args = CommandParser::<NetworkArgs>::parse_from([
                "reth",
                "--dns-retries",
                retries.to_string().as_str(),
            ])
            .args;

            assert_eq!(args.dns_retries, retries);
        }
    }

    /// Parsing no flags must agree with `NetworkArgs::default()`; keeps the manual `Default`
    /// impl in sync with the clap defaults.
    #[cfg(not(feature = "optimism"))]
    #[test]
    fn network_args_default_sanity_test() {
        let default_args = NetworkArgs::default();
        let args = CommandParser::<NetworkArgs>::parse_from(["reth"]).args;

        assert_eq!(args, default_args);
    }
}
|
||||
159
crates/node/core/src/args/payload_builder.rs
Normal file
159
crates/node/core/src/args/payload_builder.rs
Normal file
@@ -0,0 +1,159 @@
|
||||
use crate::{
|
||||
args::utils::parse_duration_from_secs, cli::config::PayloadBuilderConfig,
|
||||
version::default_extradata,
|
||||
};
|
||||
use clap::{
|
||||
builder::{RangedU64ValueParser, TypedValueParser},
|
||||
Arg, Args, Command,
|
||||
};
|
||||
use reth_primitives::constants::{
|
||||
ETHEREUM_BLOCK_GAS_LIMIT, MAXIMUM_EXTRA_DATA_SIZE, SLOT_DURATION,
|
||||
};
|
||||
use std::{borrow::Cow, ffi::OsStr, time::Duration};
|
||||
|
||||
/// Parameters for configuring the Payload Builder
|
||||
#[derive(Debug, Clone, Args, PartialEq, Eq)]
|
||||
#[command(next_help_heading = "Builder")]
|
||||
pub struct PayloadBuilderArgs {
|
||||
/// Block extra data set by the payload builder.
|
||||
#[arg(long = "builder.extradata", value_parser = ExtradataValueParser::default(), default_value_t = default_extradata())]
|
||||
pub extradata: String,
|
||||
|
||||
/// Target gas ceiling for built blocks.
|
||||
#[arg(long = "builder.gaslimit", default_value = "30000000", value_name = "GAS_LIMIT")]
|
||||
pub max_gas_limit: u64,
|
||||
|
||||
/// The interval at which the job should build a new payload after the last (in seconds).
|
||||
#[arg(long = "builder.interval", value_parser = parse_duration_from_secs, default_value = "1", value_name = "SECONDS")]
|
||||
pub interval: Duration,
|
||||
|
||||
/// The deadline for when the payload builder job should resolve.
|
||||
#[arg(long = "builder.deadline", value_parser = parse_duration_from_secs, default_value = "12", value_name = "SECONDS")]
|
||||
pub deadline: Duration,
|
||||
|
||||
/// Maximum number of tasks to spawn for building a payload.
|
||||
#[arg(long = "builder.max-tasks", default_value = "3", value_parser = RangedU64ValueParser::<usize>::new().range(1..))]
|
||||
pub max_payload_tasks: usize,
|
||||
}
|
||||
|
||||
impl Default for PayloadBuilderArgs {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
extradata: default_extradata(),
|
||||
max_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT,
|
||||
interval: Duration::from_secs(1),
|
||||
deadline: SLOT_DURATION,
|
||||
max_payload_tasks: 3,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The parsed CLI arguments double as the payload builder configuration; every accessor simply
// exposes the corresponding field.
impl PayloadBuilderConfig for PayloadBuilderArgs {
    fn extradata(&self) -> Cow<'_, str> {
        // Borrowed `Cow`: no allocation, the string lives in `self`.
        self.extradata.as_str().into()
    }

    fn interval(&self) -> Duration {
        self.interval
    }

    fn deadline(&self) -> Duration {
        self.deadline
    }

    fn max_gas_limit(&self) -> u64 {
        self.max_gas_limit
    }

    fn max_payload_tasks(&self) -> usize {
        self.max_payload_tasks
    }
}
|
||||
|
||||
/// clap value parser for `--builder.extradata` that enforces the extra-data byte-size limit
/// (see the [`TypedValueParser`] impl below). `Default` is derived because the parser is
/// instantiated via `ExtradataValueParser::default()` in the `#[arg(value_parser = ...)]` attr.
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
struct ExtradataValueParser;
|
||||
|
||||
impl TypedValueParser for ExtradataValueParser {
|
||||
type Value = String;
|
||||
|
||||
fn parse_ref(
|
||||
&self,
|
||||
_cmd: &Command,
|
||||
_arg: Option<&Arg>,
|
||||
value: &OsStr,
|
||||
) -> Result<Self::Value, clap::Error> {
|
||||
let val =
|
||||
value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?;
|
||||
if val.as_bytes().len() > MAXIMUM_EXTRA_DATA_SIZE {
|
||||
return Err(clap::Error::raw(
|
||||
clap::error::ErrorKind::InvalidValue,
|
||||
format!(
|
||||
"Payload builder extradata size exceeds {MAXIMUM_EXTRA_DATA_SIZE}-byte limit"
|
||||
),
|
||||
))
|
||||
}
|
||||
Ok(val.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    /// `--builder.max-tasks` accepts the lower bound of its `range(1..)` parser.
    #[test]
    fn test_args_with_valid_max_tasks() {
        let args =
            CommandParser::<PayloadBuilderArgs>::parse_from(["reth", "--builder.max-tasks", "1"])
                .args;
        assert_eq!(args.max_payload_tasks, 1)
    }

    /// `0` is outside the `range(1..)` parser and must be rejected.
    #[test]
    fn test_args_with_invalid_max_tasks() {
        assert!(CommandParser::<PayloadBuilderArgs>::try_parse_from([
            "reth",
            "--builder.max-tasks",
            "0"
        ])
        .is_err());
    }

    /// The default extradata value passes its own size validation.
    #[test]
    fn test_default_extradata() {
        let extradata = default_extradata();
        let args = CommandParser::<PayloadBuilderArgs>::parse_from([
            "reth",
            "--builder.extradata",
            extradata.as_str(),
        ])
        .args;
        assert_eq!(args.extradata, extradata);
    }

    /// One byte over `MAXIMUM_EXTRA_DATA_SIZE` must fail in `ExtradataValueParser`.
    #[test]
    fn test_invalid_extradata() {
        let extradata = "x".repeat(MAXIMUM_EXTRA_DATA_SIZE + 1);
        let args = CommandParser::<PayloadBuilderArgs>::try_parse_from([
            "reth",
            "--builder.extradata",
            extradata.as_str(),
        ]);
        assert!(args.is_err());
    }

    /// Parsing no flags must agree with `PayloadBuilderArgs::default()`; keeps the manual
    /// `Default` impl in sync with the clap defaults.
    #[test]
    fn payload_builder_args_default_sanity_check() {
        let default_args = PayloadBuilderArgs::default();
        let args = CommandParser::<PayloadBuilderArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
}
|
||||
66
crates/node/core/src/args/pruning.rs
Normal file
66
crates/node/core/src/args/pruning.rs
Normal file
@@ -0,0 +1,66 @@
|
||||
//! Pruning and full node arguments
|
||||
|
||||
use clap::Args;
|
||||
use reth_chainspec::ChainSpec;
|
||||
use reth_config::config::PruneConfig;
|
||||
use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE};
|
||||
|
||||
/// Parameters for pruning and full node
#[derive(Debug, Clone, Args, PartialEq, Eq, Default)]
#[command(next_help_heading = "Pruning")]
pub struct PruningArgs {
    /// Run full node. Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored.
    /// This flag takes priority over pruning configuration in reth.toml.
    // When unset, `prune_config` returns `None` and the toml configuration (if any) applies.
    #[arg(long, default_value_t = false)]
    pub full: bool,
}
|
||||
|
||||
impl PruningArgs {
|
||||
/// Returns pruning configuration.
|
||||
pub fn prune_config(&self, chain_spec: &ChainSpec) -> Option<PruneConfig> {
|
||||
if !self.full {
|
||||
return None
|
||||
}
|
||||
Some(PruneConfig {
|
||||
block_interval: 5,
|
||||
segments: PruneModes {
|
||||
sender_recovery: Some(PruneMode::Full),
|
||||
transaction_lookup: None,
|
||||
receipts: chain_spec
|
||||
.deposit_contract
|
||||
.as_ref()
|
||||
.map(|contract| PruneMode::Before(contract.block)),
|
||||
account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)),
|
||||
storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)),
|
||||
receipts_log_filter: ReceiptsLogPruneConfig(
|
||||
chain_spec
|
||||
.deposit_contract
|
||||
.as_ref()
|
||||
.map(|contract| (contract.address, PruneMode::Before(contract.block)))
|
||||
.into_iter()
|
||||
.collect(),
|
||||
),
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    /// Parsing no flags must agree with the derived `PruningArgs::default()`.
    #[test]
    fn pruning_args_sanity_check() {
        let default_args = PruningArgs::default();
        let args = CommandParser::<PruningArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
}
|
||||
383
crates/node/core/src/args/rpc_server.rs
Normal file
383
crates/node/core/src/args/rpc_server.rs
Normal file
@@ -0,0 +1,383 @@
|
||||
//! clap [Args](clap::Args) for RPC related arguments.
|
||||
|
||||
use std::{
|
||||
ffi::OsStr,
|
||||
net::{IpAddr, Ipv4Addr},
|
||||
path::PathBuf,
|
||||
};
|
||||
|
||||
use alloy_rpc_types_engine::JwtSecret;
|
||||
use clap::{
|
||||
builder::{PossibleValue, RangedU64ValueParser, TypedValueParser},
|
||||
Arg, Args, Command,
|
||||
};
|
||||
use rand::Rng;
|
||||
use reth_rpc_server_types::{constants, RethRpcModule, RpcModuleSelection};
|
||||
|
||||
use crate::args::{
|
||||
types::{MaxU32, ZeroAsNoneU64},
|
||||
GasPriceOracleArgs, RpcStateCacheArgs,
|
||||
};
|
||||
|
||||
// These constants feed the `default_value_t = ...` clap attributes on `RpcServerArgs` below.

/// Default max number of subscriptions per connection.
pub(crate) const RPC_DEFAULT_MAX_SUBS_PER_CONN: u32 = 1024;

/// Default max request size in MB.
pub(crate) const RPC_DEFAULT_MAX_REQUEST_SIZE_MB: u32 = 15;

/// Default max response size in MB.
///
/// This is only relevant for very large trace responses.
pub(crate) const RPC_DEFAULT_MAX_RESPONSE_SIZE_MB: u32 = 160;

/// Default maximum number of incoming RPC server connections.
pub(crate) const RPC_DEFAULT_MAX_CONNECTIONS: u32 = 500;
|
||||
|
||||
/// Parameters for configuring the RPC servers with more granularity via CLI
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "RPC")]
pub struct RpcServerArgs {
    /// Enable the HTTP-RPC server
    // Defaults to `true` when the `--dev` flag is set.
    #[arg(long, default_value_if("dev", "true", "true"))]
    pub http: bool,

    /// Http server address to listen on
    #[arg(long = "http.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
    pub http_addr: IpAddr,

    /// Http server port to listen on
    #[arg(long = "http.port", default_value_t = constants::DEFAULT_HTTP_RPC_PORT)]
    pub http_port: u16,

    /// Rpc Modules to be configured for the HTTP server
    #[arg(long = "http.api", value_parser = RpcModuleSelectionValueParser::default())]
    pub http_api: Option<RpcModuleSelection>,

    /// Http Corsdomain to allow request from
    #[arg(long = "http.corsdomain")]
    pub http_corsdomain: Option<String>,

    /// Enable the WS-RPC server
    #[arg(long)]
    pub ws: bool,

    /// Ws server address to listen on
    #[arg(long = "ws.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
    pub ws_addr: IpAddr,

    /// Ws server port to listen on
    #[arg(long = "ws.port", default_value_t = constants::DEFAULT_WS_RPC_PORT)]
    pub ws_port: u16,

    /// Origins from which to accept `WebSocket` requests
    #[arg(id = "ws.origins", long = "ws.origins")]
    pub ws_allowed_origins: Option<String>,

    /// Rpc Modules to be configured for the WS server
    #[arg(long = "ws.api", value_parser = RpcModuleSelectionValueParser::default())]
    pub ws_api: Option<RpcModuleSelection>,

    /// Disable the IPC-RPC server
    #[arg(long)]
    pub ipcdisable: bool,

    /// Filename for IPC socket/pipe within the datadir
    #[arg(long, default_value_t = constants::DEFAULT_IPC_ENDPOINT.to_string())]
    pub ipcpath: String,

    /// Auth server address to listen on
    #[arg(long = "authrpc.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
    pub auth_addr: IpAddr,

    /// Auth server port to listen on
    #[arg(long = "authrpc.port", default_value_t = constants::DEFAULT_AUTH_PORT)]
    pub auth_port: u16,

    /// Path to a JWT secret to use for the authenticated engine-API RPC server.
    ///
    /// This will enforce JWT authentication for all requests coming from the consensus layer.
    ///
    /// If no path is provided, a secret will be generated and stored in the datadir under
    /// `<DIR>/<CHAIN_ID>/jwt.hex`. For mainnet this would be `~/.reth/mainnet/jwt.hex` by default.
    #[arg(long = "authrpc.jwtsecret", value_name = "PATH", global = true, required = false)]
    pub auth_jwtsecret: Option<PathBuf>,

    /// Enable auth engine API over IPC
    #[arg(long)]
    pub auth_ipc: bool,

    /// Filename for auth IPC socket/pipe within the datadir
    #[arg(long = "auth-ipc.path", default_value_t = constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string())]
    pub auth_ipc_path: String,

    /// Hex encoded JWT secret to authenticate the regular RPC server(s), see `--http.api` and
    /// `--ws.api`.
    ///
    /// This is __not__ used for the authenticated engine-API RPC server, see
    /// `--authrpc.jwtsecret`.
    #[arg(long = "rpc.jwtsecret", value_name = "HEX", global = true, required = false)]
    pub rpc_jwtsecret: Option<JwtSecret>,

    /// Set the maximum RPC request payload size for both HTTP and WS in megabytes.
    #[arg(long = "rpc.max-request-size", alias = "rpc-max-request-size", default_value_t = RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into())]
    pub rpc_max_request_size: MaxU32,

    /// Set the maximum RPC response payload size for both HTTP and WS in megabytes.
    #[arg(long = "rpc.max-response-size", alias = "rpc-max-response-size", visible_alias = "rpc.returndata.limit", default_value_t = RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into())]
    pub rpc_max_response_size: MaxU32,

    /// Set the maximum concurrent subscriptions per connection.
    #[arg(long = "rpc.max-subscriptions-per-connection", alias = "rpc-max-subscriptions-per-connection", default_value_t = RPC_DEFAULT_MAX_SUBS_PER_CONN.into())]
    pub rpc_max_subscriptions_per_connection: MaxU32,

    /// Maximum number of RPC server connections.
    #[arg(long = "rpc.max-connections", alias = "rpc-max-connections", value_name = "COUNT", default_value_t = RPC_DEFAULT_MAX_CONNECTIONS.into())]
    pub rpc_max_connections: MaxU32,

    /// Maximum number of concurrent tracing requests.
    #[arg(long = "rpc.max-tracing-requests", alias = "rpc-max-tracing-requests", value_name = "COUNT", default_value_t = constants::default_max_tracing_requests())]
    pub rpc_max_tracing_requests: usize,

    /// Maximum number of blocks that could be scanned per filter request. (0 = entire chain)
    #[arg(long = "rpc.max-blocks-per-filter", alias = "rpc-max-blocks-per-filter", value_name = "COUNT", default_value_t = ZeroAsNoneU64::new(constants::DEFAULT_MAX_BLOCKS_PER_FILTER))]
    pub rpc_max_blocks_per_filter: ZeroAsNoneU64,

    /// Maximum number of logs that can be returned in a single response. (0 = no limit)
    #[arg(long = "rpc.max-logs-per-response", alias = "rpc-max-logs-per-response", value_name = "COUNT", default_value_t = ZeroAsNoneU64::new(constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64))]
    pub rpc_max_logs_per_response: ZeroAsNoneU64,

    /// Maximum gas limit for `eth_call` and call tracing RPC methods.
    // `range(1..)` rejects a zero gas cap on the command line.
    #[arg(
        long = "rpc.gascap",
        alias = "rpc-gascap",
        value_name = "GAS_CAP",
        value_parser = RangedU64ValueParser::<u64>::new().range(1..),
        default_value_t = constants::gas_oracle::RPC_DEFAULT_GAS_CAP
    )]
    pub rpc_gas_cap: u64,

    /// State cache configuration.
    #[command(flatten)]
    pub rpc_state_cache: RpcStateCacheArgs,

    /// Gas price oracle configuration.
    #[command(flatten)]
    pub gas_price_oracle: GasPriceOracleArgs,
}
|
||||
|
||||
impl RpcServerArgs {
|
||||
/// Enables the HTTP-RPC server.
|
||||
pub const fn with_http(mut self) -> Self {
|
||||
self.http = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Enables the WS-RPC server.
|
||||
pub const fn with_ws(mut self) -> Self {
|
||||
self.ws = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Enables the Auth IPC
|
||||
pub const fn with_auth_ipc(mut self) -> Self {
|
||||
self.auth_ipc = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Change rpc port numbers based on the instance number.
|
||||
/// * The `auth_port` is scaled by a factor of `instance * 100`
|
||||
/// * The `http_port` is scaled by a factor of `-instance`
|
||||
/// * The `ws_port` is scaled by a factor of `instance * 2`
|
||||
/// * The `ipcpath` is appended with the instance number: `/tmp/reth.ipc-<instance>`
|
||||
///
|
||||
/// # Panics
|
||||
/// Warning: if `instance` is zero in debug mode, this will panic.
|
||||
///
|
||||
/// This will also panic in debug mode if either:
|
||||
/// * `instance` is greater than `655` (scaling would overflow `u16`)
|
||||
/// * `self.auth_port / 100 + (instance - 1)` would overflow `u16`
|
||||
///
|
||||
/// In release mode, this will silently wrap around.
|
||||
pub fn adjust_instance_ports(&mut self, instance: u16) {
|
||||
debug_assert_ne!(instance, 0, "instance must be non-zero");
|
||||
// auth port is scaled by a factor of instance * 100
|
||||
self.auth_port += instance * 100 - 100;
|
||||
// http port is scaled by a factor of -instance
|
||||
self.http_port -= instance - 1;
|
||||
// ws port is scaled by a factor of instance * 2
|
||||
self.ws_port += instance * 2 - 2;
|
||||
|
||||
// if multiple instances are being run, append the instance number to the ipc path
|
||||
if instance > 1 {
|
||||
self.ipcpath = format!("{}-{}", self.ipcpath, instance);
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the http port to zero, to allow the OS to assign a random unused port when the rpc
|
||||
/// server binds to a socket.
|
||||
pub const fn with_http_unused_port(mut self) -> Self {
|
||||
self.http_port = 0;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the ws port to zero, to allow the OS to assign a random unused port when the rpc
|
||||
/// server binds to a socket.
|
||||
pub const fn with_ws_unused_port(mut self) -> Self {
|
||||
self.ws_port = 0;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the auth port to zero, to allow the OS to assign a random unused port when the rpc
|
||||
/// server binds to a socket.
|
||||
pub const fn with_auth_unused_port(mut self) -> Self {
|
||||
self.auth_port = 0;
|
||||
self
|
||||
}
|
||||
|
||||
/// Append a random string to the ipc path, to prevent possible collisions when multiple nodes
|
||||
/// are being run on the same machine.
|
||||
pub fn with_ipc_random_path(mut self) -> Self {
|
||||
let random_string: String = rand::thread_rng()
|
||||
.sample_iter(rand::distributions::Alphanumeric)
|
||||
.take(8)
|
||||
.map(char::from)
|
||||
.collect();
|
||||
self.ipcpath = format!("{}-{}", self.ipcpath, random_string);
|
||||
self
|
||||
}
|
||||
|
||||
/// Configure all ports to be set to a random unused port when bound, and set the IPC path to a
|
||||
/// random path.
|
||||
pub fn with_unused_ports(mut self) -> Self {
|
||||
self = self.with_http_unused_port();
|
||||
self = self.with_ws_unused_port();
|
||||
self = self.with_auth_unused_port();
|
||||
self = self.with_ipc_random_path();
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for RpcServerArgs {
    fn default() -> Self {
        Self {
            // HTTP server: disabled by default, bound to localhost.
            http: false,
            http_addr: Ipv4Addr::LOCALHOST.into(),
            http_port: constants::DEFAULT_HTTP_RPC_PORT,
            http_api: None,
            http_corsdomain: None,
            // WS server: disabled by default, bound to localhost.
            ws: false,
            ws_addr: Ipv4Addr::LOCALHOST.into(),
            ws_port: constants::DEFAULT_WS_RPC_PORT,
            ws_allowed_origins: None,
            ws_api: None,
            // IPC: enabled at the default endpoint path.
            ipcdisable: false,
            ipcpath: constants::DEFAULT_IPC_ENDPOINT.to_string(),
            // Authenticated engine API endpoint.
            auth_addr: Ipv4Addr::LOCALHOST.into(),
            auth_port: constants::DEFAULT_AUTH_PORT,
            auth_jwtsecret: None,
            auth_ipc: false,
            auth_ipc_path: constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string(),
            // Server limits — these mirror the clap `default_value_t`s on the struct so that
            // `Default::default()` and parsing with no flags agree (see the sanity test below).
            rpc_jwtsecret: None,
            rpc_max_request_size: RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into(),
            rpc_max_response_size: RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into(),
            rpc_max_subscriptions_per_connection: RPC_DEFAULT_MAX_SUBS_PER_CONN.into(),
            rpc_max_connections: RPC_DEFAULT_MAX_CONNECTIONS.into(),
            rpc_max_tracing_requests: constants::default_max_tracing_requests(),
            rpc_max_blocks_per_filter: constants::DEFAULT_MAX_BLOCKS_PER_FILTER.into(),
            rpc_max_logs_per_response: (constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64).into(),
            rpc_gas_cap: constants::gas_oracle::RPC_DEFAULT_GAS_CAP,
            // Nested argument groups use their own defaults.
            gas_price_oracle: GasPriceOracleArgs::default(),
            rpc_state_cache: RpcStateCacheArgs::default(),
        }
    }
}
|
||||
|
||||
/// clap value parser for [`RpcModuleSelection`].
|
||||
#[derive(Clone, Debug, Default)]
|
||||
#[non_exhaustive]
|
||||
struct RpcModuleSelectionValueParser;
|
||||
|
||||
impl TypedValueParser for RpcModuleSelectionValueParser {
|
||||
type Value = RpcModuleSelection;
|
||||
|
||||
fn parse_ref(
|
||||
&self,
|
||||
_cmd: &Command,
|
||||
arg: Option<&Arg>,
|
||||
value: &OsStr,
|
||||
) -> Result<Self::Value, clap::Error> {
|
||||
let val =
|
||||
value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?;
|
||||
val.parse::<RpcModuleSelection>().map_err(|err| {
|
||||
let arg = arg.map(|a| a.to_string()).unwrap_or_else(|| "...".to_owned());
|
||||
let possible_values = RethRpcModule::all_variant_names().to_vec().join(",");
|
||||
let msg = format!(
|
||||
"Invalid value '{val}' for {arg}: {err}.\n [possible values: {possible_values}]"
|
||||
);
|
||||
clap::Error::raw(clap::error::ErrorKind::InvalidValue, msg)
|
||||
})
|
||||
}
|
||||
|
||||
fn possible_values(&self) -> Option<Box<dyn Iterator<Item = PossibleValue> + '_>> {
|
||||
let values = RethRpcModule::all_variant_names().iter().map(PossibleValue::new);
|
||||
Some(Box::new(values))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use clap::{Args, Parser};

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn test_rpc_server_args_parser() {
        // `--http.api` accepts a comma-separated list of module names.
        let args =
            CommandParser::<RpcServerArgs>::parse_from(["reth", "--http.api", "eth,admin,debug"])
                .args;

        let apis = args.http_api.unwrap();
        let expected = RpcModuleSelection::try_from_selection(["eth", "admin", "debug"]).unwrap();

        assert_eq!(apis, expected);
    }

    #[test]
    fn test_rpc_server_eth_call_bundle_args() {
        // The hyphenated `eth-call-bundle` module name must parse alongside the plain ones.
        let args = CommandParser::<RpcServerArgs>::parse_from([
            "reth",
            "--http.api",
            "eth,admin,debug,eth-call-bundle",
        ])
        .args;

        let apis = args.http_api.unwrap();
        let expected =
            RpcModuleSelection::try_from_selection(["eth", "admin", "debug", "eth-call-bundle"])
                .unwrap();

        assert_eq!(apis, expected);
    }

    #[test]
    fn test_rpc_server_args_parser_none() {
        // "none" yields an empty selection rather than a parse error.
        let args = CommandParser::<RpcServerArgs>::parse_from(["reth", "--http.api", "none"]).args;
        let apis = args.http_api.unwrap();
        let expected = RpcModuleSelection::Selection(Default::default());
        assert_eq!(apis, expected);
    }

    #[test]
    fn rpc_server_args_default_sanity_test() {
        // `Default::default()` must agree with clap's `default_value_t`s.
        let default_args = RpcServerArgs::default();
        let args = CommandParser::<RpcServerArgs>::parse_from(["reth"]).args;

        assert_eq!(args, default_args);
    }
}
|
||||
49
crates/node/core/src/args/rpc_state_cache.rs
Normal file
49
crates/node/core/src/args/rpc_state_cache.rs
Normal file
@@ -0,0 +1,49 @@
|
||||
use clap::Args;
|
||||
use reth_rpc_server_types::constants::cache::{
|
||||
DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_ENV_CACHE_MAX_LEN,
|
||||
DEFAULT_RECEIPT_CACHE_MAX_LEN,
|
||||
};
|
||||
|
||||
/// Parameters to configure RPC state cache.
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "RPC State Cache")]
pub struct RpcStateCacheArgs {
    /// Max number of blocks in cache.
    #[arg(
        long = "rpc-cache.max-blocks",
        default_value_t = DEFAULT_BLOCK_CACHE_MAX_LEN,
    )]
    pub max_blocks: u32,

    /// Max number receipts in cache.
    #[arg(
        long = "rpc-cache.max-receipts",
        default_value_t = DEFAULT_RECEIPT_CACHE_MAX_LEN,
    )]
    pub max_receipts: u32,

    /// Max number of bytes for cached env data.
    // NOTE(review): the help text says "bytes" but the value is a u32 entry count
    // (DEFAULT_ENV_CACHE_MAX_LEN) like the other caches — confirm the intended unit.
    #[arg(
        long = "rpc-cache.max-envs",
        default_value_t = DEFAULT_ENV_CACHE_MAX_LEN,
    )]
    pub max_envs: u32,

    /// Max number of concurrent database requests.
    #[arg(
        long = "rpc-cache.max-concurrent-db-requests",
        default_value_t = DEFAULT_CONCURRENT_DB_REQUESTS,
    )]
    pub max_concurrent_db_requests: usize,
}
|
||||
|
||||
impl Default for RpcStateCacheArgs {
    fn default() -> Self {
        // Mirrors the clap `default_value_t`s above so that `Default::default()` and
        // parsing with no flags produce the same configuration.
        Self {
            max_blocks: DEFAULT_BLOCK_CACHE_MAX_LEN,
            max_receipts: DEFAULT_RECEIPT_CACHE_MAX_LEN,
            max_envs: DEFAULT_ENV_CACHE_MAX_LEN,
            max_concurrent_db_requests: DEFAULT_CONCURRENT_DB_REQUESTS,
        }
    }
}
|
||||
62
crates/node/core/src/args/secret_key.rs
Normal file
62
crates/node/core/src/args/secret_key.rs
Normal file
@@ -0,0 +1,62 @@
|
||||
use reth_fs_util::{self as fs, FsPathError};
|
||||
use reth_network::config::rng_secret_key;
|
||||
use reth_primitives::hex::encode as hex_encode;
|
||||
use secp256k1::{Error as SecretKeyBaseError, SecretKey};
|
||||
use std::{
|
||||
io,
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
use thiserror::Error;
|
||||
|
||||
/// Errors returned by loading a [`SecretKey`], including IO errors.
#[derive(Error, Debug)]
pub enum SecretKeyError {
    /// Error encountered during decoding of the secret key.
    #[error(transparent)]
    SecretKeyDecodeError(#[from] SecretKeyBaseError),

    /// Error related to file system path operations.
    #[error(transparent)]
    SecretKeyFsPathError(#[from] FsPathError),

    /// Represents an error when failed to access the key file.
    // Produced in `get_secret_key` when `Path::try_exists` itself fails
    // (e.g. permission denied on a parent directory).
    #[error("failed to access key file {secret_file:?}: {error}")]
    FailedToAccessKeyFile {
        /// The encountered IO error.
        error: io::Error,
        /// Path to the secret key file.
        secret_file: PathBuf,
    },
}
|
||||
|
||||
/// Attempts to load a [`SecretKey`] from a specified path. If no file exists there, then it
|
||||
/// generates a secret key and stores it in the provided path. I/O errors might occur during write
|
||||
/// operations in the form of a [`SecretKeyError`]
|
||||
pub fn get_secret_key(secret_key_path: &Path) -> Result<SecretKey, SecretKeyError> {
|
||||
let exists = secret_key_path.try_exists();
|
||||
|
||||
match exists {
|
||||
Ok(true) => {
|
||||
let contents = fs::read_to_string(secret_key_path)?;
|
||||
Ok(contents
|
||||
.as_str()
|
||||
.parse::<SecretKey>()
|
||||
.map_err(SecretKeyError::SecretKeyDecodeError)?)
|
||||
}
|
||||
Ok(false) => {
|
||||
if let Some(dir) = secret_key_path.parent() {
|
||||
// Create parent directory
|
||||
fs::create_dir_all(dir)?;
|
||||
}
|
||||
|
||||
let secret = rng_secret_key();
|
||||
let hex = hex_encode(secret.as_ref());
|
||||
fs::write(secret_key_path, hex)?;
|
||||
Ok(secret)
|
||||
}
|
||||
Err(error) => Err(SecretKeyError::FailedToAccessKeyFile {
|
||||
error,
|
||||
secret_file: secret_key_path.to_path_buf(),
|
||||
}),
|
||||
}
|
||||
}
|
||||
53
crates/node/core/src/args/stage.rs
Normal file
53
crates/node/core/src/args/stage.rs
Normal file
@@ -0,0 +1,53 @@
|
||||
//! Shared arguments related to stages
|
||||
use derive_more::Display;
|
||||
|
||||
/// Represents a specific stage within the data pipeline.
///
/// Different stages within the pipeline have dedicated functionalities and operations.
// The variant doc comments double as clap help text (`ValueEnum`), so they are user-facing.
#[derive(Debug, Clone, Copy, Eq, PartialEq, PartialOrd, Ord, clap::ValueEnum, Display)]
pub enum StageEnum {
    /// The headers stage within the pipeline.
    ///
    /// This stage handles operations related to block headers.
    Headers,
    /// The bodies stage within the pipeline.
    ///
    /// This stage deals with block bodies and their associated data.
    Bodies,
    /// The senders stage within the pipeline.
    ///
    /// Responsible for sender-related processes and data recovery.
    Senders,
    /// The execution stage within the pipeline.
    ///
    /// Handles the execution of transactions and contracts.
    Execution,
    /// The account hashing stage within the pipeline.
    ///
    /// Manages operations related to hashing account data.
    AccountHashing,
    /// The storage hashing stage within the pipeline.
    ///
    /// Manages operations related to hashing storage data.
    StorageHashing,
    /// The account and storage hashing stages within the pipeline.
    ///
    /// Covers general data hashing operations.
    Hashing,
    /// The merkle stage within the pipeline.
    ///
    /// Handles Merkle tree-related computations and data processing.
    Merkle,
    /// The transaction lookup stage within the pipeline.
    ///
    /// Deals with the retrieval and processing of transactions.
    TxLookup,
    /// The account history stage within the pipeline.
    ///
    /// Manages historical data related to accounts.
    AccountHistory,
    /// The storage history stage within the pipeline.
    ///
    /// Manages historical data related to storage.
    StorageHistory,
}
|
||||
141
crates/node/core/src/args/txpool.rs
Normal file
141
crates/node/core/src/args/txpool.rs
Normal file
@@ -0,0 +1,141 @@
|
||||
//! Transaction pool arguments
|
||||
|
||||
use crate::cli::config::RethTransactionPoolConfig;
|
||||
use clap::Args;
|
||||
use reth_primitives::Address;
|
||||
use reth_transaction_pool::{
|
||||
blobstore::disk::DEFAULT_MAX_CACHED_BLOBS, validate::DEFAULT_MAX_TX_INPUT_BYTES,
|
||||
LocalTransactionConfig, PoolConfig, PriceBumpConfig, SubPoolLimit, DEFAULT_PRICE_BUMP,
|
||||
REPLACE_BLOB_PRICE_BUMP, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER,
|
||||
TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
|
||||
};
|
||||
/// Parameters for transaction pool configuration
// NOTE(review): the original doc said "Parameters for debugging purposes" — a copy-paste
// from the debug args; this struct configures the transaction pool.
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "TxPool")]
pub struct TxPoolArgs {
    /// Max number of transaction in the pending sub-pool.
    #[arg(long = "txpool.pending-max-count", alias = "txpool.pending_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)]
    pub pending_max_count: usize,
    /// Max size of the pending sub-pool in megabytes.
    #[arg(long = "txpool.pending-max-size", alias = "txpool.pending_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)]
    pub pending_max_size: usize,

    /// Max number of transaction in the basefee sub-pool
    #[arg(long = "txpool.basefee-max-count", alias = "txpool.basefee_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)]
    pub basefee_max_count: usize,
    /// Max size of the basefee sub-pool in megabytes.
    #[arg(long = "txpool.basefee-max-size", alias = "txpool.basefee_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)]
    pub basefee_max_size: usize,

    /// Max number of transaction in the queued sub-pool
    #[arg(long = "txpool.queued-max-count", alias = "txpool.queued_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)]
    pub queued_max_count: usize,
    /// Max size of the queued sub-pool in megabytes.
    #[arg(long = "txpool.queued-max-size", alias = "txpool.queued_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)]
    pub queued_max_size: usize,

    /// Max number of executable transaction slots guaranteed per account
    #[arg(long = "txpool.max-account-slots", alias = "txpool.max_account_slots", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)]
    pub max_account_slots: usize,

    /// Price bump (in %) for the transaction pool underpriced check.
    #[arg(long = "txpool.pricebump", default_value_t = DEFAULT_PRICE_BUMP)]
    pub price_bump: u128,

    /// Price bump percentage to replace an already existing blob transaction
    #[arg(long = "blobpool.pricebump", default_value_t = REPLACE_BLOB_PRICE_BUMP)]
    pub blob_transaction_price_bump: u128,

    /// Max size in bytes of a single transaction allowed to enter the pool
    #[arg(long = "txpool.max-tx-input-bytes", alias = "txpool.max_tx_input_bytes", default_value_t = DEFAULT_MAX_TX_INPUT_BYTES)]
    pub max_tx_input_bytes: usize,

    /// The maximum number of blobs to keep in the in memory blob cache.
    #[arg(long = "txpool.max-cached-entries", alias = "txpool.max_cached_entries", default_value_t = DEFAULT_MAX_CACHED_BLOBS)]
    pub max_cached_entries: u32,

    /// Flag to disable local transaction exemptions.
    #[arg(long = "txpool.nolocals")]
    pub no_locals: bool,
    /// Flag to allow certain addresses as local.
    #[arg(long = "txpool.locals")]
    pub locals: Vec<Address>,
    /// Flag to toggle local transaction propagation.
    #[arg(long = "txpool.no-local-transactions-propagation")]
    pub no_local_transactions_propagation: bool,
}
|
||||
|
||||
impl Default for TxPoolArgs {
    fn default() -> Self {
        // Mirrors the clap `default_value_t`s so `Default::default()` and parsing with no
        // flags agree (checked by `txpool_args_default_sanity_test`).
        Self {
            pending_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
            pending_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT,
            basefee_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
            basefee_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT,
            queued_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
            queued_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT,
            max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER,
            price_bump: DEFAULT_PRICE_BUMP,
            blob_transaction_price_bump: REPLACE_BLOB_PRICE_BUMP,
            max_tx_input_bytes: DEFAULT_MAX_TX_INPUT_BYTES,
            max_cached_entries: DEFAULT_MAX_CACHED_BLOBS,
            no_locals: false,
            locals: Default::default(),
            no_local_transactions_propagation: false,
        }
    }
}
|
||||
|
||||
impl RethTransactionPoolConfig for TxPoolArgs {
    /// Returns transaction pool configuration.
    fn pool_config(&self) -> PoolConfig {
        PoolConfig {
            local_transactions_config: LocalTransactionConfig {
                no_exemptions: self.no_locals,
                local_addresses: self.locals.clone().into_iter().collect(),
                propagate_local_transactions: !self.no_local_transactions_propagation,
            },
            // CLI sizes are in megabytes; the pool expects bytes.
            pending_limit: SubPoolLimit {
                max_txs: self.pending_max_count,
                max_size: self.pending_max_size * 1024 * 1024,
            },
            basefee_limit: SubPoolLimit {
                max_txs: self.basefee_max_count,
                max_size: self.basefee_max_size * 1024 * 1024,
            },
            queued_limit: SubPoolLimit {
                max_txs: self.queued_max_count,
                max_size: self.queued_max_size * 1024 * 1024,
            },
            // NOTE(review): the blob pool reuses the *queued* pool limits — there are no
            // dedicated blob-limit CLI args. Confirm this is intentional and not a
            // copy-paste of `queued_limit`.
            blob_limit: SubPoolLimit {
                max_txs: self.queued_max_count,
                max_size: self.queued_max_size * 1024 * 1024,
            },
            max_account_slots: self.max_account_slots,
            price_bumps: PriceBumpConfig {
                default_price_bump: self.price_bump,
                replace_blob_tx_price_bump: self.blob_transaction_price_bump,
            },
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn txpool_args_default_sanity_test() {
        // `Default::default()` must match clap's `default_value_t`s.
        let default_args = TxPoolArgs::default();
        let args = CommandParser::<TxPoolArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
}
|
||||
119
crates/node/core/src/args/types.rs
Normal file
119
crates/node/core/src/args/types.rs
Normal file
@@ -0,0 +1,119 @@
|
||||
//! Additional helper types for CLI parsing.
|
||||
|
||||
use std::{fmt, num::ParseIntError, str::FromStr};
|
||||
|
||||
/// A macro that generates types that maps "0" to "None" when parsing CLI arguments.
macro_rules! zero_as_none {
    ($type_name:ident, $inner_type:ty) => {
        #[derive(Debug, Clone, Copy, PartialEq, Eq)]
        /// A helper type that maps `0` to `None` when parsing CLI arguments.
        pub struct $type_name(pub Option<$inner_type>);

        impl $type_name {
            /// Creates a new instance wrapping `Some(value)`.
            // NOTE(review): unlike `From`, this does NOT map `0` to `None` — `new(0)` is
            // `Some(0)`. Confirm callers rely on that distinction.
            pub const fn new(value: $inner_type) -> Self {
                Self(Some(value))
            }

            /// Returns the inner value or `$inner_type::MAX` if `None`.
            pub fn unwrap_or_max(self) -> $inner_type {
                self.0.unwrap_or(<$inner_type>::MAX)
            }
        }

        impl std::fmt::Display for $type_name {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                // `None` renders back as "0" so round-tripping through a CLI is lossless.
                match self.0 {
                    Some(value) => write!(f, "{}", value),
                    None => write!(f, "0"),
                }
            }
        }

        impl From<$inner_type> for $type_name {
            #[inline]
            fn from(value: $inner_type) -> Self {
                // `0` is the sentinel for "no limit".
                Self(if value == 0 { None } else { Some(value) })
            }
        }

        impl std::str::FromStr for $type_name {
            type Err = std::num::ParseIntError;

            fn from_str(s: &str) -> Result<Self, Self::Err> {
                // Parse as the inner integer, then apply the zero-to-None mapping.
                let value = s.parse::<$inner_type>()?;
                Ok(Self::from(value))
            }
        }
    };
}

zero_as_none!(ZeroAsNoneU64, u64);
zero_as_none!(ZeroAsNoneU32, u32);
|
||||
|
||||
/// A macro that generates types that map "max" to "MAX" when parsing CLI arguments.
macro_rules! max_values {
    ($name:ident, $ty:ident) => {
        #[derive(Debug, Clone, Copy, PartialEq, Eq)]
        /// A helper type for parsing "max" as the maximum value of the specified type.
        pub struct $name(pub $ty);

        impl $name {
            /// Returns the inner value.
            pub const fn get(&self) -> $ty {
                self.0
            }
        }

        impl fmt::Display for $name {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                write!(f, "{}", self.0)
            }
        }

        impl From<$ty> for $name {
            #[inline]
            fn from(value: $ty) -> Self {
                Self(value)
            }
        }

        impl FromStr for $name {
            type Err = ParseIntError;

            fn from_str(s: &str) -> Result<Self, Self::Err> {
                // "max" (any case) is shorthand for the type's maximum; anything else
                // must parse as a plain integer of the inner type.
                if s.eq_ignore_ascii_case("max") {
                    Ok($name(<$ty>::MAX))
                } else {
                    s.parse::<$ty>().map($name)
                }
            }
        }
    };
}
max_values!(MaxU32, u32);
max_values!(MaxU64, u64);
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_zero_parse() {
        // "0" parses to `None`, which reads back as the type's MAX.
        let val = "0".parse::<ZeroAsNoneU64>().unwrap();
        assert_eq!(val, ZeroAsNoneU64(None));
        assert_eq!(val.unwrap_or_max(), u64::MAX);
    }

    #[test]
    fn test_from_u64() {
        // Non-zero values are preserved.
        let original = 1u64;
        let expected = ZeroAsNoneU64(Some(1u64));
        assert_eq!(ZeroAsNoneU64::from(original), expected);

        // Zero maps to `None`.
        let original = 0u64;
        let expected = ZeroAsNoneU64(None);
        assert_eq!(ZeroAsNoneU64::from(original), expected);
    }

    #[test]
    fn test_max_parse() {
        // Coverage for the `max_values!` types, which previously had no tests.
        // "max" is case-insensitive shorthand for the type's maximum.
        assert_eq!("max".parse::<MaxU32>().unwrap(), MaxU32(u32::MAX));
        assert_eq!("MAX".parse::<MaxU64>().unwrap(), MaxU64(u64::MAX));
        // Plain integers still parse.
        assert_eq!("42".parse::<MaxU32>().unwrap().get(), 42);
        // Out-of-range input is rejected with a `ParseIntError`.
        assert!("4294967296".parse::<MaxU32>().is_err());
    }
}
|
||||
175
crates/node/core/src/args/utils.rs
Normal file
175
crates/node/core/src/args/utils.rs
Normal file
@@ -0,0 +1,175 @@
|
||||
//! Clap parser utilities
|
||||
|
||||
use alloy_genesis::Genesis;
|
||||
use reth_chainspec::ChainSpec;
|
||||
use reth_fs_util as fs;
|
||||
use reth_primitives::{BlockHashOrNumber, B256};
|
||||
use std::{
|
||||
net::{IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs},
|
||||
path::PathBuf,
|
||||
str::FromStr,
|
||||
sync::Arc,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use reth_chainspec::DEV;
|
||||
|
||||
#[cfg(feature = "optimism")]
|
||||
use reth_chainspec::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA};
|
||||
|
||||
#[cfg(not(feature = "optimism"))]
|
||||
use reth_chainspec::{GOERLI, HOLESKY, MAINNET, SEPOLIA};
|
||||
|
||||
#[cfg(feature = "optimism")]
|
||||
/// Chains supported by op-reth. First value should be used as the default.
|
||||
pub const SUPPORTED_CHAINS: &[&str] = &["optimism", "optimism-sepolia", "base", "base-sepolia"];
|
||||
#[cfg(not(feature = "optimism"))]
|
||||
/// Chains supported by reth. First value should be used as the default.
|
||||
pub const SUPPORTED_CHAINS: &[&str] = &["mainnet", "sepolia", "goerli", "holesky", "dev"];
|
||||
|
||||
/// Helper to parse a [Duration] from seconds
|
||||
pub fn parse_duration_from_secs(arg: &str) -> eyre::Result<Duration, std::num::ParseIntError> {
|
||||
let seconds = arg.parse()?;
|
||||
Ok(Duration::from_secs(seconds))
|
||||
}
|
||||
|
||||
/// The help info for the --chain flag
|
||||
pub fn chain_help() -> String {
|
||||
format!("The chain this node is running.\nPossible values are either a built-in chain or the path to a chain specification file.\n\nBuilt-in chains:\n {}", SUPPORTED_CHAINS.join(", "))
|
||||
}
|
||||
|
||||
/// Clap value parser for [`ChainSpec`]s.
///
/// The value parser matches either a known chain, the path
/// to a json file, or a json formatted string in-memory. The json needs to be a Genesis struct.
pub fn chain_value_parser(s: &str) -> eyre::Result<Arc<ChainSpec>, eyre::Error> {
    Ok(match s {
        // Built-in chain names; the set depends on the `optimism` feature.
        #[cfg(not(feature = "optimism"))]
        "mainnet" => MAINNET.clone(),
        #[cfg(not(feature = "optimism"))]
        "goerli" => GOERLI.clone(),
        #[cfg(not(feature = "optimism"))]
        "sepolia" => SEPOLIA.clone(),
        #[cfg(not(feature = "optimism"))]
        "holesky" => HOLESKY.clone(),
        "dev" => DEV.clone(),
        #[cfg(feature = "optimism")]
        "optimism" => OP_MAINNET.clone(),
        #[cfg(feature = "optimism")]
        "optimism_sepolia" | "optimism-sepolia" => OP_SEPOLIA.clone(),
        #[cfg(feature = "optimism")]
        "base" => BASE_MAINNET.clone(),
        #[cfg(feature = "optimism")]
        "base_sepolia" | "base-sepolia" => BASE_SEPOLIA.clone(),
        _ => {
            // try to read json from path first
            let raw = match fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned())) {
                Ok(raw) => raw,
                Err(io_err) => {
                    // valid json may start with "\n", but must contain "{"
                    if s.contains('{') {
                        // Treat the argument itself as inline JSON.
                        s.to_string()
                    } else {
                        return Err(io_err.into()) // assume invalid path
                    }
                }
            };

            // both serialized Genesis and ChainSpec structs supported
            let genesis: Genesis = serde_json::from_str(&raw)?;

            Arc::new(genesis.into())
        }
    })
}
|
||||
|
||||
/// Parse [`BlockHashOrNumber`]
|
||||
pub fn hash_or_num_value_parser(value: &str) -> eyre::Result<BlockHashOrNumber, eyre::Error> {
|
||||
match B256::from_str(value) {
|
||||
Ok(hash) => Ok(BlockHashOrNumber::Hash(hash)),
|
||||
Err(_) => Ok(BlockHashOrNumber::Number(value.parse()?)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Error thrown while parsing a socket address.
#[derive(thiserror::Error, Debug)]
pub enum SocketAddressParsingError {
    /// Failed to convert the string into a socket addr
    #[error("could not parse socket address: {0}")]
    Io(#[from] std::io::Error),
    /// Input must not be empty
    #[error("cannot parse socket address from empty string")]
    Empty,
    /// Failed to parse the address
    #[error("could not parse socket address from {0}")]
    Parse(String),
    /// Failed to parse port
    #[error("could not parse port: {0}")]
    Port(#[from] std::num::ParseIntError),
}
|
||||
|
||||
/// Parse a [`SocketAddr`] from a `str`.
|
||||
///
|
||||
/// The following formats are checked:
|
||||
///
|
||||
/// - If the value can be parsed as a `u16` or starts with `:` it is considered a port, and the
|
||||
/// hostname is set to `localhost`.
|
||||
/// - If the value contains `:` it is assumed to be the format `<host>:<port>`
|
||||
/// - Otherwise it is assumed to be a hostname
|
||||
///
|
||||
/// An error is returned if the value is empty.
|
||||
pub fn parse_socket_address(value: &str) -> eyre::Result<SocketAddr, SocketAddressParsingError> {
|
||||
if value.is_empty() {
|
||||
return Err(SocketAddressParsingError::Empty)
|
||||
}
|
||||
|
||||
if let Some(port) = value.strip_prefix(':').or_else(|| value.strip_prefix("localhost:")) {
|
||||
let port: u16 = port.parse()?;
|
||||
return Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port))
|
||||
}
|
||||
if let Ok(port) = value.parse::<u16>() {
|
||||
return Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port))
|
||||
}
|
||||
value
|
||||
.to_socket_addrs()?
|
||||
.next()
|
||||
.ok_or_else(|| SocketAddressParsingError::Parse(value.to_string()))
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use proptest::prelude::Rng;
    use secp256k1::rand::thread_rng;

    #[test]
    fn parse_known_chain_spec() {
        // Every advertised chain name must resolve to a built-in spec.
        for chain in SUPPORTED_CHAINS {
            chain_value_parser(chain).unwrap();
        }
    }

    #[test]
    fn parse_socket_addresses() {
        // host:port, :port and bare-port forms all resolve to loopback:9000.
        for value in ["localhost:9000", ":9000", "9000"] {
            let socket_addr = parse_socket_address(value)
                .unwrap_or_else(|_| panic!("could not parse socket address: {value}"));

            assert!(socket_addr.ip().is_loopback());
            assert_eq!(socket_addr.port(), 9000);
        }
    }

    #[test]
    fn parse_socket_address_random() {
        // Same as above, with an arbitrary random port.
        let port: u16 = thread_rng().gen();

        for value in [format!("localhost:{port}"), format!(":{port}"), port.to_string()] {
            let socket_addr = parse_socket_address(&value)
                .unwrap_or_else(|_| panic!("could not parse socket address: {value}"));

            assert!(socket_addr.ip().is_loopback());
            assert_eq!(socket_addr.port(), port);
        }
    }
}
|
||||
67
crates/node/core/src/cli/config.rs
Normal file
67
crates/node/core/src/cli/config.rs
Normal file
@@ -0,0 +1,67 @@
|
||||
//! Config traits for various node components.
|
||||
|
||||
use reth_network::protocol::IntoRlpxSubProtocol;
|
||||
use reth_primitives::Bytes;
|
||||
use reth_transaction_pool::PoolConfig;
|
||||
use std::{borrow::Cow, time::Duration};
|
||||
|
||||
/// A trait that provides payload builder settings.
///
/// This provides all basic payload builder settings and is implemented by the
/// [`PayloadBuilderArgs`](crate::args::PayloadBuilderArgs) type.
pub trait PayloadBuilderConfig {
    /// Block extra data set by the payload builder.
    fn extradata(&self) -> Cow<'_, str>;

    /// Returns the extradata as bytes.
    // Default impl copies into an owned buffer; override if an owned value is at hand.
    fn extradata_bytes(&self) -> Bytes {
        self.extradata().as_bytes().to_vec().into()
    }

    /// The interval at which the job should build a new payload after the last.
    fn interval(&self) -> Duration;

    /// The deadline for when the payload builder job should resolve.
    fn deadline(&self) -> Duration;

    /// Target gas ceiling for built blocks.
    fn max_gas_limit(&self) -> u64;

    /// Maximum number of tasks to spawn for building a payload.
    fn max_payload_tasks(&self) -> usize;
}
|
||||
|
||||
/// A trait that represents the configured network and can be used to apply additional configuration
|
||||
/// to the network.
|
||||
pub trait RethNetworkConfig {
|
||||
/// Adds a new additional protocol to the `RLPx` sub-protocol list.
|
||||
///
|
||||
/// These additional protocols are negotiated during the `RLPx` handshake.
|
||||
/// If both peers share the same protocol, the corresponding handler will be included alongside
|
||||
/// the `eth` protocol.
|
||||
///
|
||||
/// See also [`ProtocolHandler`](reth_network::protocol::ProtocolHandler)
|
||||
fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol);
|
||||
|
||||
/// Returns the secret key used for authenticating sessions.
|
||||
fn secret_key(&self) -> secp256k1::SecretKey;
|
||||
|
||||
// TODO add more network config methods here
|
||||
}
|
||||
|
||||
impl<C> RethNetworkConfig for reth_network::NetworkManager<C> {
|
||||
fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) {
|
||||
Self::add_rlpx_sub_protocol(self, protocol);
|
||||
}
|
||||
|
||||
fn secret_key(&self) -> secp256k1::SecretKey {
|
||||
self.secret_key()
|
||||
}
|
||||
}
|
||||
|
||||
/// A trait that provides all basic config values for the transaction pool and is implemented by the
|
||||
/// [`TxPoolArgs`](crate::args::TxPoolArgs) type.
|
||||
pub trait RethTransactionPoolConfig {
|
||||
/// Returns transaction pool configuration.
|
||||
fn pool_config(&self) -> PoolConfig;
|
||||
}
|
||||
3
crates/node/core/src/cli/mod.rs
Normal file
3
crates/node/core/src/cli/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
//! Additional CLI configuration support.
|
||||
|
||||
pub mod config;
|
||||
398
crates/node/core/src/dirs.rs
Normal file
398
crates/node/core/src/dirs.rs
Normal file
@@ -0,0 +1,398 @@
|
||||
//! reth data directories.
|
||||
|
||||
use crate::{args::DatadirArgs, utils::parse_path};
|
||||
use reth_chainspec::Chain;
|
||||
use std::{
|
||||
env::VarError,
|
||||
fmt::{Debug, Display, Formatter},
|
||||
path::{Path, PathBuf},
|
||||
str::FromStr,
|
||||
};
|
||||
|
||||
/// Constructs a string to be used as a path for configuration and db paths.
|
||||
pub fn config_path_prefix(chain: Chain) -> String {
|
||||
chain.to_string()
|
||||
}
|
||||
|
||||
/// Returns the path to the reth data directory.
|
||||
///
|
||||
/// Refer to [`dirs_next::data_dir`] for cross-platform behavior.
|
||||
pub fn data_dir() -> Option<PathBuf> {
|
||||
dirs_next::data_dir().map(|root| root.join("reth"))
|
||||
}
|
||||
|
||||
/// Returns the path to the reth database.
|
||||
///
|
||||
/// Refer to [`dirs_next::data_dir`] for cross-platform behavior.
|
||||
pub fn database_path() -> Option<PathBuf> {
|
||||
data_dir().map(|root| root.join("db"))
|
||||
}
|
||||
|
||||
/// Returns the path to the reth configuration directory.
|
||||
///
|
||||
/// Refer to [`dirs_next::config_dir`] for cross-platform behavior.
|
||||
pub fn config_dir() -> Option<PathBuf> {
|
||||
dirs_next::config_dir().map(|root| root.join("reth"))
|
||||
}
|
||||
|
||||
/// Returns the path to the reth cache directory.
|
||||
///
|
||||
/// Refer to [`dirs_next::cache_dir`] for cross-platform behavior.
|
||||
pub fn cache_dir() -> Option<PathBuf> {
|
||||
dirs_next::cache_dir().map(|root| root.join("reth"))
|
||||
}
|
||||
|
||||
/// Returns the path to the reth logs directory.
|
||||
///
|
||||
/// Refer to [`dirs_next::cache_dir`] for cross-platform behavior.
|
||||
pub fn logs_dir() -> Option<PathBuf> {
|
||||
cache_dir().map(|root| root.join("logs"))
|
||||
}
|
||||
|
||||
/// Returns the path to the reth data dir.
|
||||
///
|
||||
/// The data dir should contain a subdirectory for each chain, and those chain directories will
|
||||
/// include all information for that chain, such as the p2p secret.
|
||||
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
|
||||
#[non_exhaustive]
|
||||
pub struct DataDirPath;
|
||||
|
||||
impl XdgPath for DataDirPath {
|
||||
fn resolve() -> Option<PathBuf> {
|
||||
data_dir()
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the path to the reth logs directory.
|
||||
///
|
||||
/// Refer to [`dirs_next::cache_dir`] for cross-platform behavior.
|
||||
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
|
||||
#[non_exhaustive]
|
||||
pub struct LogsDir;
|
||||
|
||||
impl XdgPath for LogsDir {
|
||||
fn resolve() -> Option<PathBuf> {
|
||||
logs_dir()
|
||||
}
|
||||
}
|
||||
|
||||
/// A small helper trait for unit structs that represent a standard path following the XDG
|
||||
/// path specification.
|
||||
pub trait XdgPath {
|
||||
/// Resolve the standard path.
|
||||
fn resolve() -> Option<PathBuf>;
|
||||
}
|
||||
|
||||
/// A wrapper type that either parses a user-given path or defaults to an
|
||||
/// OS-specific path.
|
||||
///
|
||||
/// The [`FromStr`] implementation supports shell expansions and common patterns such as `~` for the
|
||||
/// home directory.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use reth_node_core::dirs::{DataDirPath, PlatformPath};
|
||||
/// use std::str::FromStr;
|
||||
///
|
||||
/// // Resolves to the platform-specific database path
|
||||
/// let default: PlatformPath<DataDirPath> = PlatformPath::default();
|
||||
/// // Resolves to `$(pwd)/my/path/to/datadir`
|
||||
/// let custom: PlatformPath<DataDirPath> = PlatformPath::from_str("my/path/to/datadir").unwrap();
|
||||
///
|
||||
/// assert_ne!(default.as_ref(), custom.as_ref());
|
||||
/// ```
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub struct PlatformPath<D>(PathBuf, std::marker::PhantomData<D>);
|
||||
|
||||
impl<D> Display for PlatformPath<D> {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", self.0.display())
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> Clone for PlatformPath<D> {
|
||||
fn clone(&self) -> Self {
|
||||
Self(self.0.clone(), std::marker::PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: XdgPath> Default for PlatformPath<D> {
|
||||
fn default() -> Self {
|
||||
Self(
|
||||
D::resolve().expect("Could not resolve default path. Set one manually."),
|
||||
std::marker::PhantomData,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> FromStr for PlatformPath<D> {
|
||||
type Err = shellexpand::LookupError<VarError>;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
Ok(Self(parse_path(s)?, std::marker::PhantomData))
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> AsRef<Path> for PlatformPath<D> {
|
||||
fn as_ref(&self) -> &Path {
|
||||
self.0.as_path()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> From<PlatformPath<D>> for PathBuf {
|
||||
fn from(value: PlatformPath<D>) -> Self {
|
||||
value.0
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> PlatformPath<D> {
|
||||
/// Returns the path joined with another path
|
||||
pub fn join<P: AsRef<Path>>(&self, path: P) -> Self {
|
||||
Self(self.0.join(path), std::marker::PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> PlatformPath<D> {
|
||||
/// Converts the path to a `ChainPath` with the given `Chain`.
|
||||
pub fn with_chain(&self, chain: Chain, datadir_args: DatadirArgs) -> ChainPath<D> {
|
||||
// extract chain name
|
||||
let platform_path = self.platform_path_from_chain(chain);
|
||||
|
||||
ChainPath::new(platform_path, chain, datadir_args)
|
||||
}
|
||||
|
||||
fn platform_path_from_chain(&self, chain: Chain) -> Self {
|
||||
let chain_name = config_path_prefix(chain);
|
||||
let path = self.0.join(chain_name);
|
||||
Self(path, std::marker::PhantomData)
|
||||
}
|
||||
|
||||
/// Map the inner path to a new type `T`.
|
||||
pub fn map_to<T>(&self) -> PlatformPath<T> {
|
||||
PlatformPath(self.0.clone(), std::marker::PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
/// An Optional wrapper type around [`PlatformPath`].
|
||||
///
|
||||
/// This is useful for when a path is optional, such as the `--data-dir` flag.
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub struct MaybePlatformPath<D>(Option<PlatformPath<D>>);
|
||||
|
||||
// === impl MaybePlatformPath ===
|
||||
|
||||
impl<D: XdgPath> MaybePlatformPath<D> {
|
||||
/// Returns the path if it is set, otherwise returns the default path for the given chain.
|
||||
pub fn unwrap_or_chain_default(&self, chain: Chain, datadir_args: DatadirArgs) -> ChainPath<D> {
|
||||
ChainPath(
|
||||
self.0
|
||||
.clone()
|
||||
.unwrap_or_else(|| PlatformPath::default().platform_path_from_chain(chain)),
|
||||
chain,
|
||||
datadir_args,
|
||||
)
|
||||
}
|
||||
|
||||
/// Returns the default platform path for the specified [Chain].
|
||||
pub fn chain_default(chain: Chain) -> ChainPath<D> {
|
||||
PlatformPath::default().with_chain(chain, DatadirArgs::default())
|
||||
}
|
||||
|
||||
/// Returns true if a custom path is set
|
||||
pub const fn is_some(&self) -> bool {
|
||||
self.0.is_some()
|
||||
}
|
||||
|
||||
/// Returns the path if it is set, otherwise returns `None`.
|
||||
pub fn as_ref(&self) -> Option<&Path> {
|
||||
self.0.as_ref().map(|p| p.as_ref())
|
||||
}
|
||||
|
||||
/// Returns the path if it is set, otherwise returns the default path, without any chain
|
||||
/// directory.
|
||||
pub fn unwrap_or_default(&self) -> PlatformPath<D> {
|
||||
self.0.clone().unwrap_or_default()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: XdgPath> Display for MaybePlatformPath<D> {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
if let Some(path) = &self.0 {
|
||||
path.fmt(f)
|
||||
} else {
|
||||
// NOTE: this is a workaround for making it work with clap's `default_value_t` which
|
||||
// computes the default value via `Default -> Display -> FromStr`
|
||||
write!(f, "default")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> Default for MaybePlatformPath<D> {
|
||||
fn default() -> Self {
|
||||
Self(None)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> FromStr for MaybePlatformPath<D> {
|
||||
type Err = shellexpand::LookupError<VarError>;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
let p = match s {
|
||||
"default" => {
|
||||
// NOTE: this is a workaround for making it work with clap's `default_value_t` which
|
||||
// computes the default value via `Default -> Display -> FromStr`
|
||||
None
|
||||
}
|
||||
_ => Some(PlatformPath::from_str(s)?),
|
||||
};
|
||||
Ok(Self(p))
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> From<PathBuf> for MaybePlatformPath<D> {
|
||||
fn from(path: PathBuf) -> Self {
|
||||
Self(Some(PlatformPath(path, std::marker::PhantomData)))
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrapper type around `PlatformPath` that includes a `Chain`, used for separating reth data for
|
||||
/// different networks.
|
||||
///
|
||||
/// If the chain is either mainnet, goerli, or sepolia, then the path will be:
|
||||
/// * mainnet: `<DIR>/mainnet`
|
||||
/// * goerli: `<DIR>/goerli`
|
||||
/// * sepolia: `<DIR>/sepolia`
|
||||
///
|
||||
/// Otherwise, the path will be dependent on the chain ID:
|
||||
/// * `<DIR>/<CHAIN_ID>`
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub struct ChainPath<D>(PlatformPath<D>, Chain, DatadirArgs);
|
||||
|
||||
impl<D> ChainPath<D> {
|
||||
/// Returns a new `ChainPath` given a `PlatformPath` and a `Chain`.
|
||||
pub const fn new(path: PlatformPath<D>, chain: Chain, datadir_args: DatadirArgs) -> Self {
|
||||
Self(path, chain, datadir_args)
|
||||
}
|
||||
|
||||
/// Returns the path to the reth data directory for this chain.
|
||||
///
|
||||
/// `<DIR>/<CHAIN_ID>`
|
||||
pub fn data_dir(&self) -> &Path {
|
||||
self.0.as_ref()
|
||||
}
|
||||
|
||||
/// Returns the path to the db directory for this chain.
|
||||
///
|
||||
/// `<DIR>/<CHAIN_ID>/db`
|
||||
pub fn db(&self) -> PathBuf {
|
||||
self.data_dir().join("db")
|
||||
}
|
||||
|
||||
/// Returns the path to the static files directory for this chain.
|
||||
///
|
||||
/// `<DIR>/<CHAIN_ID>/static_files`
|
||||
pub fn static_files(&self) -> PathBuf {
|
||||
let datadir_args = &self.2;
|
||||
if let Some(static_files_path) = &datadir_args.static_files_path {
|
||||
static_files_path.to_path_buf()
|
||||
} else {
|
||||
self.data_dir().join("static_files")
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the path to the reth p2p secret key for this chain.
|
||||
///
|
||||
/// `<DIR>/<CHAIN_ID>/discovery-secret`
|
||||
pub fn p2p_secret(&self) -> PathBuf {
|
||||
self.data_dir().join("discovery-secret")
|
||||
}
|
||||
|
||||
/// Returns the path to the known peers file for this chain.
|
||||
///
|
||||
/// `<DIR>/<CHAIN_ID>/known-peers.json`
|
||||
pub fn known_peers(&self) -> PathBuf {
|
||||
self.data_dir().join("known-peers.json")
|
||||
}
|
||||
|
||||
/// Returns the path to the blobstore directory for this chain where blobs of unfinalized
|
||||
/// transactions are stored.
|
||||
///
|
||||
/// `<DIR>/<CHAIN_ID>/blobstore`
|
||||
pub fn blobstore(&self) -> PathBuf {
|
||||
self.data_dir().join("blobstore")
|
||||
}
|
||||
|
||||
/// Returns the path to the local transactions backup file
|
||||
///
|
||||
/// `<DIR>/<CHAIN_ID>/txpool-transactions-backup.rlp`
|
||||
pub fn txpool_transactions(&self) -> PathBuf {
|
||||
self.data_dir().join("txpool-transactions-backup.rlp")
|
||||
}
|
||||
|
||||
/// Returns the path to the config file for this chain.
|
||||
///
|
||||
/// `<DIR>/<CHAIN_ID>/reth.toml`
|
||||
pub fn config(&self) -> PathBuf {
|
||||
self.data_dir().join("reth.toml")
|
||||
}
|
||||
|
||||
/// Returns the path to the jwtsecret file for this chain.
|
||||
///
|
||||
/// `<DIR>/<CHAIN_ID>/jwt.hex`
|
||||
pub fn jwt(&self) -> PathBuf {
|
||||
self.data_dir().join("jwt.hex")
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> AsRef<Path> for ChainPath<D> {
|
||||
fn as_ref(&self) -> &Path {
|
||||
self.0.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> Display for ChainPath<D> {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> From<ChainPath<D>> for PathBuf {
|
||||
fn from(value: ChainPath<D>) -> Self {
|
||||
value.0.into()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_maybe_data_dir_path() {
|
||||
let path = MaybePlatformPath::<DataDirPath>::default();
|
||||
let path = path.unwrap_or_chain_default(Chain::mainnet(), DatadirArgs::default());
|
||||
assert!(path.as_ref().ends_with("reth/mainnet"), "{path:?}");
|
||||
|
||||
let db_path = path.db();
|
||||
assert!(db_path.ends_with("reth/mainnet/db"), "{db_path:?}");
|
||||
|
||||
let path = MaybePlatformPath::<DataDirPath>::from_str("my/path/to/datadir").unwrap();
|
||||
let path = path.unwrap_or_chain_default(Chain::mainnet(), DatadirArgs::default());
|
||||
assert!(path.as_ref().ends_with("my/path/to/datadir"), "{path:?}");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_maybe_testnet_datadir_path() {
|
||||
let path = MaybePlatformPath::<DataDirPath>::default();
|
||||
let path = path.unwrap_or_chain_default(Chain::goerli(), DatadirArgs::default());
|
||||
assert!(path.as_ref().ends_with("reth/goerli"), "{path:?}");
|
||||
|
||||
let path = MaybePlatformPath::<DataDirPath>::default();
|
||||
let path = path.unwrap_or_chain_default(Chain::holesky(), DatadirArgs::default());
|
||||
assert!(path.as_ref().ends_with("reth/holesky"), "{path:?}");
|
||||
|
||||
let path = MaybePlatformPath::<DataDirPath>::default();
|
||||
let path = path.unwrap_or_chain_default(Chain::sepolia(), DatadirArgs::default());
|
||||
assert!(path.as_ref().ends_with("reth/sepolia"), "{path:?}");
|
||||
}
|
||||
}
|
||||
90
crates/node/core/src/exit.rs
Normal file
90
crates/node/core/src/exit.rs
Normal file
@@ -0,0 +1,90 @@
|
||||
//! Helper types for waiting for the node to exit.
|
||||
|
||||
use futures::{future::BoxFuture, FutureExt};
|
||||
use std::{
|
||||
fmt,
|
||||
future::Future,
|
||||
pin::Pin,
|
||||
task::{ready, Context, Poll},
|
||||
};
|
||||
|
||||
/// A Future which resolves when the node exits
|
||||
pub struct NodeExitFuture {
|
||||
/// The consensus engine future.
|
||||
/// This can be polled to wait for the consensus engine to exit.
|
||||
consensus_engine_fut: Option<BoxFuture<'static, eyre::Result<()>>>,
|
||||
|
||||
/// Flag indicating whether the node should be terminated after the pipeline sync.
|
||||
terminate: bool,
|
||||
}
|
||||
|
||||
impl fmt::Debug for NodeExitFuture {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("NodeExitFuture")
|
||||
.field("consensus_engine_fut", &"...")
|
||||
.field("terminate", &self.terminate)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl NodeExitFuture {
|
||||
/// Create a new `NodeExitFuture`.
|
||||
pub fn new<F>(consensus_engine_fut: F, terminate: bool) -> Self
|
||||
where
|
||||
F: Future<Output = eyre::Result<()>> + 'static + Send,
|
||||
{
|
||||
Self { consensus_engine_fut: Some(Box::pin(consensus_engine_fut)), terminate }
|
||||
}
|
||||
}
|
||||
|
||||
impl Future for NodeExitFuture {
|
||||
type Output = eyre::Result<()>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.get_mut();
|
||||
if let Some(rx) = this.consensus_engine_fut.as_mut() {
|
||||
match ready!(rx.poll_unpin(cx)) {
|
||||
Ok(_) => {
|
||||
this.consensus_engine_fut.take();
|
||||
if this.terminate {
|
||||
Poll::Ready(Ok(()))
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
Err(err) => Poll::Ready(Err(err)),
|
||||
}
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::future::poll_fn;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_node_exit_future_terminate_true() {
|
||||
let fut = async { Ok(()) };
|
||||
|
||||
let node_exit_future = NodeExitFuture::new(fut, true);
|
||||
|
||||
let res = node_exit_future.await;
|
||||
|
||||
assert!(res.is_ok());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_node_exit_future_terminate_false() {
|
||||
let fut = async { Ok(()) };
|
||||
|
||||
let mut node_exit_future = NodeExitFuture::new(fut, false);
|
||||
poll_fn(|cx| {
|
||||
assert!(node_exit_future.poll_unpin(cx).is_pending());
|
||||
Poll::Ready(())
|
||||
})
|
||||
.await;
|
||||
}
|
||||
}
|
||||
53
crates/node/core/src/lib.rs
Normal file
53
crates/node/core/src/lib.rs
Normal file
@@ -0,0 +1,53 @@
|
||||
//! The core of the Ethereum node. Collection of utilities and libraries that are used by the node.
|
||||
|
||||
#![doc(
|
||||
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
|
||||
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
|
||||
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
|
||||
)]
|
||||
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
|
||||
|
||||
pub mod args;
|
||||
pub mod cli;
|
||||
pub mod dirs;
|
||||
pub mod exit;
|
||||
pub mod metrics;
|
||||
pub mod node_config;
|
||||
pub mod utils;
|
||||
pub mod version;
|
||||
|
||||
// Re-export for backwards compatibility.
|
||||
pub use metrics::prometheus_exporter;
|
||||
|
||||
/// Re-exported from `reth_primitives`.
|
||||
pub mod primitives {
|
||||
pub use reth_primitives::*;
|
||||
}
|
||||
|
||||
/// Re-export of `reth_rpc_*` crates.
|
||||
pub mod rpc {
|
||||
/// Re-exported from `reth_rpc_types`.
|
||||
pub mod types {
|
||||
pub use reth_rpc_types::*;
|
||||
}
|
||||
|
||||
/// Re-exported from `reth_rpc_api`.
|
||||
pub mod api {
|
||||
pub use reth_rpc_api::*;
|
||||
}
|
||||
/// Re-exported from `reth_rpc::eth`.
|
||||
pub mod eth {
|
||||
pub use reth_rpc_eth_api::*;
|
||||
}
|
||||
|
||||
/// Re-exported from `reth_rpc::rpc`.
|
||||
pub mod result {
|
||||
pub use reth_rpc_server_types::result::*;
|
||||
}
|
||||
|
||||
/// Re-exported from `reth_rpc::eth`.
|
||||
pub mod compat {
|
||||
pub use reth_rpc_types_compat::*;
|
||||
}
|
||||
}
|
||||
4
crates/node/core/src/metrics/mod.rs
Normal file
4
crates/node/core/src/metrics/mod.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
//! Metrics utilities for the node.
|
||||
|
||||
pub mod prometheus_exporter;
|
||||
pub mod version_metrics;
|
||||
314
crates/node/core/src/metrics/prometheus_exporter.rs
Normal file
314
crates/node/core/src/metrics/prometheus_exporter.rs
Normal file
@@ -0,0 +1,314 @@
|
||||
//! Prometheus exporter
|
||||
|
||||
use crate::metrics::version_metrics::VersionInfo;
|
||||
use eyre::WrapErr;
|
||||
use futures::{future::FusedFuture, FutureExt};
|
||||
use http::Response;
|
||||
use metrics::describe_gauge;
|
||||
use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle};
|
||||
use metrics_util::layers::{PrefixLayer, Stack};
|
||||
use reth_db_api::database_metrics::DatabaseMetrics;
|
||||
use reth_metrics::metrics::Unit;
|
||||
use reth_provider::providers::StaticFileProvider;
|
||||
use reth_tasks::TaskExecutor;
|
||||
use std::{convert::Infallible, net::SocketAddr, sync::Arc};
|
||||
|
||||
pub(crate) trait Hook: Fn() + Send + Sync {}
|
||||
impl<T: Fn() + Send + Sync> Hook for T {}
|
||||
|
||||
/// Installs Prometheus as the metrics recorder.
|
||||
pub fn install_recorder() -> eyre::Result<PrometheusHandle> {
|
||||
let recorder = PrometheusBuilder::new().build_recorder();
|
||||
let handle = recorder.handle();
|
||||
|
||||
// Build metrics stack
|
||||
Stack::new(recorder)
|
||||
.push(PrefixLayer::new("reth"))
|
||||
.install()
|
||||
.wrap_err("Couldn't set metrics recorder.")?;
|
||||
|
||||
Ok(handle)
|
||||
}
|
||||
|
||||
/// Serves Prometheus metrics over HTTP with hooks.
|
||||
///
|
||||
/// The hooks are called every time the metrics are requested at the given endpoint, and can be used
|
||||
/// to record values for pull-style metrics, i.e. metrics that are not automatically updated.
|
||||
pub(crate) async fn serve_with_hooks<F: Hook + 'static>(
|
||||
listen_addr: SocketAddr,
|
||||
handle: PrometheusHandle,
|
||||
hooks: impl IntoIterator<Item = F>,
|
||||
task_executor: TaskExecutor,
|
||||
) -> eyre::Result<()> {
|
||||
let hooks: Vec<_> = hooks.into_iter().collect();
|
||||
|
||||
// Start endpoint
|
||||
start_endpoint(
|
||||
listen_addr,
|
||||
handle,
|
||||
Arc::new(move || hooks.iter().for_each(|hook| hook())),
|
||||
task_executor,
|
||||
)
|
||||
.await
|
||||
.wrap_err("Could not start Prometheus endpoint")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Starts an endpoint at the given address to serve Prometheus metrics.
|
||||
async fn start_endpoint<F: Hook + 'static>(
|
||||
listen_addr: SocketAddr,
|
||||
handle: PrometheusHandle,
|
||||
hook: Arc<F>,
|
||||
task_executor: TaskExecutor,
|
||||
) -> eyre::Result<()> {
|
||||
let listener =
|
||||
tokio::net::TcpListener::bind(listen_addr).await.wrap_err("Could not bind to address")?;
|
||||
|
||||
task_executor.spawn_with_graceful_shutdown_signal(|signal| async move {
|
||||
let mut shutdown = signal.ignore_guard().fuse();
|
||||
loop {
|
||||
let io = match listener.accept().await {
|
||||
Ok((stream, _remote_addr)) => stream,
|
||||
Err(err) => {
|
||||
tracing::error!(%err, "failed to accept connection");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let handle = handle.clone();
|
||||
let hook = hook.clone();
|
||||
let service = tower::service_fn(move |_| {
|
||||
(hook)();
|
||||
let metrics = handle.render();
|
||||
async move { Ok::<_, Infallible>(Response::new(metrics)) }
|
||||
});
|
||||
|
||||
if let Err(error) =
|
||||
jsonrpsee::server::serve_with_graceful_shutdown(io, service, &mut shutdown).await
|
||||
{
|
||||
tracing::debug!(%error, "failed to serve request")
|
||||
}
|
||||
|
||||
if shutdown.is_terminated() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Serves Prometheus metrics over HTTP with database and process metrics.
|
||||
pub async fn serve<Metrics>(
|
||||
listen_addr: SocketAddr,
|
||||
handle: PrometheusHandle,
|
||||
db: Metrics,
|
||||
static_file_provider: StaticFileProvider,
|
||||
process: metrics_process::Collector,
|
||||
task_executor: TaskExecutor,
|
||||
) -> eyre::Result<()>
|
||||
where
|
||||
Metrics: DatabaseMetrics + 'static + Send + Sync,
|
||||
{
|
||||
let db_metrics_hook = move || db.report_metrics();
|
||||
let static_file_metrics_hook = move || {
|
||||
let _ = static_file_provider.report_metrics().map_err(
|
||||
|error| tracing::error!(%error, "Failed to report static file provider metrics"),
|
||||
);
|
||||
};
|
||||
|
||||
// Clone `process` to move it into the hook and use the original `process` for describe below.
|
||||
let cloned_process = process.clone();
|
||||
let hooks: Vec<Box<dyn Hook<Output = ()>>> = vec![
|
||||
Box::new(db_metrics_hook),
|
||||
Box::new(static_file_metrics_hook),
|
||||
Box::new(move || cloned_process.collect()),
|
||||
Box::new(collect_memory_stats),
|
||||
Box::new(collect_io_stats),
|
||||
];
|
||||
serve_with_hooks(listen_addr, handle, hooks, task_executor).await?;
|
||||
|
||||
// We describe the metrics after the recorder is installed, otherwise this information is not
|
||||
// registered
|
||||
describe_gauge!("db.table_size", Unit::Bytes, "The size of a database table (in bytes)");
|
||||
describe_gauge!("db.table_pages", "The number of database pages for a table");
|
||||
describe_gauge!("db.table_entries", "The number of entries for a table");
|
||||
describe_gauge!("db.freelist", "The number of pages on the freelist");
|
||||
describe_gauge!("db.page_size", Unit::Bytes, "The size of a database page (in bytes)");
|
||||
describe_gauge!(
|
||||
"db.timed_out_not_aborted_transactions",
|
||||
"Number of timed out transactions that were not aborted by the user yet"
|
||||
);
|
||||
|
||||
describe_gauge!("static_files.segment_size", Unit::Bytes, "The size of a static file segment");
|
||||
describe_gauge!("static_files.segment_files", "The number of files for a static file segment");
|
||||
describe_gauge!(
|
||||
"static_files.segment_entries",
|
||||
"The number of entries for a static file segment"
|
||||
);
|
||||
|
||||
process.describe();
|
||||
describe_memory_stats();
|
||||
describe_io_stats();
|
||||
VersionInfo::default().register_version_metrics();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "jemalloc", unix))]
|
||||
fn collect_memory_stats() {
|
||||
use metrics::gauge;
|
||||
use tikv_jemalloc_ctl::{epoch, stats};
|
||||
use tracing::error;
|
||||
|
||||
if epoch::advance().map_err(|error| error!(%error, "Failed to advance jemalloc epoch")).is_err()
|
||||
{
|
||||
return
|
||||
}
|
||||
|
||||
if let Ok(value) = stats::active::read()
|
||||
.map_err(|error| error!(%error, "Failed to read jemalloc.stats.active"))
|
||||
{
|
||||
gauge!("jemalloc.active").set(value as f64);
|
||||
}
|
||||
|
||||
if let Ok(value) = stats::allocated::read()
|
||||
.map_err(|error| error!(%error, "Failed to read jemalloc.stats.allocated"))
|
||||
{
|
||||
gauge!("jemalloc.allocated").set(value as f64);
|
||||
}
|
||||
|
||||
if let Ok(value) = stats::mapped::read()
|
||||
.map_err(|error| error!(%error, "Failed to read jemalloc.stats.mapped"))
|
||||
{
|
||||
gauge!("jemalloc.mapped").set(value as f64);
|
||||
}
|
||||
|
||||
if let Ok(value) = stats::metadata::read()
|
||||
.map_err(|error| error!(%error, "Failed to read jemalloc.stats.metadata"))
|
||||
{
|
||||
gauge!("jemalloc.metadata").set(value as f64);
|
||||
}
|
||||
|
||||
if let Ok(value) = stats::resident::read()
|
||||
.map_err(|error| error!(%error, "Failed to read jemalloc.stats.resident"))
|
||||
{
|
||||
gauge!("jemalloc.resident").set(value as f64);
|
||||
}
|
||||
|
||||
if let Ok(value) = stats::retained::read()
|
||||
.map_err(|error| error!(%error, "Failed to read jemalloc.stats.retained"))
|
||||
{
|
||||
gauge!("jemalloc.retained").set(value as f64);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "jemalloc", unix))]
|
||||
fn describe_memory_stats() {
|
||||
describe_gauge!(
|
||||
"jemalloc.active",
|
||||
Unit::Bytes,
|
||||
"Total number of bytes in active pages allocated by the application"
|
||||
);
|
||||
describe_gauge!(
|
||||
"jemalloc.allocated",
|
||||
Unit::Bytes,
|
||||
"Total number of bytes allocated by the application"
|
||||
);
|
||||
describe_gauge!(
|
||||
"jemalloc.mapped",
|
||||
Unit::Bytes,
|
||||
"Total number of bytes in active extents mapped by the allocator"
|
||||
);
|
||||
describe_gauge!(
|
||||
"jemalloc.metadata",
|
||||
Unit::Bytes,
|
||||
"Total number of bytes dedicated to jemalloc metadata"
|
||||
);
|
||||
describe_gauge!(
|
||||
"jemalloc.resident",
|
||||
Unit::Bytes,
|
||||
"Total number of bytes in physically resident data pages mapped by the allocator"
|
||||
);
|
||||
describe_gauge!(
|
||||
"jemalloc.retained",
|
||||
Unit::Bytes,
|
||||
"Total number of bytes in virtual memory mappings that were retained rather than \
|
||||
being returned to the operating system via e.g. munmap(2)"
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(not(all(feature = "jemalloc", unix)))]
|
||||
fn collect_memory_stats() {}
|
||||
|
||||
#[cfg(not(all(feature = "jemalloc", unix)))]
|
||||
fn describe_memory_stats() {}
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
fn collect_io_stats() {
|
||||
use metrics::counter;
|
||||
use tracing::error;
|
||||
|
||||
let Ok(process) = procfs::process::Process::myself()
|
||||
.map_err(|error| error!(%error, "Failed to get currently running process"))
|
||||
else {
|
||||
return
|
||||
};
|
||||
|
||||
let Ok(io) = process.io().map_err(
|
||||
|error| error!(%error, "Failed to get IO stats for the currently running process"),
|
||||
) else {
|
||||
return
|
||||
};
|
||||
|
||||
counter!("io.rchar").absolute(io.rchar);
|
||||
counter!("io.wchar").absolute(io.wchar);
|
||||
counter!("io.syscr").absolute(io.syscr);
|
||||
counter!("io.syscw").absolute(io.syscw);
|
||||
counter!("io.read_bytes").absolute(io.read_bytes);
|
||||
counter!("io.write_bytes").absolute(io.write_bytes);
|
||||
counter!("io.cancelled_write_bytes").absolute(io.cancelled_write_bytes);
|
||||
}
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
fn describe_io_stats() {
|
||||
use metrics::describe_counter;
|
||||
|
||||
describe_counter!("io.rchar", "Characters read");
|
||||
describe_counter!("io.wchar", "Characters written");
|
||||
describe_counter!("io.syscr", "Read syscalls");
|
||||
describe_counter!("io.syscw", "Write syscalls");
|
||||
describe_counter!("io.read_bytes", Unit::Bytes, "Bytes read");
|
||||
describe_counter!("io.write_bytes", Unit::Bytes, "Bytes written");
|
||||
describe_counter!("io.cancelled_write_bytes", Unit::Bytes, "Cancelled write bytes");
|
||||
}
|
||||
|
||||
#[cfg(not(target_os = "linux"))]
|
||||
const fn collect_io_stats() {}
|
||||
|
||||
#[cfg(not(target_os = "linux"))]
|
||||
const fn describe_io_stats() {}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::node_config::PROMETHEUS_RECORDER_HANDLE;
|
||||
|
||||
// Dependencies using different version of the `metrics` crate (to be exact, 0.21 vs 0.22)
|
||||
// may not be able to communicate with each other through the global recorder.
|
||||
//
|
||||
// This test ensures that `metrics-process` dependency plays well with the current
|
||||
// `metrics-exporter-prometheus` dependency version.
|
||||
#[test]
|
||||
fn process_metrics() {
|
||||
// initialize the lazy handle
|
||||
let _ = &*PROMETHEUS_RECORDER_HANDLE;
|
||||
|
||||
let process = metrics_process::Collector::default();
|
||||
process.describe();
|
||||
process.collect();
|
||||
|
||||
let metrics = PROMETHEUS_RECORDER_HANDLE.render();
|
||||
assert!(metrics.contains("process_cpu_seconds_total"), "{metrics:?}");
|
||||
}
|
||||
}
|
||||
50
crates/node/core/src/metrics/version_metrics.rs
Normal file
50
crates/node/core/src/metrics/version_metrics.rs
Normal file
@@ -0,0 +1,50 @@
|
||||
//! This exposes reth's version information over prometheus.
|
||||
|
||||
use crate::version::{build_profile_name, VERGEN_GIT_SHA};
|
||||
use metrics::gauge;
|
||||
|
||||
/// Contains version information for the application.
///
/// All fields are `&'static str` because they are resolved at compile time
/// (see the [`Default`] impl).
#[derive(Debug, Clone)]
pub struct VersionInfo {
    /// The version of the application.
    pub version: &'static str,
    /// The build timestamp of the application.
    pub build_timestamp: &'static str,
    /// The cargo features enabled for the build.
    pub cargo_features: &'static str,
    /// The Git SHA of the build.
    pub git_sha: &'static str,
    /// The target triple for the build.
    pub target_triple: &'static str,
    /// The build profile (e.g., debug or release).
    pub build_profile: &'static str,
}
|
||||
|
||||
impl Default for VersionInfo {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
version: env!("CARGO_PKG_VERSION"),
|
||||
build_timestamp: env!("VERGEN_BUILD_TIMESTAMP"),
|
||||
cargo_features: env!("VERGEN_CARGO_FEATURES"),
|
||||
git_sha: VERGEN_GIT_SHA,
|
||||
target_triple: env!("VERGEN_CARGO_TARGET_TRIPLE"),
|
||||
build_profile: build_profile_name(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl VersionInfo {
|
||||
/// This exposes reth's version information over prometheus.
|
||||
pub fn register_version_metrics(&self) {
|
||||
let labels: [(&str, &str); 6] = [
|
||||
("version", self.version),
|
||||
("build_timestamp", self.build_timestamp),
|
||||
("cargo_features", self.cargo_features),
|
||||
("git_sha", self.git_sha),
|
||||
("target_triple", self.target_triple),
|
||||
("build_profile", self.build_profile),
|
||||
];
|
||||
|
||||
let _gauge = gauge!("info", &labels);
|
||||
}
|
||||
}
|
||||
428
crates/node/core/src/node_config.rs
Normal file
428
crates/node/core/src/node_config.rs
Normal file
@@ -0,0 +1,428 @@
|
||||
//! Support for customizing the node
|
||||
|
||||
use crate::{
|
||||
args::{
|
||||
DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs,
|
||||
PruningArgs, RpcServerArgs, TxPoolArgs,
|
||||
},
|
||||
dirs::{ChainPath, DataDirPath},
|
||||
metrics::prometheus_exporter,
|
||||
utils::get_single_header,
|
||||
};
|
||||
use metrics_exporter_prometheus::PrometheusHandle;
|
||||
use once_cell::sync::Lazy;
|
||||
use reth_chainspec::{ChainSpec, MAINNET};
|
||||
use reth_config::config::PruneConfig;
|
||||
use reth_db_api::{database::Database, database_metrics::DatabaseMetrics};
|
||||
use reth_network_p2p::headers::client::HeadersClient;
|
||||
use reth_primitives::{
|
||||
revm_primitives::EnvKzgSettings, BlockHashOrNumber, BlockNumber, Head, SealedHeader, B256,
|
||||
};
|
||||
use reth_provider::{
|
||||
providers::StaticFileProvider, BlockHashReader, HeaderProvider, ProviderFactory,
|
||||
StageCheckpointReader,
|
||||
};
|
||||
use reth_stages_types::StageId;
|
||||
use reth_storage_errors::provider::ProviderResult;
|
||||
use reth_tasks::TaskExecutor;
|
||||
use std::{net::SocketAddr, path::PathBuf, sync::Arc};
|
||||
use tracing::*;
|
||||
|
||||
/// The default prometheus recorder handle. We use a global static to ensure that it is only
/// installed once.
///
/// NOTE: the first dereference installs the global recorder and will panic (via `unwrap`)
/// if installation fails, e.g. if another global recorder was already set.
pub static PROMETHEUS_RECORDER_HANDLE: Lazy<PrometheusHandle> =
    Lazy::new(|| prometheus_exporter::install_recorder().unwrap());
|
||||
|
||||
/// This includes all necessary configuration to launch the node.
/// The individual configuration options can be overwritten before launching the node.
///
/// # Example
/// ```rust
/// # use reth_tasks::{TaskManager, TaskSpawner};
/// # use reth_node_core::{
/// #     node_config::NodeConfig,
/// #     args::RpcServerArgs,
/// # };
/// # use reth_rpc_server_types::RpcModuleSelection;
/// # use tokio::runtime::Handle;
///
/// async fn t() {
///     let handle = Handle::current();
///     let manager = TaskManager::new(handle);
///     let executor = manager.executor();
///
///     // create the builder
///     let builder = NodeConfig::default();
///
///     // configure the rpc apis
///     let mut rpc = RpcServerArgs::default().with_http().with_ws();
///     rpc.http_api = Some(RpcModuleSelection::All);
///     let builder = builder.with_rpc(rpc);
/// }
/// ```
///
/// This can also be used to launch a node with a temporary test database. This can be done with
/// the [`NodeConfig::test`] method.
///
/// # Example
/// ```rust
/// # use reth_tasks::{TaskManager, TaskSpawner};
/// # use reth_node_core::{
/// #     node_config::NodeConfig,
/// #     args::RpcServerArgs,
/// # };
/// # use reth_rpc_server_types::RpcModuleSelection;
/// # use tokio::runtime::Handle;
///
/// async fn t() {
///     let handle = Handle::current();
///     let manager = TaskManager::new(handle);
///     let executor = manager.executor();
///
///     // create the builder with a test database, using the `test` method
///     let builder = NodeConfig::test();
///
///     // configure the rpc apis
///     let mut rpc = RpcServerArgs::default().with_http().with_ws();
///     rpc.http_api = Some(RpcModuleSelection::All);
///     let builder = builder.with_rpc(rpc);
/// }
/// ```
#[derive(Debug, Clone)]
pub struct NodeConfig {
    /// All data directory related arguments
    pub datadir: DatadirArgs,

    /// The path to the configuration file to use.
    pub config: Option<PathBuf>,

    /// The chain this node is running.
    ///
    /// Possible values are either a built-in chain or the path to a chain specification file.
    pub chain: Arc<ChainSpec>,

    /// Enable Prometheus metrics.
    ///
    /// The metrics will be served at the given interface and port.
    pub metrics: Option<SocketAddr>,

    /// Add a new instance of a node.
    ///
    /// Configures the ports of the node to avoid conflicts with the defaults.
    /// This is useful for running multiple nodes on the same machine.
    ///
    /// Max number of instances is 200. It is chosen in a way so that it's not possible to have
    /// port numbers that conflict with each other.
    ///
    /// Changes to the following port numbers:
    /// - `DISCOVERY_PORT`: default + `instance` - 1
    /// - `DISCOVERY_V5_PORT`: default + `instance` - 1
    /// - `AUTH_PORT`: default + `instance` * 100 - 100
    /// - `HTTP_RPC_PORT`: default - `instance` + 1
    /// - `WS_RPC_PORT`: default + `instance` * 2 - 2
    pub instance: u16,

    /// All networking related arguments
    pub network: NetworkArgs,

    /// All rpc related arguments
    pub rpc: RpcServerArgs,

    /// All txpool related arguments with --txpool prefix
    pub txpool: TxPoolArgs,

    /// All payload builder related arguments
    pub builder: PayloadBuilderArgs,

    /// All debug related arguments with --debug prefix
    pub debug: DebugArgs,

    /// All database related arguments
    pub db: DatabaseArgs,

    /// All dev related arguments with --dev prefix
    pub dev: DevArgs,

    /// All pruning related arguments
    pub pruning: PruningArgs,
}
|
||||
|
||||
impl NodeConfig {
    /// Creates a testing [`NodeConfig`], causing the database to be launched ephemerally.
    pub fn test() -> Self {
        Self::default()
            // set all ports to zero by default for test instances
            .with_unused_ports()
    }

    /// Sets --dev mode for the node
    pub const fn dev(mut self) -> Self {
        self.dev.dev = true;
        self
    }

    /// Set the data directory args for the node
    pub fn with_datadir_args(mut self, datadir_args: DatadirArgs) -> Self {
        self.datadir = datadir_args;
        self
    }

    /// Set the config file for the node
    pub fn with_config(mut self, config: impl Into<PathBuf>) -> Self {
        self.config = Some(config.into());
        self
    }

    /// Set the [`ChainSpec`] for the node
    pub fn with_chain(mut self, chain: impl Into<Arc<ChainSpec>>) -> Self {
        self.chain = chain.into();
        self
    }

    /// Set the metrics address for the node
    pub const fn with_metrics(mut self, metrics: SocketAddr) -> Self {
        self.metrics = Some(metrics);
        self
    }

    /// Set the instance for the node
    pub const fn with_instance(mut self, instance: u16) -> Self {
        self.instance = instance;
        self
    }

    /// Set the network args for the node
    pub fn with_network(mut self, network: NetworkArgs) -> Self {
        self.network = network;
        self
    }

    /// Set the rpc args for the node
    pub fn with_rpc(mut self, rpc: RpcServerArgs) -> Self {
        self.rpc = rpc;
        self
    }

    /// Set the txpool args for the node
    pub fn with_txpool(mut self, txpool: TxPoolArgs) -> Self {
        self.txpool = txpool;
        self
    }

    /// Set the builder args for the node
    pub fn with_payload_builder(mut self, builder: PayloadBuilderArgs) -> Self {
        self.builder = builder;
        self
    }

    /// Set the debug args for the node
    pub fn with_debug(mut self, debug: DebugArgs) -> Self {
        self.debug = debug;
        self
    }

    /// Set the database args for the node
    pub const fn with_db(mut self, db: DatabaseArgs) -> Self {
        self.db = db;
        self
    }

    /// Set the dev args for the node
    pub const fn with_dev(mut self, dev: DevArgs) -> Self {
        self.dev = dev;
        self
    }

    /// Set the pruning args for the node
    pub const fn with_pruning(mut self, pruning: PruningArgs) -> Self {
        self.pruning = pruning;
        self
    }

    /// Returns pruning configuration.
    pub fn prune_config(&self) -> Option<PruneConfig> {
        self.pruning.prune_config(&self.chain)
    }

    /// Returns the max block that the node should run to, looking it up from the network if
    /// necessary
    ///
    /// `--debug.max-block` takes precedence over `--debug.tip`; if neither is set, there is
    /// no max block and `None` is returned.
    pub async fn max_block<Provider, Client>(
        &self,
        network_client: Client,
        provider: Provider,
    ) -> eyre::Result<Option<BlockNumber>>
    where
        Provider: HeaderProvider,
        Client: HeadersClient,
    {
        let max_block = if let Some(block) = self.debug.max_block {
            Some(block)
        } else if let Some(tip) = self.debug.tip {
            Some(self.lookup_or_fetch_tip(provider, network_client, tip).await?)
        } else {
            None
        };

        Ok(max_block)
    }

    /// Loads '`EnvKzgSettings::Default`'
    pub const fn kzg_settings(&self) -> eyre::Result<EnvKzgSettings> {
        Ok(EnvKzgSettings::Default)
    }

    /// Installs the prometheus recorder.
    ///
    /// Returns a clone of the process-global handle; installation itself happens at most once
    /// (see [`PROMETHEUS_RECORDER_HANDLE`]).
    pub fn install_prometheus_recorder(&self) -> eyre::Result<PrometheusHandle> {
        Ok(PROMETHEUS_RECORDER_HANDLE.clone())
    }

    /// Serves the prometheus endpoint over HTTP with the given database and prometheus handle.
    ///
    /// Does nothing if no metrics listen address (`self.metrics`) is configured.
    pub async fn start_metrics_endpoint<Metrics>(
        &self,
        prometheus_handle: PrometheusHandle,
        db: Metrics,
        static_file_provider: StaticFileProvider,
        task_executor: TaskExecutor,
    ) -> eyre::Result<()>
    where
        Metrics: DatabaseMetrics + 'static + Send + Sync,
    {
        if let Some(listen_addr) = self.metrics {
            info!(target: "reth::cli", addr = %listen_addr, "Starting metrics endpoint");
            prometheus_exporter::serve(
                listen_addr,
                prometheus_handle,
                db,
                static_file_provider,
                metrics_process::Collector::default(),
                task_executor,
            )
            .await?;
        }

        Ok(())
    }

    /// Fetches the head block from the database.
    ///
    /// If the database is empty, returns the genesis block.
    pub fn lookup_head<DB: Database>(&self, factory: ProviderFactory<DB>) -> ProviderResult<Head> {
        let provider = factory.provider()?;

        // the Finish stage checkpoint is the highest fully-processed block; defaults to 0
        // (genesis) on an empty database
        let head = provider.get_stage_checkpoint(StageId::Finish)?.unwrap_or_default().block_number;

        let header = provider
            .header_by_number(head)?
            .expect("the header for the latest block is missing, database is corrupt");

        let total_difficulty = provider
            .header_td_by_number(head)?
            .expect("the total difficulty for the latest block is missing, database is corrupt");

        let hash = provider
            .block_hash(head)?
            .expect("the hash for the latest block is missing, database is corrupt");

        Ok(Head {
            number: head,
            hash,
            difficulty: header.difficulty,
            total_difficulty,
            timestamp: header.timestamp,
        })
    }

    /// Attempt to look up the block number for the tip hash in the database.
    /// If it doesn't exist, download the header and return the block number.
    ///
    /// NOTE: The download is attempted with infinite retries.
    pub async fn lookup_or_fetch_tip<Provider, Client>(
        &self,
        provider: Provider,
        client: Client,
        tip: B256,
    ) -> ProviderResult<u64>
    where
        Provider: HeaderProvider,
        Client: HeadersClient,
    {
        let header = provider.header_by_hash_or_number(tip.into())?;

        // try to look up the header in the database
        if let Some(header) = header {
            info!(target: "reth::cli", ?tip, "Successfully looked up tip block in the database");
            return Ok(header.number)
        }

        // not in the database: download it from the network (retries forever)
        Ok(self.fetch_tip_from_network(client, tip.into()).await.number)
    }

    /// Attempt to look up the block with the given number and return the header.
    ///
    /// NOTE: The download is attempted with infinite retries.
    pub async fn fetch_tip_from_network<Client>(
        &self,
        client: Client,
        tip: BlockHashOrNumber,
    ) -> SealedHeader
    where
        Client: HeadersClient,
    {
        info!(target: "reth::cli", ?tip, "Fetching tip block from the network.");
        let mut fetch_failures = 0;
        loop {
            match get_single_header(&client, tip).await {
                Ok(tip_header) => {
                    info!(target: "reth::cli", ?tip, "Successfully fetched tip");
                    return tip_header
                }
                Err(error) => {
                    fetch_failures += 1;
                    // only log every 20th failure to avoid spamming the logs while retrying
                    if fetch_failures % 20 == 0 {
                        error!(target: "reth::cli", ?fetch_failures, %error, "Failed to fetch the tip. Retrying...");
                    }
                }
            }
        }
    }

    /// Change rpc port numbers based on the instance number, using the inner
    /// [`RpcServerArgs::adjust_instance_ports`] method.
    pub fn adjust_instance_ports(&mut self) {
        self.rpc.adjust_instance_ports(self.instance);
        self.network.adjust_instance_ports(self.instance);
    }

    /// Sets networking and RPC ports to zero, causing the OS to choose random unused ports when
    /// sockets are bound.
    pub fn with_unused_ports(mut self) -> Self {
        self.rpc = self.rpc.with_unused_ports();
        self.network = self.network.with_unused_ports();
        self
    }

    /// Resolve the final datadir path.
    pub fn datadir(&self) -> ChainPath<DataDirPath> {
        self.datadir.clone().resolve_datadir(self.chain.chain)
    }
}
|
||||
|
||||
impl Default for NodeConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
config: None,
|
||||
chain: MAINNET.clone(),
|
||||
metrics: None,
|
||||
instance: 1,
|
||||
network: NetworkArgs::default(),
|
||||
rpc: RpcServerArgs::default(),
|
||||
txpool: TxPoolArgs::default(),
|
||||
builder: PayloadBuilderArgs::default(),
|
||||
debug: DebugArgs::default(),
|
||||
db: DatabaseArgs::default(),
|
||||
dev: DevArgs::default(),
|
||||
pruning: PruningArgs::default(),
|
||||
datadir: DatadirArgs::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
129
crates/node/core/src/utils.rs
Normal file
129
crates/node/core/src/utils.rs
Normal file
@@ -0,0 +1,129 @@
|
||||
//! Utility functions for node startup and shutdown, for example path parsing and retrieving single
|
||||
//! blocks from the network.
|
||||
|
||||
use eyre::Result;
|
||||
use reth_chainspec::ChainSpec;
|
||||
use reth_consensus_common::validation::validate_block_pre_execution;
|
||||
use reth_fs_util as fs;
|
||||
use reth_network::NetworkManager;
|
||||
use reth_network_p2p::{
|
||||
bodies::client::BodiesClient,
|
||||
headers::client::{HeadersClient, HeadersRequest},
|
||||
priority::Priority,
|
||||
};
|
||||
use reth_primitives::{BlockHashOrNumber, HeadersDirection, SealedBlock, SealedHeader};
|
||||
use reth_provider::BlockReader;
|
||||
use reth_rpc_types::engine::{JwtError, JwtSecret};
|
||||
use std::{
|
||||
env::VarError,
|
||||
path::{Path, PathBuf},
|
||||
sync::Arc,
|
||||
};
|
||||
use tracing::{debug, info, trace, warn};
|
||||
|
||||
/// Parses a user-specified path with support for environment variables and common shorthands (e.g.
|
||||
/// ~ for the user's home directory).
|
||||
pub fn parse_path(value: &str) -> Result<PathBuf, shellexpand::LookupError<VarError>> {
|
||||
shellexpand::full(value).map(|path| PathBuf::from(path.into_owned()))
|
||||
}
|
||||
|
||||
/// Attempts to retrieve or create a JWT secret from the specified path.
|
||||
pub fn get_or_create_jwt_secret_from_path(path: &Path) -> Result<JwtSecret, JwtError> {
|
||||
if path.exists() {
|
||||
debug!(target: "reth::cli", ?path, "Reading JWT auth secret file");
|
||||
JwtSecret::from_file(path)
|
||||
} else {
|
||||
info!(target: "reth::cli", ?path, "Creating JWT auth secret file");
|
||||
JwtSecret::try_create_random(path)
|
||||
}
|
||||
}
|
||||
|
||||
/// Collect the peers from the [`NetworkManager`] and write them to the given
|
||||
/// `persistent_peers_file`, if configured.
|
||||
pub fn write_peers_to_file<C>(network: &NetworkManager<C>, persistent_peers_file: Option<PathBuf>)
|
||||
where
|
||||
C: BlockReader + Unpin,
|
||||
{
|
||||
if let Some(file_path) = persistent_peers_file {
|
||||
let known_peers = network.all_peers().collect::<Vec<_>>();
|
||||
if let Ok(known_peers) = serde_json::to_string_pretty(&known_peers) {
|
||||
trace!(target: "reth::cli", peers_file =?file_path, num_peers=%known_peers.len(), "Saving current peers");
|
||||
let parent_dir = file_path.parent().map(fs::create_dir_all).transpose();
|
||||
match parent_dir.and_then(|_| fs::write(&file_path, known_peers)) {
|
||||
Ok(_) => {
|
||||
info!(target: "reth::cli", peers_file=?file_path, "Wrote network peers to file");
|
||||
}
|
||||
Err(err) => {
|
||||
warn!(target: "reth::cli", %err, peers_file=?file_path, "Failed to write network peers to file");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a single header from network
///
/// Requests exactly one header identified by `id` from a peer at high priority,
/// penalizing the peer and returning an error if the response is malformed or
/// does not match the requested block.
pub async fn get_single_header<Client>(
    client: Client,
    id: BlockHashOrNumber,
) -> Result<SealedHeader>
where
    Client: HeadersClient,
{
    // ask for a single header starting at the requested block
    let request = HeadersRequest { direction: HeadersDirection::Rising, limit: 1, start: id };

    let (peer_id, response) =
        client.get_headers_with_priority(request, Priority::High).await?.split();

    // anything other than exactly one header is a protocol violation: penalize the peer
    // before bailing
    if response.len() != 1 {
        client.report_bad_message(peer_id);
        eyre::bail!("Invalid number of headers received. Expected: 1. Received: {}", response.len())
    }

    let header = response.into_iter().next().unwrap().seal_slow();

    // verify the returned header actually matches what was requested
    let valid = match id {
        BlockHashOrNumber::Hash(hash) => header.hash() == hash,
        BlockHashOrNumber::Number(number) => header.number == number,
    };

    if !valid {
        client.report_bad_message(peer_id);
        eyre::bail!(
            "Received invalid header. Received: {:?}. Expected: {:?}",
            header.num_hash(),
            id
        );
    }

    Ok(header)
}
|
||||
|
||||
/// Get a body from network based on header
|
||||
pub async fn get_single_body<Client>(
|
||||
client: Client,
|
||||
chain_spec: Arc<ChainSpec>,
|
||||
header: SealedHeader,
|
||||
) -> Result<SealedBlock>
|
||||
where
|
||||
Client: BodiesClient,
|
||||
{
|
||||
let (peer_id, response) = client.get_block_body(header.hash()).await?.split();
|
||||
|
||||
if response.is_none() {
|
||||
client.report_bad_message(peer_id);
|
||||
eyre::bail!("Invalid number of bodies received. Expected: 1. Received: 0")
|
||||
}
|
||||
|
||||
let block = response.unwrap();
|
||||
let block = SealedBlock {
|
||||
header,
|
||||
body: block.transactions,
|
||||
ommers: block.ommers,
|
||||
withdrawals: block.withdrawals,
|
||||
requests: block.requests,
|
||||
};
|
||||
|
||||
validate_block_pre_execution(&block, &chain_spec)?;
|
||||
|
||||
Ok(block)
|
||||
}
|
||||
145
crates/node/core/src/version.rs
Normal file
145
crates/node/core/src/version.rs
Normal file
@@ -0,0 +1,145 @@
|
||||
//! Version information for reth.
use reth_db_api::models::ClientVersion;
use reth_rpc_types::engine::ClientCode;

/// The client code for Reth
pub const CLIENT_CODE: ClientCode = ClientCode::RH;

/// The human readable name of the client
pub const NAME_CLIENT: &str = "Reth";

/// The latest version from Cargo.toml.
pub const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION");

/// The full SHA of the latest commit.
pub const VERGEN_GIT_SHA_LONG: &str = env!("VERGEN_GIT_SHA");

/// The 8 character short SHA of the latest commit.
///
/// Truncated at compile time from [`VERGEN_GIT_SHA_LONG`] via `const_format`.
pub const VERGEN_GIT_SHA: &str = const_format::str_index!(VERGEN_GIT_SHA_LONG, ..8);

/// The build timestamp.
pub const VERGEN_BUILD_TIMESTAMP: &str = env!("VERGEN_BUILD_TIMESTAMP");
|
||||
|
||||
/// The short version information for reth.
///
/// - The latest version from Cargo.toml
/// - The short SHA of the latest commit.
///
/// An optional suffix is appended from the `RETH_VERSION_SUFFIX` build-time env var.
///
/// # Example
///
/// ```text
/// 0.1.0 (defa64b2)
/// ```
pub const SHORT_VERSION: &str = const_format::concatcp!(
    env!("CARGO_PKG_VERSION"),
    env!("RETH_VERSION_SUFFIX"),
    " (",
    VERGEN_GIT_SHA,
    ")"
);
|
||||
|
||||
/// The long version information for reth.
///
/// - The latest version from Cargo.toml
/// - The long SHA of the latest commit.
/// - The build datetime
/// - The build features
/// - The build profile
///
/// # Example:
///
/// ```text
/// Version: 0.1.0
/// Commit SHA: defa64b2
/// Build Timestamp: 2023-05-19T01:47:19.815651705Z
/// Build Features: jemalloc
/// Build Profile: maxperf
/// ```
pub const LONG_VERSION: &str = const_format::concatcp!(
    "Version: ",
    env!("CARGO_PKG_VERSION"),
    env!("RETH_VERSION_SUFFIX"),
    "\n",
    "Commit SHA: ",
    VERGEN_GIT_SHA_LONG,
    "\n",
    "Build Timestamp: ",
    env!("VERGEN_BUILD_TIMESTAMP"),
    "\n",
    "Build Features: ",
    env!("VERGEN_CARGO_FEATURES"),
    "\n",
    "Build Profile: ",
    build_profile_name()
);
|
||||
|
||||
/// The version information for reth formatted for P2P (devp2p).
///
/// - The latest version from Cargo.toml
/// - The target triple
///
/// # Example
///
/// ```text
/// reth/v{major}.{minor}.{patch}-{sha1}/{target}
/// ```
/// e.g.: `reth/v0.1.0-alpha.1-428a6dc2f/aarch64-apple-darwin`
pub(crate) const P2P_CLIENT_VERSION: &str = const_format::concatcp!(
    "reth/v",
    env!("CARGO_PKG_VERSION"),
    "-",
    VERGEN_GIT_SHA,
    "/",
    env!("VERGEN_CARGO_TARGET_TRIPLE")
);
|
||||
|
||||
/// The default extradata used for payload building.
|
||||
///
|
||||
/// - The latest version from Cargo.toml
|
||||
/// - The OS identifier
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```text
|
||||
/// reth/v{major}.{minor}.{patch}/{OS}
|
||||
/// ```
|
||||
pub fn default_extradata() -> String {
|
||||
format!("reth/v{}/{}", env!("CARGO_PKG_VERSION"), std::env::consts::OS)
|
||||
}
|
||||
|
||||
/// The default client version accessing the database.
|
||||
pub fn default_client_version() -> ClientVersion {
|
||||
ClientVersion {
|
||||
version: CARGO_PKG_VERSION.to_string(),
|
||||
git_sha: VERGEN_GIT_SHA.to_string(),
|
||||
build_timestamp: VERGEN_BUILD_TIMESTAMP.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the cargo build profile name (e.g. `debug`, `release`, `maxperf`) the binary
/// was compiled with, derived at compile time from the `OUT_DIR` path layout.
pub(crate) const fn build_profile_name() -> &'static str {
    // Derived from https://stackoverflow.com/questions/73595435/how-to-get-profile-from-cargo-toml-in-build-rs-or-at-runtime
    // We split on the path separator of the *host* machine, which may be different from
    // `std::path::MAIN_SEPARATOR_STR`.
    const OUT_DIR: &str = env!("OUT_DIR");
    // assumes OUT_DIR looks like `.../target/<profile>/build/<crate>-<hash>/out`, so the
    // profile is the 4th path component from the end — TODO confirm for custom target dirs
    let unix_parts = const_format::str_split!(OUT_DIR, '/');
    if unix_parts.len() >= 4 {
        unix_parts[unix_parts.len() - 4]
    } else {
        // fewer than 4 `/`-separated parts: presumably a Windows host path, split on `\` instead
        let win_parts = const_format::str_split!(OUT_DIR, '\\');
        win_parts[win_parts.len() - 4]
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn assert_extradata_less_32bytes() {
        let extradata = default_extradata();
        // Ethereum consensus limits block extradata to 32 bytes, so the default must fit.
        assert!(
            extradata.as_bytes().len() <= 32,
            "extradata must be less than 32 bytes: {extradata}"
        )
    }
}
|
||||
Reference in New Issue
Block a user