Mirror of https://github.com/paradigmxyz/reth.git (synced 2026-01-11 00:08:13 -05:00)

Merge branch 'main' into new-approach3
@@ -15,3 +15,7 @@ slow-timeout = { period = "2m", terminate-after = 10 }
[[profile.default.overrides]]
filter = "binary(e2e_testsuite)"
slow-timeout = { period = "2m", terminate-after = 3 }

[[profile.default.overrides]]
filter = "package(reth-era) and binary(it)"
slow-timeout = { period = "2m", terminate-after = 10 }
.github/CODEOWNERS (vendored, 1 change)
@@ -40,5 +40,6 @@ crates/tasks/ @mattsse
crates/tokio-util/ @fgimenez
crates/transaction-pool/ @mattsse @yongkangc
crates/trie/ @Rjected @shekhirin @mediocregopher
bin/reth-bench-compare/ @mediocregopher @shekhirin @yongkangc
etc/ @Rjected @shekhirin
.github/ @gakonst @DaniPopes
.github/workflows/integration.yml (vendored, 2 changes)
@@ -78,4 +78,4 @@ jobs:
with:
cache-on-failure: true
- name: run era1 files integration tests
run: cargo nextest run --package reth-era --test it -- --ignored
run: cargo nextest run --release --package reth-era --test it -- --ignored
.github/workflows/lint.yml (vendored, 7 changes)
@@ -92,7 +92,12 @@ jobs:
run: .github/assets/check_rv32imac.sh

crate-checks:
name: crate-checks (${{ matrix.partition }}/${{ matrix.total_partitions }})
runs-on: ubuntu-latest
strategy:
matrix:
partition: [1, 2]
total_partitions: [2]
timeout-minutes: 30
steps:
- uses: actions/checkout@v5
@@ -102,7 +107,7 @@ jobs:
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
- run: cargo hack check --workspace
- run: cargo hack check --workspace --partition ${{ matrix.partition }}/${{ matrix.total_partitions }}

msrv:
name: MSRV
Cargo.lock (generated, 614 changes): file diff suppressed because it is too large
Cargo.toml (62 changes)
@@ -1,5 +1,5 @@
[workspace.package]
version = "1.9.2"
version = "1.9.3"
edition = "2024"
rust-version = "1.88"
license = "MIT OR Apache-2.0"
@@ -483,7 +483,7 @@ revm-inspectors = "0.32.0"

alloy-primitives = { version = "1.4.1", default-features = false, features = ["map-foldhash"] }
alloy-chains = { version = "0.2.5", default-features = false }
alloy-evm = { version = "0.23.1", default-features = false }
alloy-evm = { version = "0.24.2", default-features = false }
alloy-dyn-abi = "1.4.1"
alloy-eip2124 = { version = "0.2.0", default-features = false }
alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] }
@@ -493,35 +493,35 @@ alloy-trie = { version = "0.9.1", default-features = false }

alloy-hardforks = "0.4.4"

alloy-consensus = { version = "1.1.0", default-features = false }
alloy-contract = { version = "1.1.0", default-features = false }
alloy-eips = { version = "1.1.0", default-features = false }
alloy-genesis = { version = "1.1.0", default-features = false }
alloy-json-rpc = { version = "1.1.0", default-features = false }
alloy-network = { version = "1.1.0", default-features = false }
alloy-network-primitives = { version = "1.1.0", default-features = false }
alloy-provider = { version = "1.1.0", features = ["reqwest"], default-features = false }
alloy-pubsub = { version = "1.1.0", default-features = false }
alloy-rpc-client = { version = "1.1.0", default-features = false }
alloy-rpc-types = { version = "1.1.0", features = ["eth"], default-features = false }
alloy-rpc-types-admin = { version = "1.1.0", default-features = false }
alloy-rpc-types-anvil = { version = "1.1.0", default-features = false }
alloy-rpc-types-beacon = { version = "1.1.0", default-features = false }
alloy-rpc-types-debug = { version = "1.1.0", default-features = false }
alloy-rpc-types-engine = { version = "1.1.0", default-features = false }
alloy-rpc-types-eth = { version = "1.1.0", default-features = false }
alloy-rpc-types-mev = { version = "1.1.0", default-features = false }
alloy-rpc-types-trace = { version = "1.1.0", default-features = false }
alloy-rpc-types-txpool = { version = "1.1.0", default-features = false }
alloy-serde = { version = "1.1.0", default-features = false }
alloy-signer = { version = "1.1.0", default-features = false }
alloy-signer-local = { version = "1.1.0", default-features = false }
alloy-transport = { version = "1.1.0" }
alloy-transport-http = { version = "1.1.0", features = ["reqwest-rustls-tls"], default-features = false }
alloy-transport-ipc = { version = "1.1.0", default-features = false }
alloy-transport-ws = { version = "1.1.0", default-features = false }
alloy-consensus = { version = "1.1.2", default-features = false }
alloy-contract = { version = "1.1.2", default-features = false }
alloy-eips = { version = "1.1.2", default-features = false }
alloy-genesis = { version = "1.1.2", default-features = false }
alloy-json-rpc = { version = "1.1.2", default-features = false }
alloy-network = { version = "1.1.2", default-features = false }
alloy-network-primitives = { version = "1.1.2", default-features = false }
alloy-provider = { version = "1.1.2", features = ["reqwest"], default-features = false }
alloy-pubsub = { version = "1.1.2", default-features = false }
alloy-rpc-client = { version = "1.1.2", default-features = false }
alloy-rpc-types = { version = "1.1.2", features = ["eth"], default-features = false }
alloy-rpc-types-admin = { version = "1.1.2", default-features = false }
alloy-rpc-types-anvil = { version = "1.1.2", default-features = false }
alloy-rpc-types-beacon = { version = "1.1.2", default-features = false }
alloy-rpc-types-debug = { version = "1.1.2", default-features = false }
alloy-rpc-types-engine = { version = "1.1.2", default-features = false }
alloy-rpc-types-eth = { version = "1.1.2", default-features = false }
alloy-rpc-types-mev = { version = "1.1.2", default-features = false }
alloy-rpc-types-trace = { version = "1.1.2", default-features = false }
alloy-rpc-types-txpool = { version = "1.1.2", default-features = false }
alloy-serde = { version = "1.1.2", default-features = false }
alloy-signer = { version = "1.1.2", default-features = false }
alloy-signer-local = { version = "1.1.2", default-features = false }
alloy-transport = { version = "1.1.2" }
alloy-transport-http = { version = "1.1.2", features = ["reqwest-rustls-tls"], default-features = false }
alloy-transport-ipc = { version = "1.1.2", default-features = false }
alloy-transport-ws = { version = "1.1.2", default-features = false }
# op
alloy-op-evm = { version = "0.23.1", default-features = false }
alloy-op-evm = { version = "0.24.2", default-features = false }
alloy-op-hardforks = "0.4.3"
op-alloy-rpc-types = { version = "0.22.0", default-features = false }
op-alloy-rpc-types-engine = { version = "0.22.0", default-features = false }
@@ -728,6 +728,8 @@ vergen = "9.0.4"
visibility = "0.1.1"
walkdir = "2.3.3"
vergen-git2 = "1.0.5"
# networking
ipnet = "2.11"

[patch.crates-io]
alloy-consensus = { git = "https://github.com/Soubhik-10/alloy", branch = "bal" }
@@ -103,7 +103,8 @@ impl BenchmarkRunner {
|
||||
cmd.args(["--wait-time", wait_time]);
|
||||
}
|
||||
|
||||
cmd.stdout(std::process::Stdio::piped())
|
||||
cmd.env("RUST_LOG_STYLE", "never")
|
||||
.stdout(std::process::Stdio::piped())
|
||||
.stderr(std::process::Stdio::piped())
|
||||
.kill_on_drop(true);
|
||||
|
||||
@@ -190,7 +191,8 @@ impl BenchmarkRunner {
|
||||
cmd.args(["--wait-time", wait_time]);
|
||||
}
|
||||
|
||||
cmd.stdout(std::process::Stdio::piped())
|
||||
cmd.env("RUST_LOG_STYLE", "never")
|
||||
.stdout(std::process::Stdio::piped())
|
||||
.stderr(std::process::Stdio::piped())
|
||||
.kill_on_drop(true);
|
||||
|
||||
|
||||
@@ -134,6 +134,16 @@ pub(crate) struct Args {
|
||||
#[command(flatten)]
|
||||
pub traces: TraceArgs,
|
||||
|
||||
/// Maximum queue size for OTLP Batch Span Processor (traces).
|
||||
/// Higher values prevent trace drops when benchmarking many blocks.
|
||||
#[arg(
|
||||
long,
|
||||
value_name = "OTLP_BUFFER_SIZE",
|
||||
default_value = "32768",
|
||||
help_heading = "Tracing"
|
||||
)]
|
||||
pub otlp_max_queue_size: usize,
|
||||
|
||||
/// Additional arguments to pass to baseline reth node command
|
||||
///
|
||||
/// Example: `--baseline-args "--debug.tip 0xabc..."`
|
||||
|
||||
@@ -6,6 +6,7 @@ use csv::Reader;
|
||||
use eyre::{eyre, Result, WrapErr};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
cmp::Ordering,
|
||||
collections::HashMap,
|
||||
fs,
|
||||
path::{Path, PathBuf},
|
||||
@@ -36,6 +37,7 @@ pub(crate) struct BenchmarkResults {
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub(crate) struct CombinedLatencyRow {
|
||||
pub block_number: u64,
|
||||
pub transaction_count: u64,
|
||||
pub gas_used: u64,
|
||||
pub new_payload_latency: u128,
|
||||
}
|
||||
@@ -44,17 +46,26 @@ pub(crate) struct CombinedLatencyRow {
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub(crate) struct TotalGasRow {
|
||||
pub block_number: u64,
|
||||
pub transaction_count: u64,
|
||||
pub gas_used: u64,
|
||||
pub time: u128,
|
||||
}
|
||||
|
||||
/// Summary statistics for a benchmark run
|
||||
/// Summary statistics for a benchmark run.
|
||||
///
|
||||
/// Latencies are derived from per-block `engine_newPayload` timings (converted from µs to ms):
|
||||
/// - `mean_new_payload_latency_ms`: arithmetic mean latency across blocks.
|
||||
/// - `median_new_payload_latency_ms`: p50 latency across blocks.
|
||||
/// - `p90_new_payload_latency_ms` / `p99_new_payload_latency_ms`: tail latencies across blocks.
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub(crate) struct BenchmarkSummary {
|
||||
pub total_blocks: u64,
|
||||
pub total_gas_used: u64,
|
||||
pub total_duration_ms: u128,
|
||||
pub avg_new_payload_latency_ms: f64,
|
||||
pub mean_new_payload_latency_ms: f64,
|
||||
pub median_new_payload_latency_ms: f64,
|
||||
pub p90_new_payload_latency_ms: f64,
|
||||
pub p99_new_payload_latency_ms: f64,
|
||||
pub gas_per_second: f64,
|
||||
pub blocks_per_second: f64,
|
||||
pub min_block_number: u64,
|
||||
@@ -80,10 +91,26 @@ pub(crate) struct RefInfo {
|
||||
pub end_timestamp: Option<DateTime<Utc>>,
|
||||
}
|
||||
|
||||
/// Summary of the comparison between references
|
||||
/// Summary of the comparison between references.
|
||||
///
|
||||
/// Percent deltas are `(feature - baseline) / baseline * 100`:
|
||||
/// - `new_payload_latency_p50_change_percent` / p90 / p99: percent changes of the respective
|
||||
/// per-block percentiles.
|
||||
/// - `per_block_latency_change_mean_percent` / `per_block_latency_change_median_percent` are the
|
||||
/// mean and median of per-block percent deltas (feature vs baseline), capturing block-level
|
||||
/// drift.
|
||||
/// - `new_payload_total_latency_change_percent` is the percent change of the total newPayload time
|
||||
/// across the run.
|
||||
///
|
||||
/// Positive means slower/higher; negative means faster/lower.
|
||||
#[derive(Debug, Serialize)]
|
||||
pub(crate) struct ComparisonSummary {
|
||||
pub new_payload_latency_change_percent: f64,
|
||||
pub per_block_latency_change_mean_percent: f64,
|
||||
pub per_block_latency_change_median_percent: f64,
|
||||
pub new_payload_total_latency_change_percent: f64,
|
||||
pub new_payload_latency_p50_change_percent: f64,
|
||||
pub new_payload_latency_p90_change_percent: f64,
|
||||
pub new_payload_latency_p99_change_percent: f64,
|
||||
pub gas_per_second_change_percent: f64,
|
||||
pub blocks_per_second_change_percent: f64,
|
||||
}
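As a quick illustration of the delta convention documented above, here is a small standalone sketch (editorial, not part of this diff) of the percent-change helper that the comparison code builds as a closure; the example values are hypothetical.

// Editorial sketch of the (feature - baseline) / baseline * 100 convention;
// mirrors the calc_percent_change closure shown later in this diff.
fn calc_percent_change(baseline: f64, feature: f64) -> f64 {
    if baseline.abs() > f64::EPSILON {
        (feature - baseline) / baseline * 100.0
    } else {
        0.0
    }
}

fn main() {
    // Hypothetical p50 values: 10 ms baseline vs 9 ms feature => -10% (faster/lower).
    assert!((calc_percent_change(10.0, 9.0) - (-10.0)).abs() < 1e-9);
    // 10 ms baseline vs 12 ms feature => +20% (slower/higher).
    assert!((calc_percent_change(10.0, 12.0) - 20.0).abs() < 1e-9);
    // A zero baseline is reported as 0% rather than dividing by zero.
    assert_eq!(calc_percent_change(0.0, 5.0), 0.0);
}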
|
||||
@@ -92,6 +119,8 @@ pub(crate) struct ComparisonSummary {
|
||||
#[derive(Debug, Serialize)]
|
||||
pub(crate) struct BlockComparison {
|
||||
pub block_number: u64,
|
||||
pub transaction_count: u64,
|
||||
pub gas_used: u64,
|
||||
pub baseline_new_payload_latency: u128,
|
||||
pub feature_new_payload_latency: u128,
|
||||
pub new_payload_latency_change_percent: f64,
|
||||
@@ -184,10 +213,12 @@ impl ComparisonGenerator {
|
||||
let feature =
|
||||
self.feature_results.as_ref().ok_or_else(|| eyre!("Feature results not loaded"))?;
|
||||
|
||||
// Generate comparison
|
||||
let comparison_summary =
|
||||
self.calculate_comparison_summary(&baseline.summary, &feature.summary)?;
|
||||
let per_block_comparisons = self.calculate_per_block_comparisons(baseline, feature)?;
|
||||
let comparison_summary = self.calculate_comparison_summary(
|
||||
&baseline.summary,
|
||||
&feature.summary,
|
||||
&per_block_comparisons,
|
||||
)?;
|
||||
|
||||
let report = ComparisonReport {
|
||||
timestamp: self.timestamp.clone(),
|
||||
@@ -277,7 +308,11 @@ impl ComparisonGenerator {
|
||||
Ok(rows)
|
||||
}
|
||||
|
||||
/// Calculate summary statistics for a benchmark run
|
||||
/// Calculate summary statistics for a benchmark run.
|
||||
///
|
||||
/// Computes latency statistics from per-block `new_payload_latency` values in `combined_data`
|
||||
/// (converting from µs to ms), and throughput metrics using the total run duration from
|
||||
/// `total_gas_data`. Percentiles (p50/p90/p99) use linear interpolation on sorted latencies.
|
||||
fn calculate_summary(
|
||||
&self,
|
||||
combined_data: &[CombinedLatencyRow],
|
||||
@@ -292,9 +327,16 @@ impl ComparisonGenerator {
|
||||
|
||||
let total_duration_ms = total_gas_data.last().unwrap().time / 1000; // Convert microseconds to milliseconds
|
||||
|
||||
let avg_new_payload_latency_ms: f64 =
|
||||
combined_data.iter().map(|r| r.new_payload_latency as f64 / 1000.0).sum::<f64>() /
|
||||
total_blocks as f64;
|
||||
let latencies_ms: Vec<f64> =
|
||||
combined_data.iter().map(|r| r.new_payload_latency as f64 / 1000.0).collect();
|
||||
let mean_new_payload_latency_ms: f64 =
|
||||
latencies_ms.iter().sum::<f64>() / total_blocks as f64;
|
||||
|
||||
let mut sorted_latencies_ms = latencies_ms;
|
||||
sorted_latencies_ms.sort_by(|a, b| a.partial_cmp(b).unwrap_or(Ordering::Equal));
|
||||
let median_new_payload_latency_ms = percentile(&sorted_latencies_ms, 0.5);
|
||||
let p90_new_payload_latency_ms = percentile(&sorted_latencies_ms, 0.9);
|
||||
let p99_new_payload_latency_ms = percentile(&sorted_latencies_ms, 0.99);
|
||||
|
||||
let total_duration_seconds = total_duration_ms as f64 / 1000.0;
|
||||
let gas_per_second = if total_duration_seconds > f64::EPSILON {
|
||||
@@ -316,7 +358,10 @@ impl ComparisonGenerator {
|
||||
total_blocks,
|
||||
total_gas_used,
|
||||
total_duration_ms,
|
||||
avg_new_payload_latency_ms,
|
||||
mean_new_payload_latency_ms,
|
||||
median_new_payload_latency_ms,
|
||||
p90_new_payload_latency_ms,
|
||||
p99_new_payload_latency_ms,
|
||||
gas_per_second,
|
||||
blocks_per_second,
|
||||
min_block_number,
|
||||
@@ -329,6 +374,7 @@ impl ComparisonGenerator {
|
||||
&self,
|
||||
baseline: &BenchmarkSummary,
|
||||
feature: &BenchmarkSummary,
|
||||
per_block_comparisons: &[BlockComparison],
|
||||
) -> Result<ComparisonSummary> {
|
||||
let calc_percent_change = |baseline: f64, feature: f64| -> f64 {
|
||||
if baseline.abs() > f64::EPSILON {
|
||||
@@ -338,10 +384,43 @@ impl ComparisonGenerator {
|
||||
}
|
||||
};
|
||||
|
||||
let per_block_percent_changes: Vec<f64> =
|
||||
per_block_comparisons.iter().map(|c| c.new_payload_latency_change_percent).collect();
|
||||
let per_block_latency_change_mean_percent = if per_block_percent_changes.is_empty() {
|
||||
0.0
|
||||
} else {
|
||||
per_block_percent_changes.iter().sum::<f64>() / per_block_percent_changes.len() as f64
|
||||
};
|
||||
let per_block_latency_change_median_percent = if per_block_percent_changes.is_empty() {
|
||||
0.0
|
||||
} else {
|
||||
let mut sorted = per_block_percent_changes;
|
||||
sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(Ordering::Equal));
|
||||
percentile(&sorted, 0.5)
|
||||
};
|
||||
|
||||
let baseline_total_latency_ms =
|
||||
baseline.mean_new_payload_latency_ms * baseline.total_blocks as f64;
|
||||
let feature_total_latency_ms =
|
||||
feature.mean_new_payload_latency_ms * feature.total_blocks as f64;
|
||||
let new_payload_total_latency_change_percent =
|
||||
calc_percent_change(baseline_total_latency_ms, feature_total_latency_ms);
|
||||
|
||||
Ok(ComparisonSummary {
|
||||
new_payload_latency_change_percent: calc_percent_change(
|
||||
baseline.avg_new_payload_latency_ms,
|
||||
feature.avg_new_payload_latency_ms,
|
||||
per_block_latency_change_mean_percent,
|
||||
per_block_latency_change_median_percent,
|
||||
new_payload_total_latency_change_percent,
|
||||
new_payload_latency_p50_change_percent: calc_percent_change(
|
||||
baseline.median_new_payload_latency_ms,
|
||||
feature.median_new_payload_latency_ms,
|
||||
),
|
||||
new_payload_latency_p90_change_percent: calc_percent_change(
|
||||
baseline.p90_new_payload_latency_ms,
|
||||
feature.p90_new_payload_latency_ms,
|
||||
),
|
||||
new_payload_latency_p99_change_percent: calc_percent_change(
|
||||
baseline.p99_new_payload_latency_ms,
|
||||
feature.p99_new_payload_latency_ms,
|
||||
),
|
||||
gas_per_second_change_percent: calc_percent_change(
|
||||
baseline.gas_per_second,
|
||||
@@ -378,6 +457,8 @@ impl ComparisonGenerator {
|
||||
|
||||
let comparison = BlockComparison {
|
||||
block_number: feature_row.block_number,
|
||||
transaction_count: feature_row.transaction_count,
|
||||
gas_used: feature_row.gas_used,
|
||||
baseline_new_payload_latency: baseline_row.new_payload_latency,
|
||||
feature_new_payload_latency: feature_row.new_payload_latency,
|
||||
new_payload_latency_change_percent: calc_percent_change(
|
||||
@@ -443,9 +524,38 @@ impl ComparisonGenerator {
|
||||
let summary = &report.comparison_summary;
|
||||
|
||||
println!("Performance Changes:");
|
||||
println!(" NewPayload Latency: {:+.2}%", summary.new_payload_latency_change_percent);
|
||||
println!(" Gas/Second: {:+.2}%", summary.gas_per_second_change_percent);
|
||||
println!(" Blocks/Second: {:+.2}%", summary.blocks_per_second_change_percent);
|
||||
println!(
|
||||
" NewPayload Latency per-block mean change: {:+.2}%",
|
||||
summary.per_block_latency_change_mean_percent
|
||||
);
|
||||
println!(
|
||||
" NewPayload Latency per-block median change: {:+.2}%",
|
||||
summary.per_block_latency_change_median_percent
|
||||
);
|
||||
println!(
|
||||
" Total newPayload time change: {:+.2}%",
|
||||
summary.new_payload_total_latency_change_percent
|
||||
);
|
||||
println!(
|
||||
" NewPayload Latency p50: {:+.2}%",
|
||||
summary.new_payload_latency_p50_change_percent
|
||||
);
|
||||
println!(
|
||||
" NewPayload Latency p90: {:+.2}%",
|
||||
summary.new_payload_latency_p90_change_percent
|
||||
);
|
||||
println!(
|
||||
" NewPayload Latency p99: {:+.2}%",
|
||||
summary.new_payload_latency_p99_change_percent
|
||||
);
|
||||
println!(
|
||||
" Gas/Second: {:+.2}%",
|
||||
summary.gas_per_second_change_percent
|
||||
);
|
||||
println!(
|
||||
" Blocks/Second: {:+.2}%",
|
||||
summary.blocks_per_second_change_percent
|
||||
);
|
||||
println!();
|
||||
|
||||
println!("Baseline Summary:");
|
||||
@@ -458,7 +568,14 @@ impl ComparisonGenerator {
|
||||
baseline.total_gas_used,
|
||||
baseline.total_duration_ms as f64 / 1000.0
|
||||
);
|
||||
println!(" Avg NewPayload: {:.2}ms", baseline.avg_new_payload_latency_ms);
|
||||
println!(" NewPayload latency (ms):");
|
||||
println!(
|
||||
" mean: {:.2}, p50: {:.2}, p90: {:.2}, p99: {:.2}",
|
||||
baseline.mean_new_payload_latency_ms,
|
||||
baseline.median_new_payload_latency_ms,
|
||||
baseline.p90_new_payload_latency_ms,
|
||||
baseline.p99_new_payload_latency_ms
|
||||
);
|
||||
if let (Some(start), Some(end)) =
|
||||
(&report.baseline.start_timestamp, &report.baseline.end_timestamp)
|
||||
{
|
||||
@@ -480,7 +597,14 @@ impl ComparisonGenerator {
|
||||
feature.total_gas_used,
|
||||
feature.total_duration_ms as f64 / 1000.0
|
||||
);
|
||||
println!(" Avg NewPayload: {:.2}ms", feature.avg_new_payload_latency_ms);
|
||||
println!(" NewPayload latency (ms):");
|
||||
println!(
|
||||
" mean: {:.2}, p50: {:.2}, p90: {:.2}, p99: {:.2}",
|
||||
feature.mean_new_payload_latency_ms,
|
||||
feature.median_new_payload_latency_ms,
|
||||
feature.p90_new_payload_latency_ms,
|
||||
feature.p99_new_payload_latency_ms
|
||||
);
|
||||
if let (Some(start), Some(end)) =
|
||||
(&report.feature.start_timestamp, &report.feature.end_timestamp)
|
||||
{
|
||||
@@ -493,3 +617,29 @@ impl ComparisonGenerator {
|
||||
println!();
|
||||
}
|
||||
}
|
||||
|
||||
/// Calculate percentile using linear interpolation on a sorted slice.
///
/// Computes `rank = percentile × (n - 1)` where n is the array length. If the rank falls
/// between two indices, linearly interpolates between those values. For example, with 100 values,
/// p90 computes rank = 0.9 × 99 = 89.1, then returns `values[89] × 0.9 + values[90] × 0.1`.
///
/// Returns 0.0 for empty input.
fn percentile(sorted_values: &[f64], percentile: f64) -> f64 {
    if sorted_values.is_empty() {
        return 0.0;
    }

    let clamped = percentile.clamp(0.0, 1.0);
    let max_index = sorted_values.len() - 1;
    let rank = clamped * max_index as f64;
    let lower = rank.floor() as usize;
    let upper = rank.ceil() as usize;

    if lower == upper {
        sorted_values[lower]
    } else {
        let weight = rank - lower as f64;
        sorted_values[lower].mul_add(1.0 - weight, sorted_values[upper] * weight)
    }
}
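A minimal check of the interpolation described in the doc comment above, written as a hypothetical test module that assumes it sits next to the `percentile` function (not part of this diff):

#[cfg(test)]
mod percentile_sketch {
    // Editorial sketch; `super::percentile` refers to the function above.
    use super::percentile;

    #[test]
    fn p90_linearly_interpolates() {
        // 100 sorted values: 0.0, 1.0, ..., 99.0
        let values: Vec<f64> = (0..100).map(|v| v as f64).collect();
        // rank = 0.9 * 99 = 89.1 -> values[89] * 0.9 + values[90] * 0.1 = 89.1
        assert!((percentile(&values, 0.9) - 89.1).abs() < 1e-9);
        // Out-of-range percentiles are clamped; empty input yields 0.0.
        assert_eq!(percentile(&values, 1.5), 99.0);
        assert_eq!(percentile(&[], 0.5), 0.0);
    }
}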
|
||||
|
||||
@@ -30,6 +30,7 @@ pub(crate) struct NodeManager {
|
||||
additional_reth_args: Vec<String>,
|
||||
comparison_dir: Option<PathBuf>,
|
||||
tracing_endpoint: Option<String>,
|
||||
otlp_max_queue_size: usize,
|
||||
}
|
||||
|
||||
impl NodeManager {
|
||||
@@ -46,6 +47,7 @@ impl NodeManager {
|
||||
additional_reth_args: args.reth_args.clone(),
|
||||
comparison_dir: None,
|
||||
tracing_endpoint: args.traces.otlp.as_ref().map(|u| u.to_string()),
|
||||
otlp_max_queue_size: args.otlp_max_queue_size,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -203,6 +205,9 @@ impl NodeManager {
|
||||
cmd.arg("--");
|
||||
cmd.args(reth_args);
|
||||
|
||||
// Set environment variable to disable log styling
|
||||
cmd.env("RUST_LOG_STYLE", "never");
|
||||
|
||||
Ok(cmd)
|
||||
}
|
||||
|
||||
@@ -210,17 +215,22 @@ impl NodeManager {
|
||||
fn create_direct_command(&self, reth_args: &[String]) -> Command {
|
||||
let binary_path = &reth_args[0];
|
||||
|
||||
if self.use_sudo {
|
||||
let mut cmd = if self.use_sudo {
|
||||
info!("Starting reth node with sudo...");
|
||||
let mut cmd = Command::new("sudo");
|
||||
cmd.args(reth_args);
|
||||
cmd
|
||||
let mut sudo_cmd = Command::new("sudo");
|
||||
sudo_cmd.args(reth_args);
|
||||
sudo_cmd
|
||||
} else {
|
||||
info!("Starting reth node...");
|
||||
let mut cmd = Command::new(binary_path);
|
||||
cmd.args(&reth_args[1..]); // Skip the binary path since it's the command
|
||||
cmd
|
||||
}
|
||||
let mut reth_cmd = Command::new(binary_path);
|
||||
reth_cmd.args(&reth_args[1..]); // Skip the binary path since it's the command
|
||||
reth_cmd
|
||||
};
|
||||
|
||||
// Set environment variable to disable log styling
|
||||
cmd.env("RUST_LOG_STYLE", "never");
|
||||
|
||||
cmd
|
||||
}
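The hunk above folds the shared `RUST_LOG_STYLE` setting out of the two branches: each branch now only builds the `Command`, and the environment variable is applied once afterwards. A standalone sketch of that build-then-configure shape, using `std::process::Command` for illustration (the real code uses the tokio command type):

// Editorial sketch of the branch-then-shared-configuration pattern.
use std::process::Command;

fn build_command(use_sudo: bool, reth_args: &[String]) -> Command {
    let mut cmd = if use_sudo {
        // sudo wraps the full argument list, including the binary path.
        let mut sudo_cmd = Command::new("sudo");
        sudo_cmd.args(reth_args);
        sudo_cmd
    } else {
        // Run the binary directly; skip the binary path in the remaining args.
        let mut reth_cmd = Command::new(&reth_args[0]);
        reth_cmd.args(&reth_args[1..]);
        reth_cmd
    };

    // Shared configuration applied exactly once, regardless of branch.
    cmd.env("RUST_LOG_STYLE", "never");
    cmd
}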
|
||||
|
||||
/// Start a reth node using the specified binary path and return the process handle
|
||||
@@ -259,7 +269,9 @@ impl NodeManager {
|
||||
|
||||
// Set high queue size to prevent trace dropping during benchmarks
|
||||
if self.tracing_endpoint.is_some() {
|
||||
cmd.env("OTEL_BLRP_MAX_QUEUE_SIZE", "10000");
|
||||
cmd.env("OTEL_BSP_MAX_QUEUE_SIZE", self.otlp_max_queue_size.to_string()); // Traces
|
||||
cmd.env("OTEL_BLRP_MAX_QUEUE_SIZE", "10000"); // Logs
|
||||
|
||||
// Set service name to differentiate baseline vs feature runs in Jaeger
|
||||
cmd.env("OTEL_SERVICE_NAME", format!("reth-{}", ref_type));
|
||||
}
|
||||
@@ -485,6 +497,9 @@ impl NodeManager {
|
||||
|
||||
cmd.args(["to-block", &block_number.to_string()]);
|
||||
|
||||
// Set environment variable to disable log styling
|
||||
cmd.env("RUST_LOG_STYLE", "never");
|
||||
|
||||
// Debug log the command
|
||||
debug!("Executing reth unwind command: {:?}", cmd);
|
||||
|
||||
|
||||
@@ -79,22 +79,13 @@ impl Command {
|
||||
break;
|
||||
}
|
||||
};
|
||||
let header = block.header.clone();
|
||||
|
||||
let (version, params) = match block_to_new_payload(block, is_optimism) {
|
||||
Ok(result) => result,
|
||||
Err(e) => {
|
||||
tracing::error!("Failed to convert block to new payload: {e}");
|
||||
let _ = error_sender.send(e);
|
||||
break;
|
||||
}
|
||||
};
|
||||
let head_block_hash = header.hash;
|
||||
let safe_block_hash =
|
||||
block_provider.get_block_by_number(header.number.saturating_sub(32).into());
|
||||
let head_block_hash = block.header.hash;
|
||||
let safe_block_hash = block_provider
|
||||
.get_block_by_number(block.header.number.saturating_sub(32).into());
|
||||
|
||||
let finalized_block_hash =
|
||||
block_provider.get_block_by_number(header.number.saturating_sub(64).into());
|
||||
let finalized_block_hash = block_provider
|
||||
.get_block_by_number(block.header.number.saturating_sub(64).into());
|
||||
|
||||
let (safe, finalized) = tokio::join!(safe_block_hash, finalized_block_hash,);
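The safe and finalized targets trail the head by 32 and 64 blocks, and `saturating_sub` keeps those block numbers valid near genesis instead of underflowing. A tiny illustrative check (not from this diff):

fn main() {
    // Editorial sketch: near genesis both offsets clamp to block 0.
    let head: u64 = 10;
    assert_eq!(head.saturating_sub(32), 0); // safe target
    assert_eq!(head.saturating_sub(64), 0); // finalized target
    assert_eq!(100u64.saturating_sub(32), 68);
}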
|
||||
|
||||
@@ -110,14 +101,7 @@ impl Command {
|
||||
|
||||
next_block += 1;
|
||||
if let Err(e) = sender
|
||||
.send((
|
||||
header,
|
||||
version,
|
||||
params,
|
||||
head_block_hash,
|
||||
safe_block_hash,
|
||||
finalized_block_hash,
|
||||
))
|
||||
.send((block, head_block_hash, safe_block_hash, finalized_block_hash))
|
||||
.await
|
||||
{
|
||||
tracing::error!("Failed to send block data: {e}");
|
||||
@@ -131,15 +115,16 @@ impl Command {
|
||||
let total_benchmark_duration = Instant::now();
|
||||
let mut total_wait_time = Duration::ZERO;
|
||||
|
||||
while let Some((header, version, params, head, safe, finalized)) = {
|
||||
while let Some((block, head, safe, finalized)) = {
|
||||
let wait_start = Instant::now();
|
||||
let result = receiver.recv().await;
|
||||
total_wait_time += wait_start.elapsed();
|
||||
result
|
||||
} {
|
||||
// just put gas used here
|
||||
let gas_used = header.gas_used;
|
||||
let block_number = header.number;
|
||||
let gas_used = block.header.gas_used;
|
||||
let block_number = block.header.number;
|
||||
let transaction_count = block.transactions.len() as u64;
|
||||
|
||||
debug!(target: "reth-bench", ?block_number, "Sending payload",);
|
||||
|
||||
@@ -150,6 +135,7 @@ impl Command {
|
||||
finalized_block_hash: finalized,
|
||||
};
|
||||
|
||||
let (version, params) = block_to_new_payload(block, is_optimism)?;
|
||||
let start = Instant::now();
|
||||
call_new_payload(&auth_provider, version, params).await?;
|
||||
|
||||
@@ -160,8 +146,13 @@ impl Command {
|
||||
// calculate the total duration and the fcu latency, record
|
||||
let total_latency = start.elapsed();
|
||||
let fcu_latency = total_latency - new_payload_result.latency;
|
||||
let combined_result =
|
||||
CombinedResult { block_number, new_payload_result, fcu_latency, total_latency };
|
||||
let combined_result = CombinedResult {
|
||||
block_number,
|
||||
transaction_count,
|
||||
new_payload_result,
|
||||
fcu_latency,
|
||||
total_latency,
|
||||
};
|
||||
|
||||
// current duration since the start of the benchmark minus the time
|
||||
// waiting for blocks
|
||||
@@ -174,7 +165,8 @@ impl Command {
|
||||
tokio::time::sleep(self.wait_time).await;
|
||||
|
||||
// record the current result
|
||||
let gas_row = TotalGasRow { block_number, gas_used, time: current_duration };
|
||||
let gas_row =
|
||||
TotalGasRow { block_number, transaction_count, gas_used, time: current_duration };
|
||||
results.push((gas_row, combined_result));
|
||||
}
|
||||
|
||||
|
||||
@@ -72,19 +72,9 @@ impl Command {
|
||||
break;
|
||||
}
|
||||
};
|
||||
let header = block.header.clone();
|
||||
|
||||
let (version, params) = match block_to_new_payload(block, is_optimism) {
|
||||
Ok(result) => result,
|
||||
Err(e) => {
|
||||
tracing::error!("Failed to convert block to new payload: {e}");
|
||||
let _ = error_sender.send(e);
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
next_block += 1;
|
||||
if let Err(e) = sender.send((header, version, params)).await {
|
||||
if let Err(e) = sender.send(block).await {
|
||||
tracing::error!("Failed to send block data: {e}");
|
||||
break;
|
||||
}
|
||||
@@ -96,23 +86,24 @@ impl Command {
|
||||
let total_benchmark_duration = Instant::now();
|
||||
let mut total_wait_time = Duration::ZERO;
|
||||
|
||||
while let Some((header, version, params)) = {
|
||||
while let Some(block) = {
|
||||
let wait_start = Instant::now();
|
||||
let result = receiver.recv().await;
|
||||
total_wait_time += wait_start.elapsed();
|
||||
result
|
||||
} {
|
||||
// just put gas used here
|
||||
let gas_used = header.gas_used;
|
||||
|
||||
let block_number = header.number;
|
||||
let block_number = block.header.number;
|
||||
let transaction_count = block.transactions.len() as u64;
|
||||
let gas_used = block.header.gas_used;
|
||||
|
||||
debug!(
|
||||
target: "reth-bench",
|
||||
number=?header.number,
|
||||
number=?block.header.number,
|
||||
"Sending payload to engine",
|
||||
);
|
||||
|
||||
let (version, params) = block_to_new_payload(block, is_optimism)?;
|
||||
|
||||
let start = Instant::now();
|
||||
call_new_payload(&auth_provider, version, params).await?;
|
||||
|
||||
@@ -124,7 +115,8 @@ impl Command {
|
||||
let current_duration = total_benchmark_duration.elapsed() - total_wait_time;
|
||||
|
||||
// record the current result
|
||||
let row = TotalGasRow { block_number, gas_used, time: current_duration };
|
||||
let row =
|
||||
TotalGasRow { block_number, transaction_count, gas_used, time: current_duration };
|
||||
results.push((row, new_payload_result));
|
||||
}
|
||||
|
||||
|
||||
@@ -67,6 +67,8 @@ impl Serialize for NewPayloadResult {
|
||||
pub(crate) struct CombinedResult {
|
||||
/// The block number of the block being processed.
|
||||
pub(crate) block_number: u64,
|
||||
/// The number of transactions in the block.
|
||||
pub(crate) transaction_count: u64,
|
||||
/// The `newPayload` result.
|
||||
pub(crate) new_payload_result: NewPayloadResult,
|
||||
/// The latency of the `forkchoiceUpdated` call.
|
||||
@@ -108,10 +110,11 @@ impl Serialize for CombinedResult {
|
||||
let fcu_latency = self.fcu_latency.as_micros();
|
||||
let new_payload_latency = self.new_payload_result.latency.as_micros();
|
||||
let total_latency = self.total_latency.as_micros();
|
||||
let mut state = serializer.serialize_struct("CombinedResult", 5)?;
|
||||
let mut state = serializer.serialize_struct("CombinedResult", 6)?;
|
||||
|
||||
// flatten the new payload result because this is meant for CSV writing
|
||||
state.serialize_field("block_number", &self.block_number)?;
|
||||
state.serialize_field("transaction_count", &self.transaction_count)?;
|
||||
state.serialize_field("gas_used", &self.new_payload_result.gas_used)?;
|
||||
state.serialize_field("new_payload_latency", &new_payload_latency)?;
|
||||
state.serialize_field("fcu_latency", &fcu_latency)?;
|
||||
@@ -125,6 +128,8 @@ impl Serialize for CombinedResult {
|
||||
pub(crate) struct TotalGasRow {
|
||||
/// The block number of the block being processed.
|
||||
pub(crate) block_number: u64,
|
||||
/// The number of transactions in the block.
|
||||
pub(crate) transaction_count: u64,
|
||||
/// The total gas used in the block.
|
||||
pub(crate) gas_used: u64,
|
||||
/// Time since the start of the benchmark.
|
||||
@@ -172,8 +177,9 @@ impl Serialize for TotalGasRow {
|
||||
{
|
||||
// convert the time to microseconds
|
||||
let time = self.time.as_micros();
|
||||
let mut state = serializer.serialize_struct("TotalGasRow", 3)?;
|
||||
let mut state = serializer.serialize_struct("TotalGasRow", 4)?;
|
||||
state.serialize_field("block_number", &self.block_number)?;
|
||||
state.serialize_field("transaction_count", &self.transaction_count)?;
|
||||
state.serialize_field("gas_used", &self.gas_used)?;
|
||||
state.serialize_field("time", &time)?;
|
||||
state.end()
|
||||
@@ -188,7 +194,12 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_write_total_gas_row_csv() {
|
||||
let row = TotalGasRow { block_number: 1, gas_used: 1_000, time: Duration::from_secs(1) };
|
||||
let row = TotalGasRow {
|
||||
block_number: 1,
|
||||
transaction_count: 10,
|
||||
gas_used: 1_000,
|
||||
time: Duration::from_secs(1),
|
||||
};
|
||||
|
||||
let mut writer = Writer::from_writer(vec![]);
|
||||
writer.serialize(row).unwrap();
|
||||
@@ -198,11 +209,11 @@ mod tests {
|
||||
let mut result = result.as_slice().lines();
|
||||
|
||||
// assert header
|
||||
let expected_first_line = "block_number,gas_used,time";
|
||||
let expected_first_line = "block_number,transaction_count,gas_used,time";
|
||||
let first_line = result.next().unwrap().unwrap();
|
||||
assert_eq!(first_line, expected_first_line);
|
||||
|
||||
let expected_second_line = "1,1000,1000000";
|
||||
let expected_second_line = "1,10,1000,1000000";
|
||||
let second_line = result.next().unwrap().unwrap();
|
||||
assert_eq!(second_line, expected_second_line);
|
||||
}
|
||||
|
||||
@@ -17,7 +17,7 @@ use reth_primitives_traits::{
|
||||
SignedTransaction,
|
||||
};
|
||||
use reth_storage_api::StateProviderBox;
|
||||
use reth_trie::{updates::TrieUpdates, HashedPostState};
|
||||
use reth_trie::{updates::TrieUpdatesSorted, HashedPostStateSorted};
|
||||
use std::{collections::BTreeMap, sync::Arc, time::Instant};
|
||||
use tokio::sync::{broadcast, watch};
|
||||
|
||||
@@ -725,10 +725,10 @@ pub struct ExecutedBlock<N: NodePrimitives = EthPrimitives> {
|
||||
pub recovered_block: Arc<RecoveredBlock<N::Block>>,
|
||||
/// Block's execution outcome.
|
||||
pub execution_output: Arc<ExecutionOutcome<N::Receipt>>,
|
||||
/// Block's hashed state.
|
||||
pub hashed_state: Arc<HashedPostState>,
|
||||
/// Trie updates that result from calculating the state root for the block.
|
||||
pub trie_updates: Arc<TrieUpdates>,
|
||||
/// Block's sorted hashed state.
|
||||
pub hashed_state: Arc<HashedPostStateSorted>,
|
||||
/// Sorted trie updates that result from calculating the state root for the block.
|
||||
pub trie_updates: Arc<TrieUpdatesSorted>,
|
||||
}
|
||||
|
||||
impl<N: NodePrimitives> Default for ExecutedBlock<N> {
|
||||
@@ -763,13 +763,13 @@ impl<N: NodePrimitives> ExecutedBlock<N> {
|
||||
|
||||
/// Returns a reference to the hashed state result of the execution outcome
|
||||
#[inline]
|
||||
pub fn hashed_state(&self) -> &HashedPostState {
|
||||
pub fn hashed_state(&self) -> &HashedPostStateSorted {
|
||||
&self.hashed_state
|
||||
}
|
||||
|
||||
/// Returns a reference to the trie updates resulting from the execution outcome
|
||||
#[inline]
|
||||
pub fn trie_updates(&self) -> &TrieUpdates {
|
||||
pub fn trie_updates(&self) -> &TrieUpdatesSorted {
|
||||
&self.trie_updates
|
||||
}
|
||||
|
||||
@@ -875,8 +875,8 @@ mod tests {
|
||||
StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider,
|
||||
};
|
||||
use reth_trie::{
|
||||
AccountProof, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof,
|
||||
StorageProof, TrieInput,
|
||||
updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof,
|
||||
MultiProofTargets, StorageMultiProof, StorageProof, TrieInput,
|
||||
};
|
||||
|
||||
fn create_mock_state(
|
||||
|
||||
@@ -53,7 +53,7 @@ impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> {
|
||||
/// Return lazy-loaded trie state aggregated from in-memory blocks.
|
||||
fn trie_input(&self) -> &TrieInput {
|
||||
self.trie_input.get_or_init(|| {
|
||||
TrieInput::from_blocks(
|
||||
TrieInput::from_blocks_sorted(
|
||||
self.in_memory
|
||||
.iter()
|
||||
.rev()
|
||||
|
||||
@@ -23,7 +23,7 @@ use reth_primitives_traits::{
|
||||
SignedTransaction,
|
||||
};
|
||||
use reth_storage_api::NodePrimitivesProvider;
|
||||
use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState};
|
||||
use reth_trie::{root::state_root_unhashed, updates::TrieUpdatesSorted, HashedPostStateSorted};
|
||||
use revm_database::BundleState;
|
||||
use revm_state::AccountInfo;
|
||||
use std::{
|
||||
@@ -217,8 +217,8 @@ impl<N: NodePrimitives> TestBlockBuilder<N> {
|
||||
block_number,
|
||||
vec![Requests::default()],
|
||||
)),
|
||||
hashed_state: Arc::new(HashedPostState::default()),
|
||||
trie_updates: Arc::new(TrieUpdates::default()),
|
||||
hashed_state: Arc::new(HashedPostStateSorted::default()),
|
||||
trie_updates: Arc::new(TrieUpdatesSorted::default()),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1399,72 +1399,72 @@ Post-merge hard forks (timestamp based):
|
||||
&[
|
||||
(
|
||||
EthereumHardfork::Frontier,
|
||||
ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 },
|
||||
ForkId { hash: ForkHash(hex!("0xfc64ec04")), next: 1150000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Homestead,
|
||||
ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 },
|
||||
ForkId { hash: ForkHash(hex!("0x97c2c34c")), next: 1920000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Dao,
|
||||
ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 },
|
||||
ForkId { hash: ForkHash(hex!("0x91d1f948")), next: 2463000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Tangerine,
|
||||
ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 2675000 },
|
||||
ForkId { hash: ForkHash(hex!("0x7a64da13")), next: 2675000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::SpuriousDragon,
|
||||
ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 },
|
||||
ForkId { hash: ForkHash(hex!("0x3edd5b10")), next: 4370000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Byzantium,
|
||||
ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 },
|
||||
ForkId { hash: ForkHash(hex!("0xa00bc324")), next: 7280000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Constantinople,
|
||||
ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 },
|
||||
ForkId { hash: ForkHash(hex!("0x668db0af")), next: 9069000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Petersburg,
|
||||
ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 },
|
||||
ForkId { hash: ForkHash(hex!("0x668db0af")), next: 9069000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Istanbul,
|
||||
ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 },
|
||||
ForkId { hash: ForkHash(hex!("0x879d6e30")), next: 9200000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::MuirGlacier,
|
||||
ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 },
|
||||
ForkId { hash: ForkHash(hex!("0xe029e991")), next: 12244000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Berlin,
|
||||
ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 },
|
||||
ForkId { hash: ForkHash(hex!("0x0eb440f6")), next: 12965000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::London,
|
||||
ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 },
|
||||
ForkId { hash: ForkHash(hex!("0xb715077d")), next: 13773000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::ArrowGlacier,
|
||||
ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 },
|
||||
ForkId { hash: ForkHash(hex!("0x20c327fc")), next: 15050000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::GrayGlacier,
|
||||
ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 1681338455 },
|
||||
ForkId { hash: ForkHash(hex!("0xf0afd0e3")), next: 1681338455 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Shanghai,
|
||||
ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 1710338135 },
|
||||
ForkId { hash: ForkHash(hex!("0xdce96c2d")), next: 1710338135 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Cancun,
|
||||
ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 1746612311 },
|
||||
ForkId { hash: ForkHash(hex!("0x9f3d2254")), next: 1746612311 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Prague,
|
||||
ForkId {
|
||||
hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]),
|
||||
hash: ForkHash(hex!("0xc376cf8b")),
|
||||
next: mainnet::MAINNET_OSAKA_TIMESTAMP,
|
||||
},
|
||||
),
|
||||
@@ -1479,60 +1479,60 @@ Post-merge hard forks (timestamp based):
|
||||
&[
|
||||
(
|
||||
EthereumHardfork::Frontier,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Homestead,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Tangerine,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::SpuriousDragon,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Byzantium,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Constantinople,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Petersburg,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Istanbul,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Berlin,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::London,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Paris,
|
||||
ForkId { hash: ForkHash([0xb9, 0x6c, 0xbd, 0x13]), next: 1677557088 },
|
||||
ForkId { hash: ForkHash(hex!("0xb96cbd13")), next: 1677557088 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Shanghai,
|
||||
ForkId { hash: ForkHash([0xf7, 0xf9, 0xbc, 0x08]), next: 1706655072 },
|
||||
ForkId { hash: ForkHash(hex!("0xf7f9bc08")), next: 1706655072 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Cancun,
|
||||
ForkId { hash: ForkHash([0x88, 0xcf, 0x81, 0xd9]), next: 1741159776 },
|
||||
ForkId { hash: ForkHash(hex!("0x88cf81d9")), next: 1741159776 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Prague,
|
||||
ForkId {
|
||||
hash: ForkHash([0xed, 0x88, 0xb5, 0xfd]),
|
||||
hash: ForkHash(hex!("0xed88b5fd")),
|
||||
next: sepolia::SEPOLIA_OSAKA_TIMESTAMP,
|
||||
},
|
||||
),
|
||||
@@ -1547,71 +1547,71 @@ Post-merge hard forks (timestamp based):
|
||||
&[
|
||||
(
|
||||
Head { number: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 },
|
||||
ForkId { hash: ForkHash(hex!("0xfc64ec04")), next: 1150000 },
|
||||
),
|
||||
(
|
||||
Head { number: 1150000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 },
|
||||
ForkId { hash: ForkHash(hex!("0x97c2c34c")), next: 1920000 },
|
||||
),
|
||||
(
|
||||
Head { number: 1920000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 },
|
||||
ForkId { hash: ForkHash(hex!("0x91d1f948")), next: 2463000 },
|
||||
),
|
||||
(
|
||||
Head { number: 2463000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 2675000 },
|
||||
ForkId { hash: ForkHash(hex!("0x7a64da13")), next: 2675000 },
|
||||
),
|
||||
(
|
||||
Head { number: 2675000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 },
|
||||
ForkId { hash: ForkHash(hex!("0x3edd5b10")), next: 4370000 },
|
||||
),
|
||||
(
|
||||
Head { number: 4370000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 },
|
||||
ForkId { hash: ForkHash(hex!("0xa00bc324")), next: 7280000 },
|
||||
),
|
||||
(
|
||||
Head { number: 7280000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 },
|
||||
ForkId { hash: ForkHash(hex!("0x668db0af")), next: 9069000 },
|
||||
),
|
||||
(
|
||||
Head { number: 9069000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 },
|
||||
ForkId { hash: ForkHash(hex!("0x879d6e30")), next: 9200000 },
|
||||
),
|
||||
(
|
||||
Head { number: 9200000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 },
|
||||
ForkId { hash: ForkHash(hex!("0xe029e991")), next: 12244000 },
|
||||
),
|
||||
(
|
||||
Head { number: 12244000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 },
|
||||
ForkId { hash: ForkHash(hex!("0x0eb440f6")), next: 12965000 },
|
||||
),
|
||||
(
|
||||
Head { number: 12965000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 },
|
||||
ForkId { hash: ForkHash(hex!("0xb715077d")), next: 13773000 },
|
||||
),
|
||||
(
|
||||
Head { number: 13773000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 },
|
||||
ForkId { hash: ForkHash(hex!("0x20c327fc")), next: 15050000 },
|
||||
),
|
||||
(
|
||||
Head { number: 15050000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 1681338455 },
|
||||
ForkId { hash: ForkHash(hex!("0xf0afd0e3")), next: 1681338455 },
|
||||
),
|
||||
// First Shanghai block
|
||||
(
|
||||
Head { number: 20000000, timestamp: 1681338455, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 1710338135 },
|
||||
ForkId { hash: ForkHash(hex!("0xdce96c2d")), next: 1710338135 },
|
||||
),
|
||||
// First Cancun block
|
||||
(
|
||||
Head { number: 20000001, timestamp: 1710338135, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 1746612311 },
|
||||
ForkId { hash: ForkHash(hex!("0x9f3d2254")), next: 1746612311 },
|
||||
),
|
||||
// First Prague block
|
||||
(
|
||||
Head { number: 20000004, timestamp: 1746612311, ..Default::default() },
|
||||
ForkId {
|
||||
hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]),
|
||||
hash: ForkHash(hex!("0xc376cf8b")),
|
||||
next: mainnet::MAINNET_OSAKA_TIMESTAMP,
|
||||
},
|
||||
),
|
||||
@@ -1638,13 +1638,13 @@ Post-merge hard forks (timestamp based):
|
||||
&[
|
||||
(
|
||||
Head { number: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xbe, 0xf7, 0x1d, 0x30]), next: 1742999832 },
|
||||
ForkId { hash: ForkHash(hex!("0xbef71d30")), next: 1742999832 },
|
||||
),
|
||||
// First Prague block
|
||||
(
|
||||
Head { number: 0, timestamp: 1742999833, ..Default::default() },
|
||||
ForkId {
|
||||
hash: ForkHash([0x09, 0x29, 0xe2, 0x4e]),
|
||||
hash: ForkHash(hex!("0x0929e24e")),
|
||||
next: hoodi::HOODI_OSAKA_TIMESTAMP,
|
||||
},
|
||||
),
|
||||
@@ -1671,43 +1671,43 @@ Post-merge hard forks (timestamp based):
|
||||
&[
|
||||
(
|
||||
Head { number: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xc6, 0x1a, 0x60, 0x98]), next: 1696000704 },
|
||||
ForkId { hash: ForkHash(hex!("0xc61a6098")), next: 1696000704 },
|
||||
),
|
||||
// First MergeNetsplit block
|
||||
(
|
||||
Head { number: 123, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xc6, 0x1a, 0x60, 0x98]), next: 1696000704 },
|
||||
ForkId { hash: ForkHash(hex!("0xc61a6098")), next: 1696000704 },
|
||||
),
|
||||
// Last MergeNetsplit block
|
||||
(
|
||||
Head { number: 123, timestamp: 1696000703, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xc6, 0x1a, 0x60, 0x98]), next: 1696000704 },
|
||||
ForkId { hash: ForkHash(hex!("0xc61a6098")), next: 1696000704 },
|
||||
),
|
||||
// First Shanghai block
|
||||
(
|
||||
Head { number: 123, timestamp: 1696000704, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xfd, 0x4f, 0x01, 0x6b]), next: 1707305664 },
|
||||
ForkId { hash: ForkHash(hex!("0xfd4f016b")), next: 1707305664 },
|
||||
),
|
||||
// Last Shanghai block
|
||||
(
|
||||
Head { number: 123, timestamp: 1707305663, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xfd, 0x4f, 0x01, 0x6b]), next: 1707305664 },
|
||||
ForkId { hash: ForkHash(hex!("0xfd4f016b")), next: 1707305664 },
|
||||
),
|
||||
// First Cancun block
|
||||
(
|
||||
Head { number: 123, timestamp: 1707305664, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x9b, 0x19, 0x2a, 0xd0]), next: 1740434112 },
|
||||
ForkId { hash: ForkHash(hex!("0x9b192ad0")), next: 1740434112 },
|
||||
),
|
||||
// Last Cancun block
|
||||
(
|
||||
Head { number: 123, timestamp: 1740434111, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x9b, 0x19, 0x2a, 0xd0]), next: 1740434112 },
|
||||
ForkId { hash: ForkHash(hex!("0x9b192ad0")), next: 1740434112 },
|
||||
),
|
||||
// First Prague block
|
||||
(
|
||||
Head { number: 123, timestamp: 1740434112, ..Default::default() },
|
||||
ForkId {
|
||||
hash: ForkHash([0xdf, 0xbd, 0x9b, 0xed]),
|
||||
hash: ForkHash(hex!("0xdfbd9bed")),
|
||||
next: holesky::HOLESKY_OSAKA_TIMESTAMP,
|
||||
},
|
||||
),
|
||||
@@ -1734,45 +1734,45 @@ Post-merge hard forks (timestamp based):
|
||||
&[
|
||||
(
|
||||
Head { number: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
Head { number: 1735370, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
Head { number: 1735371, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xb9, 0x6c, 0xbd, 0x13]), next: 1677557088 },
|
||||
ForkId { hash: ForkHash(hex!("0xb96cbd13")), next: 1677557088 },
|
||||
),
|
||||
(
|
||||
Head { number: 1735372, timestamp: 1677557087, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xb9, 0x6c, 0xbd, 0x13]), next: 1677557088 },
|
||||
ForkId { hash: ForkHash(hex!("0xb96cbd13")), next: 1677557088 },
|
||||
),
|
||||
// First Shanghai block
|
||||
(
|
||||
Head { number: 1735373, timestamp: 1677557088, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xf7, 0xf9, 0xbc, 0x08]), next: 1706655072 },
|
||||
ForkId { hash: ForkHash(hex!("0xf7f9bc08")), next: 1706655072 },
|
||||
),
|
||||
// Last Shanghai block
|
||||
(
|
||||
Head { number: 1735374, timestamp: 1706655071, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xf7, 0xf9, 0xbc, 0x08]), next: 1706655072 },
|
||||
ForkId { hash: ForkHash(hex!("0xf7f9bc08")), next: 1706655072 },
|
||||
),
|
||||
// First Cancun block
|
||||
(
|
||||
Head { number: 1735375, timestamp: 1706655072, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x88, 0xcf, 0x81, 0xd9]), next: 1741159776 },
|
||||
ForkId { hash: ForkHash(hex!("0x88cf81d9")), next: 1741159776 },
|
||||
),
|
||||
// Last Cancun block
|
||||
(
|
||||
Head { number: 1735376, timestamp: 1741159775, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x88, 0xcf, 0x81, 0xd9]), next: 1741159776 },
|
||||
ForkId { hash: ForkHash(hex!("0x88cf81d9")), next: 1741159776 },
|
||||
),
|
||||
// First Prague block
|
||||
(
|
||||
Head { number: 1735377, timestamp: 1741159776, ..Default::default() },
|
||||
ForkId {
|
||||
hash: ForkHash([0xed, 0x88, 0xb5, 0xfd]),
|
||||
hash: ForkHash(hex!("0xed88b5fd")),
|
||||
next: sepolia::SEPOLIA_OSAKA_TIMESTAMP,
|
||||
},
|
||||
),
|
||||
@@ -1798,7 +1798,7 @@ Post-merge hard forks (timestamp based):
|
||||
&DEV,
|
||||
&[(
|
||||
Head { number: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x0b, 0x1a, 0x4e, 0xf7]), next: 0 },
|
||||
ForkId { hash: ForkHash(hex!("0x0b1a4ef7")), next: 0 },
|
||||
)],
|
||||
)
|
||||
}
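Across these fork-ID test tables the change is purely notational: each explicit `[u8; 4]` literal inside `ForkHash(...)` is rewritten with the `hex!` macro, which produces the identical byte array. A hypothetical standalone check (assuming `hex!` is the alloy-primitives / const-hex re-export, which accepts an optional `0x` prefix):

// Editorial sketch: the hex! literal form equals the old explicit byte-array form.
use alloy_primitives::hex;

fn main() {
    assert_eq!(hex!("0xfc64ec04"), [0xfc, 0x64, 0xec, 0x04]);
    assert_eq!(hex!("0x0b1a4ef7"), [0x0b, 0x1a, 0x4e, 0xf7]);
}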
|
||||
@@ -1814,128 +1814,128 @@ Post-merge hard forks (timestamp based):
|
||||
&[
|
||||
(
|
||||
Head { number: 0, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 },
ForkId { hash: ForkHash(hex!("0xfc64ec04")), next: 1150000 },
), // Unsynced
(
Head { number: 1149999, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 },
ForkId { hash: ForkHash(hex!("0xfc64ec04")), next: 1150000 },
), // Last Frontier block
(
Head { number: 1150000, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 },
ForkId { hash: ForkHash(hex!("0x97c2c34c")), next: 1920000 },
), // First Homestead block
(
Head { number: 1919999, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 },
ForkId { hash: ForkHash(hex!("0x97c2c34c")), next: 1920000 },
), // Last Homestead block
(
Head { number: 1920000, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 },
ForkId { hash: ForkHash(hex!("0x91d1f948")), next: 2463000 },
), // First DAO block
(
Head { number: 2462999, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 },
ForkId { hash: ForkHash(hex!("0x91d1f948")), next: 2463000 },
), // Last DAO block
(
Head { number: 2463000, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 2675000 },
ForkId { hash: ForkHash(hex!("0x7a64da13")), next: 2675000 },
), // First Tangerine block
(
Head { number: 2674999, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 2675000 },
ForkId { hash: ForkHash(hex!("0x7a64da13")), next: 2675000 },
), // Last Tangerine block
(
Head { number: 2675000, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 },
ForkId { hash: ForkHash(hex!("0x3edd5b10")), next: 4370000 },
), // First Spurious block
(
Head { number: 4369999, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 },
ForkId { hash: ForkHash(hex!("0x3edd5b10")), next: 4370000 },
), // Last Spurious block
(
Head { number: 4370000, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 },
ForkId { hash: ForkHash(hex!("0xa00bc324")), next: 7280000 },
), // First Byzantium block
(
Head { number: 7279999, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 },
ForkId { hash: ForkHash(hex!("0xa00bc324")), next: 7280000 },
), // Last Byzantium block
(
Head { number: 7280000, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 },
ForkId { hash: ForkHash(hex!("0x668db0af")), next: 9069000 },
), // First and last Constantinople, first Petersburg block
(
Head { number: 9068999, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 },
ForkId { hash: ForkHash(hex!("0x668db0af")), next: 9069000 },
), // Last Petersburg block
(
Head { number: 9069000, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 },
ForkId { hash: ForkHash(hex!("0x879d6e30")), next: 9200000 },
), // First Istanbul and first Muir Glacier block
(
Head { number: 9199999, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 },
ForkId { hash: ForkHash(hex!("0x879d6e30")), next: 9200000 },
), // Last Istanbul and first Muir Glacier block
(
Head { number: 9200000, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 },
ForkId { hash: ForkHash(hex!("0xe029e991")), next: 12244000 },
), // First Muir Glacier block
(
Head { number: 12243999, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 },
ForkId { hash: ForkHash(hex!("0xe029e991")), next: 12244000 },
), // Last Muir Glacier block
(
Head { number: 12244000, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 },
ForkId { hash: ForkHash(hex!("0x0eb440f6")), next: 12965000 },
), // First Berlin block
(
Head { number: 12964999, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 },
ForkId { hash: ForkHash(hex!("0x0eb440f6")), next: 12965000 },
), // Last Berlin block
(
Head { number: 12965000, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 },
ForkId { hash: ForkHash(hex!("0xb715077d")), next: 13773000 },
), // First London block
(
Head { number: 13772999, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 },
ForkId { hash: ForkHash(hex!("0xb715077d")), next: 13773000 },
), // Last London block
(
Head { number: 13773000, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 },
ForkId { hash: ForkHash(hex!("0x20c327fc")), next: 15050000 },
), // First Arrow Glacier block
(
Head { number: 15049999, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 },
ForkId { hash: ForkHash(hex!("0x20c327fc")), next: 15050000 },
), // Last Arrow Glacier block
(
Head { number: 15050000, timestamp: 0, ..Default::default() },
ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 1681338455 },
ForkId { hash: ForkHash(hex!("0xf0afd0e3")), next: 1681338455 },
), // First Gray Glacier block
(
Head { number: 19999999, timestamp: 1667999999, ..Default::default() },
ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 1681338455 },
ForkId { hash: ForkHash(hex!("0xf0afd0e3")), next: 1681338455 },
), // Last Gray Glacier block
(
Head { number: 20000000, timestamp: 1681338455, ..Default::default() },
ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 1710338135 },
ForkId { hash: ForkHash(hex!("0xdce96c2d")), next: 1710338135 },
), // Last Shanghai block
(
Head { number: 20000001, timestamp: 1710338134, ..Default::default() },
ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 1710338135 },
ForkId { hash: ForkHash(hex!("0xdce96c2d")), next: 1710338135 },
), // First Cancun block
(
Head { number: 20000002, timestamp: 1710338135, ..Default::default() },
ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 1746612311 },
ForkId { hash: ForkHash(hex!("0x9f3d2254")), next: 1746612311 },
), // Last Cancun block
(
Head { number: 20000003, timestamp: 1746612310, ..Default::default() },
ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 1746612311 },
ForkId { hash: ForkHash(hex!("0x9f3d2254")), next: 1746612311 },
), // First Prague block
(
Head { number: 20000004, timestamp: 1746612311, ..Default::default() },
ForkId {
hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]),
hash: ForkHash(hex!("0xc376cf8b")),
next: mainnet::MAINNET_OSAKA_TIMESTAMP,
},
),
@@ -2405,7 +2405,7 @@ Post-merge hard forks (timestamp based):
let chainspec = ChainSpec::from(genesis);

// make sure we are at ForkHash("bc0c2605") with Head post-cancun
let expected_forkid = ForkId { hash: ForkHash([0xbc, 0x0c, 0x26, 0x05]), next: 0 };
let expected_forkid = ForkId { hash: ForkHash(hex!("0xbc0c2605")), next: 0 };
let got_forkid =
chainspec.fork_id(&Head { number: 73, timestamp: 840, ..Default::default() });

@@ -2515,7 +2515,7 @@ Post-merge hard forks (timestamp based):
assert_eq!(genesis_hash, expected_hash);

// check that the forkhash is correct
let expected_forkhash = ForkHash(hex!("8062457a"));
let expected_forkhash = ForkHash(hex!("0x8062457a"));
assert_eq!(ForkHash::from(genesis_hash), expected_forkhash);
}

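The only change across these fork-vector hunks is notational: byte-array literals become `0x`-prefixed `hex!` string literals. A minimal sketch of why the two spellings are interchangeable, assuming `ForkHash` is a newtype over `[u8; 4]` and `hex!` is the `alloy_primitives` byte-literal macro (the stand-in type below is hypothetical, not the real one):

```rust
use alloy_primitives::hex;

/// Hypothetical stand-in for the real `ForkHash` newtype over `[u8; 4]`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ForkHash([u8; 4]);

fn main() {
    // `hex!` expands to a byte array at compile time; the leading `0x` is optional.
    let from_hex = ForkHash(hex!("0xfc64ec04"));
    let from_bytes = ForkHash([0xfc, 0x64, 0xec, 0x04]);
    assert_eq!(from_hex, from_bytes);
}
```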
@@ -7,7 +7,7 @@ use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_config::{config::EtlConfig, Config};
|
||||
use reth_consensus::noop::NoopConsensus;
|
||||
use reth_db::{init_db, open_db_read_only, DatabaseEnv};
|
||||
use reth_db_common::init::init_genesis;
|
||||
use reth_db_common::init::init_genesis_with_settings;
|
||||
use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader};
|
||||
use reth_eth_wire::NetPrimitivesFor;
|
||||
use reth_evm::{noop::NoopEvmConfig, ConfigureEvm};
|
||||
@@ -17,7 +17,7 @@ use reth_node_builder::{
|
||||
Node, NodeComponents, NodeComponentsBuilder, NodeTypes, NodeTypesWithDBAdapter,
|
||||
};
|
||||
use reth_node_core::{
|
||||
args::{DatabaseArgs, DatadirArgs},
|
||||
args::{DatabaseArgs, DatadirArgs, StaticFilesArgs},
|
||||
dirs::{ChainPath, DataDirPath},
|
||||
};
|
||||
use reth_provider::{
|
||||
@@ -57,6 +57,10 @@ pub struct EnvironmentArgs<C: ChainSpecParser> {
|
||||
/// All database related arguments
|
||||
#[command(flatten)]
|
||||
pub db: DatabaseArgs,
|
||||
|
||||
/// All static files related arguments
|
||||
#[command(flatten)]
|
||||
pub static_files: StaticFilesArgs,
|
||||
}
|
||||
|
||||
impl<C: ChainSpecParser> EnvironmentArgs<C> {
|
||||
@@ -97,16 +101,16 @@ impl<C: ChainSpecParser> EnvironmentArgs<C> {
|
||||
Arc::new(init_db(db_path, self.db.database_args())?),
|
||||
StaticFileProvider::read_write(sf_path)?,
|
||||
),
|
||||
AccessRights::RO => (
|
||||
AccessRights::RO | AccessRights::RoInconsistent => (
|
||||
Arc::new(open_db_read_only(&db_path, self.db.database_args())?),
|
||||
StaticFileProvider::read_only(sf_path, false)?,
|
||||
),
|
||||
};
|
||||
|
||||
let provider_factory = self.create_provider_factory(&config, db, sfp)?;
|
||||
let provider_factory = self.create_provider_factory(&config, db, sfp, access)?;
|
||||
if access.is_read_write() {
|
||||
debug!(target: "reth::cli", chain=%self.chain.chain(), genesis=?self.chain.genesis_hash(), "Initializing genesis");
|
||||
init_genesis(&provider_factory)?;
|
||||
init_genesis_with_settings(&provider_factory, self.static_files.to_settings())?;
|
||||
}
|
||||
|
||||
Ok(Environment { config, provider_factory, data_dir })
|
||||
@@ -122,11 +126,11 @@ impl<C: ChainSpecParser> EnvironmentArgs<C> {
|
||||
config: &Config,
|
||||
db: Arc<DatabaseEnv>,
|
||||
static_file_provider: StaticFileProvider<N::Primitives>,
|
||||
access: AccessRights,
|
||||
) -> eyre::Result<ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>>
|
||||
where
|
||||
C: ChainSpecParser<ChainSpec = N::ChainSpec>,
|
||||
{
|
||||
let has_receipt_pruning = config.prune.has_receipts_pruning();
|
||||
let prune_modes = config.prune.segments.clone();
|
||||
let factory = ProviderFactory::<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>::new(
|
||||
db,
|
||||
@@ -136,9 +140,9 @@ impl<C: ChainSpecParser> EnvironmentArgs<C> {
|
||||
.with_prune_modes(prune_modes.clone());
|
||||
|
||||
// Check for consistency between database and static files.
|
||||
if let Some(unwind_target) = factory
|
||||
.static_file_provider()
|
||||
.check_consistency(&factory.provider()?, has_receipt_pruning)?
|
||||
if !access.is_read_only_inconsistent() &&
|
||||
let Some(unwind_target) =
|
||||
factory.static_file_provider().check_consistency(&factory.provider()?)?
|
||||
{
|
||||
if factory.db_ref().is_read_only()? {
|
||||
warn!(target: "reth::cli", ?unwind_target, "Inconsistent storage. Restart node to heal.");
|
||||
@@ -199,6 +203,8 @@ pub enum AccessRights {
|
||||
RW,
|
||||
/// Read-only access
|
||||
RO,
|
||||
/// Read-only access with possibly inconsistent data
|
||||
RoInconsistent,
|
||||
}
|
||||
|
||||
impl AccessRights {
|
||||
@@ -206,6 +212,12 @@ impl AccessRights {
|
||||
pub const fn is_read_write(&self) -> bool {
|
||||
matches!(self, Self::RW)
|
||||
}
|
||||
|
||||
/// Returns `true` if it requires read-only access to the environment with possibly inconsistent
|
||||
/// data.
|
||||
pub const fn is_read_only_inconsistent(&self) -> bool {
|
||||
matches!(self, Self::RoInconsistent)
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper alias to satisfy `FullNodeTypes` bound on [`Node`] trait generic.
|
||||
|
||||
@@ -22,13 +22,14 @@ impl Command {
|
||||
let config = if self.default {
|
||||
Config::default()
|
||||
} else {
|
||||
let path = self.config.clone().unwrap_or_default();
|
||||
// Check if the file exists
|
||||
let path = match self.config.as_ref() {
|
||||
Some(path) => path,
|
||||
None => bail!("No config file provided. Use --config <FILE> or pass --default"),
|
||||
};
|
||||
if !path.exists() {
|
||||
bail!("Config file does not exist: {}", path.display());
|
||||
}
|
||||
// Read the configuration file
|
||||
Config::from_path(&path)
|
||||
Config::from_path(path)
|
||||
.wrap_err_with(|| format!("Could not load config file: {}", path.display()))?
|
||||
};
|
||||
println!("{}", toml::to_string_pretty(&config)?);
|
||||
|
||||
@@ -6,8 +6,9 @@ use reth_db_api::{
|
||||
transaction::{DbTx, DbTxMut},
|
||||
TableViewer, Tables,
|
||||
};
|
||||
use reth_db_common::DbTool;
|
||||
use reth_node_builder::NodeTypesWithDB;
|
||||
use reth_provider::{ProviderFactory, StaticFileProviderFactory};
|
||||
use reth_provider::StaticFileProviderFactory;
|
||||
use reth_static_file_types::StaticFileSegment;
|
||||
|
||||
/// The arguments for the `reth db clear` command
|
||||
@@ -19,16 +20,13 @@ pub struct Command {
|
||||
|
||||
impl Command {
|
||||
/// Execute `db clear` command
|
||||
pub fn execute<N: NodeTypesWithDB>(
|
||||
self,
|
||||
provider_factory: ProviderFactory<N>,
|
||||
) -> eyre::Result<()> {
|
||||
pub fn execute<N: NodeTypesWithDB>(self, tool: &DbTool<N>) -> eyre::Result<()> {
|
||||
match self.subcommand {
|
||||
Subcommands::Mdbx { table } => {
|
||||
table.view(&ClearViewer { db: provider_factory.db_ref() })?
|
||||
table.view(&ClearViewer { db: tool.provider_factory.db_ref() })?
|
||||
}
|
||||
Subcommands::StaticFile { segment } => {
|
||||
let static_file_provider = provider_factory.static_file_provider();
|
||||
let static_file_provider = tool.provider_factory.static_file_provider();
|
||||
let static_files = iter_static_files(static_file_provider.directory())?;
|
||||
|
||||
if let Some(segment_static_files) = static_files.get(&segment) {
|
||||
|
||||
@@ -3,7 +3,7 @@ use alloy_primitives::hex;
|
||||
use clap::Parser;
|
||||
use eyre::WrapErr;
|
||||
use reth_chainspec::EthereumHardforks;
|
||||
use reth_db::DatabaseEnv;
|
||||
use reth_db::{transaction::DbTx, DatabaseEnv};
|
||||
use reth_db_api::{database::Database, table::Table, RawValue, TableViewer, Tables};
|
||||
use reth_db_common::{DbTool, ListFilter};
|
||||
use reth_node_builder::{NodeTypes, NodeTypesWithDBAdapter};
|
||||
@@ -96,6 +96,9 @@ impl<N: NodeTypes> TableViewer<()> for ListTableViewer<'_, N> {
|
||||
|
||||
fn view<T: Table>(&self) -> Result<(), Self::Error> {
|
||||
self.tool.provider_factory.db_ref().view(|tx| {
|
||||
// We may be using the tui for a long time
|
||||
tx.disable_long_read_transaction_safety();
|
||||
|
||||
let table_db = tx.inner.open_db(Some(self.args.table.name())).wrap_err("Could not open db.")?;
|
||||
let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", self.args.table.name()))?;
|
||||
let total_entries = stats.entries();
|
||||
|
||||
@@ -14,6 +14,8 @@ mod diff;
|
||||
mod get;
|
||||
mod list;
|
||||
mod repair_trie;
|
||||
mod settings;
|
||||
mod static_file_header;
|
||||
mod stats;
|
||||
/// DB List TUI
|
||||
mod tui;
|
||||
@@ -51,16 +53,21 @@ pub enum Subcommands {
|
||||
Clear(clear::Command),
|
||||
/// Verifies trie consistency and outputs any inconsistencies
|
||||
RepairTrie(repair_trie::Command),
|
||||
/// Reads and displays the static file segment header
|
||||
StaticFileHeader(static_file_header::Command),
|
||||
/// Lists current and local database versions
|
||||
Version,
|
||||
/// Returns the full database path
|
||||
Path,
|
||||
/// Manage storage settings
|
||||
Settings(settings::Command),
|
||||
}
|
||||
|
||||
/// `db_ro_exec` opens a database in read-only mode, and then execute with the provided command
|
||||
macro_rules! db_ro_exec {
|
||||
($env:expr, $tool:ident, $N:ident, $command:block) => {
|
||||
let Environment { provider_factory, .. } = $env.init::<$N>(AccessRights::RO)?;
|
||||
/// Initializes a provider factory with specified access rights, and then execute with the provided
|
||||
/// command
|
||||
macro_rules! db_exec {
|
||||
($env:expr, $tool:ident, $N:ident, $access_rights:expr, $command:block) => {
|
||||
let Environment { provider_factory, .. } = $env.init::<$N>($access_rights)?;
|
||||
|
||||
let $tool = DbTool::new(provider_factory)?;
|
||||
$command;
|
||||
@@ -88,27 +95,32 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C>
|
||||
match self.command {
|
||||
// TODO: We'll need to add this on the DB trait.
|
||||
Subcommands::Stats(command) => {
|
||||
db_ro_exec!(self.env, tool, N, {
|
||||
let access_rights = if command.skip_consistency_checks {
|
||||
AccessRights::RoInconsistent
|
||||
} else {
|
||||
AccessRights::RO
|
||||
};
|
||||
db_exec!(self.env, tool, N, access_rights, {
|
||||
command.execute(data_dir, &tool)?;
|
||||
});
|
||||
}
|
||||
Subcommands::List(command) => {
|
||||
db_ro_exec!(self.env, tool, N, {
|
||||
db_exec!(self.env, tool, N, AccessRights::RO, {
|
||||
command.execute(&tool)?;
|
||||
});
|
||||
}
|
||||
Subcommands::Checksum(command) => {
|
||||
db_ro_exec!(self.env, tool, N, {
|
||||
db_exec!(self.env, tool, N, AccessRights::RO, {
|
||||
command.execute(&tool)?;
|
||||
});
|
||||
}
|
||||
Subcommands::Diff(command) => {
|
||||
db_ro_exec!(self.env, tool, N, {
|
||||
db_exec!(self.env, tool, N, AccessRights::RO, {
|
||||
command.execute(&tool)?;
|
||||
});
|
||||
}
|
||||
Subcommands::Get(command) => {
|
||||
db_ro_exec!(self.env, tool, N, {
|
||||
db_exec!(self.env, tool, N, AccessRights::RO, {
|
||||
command.execute(&tool)?;
|
||||
});
|
||||
}
|
||||
@@ -130,19 +142,26 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C>
|
||||
}
|
||||
}
|
||||
|
||||
let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RW)?;
|
||||
let tool = DbTool::new(provider_factory)?;
|
||||
tool.drop(db_path, static_files_path, exex_wal_path)?;
|
||||
db_exec!(self.env, tool, N, AccessRights::RW, {
|
||||
tool.drop(db_path, static_files_path, exex_wal_path)?;
|
||||
});
|
||||
}
|
||||
Subcommands::Clear(command) => {
|
||||
let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RW)?;
|
||||
command.execute(provider_factory)?;
|
||||
db_exec!(self.env, tool, N, AccessRights::RW, {
|
||||
command.execute(&tool)?;
|
||||
});
|
||||
}
|
||||
Subcommands::RepairTrie(command) => {
|
||||
let access_rights =
|
||||
if command.dry_run { AccessRights::RO } else { AccessRights::RW };
|
||||
let Environment { provider_factory, .. } = self.env.init::<N>(access_rights)?;
|
||||
command.execute(provider_factory)?;
|
||||
db_exec!(self.env, tool, N, access_rights, {
|
||||
command.execute(&tool)?;
|
||||
});
|
||||
}
|
||||
Subcommands::StaticFileHeader(command) => {
|
||||
db_exec!(self.env, tool, N, AccessRights::RoInconsistent, {
|
||||
command.execute(&tool)?;
|
||||
});
|
||||
}
|
||||
Subcommands::Version => {
|
||||
let local_db_version = match get_db_version(&db_path) {
|
||||
@@ -162,6 +181,11 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C>
|
||||
Subcommands::Path => {
|
||||
println!("{}", db_path.display());
|
||||
}
|
||||
Subcommands::Settings(command) => {
|
||||
db_exec!(self.env, tool, N, command.access_rights(), {
|
||||
command.execute(&tool)?;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -5,8 +5,9 @@ use reth_db_api::{
|
||||
tables,
|
||||
transaction::{DbTx, DbTxMut},
|
||||
};
|
||||
use reth_db_common::DbTool;
|
||||
use reth_node_builder::NodeTypesWithDB;
|
||||
use reth_provider::{providers::ProviderNodeTypes, ProviderFactory, StageCheckpointReader};
|
||||
use reth_provider::{providers::ProviderNodeTypes, StageCheckpointReader};
|
||||
use reth_stages::StageId;
|
||||
use reth_trie::{
|
||||
verify::{Output, Verifier},
|
||||
@@ -29,23 +30,20 @@ pub struct Command {
|
||||
|
||||
impl Command {
|
||||
/// Execute `db repair-trie` command
|
||||
pub fn execute<N: ProviderNodeTypes>(
|
||||
self,
|
||||
provider_factory: ProviderFactory<N>,
|
||||
) -> eyre::Result<()> {
|
||||
pub fn execute<N: ProviderNodeTypes>(self, tool: &DbTool<N>) -> eyre::Result<()> {
|
||||
if self.dry_run {
|
||||
verify_only(provider_factory)?
|
||||
verify_only(tool)?
|
||||
} else {
|
||||
verify_and_repair(provider_factory)?
|
||||
verify_and_repair(tool)?
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn verify_only<N: NodeTypesWithDB>(provider_factory: ProviderFactory<N>) -> eyre::Result<()> {
|
||||
fn verify_only<N: NodeTypesWithDB>(tool: &DbTool<N>) -> eyre::Result<()> {
|
||||
// Get a database transaction directly from the database
|
||||
let db = provider_factory.db_ref();
|
||||
let db = tool.provider_factory.db_ref();
|
||||
let mut tx = db.tx()?;
|
||||
tx.disable_long_read_transaction_safety();
|
||||
|
||||
@@ -114,11 +112,9 @@ fn verify_checkpoints(provider: impl StageCheckpointReader) -> eyre::Result<()>
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn verify_and_repair<N: ProviderNodeTypes>(
|
||||
provider_factory: ProviderFactory<N>,
|
||||
) -> eyre::Result<()> {
|
||||
fn verify_and_repair<N: ProviderNodeTypes>(tool: &DbTool<N>) -> eyre::Result<()> {
|
||||
// Get a read-write database provider
|
||||
let mut provider_rw = provider_factory.provider_rw()?;
|
||||
let mut provider_rw = tool.provider_factory.provider_rw()?;
|
||||
|
||||
// Check that a pipeline sync isn't in progress.
|
||||
verify_checkpoints(provider_rw.as_ref())?;
|
||||
|
||||
109
crates/cli/commands/src/db/settings.rs
Normal file
@@ -0,0 +1,109 @@
|
||||
//! `reth db settings` command for managing storage settings
|
||||
|
||||
use clap::{ArgAction, Parser, Subcommand};
|
||||
use reth_db_common::DbTool;
|
||||
use reth_provider::{
|
||||
providers::ProviderNodeTypes, DBProvider, DatabaseProviderFactory, MetadataProvider,
|
||||
MetadataWriter, StorageSettings,
|
||||
};
|
||||
|
||||
use crate::common::AccessRights;
|
||||
|
||||
/// `reth db settings` subcommand
|
||||
#[derive(Debug, Parser)]
|
||||
pub struct Command {
|
||||
#[command(subcommand)]
|
||||
command: Subcommands,
|
||||
}
|
||||
|
||||
impl Command {
|
||||
/// Returns database access rights required for the command.
|
||||
pub fn access_rights(&self) -> AccessRights {
|
||||
match self.command {
|
||||
Subcommands::Get => AccessRights::RO,
|
||||
Subcommands::Set(_) => AccessRights::RW,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Subcommand)]
|
||||
enum Subcommands {
|
||||
/// Get current storage settings from database
|
||||
Get,
|
||||
/// Set storage settings in database
|
||||
#[clap(subcommand)]
|
||||
Set(SetCommand),
|
||||
}
|
||||
|
||||
/// Set storage settings
|
||||
#[derive(Debug, Clone, Copy, Subcommand)]
|
||||
#[clap(rename_all = "snake_case")]
|
||||
pub enum SetCommand {
|
||||
/// Store receipts in static files instead of the database
|
||||
ReceiptsInStaticFiles {
|
||||
#[clap(action(ArgAction::Set))]
|
||||
value: bool,
|
||||
},
|
||||
}
|
||||
|
||||
impl Command {
|
||||
/// Execute the command
|
||||
pub fn execute<N: ProviderNodeTypes>(self, tool: &DbTool<N>) -> eyre::Result<()> {
|
||||
match self.command {
|
||||
Subcommands::Get => self.get(tool),
|
||||
Subcommands::Set(cmd) => self.set(cmd, tool),
|
||||
}
|
||||
}
|
||||
|
||||
fn get<N: ProviderNodeTypes>(&self, tool: &DbTool<N>) -> eyre::Result<()> {
|
||||
// Read storage settings
|
||||
let provider = tool.provider_factory.provider()?;
|
||||
let storage_settings = provider.storage_settings()?;
|
||||
|
||||
// Display settings
|
||||
match storage_settings {
|
||||
Some(settings) => {
|
||||
println!("Current storage settings:");
|
||||
println!("{settings:#?}");
|
||||
}
|
||||
None => {
|
||||
println!("No storage settings found.");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn set<N: ProviderNodeTypes>(&self, cmd: SetCommand, tool: &DbTool<N>) -> eyre::Result<()> {
|
||||
// Read storage settings
|
||||
let provider_rw = tool.provider_factory.database_provider_rw()?;
|
||||
// Destruct settings struct to not miss adding support for new fields
|
||||
let settings = provider_rw.storage_settings()?;
|
||||
if settings.is_none() {
|
||||
println!("No storage settings found, creating new settings.");
|
||||
}
|
||||
|
||||
let mut settings @ StorageSettings { receipts_in_static_files: _ } =
|
||||
settings.unwrap_or_default();
|
||||
|
||||
// Update the setting based on the key
|
||||
match cmd {
|
||||
SetCommand::ReceiptsInStaticFiles { value } => {
|
||||
if settings.receipts_in_static_files == value {
|
||||
println!("receipts_in_static_files is already set to {}", value);
|
||||
return Ok(());
|
||||
}
|
||||
settings.receipts_in_static_files = value;
|
||||
println!("Set receipts_in_static_files = {}", value);
|
||||
}
|
||||
}
|
||||
|
||||
// Write updated settings
|
||||
provider_rw.write_storage_settings(settings)?;
|
||||
provider_rw.commit()?;
|
||||
|
||||
println!("Storage settings updated successfully.");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
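One detail worth noting in the `set` handler above is the `settings @ StorageSettings { receipts_in_static_files: _ }` binding: because the pattern names every field without `..`, adding a new field to `StorageSettings` later makes this command fail to compile until the field is handled here. A self-contained sketch of the trick, using a hypothetical local struct rather than the real type:

```rust
/// Hypothetical stand-in for the real `StorageSettings`.
struct StorageSettings {
    receipts_in_static_files: bool,
}

fn main() {
    // `settings @ Struct { field: _ }` binds the whole value while forcing the pattern
    // to list every field; a new field on the struct turns this into a compile error.
    let mut settings @ StorageSettings { receipts_in_static_files: _ } =
        StorageSettings { receipts_in_static_files: false };

    settings.receipts_in_static_files = true;
    assert!(settings.receipts_in_static_files);
}
```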
63
crates/cli/commands/src/db/static_file_header.rs
Normal file
@@ -0,0 +1,63 @@
|
||||
use clap::{Parser, Subcommand};
|
||||
use reth_db_common::DbTool;
|
||||
use reth_provider::{providers::ProviderNodeTypes, StaticFileProviderFactory};
|
||||
use reth_static_file_types::StaticFileSegment;
|
||||
use std::path::PathBuf;
|
||||
use tracing::warn;
|
||||
|
||||
/// The arguments for the `reth db static-file-header` command
|
||||
#[derive(Parser, Debug)]
|
||||
pub struct Command {
|
||||
#[command(subcommand)]
|
||||
source: Source,
|
||||
}
|
||||
|
||||
/// Source for locating the static file
|
||||
#[derive(Subcommand, Debug)]
|
||||
enum Source {
|
||||
/// Query by segment and block number
|
||||
Block {
|
||||
/// Static file segment
|
||||
#[arg(value_enum)]
|
||||
segment: StaticFileSegment,
|
||||
/// Block number to query
|
||||
block: u64,
|
||||
},
|
||||
/// Query by path to static file
|
||||
Path {
|
||||
/// Path to the static file
|
||||
path: PathBuf,
|
||||
},
|
||||
}
|
||||
|
||||
impl Command {
|
||||
/// Execute `db static-file-header` command
|
||||
pub fn execute<N: ProviderNodeTypes>(self, tool: &DbTool<N>) -> eyre::Result<()> {
|
||||
let static_file_provider = tool.provider_factory.static_file_provider();
|
||||
if let Err(err) = static_file_provider.check_consistency(&tool.provider_factory.provider()?)
|
||||
{
|
||||
warn!("Error checking consistency of static files: {err}");
|
||||
}
|
||||
|
||||
// Get the provider based on the source
|
||||
let provider = match self.source {
|
||||
Source::Path { path } => {
|
||||
static_file_provider.get_segment_provider_for_path(&path)?.ok_or_else(|| {
|
||||
eyre::eyre!("Could not find static file segment for path: {}", path.display())
|
||||
})?
|
||||
}
|
||||
Source::Block { segment, block } => {
|
||||
static_file_provider.get_segment_provider(segment, block)?
|
||||
}
|
||||
};
|
||||
|
||||
let header = provider.user_header();
|
||||
|
||||
println!("Segment: {}", header.segment());
|
||||
println!("Expected Block Range: {}", header.expected_block_range());
|
||||
println!("Block Range: {:?}", header.block_range());
|
||||
println!("Transaction Range: {:?}", header.tx_range());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -18,6 +18,10 @@ use std::{sync::Arc, time::Duration};
|
||||
#[derive(Parser, Debug)]
|
||||
/// The arguments for the `reth db stats` command
|
||||
pub struct Command {
|
||||
/// Skip consistency checks for static files.
|
||||
#[arg(long, default_value_t = false)]
|
||||
pub(crate) skip_consistency_checks: bool,
|
||||
|
||||
/// Show only the total size for static files.
|
||||
#[arg(long, default_value_t = false)]
|
||||
detailed_sizes: bool,
|
||||
|
||||
@@ -10,7 +10,7 @@ use reth_node_builder::NodeBuilder;
|
||||
use reth_node_core::{
|
||||
args::{
|
||||
DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, EraArgs, MetricArgs,
|
||||
NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs,
|
||||
NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, StaticFilesArgs, TxPoolArgs,
|
||||
},
|
||||
node_config::NodeConfig,
|
||||
version,
|
||||
@@ -110,6 +110,10 @@ pub struct NodeCommand<C: ChainSpecParser, Ext: clap::Args + fmt::Debug = NoArgs
|
||||
#[command(flatten, next_help_heading = "ERA")]
|
||||
pub era: EraArgs,
|
||||
|
||||
/// All static files related arguments
|
||||
#[command(flatten, next_help_heading = "Static Files")]
|
||||
pub static_files: StaticFilesArgs,
|
||||
|
||||
/// Additional cli arguments
|
||||
#[command(flatten, next_help_heading = "Extension")]
|
||||
pub ext: Ext,
|
||||
@@ -145,7 +149,7 @@ where
|
||||
where
|
||||
L: Launcher<C, Ext>,
|
||||
{
|
||||
tracing::info!(target: "reth::cli", version = ?version::version_metadata().short_version, "Starting reth");
|
||||
tracing::info!(target: "reth::cli", version = ?version::version_metadata().short_version, "Starting {}", version::version_metadata().name_client);
|
||||
|
||||
let Self {
|
||||
datadir,
|
||||
@@ -162,9 +166,10 @@ where
|
||||
db,
|
||||
dev,
|
||||
pruning,
|
||||
ext,
|
||||
engine,
|
||||
era,
|
||||
static_files,
|
||||
ext,
|
||||
} = self;
|
||||
|
||||
// set up node config
|
||||
@@ -184,6 +189,7 @@ where
|
||||
pruning,
|
||||
engine,
|
||||
era,
|
||||
static_files,
|
||||
};
|
||||
|
||||
let data_dir = node_config.datadir();
|
||||
|
||||
@@ -60,7 +60,7 @@ impl Command {
|
||||
if self.v5 {
|
||||
info!("Starting discv5");
|
||||
let config = Config::builder(self.addr).build();
|
||||
let (_discv5, updates, _local_enr_discv5) = Discv5::start(&sk, config).await?;
|
||||
let (_discv5, updates) = Discv5::start(&sk, config).await?;
|
||||
discv5_updates = Some(updates);
|
||||
};
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ use backon::{ConstantBuilder, Retryable};
|
||||
use clap::{Parser, Subcommand};
|
||||
use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
|
||||
use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_cli_util::{get_secret_key, hash_or_num_value_parser};
|
||||
use reth_cli_util::hash_or_num_value_parser;
|
||||
use reth_config::Config;
|
||||
use reth_network::{BlockDownloaderProvider, NetworkConfigBuilder};
|
||||
use reth_network_p2p::bodies::client::BodiesClient;
|
||||
@@ -183,9 +183,7 @@ impl<C: ChainSpecParser> DownloadArgs<C> {
|
||||
config.peers.trusted_nodes_only = self.network.trusted_only;
|
||||
|
||||
let default_secret_key_path = data_dir.p2p_secret();
|
||||
let secret_key_path =
|
||||
self.network.p2p_secret_key.clone().unwrap_or(default_secret_key_path);
|
||||
let p2p_secret_key = get_secret_key(&secret_key_path)?;
|
||||
let p2p_secret_key = self.network.secret_key(default_secret_key_path)?;
|
||||
let rlpx_socket = (self.network.addr, self.network.port).into();
|
||||
let boot_nodes = self.chain.bootnodes().unwrap_or_default();
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ use clap::Parser;
|
||||
use eyre::WrapErr;
|
||||
use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
|
||||
use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_cli_util::cancellation::CancellationToken;
|
||||
use reth_consensus::FullConsensus;
|
||||
use reth_evm::{execute::Executor, ConfigureEvm};
|
||||
use reth_primitives_traits::{format_gas_throughput, BlockBody, GotExpected};
|
||||
@@ -44,6 +45,10 @@ pub struct Command<C: ChainSpecParser> {
|
||||
/// Number of tasks to run in parallel
|
||||
#[arg(long, default_value = "10")]
|
||||
num_tasks: u64,
|
||||
|
||||
/// Continues with execution when an invalid block is encountered and collects these blocks.
|
||||
#[arg(long)]
|
||||
skip_invalid_blocks: bool,
|
||||
}
|
||||
|
||||
impl<C: ChainSpecParser> Command<C> {
|
||||
@@ -61,11 +66,11 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
{
|
||||
let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RO)?;
|
||||
|
||||
let provider = provider_factory.database_provider_ro()?;
|
||||
let components = components(provider_factory.chain_spec());
|
||||
|
||||
let min_block = self.from;
|
||||
let best_block = provider.best_block_number()?;
|
||||
let best_block = DatabaseProviderFactory::database_provider_ro(&provider_factory)?
|
||||
.best_block_number()?;
|
||||
let mut max_block = best_block;
|
||||
if let Some(to) = self.to {
|
||||
if to > best_block {
|
||||
@@ -95,7 +100,11 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
}
|
||||
};
|
||||
|
||||
let skip_invalid_blocks = self.skip_invalid_blocks;
|
||||
let (stats_tx, mut stats_rx) = mpsc::unbounded_channel();
|
||||
let (info_tx, mut info_rx) = mpsc::unbounded_channel();
|
||||
let cancellation = CancellationToken::new();
|
||||
let _guard = cancellation.drop_guard();
|
||||
|
||||
let mut tasks = JoinSet::new();
|
||||
for i in 0..self.num_tasks {
|
||||
@@ -109,17 +118,40 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
let consensus = components.consensus().clone();
|
||||
let db_at = db_at.clone();
|
||||
let stats_tx = stats_tx.clone();
|
||||
let info_tx = info_tx.clone();
|
||||
let cancellation = cancellation.clone();
|
||||
tasks.spawn_blocking(move || {
|
||||
let mut executor = evm_config.batch_executor(db_at(start_block - 1));
|
||||
for block in start_block..end_block {
|
||||
let mut executor_created = Instant::now();
|
||||
let executor_lifetime = Duration::from_secs(120);
|
||||
|
||||
'blocks: for block in start_block..end_block {
|
||||
if cancellation.is_cancelled() {
|
||||
// exit if the program is being terminated
|
||||
break
|
||||
}
|
||||
|
||||
let block = provider_factory
|
||||
.recovered_block(block.into(), TransactionVariant::NoHash)?
|
||||
.unwrap();
|
||||
let result = executor.execute_one(&block)?;
|
||||
|
||||
let result = match executor.execute_one(&block) {
|
||||
Ok(result) => result,
|
||||
Err(err) => {
|
||||
if skip_invalid_blocks {
|
||||
executor = evm_config.batch_executor(db_at(block.number()));
|
||||
let _ = info_tx.send((block, eyre::Report::new(err)));
|
||||
continue
|
||||
}
|
||||
return Err(err.into())
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(err) = consensus
|
||||
.validate_block_post_execution(&block, &result)
|
||||
.wrap_err_with(|| format!("Failed to validate block {}", block.number()))
|
||||
.wrap_err_with(|| {
|
||||
format!("Failed to validate block {} {}", block.number(), block.hash())
|
||||
})
|
||||
{
|
||||
let correct_receipts =
|
||||
provider_factory.receipts_by_block(block.number().into())?.unwrap();
|
||||
@@ -155,6 +187,11 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
};
|
||||
|
||||
error!(number=?block.number(), ?mismatch, "Gas usage mismatch");
|
||||
if skip_invalid_blocks {
|
||||
executor = evm_config.batch_executor(db_at(block.number()));
|
||||
let _ = info_tx.send((block, err));
|
||||
continue 'blocks;
|
||||
}
|
||||
return Err(err);
|
||||
}
|
||||
} else {
|
||||
@@ -166,9 +203,12 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
}
|
||||
let _ = stats_tx.send(block.gas_used());
|
||||
|
||||
// Reset DB once in a while to avoid OOM
|
||||
if executor.size_hint() > 1_000_000 {
|
||||
// Reset DB once in a while to avoid OOM or read tx timeouts
|
||||
if executor.size_hint() > 1_000_000 ||
|
||||
executor_created.elapsed() > executor_lifetime
|
||||
{
|
||||
executor = evm_config.batch_executor(db_at(block.number()));
|
||||
executor_created = Instant::now();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -183,6 +223,7 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
let mut last_logged_gas = 0;
|
||||
let mut last_logged_blocks = 0;
|
||||
let mut last_logged_time = Instant::now();
|
||||
let mut invalid_blocks = Vec::new();
|
||||
|
||||
let mut interval = tokio::time::interval(Duration::from_secs(10));
|
||||
|
||||
@@ -192,6 +233,10 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
total_executed_blocks += 1;
|
||||
total_executed_gas += gas_used;
|
||||
}
|
||||
Some((block, err)) = info_rx.recv() => {
|
||||
error!(?err, block=?block.num_hash(), "Invalid block");
|
||||
invalid_blocks.push(block.num_hash());
|
||||
}
|
||||
result = tasks.join_next() => {
|
||||
if let Some(result) = result {
|
||||
if matches!(result, Err(_) | Ok(Err(_))) {
|
||||
@@ -222,12 +267,25 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
}
|
||||
}
|
||||
|
||||
info!(
|
||||
start_block = min_block,
|
||||
end_block = max_block,
|
||||
throughput=?format_gas_throughput(total_executed_gas, instant.elapsed()),
|
||||
"Re-executed successfully"
|
||||
);
|
||||
if invalid_blocks.is_empty() {
|
||||
info!(
|
||||
start_block = min_block,
|
||||
end_block = max_block,
|
||||
%total_executed_blocks,
|
||||
throughput=?format_gas_throughput(total_executed_gas, instant.elapsed()),
|
||||
"Re-executed successfully"
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
start_block = min_block,
|
||||
end_block = max_block,
|
||||
%total_executed_blocks,
|
||||
invalid_block_count = invalid_blocks.len(),
|
||||
?invalid_blocks,
|
||||
throughput=?format_gas_throughput(total_executed_gas, instant.elapsed()),
|
||||
"Re-executed with invalid blocks"
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
//! Database debugging tool
|
||||
use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
|
||||
use clap::Parser;
|
||||
use itertools::Itertools;
|
||||
use reth_chainspec::EthChainSpec;
|
||||
use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_db::{mdbx::tx::Tx, static_file::iter_static_files, DatabaseError};
|
||||
use reth_db::{mdbx::tx::Tx, DatabaseError};
|
||||
use reth_db_api::{
|
||||
tables,
|
||||
transaction::{DbTx, DbTxMut},
|
||||
@@ -15,7 +14,9 @@ use reth_db_common::{
|
||||
};
|
||||
use reth_node_api::{HeaderTy, ReceiptTy, TxTy};
|
||||
use reth_node_core::args::StageEnum;
|
||||
use reth_provider::{DBProvider, DatabaseProviderFactory, StaticFileProviderFactory, TrieWriter};
|
||||
use reth_provider::{
|
||||
DBProvider, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, TrieWriter,
|
||||
};
|
||||
use reth_prune::PruneSegment;
|
||||
use reth_stages::StageId;
|
||||
use reth_static_file_types::StaticFileSegment;
|
||||
@@ -47,18 +48,37 @@ impl<C: ChainSpecParser> Command<C> {
|
||||
_ => None,
|
||||
};
|
||||
|
||||
// Delete static file segment data before inserting the genesis header below
|
||||
// Calling `StaticFileProviderRW::prune_*` will instruct the writer to prune rows only
|
||||
// when `StaticFileProviderRW::commit` is called. We need to do that instead of
|
||||
// deleting the jar files, otherwise if the task were to be interrupted after we
|
||||
// have deleted them, BUT before we have committed the checkpoints to the database, we'd
|
||||
// lose essential data.
|
||||
if let Some(static_file_segment) = static_file_segment {
|
||||
let static_file_provider = tool.provider_factory.static_file_provider();
|
||||
let static_files = iter_static_files(static_file_provider.directory())?;
|
||||
if let Some(segment_static_files) = static_files.get(&static_file_segment) {
|
||||
// Delete static files from the highest to the lowest block range
|
||||
for (block_range, _) in segment_static_files
|
||||
.iter()
|
||||
.sorted_by_key(|(block_range, _)| block_range.start())
|
||||
.rev()
|
||||
{
|
||||
static_file_provider.delete_jar(static_file_segment, block_range.start())?;
|
||||
if let Some(highest_block) =
|
||||
static_file_provider.get_highest_static_file_block(static_file_segment)
|
||||
{
|
||||
let mut writer = static_file_provider.latest_writer(static_file_segment)?;
|
||||
|
||||
match static_file_segment {
|
||||
StaticFileSegment::Headers => {
|
||||
// Prune all headers leaving genesis intact.
|
||||
writer.prune_headers(highest_block)?;
|
||||
}
|
||||
StaticFileSegment::Transactions => {
|
||||
let to_delete = static_file_provider
|
||||
.get_highest_static_file_tx(static_file_segment)
|
||||
.map(|tx| tx + 1)
|
||||
.unwrap_or_default();
|
||||
writer.prune_transactions(to_delete, 0)?;
|
||||
}
|
||||
StaticFileSegment::Receipts => {
|
||||
let to_delete = static_file_provider
|
||||
.get_highest_static_file_tx(static_file_segment)
|
||||
.map(|receipt| receipt + 1)
|
||||
.unwrap_or_default();
|
||||
writer.prune_receipts(to_delete, 0)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -84,6 +84,9 @@ pub struct Command<C: ChainSpecParser> {
|
||||
/// Commits the changes in the database. WARNING: potentially destructive.
|
||||
///
|
||||
/// Useful when you want to run diagnostics on the database.
|
||||
///
|
||||
/// NOTE: This flag is currently required for the headers, bodies, and execution stages because
|
||||
/// they use static files and must commit to properly unwind and run.
|
||||
// TODO: We should consider allowing to run hooks at the end of the stage run,
|
||||
// e.g. query the DB size, or any table data.
|
||||
#[arg(long, short)]
|
||||
@@ -105,6 +108,14 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
Comp: CliNodeComponents<N>,
|
||||
F: FnOnce(Arc<C::ChainSpec>) -> Comp,
|
||||
{
|
||||
// Quit early if the stages requires a commit and `--commit` is not provided.
|
||||
if self.requires_commit() && !self.commit {
|
||||
return Err(eyre::eyre!(
|
||||
"The stage {} requires overwriting existing static files and must commit, but `--commit` was not provided. Please pass `--commit` and try again.",
|
||||
self.stage.to_string()
|
||||
));
|
||||
}
|
||||
|
||||
// Raise the fd limit of the process.
|
||||
// Does not do anything on windows.
|
||||
let _ = fdlimit::raise_fd_limit();
|
||||
@@ -383,4 +394,13 @@ impl<C: ChainSpecParser> Command<C> {
|
||||
pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
|
||||
Some(&self.env.chain)
|
||||
}
|
||||
|
||||
/// Returns whether or not the configured stage requires committing.
|
||||
///
|
||||
/// This is the case for stages that mainly modify static files, as there is no way to unwind
|
||||
/// these stages without committing anyways. This is because static files do not have
|
||||
/// transactions and we cannot change the view of headers without writing.
|
||||
pub fn requires_commit(&self) -> bool {
|
||||
matches!(self.stage, StageEnum::Headers | StageEnum::Bodies | StageEnum::Execution)
|
||||
}
|
||||
}
|
||||
|
||||
103
crates/cli/util/src/cancellation.rs
Normal file
@@ -0,0 +1,103 @@
|
||||
//! Thread-safe cancellation primitives for cooperative task cancellation.
|
||||
|
||||
use std::sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
};
|
||||
|
||||
/// A thread-safe cancellation token that can be shared across threads.
|
||||
///
|
||||
/// This token allows cooperative cancellation by providing a way to signal
|
||||
/// cancellation and check cancellation status. The token can be cloned and
|
||||
/// shared across multiple threads, with all clones sharing the same cancellation state.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use reth_cli_util::cancellation::CancellationToken;
|
||||
/// use std::{thread, time::Duration};
|
||||
///
|
||||
/// let token = CancellationToken::new();
|
||||
/// let worker_token = token.clone();
|
||||
///
|
||||
/// let handle = thread::spawn(move || {
|
||||
/// while !worker_token.is_cancelled() {
|
||||
/// // Do work...
|
||||
/// thread::sleep(Duration::from_millis(100));
|
||||
/// }
|
||||
/// });
|
||||
///
|
||||
/// // Cancel from main thread
|
||||
/// token.cancel();
|
||||
/// handle.join().unwrap();
|
||||
/// ```
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct CancellationToken {
|
||||
cancelled: Arc<AtomicBool>,
|
||||
}
|
||||
|
||||
impl CancellationToken {
|
||||
/// Creates a new cancellation token in the non-cancelled state.
|
||||
pub fn new() -> Self {
|
||||
Self { cancelled: Arc::new(AtomicBool::new(false)) }
|
||||
}
|
||||
|
||||
/// Signals cancellation to all holders of this token and its clones.
|
||||
///
|
||||
/// Once cancelled, the token cannot be reset. This operation is thread-safe
|
||||
/// and can be called multiple times without issue.
|
||||
pub fn cancel(&self) {
|
||||
self.cancelled.store(true, Ordering::Release);
|
||||
}
|
||||
|
||||
/// Checks whether cancellation has been requested.
|
||||
///
|
||||
/// Returns `true` if [`cancel`](Self::cancel) has been called on this token
|
||||
/// or any of its clones.
|
||||
pub fn is_cancelled(&self) -> bool {
|
||||
self.cancelled.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
/// Creates a guard that automatically cancels this token when dropped.
|
||||
///
|
||||
/// This is useful for ensuring cancellation happens when a scope exits,
|
||||
/// either normally or via panic.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use reth_cli_util::cancellation::CancellationToken;
|
||||
///
|
||||
/// let token = CancellationToken::new();
|
||||
/// {
|
||||
/// let _guard = token.drop_guard();
|
||||
/// assert!(!token.is_cancelled());
|
||||
/// // Guard dropped here, triggering cancellation
|
||||
/// }
|
||||
/// assert!(token.is_cancelled());
|
||||
/// ```
|
||||
pub fn drop_guard(&self) -> CancellationGuard {
|
||||
CancellationGuard { token: self.clone() }
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for CancellationToken {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// A guard that cancels its associated [`CancellationToken`] when dropped.
|
||||
///
|
||||
/// Created by calling [`CancellationToken::drop_guard`]. When this guard is dropped,
|
||||
/// it automatically calls [`cancel`](CancellationToken::cancel) on the token.
|
||||
#[derive(Debug)]
|
||||
pub struct CancellationGuard {
|
||||
token: CancellationToken,
|
||||
}
|
||||
|
||||
impl Drop for CancellationGuard {
|
||||
fn drop(&mut self) {
|
||||
self.token.cancel();
|
||||
}
|
||||
}
|
||||
@@ -9,10 +9,11 @@
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
|
||||
pub mod allocator;
|
||||
pub mod cancellation;
|
||||
|
||||
/// Helper function to load a secret key from a file.
|
||||
pub mod load_secret_key;
|
||||
pub use load_secret_key::get_secret_key;
|
||||
pub use load_secret_key::{get_secret_key, parse_secret_key_from_hex};
|
||||
|
||||
/// Cli parsers functions.
|
||||
pub mod parsers;
|
||||
|
||||
@@ -30,6 +30,10 @@ pub enum SecretKeyError {
|
||||
/// Path to the secret key file.
|
||||
secret_file: PathBuf,
|
||||
},
|
||||
|
||||
/// Invalid hex string format.
|
||||
#[error("invalid hex string: {0}")]
|
||||
InvalidHexString(String),
|
||||
}
|
||||
|
||||
/// Attempts to load a [`SecretKey`] from a specified path. If no file exists there, then it
|
||||
@@ -60,3 +64,75 @@ pub fn get_secret_key(secret_key_path: &Path) -> Result<SecretKey, SecretKeyErro
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Parses a [`SecretKey`] from a hex string.
|
||||
///
|
||||
/// The hex string can optionally start with "0x".
|
||||
pub fn parse_secret_key_from_hex(hex_str: &str) -> Result<SecretKey, SecretKeyError> {
|
||||
// Remove "0x" prefix if present
|
||||
let hex_str = hex_str.strip_prefix("0x").unwrap_or(hex_str);
|
||||
|
||||
// Decode the hex string
|
||||
let bytes = alloy_primitives::hex::decode(hex_str)
|
||||
.map_err(|e| SecretKeyError::InvalidHexString(e.to_string()))?;
|
||||
|
||||
// Parse into SecretKey
|
||||
SecretKey::from_slice(&bytes).map_err(SecretKeyError::SecretKeyDecodeError)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_secret_key_from_hex_without_prefix() {
|
||||
// Valid 32-byte hex string (64 characters)
|
||||
let hex = "4c0883a69102937d6231471b5dbb6204fe512961708279f8c5c58b3b9c4e8b8f";
|
||||
let result = parse_secret_key_from_hex(hex);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let secret_key = result.unwrap();
|
||||
assert_eq!(alloy_primitives::hex::encode(secret_key.secret_bytes()), hex);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_secret_key_from_hex_with_0x_prefix() {
|
||||
// Valid 32-byte hex string with 0x prefix
|
||||
let hex = "0x4c0883a69102937d6231471b5dbb6204fe512961708279f8c5c58b3b9c4e8b8f";
|
||||
let result = parse_secret_key_from_hex(hex);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let secret_key = result.unwrap();
|
||||
let expected = "4c0883a69102937d6231471b5dbb6204fe512961708279f8c5c58b3b9c4e8b8f";
|
||||
assert_eq!(alloy_primitives::hex::encode(secret_key.secret_bytes()), expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_secret_key_from_hex_invalid_length() {
|
||||
// Invalid length (not 32 bytes)
|
||||
let hex = "4c0883a69102937d";
|
||||
let result = parse_secret_key_from_hex(hex);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_secret_key_from_hex_invalid_chars() {
|
||||
// Invalid hex characters
|
||||
let hex = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz";
|
||||
let result = parse_secret_key_from_hex(hex);
|
||||
assert!(result.is_err());
|
||||
|
||||
if let Err(SecretKeyError::InvalidHexString(_)) = result {
|
||||
// Expected error type
|
||||
} else {
|
||||
panic!("Expected InvalidHexString error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_secret_key_from_hex_empty() {
|
||||
let hex = "";
|
||||
let result = parse_secret_key_from_hex(hex);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,6 +31,16 @@ pub fn parse_duration_from_secs_or_ms(
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper to format a [Duration] to the format that can be parsed by
/// [`parse_duration_from_secs_or_ms`].
pub fn format_duration_as_secs_or_ms(duration: Duration) -> String {
if duration.as_millis().is_multiple_of(1000) {
format!("{}", duration.as_secs())
} else {
format!("{}ms", duration.as_millis())
}
}
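A quick illustration of the round-trip this helper targets (not part of the diff; the import path assumes the function lives next to `parse_duration_from_secs_or_ms` in `reth_cli_util::parsers`):

```rust
use reth_cli_util::parsers::format_duration_as_secs_or_ms;
use std::time::Duration;

fn main() {
    // Whole seconds render without a unit; anything finer falls back to milliseconds,
    // matching what `parse_duration_from_secs_or_ms` can read back.
    assert_eq!(format_duration_as_secs_or_ms(Duration::from_secs(90)), "90");
    assert_eq!(format_duration_as_secs_or_ms(Duration::from_millis(1500)), "1500ms");
}
```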
|
||||
|
||||
/// Parse [`BlockHashOrNumber`]
|
||||
pub fn hash_or_num_value_parser(value: &str) -> eyre::Result<BlockHashOrNumber, eyre::Error> {
|
||||
match B256::from_str(value) {
|
||||
|
||||
@@ -126,7 +126,8 @@ pub fn install() {
|
||||
libc::sigaltstack(&raw const alt_stack, ptr::null_mut());
|
||||
|
||||
let mut sa: libc::sigaction = mem::zeroed();
|
||||
sa.sa_sigaction = print_stack_trace as libc::sighandler_t;
|
||||
sa.sa_sigaction =
|
||||
print_stack_trace as unsafe extern "C" fn(libc::c_int) as libc::sighandler_t;
|
||||
sa.sa_flags = libc::SA_NODEFER | libc::SA_RESETHAND | libc::SA_ONSTACK;
|
||||
libc::sigemptyset(&raw mut sa.sa_mask);
|
||||
libc::sigaction(libc::SIGSEGV, &raw const sa, ptr::null_mut());
|
||||
|
||||
@@ -15,6 +15,7 @@ workspace = true
|
||||
reth-network-types.workspace = true
|
||||
reth-prune-types.workspace = true
|
||||
reth-stages-types.workspace = true
|
||||
reth-static-file-types.workspace = true
|
||||
|
||||
# serde
|
||||
serde = { workspace = true, optional = true }
|
||||
@@ -22,7 +23,7 @@ humantime-serde = { workspace = true, optional = true }
|
||||
|
||||
# toml
|
||||
toml = { workspace = true, optional = true }
|
||||
eyre = { workspace = true, optional = true }
|
||||
eyre.workspace = true
|
||||
|
||||
# value objects
|
||||
url.workspace = true
|
||||
@@ -31,7 +32,6 @@ url.workspace = true
|
||||
serde = [
|
||||
"dep:serde",
|
||||
"dep:toml",
|
||||
"dep:eyre",
|
||||
"dep:humantime-serde",
|
||||
"reth-network-types/serde",
|
||||
"reth-prune-types/serde",
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
use reth_network_types::{PeersConfig, SessionsConfig};
|
||||
use reth_prune_types::PruneModes;
|
||||
use reth_stages_types::ExecutionStageThresholds;
|
||||
use reth_static_file_types::StaticFileSegment;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
path::{Path, PathBuf},
|
||||
time::Duration,
|
||||
};
|
||||
@@ -29,6 +31,9 @@ pub struct Config {
|
||||
pub peers: PeersConfig,
|
||||
/// Configuration for peer sessions.
|
||||
pub sessions: SessionsConfig,
|
||||
/// Configuration for static files.
|
||||
#[cfg_attr(feature = "serde", serde(default))]
|
||||
pub static_files: StaticFilesConfig,
|
||||
}
|
||||
|
||||
impl Config {
|
||||
@@ -411,6 +416,68 @@ impl EtlConfig {
|
||||
}
|
||||
}

/// Static files configuration.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct StaticFilesConfig {
/// Number of blocks per file for each segment.
pub blocks_per_file: BlocksPerFileConfig,
}

/// Configuration for the number of blocks per file for each segment.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct BlocksPerFileConfig {
/// Number of blocks per file for the headers segment.
pub headers: Option<u64>,
/// Number of blocks per file for the transactions segment.
pub transactions: Option<u64>,
/// Number of blocks per file for the receipts segment.
pub receipts: Option<u64>,
}

impl StaticFilesConfig {
/// Validates the static files configuration.
///
/// Returns an error if any blocks per file value is zero.
pub fn validate(&self) -> eyre::Result<()> {
let BlocksPerFileConfig { headers, transactions, receipts } = self.blocks_per_file;
eyre::ensure!(headers != Some(0), "Headers segment blocks per file must be greater than 0");
eyre::ensure!(
transactions != Some(0),
"Transactions segment blocks per file must be greater than 0"
);
eyre::ensure!(
receipts != Some(0),
"Receipts segment blocks per file must be greater than 0"
);
Ok(())
}

/// Converts the blocks per file configuration into a [`HashMap`] per segment.
pub fn as_blocks_per_file_map(&self) -> HashMap<StaticFileSegment, u64> {
let BlocksPerFileConfig { headers, transactions, receipts } = self.blocks_per_file;

let mut map = HashMap::new();
// Iterating over all possible segments allows us to do an exhaustive match here,
// to not forget to configure new segments in the future.
for segment in StaticFileSegment::iter() {
let blocks_per_file = match segment {
StaticFileSegment::Headers => headers,
StaticFileSegment::Transactions => transactions,
StaticFileSegment::Receipts => receipts,
};

if let Some(blocks_per_file) = blocks_per_file {
map.insert(segment, blocks_per_file);
}
}
map
}
}

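For illustration only (not part of the diff): how the new types are meant to be exercised, assuming `StaticFilesConfig` and `BlocksPerFileConfig` are exported from `reth_config::config` like the other config types in this file. With the serde defaults shown above, the same data would presumably live under a `[static_files.blocks_per_file]` table in `reth.toml`.

```rust
use reth_config::config::{BlocksPerFileConfig, StaticFilesConfig};

fn main() -> eyre::Result<()> {
    let cfg = StaticFilesConfig {
        blocks_per_file: BlocksPerFileConfig {
            headers: Some(250_000),
            transactions: None, // unset segments keep the provider's default
            receipts: Some(500_000),
        },
    };

    // `Some(0)` for any segment is rejected by `validate`.
    cfg.validate()?;

    // Only explicitly configured segments appear in the per-segment map.
    let map = cfg.as_blocks_per_file_map();
    assert_eq!(map.len(), 2);
    Ok(())
}
```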
/// History stage configuration.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
//! Collection of methods for block validation.
|
||||
|
||||
use alloy_consensus::{
|
||||
constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader as _, Transaction, EMPTY_OMMER_ROOT_HASH,
|
||||
};
|
||||
use alloy_consensus::{BlockHeader as _, Transaction, EMPTY_OMMER_ROOT_HASH};
|
||||
use alloy_eips::{eip4844::DATA_GAS_PER_BLOB, eip7840::BlobParams};
|
||||
use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks};
|
||||
use reth_consensus::{ConsensusError, TxGasLimitTooHighErr};
|
||||
@@ -306,9 +304,12 @@ pub fn validate_4844_header_standalone<H: BlockHeader>(
|
||||
/// From yellow paper: extraData: An arbitrary byte array containing data relevant to this block.
|
||||
/// This must be 32 bytes or fewer; formally Hx.
|
||||
#[inline]
|
||||
pub fn validate_header_extra_data<H: BlockHeader>(header: &H) -> Result<(), ConsensusError> {
|
||||
pub fn validate_header_extra_data<H: BlockHeader>(
|
||||
header: &H,
|
||||
max_size: usize,
|
||||
) -> Result<(), ConsensusError> {
|
||||
let extra_data_len = header.extra_data().len();
|
||||
if extra_data_len > MAXIMUM_EXTRA_DATA_SIZE {
|
||||
if extra_data_len > max_size {
|
||||
Err(ConsensusError::ExtraDataExceedsMax { len: extra_data_len })
|
||||
} else {
|
||||
Ok(())
|
||||
@@ -546,4 +547,21 @@ mod tests {
|
||||
}))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn validate_header_extra_data_with_custom_limit() {
|
||||
// Test with default 32 bytes - should pass
|
||||
let header_32 = Header { extra_data: Bytes::from(vec![0; 32]), ..Default::default() };
|
||||
assert!(validate_header_extra_data(&header_32, 32).is_ok());
|
||||
|
||||
// Test exceeding default - should fail
|
||||
let header_33 = Header { extra_data: Bytes::from(vec![0; 33]), ..Default::default() };
|
||||
assert_eq!(
|
||||
validate_header_extra_data(&header_33, 32),
|
||||
Err(ConsensusError::ExtraDataExceedsMax { len: 33 })
|
||||
);
|
||||
|
||||
// Test with custom larger limit - should pass
|
||||
assert!(validate_header_extra_data(&header_33, 64).is_ok());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
//! configurations through closures that modify `NodeConfig` and `TreeConfig`.
|
||||
|
||||
use crate::{node::NodeTestContext, wallet::Wallet, NodeBuilderHelper, NodeHelperType, TmpDB};
|
||||
use futures_util::future::TryJoinAll;
|
||||
use reth_chainspec::EthChainSpec;
|
||||
use reth_engine_local::LocalPayloadAttributesBuilder;
|
||||
use reth_node_builder::{
|
||||
@@ -15,7 +16,7 @@ use reth_provider::providers::BlockchainProvider;
|
||||
use reth_rpc_server_types::RpcModuleSelection;
|
||||
use reth_tasks::TaskManager;
|
||||
use std::sync::Arc;
|
||||
use tracing::{span, Level};
|
||||
use tracing::{span, Instrument, Level};
|
||||
|
||||
/// Type alias for tree config modifier closure
|
||||
type TreeConfigModifier =
|
||||
@@ -122,66 +123,71 @@ where
|
||||
reth_node_api::TreeConfig::default()
|
||||
};
|
||||
|
||||
let mut nodes: Vec<NodeTestContext<_, _>> = Vec::with_capacity(self.num_nodes);
|
||||
let mut nodes = (0..self.num_nodes)
|
||||
.map(async |idx| {
|
||||
// Create base node config
|
||||
let base_config = NodeConfig::new(self.chain_spec.clone())
|
||||
.with_network(network_config.clone())
|
||||
.with_unused_ports()
|
||||
.with_rpc(
|
||||
RpcServerArgs::default()
|
||||
.with_unused_ports()
|
||||
.with_http()
|
||||
.with_http_api(RpcModuleSelection::All),
|
||||
);
|
||||
|
||||
// Apply node config modifier if present
|
||||
let node_config = if let Some(modifier) = &self.node_config_modifier {
|
||||
modifier(base_config)
|
||||
} else {
|
||||
base_config
|
||||
};
|
||||
|
||||
let span = span!(Level::INFO, "node", idx);
|
||||
let node = N::default();
|
||||
let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config)
|
||||
.testing_node(exec.clone())
|
||||
.with_types_and_provider::<N, BlockchainProvider<_>>()
|
||||
.with_components(node.components_builder())
|
||||
.with_add_ons(node.add_ons())
|
||||
.launch_with_fn(|builder| {
|
||||
let launcher = EngineNodeLauncher::new(
|
||||
builder.task_executor().clone(),
|
||||
builder.config().datadir(),
|
||||
tree_config.clone(),
|
||||
);
|
||||
builder.launch_with(launcher)
|
||||
})
|
||||
.instrument(span)
|
||||
.await?;
|
||||
|
||||
let node = NodeTestContext::new(node, self.attributes_generator).await?;
|
||||
|
||||
let genesis = node.block_hash(0);
|
||||
node.update_forkchoice(genesis, genesis).await?;
|
||||
|
||||
eyre::Ok(node)
|
||||
})
|
||||
.collect::<TryJoinAll<_>>()
|
||||
.await?;
|
||||
|
||||
for idx in 0..self.num_nodes {
|
||||
// Create base node config
|
||||
let base_config = NodeConfig::new(self.chain_spec.clone())
|
||||
.with_network(network_config.clone())
|
||||
.with_unused_ports()
|
||||
.with_rpc(
|
||||
RpcServerArgs::default()
|
||||
.with_unused_ports()
|
||||
.with_http()
|
||||
.with_http_api(RpcModuleSelection::All),
|
||||
);
|
||||
|
||||
// Apply node config modifier if present
|
||||
let node_config = if let Some(modifier) = &self.node_config_modifier {
|
||||
modifier(base_config)
|
||||
} else {
|
||||
base_config
|
||||
};
|
||||
|
||||
let span = span!(Level::INFO, "node", idx);
|
||||
let _enter = span.enter();
|
||||
let node = N::default();
|
||||
let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config)
|
||||
.testing_node(exec.clone())
|
||||
.with_types_and_provider::<N, BlockchainProvider<_>>()
|
||||
.with_components(node.components_builder())
|
||||
.with_add_ons(node.add_ons())
|
||||
.launch_with_fn(|builder| {
|
||||
let launcher = EngineNodeLauncher::new(
|
||||
builder.task_executor().clone(),
|
||||
builder.config().datadir(),
|
||||
tree_config.clone(),
|
||||
);
|
||||
builder.launch_with(launcher)
|
||||
})
|
||||
.await?;
|
||||
|
||||
let mut node = NodeTestContext::new(node, self.attributes_generator).await?;
|
||||
|
||||
let genesis = node.block_hash(0);
|
||||
node.update_forkchoice(genesis, genesis).await?;
|
||||
|
||||
let (prev, current) = nodes.split_at_mut(idx);
|
||||
let current = current.first_mut().unwrap();
|
||||
// Connect nodes if requested
|
||||
if self.connect_nodes {
|
||||
if let Some(previous_node) = nodes.last_mut() {
|
||||
previous_node.connect(&mut node).await;
|
||||
if let Some(prev_idx) = idx.checked_sub(1) {
|
||||
prev[prev_idx].connect(current).await;
|
||||
}
|
||||
|
||||
// Connect last node with the first if there are more than two
|
||||
if idx + 1 == self.num_nodes &&
|
||||
self.num_nodes > 2 &&
|
||||
let Some(first_node) = nodes.first_mut()
|
||||
let Some(first) = prev.first_mut()
|
||||
{
|
||||
node.connect(first_node).await;
|
||||
current.connect(first).await;
|
||||
}
|
||||
}
|
||||
|
||||
nodes.push(node);
|
||||
}
|
||||
|
||||
Ok((nodes, tasks, Wallet::default().with_chain_id(self.chain_spec.chain().into())))
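A minimal sketch (not part of the diff) of the concurrent-launch pattern adopted above; the `launch_all` helper and its `usize` payload are placeholders for the real per-node setup:

```rust
use futures_util::future::TryJoinAll;

// Build one future per node index, collect them into a `TryJoinAll`, and await them
// together; the first error from any node aborts the whole setup.
async fn launch_all(num_nodes: usize) -> eyre::Result<Vec<usize>> {
    (0..num_nodes)
        .map(async |idx| {
            // per-node setup (config, builder, forkchoice update) would go here
            eyre::Ok(idx)
        })
        .collect::<TryJoinAll<_>>()
        .await
}
```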
|
||||
|
||||
@@ -278,7 +278,7 @@ where
|
||||
let bundle_state_sorted = sort_bundle_state_for_comparison(re_executed_state);
|
||||
let output_state_sorted = sort_bundle_state_for_comparison(original_state);
|
||||
let filename = format!("{}.bundle_state.diff", block_prefix);
|
||||
let diff_path = self.save_diff(filename, &bundle_state_sorted, &output_state_sorted)?;
|
||||
let diff_path = self.save_diff(filename, &output_state_sorted, &bundle_state_sorted)?;
|
||||
|
||||
warn!(
|
||||
target: "engine::invalid_block_hooks::witness",
|
||||
@@ -308,13 +308,13 @@ where
|
||||
if let Some((original_updates, original_root)) = trie_updates {
|
||||
if re_executed_root != original_root {
|
||||
let filename = format!("{}.state_root.diff", block_prefix);
|
||||
let diff_path = self.save_diff(filename, &re_executed_root, &original_root)?;
|
||||
let diff_path = self.save_diff(filename, &original_root, &re_executed_root)?;
|
||||
warn!(target: "engine::invalid_block_hooks::witness", ?original_root, ?re_executed_root, diff_path = %diff_path.display(), "State root mismatch after re-execution");
|
||||
}
|
||||
|
||||
if re_executed_root != block.state_root() {
|
||||
let filename = format!("{}.header_state_root.diff", block_prefix);
|
||||
let diff_path = self.save_diff(filename, &re_executed_root, &block.state_root())?;
|
||||
let diff_path = self.save_diff(filename, &block.state_root(), &re_executed_root)?;
|
||||
warn!(target: "engine::invalid_block_hooks::witness", header_state_root=?block.state_root(), ?re_executed_root, diff_path = %diff_path.display(), "Re-executed state root does not match block state root");
|
||||
}
|
||||
|
||||
|
||||
@@ -30,7 +30,7 @@ fn default_account_worker_count() -> usize {
|
||||
}
|
||||
|
||||
/// The size of proof targets chunk to spawn in one multiproof calculation.
|
||||
pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE: usize = 10;
|
||||
pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE: usize = 60;
|
||||
|
||||
/// Default number of reserved CPU cores for non-reth processes.
|
||||
///
|
||||
|
||||
@@ -82,9 +82,12 @@ impl EngineApiMetrics {
|
||||
let tx = tx?;
|
||||
let span =
|
||||
debug_span!(target: "engine::tree", "execute tx", tx_hash=?tx.tx().tx_hash());
|
||||
let _enter = span.enter();
|
||||
let enter = span.entered();
|
||||
trace!(target: "engine::tree", "Executing transaction");
|
||||
executor.execute_transaction(tx)?;
|
||||
let gas_used = executor.execute_transaction(tx)?;
|
||||
|
||||
// record the tx gas used
|
||||
enter.record("gas_used", gas_used);
|
||||
}
|
||||
executor.finish().map(|(evm, result)| (evm.into_db(), result))
|
||||
};
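A self-contained sketch (not part of the diff) of the span-recording pattern used here. Declaring the field as `Empty` up front is an added assumption, since `tracing` only attaches `record`ed values for fields the span already declares:

```rust
use tracing::{debug_span, field::Empty};

fn execute_and_record(gas_used: u64) {
    // Declare `gas_used` as an empty field so it can be filled in after execution.
    let span = debug_span!(target: "engine::tree", "execute tx", gas_used = Empty);
    let enter = span.entered();
    // ... transaction execution happens while the span is entered ...
    enter.record("gas_used", gas_used);
}
```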
|
||||
@@ -248,6 +251,8 @@ pub(crate) struct NewPayloadStatusMetrics {
|
||||
pub(crate) new_payload_total_gas: Histogram,
|
||||
/// The gas per second of valid new payload messages received.
|
||||
pub(crate) new_payload_gas_per_second: Histogram,
|
||||
/// The gas per second for the last new payload call.
|
||||
pub(crate) new_payload_gas_per_second_last: Gauge,
|
||||
/// Latency for the new payload calls.
|
||||
pub(crate) new_payload_latency: Histogram,
|
||||
/// Latency for the last new payload call.
|
||||
@@ -271,7 +276,9 @@ impl NewPayloadStatusMetrics {
|
||||
PayloadStatusEnum::Valid => {
|
||||
self.new_payload_valid.increment(1);
|
||||
self.new_payload_total_gas.record(gas_used as f64);
|
||||
self.new_payload_gas_per_second.record(gas_used as f64 / elapsed.as_secs_f64());
|
||||
let gas_per_second = gas_used as f64 / elapsed.as_secs_f64();
|
||||
self.new_payload_gas_per_second.record(gas_per_second);
|
||||
self.new_payload_gas_per_second_last.set(gas_per_second);
|
||||
}
|
||||
PayloadStatusEnum::Syncing => self.new_payload_syncing.increment(1),
|
||||
PayloadStatusEnum::Accepted => self.new_payload_accepted.increment(1),
|
||||
|
||||
@@ -1134,6 +1134,15 @@ where
|
||||
if self.engine_kind.is_opstack() ||
|
||||
self.config.always_process_payload_attributes_on_canonical_head()
|
||||
{
|
||||
// We need to effectively unwind the _canonical_ chain to the FCU's head, which is
|
||||
// part of the canonical chain. We need to update the latest block state to reflect
|
||||
// the canonical ancestor. This ensures that state providers and the transaction
|
||||
// pool operate with the correct chain state after forkchoice update processing, and
|
||||
// new payloads built on the reorg'd head will be added to the tree immediately.
|
||||
if self.config.unwind_canonical_header() {
|
||||
self.update_latest_block_to_canonical_ancestor(&canonical_header)?;
|
||||
}
|
||||
|
||||
if let Some(attr) = attrs {
|
||||
debug!(target: "engine::tree", head = canonical_header.number(), "handling payload attributes for canonical head");
|
||||
// Clone only when we actually need to process the attributes
|
||||
@@ -1145,17 +1154,6 @@ where
|
||||
);
|
||||
return Ok(Some(TreeOutcome::new(updated)));
|
||||
}
|
||||
|
||||
// At this point, no alternative block has been triggered, so we need effectively
|
||||
// unwind the _canonical_ chain to the FCU's head, which is part of the canonical
|
||||
// chain. We need to update the latest block state to reflect the
|
||||
// canonical ancestor. This ensures that state providers and the
|
||||
// transaction pool operate with the correct chain state after
|
||||
// forkchoice update processing.
|
||||
|
||||
if self.config.unwind_canonical_header() {
|
||||
self.update_latest_block_to_canonical_ancestor(&canonical_header)?;
|
||||
}
|
||||
}
|
||||
|
||||
// According to the Engine API specification, client software MAY skip an update of the
|
||||
@@ -1805,8 +1803,8 @@ where
|
||||
Ok(Some(ExecutedBlock {
|
||||
recovered_block: Arc::new(RecoveredBlock::new_sealed(block, senders)),
|
||||
execution_output: Arc::new(execution_output),
|
||||
hashed_state: Arc::new(hashed_state),
|
||||
trie_updates: Arc::new(trie_updates.into()),
|
||||
hashed_state: Arc::new(hashed_state.into_sorted()),
|
||||
trie_updates: Arc::new(trie_updates),
|
||||
}))
|
||||
}
|
||||
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
//! Configured sparse trie enum for switching between serial and parallel implementations.
|
||||
|
||||
use alloy_primitives::B256;
|
||||
use reth_trie::{Nibbles, TrieNode};
|
||||
use reth_trie::{Nibbles, ProofTrieNode, TrieMasks, TrieNode};
|
||||
use reth_trie_sparse::{
|
||||
errors::SparseTrieResult, provider::TrieNodeProvider, LeafLookup, LeafLookupError,
|
||||
RevealedSparseNode, SerialSparseTrie, SparseTrieInterface, SparseTrieUpdates, TrieMasks,
|
||||
SerialSparseTrie, SparseTrieInterface, SparseTrieUpdates,
|
||||
};
|
||||
use reth_trie_sparse_parallel::ParallelSparseTrie;
|
||||
use std::borrow::Cow;
|
||||
@@ -83,7 +83,7 @@ impl SparseTrieInterface for ConfiguredSparseTrie {
|
||||
}
|
||||
}
|
||||
|
||||
fn reveal_nodes(&mut self, nodes: Vec<RevealedSparseNode>) -> SparseTrieResult<()> {
|
||||
fn reveal_nodes(&mut self, nodes: Vec<ProofTrieNode>) -> SparseTrieResult<()> {
|
||||
match self {
|
||||
Self::Serial(trie) => trie.reveal_nodes(nodes),
|
||||
Self::Parallel(trie) => trie.reveal_nodes(nodes),
|
||||
|
||||
@@ -46,7 +46,7 @@ use std::{
|
||||
},
|
||||
time::Instant,
|
||||
};
|
||||
use tracing::{debug, debug_span, instrument, warn};
|
||||
use tracing::{debug, debug_span, instrument, warn, Span};
|
||||
|
||||
mod configured_sparse_trie;
|
||||
pub mod executor;
|
||||
@@ -209,7 +209,7 @@ where
|
||||
+ Send
|
||||
+ 'static,
|
||||
{
|
||||
let span = tracing::Span::current();
|
||||
let span = Span::current();
|
||||
let (to_sparse_trie, sparse_trie_rx) = channel();
|
||||
|
||||
// We rely on the cursor factory to provide whatever DB overlay is necessary to see a
|
||||
@@ -249,8 +249,9 @@ where
|
||||
);
|
||||
|
||||
// spawn multi-proof task
|
||||
let parent_span = span.clone();
|
||||
self.executor.spawn_blocking(move || {
|
||||
let _enter = span.entered();
|
||||
let _enter = parent_span.entered();
|
||||
multi_proof_task.run();
|
||||
});
|
||||
|
||||
@@ -265,6 +266,7 @@ where
|
||||
prewarm_handle,
|
||||
state_root: Some(state_root_rx),
|
||||
transactions: execution_rx,
|
||||
_span: span,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -289,6 +291,7 @@ where
|
||||
prewarm_handle,
|
||||
state_root: None,
|
||||
transactions: execution_rx,
|
||||
_span: Span::current(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -368,9 +371,7 @@ where
|
||||
// spawn pre-warm task
|
||||
{
|
||||
let to_prewarm_task = to_prewarm_task.clone();
|
||||
let span = debug_span!(target: "engine::tree::payload_processor", "prewarm task");
|
||||
self.executor.spawn_blocking(move || {
|
||||
let _enter = span.entered();
|
||||
prewarm_task.run(transactions, to_prewarm_task);
|
||||
});
|
||||
}
|
||||
@@ -434,7 +435,7 @@ where
|
||||
sparse_state_trie,
|
||||
);
|
||||
|
||||
let span = tracing::Span::current();
|
||||
let span = Span::current();
|
||||
self.executor.spawn_blocking(move || {
|
||||
let _enter = span.entered();
|
||||
|
||||
@@ -466,10 +467,12 @@ pub struct PayloadHandle<Tx, Err> {
|
||||
to_multi_proof: Option<CrossbeamSender<MultiProofMessage>>,
|
||||
// must include the receiver of the state root wired to the sparse trie
|
||||
prewarm_handle: CacheTaskHandle,
|
||||
/// Receiver for the state root
|
||||
state_root: Option<mpsc::Receiver<Result<StateRootComputeOutcome, ParallelStateRootError>>>,
|
||||
/// Stream of block transactions
|
||||
transactions: mpsc::Receiver<Result<Tx, Err>>,
|
||||
/// Receiver for the state root
|
||||
state_root: Option<mpsc::Receiver<Result<StateRootComputeOutcome, ParallelStateRootError>>>,
|
||||
/// Span for tracing
|
||||
_span: Span,
|
||||
}
|
||||
|
||||
impl<Tx, Err> PayloadHandle<Tx, Err> {
|
||||
@@ -478,7 +481,12 @@ impl<Tx, Err> PayloadHandle<Tx, Err> {
|
||||
/// # Panics
|
||||
///
|
||||
/// If payload processing was started without background tasks.
|
||||
#[instrument(level = "debug", target = "engine::tree::payload_processor", skip_all)]
|
||||
#[instrument(
|
||||
level = "debug",
|
||||
target = "engine::tree::payload_processor",
|
||||
name = "await_state_root",
|
||||
skip_all
|
||||
)]
|
||||
pub fn state_root(&mut self) -> Result<StateRootComputeOutcome, ParallelStateRootError> {
|
||||
self.state_root
|
||||
.take()
|
||||
|
||||
@@ -13,9 +13,8 @@ use metrics::{Gauge, Histogram};
|
||||
use reth_metrics::Metrics;
|
||||
use reth_revm::state::EvmState;
|
||||
use reth_trie::{
|
||||
added_removed_keys::MultiAddedRemovedKeys, prefix_set::TriePrefixSetsMut,
|
||||
updates::TrieUpdatesSorted, DecodedMultiProof, HashedPostState, HashedPostStateSorted,
|
||||
HashedStorage, MultiProofTargets, TrieInput,
|
||||
added_removed_keys::MultiAddedRemovedKeys, DecodedMultiProof, HashedPostState, HashedStorage,
|
||||
MultiProofTargets,
|
||||
};
|
||||
use reth_trie_parallel::{
|
||||
proof::ParallelProof,
|
||||
@@ -27,6 +26,10 @@ use reth_trie_parallel::{
|
||||
use std::{collections::BTreeMap, ops::DerefMut, sync::Arc, time::Instant};
|
||||
use tracing::{debug, error, instrument, trace};
|
||||
|
||||
/// The default max targets, for limiting the number of account and storage proof targets to be
|
||||
/// fetched by a single worker.
|
||||
const DEFAULT_MAX_TARGETS_FOR_CHUNKING: usize = 300;
|
||||
|
||||
/// A trie update that can be applied to sparse trie alongside the proofs for touched parts of the
|
||||
/// state.
|
||||
#[derive(Default, Debug)]
|
||||
@@ -56,35 +59,6 @@ impl SparseTrieUpdate {
|
||||
}
|
||||
}
|
||||
|
||||
/// Common configuration for multi proof tasks
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub(crate) struct MultiProofConfig {
|
||||
/// The sorted collection of cached in-memory intermediate trie nodes that
|
||||
/// can be reused for computation.
|
||||
pub nodes_sorted: Arc<TrieUpdatesSorted>,
|
||||
/// The sorted in-memory overlay hashed state.
|
||||
pub state_sorted: Arc<HashedPostStateSorted>,
|
||||
/// The collection of prefix sets for the computation. Since the prefix sets _always_
|
||||
/// invalidate the in-memory nodes, not all keys from `state_sorted` might be present here,
|
||||
/// if we have cached nodes for them.
|
||||
pub prefix_sets: Arc<TriePrefixSetsMut>,
|
||||
}
|
||||
|
||||
impl MultiProofConfig {
|
||||
/// Creates a new state root config from the trie input.
|
||||
///
|
||||
/// This returns a cleared [`TrieInput`] so that we can reuse any allocated space in the
|
||||
/// [`TrieInput`].
|
||||
pub(crate) fn from_input(mut input: TrieInput) -> (TrieInput, Self) {
|
||||
let config = Self {
|
||||
nodes_sorted: Arc::new(input.nodes.drain_into_sorted()),
|
||||
state_sorted: Arc::new(input.state.drain_into_sorted()),
|
||||
prefix_sets: Arc::new(input.prefix_sets.clone()),
|
||||
};
|
||||
(input.cleared(), config)
|
||||
}
|
||||
}
|
||||
|
||||
/// Messages used internally by the multi proof task.
|
||||
#[derive(Debug)]
|
||||
pub(super) enum MultiProofMessage {
|
||||
@@ -704,6 +678,10 @@ pub(super) struct MultiProofTask {
|
||||
multiproof_manager: MultiproofManager,
|
||||
/// multi proof task metrics
|
||||
metrics: MultiProofTaskMetrics,
|
||||
/// If this number is exceeded and chunking is enabled, chunking is forced across workers
/// regardless of how many workers are currently available. This prevents very large proof
/// tasks from being handled by a single worker.
|
||||
max_targets_for_chunking: usize,
|
||||
}
|
||||
|
||||
impl MultiProofTask {
|
||||
@@ -732,6 +710,7 @@ impl MultiProofTask {
|
||||
proof_result_tx,
|
||||
),
|
||||
metrics,
|
||||
max_targets_for_chunking: DEFAULT_MAX_TARGETS_FOR_CHUNKING,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -921,10 +900,14 @@ impl MultiProofTask {
|
||||
|
||||
let mut spawned_proof_targets = MultiProofTargets::default();
|
||||
|
||||
// Chunk regardless of worker availability if there are many proof targets
|
||||
let many_proof_targets =
|
||||
not_fetched_state_update.chunking_length() > self.max_targets_for_chunking;
|
||||
|
||||
// Only chunk if multiple account or storage workers are available to take advantage of
|
||||
// parallelism.
|
||||
let should_chunk = self.multiproof_manager.proof_worker_handle.available_account_workers() >
|
||||
1 ||
|
||||
let should_chunk = many_proof_targets ||
|
||||
self.multiproof_manager.proof_worker_handle.available_account_workers() > 1 ||
|
||||
self.multiproof_manager.proof_worker_handle.available_storage_workers() > 1;
|
||||
|
||||
let mut dispatch = |hashed_state_update| {
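A minimal sketch (not part of the diff) of the dispatch decision introduced above; the parameter names are placeholders for the worker-handle and state-update calls:

```rust
/// Returns whether a state update should be split into chunks before dispatching
/// it to the proof workers.
fn should_chunk_targets(
    chunking_length: usize,
    max_targets_for_chunking: usize,
    available_account_workers: usize,
    available_storage_workers: usize,
) -> bool {
    // Force chunking for very large target sets; otherwise only chunk when more than
    // one worker of either kind is available to exploit the parallelism.
    chunking_length > max_targets_for_chunking ||
        available_account_workers > 1 ||
        available_storage_workers > 1
}
```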
|
||||
|
||||
@@ -40,7 +40,7 @@ use std::{
|
||||
},
|
||||
time::Instant,
|
||||
};
|
||||
use tracing::{debug, debug_span, instrument, trace, warn};
|
||||
use tracing::{debug, debug_span, instrument, trace, warn, Span};
|
||||
|
||||
/// A wrapper for transactions that includes their index in the block.
|
||||
#[derive(Clone)]
|
||||
@@ -87,6 +87,8 @@ where
|
||||
to_multi_proof: Option<CrossbeamSender<MultiProofMessage>>,
|
||||
/// Receiver for events produced by tx execution
|
||||
actions_rx: Receiver<PrewarmTaskEvent>,
|
||||
/// Parent span for tracing
|
||||
parent_span: Span,
|
||||
}
|
||||
|
||||
impl<N, P, Evm> PrewarmCacheTask<N, P, Evm>
|
||||
@@ -122,6 +124,7 @@ where
|
||||
transaction_count_hint,
|
||||
to_multi_proof,
|
||||
actions_rx,
|
||||
parent_span: Span::current(),
|
||||
},
|
||||
actions_tx,
|
||||
)
|
||||
@@ -140,7 +143,7 @@ where
|
||||
let ctx = self.ctx.clone();
|
||||
let max_concurrency = self.max_concurrency;
|
||||
let transaction_count_hint = self.transaction_count_hint;
|
||||
let span = tracing::Span::current();
|
||||
let span = Span::current();
|
||||
|
||||
self.executor.spawn_blocking(move || {
|
||||
let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", parent: span, "spawn_all").entered();
|
||||
@@ -284,9 +287,10 @@ where
|
||||
/// This will execute the transactions until all transactions have been processed or the task
|
||||
/// was cancelled.
|
||||
#[instrument(
|
||||
parent = &self.parent_span,
|
||||
level = "debug",
|
||||
target = "engine::tree::payload_processor::prewarm",
|
||||
name = "prewarm",
|
||||
name = "prewarm and caching",
|
||||
skip_all
|
||||
)]
|
||||
pub(super) fn run(
|
||||
@@ -452,7 +456,7 @@ where
|
||||
.entered();
|
||||
txs.recv()
|
||||
} {
|
||||
let _enter =
|
||||
let enter =
|
||||
debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm tx", index, tx_hash=%tx.tx().tx_hash())
|
||||
.entered();
|
||||
|
||||
@@ -484,7 +488,11 @@ where
|
||||
};
|
||||
metrics.execution_duration.record(start.elapsed());
|
||||
|
||||
drop(_enter);
|
||||
// record some basic information about the transactions
|
||||
enter.record("gas_used", res.result.gas_used());
|
||||
enter.record("is_success", res.result.is_success());
|
||||
|
||||
drop(enter);
|
||||
|
||||
// If the task was cancelled, stop execution, send an empty result to notify the task,
|
||||
// and exit.
|
||||
|
||||
@@ -5,7 +5,7 @@ use crate::tree::{
|
||||
error::{InsertBlockError, InsertBlockErrorKind, InsertPayloadError},
|
||||
executor::WorkloadExecutor,
|
||||
instrumented_state::InstrumentedStateProvider,
|
||||
payload_processor::{multiproof::MultiProofConfig, PayloadProcessor},
|
||||
payload_processor::PayloadProcessor,
|
||||
precompile_cache::{CachedPrecompile, CachedPrecompileMetrics, PrecompileCacheMap},
|
||||
sparse_trie::StateRootComputeOutcome,
|
||||
EngineApiMetrics, EngineApiTreeState, ExecutionEnv, PayloadHandle, StateProviderBuilder,
|
||||
@@ -38,7 +38,7 @@ use reth_provider::{
|
||||
StateRootProvider, TrieReader,
|
||||
};
|
||||
use reth_revm::db::State;
|
||||
use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput};
|
||||
use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInputSorted};
|
||||
use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError};
|
||||
use std::{collections::HashMap, sync::Arc, time::Instant};
|
||||
use tracing::{debug, debug_span, error, info, instrument, trace, warn};
|
||||
@@ -121,8 +121,6 @@ where
|
||||
metrics: EngineApiMetrics,
|
||||
/// Validator for the payload.
|
||||
validator: V,
|
||||
/// A cleared trie input, kept around to be reused so allocations can be minimized.
|
||||
trie_input: Option<TrieInput>,
|
||||
}
|
||||
|
||||
impl<N, P, Evm, V> BasicEngineValidator<P, Evm, V>
|
||||
@@ -166,11 +164,11 @@ where
|
||||
invalid_block_hook,
|
||||
metrics: EngineApiMetrics::default(),
|
||||
validator,
|
||||
trie_input: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Converts a [`BlockOrPayload`] to a recovered block.
|
||||
#[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)]
|
||||
pub fn convert_to_block<T: PayloadTypes<BuiltPayload: BuiltPayload<Primitives = N>>>(
|
||||
&self,
|
||||
input: BlockOrPayload<T>,
|
||||
@@ -375,7 +373,7 @@ where
|
||||
debug!(
|
||||
target: "engine::tree::payload_validator",
|
||||
?strategy,
|
||||
"Deciding which state root algorithm to run"
|
||||
"Decided which state root algorithm to run"
|
||||
);
|
||||
|
||||
// use prewarming background task
|
||||
@@ -412,7 +410,7 @@ where
|
||||
Err(err) => return self.handle_execution_error(input, err, &parent_block),
|
||||
};
|
||||
|
||||
// after executing the block we can stop executing transactions
|
||||
// After executing the block we can stop prewarming transactions
|
||||
handle.stop_prewarming_execution();
|
||||
|
||||
let block = self.convert_to_block(input)?;
|
||||
@@ -422,10 +420,7 @@ where
|
||||
block
|
||||
);
|
||||
|
||||
debug!(target: "engine::tree::payload_validator", "Calculating block state root");
|
||||
|
||||
let root_time = Instant::now();
|
||||
|
||||
let mut maybe_state_root = None;
|
||||
|
||||
match strategy {
|
||||
@@ -530,8 +525,8 @@ where
|
||||
Ok(ExecutedBlock {
|
||||
recovered_block: Arc::new(block),
|
||||
execution_output: Arc::new(ExecutionOutcome::from((output, block_num_hash.number))),
|
||||
hashed_state: Arc::new(hashed_state),
|
||||
trie_updates: Arc::new(trie_output),
|
||||
hashed_state: Arc::new(hashed_state.into_sorted()),
|
||||
trie_updates: Arc::new(trie_output.into_sorted()),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -553,6 +548,7 @@ where
|
||||
|
||||
/// Validate if block is correct and satisfies all the consensus rules that concern the header
|
||||
/// and block body itself.
|
||||
#[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)]
|
||||
fn validate_block_inner(&self, block: &RecoveredBlock<N::Block>) -> Result<(), ConsensusError> {
|
||||
if let Err(e) = self.consensus.validate_header(block.sealed_header()) {
|
||||
error!(target: "engine::tree::payload_validator", ?block, "Failed to validate header {}: {e}", block.hash());
|
||||
@@ -643,26 +639,24 @@ where
|
||||
hashed_state: &HashedPostState,
|
||||
state: &EngineApiTreeState<N>,
|
||||
) -> Result<(B256, TrieUpdates), ParallelStateRootError> {
|
||||
let (mut input, block_hash) = self.compute_trie_input(parent_hash, state, None)?;
|
||||
let (mut input, block_hash) = self.compute_trie_input(parent_hash, state)?;
|
||||
|
||||
// Extend with block we are validating root for.
|
||||
input.append_ref(hashed_state);
|
||||
// Extend state overlay with current block's sorted state.
|
||||
input.prefix_sets.extend(hashed_state.construct_prefix_sets());
|
||||
let sorted_hashed_state = hashed_state.clone().into_sorted();
|
||||
Arc::make_mut(&mut input.state).extend_ref(&sorted_hashed_state);
|
||||
|
||||
// Convert the TrieInput into a MultProofConfig, since everything uses the sorted
|
||||
// forms of the state/trie fields.
|
||||
let (_, multiproof_config) = MultiProofConfig::from_input(input);
|
||||
let TrieInputSorted { nodes, state, prefix_sets: prefix_sets_mut } = input;
|
||||
|
||||
let factory = OverlayStateProviderFactory::new(self.provider.clone())
|
||||
.with_block_hash(Some(block_hash))
|
||||
.with_trie_overlay(Some(multiproof_config.nodes_sorted))
|
||||
.with_hashed_state_overlay(Some(multiproof_config.state_sorted));
|
||||
.with_trie_overlay(Some(nodes))
|
||||
.with_hashed_state_overlay(Some(state));
|
||||
|
||||
// The `hashed_state` argument is already taken into account as part of the overlay, but we
|
||||
// need to use the prefix sets which were generated from it to indicate to the
|
||||
// ParallelStateRoot which parts of the trie need to be recomputed.
|
||||
let prefix_sets = Arc::into_inner(multiproof_config.prefix_sets)
|
||||
.expect("MultiProofConfig was never cloned")
|
||||
.freeze();
|
||||
let prefix_sets = prefix_sets_mut.freeze();
|
||||
|
||||
ParallelStateRoot::new(factory, prefix_sets).incremental_root_with_updates()
|
||||
}
|
||||
@@ -673,6 +667,7 @@ where
|
||||
/// - parent header validation
|
||||
/// - post-execution consensus validation
|
||||
/// - state-root based post-execution validation
|
||||
#[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)]
|
||||
fn validate_post_execution<T: PayloadTypes<BuiltPayload: BuiltPayload<Primitives = N>>>(
|
||||
&self,
|
||||
block: &RecoveredBlock<N::Block>,
|
||||
@@ -692,21 +687,32 @@ where
|
||||
}
|
||||
|
||||
// now validate against the parent
|
||||
let _enter = debug_span!(target: "engine::tree::payload_validator", "validate_header_against_parent").entered();
|
||||
if let Err(e) =
|
||||
self.consensus.validate_header_against_parent(block.sealed_header(), parent_block)
|
||||
{
|
||||
warn!(target: "engine::tree::payload_validator", ?block, "Failed to validate header {} against parent: {e}", block.hash());
|
||||
return Err(e.into())
|
||||
}
|
||||
drop(_enter);
|
||||
|
||||
// Validate block post-execution rules
|
||||
let _enter =
|
||||
debug_span!(target: "engine::tree::payload_validator", "validate_block_post_execution")
|
||||
.entered();
|
||||
if let Err(err) = self.consensus.validate_block_post_execution(block, output) {
|
||||
// call post-block hook
|
||||
self.on_invalid_block(parent_block, block, output, None, ctx.state_mut());
|
||||
return Err(err.into())
|
||||
}
|
||||
drop(_enter);
|
||||
|
||||
let _enter =
|
||||
debug_span!(target: "engine::tree::payload_validator", "hashed_post_state").entered();
|
||||
let hashed_state = self.provider.hashed_post_state(&output.state);
|
||||
drop(_enter);
|
||||
|
||||
let _enter = debug_span!(target: "engine::tree::payload_validator", "validate_block_post_execution_with_hashed_state").entered();
|
||||
if let Err(err) =
|
||||
self.validator.validate_block_post_execution_with_hashed_state(&hashed_state, block)
|
||||
{
|
||||
@@ -758,26 +764,23 @@ where
|
||||
> {
|
||||
match strategy {
|
||||
StateRootStrategy::StateRootTask => {
|
||||
// get allocated trie input if it exists
|
||||
let allocated_trie_input = self.trie_input.take();
|
||||
|
||||
// Compute trie input
|
||||
let trie_input_start = Instant::now();
|
||||
let (trie_input, block_hash) =
|
||||
self.compute_trie_input(parent_hash, state, allocated_trie_input)?;
|
||||
let (trie_input, block_hash) = self.compute_trie_input(parent_hash, state)?;
|
||||
|
||||
// Convert the TrieInput into a MultProofConfig, since everything uses the sorted
|
||||
// forms of the state/trie fields.
|
||||
let (trie_input, multiproof_config) = MultiProofConfig::from_input(trie_input);
|
||||
self.trie_input.replace(trie_input);
|
||||
self.metrics
|
||||
.block_validation
|
||||
.trie_input_duration
|
||||
.record(trie_input_start.elapsed().as_secs_f64());
|
||||
|
||||
// Create OverlayStateProviderFactory with sorted trie data for multiproofs
|
||||
let TrieInputSorted { nodes, state, .. } = trie_input;
|
||||
|
||||
// Create OverlayStateProviderFactory with the multiproof config, for use with
|
||||
// multiproofs.
|
||||
let multiproof_provider_factory =
|
||||
OverlayStateProviderFactory::new(self.provider.clone())
|
||||
.with_block_hash(Some(block_hash))
|
||||
.with_trie_overlay(Some(multiproof_config.nodes_sorted))
|
||||
.with_hashed_state_overlay(Some(multiproof_config.state_sorted));
|
||||
.with_trie_overlay(Some(nodes))
|
||||
.with_hashed_state_overlay(Some(state));
|
||||
|
||||
// Use state root task only if prefix sets are empty, otherwise proof generation is
|
||||
// too expensive because it requires walking all paths in every proof.
|
||||
@@ -851,23 +854,14 @@ where
|
||||
}
|
||||
|
||||
/// Determines the state root computation strategy based on configuration.
|
||||
#[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)]
|
||||
fn plan_state_root_computation(&self) -> StateRootStrategy {
|
||||
let strategy = if self.config.state_root_fallback() {
|
||||
const fn plan_state_root_computation(&self) -> StateRootStrategy {
|
||||
if self.config.state_root_fallback() {
|
||||
StateRootStrategy::Synchronous
|
||||
} else if self.config.use_state_root_task() {
|
||||
StateRootStrategy::StateRootTask
|
||||
} else {
|
||||
StateRootStrategy::Parallel
|
||||
};
|
||||
|
||||
debug!(
|
||||
target: "engine::tree::payload_validator",
|
||||
?strategy,
|
||||
"Planned state root computation strategy"
|
||||
);
|
||||
|
||||
strategy
|
||||
}
|
||||
}
|
||||
|
||||
/// Called when an invalid block is encountered during validation.
|
||||
@@ -889,14 +883,14 @@ where
|
||||
/// Computes the trie input at the provided parent hash, as well as the block number of the
|
||||
/// highest persisted ancestor.
|
||||
///
|
||||
/// The goal of this function is to take in-memory blocks and generate a [`TrieInput`] that
|
||||
/// serves as an overlay to the database blocks.
|
||||
/// The goal of this function is to take in-memory blocks and generate a [`TrieInputSorted`]
|
||||
/// that serves as an overlay to the database blocks.
|
||||
///
|
||||
/// It works as follows:
|
||||
/// 1. Collect in-memory blocks that are descendants of the provided parent hash using
|
||||
/// [`crate::tree::TreeState::blocks_by_hash`]. This returns the highest persisted ancestor
|
||||
/// hash (`block_hash`) and the list of in-memory descendant blocks.
|
||||
/// 2. Extend the `TrieInput` with the contents of these in-memory blocks (from oldest to
|
||||
/// 2. Extend the `TrieInputSorted` with the contents of these in-memory blocks (from oldest to
|
||||
/// newest) to build the overlay state and trie updates that sit on top of the database view
|
||||
/// anchored at `block_hash`.
|
||||
#[instrument(
|
||||
@@ -909,11 +903,7 @@ where
|
||||
&self,
|
||||
parent_hash: B256,
|
||||
state: &EngineApiTreeState<N>,
|
||||
allocated_trie_input: Option<TrieInput>,
|
||||
) -> ProviderResult<(TrieInput, B256)> {
|
||||
// get allocated trie input or use a default trie input
|
||||
let mut input = allocated_trie_input.unwrap_or_default();
|
||||
|
||||
) -> ProviderResult<(TrieInputSorted, B256)> {
|
||||
let (block_hash, blocks) =
|
||||
state.tree_state.blocks_by_hash(parent_hash).unwrap_or_else(|| (parent_hash, vec![]));
|
||||
|
||||
@@ -923,10 +913,24 @@ where
|
||||
debug!(target: "engine::tree::payload_validator", historical = ?block_hash, blocks = blocks.len(), "Parent found in memory");
|
||||
}
|
||||
|
||||
// Extend with contents of parent in-memory blocks.
|
||||
input.extend_with_blocks(
|
||||
blocks.iter().rev().map(|block| (block.hashed_state(), block.trie_updates())),
|
||||
);
|
||||
// Extend with contents of parent in-memory blocks directly in sorted form.
|
||||
let mut input = TrieInputSorted::default();
|
||||
let mut blocks_iter = blocks.iter().rev().peekable();
|
||||
|
||||
if let Some(first) = blocks_iter.next() {
|
||||
input.state = Arc::clone(&first.hashed_state);
|
||||
input.nodes = Arc::clone(&first.trie_updates);
|
||||
|
||||
// Only clone and mutate if there are more in-memory blocks.
|
||||
if blocks_iter.peek().is_some() {
|
||||
let state_mut = Arc::make_mut(&mut input.state);
|
||||
let nodes_mut = Arc::make_mut(&mut input.nodes);
|
||||
for block in blocks_iter {
|
||||
state_mut.extend_ref(block.hashed_state());
|
||||
nodes_mut.extend_ref(block.trie_updates());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok((input, block_hash))
|
||||
}
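The overlay construction above leans on `Arc`'s clone-on-write behaviour: the first in-memory block's sorted state is shared as-is, and a deep clone only happens when further blocks must be folded in. A self-contained sketch of that pattern, with `Vec<u64>` standing in for the sorted state and trie types:

```rust
use std::sync::Arc;

fn fold_overlays(overlays: &[Arc<Vec<u64>>]) -> Arc<Vec<u64>> {
    let mut iter = overlays.iter();
    // Reuse the first overlay's Arc directly; nothing is cloned for the single-block case.
    let Some(first) = iter.next() else { return Arc::default() };
    let mut acc = Arc::clone(first);
    for next in iter {
        // `Arc::make_mut` clones the inner value only if the Arc is still shared.
        Arc::make_mut(&mut acc).extend_from_slice(next);
    }
    acc
}
```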
|
||||
|
||||
@@ -274,7 +274,12 @@ mod tests {
|
||||
#[test]
|
||||
fn test_precompile_cache_basic() {
|
||||
let dyn_precompile: DynPrecompile = (|_input: PrecompileInput<'_>| -> PrecompileResult {
|
||||
Ok(PrecompileOutput { gas_used: 0, bytes: Bytes::default(), reverted: false })
|
||||
Ok(PrecompileOutput {
|
||||
gas_used: 0,
|
||||
gas_refunded: 0,
|
||||
bytes: Bytes::default(),
|
||||
reverted: false,
|
||||
})
|
||||
})
|
||||
.into();
|
||||
|
||||
@@ -283,6 +288,7 @@ mod tests {
|
||||
|
||||
let output = PrecompileOutput {
|
||||
gas_used: 50,
|
||||
gas_refunded: 0,
|
||||
bytes: alloy_primitives::Bytes::copy_from_slice(b"cached_result"),
|
||||
reverted: false,
|
||||
};
|
||||
@@ -315,6 +321,7 @@ mod tests {
|
||||
|
||||
Ok(PrecompileOutput {
|
||||
gas_used: 5000,
|
||||
gas_refunded: 0,
|
||||
bytes: alloy_primitives::Bytes::copy_from_slice(b"output_from_precompile_1"),
|
||||
reverted: false,
|
||||
})
|
||||
@@ -329,6 +336,7 @@ mod tests {
|
||||
|
||||
Ok(PrecompileOutput {
|
||||
gas_used: 7000,
|
||||
gas_refunded: 0,
|
||||
bytes: alloy_primitives::Bytes::copy_from_slice(b"output_from_precompile_2"),
|
||||
reverted: false,
|
||||
})
|
||||
|
||||
@@ -826,8 +826,8 @@ fn test_tree_state_on_new_head_deep_fork() {
|
||||
test_harness.tree.state.tree_state.insert_executed(ExecutedBlock {
|
||||
recovered_block: Arc::new(block.clone()),
|
||||
execution_output: Arc::new(ExecutionOutcome::default()),
|
||||
hashed_state: Arc::new(HashedPostState::default()),
|
||||
trie_updates: Arc::new(TrieUpdates::default()),
|
||||
hashed_state: Arc::new(HashedPostState::default().into_sorted()),
|
||||
trie_updates: Arc::new(TrieUpdates::default().into_sorted()),
|
||||
});
|
||||
}
|
||||
test_harness.tree.state.tree_state.set_canonical_head(chain_a.last().unwrap().num_hash());
|
||||
@@ -836,8 +836,8 @@ fn test_tree_state_on_new_head_deep_fork() {
|
||||
test_harness.tree.state.tree_state.insert_executed(ExecutedBlock {
|
||||
recovered_block: Arc::new(block.clone()),
|
||||
execution_output: Arc::new(ExecutionOutcome::default()),
|
||||
hashed_state: Arc::new(HashedPostState::default()),
|
||||
trie_updates: Arc::new(TrieUpdates::default()),
|
||||
hashed_state: Arc::new(HashedPostState::default().into_sorted()),
|
||||
trie_updates: Arc::new(TrieUpdates::default().into_sorted()),
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@ alloy-primitives.workspace = true
|
||||
|
||||
# reth
|
||||
reth-fs-util.workspace = true
|
||||
reth-era.workspace = true
|
||||
|
||||
# http
|
||||
bytes.workspace = true
|
||||
|
||||
@@ -3,14 +3,18 @@ use bytes::Bytes;
|
||||
use eyre::{eyre, OptionExt};
|
||||
use futures_util::{stream::StreamExt, Stream, TryStreamExt};
|
||||
use reqwest::{Client, IntoUrl, Url};
|
||||
use reth_era::common::file_ops::EraFileType;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::{future::Future, path::Path, str::FromStr};
|
||||
use tokio::{
|
||||
fs::{self, File},
|
||||
io::{self, AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncWriteExt},
|
||||
join, try_join,
|
||||
try_join,
|
||||
};
|
||||
|
||||
/// Downloaded index page filename
|
||||
const INDEX_HTML_FILE: &str = "index.html";
|
||||
|
||||
/// Accesses the network over HTTP.
|
||||
pub trait HttpClient {
|
||||
/// Makes an HTTP GET request to `url`. Returns a stream of response body bytes.
|
||||
@@ -41,6 +45,7 @@ pub struct EraClient<Http> {
|
||||
client: Http,
|
||||
url: Url,
|
||||
folder: Box<Path>,
|
||||
era_type: EraFileType,
|
||||
}
|
||||
|
||||
impl<Http: HttpClient + Clone> EraClient<Http> {
|
||||
@@ -48,7 +53,8 @@ impl<Http: HttpClient + Clone> EraClient<Http> {
|
||||
|
||||
/// Constructs [`EraClient`] using `client` to download from `url` into `folder`.
|
||||
pub fn new(client: Http, url: Url, folder: impl Into<Box<Path>>) -> Self {
|
||||
Self { client, url, folder: folder.into() }
|
||||
let era_type = EraFileType::from_url(url.as_str());
|
||||
Self { client, url, folder: folder.into(), era_type }
|
||||
}
|
||||
|
||||
/// Performs a GET request on `url` and stores the response body into a file located within
|
||||
@@ -92,9 +98,11 @@ impl<Http: HttpClient + Clone> EraClient<Http> {
|
||||
}
|
||||
}
|
||||
|
||||
self.assert_checksum(number, actual_checksum?)
|
||||
.await
|
||||
.map_err(|e| eyre!("{e} for {file_name} at {}", path.display()))?;
|
||||
if self.era_type == EraFileType::Era1 {
|
||||
self.assert_checksum(number, actual_checksum?)
|
||||
.await
|
||||
.map_err(|e| eyre!("{e} for {file_name} at {}", path.display()))?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(path.into_boxed_path())
|
||||
@@ -145,9 +153,11 @@ impl<Http: HttpClient + Clone> EraClient<Http> {
|
||||
pub async fn files_count(&self) -> usize {
|
||||
let mut count = 0usize;
|
||||
|
||||
let file_extension = self.era_type.extension().trim_start_matches('.');
|
||||
|
||||
if let Ok(mut dir) = fs::read_dir(&self.folder).await {
|
||||
while let Ok(Some(entry)) = dir.next_entry().await {
|
||||
if entry.path().extension() == Some("era1".as_ref()) {
|
||||
if entry.path().extension() == Some(file_extension.as_ref()) {
|
||||
count += 1;
|
||||
}
|
||||
}
|
||||
@@ -156,46 +166,35 @@ impl<Http: HttpClient + Clone> EraClient<Http> {
|
||||
count
|
||||
}
|
||||
|
||||
/// Fetches the list of ERA1 files from `url` and stores it in a file located within `folder`.
|
||||
/// Fetches the list of ERA1/ERA files from `url` and stores it in a file located within
|
||||
/// `folder`.
|
||||
/// For era files, the checksums.txt file does not exist, so checksum verification is
/// skipped.
|
||||
pub async fn fetch_file_list(&self) -> eyre::Result<()> {
|
||||
let (mut index, mut checksums) = try_join!(
|
||||
self.client.get(self.url.clone()),
|
||||
self.client.get(self.url.clone().join(Self::CHECKSUMS)?),
|
||||
)?;
|
||||
|
||||
let index_path = self.folder.to_path_buf().join("index.html");
|
||||
let index_path = self.folder.to_path_buf().join(INDEX_HTML_FILE);
|
||||
let checksums_path = self.folder.to_path_buf().join(Self::CHECKSUMS);
|
||||
|
||||
let (mut index_file, mut checksums_file) =
|
||||
try_join!(File::create(&index_path), File::create(&checksums_path))?;
|
||||
|
||||
loop {
|
||||
let (index, checksums) = join!(index.next(), checksums.next());
|
||||
let (index, checksums) = (index.transpose()?, checksums.transpose()?);
|
||||
|
||||
if index.is_none() && checksums.is_none() {
|
||||
break;
|
||||
}
|
||||
let index_file = &mut index_file;
|
||||
let checksums_file = &mut checksums_file;
|
||||
|
||||
// Only for era1 do we also download the checksums file
|
||||
if self.era_type == EraFileType::Era1 {
|
||||
let checksums_url = self.url.join(Self::CHECKSUMS)?;
|
||||
try_join!(
|
||||
async move {
|
||||
if let Some(index) = index {
|
||||
io::copy(&mut index.as_ref(), index_file).await?;
|
||||
}
|
||||
Ok::<(), eyre::Error>(())
|
||||
},
|
||||
async move {
|
||||
if let Some(checksums) = checksums {
|
||||
io::copy(&mut checksums.as_ref(), checksums_file).await?;
|
||||
}
|
||||
Ok::<(), eyre::Error>(())
|
||||
},
|
||||
self.download_file_to_path(self.url.clone(), &index_path),
|
||||
self.download_file_to_path(checksums_url, &checksums_path)
|
||||
)?;
|
||||
} else {
|
||||
// Download only index file
|
||||
self.download_file_to_path(self.url.clone(), &index_path).await?;
|
||||
}
|
||||
|
||||
let file = File::open(&index_path).await?;
|
||||
// Parse and extract era filenames from index.html
|
||||
self.extract_era_filenames(&index_path).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Extracts ERA filenames from `index.html` and writes them to the index file
|
||||
async fn extract_era_filenames(&self, index_path: &Path) -> eyre::Result<()> {
|
||||
let file = File::open(index_path).await?;
|
||||
let reader = io::BufReader::new(file);
|
||||
let mut lines = reader.lines();
|
||||
|
||||
@@ -203,21 +202,36 @@ impl<Http: HttpClient + Clone> EraClient<Http> {
|
||||
let file = File::create(&path).await?;
|
||||
let mut writer = io::BufWriter::new(file);
|
||||
|
||||
let ext = self.era_type.extension();
|
||||
let ext_len = ext.len();
|
||||
|
||||
while let Some(line) = lines.next_line().await? {
|
||||
if let Some(j) = line.find(".era1") &&
|
||||
if let Some(j) = line.find(ext) &&
|
||||
let Some(i) = line[..j].rfind(|c: char| !c.is_alphanumeric() && c != '-')
|
||||
{
|
||||
let era = &line[i + 1..j + 5];
|
||||
let era = &line[i + 1..j + ext_len];
|
||||
writer.write_all(era.as_bytes()).await?;
|
||||
writer.write_all(b"\n").await?;
|
||||
}
|
||||
}
|
||||
|
||||
writer.flush().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Helper to download a file to a specified path
|
||||
async fn download_file_to_path(&self, url: Url, path: &Path) -> eyre::Result<()> {
|
||||
let mut stream = self.client.get(url).await?;
|
||||
let mut file = File::create(path).await?;
|
||||
|
||||
while let Some(item) = stream.next().await.transpose()? {
|
||||
io::copy(&mut item.as_ref(), &mut file).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns ERA1 file name that is ordered at `number`.
|
||||
/// Returns ERA1/ERA file name that is ordered at `number`.
|
||||
pub async fn number_to_file_name(&self, number: usize) -> eyre::Result<Option<String>> {
|
||||
let path = self.folder.to_path_buf().join("index");
|
||||
let file = File::open(&path).await?;
|
||||
@@ -235,18 +249,23 @@ impl<Http: HttpClient + Clone> EraClient<Http> {
|
||||
|
||||
match File::open(path).await {
|
||||
Ok(file) => {
|
||||
let number = self
|
||||
.file_name_to_number(name)
|
||||
.ok_or_else(|| eyre!("Cannot parse ERA number from {name}"))?;
|
||||
if self.era_type == EraFileType::Era1 {
|
||||
let number = self
|
||||
.file_name_to_number(name)
|
||||
.ok_or_else(|| eyre!("Cannot parse ERA number from {name}"))?;
|
||||
|
||||
let actual_checksum = checksum(file).await?;
|
||||
let is_verified = self.verify_checksum(number, actual_checksum).await?;
|
||||
let actual_checksum = checksum(file).await?;
|
||||
let is_verified = self.verify_checksum(number, actual_checksum).await?;
|
||||
|
||||
if !is_verified {
|
||||
fs::remove_file(path).await?;
|
||||
if !is_verified {
|
||||
fs::remove_file(path).await?;
|
||||
}
|
||||
|
||||
Ok(is_verified)
|
||||
} else {
|
||||
// For era files, we skip checksum verification, as checksums.txt does not exist
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
Ok(is_verified)
|
||||
}
|
||||
Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(false),
|
||||
Err(e) => Err(e)?,
|
||||
|
||||
@@ -12,6 +12,8 @@ pub fn read_dir(
|
||||
start_from: BlockNumber,
|
||||
) -> eyre::Result<impl Stream<Item = eyre::Result<EraLocalMeta>> + Send + Sync + 'static + Unpin> {
|
||||
let mut checksums = None;
|
||||
|
||||
// read all the files in the given dir and also read the checksums file
|
||||
let mut entries = fs::read_dir(dir)?
|
||||
.filter_map(|entry| {
|
||||
(|| {
|
||||
@@ -29,6 +31,7 @@ pub fn read_dir(
|
||||
return Ok(Some((number, path.into_boxed_path())));
|
||||
}
|
||||
}
|
||||
|
||||
if path.file_name() == Some("checksums.txt".as_ref()) {
|
||||
let file = fs::open(path)?;
|
||||
let reader = io::BufReader::new(file);
|
||||
@@ -43,9 +46,15 @@ pub fn read_dir(
|
||||
.collect::<eyre::Result<Vec<_>>>()?;
|
||||
let mut checksums = checksums.ok_or_eyre("Missing file `checksums.txt` in the `dir`")?;
|
||||
|
||||
let start_index = start_from as usize / BLOCKS_PER_FILE;
|
||||
for _ in 0..start_index {
|
||||
// skip the first entries in the checksums iterator so that both iters align
|
||||
checksums.next().transpose()?.ok_or_eyre("Got less checksums than ERA files")?;
|
||||
}
|
||||
|
||||
entries.sort_by(|(left, _), (right, _)| left.cmp(right));
|
||||
|
||||
Ok(stream::iter(entries.into_iter().skip(start_from as usize / BLOCKS_PER_FILE).map(
|
||||
Ok(stream::iter(entries.into_iter().skip_while(move |(n, _)| *n < start_index).map(
|
||||
move |(_, path)| {
|
||||
let expected_checksum =
|
||||
checksums.next().transpose()?.ok_or_eyre("Got less checksums than ERA files")?;
|
||||
|
||||
@@ -61,21 +61,21 @@ impl HttpClient for FailingClient {
|
||||
let url = url.into_url().unwrap();
|
||||
|
||||
Ok(futures::stream::iter(vec![Ok(match url.as_str() {
|
||||
"https://mainnet.era1.nimbus.team/" => Bytes::from_static(crate::NIMBUS),
|
||||
"https://era1.ethportal.net/" => Bytes::from_static(crate::ETH_PORTAL),
|
||||
"https://era.ithaca.xyz/era1/index.html" => Bytes::from_static(crate::ITHACA),
|
||||
"https://mainnet.era1.nimbus.team/" => Bytes::from_static(crate::ERA1_NIMBUS),
|
||||
"https://era1.ethportal.net/" => Bytes::from_static(crate::ERA1_ETH_PORTAL),
|
||||
"https://era.ithaca.xyz/era1/index.html" => Bytes::from_static(crate::ERA1_ITHACA),
|
||||
"https://mainnet.era1.nimbus.team/checksums.txt" |
|
||||
"https://era1.ethportal.net/checksums.txt" |
|
||||
"https://era.ithaca.xyz/era1/checksums.txt" => Bytes::from_static(CHECKSUMS),
|
||||
"https://era1.ethportal.net/mainnet-00000-5ec1ffb8.era1" |
|
||||
"https://mainnet.era1.nimbus.team/mainnet-00000-5ec1ffb8.era1" |
|
||||
"https://era.ithaca.xyz/era1/mainnet-00000-5ec1ffb8.era1" => {
|
||||
Bytes::from_static(crate::MAINNET_0)
|
||||
Bytes::from_static(crate::ERA1_MAINNET_0)
|
||||
}
|
||||
"https://era1.ethportal.net/mainnet-00001-a5364e9a.era1" |
|
||||
"https://mainnet.era1.nimbus.team/mainnet-00001-a5364e9a.era1" |
|
||||
"https://era.ithaca.xyz/era1/mainnet-00001-a5364e9a.era1" => {
|
||||
Bytes::from_static(crate::MAINNET_1)
|
||||
Bytes::from_static(crate::ERA1_MAINNET_1)
|
||||
}
|
||||
v => unimplemented!("Unexpected URL \"{v}\""),
|
||||
})]))
|
||||
|
||||
@@ -10,7 +10,7 @@ use test_case::test_case;
|
||||
#[test_case("https://era1.ethportal.net/"; "ethportal")]
|
||||
#[test_case("https://era.ithaca.xyz/era1/index.html"; "ithaca")]
|
||||
#[tokio::test]
|
||||
async fn test_getting_file_url_after_fetching_file_list(url: &str) {
|
||||
async fn test_getting_era1_file_url_after_fetching_file_list(url: &str) {
|
||||
let base_url = Url::from_str(url).unwrap();
|
||||
let folder = tempdir().unwrap();
|
||||
let folder = folder.path();
|
||||
@@ -48,3 +48,19 @@ async fn test_getting_file_after_fetching_file_list(url: &str) {
|
||||
let actual_count = client.files_count().await;
|
||||
assert_eq!(actual_count, expected_count);
|
||||
}
|
||||
|
||||
#[test_case("https://mainnet.era.nimbus.team/"; "nimbus")]
|
||||
#[tokio::test]
|
||||
async fn test_getting_era_file_url_after_fetching_file_list(url: &str) {
|
||||
let base_url = Url::from_str(url).unwrap();
|
||||
let folder = tempdir().unwrap();
|
||||
let folder = folder.path();
|
||||
let client = EraClient::new(StubClient, base_url.clone(), folder);
|
||||
|
||||
client.fetch_file_list().await.unwrap();
|
||||
|
||||
let expected_url = Some(base_url.join("mainnet-00000-4b363db9.era").unwrap());
|
||||
let actual_url = client.url(0).await.unwrap();
|
||||
|
||||
assert_eq!(actual_url, expected_url);
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ use test_case::test_case;
|
||||
#[test_case("https://era1.ethportal.net/"; "ethportal")]
|
||||
#[test_case("https://era.ithaca.xyz/era1/index.html"; "ithaca")]
|
||||
#[tokio::test]
|
||||
async fn test_getting_file_name_after_fetching_file_list(url: &str) {
|
||||
async fn test_getting_era1_file_name_after_fetching_file_list(url: &str) {
|
||||
let url = Url::from_str(url).unwrap();
|
||||
let folder = tempdir().unwrap();
|
||||
let folder = folder.path();
|
||||
@@ -23,3 +23,19 @@ async fn test_getting_file_name_after_fetching_file_list(url: &str) {
|
||||
|
||||
assert_eq!(actual, expected);
|
||||
}
|
||||
|
||||
#[test_case("https://mainnet.era.nimbus.team/"; "nimbus")]
|
||||
#[tokio::test]
|
||||
async fn test_getting_era_file_name_after_fetching_file_list(url: &str) {
|
||||
let url = Url::from_str(url).unwrap();
|
||||
let folder = tempdir().unwrap();
|
||||
let folder = folder.path();
|
||||
let client = EraClient::new(StubClient, url, folder);
|
||||
|
||||
client.fetch_file_list().await.unwrap();
|
||||
|
||||
let actual = client.number_to_file_name(500).await.unwrap();
|
||||
let expected = Some("mainnet-00500-87109713.era".to_owned());
|
||||
|
||||
assert_eq!(actual, expected);
|
||||
}
|
||||
|
||||
@@ -13,12 +13,20 @@ use futures::Stream;
|
||||
use reqwest::IntoUrl;
|
||||
use reth_era_downloader::HttpClient;
|
||||
|
||||
pub(crate) const NIMBUS: &[u8] = include_bytes!("../res/nimbus.html");
|
||||
pub(crate) const ETH_PORTAL: &[u8] = include_bytes!("../res/ethportal.html");
|
||||
pub(crate) const ITHACA: &[u8] = include_bytes!("../res/ithaca.html");
|
||||
pub(crate) const CHECKSUMS: &[u8] = include_bytes!("../res/checksums.txt");
|
||||
pub(crate) const MAINNET_0: &[u8] = include_bytes!("../res/mainnet-00000-5ec1ffb8.era1");
|
||||
pub(crate) const MAINNET_1: &[u8] = include_bytes!("../res/mainnet-00001-a5364e9a.era1");
|
||||
pub(crate) const ERA1_NIMBUS: &[u8] = include_bytes!("../res/era1-nimbus.html");
|
||||
pub(crate) const ERA1_ETH_PORTAL: &[u8] = include_bytes!("../res/ethportal.html");
|
||||
pub(crate) const ERA1_ITHACA: &[u8] = include_bytes!("../res/era1-ithaca.html");
|
||||
pub(crate) const ERA1_CHECKSUMS: &[u8] = include_bytes!("../res/checksums.txt");
|
||||
pub(crate) const ERA1_MAINNET_0: &[u8] =
|
||||
include_bytes!("../res/era1-files/mainnet-00000-5ec1ffb8.era1");
|
||||
pub(crate) const ERA1_MAINNET_1: &[u8] =
|
||||
include_bytes!("../res/era1-files/mainnet-00001-a5364e9a.era1");
|
||||
|
||||
pub(crate) const ERA_NIMBUS: &[u8] = include_bytes!("../res/era-nimbus.html");
|
||||
pub(crate) const ERA_MAINNET_0: &[u8] =
|
||||
include_bytes!("../res/era-files/mainnet-00000-4b363db9.era");
|
||||
pub(crate) const ERA_MAINNET_1: &[u8] =
|
||||
include_bytes!("../res/era-files/mainnet-00001-40cf2f3c.era");
|
||||
|
||||
/// An HTTP client pre-programmed with canned answers to received calls.
|
||||
/// Panics if it receives an unknown call.
|
||||
@@ -33,22 +41,32 @@ impl HttpClient for StubClient {
|
||||
let url = url.into_url().unwrap();
|
||||
|
||||
Ok(futures::stream::iter(vec![Ok(match url.as_str() {
|
||||
"https://mainnet.era1.nimbus.team/" => Bytes::from_static(NIMBUS),
|
||||
"https://era1.ethportal.net/" => Bytes::from_static(ETH_PORTAL),
|
||||
"https://era.ithaca.xyz/era1/index.html" => Bytes::from_static(ITHACA),
|
||||
// Era1 urls
|
||||
"https://mainnet.era1.nimbus.team/" => Bytes::from_static(ERA1_NIMBUS),
|
||||
"https://era1.ethportal.net/" => Bytes::from_static(ERA1_ETH_PORTAL),
|
||||
"https://era.ithaca.xyz/era1/index.html" => Bytes::from_static(ERA1_ITHACA),
|
||||
"https://mainnet.era1.nimbus.team/checksums.txt" |
|
||||
"https://era1.ethportal.net/checksums.txt" |
|
||||
"https://era.ithaca.xyz/era1/checksums.txt" => Bytes::from_static(CHECKSUMS),
|
||||
"https://era.ithaca.xyz/era1/checksums.txt" => Bytes::from_static(ERA1_CHECKSUMS),
|
||||
"https://era1.ethportal.net/mainnet-00000-5ec1ffb8.era1" |
|
||||
"https://mainnet.era1.nimbus.team/mainnet-00000-5ec1ffb8.era1" |
|
||||
"https://era.ithaca.xyz/era1/mainnet-00000-5ec1ffb8.era1" => {
|
||||
Bytes::from_static(MAINNET_0)
|
||||
Bytes::from_static(ERA1_MAINNET_0)
|
||||
}
|
||||
"https://era1.ethportal.net/mainnet-00001-a5364e9a.era1" |
|
||||
"https://mainnet.era1.nimbus.team/mainnet-00001-a5364e9a.era1" |
|
||||
"https://era.ithaca.xyz/era1/mainnet-00001-a5364e9a.era1" => {
|
||||
Bytes::from_static(MAINNET_1)
|
||||
Bytes::from_static(ERA1_MAINNET_1)
|
||||
}
|
||||
// Era urls
|
||||
"https://mainnet.era.nimbus.team/" => Bytes::from_static(ERA_NIMBUS),
|
||||
"https://mainnet.era.nimbus.team/mainnet-00000-4b363db9.era" => {
|
||||
Bytes::from_static(ERA_MAINNET_0)
|
||||
}
|
||||
"https://mainnet.era.nimbus.team/mainnet-00001-40cf2f3c.era" => {
|
||||
Bytes::from_static(ERA_MAINNET_1)
|
||||
}
|
||||
|
||||
v => unimplemented!("Unexpected URL \"{v}\""),
|
||||
})]))
|
||||
}
|
||||
|
||||
@@ -34,7 +34,7 @@ async fn test_streaming_files_after_fetching_file_list(url: &str) {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_streaming_files_after_fetching_file_list_into_missing_folder_fails() {
|
||||
async fn test_streaming_era1_files_after_fetching_file_list_into_missing_folder_fails() {
|
||||
let base_url = Url::from_str("https://era.ithaca.xyz/era1/index.html").unwrap();
|
||||
let folder = tempdir().unwrap().path().to_owned();
|
||||
let client = EraClient::new(StubClient, base_url, folder);
|
||||
@@ -49,3 +49,20 @@ async fn test_streaming_files_after_fetching_file_list_into_missing_folder_fails
|
||||
|
||||
assert_eq!(actual_error, expected_error);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_streaming_era_files_after_fetching_file_list_into_missing_folder_fails() {
|
||||
let base_url = Url::from_str("https://mainnet.era.nimbus.team").unwrap(); // TODO: change once Ithaca hosts era files
|
||||
let folder = tempdir().unwrap().path().to_owned();
|
||||
let client = EraClient::new(StubClient, base_url, folder);
|
||||
|
||||
let mut stream = EraStream::new(
|
||||
client,
|
||||
EraStreamConfig::default().with_max_files(2).with_max_concurrent_downloads(1),
|
||||
);
|
||||
|
||||
let actual_error = stream.next().await.unwrap().unwrap_err().to_string();
|
||||
let expected_error = "No such file or directory (os error 2)".to_owned();
|
||||
|
||||
assert_eq!(actual_error, expected_error);
|
||||
}
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
c
|
||||
@@ -0,0 +1 @@
|
||||
d
|
||||
1593 crates/era-downloader/tests/res/era-nimbus.html (normal file)
File diff suppressed because it is too large
@@ -122,3 +122,45 @@ impl<T: StreamWriter<File>> FileWriter for T {
|
||||
Self::create(path, file)
|
||||
}
|
||||
}
|
||||
|
||||
/// Era file type identifier
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
pub enum EraFileType {
|
||||
/// Consensus layer ERA file, `.era`
|
||||
/// Contains beacon blocks and states
|
||||
Era,
|
||||
/// Execution layer ERA1 file, `.era1`
|
||||
/// Contains execution blocks pre-merge
|
||||
Era1,
|
||||
}
|
||||
|
||||
impl EraFileType {
|
||||
/// Get the file extension for this type, dot included
|
||||
pub const fn extension(&self) -> &'static str {
|
||||
match self {
|
||||
Self::Era => ".era",
|
||||
Self::Era1 => ".era1",
|
||||
}
|
||||
}
|
||||
|
||||
/// Detect file type from a filename
|
||||
pub fn from_filename(filename: &str) -> Option<Self> {
|
||||
if filename.ends_with(".era") {
|
||||
Some(Self::Era)
|
||||
} else if filename.ends_with(".era1") {
|
||||
Some(Self::Era1)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Detect file type from URL
|
||||
/// By default, it assumes `Era` type
|
||||
pub fn from_url(url: &str) -> Self {
|
||||
if url.contains("era1") {
|
||||
Self::Era1
|
||||
} else {
|
||||
Self::Era
|
||||
}
|
||||
}
|
||||
}
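A small usage sketch (not part of the diff) tying the new `EraFileType` helpers to the downloader behaviour described above; the `describe` function and its boolean checksum flag are illustrative only:

```rust
use reth_era::common::file_ops::EraFileType;

/// Detects the file flavour from a hosting URL and derives the per-flavour policy:
/// the extension used to filter on-disk files and whether checksums are verified
/// (only ERA1 hosts publish a checksums.txt).
fn describe(url: &str) -> (&'static str, bool) {
    let era_type = EraFileType::from_url(url);
    (era_type.extension(), era_type == EraFileType::Era1)
}

fn main() {
    assert_eq!(describe("https://era1.ethportal.net/"), (".era1", true));
    assert_eq!(describe("https://mainnet.era.nimbus.team/"), (".era", false));
}
```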
|
||||
|
||||
@@ -53,22 +53,18 @@
|
||||
//! ## [`CompressedReceipts`]
|
||||
//!
|
||||
//! ```rust
|
||||
//! use alloy_consensus::ReceiptWithBloom;
|
||||
//! use alloy_consensus::{Eip658Value, Receipt, ReceiptEnvelope, ReceiptWithBloom};
|
||||
//! use reth_era::{common::decode::DecodeCompressed, era1::types::execution::CompressedReceipts};
|
||||
//! use reth_ethereum_primitives::{Receipt, TxType};
|
||||
//!
|
||||
//! let receipt = Receipt {
|
||||
//! tx_type: TxType::Legacy,
|
||||
//! success: true,
|
||||
//! cumulative_gas_used: 21000,
|
||||
//! logs: vec![],
|
||||
//! };
|
||||
//! let receipt_with_bloom = ReceiptWithBloom { receipt, logs_bloom: Default::default() };
|
||||
//! let receipt =
|
||||
//! Receipt { status: Eip658Value::Eip658(true), cumulative_gas_used: 21000, logs: vec![] };
|
||||
//! let receipt_with_bloom = ReceiptWithBloom::new(receipt, Default::default());
|
||||
//! let enveloped_receipt = ReceiptEnvelope::Legacy(receipt_with_bloom);
|
||||
//! // Compress the receipt: rlp encoding and snappy compression
|
||||
//! let compressed_receipt_data = CompressedReceipts::from_encodable(&receipt_with_bloom)?;
|
||||
//! let compressed_receipt_data = CompressedReceipts::from_encodable(&enveloped_receipt)?;
|
||||
//! // Get raw receipt by decoding and decompressing compressed and encoded receipt
|
||||
//! let decompressed_receipt = compressed_receipt_data.decode::<ReceiptWithBloom>()?;
|
||||
//! assert_eq!(decompressed_receipt.receipt.cumulative_gas_used, 21000);
|
||||
//! let decompressed_receipt = compressed_receipt_data.decode::<ReceiptEnvelope>()?;
|
||||
//! assert_eq!(decompressed_receipt.cumulative_gas_used(), 21000);
|
||||
//! # Ok::<(), reth_era::e2s::error::E2sError>(())
|
||||
//! ```
|
||||
|
||||
@@ -703,8 +699,8 @@ mod tests {
|
||||
.expect("Failed to compress receipt list");
|
||||
|
||||
// Decode the compressed receipts back
|
||||
// Note: most likely the decoding for real era files will be done to reach
|
||||
// `Vec<ReceiptWithBloom>``
|
||||
// Note: For real ERA1 files, use `Vec<ReceiptWithBloom>` before Era ~1520 or use
|
||||
// `Vec<ReceiptEnvelope>` after this era
|
||||
let decoded_receipts: Vec<Receipt> =
|
||||
compressed_receipts.decode().expect("Failed to decode compressed receipt list");
|
||||
|
||||
|
||||
@@ -14,138 +14,146 @@ use reth_era::{
|
||||
use reth_ethereum_primitives::TransactionSigned;
|
||||
use std::io::Cursor;
|
||||
|
||||
use crate::{open_test_file, Era1TestDownloader, ERA1_MAINNET_FILES_NAMES, MAINNET};
|
||||
use crate::{Era1TestDownloader, MAINNET};
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
#[ignore = "download intensive"]
|
||||
async fn test_mainnet_era1_only_file_decompression_and_decoding() -> eyre::Result<()> {
|
||||
let downloader = Era1TestDownloader::new().await.expect("Failed to create downloader");
|
||||
// Helper function to test decompression and decoding for a specific file
|
||||
async fn test_file_decompression(
|
||||
downloader: &Era1TestDownloader,
|
||||
filename: &str,
|
||||
) -> eyre::Result<()> {
|
||||
println!("\nTesting file: {filename}");
|
||||
let file = downloader.open_era1_file(filename, MAINNET).await?;
|
||||
|
||||
for &filename in &ERA1_MAINNET_FILES_NAMES {
|
||||
println!("\nTesting file: {filename}");
|
||||
let file = open_test_file(filename, &downloader, MAINNET).await?;
|
||||
// Test block decompression across different positions in the file
|
||||
let test_block_indices = [
|
||||
0, // First block
|
||||
file.group.blocks.len() / 2, // Middle block
|
||||
file.group.blocks.len() - 1, // Last block
|
||||
];
|
||||
|
||||
// Test block decompression across different positions in the file
|
||||
let test_block_indices = [
|
||||
0, // First block
|
||||
file.group.blocks.len() / 2, // Middle block
|
||||
file.group.blocks.len() - 1, // Last block
|
||||
];
|
||||
for &block_idx in &test_block_indices {
|
||||
let block = &file.group.blocks[block_idx];
|
||||
let block_number = file.group.block_index.starting_number() + block_idx as u64;
|
||||
|
||||
for &block_idx in &test_block_indices {
|
||||
let block = &file.group.blocks[block_idx];
|
||||
let block_number = file.group.block_index.starting_number() + block_idx as u64;
|
||||
println!(
|
||||
"\n Testing block {}, compressed body size: {} bytes",
|
||||
block_number,
|
||||
block.body.data.len()
|
||||
);
|
||||
|
||||
println!(
|
||||
"\n Testing block {}, compressed body size: {} bytes",
|
||||
block_number,
|
||||
block.body.data.len()
|
||||
);
|
||||
// Test header decompression and decoding
|
||||
let header_data = block.header.decompress()?;
|
||||
assert!(
|
||||
!header_data.is_empty(),
|
||||
"Block {block_number} header decompression should produce non-empty data"
|
||||
);
|
||||
|
||||
// Test header decompression and decoding
|
||||
let header_data = block.header.decompress()?;
|
||||
assert!(
|
||||
!header_data.is_empty(),
|
||||
"Block {block_number} header decompression should produce non-empty data"
|
||||
);
|
||||
let header = block.header.decode_header()?;
|
||||
assert_eq!(header.number, block_number, "Decoded header should have correct block number");
|
||||
println!("Header decompression and decoding successful");
|
||||
|
||||
let header = block.header.decode_header()?;
|
||||
assert_eq!(
|
||||
header.number, block_number,
|
||||
"Decoded header should have correct block number"
|
||||
);
|
||||
println!("Header decompression and decoding successful");
|
||||
// Test body decompression
|
||||
let body_data = block.body.decompress()?;
|
||||
assert!(
|
||||
!body_data.is_empty(),
|
||||
"Block {block_number} body decompression should produce non-empty data"
|
||||
);
|
||||
println!("Body decompression successful ({} bytes)", body_data.len());
|
||||
|
||||
// Test body decompression
|
||||
let body_data = block.body.decompress()?;
|
||||
assert!(
|
||||
!body_data.is_empty(),
|
||||
"Block {block_number} body decompression should produce non-empty data"
|
||||
);
|
||||
println!("Body decompression successful ({} bytes)", body_data.len());
|
||||
|
||||
let decoded_body: BlockBody<TransactionSigned> =
|
||||
CompressedBody::decode_body_from_decompressed::<TransactionSigned, Header>(
|
||||
&body_data,
|
||||
)
|
||||
let decoded_body: BlockBody<TransactionSigned> =
|
||||
CompressedBody::decode_body_from_decompressed::<TransactionSigned, Header>(&body_data)
|
||||
.expect("Failed to decode body");
|
||||
|
||||
println!(
|
||||
"Body decoding successful: {} transactions, {} ommers, withdrawals: {}",
|
||||
decoded_body.transactions.len(),
|
||||
decoded_body.ommers.len(),
|
||||
decoded_body.withdrawals.is_some()
|
||||
);
|
||||
println!(
|
||||
"Body decoding successful: {} transactions, {} ommers, withdrawals: {}",
|
||||
decoded_body.transactions.len(),
|
||||
decoded_body.ommers.len(),
|
||||
decoded_body.withdrawals.is_some()
|
||||
);
|
||||
|
||||
// Test receipts decompression
|
||||
let receipts_data = block.receipts.decompress()?;
|
||||
assert!(
|
||||
!receipts_data.is_empty(),
|
||||
"Block {block_number} receipts decompression should produce non-empty data"
|
||||
);
|
||||
println!("Receipts decompression successful ({} bytes)", receipts_data.len());
|
||||
// Test receipts decompression
|
||||
let receipts_data = block.receipts.decompress()?;
|
||||
assert!(
|
||||
!receipts_data.is_empty(),
|
||||
"Block {block_number} receipts decompression should produce non-empty data"
|
||||
);
|
||||
println!("Receipts decompression successful ({} bytes)", receipts_data.len());
|
||||
|
||||
assert!(
|
||||
block.total_difficulty.value > U256::ZERO,
|
||||
"Block {block_number} should have non-zero difficulty"
|
||||
);
|
||||
println!("Total difficulty verified: {}", block.total_difficulty.value);
|
||||
}
|
||||
assert!(
|
||||
block.total_difficulty.value > U256::ZERO,
|
||||
"Block {block_number} should have non-zero difficulty"
|
||||
);
|
||||
println!("Total difficulty verified: {}", block.total_difficulty.value);
|
||||
}
|
||||
|
||||
// Test round-trip serialization
|
||||
println!("\n Testing data preservation roundtrip...");
|
||||
let mut buffer = Vec::new();
|
||||
{
|
||||
let mut writer = Era1Writer::new(&mut buffer);
|
||||
writer.write_file(&file)?;
|
||||
}
|
||||
// Test round-trip serialization
|
||||
println!("\n Testing data preservation roundtrip...");
|
||||
let mut buffer = Vec::new();
|
||||
{
|
||||
let mut writer = Era1Writer::new(&mut buffer);
|
||||
writer.write_file(&file)?;
|
||||
}
|
||||
|
||||
// Read back from buffer
|
||||
let reader = Era1Reader::new(Cursor::new(&buffer));
|
||||
let read_back_file = reader.read(file.id.network_name.clone())?;
|
||||
// Read back from buffer
|
||||
let reader = Era1Reader::new(Cursor::new(&buffer));
|
||||
let read_back_file = reader.read(file.id.network_name.clone())?;
|
||||
|
||||
// Verify basic properties are preserved
|
||||
assert_eq!(file.id.network_name, read_back_file.id.network_name);
|
||||
assert_eq!(file.id.start_block, read_back_file.id.start_block);
|
||||
assert_eq!(file.group.blocks.len(), read_back_file.group.blocks.len());
|
||||
assert_eq!(file.group.accumulator.root, read_back_file.group.accumulator.root);
|
||||
// Verify basic properties are preserved
|
||||
assert_eq!(file.id.network_name, read_back_file.id.network_name);
|
||||
assert_eq!(file.id.start_block, read_back_file.id.start_block);
|
||||
assert_eq!(file.group.blocks.len(), read_back_file.group.blocks.len());
|
||||
assert_eq!(file.group.accumulator.root, read_back_file.group.accumulator.root);
|
||||
|
||||
// Test data preservation for some blocks
|
||||
for &idx in &test_block_indices {
|
||||
let original_block = &file.group.blocks[idx];
|
||||
let read_back_block = &read_back_file.group.blocks[idx];
|
||||
let block_number = file.group.block_index.starting_number() + idx as u64;
|
||||
// Test data preservation for some blocks
|
||||
for &idx in &test_block_indices {
|
||||
let original_block = &file.group.blocks[idx];
|
||||
let read_back_block = &read_back_file.group.blocks[idx];
|
||||
let block_number = file.group.block_index.starting_number() + idx as u64;
|
||||
|
||||
println!("Block {block_number} details:");
|
||||
println!(" Header size: {} bytes", original_block.header.data.len());
|
||||
println!(" Body size: {} bytes", original_block.body.data.len());
|
||||
println!(" Receipts size: {} bytes", original_block.receipts.data.len());
|
||||
println!("Block {block_number} details:");
|
||||
println!(" Header size: {} bytes", original_block.header.data.len());
|
||||
println!(" Body size: {} bytes", original_block.body.data.len());
|
||||
println!(" Receipts size: {} bytes", original_block.receipts.data.len());
|
||||
|
||||
// Test that decompressed data is identical
|
||||
assert_eq!(
|
||||
original_block.header.decompress()?,
|
||||
read_back_block.header.decompress()?,
|
||||
"Header data should be identical for block {block_number}"
|
||||
);
|
||||
// Test that decompressed data is identical
|
||||
assert_eq!(
|
||||
original_block.header.decompress()?,
|
||||
read_back_block.header.decompress()?,
|
||||
"Header data should be identical for block {block_number}"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
original_block.body.decompress()?,
|
||||
read_back_block.body.decompress()?,
|
||||
"Body data should be identical for block {block_number}"
|
||||
);
|
||||
assert_eq!(
|
||||
original_block.body.decompress()?,
|
||||
read_back_block.body.decompress()?,
|
||||
"Body data should be identical for block {block_number}"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
original_block.receipts.decompress()?,
|
||||
read_back_block.receipts.decompress()?,
|
||||
"Receipts data should be identical for block {block_number}"
|
||||
);
|
||||
assert_eq!(
|
||||
original_block.receipts.decompress()?,
|
||||
read_back_block.receipts.decompress()?,
|
||||
"Receipts data should be identical for block {block_number}"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
original_block.total_difficulty.value, read_back_block.total_difficulty.value,
|
||||
"Total difficulty should be identical for block {block_number}"
|
||||
);
|
||||
}
|
||||
assert_eq!(
|
||||
original_block.total_difficulty.value, read_back_block.total_difficulty.value,
|
||||
"Total difficulty should be identical for block {block_number}"
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test_case::test_case("mainnet-00000-5ec1ffb8.era1"; "era_dd_mainnet_0")]
|
||||
#[test_case::test_case("mainnet-00003-d8b8a40b.era1"; "era_dd_mainnet_3")]
|
||||
#[test_case::test_case("mainnet-00151-e322efe1.era1"; "era_dd_mainnet_151")]
|
||||
#[test_case::test_case("mainnet-00293-0d6c5812.era1"; "era_dd_mainnet_293")]
|
||||
#[test_case::test_case("mainnet-00443-ea71b6f9.era1"; "era_dd_mainnet_443")]
|
||||
#[test_case::test_case("mainnet-01367-d7efc68f.era1"; "era_dd_mainnet_1367")]
|
||||
#[test_case::test_case("mainnet-01610-99fdde4b.era1"; "era_dd_mainnet_1610")]
|
||||
#[test_case::test_case("mainnet-01895-3f81607c.era1"; "era_dd_mainnet_1895")]
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
#[ignore = "download intensive"]
|
||||
async fn test_mainnet_era1_file_decompression_and_decoding(filename: &str) -> eyre::Result<()> {
|
||||
let downloader = Era1TestDownloader::new().await?;
|
||||
test_file_decompression(&downloader, filename).await
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ use reth_era::{
|
||||
use reth_era_downloader::EraClient;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
path::{Path, PathBuf},
|
||||
path::PathBuf,
|
||||
str::FromStr,
|
||||
sync::{Arc, Mutex},
|
||||
};
|
||||
@@ -63,7 +63,7 @@ const ERA1_SEPOLIA_FILES_NAMES: [&str; 4] = [
|
||||
"sepolia-00000-643a00f7.era1",
|
||||
"sepolia-00074-0e81003c.era1",
|
||||
"sepolia-00173-b6924da5.era1",
|
||||
"sepolia-00182-a4f0a8a1.era1 ",
|
||||
"sepolia-00182-a4f0a8a1.era1",
|
||||
];
|
||||
|
||||
/// Utility for downloading `.era1` files for tests
|
||||
@@ -157,18 +157,3 @@ impl Era1TestDownloader {
|
||||
Era1Reader::open(&path, network).map_err(|e| eyre!("Failed to open Era1 file: {e}"))
|
||||
}
|
||||
}
|
||||
|
||||
/// Open a test file by name,
|
||||
/// downloading only if it is necessary
|
||||
async fn open_test_file(
|
||||
file_path: &str,
|
||||
downloader: &Era1TestDownloader,
|
||||
network: &str,
|
||||
) -> Result<Era1File> {
|
||||
let filename = Path::new(file_path)
|
||||
.file_name()
|
||||
.and_then(|os_str| os_str.to_str())
|
||||
.ok_or_else(|| eyre!("Invalid file path: {}", file_path))?;
|
||||
|
||||
downloader.open_era1_file(filename, network).await
|
||||
}
|
||||
|
||||
@@ -7,8 +7,7 @@
|
||||
//! - Writing the data back to a new file
|
||||
//! - Confirming that all original data is preserved throughout the process
|
||||
|
||||
use alloy_consensus::{BlockBody, BlockHeader, Header, ReceiptWithBloom};
|
||||
use rand::{prelude::IndexedRandom, rng};
|
||||
use alloy_consensus::{BlockBody, BlockHeader, Header, ReceiptEnvelope};
|
||||
use reth_era::{
|
||||
common::file_ops::{EraFileFormat, StreamReader, StreamWriter},
|
||||
e2s::types::IndexEntry,
|
||||
@@ -25,9 +24,7 @@ use reth_era::{
|
||||
use reth_ethereum_primitives::TransactionSigned;
|
||||
use std::io::Cursor;
|
||||
|
||||
use crate::{
|
||||
Era1TestDownloader, ERA1_MAINNET_FILES_NAMES, ERA1_SEPOLIA_FILES_NAMES, MAINNET, SEPOLIA,
|
||||
};
|
||||
use crate::{Era1TestDownloader, MAINNET, SEPOLIA};
|
||||
|
||||
// Helper function to test roundtrip compression/encoding for a specific file
|
||||
async fn test_file_roundtrip(
|
||||
@@ -152,10 +149,9 @@ async fn test_file_roundtrip(
|
||||
);
|
||||
|
||||
// Decode receipts
|
||||
let original_receipts_decoded =
|
||||
original_block.receipts.decode::<Vec<ReceiptWithBloom>>()?;
|
||||
let original_receipts_decoded = original_block.receipts.decode::<Vec<ReceiptEnvelope>>()?;
|
||||
let roundtrip_receipts_decoded =
|
||||
roundtrip_block.receipts.decode::<Vec<ReceiptWithBloom>>()?;
|
||||
roundtrip_block.receipts.decode::<Vec<ReceiptEnvelope>>()?;
|
||||
|
||||
assert_eq!(
|
||||
original_receipts_decoded, roundtrip_receipts_decoded,
|
||||
@@ -256,35 +252,27 @@ async fn test_file_roundtrip(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test_case::test_case("mainnet-00000-5ec1ffb8.era1"; "era_mainnet_0")]
|
||||
#[test_case::test_case("mainnet-00151-e322efe1.era1"; "era_mainnet_151")]
|
||||
#[test_case::test_case("mainnet-01367-d7efc68f.era1"; "era_mainnet_1367")]
|
||||
#[test_case::test_case("mainnet-01895-3f81607c.era1"; "era_mainnet_1895")]
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
#[ignore = "download intensive"]
|
||||
async fn test_roundtrip_compression_encoding_mainnet() -> eyre::Result<()> {
|
||||
async fn test_roundtrip_compression_encoding_mainnet(filename: &str) -> eyre::Result<()> {
|
||||
let downloader = Era1TestDownloader::new().await?;
|
||||
test_file_roundtrip(&downloader, filename, MAINNET).await
|
||||
}
|
||||
|
||||
#[test_case::test_case("sepolia-00000-643a00f7.era1"; "era_sepolia_0")]
|
||||
#[test_case::test_case("sepolia-00074-0e81003c.era1"; "era_sepolia_74")]
|
||||
#[test_case::test_case("sepolia-00173-b6924da5.era1"; "era_sepolia_173")]
|
||||
#[test_case::test_case("sepolia-00182-a4f0a8a1.era1"; "era_sepolia_182")]
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
#[ignore = "download intensive"]
|
||||
async fn test_roundtrip_compression_encoding_sepolia(filename: &str) -> eyre::Result<()> {
|
||||
let downloader = Era1TestDownloader::new().await?;
|
||||
|
||||
let mut rng = rng();
|
||||
|
||||
// pick 4 random files from the mainnet list
|
||||
let sample_files: Vec<&str> =
|
||||
ERA1_MAINNET_FILES_NAMES.choose_multiple(&mut rng, 4).copied().collect();
|
||||
|
||||
println!("Testing {} randomly selected mainnet files", sample_files.len());
|
||||
|
||||
for &filename in &sample_files {
|
||||
test_file_roundtrip(&downloader, filename, MAINNET).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
#[ignore = "download intensive"]
|
||||
async fn test_roundtrip_compression_encoding_sepolia() -> eyre::Result<()> {
|
||||
let downloader = Era1TestDownloader::new().await?;
|
||||
|
||||
// Test all Sepolia files
|
||||
for &filename in &ERA1_SEPOLIA_FILES_NAMES {
|
||||
test_file_roundtrip(&downloader, filename, SEPOLIA).await?;
|
||||
}
|
||||
test_file_roundtrip(&downloader, filename, SEPOLIA).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -10,13 +10,11 @@ use reth_cli_runner::CliRunner;
|
||||
use reth_db::DatabaseEnv;
|
||||
use reth_node_api::NodePrimitives;
|
||||
use reth_node_builder::{NodeBuilder, WithLaunchContext};
|
||||
use reth_node_core::args::OtlpInitStatus;
|
||||
use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig, EthereumNode};
|
||||
use reth_node_metrics::recorder::install_prometheus_recorder;
|
||||
use reth_rpc_server_types::RpcModuleValidator;
|
||||
use reth_tracing::{FileWorkerGuard, Layers};
|
||||
use std::{fmt, sync::Arc};
|
||||
use tracing::{info, warn};
|
||||
|
||||
/// A wrapper around a parsed CLI that handles command execution.
|
||||
#[derive(Debug)]
|
||||
@@ -107,26 +105,12 @@ where
|
||||
|
||||
/// Initializes tracing with the configured options.
|
||||
///
|
||||
/// If file logging is enabled, this function stores guard to the struct.
|
||||
/// For gRPC OTLP, it requires tokio runtime context.
|
||||
/// See [`Cli::init_tracing`] for more information.
|
||||
pub fn init_tracing(&mut self, runner: &CliRunner) -> Result<()> {
|
||||
if self.guard.is_none() {
|
||||
let mut layers = self.layers.take().unwrap_or_default();
|
||||
|
||||
let otlp_status = runner.block_on(self.cli.traces.init_otlp_tracing(&mut layers))?;
|
||||
|
||||
self.guard = self.cli.logs.init_tracing_with_layers(layers)?;
|
||||
info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory);
|
||||
match otlp_status {
|
||||
OtlpInitStatus::Started(endpoint) => {
|
||||
info!(target: "reth::cli", "Started OTLP {:?} tracing export to {endpoint}", self.cli.traces.protocol);
|
||||
}
|
||||
OtlpInitStatus::NoFeature => {
|
||||
warn!(target: "reth::cli", "Provided OTLP tracing arguments do not have effect, compile with the `otlp` feature")
|
||||
}
|
||||
OtlpInitStatus::Disabled => {}
|
||||
}
|
||||
self.guard = self.cli.init_tracing(runner, self.layers.take().unwrap_or_default())?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,14 +19,14 @@ use reth_db::DatabaseEnv;
|
||||
use reth_node_api::NodePrimitives;
|
||||
use reth_node_builder::{NodeBuilder, WithLaunchContext};
|
||||
use reth_node_core::{
|
||||
args::{LogArgs, TraceArgs},
|
||||
args::{LogArgs, OtlpInitStatus, TraceArgs},
|
||||
version::version_metadata,
|
||||
};
|
||||
use reth_node_metrics::recorder::install_prometheus_recorder;
|
||||
use reth_rpc_server_types::{DefaultRpcModuleValidator, RpcModuleValidator};
|
||||
use reth_tracing::FileWorkerGuard;
|
||||
use reth_tracing::{FileWorkerGuard, Layers};
|
||||
use std::{ffi::OsString, fmt, future::Future, marker::PhantomData, sync::Arc};
|
||||
use tracing::info;
|
||||
use tracing::{info, warn};
|
||||
|
||||
/// The main reth cli interface.
|
||||
///
|
||||
@@ -205,8 +205,7 @@ impl<C: ChainSpecParser, Ext: clap::Args + fmt::Debug, Rpc: RpcModuleValidator>
|
||||
self.logs.log_file_directory =
|
||||
self.logs.log_file_directory.join(chain_spec.chain().to_string());
|
||||
}
|
||||
let _guard = self.init_tracing()?;
|
||||
info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory);
|
||||
let _guard = self.init_tracing(&runner, Layers::new())?;
|
||||
|
||||
// Install the prometheus recorder to be sure to record all metrics
|
||||
let _ = install_prometheus_recorder();
|
||||
@@ -219,11 +218,27 @@ impl<C: ChainSpecParser, Ext: clap::Args + fmt::Debug, Rpc: RpcModuleValidator>
|
||||
///
|
||||
/// If file logging is enabled, this function returns a guard that must be kept alive to ensure
|
||||
/// that all logs are flushed to disk.
|
||||
///
|
||||
/// If an OTLP endpoint is specified, it will export metrics to the configured collector.
|
||||
pub fn init_tracing(&self) -> eyre::Result<Option<FileWorkerGuard>> {
|
||||
let layers = reth_tracing::Layers::new();
|
||||
pub fn init_tracing(
|
||||
&mut self,
|
||||
runner: &CliRunner,
|
||||
mut layers: Layers,
|
||||
) -> eyre::Result<Option<FileWorkerGuard>> {
|
||||
let otlp_status = runner.block_on(self.traces.init_otlp_tracing(&mut layers))?;
|
||||
|
||||
let guard = self.logs.init_tracing_with_layers(layers)?;
|
||||
info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory);
|
||||
match otlp_status {
|
||||
OtlpInitStatus::Started(endpoint) => {
|
||||
info!(target: "reth::cli", "Started OTLP {:?} tracing export to {endpoint}", self.traces.protocol);
|
||||
}
|
||||
OtlpInitStatus::NoFeature => {
|
||||
warn!(target: "reth::cli", "Provided OTLP tracing arguments do not have effect, compile with the `otlp` feature")
|
||||
}
|
||||
OtlpInitStatus::Disabled => {}
|
||||
}
|
||||
|
||||
Ok(guard)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
extern crate alloc;
|
||||
|
||||
use alloc::{fmt::Debug, sync::Arc};
|
||||
use alloy_consensus::EMPTY_OMMER_ROOT_HASH;
|
||||
use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, EMPTY_OMMER_ROOT_HASH};
|
||||
use alloy_eips::eip7840::BlobParams;
|
||||
use reth_chainspec::{EthChainSpec, EthereumHardforks};
|
||||
use reth_consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator};
|
||||
@@ -38,12 +38,25 @@ pub use validation::validate_block_post_execution;
|
||||
pub struct EthBeaconConsensus<ChainSpec> {
|
||||
/// Configuration
|
||||
chain_spec: Arc<ChainSpec>,
|
||||
/// Maximum allowed extra data size in bytes
|
||||
max_extra_data_size: usize,
|
||||
}
|
||||
|
||||
impl<ChainSpec: EthChainSpec + EthereumHardforks> EthBeaconConsensus<ChainSpec> {
|
||||
/// Create a new instance of [`EthBeaconConsensus`]
|
||||
pub const fn new(chain_spec: Arc<ChainSpec>) -> Self {
|
||||
Self { chain_spec }
|
||||
Self { chain_spec, max_extra_data_size: MAXIMUM_EXTRA_DATA_SIZE }
|
||||
}
|
||||
|
||||
/// Returns the maximum allowed extra data size.
|
||||
pub const fn max_extra_data_size(&self) -> usize {
|
||||
self.max_extra_data_size
|
||||
}
|
||||
|
||||
/// Sets the maximum allowed extra data size and returns the updated instance.
|
||||
pub const fn with_max_extra_data_size(mut self, size: usize) -> Self {
|
||||
self.max_extra_data_size = size;
|
||||
self
|
||||
}
|
||||
|
||||
/// Returns the chain spec associated with this consensus engine.
|
||||
@@ -131,7 +144,7 @@ where
|
||||
}
|
||||
}
|
||||
}
|
||||
validate_header_extra_data(header)?;
|
||||
validate_header_extra_data(header, self.max_extra_data_size)?;
|
||||
validate_header_gas(header)?;
|
||||
validate_header_base_fee(header, &self.chain_spec)?;
|
||||
|
||||
|
||||
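A short sketch of the new `max_extra_data_size` knob on `EthBeaconConsensus`; the imports mirror ones already used elsewhere in this diff, and the example is illustrative rather than part of the commit.

```rust
// Sketch: build an Ethereum beacon consensus instance with a custom extra-data cap.
use reth_chainspec::{ChainSpec, MAINNET};
use reth_node_ethereum::consensus::EthBeaconConsensus;
use std::sync::Arc;

fn consensus_with_larger_extra_data() -> EthBeaconConsensus<ChainSpec> {
    // The default cap is `MAXIMUM_EXTRA_DATA_SIZE` (32 bytes); raise it for chains that allow more.
    let chain_spec: Arc<ChainSpec> = MAINNET.clone();
    EthBeaconConsensus::new(chain_spec).with_max_extra_data_size(64)
}
```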
@@ -124,15 +124,17 @@ impl core::fmt::Display for DisplayHardforks {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
format(
|
||||
"Pre-merge hard forks (block based)",
|
||||
&self.pre_merge,
|
||||
self.with_merge.is_empty(),
|
||||
f,
|
||||
)?;
|
||||
if !self.pre_merge.is_empty() {
|
||||
format(
|
||||
"Pre-merge hard forks (block based)",
|
||||
&self.pre_merge,
|
||||
self.with_merge.is_empty(),
|
||||
f,
|
||||
)?;
|
||||
}
|
||||
|
||||
if self.with_merge.is_empty() {
|
||||
if !self.post_merge.is_empty() {
|
||||
if !self.pre_merge.is_empty() && !self.post_merge.is_empty() {
|
||||
// need an extra line here in case we don't have a merge block (optimism)
|
||||
writeln!(f)?;
|
||||
}
|
||||
|
||||
@@ -60,6 +60,7 @@ reth-exex.workspace = true
|
||||
reth-node-core.workspace = true
|
||||
reth-e2e-test-utils.workspace = true
|
||||
reth-tasks.workspace = true
|
||||
reth-testing-utils.workspace = true
|
||||
|
||||
alloy-primitives.workspace = true
|
||||
alloy-provider.workspace = true
|
||||
@@ -74,6 +75,9 @@ futures.workspace = true
|
||||
tokio.workspace = true
|
||||
serde_json.workspace = true
|
||||
rand.workspace = true
|
||||
serde.workspace = true
|
||||
alloy-rpc-types-trace.workspace = true
|
||||
similar-asserts.workspace = true
|
||||
|
||||
[features]
|
||||
default = []
|
||||
|
||||
@@ -5,6 +5,7 @@ mod dev;
|
||||
mod eth;
|
||||
mod p2p;
|
||||
mod pool;
|
||||
mod prestate;
|
||||
mod rpc;
|
||||
mod utils;
|
||||
|
||||
|
||||
@@ -1,10 +1,18 @@
|
||||
use crate::utils::{advance_with_random_transactions, eth_payload_attributes};
|
||||
use alloy_consensus::{SignableTransaction, TxEip1559, TxEnvelope};
|
||||
use alloy_eips::Encodable2718;
|
||||
use alloy_network::TxSignerSync;
|
||||
use alloy_provider::{Provider, ProviderBuilder};
|
||||
use rand::{rngs::StdRng, Rng, SeedableRng};
|
||||
use futures::future::JoinAll;
|
||||
use rand::{rngs::StdRng, seq::IndexedRandom, Rng, SeedableRng};
|
||||
use reth_chainspec::{ChainSpecBuilder, MAINNET};
|
||||
use reth_e2e_test_utils::{setup, setup_engine, transaction::TransactionTestContext};
|
||||
use reth_e2e_test_utils::{
|
||||
setup, setup_engine, setup_engine_with_connection, transaction::TransactionTestContext,
|
||||
wallet::Wallet,
|
||||
};
|
||||
use reth_node_ethereum::EthereumNode;
|
||||
use std::sync::Arc;
|
||||
use reth_rpc_api::EthApiServer;
|
||||
use std::{sync::Arc, time::Duration};
|
||||
|
||||
#[tokio::test]
|
||||
async fn can_sync() -> eyre::Result<()> {
|
||||
@@ -195,3 +203,94 @@ async fn test_reorg_through_backfill() -> eyre::Result<()> {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_tx_propagation() -> eyre::Result<()> {
|
||||
reth_tracing::init_test_tracing();
|
||||
|
||||
let chain_spec = Arc::new(
|
||||
ChainSpecBuilder::default()
|
||||
.chain(MAINNET.chain)
|
||||
.genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap())
|
||||
.cancun_activated()
|
||||
.prague_activated()
|
||||
.build(),
|
||||
);
|
||||
|
||||
// Setup wallet
|
||||
let chain_id = chain_spec.chain().into();
|
||||
let wallet = Wallet::new(1).inner;
|
||||
let mut nonce = 0;
|
||||
let mut build_tx = || {
|
||||
let mut tx = TxEip1559 {
|
||||
chain_id,
|
||||
max_priority_fee_per_gas: 1_000_000_000,
|
||||
max_fee_per_gas: 1_000_000_000,
|
||||
gas_limit: 100_000,
|
||||
nonce,
|
||||
..Default::default()
|
||||
};
|
||||
nonce += 1;
|
||||
let signature = wallet.sign_transaction_sync(&mut tx).unwrap();
|
||||
TxEnvelope::Eip1559(tx.into_signed(signature))
|
||||
};
|
||||
|
||||
// Setup 10 nodes
|
||||
let (mut nodes, _tasks, _) = setup_engine_with_connection::<EthereumNode>(
|
||||
10,
|
||||
chain_spec.clone(),
|
||||
false,
|
||||
Default::default(),
|
||||
eth_payload_attributes,
|
||||
false,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Connect all nodes to the first one
|
||||
let (first, rest) = nodes.split_at_mut(1);
|
||||
for node in rest {
|
||||
node.connect(&mut first[0]).await;
|
||||
}
|
||||
|
||||
// Advance all nodes for 1 block so that they don't consider themselves unsynced
|
||||
let tx = build_tx();
|
||||
nodes[0].rpc.inject_tx(tx.encoded_2718().into()).await?;
|
||||
let payload = nodes[0].advance_block().await?;
|
||||
nodes[1..]
|
||||
.iter_mut()
|
||||
.map(|node| async {
|
||||
node.submit_payload(payload.clone()).await.unwrap();
|
||||
node.sync_to(payload.block().hash()).await.unwrap();
|
||||
})
|
||||
.collect::<JoinAll<_>>()
|
||||
.await;
|
||||
|
||||
// Build and send transaction to first node
|
||||
let tx = build_tx();
|
||||
let tx_hash = *tx.tx_hash();
|
||||
let _ = nodes[0].rpc.inject_tx(tx.encoded_2718().into()).await?;
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
// Assert that all nodes have the transaction
|
||||
for (i, node) in nodes.iter().enumerate() {
|
||||
assert!(
|
||||
node.rpc.inner.eth_api().transaction_by_hash(tx_hash).await?.is_some(),
|
||||
"Node {i} should have the transaction"
|
||||
);
|
||||
}
|
||||
|
||||
// Build and send one more transaction to a random node
|
||||
let tx = build_tx();
|
||||
let tx_hash = *tx.tx_hash();
|
||||
let _ = nodes.choose(&mut rand::rng()).unwrap().rpc.inject_tx(tx.encoded_2718().into()).await?;
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
// Assert that all nodes have the transaction
|
||||
for node in nodes {
|
||||
assert!(node.rpc.inner.eth_api().transaction_by_hash(tx_hash).await?.is_some());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
132
crates/ethereum/node/tests/e2e/prestate.rs
Normal file
@@ -0,0 +1,132 @@
|
||||
use alloy_eips::BlockId;
|
||||
use alloy_genesis::{Genesis, GenesisAccount};
|
||||
use alloy_primitives::address;
|
||||
use alloy_provider::ext::DebugApi;
|
||||
use alloy_rpc_types_eth::{Transaction, TransactionRequest};
|
||||
use alloy_rpc_types_trace::geth::{
|
||||
AccountState, GethDebugTracingOptions, PreStateConfig, PreStateFrame,
|
||||
};
|
||||
use eyre::{eyre, Result};
|
||||
use reth_chainspec::{ChainSpecBuilder, MAINNET};
|
||||
use reth_node_builder::{NodeBuilder, NodeHandle};
|
||||
use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig};
|
||||
use reth_node_ethereum::EthereumNode;
|
||||
use reth_rpc_server_types::RpcModuleSelection;
|
||||
use reth_tasks::TaskManager;
|
||||
use serde::Deserialize;
|
||||
use std::sync::Arc;
|
||||
|
||||
const PRESTATE_SNAPSHOT: &str =
|
||||
include_str!("../../../../../testing/prestate/tx-selfdestruct-prestate.json");
|
||||
|
||||
/// Replays the selfdestruct transaction via `debug_traceCall` and ensures Reth's prestate matches
|
||||
/// Geth's captured snapshot.
|
||||
// <https://github.com/paradigmxyz/reth/issues/19703>
|
||||
#[tokio::test]
|
||||
async fn debug_trace_call_matches_geth_prestate_snapshot() -> Result<()> {
|
||||
reth_tracing::init_test_tracing();
|
||||
|
||||
let mut genesis: Genesis = MAINNET.genesis().clone();
|
||||
genesis.coinbase = address!("0x95222290dd7278aa3ddd389cc1e1d165cc4bafe5");
|
||||
|
||||
let exec = TaskManager::current();
|
||||
let exec = exec.executor();
|
||||
|
||||
let expected_frame = expected_snapshot_frame()?;
|
||||
let prestate_mode = match &expected_frame {
|
||||
PreStateFrame::Default(mode) => mode.clone(),
|
||||
_ => return Err(eyre!("snapshot must contain default prestate frame")),
|
||||
};
|
||||
|
||||
genesis.alloc.extend(
|
||||
prestate_mode
|
||||
.0
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|(addr, state)| (addr, account_state_to_genesis(state))),
|
||||
);
|
||||
|
||||
let chain_spec = Arc::new(
|
||||
ChainSpecBuilder::default()
|
||||
.chain(MAINNET.chain)
|
||||
.genesis(genesis)
|
||||
.cancun_activated()
|
||||
.prague_activated()
|
||||
.build(),
|
||||
);
|
||||
|
||||
let node_config = NodeConfig::test().with_chain(chain_spec).with_rpc(
|
||||
RpcServerArgs::default()
|
||||
.with_unused_ports()
|
||||
.with_http()
|
||||
.with_http_api(RpcModuleSelection::all_modules().into()),
|
||||
);
|
||||
|
||||
let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config)
|
||||
.testing_node(exec)
|
||||
.node(EthereumNode::default())
|
||||
.launch()
|
||||
.await?;
|
||||
|
||||
let provider = node.rpc_server_handle().eth_http_provider().unwrap();
|
||||
|
||||
// <https://etherscan.io/tx/0x391f4b6a382d3bcc3120adc2ea8c62003e604e487d97281129156fd284a1a89d>
|
||||
let tx = r#"{
|
||||
"type": "0x2",
|
||||
"chainId": "0x1",
|
||||
"nonce": "0x39af8",
|
||||
"gas": "0x249f0",
|
||||
"maxFeePerGas": "0xc6432e2d7",
|
||||
"maxPriorityFeePerGas": "0x68889c2b",
|
||||
"to": "0xc77ad0a71008d7094a62cfbd250a2eb2afdf2776",
|
||||
"value": "0x0",
|
||||
"accessList": [],
|
||||
"input": "0xf3fef3a3000000000000000000000000dac17f958d2ee523a2206206994597c13d831ec700000000000000000000000000000000000000000000000000000000000f6b64",
|
||||
"r": "0x40ab901a8262d5e6fe9b6513996cd5df412526580cab7410c13acc9dd9f6ec93",
|
||||
"s": "0x6b76354c8cb1c1d6dbebfd555be9053170f02a648c4b36740e3fd7c6e9499572",
|
||||
"yParity": "0x1",
|
||||
"v": "0x1",
|
||||
"hash": "0x391f4b6a382d3bcc3120adc2ea8c62003e604e487d97281129156fd284a1a89d",
|
||||
"blockHash": "0xf9b77bcf8c69544304dff34129f3bdc71f00fdf766c1522ed6ac1382726ead82",
|
||||
"blockNumber": "0x1294fd2",
|
||||
"transactionIndex": "0x3a",
|
||||
"from": "0xa7fb5ca286fc3fd67525629048a4de3ba24cba2e",
|
||||
"gasPrice": "0x7c5bcc0e0"
|
||||
}"#;
|
||||
let tx = serde_json::from_str::<Transaction>(tx).unwrap();
|
||||
let request = TransactionRequest::from_recovered_transaction(tx.into_recovered());
|
||||
|
||||
let trace: PreStateFrame = provider
|
||||
.debug_trace_call_prestate(
|
||||
request,
|
||||
BlockId::latest(),
|
||||
GethDebugTracingOptions::prestate_tracer(PreStateConfig::default()).into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
similar_asserts::assert_eq!(trace, expected_frame);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn expected_snapshot_frame() -> Result<PreStateFrame> {
|
||||
#[derive(Deserialize)]
|
||||
struct Snapshot {
|
||||
result: serde_json::Value,
|
||||
}
|
||||
|
||||
let snapshot: Snapshot = serde_json::from_str(PRESTATE_SNAPSHOT)?;
|
||||
Ok(serde_json::from_value(snapshot.result)?)
|
||||
}
|
||||
|
||||
fn account_state_to_genesis(value: AccountState) -> GenesisAccount {
|
||||
let balance = value.balance.unwrap_or_default();
|
||||
let code = value.code.filter(|code| !code.is_empty());
|
||||
let storage = (!value.storage.is_empty()).then_some(value.storage);
|
||||
|
||||
GenesisAccount::default()
|
||||
.with_balance(balance)
|
||||
.with_nonce(value.nonce)
|
||||
.with_code(code)
|
||||
.with_storage(storage)
|
||||
}
|
||||
@@ -1,5 +1,6 @@
|
||||
use crate::utils::eth_payload_attributes;
|
||||
use alloy_eips::{eip2718::Encodable2718, eip7910::EthConfig};
|
||||
use alloy_genesis::Genesis;
|
||||
use alloy_primitives::{Address, B256, U256};
|
||||
use alloy_provider::{network::EthereumWallet, Provider, ProviderBuilder, SendableTx};
|
||||
use alloy_rpc_types_beacon::relay::{
|
||||
@@ -11,8 +12,16 @@ use alloy_rpc_types_eth::TransactionRequest;
|
||||
use rand::{rngs::StdRng, Rng, SeedableRng};
|
||||
use reth_chainspec::{ChainSpecBuilder, EthChainSpec, MAINNET};
|
||||
use reth_e2e_test_utils::setup_engine;
|
||||
use reth_network::types::NatResolver;
|
||||
use reth_node_builder::{NodeBuilder, NodeHandle};
|
||||
use reth_node_core::{
|
||||
args::{NetworkArgs, RpcServerArgs},
|
||||
node_config::NodeConfig,
|
||||
};
|
||||
use reth_node_ethereum::EthereumNode;
|
||||
use reth_payload_primitives::BuiltPayload;
|
||||
use reth_rpc_api::servers::AdminApiServer;
|
||||
use reth_tasks::TaskManager;
|
||||
use std::{
|
||||
sync::Arc,
|
||||
time::{SystemTime, UNIX_EPOCH},
|
||||
@@ -329,3 +338,41 @@ async fn test_eth_config() -> eyre::Result<()> {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// <https://github.com/paradigmxyz/reth/issues/19765>
|
||||
#[tokio::test]
|
||||
async fn test_admin_external_ip() -> eyre::Result<()> {
|
||||
reth_tracing::init_test_tracing();
|
||||
|
||||
let exec = TaskManager::current();
|
||||
let exec = exec.executor();
|
||||
|
||||
// Chain spec with test allocs
|
||||
let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap();
|
||||
let chain_spec =
|
||||
Arc::new(ChainSpecBuilder::default().chain(MAINNET.chain).genesis(genesis).build());
|
||||
|
||||
let external_ip = "10.64.128.71".parse().unwrap();
|
||||
// Node setup
|
||||
let node_config = NodeConfig::test()
|
||||
.with_chain(chain_spec)
|
||||
.with_network(
|
||||
NetworkArgs::default().with_nat_resolver(NatResolver::ExternalIp(external_ip)),
|
||||
)
|
||||
.with_unused_ports()
|
||||
.with_rpc(RpcServerArgs::default().with_unused_ports().with_http());
|
||||
|
||||
let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config)
|
||||
.testing_node(exec)
|
||||
.node(EthereumNode::default())
|
||||
.launch()
|
||||
.await?;
|
||||
|
||||
let api = node.add_ons_handle.admin_api();
|
||||
|
||||
let info = api.node_info().await.unwrap();
|
||||
|
||||
assert_eq!(info.ip, external_ip);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ use alloy_consensus::{
|
||||
TxLegacy, TxType, Typed2718,
|
||||
};
|
||||
use alloy_eips::{
|
||||
eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718},
|
||||
eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718, IsTyped2718},
|
||||
eip2930::AccessList,
|
||||
eip7702::SignedAuthorization,
|
||||
};
|
||||
@@ -664,6 +664,12 @@ impl TxHashRef for TransactionSigned {
|
||||
}
|
||||
}
|
||||
|
||||
impl IsTyped2718 for TransactionSigned {
|
||||
fn is_type(type_id: u8) -> bool {
|
||||
<alloy_consensus::TxEnvelope as IsTyped2718>::is_type(type_id)
|
||||
}
|
||||
}
|
||||
|
||||
impl SignedTransaction for TransactionSigned {}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -14,3 +14,6 @@ workspace = true
|
||||
[dependencies]
|
||||
# ethereum
|
||||
alloy-primitives.workspace = true
|
||||
|
||||
# networking
|
||||
ipnet.workspace = true
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
|
||||
type PeerId = alloy_primitives::B512;
|
||||
|
||||
use std::{collections::HashMap, net::IpAddr, time::Instant};
|
||||
use std::{collections::HashMap, net::IpAddr, str::FromStr, time::Instant};
|
||||
|
||||
/// Determines whether or not the IP is globally routable.
|
||||
/// Should be replaced with [`IpAddr::is_global`](std::net::IpAddr::is_global) once it is stable.
|
||||
@@ -215,3 +215,161 @@ mod tests {
|
||||
assert!(!banlist.is_banned_ip(&ip));
|
||||
}
|
||||
}
|
||||
|
||||
/// IP filter for restricting network communication to specific IP ranges using CIDR notation.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct IpFilter {
|
||||
/// List of allowed IP networks in CIDR notation.
|
||||
/// If empty, all IPs are allowed.
|
||||
allowed_networks: Vec<ipnet::IpNet>,
|
||||
}
|
||||
|
||||
impl IpFilter {
|
||||
/// Creates a new IP filter with the given CIDR networks.
|
||||
///
|
||||
/// If the list is empty, all IPs will be allowed.
|
||||
pub const fn new(allowed_networks: Vec<ipnet::IpNet>) -> Self {
|
||||
Self { allowed_networks }
|
||||
}
|
||||
|
||||
/// Creates an IP filter from a comma-separated list of CIDR networks.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an error if any of the CIDR strings cannot be parsed.
|
||||
pub fn from_cidr_string(cidrs: &str) -> Result<Self, ipnet::AddrParseError> {
|
||||
if cidrs.is_empty() {
|
||||
return Ok(Self::allow_all())
|
||||
}
|
||||
|
||||
let networks = cidrs
|
||||
.split(',')
|
||||
.map(|s| s.trim())
|
||||
.filter(|s| !s.is_empty())
|
||||
.map(ipnet::IpNet::from_str)
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
Ok(Self::new(networks))
|
||||
}
|
||||
|
||||
/// Creates a filter that allows all IPs.
|
||||
pub const fn allow_all() -> Self {
|
||||
Self { allowed_networks: Vec::new() }
|
||||
}
|
||||
|
||||
/// Checks if the given IP address is allowed by this filter.
|
||||
///
|
||||
/// Returns `true` if the filter is empty (allows all) or if the IP is within
|
||||
/// any of the allowed networks.
|
||||
pub fn is_allowed(&self, ip: &IpAddr) -> bool {
|
||||
// If no restrictions are set, allow all IPs
|
||||
if self.allowed_networks.is_empty() {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check if the IP is within any of the allowed networks
|
||||
self.allowed_networks.iter().any(|net| net.contains(ip))
|
||||
}
|
||||
|
||||
/// Returns `true` if this filter has restrictions (i.e., not allowing all IPs).
|
||||
pub const fn has_restrictions(&self) -> bool {
|
||||
!self.allowed_networks.is_empty()
|
||||
}
|
||||
|
||||
/// Returns the list of allowed networks.
|
||||
pub fn allowed_networks(&self) -> &[ipnet::IpNet] {
|
||||
&self.allowed_networks
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for IpFilter {
|
||||
fn default() -> Self {
|
||||
Self::allow_all()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod ip_filter_tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_allow_all_filter() {
|
||||
let filter = IpFilter::allow_all();
|
||||
assert!(filter.is_allowed(&IpAddr::from([192, 168, 1, 1])));
|
||||
assert!(filter.is_allowed(&IpAddr::from([10, 0, 0, 1])));
|
||||
assert!(filter.is_allowed(&IpAddr::from([8, 8, 8, 8])));
|
||||
assert!(!filter.has_restrictions());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_single_network_filter() {
|
||||
let filter = IpFilter::from_cidr_string("192.168.0.0/16").unwrap();
|
||||
assert!(filter.is_allowed(&IpAddr::from([192, 168, 1, 1])));
|
||||
assert!(filter.is_allowed(&IpAddr::from([192, 168, 255, 255])));
|
||||
assert!(!filter.is_allowed(&IpAddr::from([192, 169, 1, 1])));
|
||||
assert!(!filter.is_allowed(&IpAddr::from([10, 0, 0, 1])));
|
||||
assert!(filter.has_restrictions());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_multiple_networks_filter() {
|
||||
let filter = IpFilter::from_cidr_string("192.168.0.0/16,10.0.0.0/8").unwrap();
|
||||
assert!(filter.is_allowed(&IpAddr::from([192, 168, 1, 1])));
|
||||
assert!(filter.is_allowed(&IpAddr::from([10, 5, 10, 20])));
|
||||
assert!(filter.is_allowed(&IpAddr::from([10, 255, 255, 255])));
|
||||
assert!(!filter.is_allowed(&IpAddr::from([172, 16, 0, 1])));
|
||||
assert!(!filter.is_allowed(&IpAddr::from([8, 8, 8, 8])));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ipv6_filter() {
|
||||
let filter = IpFilter::from_cidr_string("2001:db8::/32").unwrap();
|
||||
let ipv6_in_range: IpAddr = "2001:db8::1".parse().unwrap();
|
||||
let ipv6_out_range: IpAddr = "2001:db9::1".parse().unwrap();
|
||||
|
||||
assert!(filter.is_allowed(&ipv6_in_range));
|
||||
assert!(!filter.is_allowed(&ipv6_out_range));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_mixed_ipv4_ipv6_filter() {
|
||||
let filter = IpFilter::from_cidr_string("192.168.0.0/16,2001:db8::/32").unwrap();
|
||||
|
||||
assert!(filter.is_allowed(&IpAddr::from([192, 168, 1, 1])));
|
||||
let ipv6_in_range: IpAddr = "2001:db8::1".parse().unwrap();
|
||||
assert!(filter.is_allowed(&ipv6_in_range));
|
||||
|
||||
assert!(!filter.is_allowed(&IpAddr::from([10, 0, 0, 1])));
|
||||
let ipv6_out_range: IpAddr = "2001:db9::1".parse().unwrap();
|
||||
assert!(!filter.is_allowed(&ipv6_out_range));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_string() {
|
||||
let filter = IpFilter::from_cidr_string("").unwrap();
|
||||
assert!(filter.is_allowed(&IpAddr::from([192, 168, 1, 1])));
|
||||
assert!(!filter.has_restrictions());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_cidr() {
|
||||
assert!(IpFilter::from_cidr_string("invalid").is_err());
|
||||
assert!(IpFilter::from_cidr_string("192.168.0.0/33").is_err());
|
||||
assert!(IpFilter::from_cidr_string("192.168.0.0,10.0.0.0").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_whitespace_handling() {
|
||||
let filter = IpFilter::from_cidr_string(" 192.168.0.0/16 , 10.0.0.0/8 ").unwrap();
|
||||
assert!(filter.is_allowed(&IpAddr::from([192, 168, 1, 1])));
|
||||
assert!(filter.is_allowed(&IpAddr::from([10, 0, 0, 1])));
|
||||
assert!(!filter.is_allowed(&IpAddr::from([172, 16, 0, 1])));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_single_ip_as_cidr() {
|
||||
let filter = IpFilter::from_cidr_string("192.168.1.100/32").unwrap();
|
||||
assert!(filter.is_allowed(&IpAddr::from([192, 168, 1, 100])));
|
||||
assert!(!filter.is_allowed(&IpAddr::from([192, 168, 1, 101])));
|
||||
}
|
||||
}
|
||||
|
||||
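A compact usage sketch of the new CIDR-based `IpFilter`; the `reth_net_banlist::IpFilter` import matches the usage shown further down in this diff.

```rust
// Sketch: parse a comma-separated CIDR allowlist and check candidate peer IPs against it.
use reth_net_banlist::IpFilter;
use std::net::IpAddr;

fn main() -> Result<(), ipnet::AddrParseError> {
    // An empty string means "allow everything"; otherwise only the listed networks pass.
    let filter = IpFilter::from_cidr_string("10.0.0.0/8, 192.168.0.0/16")?;

    assert!(filter.is_allowed(&IpAddr::from([10, 1, 2, 3])));
    assert!(!filter.is_allowed(&IpAddr::from([8, 8, 8, 8])));
    assert!(filter.has_restrictions());
    Ok(())
}
```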
@@ -101,7 +101,7 @@ impl Discv4Config {
|
||||
pub fn resolve_external_ip_interval(&self) -> Option<ResolveNatInterval> {
|
||||
let resolver = self.external_ip_resolver?;
|
||||
let interval = self.resolve_external_ip_interval?;
|
||||
Some(ResolveNatInterval::interval(resolver, interval))
|
||||
Some(ResolveNatInterval::interval_at(resolver, tokio::time::Instant::now(), interval))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -336,4 +336,25 @@ mod tests {
|
||||
.enable_lookup(true)
|
||||
.build();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_resolve_external_ip_interval_uses_interval_at() {
|
||||
use reth_net_nat::NatResolver;
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
|
||||
let ip_addr = IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1));
|
||||
|
||||
// Create a config with external IP resolver
|
||||
let mut builder = Discv4Config::builder();
|
||||
builder.external_ip_resolver(Some(NatResolver::ExternalIp(ip_addr)));
|
||||
builder.resolve_external_ip_interval(Some(Duration::from_secs(60 * 5)));
|
||||
let config = builder.build();
|
||||
|
||||
// Get the ResolveNatInterval
|
||||
let mut interval = config.resolve_external_ip_interval().expect("should have interval");
|
||||
|
||||
// Test that first tick returns immediately (interval_at behavior)
|
||||
let ip = interval.tick().await;
|
||||
assert_eq!(ip, Some(ip_addr));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -75,6 +75,11 @@ pub struct Discv5 {
|
||||
discovered_peer_filter: MustNotIncludeKeys,
|
||||
/// Metrics for underlying [`discv5::Discv5`] node and filtered discovered peers.
|
||||
metrics: Discv5Metrics,
|
||||
/// Returns the _local_ [`NodeRecord`] this service was started with.
|
||||
// Note: we must track this separately because the `discv5::Discv5` does not necessarily
|
||||
// provide this via its [`local_enr`](discv5::Discv5::local_enr()). This is intended for
|
||||
// obtaining the port this service was launched at
|
||||
local_node_record: NodeRecord,
|
||||
}
|
||||
|
||||
impl Discv5 {
|
||||
@@ -155,22 +160,29 @@ impl Discv5 {
|
||||
enr.try_into().ok()
|
||||
}
|
||||
|
||||
/// Returns the local [`Enr`] of the service.
|
||||
pub fn local_enr(&self) -> Enr<discv5::enr::CombinedKey> {
|
||||
self.discv5.local_enr()
|
||||
}
|
||||
|
||||
/// The port the discv5 service is listening on.
|
||||
pub const fn local_port(&self) -> u16 {
|
||||
self.local_node_record.udp_port
|
||||
}
|
||||
|
||||
/// Spawns [`discv5::Discv5`]. Returns [`discv5::Discv5`] handle in reth compatible wrapper type
|
||||
/// [`Discv5`], a receiver of [`discv5::Event`]s from the underlying node, and the local
|
||||
/// [`Enr`](discv5::Enr) converted into the reth compatible [`NodeRecord`] type.
|
||||
pub async fn start(
|
||||
sk: &SecretKey,
|
||||
discv5_config: Config,
|
||||
) -> Result<(Self, mpsc::Receiver<discv5::Event>, NodeRecord), Error> {
|
||||
) -> Result<(Self, mpsc::Receiver<discv5::Event>), Error> {
|
||||
//
|
||||
// 1. make local enr from listen config
|
||||
//
|
||||
let (enr, bc_enr, fork_key, rlpx_ip_mode) = build_local_enr(sk, &discv5_config);
|
||||
let (enr, local_node_record, fork_key, rlpx_ip_mode) = build_local_enr(sk, &discv5_config);
|
||||
|
||||
trace!(target: "net::discv5",
|
||||
?enr,
|
||||
"local ENR"
|
||||
);
|
||||
trace!(target: "net::discv5", ?enr, "local ENR");
|
||||
|
||||
//
|
||||
// 2. start discv5
|
||||
@@ -217,9 +229,15 @@ impl Discv5 {
|
||||
);
|
||||
|
||||
Ok((
|
||||
Self { discv5, rlpx_ip_mode, fork_key, discovered_peer_filter, metrics },
|
||||
Self {
|
||||
discv5,
|
||||
rlpx_ip_mode,
|
||||
fork_key,
|
||||
discovered_peer_filter,
|
||||
metrics,
|
||||
local_node_record,
|
||||
},
|
||||
discv5_updates,
|
||||
bc_enr,
|
||||
))
|
||||
}
|
||||
|
||||
@@ -699,12 +717,14 @@ mod test {
|
||||
fork_key: None,
|
||||
discovered_peer_filter: MustNotIncludeKeys::default(),
|
||||
metrics: Discv5Metrics::default(),
|
||||
local_node_record: NodeRecord::new(
|
||||
(Ipv4Addr::LOCALHOST, 30303).into(),
|
||||
PeerId::random(),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
async fn start_discovery_node(
|
||||
udp_port_discv5: u16,
|
||||
) -> (Discv5, mpsc::Receiver<discv5::Event>, NodeRecord) {
|
||||
async fn start_discovery_node(udp_port_discv5: u16) -> (Discv5, mpsc::Receiver<discv5::Event>) {
|
||||
let secret_key = SecretKey::new(&mut thread_rng());
|
||||
|
||||
let discv5_addr: SocketAddr = format!("127.0.0.1:{udp_port_discv5}").parse().unwrap();
|
||||
@@ -725,11 +745,11 @@ mod test {
|
||||
// rig test
|
||||
|
||||
// rig node_1
|
||||
let (node_1, mut stream_1, _) = start_discovery_node(30344).await;
|
||||
let (node_1, mut stream_1) = start_discovery_node(30344).await;
|
||||
let node_1_enr = node_1.with_discv5(|discv5| discv5.local_enr());
|
||||
|
||||
// rig node_2
|
||||
let (node_2, mut stream_2, _) = start_discovery_node(30355).await;
|
||||
let (node_2, mut stream_2) = start_discovery_node(30355).await;
|
||||
let node_2_enr = node_2.with_discv5(|discv5| discv5.local_enr());
|
||||
|
||||
trace!(target: "net::discv5::test",
|
||||
|
||||
@@ -583,8 +583,14 @@ mod tests {
|
||||
// await recheck timeout
|
||||
tokio::time::sleep(config.recheck_interval).await;
|
||||
|
||||
let mut new_root = root.clone();
|
||||
new_root.sequence_number = new_root.sequence_number.saturating_add(1);
|
||||
new_root.enr_root = "NEW_ENR_ROOT".to_string();
|
||||
new_root.sign(&secret_key).unwrap();
|
||||
resolver.insert(link.domain.clone(), new_root.to_string());
|
||||
|
||||
let enr = Enr::empty(&secret_key).unwrap();
|
||||
resolver.insert(format!("{}.{}", root.enr_root.clone(), link.domain), enr.to_base64());
|
||||
resolver.insert(format!("{}.{}", new_root.enr_root.clone(), link.domain), enr.to_base64());
|
||||
|
||||
let event = poll_fn(|cx| service.poll(cx)).await;
|
||||
|
||||
|
||||
@@ -102,29 +102,30 @@ impl<K: EnrKeyUnambiguous> SyncTree<K> {
|
||||
|
||||
/// Updates the root and returns what changed
|
||||
pub(crate) fn update_root(&mut self, root: TreeRootEntry) {
|
||||
let enr = root.enr_root == self.root.enr_root;
|
||||
let link = root.link_root == self.root.link_root;
|
||||
let enr_unchanged = root.enr_root == self.root.enr_root;
|
||||
let link_unchanged = root.link_root == self.root.link_root;
|
||||
|
||||
self.root = root;
|
||||
self.root_updated = Instant::now();
|
||||
|
||||
let state = match (enr, link) {
|
||||
(true, true) => {
|
||||
self.unresolved_nodes.clear();
|
||||
self.unresolved_links.clear();
|
||||
SyncState::Pending
|
||||
}
|
||||
(true, _) => {
|
||||
let state = match (enr_unchanged, link_unchanged) {
|
||||
// both unchanged — no resync needed
|
||||
(true, true) => return,
|
||||
// only ENR changed
|
||||
(false, true) => {
|
||||
self.unresolved_nodes.clear();
|
||||
SyncState::Enr
|
||||
}
|
||||
(_, true) => {
|
||||
// only LINK changed
|
||||
(true, false) => {
|
||||
self.unresolved_links.clear();
|
||||
SyncState::Link
|
||||
}
|
||||
_ => {
|
||||
// unchanged
|
||||
return
|
||||
// both changed
|
||||
(false, false) => {
|
||||
self.unresolved_nodes.clear();
|
||||
self.unresolved_links.clear();
|
||||
SyncState::Pending
|
||||
}
|
||||
};
|
||||
self.sync_state = state;
|
||||
@@ -132,6 +133,7 @@ impl<K: EnrKeyUnambiguous> SyncTree<K> {
|
||||
}
|
||||
|
||||
/// The action to perform by the service
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum SyncAction {
|
||||
UpdateRoot,
|
||||
Enr(String),
|
||||
@@ -160,3 +162,97 @@ impl ResolveKind {
|
||||
matches!(self, Self::Link)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use enr::EnrKey;
|
||||
use secp256k1::rand::thread_rng;
|
||||
|
||||
fn base_root() -> TreeRootEntry {
|
||||
// taken from existing tests to ensure valid formatting
|
||||
let s = "enrtree-root:v1 e=QFT4PBCRX4XQCV3VUYJ6BTCEPU l=JGUFMSAGI7KZYB3P7IZW4S5Y3A seq=3 sig=3FmXuVwpa8Y7OstZTx9PIb1mt8FrW7VpDOFv4AaGCsZ2EIHmhraWhe4NxYhQDlw5MjeFXYMbJjsPeKlHzmJREQE";
|
||||
s.parse::<TreeRootEntry>().unwrap()
|
||||
}
|
||||
|
||||
fn make_tree() -> SyncTree {
|
||||
let secret_key = SecretKey::new(&mut thread_rng());
|
||||
let link =
|
||||
LinkEntry { domain: "nodes.example.org".to_string(), pubkey: secret_key.public() };
|
||||
SyncTree::new(base_root(), link)
|
||||
}
|
||||
|
||||
fn advance_to_active(tree: &mut SyncTree) {
|
||||
// Move Pending -> (emit Link) -> Enr, then Enr -> (emit Enr) -> Active
|
||||
let now = Instant::now();
|
||||
let timeout = Duration::from_secs(60 * 60 * 24);
|
||||
let _ = tree.poll(now, timeout);
|
||||
let _ = tree.poll(now, timeout);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn update_root_unchanged_no_action_from_active() {
|
||||
let mut tree = make_tree();
|
||||
let now = Instant::now();
|
||||
let timeout = Duration::from_secs(60 * 60 * 24);
|
||||
advance_to_active(&mut tree);
|
||||
|
||||
// same root -> no resync
|
||||
let same = base_root();
|
||||
tree.update_root(same);
|
||||
assert!(tree.poll(now, timeout).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn update_root_only_enr_changed_triggers_enr() {
|
||||
let mut tree = make_tree();
|
||||
advance_to_active(&mut tree);
|
||||
let mut new_root = base_root();
|
||||
new_root.enr_root = "NEW_ENR_ROOT".to_string();
|
||||
let now = Instant::now();
|
||||
let timeout = Duration::from_secs(60 * 60 * 24);
|
||||
|
||||
tree.update_root(new_root.clone());
|
||||
match tree.poll(now, timeout) {
|
||||
Some(SyncAction::Enr(hash)) => assert_eq!(hash, new_root.enr_root),
|
||||
other => panic!("expected Enr action, got {:?}", other),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn update_root_only_link_changed_triggers_link() {
|
||||
let mut tree = make_tree();
|
||||
advance_to_active(&mut tree);
|
||||
let mut new_root = base_root();
|
||||
new_root.link_root = "NEW_LINK_ROOT".to_string();
|
||||
let now = Instant::now();
|
||||
let timeout = Duration::from_secs(60 * 60 * 24);
|
||||
|
||||
tree.update_root(new_root.clone());
|
||||
match tree.poll(now, timeout) {
|
||||
Some(SyncAction::Link(hash)) => assert_eq!(hash, new_root.link_root),
|
||||
other => panic!("expected Link action, got {:?}", other),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn update_root_both_changed_triggers_link_then_enr() {
|
||||
let mut tree = make_tree();
|
||||
advance_to_active(&mut tree);
|
||||
let mut new_root = base_root();
|
||||
new_root.enr_root = "NEW_ENR_ROOT".to_string();
|
||||
new_root.link_root = "NEW_LINK_ROOT".to_string();
|
||||
let now = Instant::now();
|
||||
let timeout = Duration::from_secs(60 * 60 * 24);
|
||||
|
||||
tree.update_root(new_root.clone());
|
||||
match tree.poll(now, timeout) {
|
||||
Some(SyncAction::Link(hash)) => assert_eq!(hash, new_root.link_root),
|
||||
other => panic!("expected first Link action, got {:?}", other),
|
||||
}
|
||||
match tree.poll(now, timeout) {
|
||||
Some(SyncAction::Enr(hash)) => assert_eq!(hash, new_root.enr_root),
|
||||
other => panic!("expected second Enr action, got {:?}", other),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -158,6 +158,15 @@ pub struct Capabilities {
|
||||
}
|
||||
|
||||
impl Capabilities {
|
||||
/// Create a new instance from the given vec.
|
||||
pub fn new(value: Vec<Capability>) -> Self {
|
||||
Self {
|
||||
eth_66: value.iter().any(Capability::is_eth_v66),
|
||||
eth_67: value.iter().any(Capability::is_eth_v67),
|
||||
eth_68: value.iter().any(Capability::is_eth_v68),
|
||||
inner: value,
|
||||
}
|
||||
}
|
||||
/// Returns all capabilities.
|
||||
#[inline]
|
||||
pub fn capabilities(&self) -> &[Capability] {
|
||||
@@ -197,12 +206,7 @@ impl Capabilities {
|
||||
|
||||
impl From<Vec<Capability>> for Capabilities {
|
||||
fn from(value: Vec<Capability>) -> Self {
|
||||
Self {
|
||||
eth_66: value.iter().any(Capability::is_eth_v66),
|
||||
eth_67: value.iter().any(Capability::is_eth_v67),
|
||||
eth_68: value.iter().any(Capability::is_eth_v68),
|
||||
inner: value,
|
||||
}
|
||||
Self::new(value)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
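A small sketch of the consolidated `Capabilities::new` constructor; both the `Capability::new_static` constructor and the `reth_eth_wire` re-export are assumptions, since neither appears in this hunk.

```rust
// Sketch: `From<Vec<Capability>>` now simply forwards to `Capabilities::new`.
// `Capability::new_static` and the import path are assumed, not shown in this hunk.
use reth_eth_wire::{Capabilities, Capability};

fn main() {
    let caps = Capabilities::new(vec![Capability::new_static("eth", 68)]);
    let same: Capabilities = vec![Capability::new_static("eth", 68)].into();
    assert_eq!(caps.capabilities().len(), same.capabilities().len());
}
```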
@@ -7,7 +7,7 @@ use std::{
time::Duration,
};

use reth_net_banlist::BanList;
use reth_net_banlist::{BanList, IpFilter};
use reth_network_peers::{NodeRecord, TrustedPeer};
use tracing::info;

@@ -166,6 +166,12 @@ pub struct PeersConfig {
/// This acts as an IP based rate limit.
#[cfg_attr(feature = "serde", serde(default, with = "humantime_serde"))]
pub incoming_ip_throttle_duration: Duration,
/// IP address filter for restricting network connections to specific IP ranges.
///
/// Similar to geth's --netrestrict flag. If configured, only connections to/from
/// IPs within the specified CIDR ranges will be allowed.
#[cfg_attr(feature = "serde", serde(skip))]
pub ip_filter: IpFilter,
}

impl Default for PeersConfig {
@@ -184,6 +190,7 @@ impl Default for PeersConfig {
basic_nodes: Default::default(),
max_backoff_count: 5,
incoming_ip_throttle_duration: INBOUND_IP_THROTTLE_DURATION,
ip_filter: IpFilter::default(),
}
}
}
@@ -301,6 +308,12 @@ impl PeersConfig {
Ok(self.with_basic_nodes(nodes))
}

/// Configure the IP filter for restricting network connections to specific IP ranges.
pub fn with_ip_filter(mut self, ip_filter: IpFilter) -> Self {
self.ip_filter = ip_filter;
self
}

/// Returns settings for testing
#[cfg(any(test, feature = "test-utils"))]
pub fn test() -> Self {
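For reference, a hedged sketch of wiring the new netrestrict-style filter into a peers config. `IpFilter::from_cidr_string`, `with_ip_filter`, and the comma-separated CIDR syntax come from this diff and its tests; the `PeersConfig` import path is an assumption:

    use reth_net_banlist::IpFilter;
    use reth_network::PeersConfig; // path assumed

    fn restricted_peers_config() -> PeersConfig {
        // Only peers inside these CIDR ranges are accepted, similar to geth's
        // --netrestrict; connections from other IPs are rejected inbound and
        // skipped outbound.
        let filter = IpFilter::from_cidr_string("10.0.0.0/8,192.168.0.0/16")
            .expect("valid CIDR list");
        PeersConfig::default().with_ip_filter(filter)
    }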
@@ -1,7 +1,5 @@
//! Builder support for configuring the entire setup.

use std::fmt::Debug;

use crate::{
eth_requests::EthRequestHandler,
transactions::{
@@ -77,15 +75,7 @@ impl<Tx, Eth, N: NetworkPrimitives> NetworkBuilder<Tx, Eth, N> {
self,
pool: Pool,
transactions_manager_config: TransactionsManagerConfig,
) -> NetworkBuilder<
TransactionsManager<
Pool,
N,
NetworkPolicies<TransactionPropagationKind, StrictEthAnnouncementFilter>,
>,
Eth,
N,
> {
) -> NetworkBuilder<TransactionsManager<Pool, N>, Eth, N> {
self.transactions_with_policy(
pool,
transactions_manager_config,
@@ -94,19 +84,12 @@ impl<Tx, Eth, N: NetworkPrimitives> NetworkBuilder<Tx, Eth, N> {
}

/// Creates a new [`TransactionsManager`] and wires it to the network.
pub fn transactions_with_policy<
Pool: TransactionPool,
P: TransactionPropagationPolicy + Debug,
>(
pub fn transactions_with_policy<Pool: TransactionPool>(
self,
pool: Pool,
transactions_manager_config: TransactionsManagerConfig,
propagation_policy: P,
) -> NetworkBuilder<
TransactionsManager<Pool, N, NetworkPolicies<P, StrictEthAnnouncementFilter>>,
Eth,
N,
> {
propagation_policy: impl TransactionPropagationPolicy<N>,
) -> NetworkBuilder<TransactionsManager<Pool, N>, Eth, N> {
let Self { mut network, request_handler, .. } = self;
let (tx, rx) = mpsc::unbounded_channel();
network.set_transactions(tx);
@@ -6,7 +6,7 @@ use crate::{
transactions::TransactionsManagerConfig,
NetworkHandle, NetworkManager,
};
use alloy_primitives::B256;
use alloy_eips::BlockNumHash;
use reth_chainspec::{ChainSpecProvider, EthChainSpec, Hardforks};
use reth_discv4::{Discv4Config, Discv4ConfigBuilder, NatResolver, DEFAULT_DISCOVERY_ADDRESS};
use reth_discv5::NetworkStackId;
@@ -94,9 +94,9 @@ pub struct NetworkConfig<C, N: NetworkPrimitives = EthNetworkPrimitives> {
/// This can be overridden to support custom handshake logic via the
/// [`NetworkConfigBuilder`].
pub handshake: Arc<dyn EthRlpxHandshake>,
/// List of block hashes to check for required blocks.
/// List of block number-hash pairs to check for required blocks.
/// If non-empty, peers that don't have these blocks will be filtered out.
pub required_block_hashes: Vec<B256>,
pub required_block_hashes: Vec<BlockNumHash>,
}

// === impl NetworkConfig ===
@@ -225,7 +225,7 @@ pub struct NetworkConfigBuilder<N: NetworkPrimitives = EthNetworkPrimitives> {
/// <https://github.com/ethereum/devp2p/blob/master/rlpx.md#initial-handshake>.
handshake: Arc<dyn EthRlpxHandshake>,
/// List of block hashes to check for required blocks.
required_block_hashes: Vec<B256>,
required_block_hashes: Vec<BlockNumHash>,
/// Optional network id
network_id: Option<u64>,
}
@@ -555,7 +555,7 @@ impl<N: NetworkPrimitives> NetworkConfigBuilder<N> {
}

/// Sets the required block hashes for peer filtering.
pub fn required_block_hashes(mut self, hashes: Vec<B256>) -> Self {
pub fn required_block_hashes(mut self, hashes: Vec<BlockNumHash>) -> Self {
self.required_block_hashes = hashes;
self
}
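Since the builder now takes number+hash pairs, callers can let the filter skip peers that are simply behind the required height. A small sketch of building the argument for `required_block_hashes`; the block number and hash below are the placeholder values used in the tests further down, not real mainnet data:

    use alloy_eips::BlockNumHash;
    use alloy_primitives::b256;

    /// Value passed to `NetworkConfigBuilder::required_block_hashes`.
    fn shadowfork_required_blocks() -> Vec<BlockNumHash> {
        vec![BlockNumHash::new(
            23115201, // peers still below this height are not penalized
            b256!("0x2222222222222222222222222222222222222222222222222222222222222222"),
        )]
    }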
@@ -25,7 +25,7 @@ use std::{
};
use tokio::{sync::mpsc, task::JoinHandle};
use tokio_stream::{wrappers::ReceiverStream, Stream};
use tracing::trace;
use tracing::{debug, trace};

/// Default max capacity for cache of discovered peers.
///
@@ -95,12 +95,15 @@ impl Discovery {
// spawn the service
let discv4_service = discv4_service.spawn();

debug!(target:"net", ?discovery_v4_addr, "started discovery v4");

Ok((Some(discv4), Some(discv4_updates), Some(discv4_service)))
};

let discv5_future = async {
let Some(config) = discv5_config else { return Ok::<_, NetworkError>((None, None)) };
let (discv5, discv5_updates, _local_enr_discv5) = Discv5::start(&sk, config).await?;
let (discv5, discv5_updates) = Discv5::start(&sk, config).await?;
debug!(target:"net", discovery_v5_enr=? discv5.local_enr(), "started discovery v5");
Ok((Some(discv5), Some(discv5_updates.into())))
};
@@ -113,7 +113,22 @@ impl SessionError for EthStreamError {
P2PHandshakeError::HelloNotInHandshake |
P2PHandshakeError::NonHelloMessageInHandshake,
)) => true,
Self::EthHandshakeError(err) => !matches!(err, EthHandshakeError::NoResponse),
Self::EthHandshakeError(err) => {
#[allow(clippy::match_same_arms)]
match err {
EthHandshakeError::NoResponse => {
// this happens when the conn simply stalled
false
}
EthHandshakeError::InvalidFork(_) => {
// this can occur when the remote or our node is running an outdated client,
// we shouldn't treat this as fatal, because the node can come back online
// with an updated version any time
false
}
_ => true,
}
}
_ => false,
}
}
@@ -144,7 +159,22 @@ impl SessionError for EthStreamError {
P2PStreamError::MismatchedProtocolVersion { .. }
)
}
Self::EthHandshakeError(err) => !matches!(err, EthHandshakeError::NoResponse),
Self::EthHandshakeError(err) => {
#[allow(clippy::match_same_arms)]
match err {
EthHandshakeError::NoResponse => {
// this happens when the conn simply stalled
false
}
EthHandshakeError::InvalidFork(_) => {
// this can occur when the remote or our node is running an outdated client,
// we shouldn't treat this as fatal, because the node can come back online
// with an updated version any time
false
}
_ => true,
}
}
_ => false,
}
}
@@ -196,6 +226,11 @@ impl SessionError for EthStreamError {
P2PStreamError::PingerError(_) |
P2PStreamError::Snap(_),
) => Some(BackoffKind::Medium),
Self::EthHandshakeError(EthHandshakeError::InvalidFork(_)) => {
// the remote can come back online after updating client version, so we can back off
// for a bit
Some(BackoffKind::Medium)
}
_ => None,
}
}
@@ -139,8 +139,9 @@ impl<N: NetworkPrimitives> StateFetcher<N> {

/// Returns the _next_ idle peer that's ready to accept a request,
/// prioritizing those with the lowest timeout/latency and those that recently responded with
/// adequate data.
fn next_best_peer(&self) -> Option<PeerId> {
/// adequate data. Additionally, if full blocks are required this prioritizes peers that have
/// full history available.
fn next_best_peer(&self, requirement: BestPeerRequirements) -> Option<PeerId> {
let mut idle = self.peers.iter().filter(|(_, peer)| peer.state.is_idle());

let mut best_peer = idle.next()?;
@@ -152,7 +153,13 @@ impl<N: NetworkPrimitives> StateFetcher<N> {
continue
}

// replace best peer if this peer has better rtt
// replace best peer if this peer meets the requirements better
if maybe_better.1.is_better(best_peer.1, &requirement) {
best_peer = maybe_better;
continue
}

// replace best peer if this peer has better rtt and both have same range quality
if maybe_better.1.timeout() < best_peer.1.timeout() &&
!maybe_better.1.last_response_likely_bad
{
@@ -170,9 +177,13 @@ impl<N: NetworkPrimitives> StateFetcher<N> {
return PollAction::NoRequests
}

let Some(peer_id) = self.next_best_peer() else { return PollAction::NoPeersAvailable };

let request = self.queued_requests.pop_front().expect("not empty");
let Some(peer_id) = self.next_best_peer(request.best_peer_requirements()) else {
// need to put back the request
self.queued_requests.push_front(request);
return PollAction::NoPeersAvailable
};

let request = self.prepare_block_request(peer_id, request);

PollAction::Ready(FetchAction::BlockRequest { peer_id, request })
@@ -358,7 +369,6 @@ struct Peer {
/// lowest timeout.
last_response_likely_bad: bool,
/// Tracks the range info for the peer.
#[allow(dead_code)]
range_info: Option<BlockRangeInfo>,
}
@@ -366,6 +376,74 @@ impl Peer {
fn timeout(&self) -> u64 {
self.timeout.load(Ordering::Relaxed)
}

/// Returns the earliest block number available from the peer.
fn earliest(&self) -> u64 {
self.range_info.as_ref().map_or(0, |info| info.earliest())
}

/// Returns true if the peer has the full history available.
fn has_full_history(&self) -> bool {
self.earliest() == 0
}

fn range(&self) -> Option<RangeInclusive<u64>> {
self.range_info.as_ref().map(|info| info.range())
}

/// Returns true if this peer has a better range than the other peer for serving the requested
/// range.
///
/// A peer has a "better range" if:
/// 1. It can fully cover the requested range while the other cannot
/// 2. Neither can fully cover the range, but this peer has a lower start value
/// 3. If a peer doesn't announce a range we assume it has full history, but check the other's
/// range and treat that as better if it can cover the range
fn has_better_range(&self, other: &Self, range: RangeInclusive<u64>) -> bool {
let self_range = self.range();
let other_range = other.range();

match (self_range, other_range) {
(Some(self_r), Some(other_r)) => {
// Check if each peer can fully cover the requested range
let self_covers = self_r.contains(range.start()) && self_r.contains(range.end());
let other_covers = other_r.contains(range.start()) && other_r.contains(range.end());

#[allow(clippy::match_same_arms)]
match (self_covers, other_covers) {
(true, false) => true, // Only self covers the range
(false, true) => false, // Only other covers the range
(true, true) => false, // Both cover
(false, false) => {
// neither covers - prefer if peer has lower (better) start range
self_r.start() < other_r.start()
}
}
}
(Some(self_r), None) => {
// Self has range info, other doesn't (treated as full history with unknown latest)
// Self is better only if it covers the range
self_r.contains(range.start()) && self_r.contains(range.end())
}
(None, Some(other_r)) => {
// Self has no range info (full history), other has range info
// Self is better only if other doesn't cover the range
!(other_r.contains(range.start()) && other_r.contains(range.end()))
}
(None, None) => false, // Neither has range info - no one is better
}
}

/// Returns true if this peer is better than the other peer based on the given requirements.
fn is_better(&self, other: &Self, requirement: &BestPeerRequirements) -> bool {
match requirement {
BestPeerRequirements::None => false,
BestPeerRequirements::FullBlockRange(range) => {
self.has_better_range(other, range.clone())
}
BestPeerRequirements::FullBlock => self.has_full_history() && !other.has_full_history(),
}
}
}
/// Tracks the state of an individual peer
@@ -427,7 +505,6 @@ pub(crate) enum DownloadRequest<N: NetworkPrimitives> {
request: Vec<B256>,
response: oneshot::Sender<PeerRequestResult<Vec<N::BlockBody>>>,
priority: Priority,
#[allow(dead_code)]
range_hint: Option<RangeInclusive<u64>>,
},
}
@@ -456,6 +533,20 @@ impl<N: NetworkPrimitives> DownloadRequest<N> {
const fn is_normal_priority(&self) -> bool {
self.get_priority().is_normal()
}

/// Returns the best peer requirements for this request.
fn best_peer_requirements(&self) -> BestPeerRequirements {
match self {
Self::GetBlockHeaders { .. } => BestPeerRequirements::None,
Self::GetBlockBodies { range_hint, .. } => {
if let Some(range) = range_hint {
BestPeerRequirements::FullBlockRange(range.clone())
} else {
BestPeerRequirements::FullBlock
}
}
}
}
}

/// An action the syncer can emit.
@@ -480,6 +571,16 @@ pub(crate) enum BlockResponseOutcome {
BadResponse(PeerId, ReputationChangeKind),
}

/// Additional requirements for how to rank peers during selection.
enum BestPeerRequirements {
/// No additional requirements
None,
/// Peer must have this block range available.
FullBlockRange(RangeInclusive<u64>),
/// Peer must have full range.
FullBlock,
}
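For intuition, the FullBlockRange ranking used by `is_better`/`has_better_range` can be restated over plain integer ranges. This is an illustrative standalone sketch, not the reth types: prefer the peer that fully covers the requested range, and if neither covers it, prefer the lower start (more history):

    use std::ops::RangeInclusive;

    fn prefer_a(
        a: &RangeInclusive<u64>,
        b: &RangeInclusive<u64>,
        req: &RangeInclusive<u64>,
    ) -> bool {
        let covers = |r: &RangeInclusive<u64>| r.contains(req.start()) && r.contains(req.end());
        match (covers(a), covers(b)) {
            (true, false) => true,                   // only a covers the request
            (false, false) => a.start() < b.start(), // neither covers: more history wins
            _ => false,                              // b covers (alone or together with a)
        }
    }

    // With a request for bodies 40..=60, a peer serving 0..=100 beats one serving 70..=100:
    // prefer_a(&(0..=100), &(70..=100), &(40..=60)) == true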
#[cfg(test)]
mod tests {
use super::*;
@@ -536,17 +637,17 @@ mod tests {
None,
);

let first_peer = fetcher.next_best_peer().unwrap();
let first_peer = fetcher.next_best_peer(BestPeerRequirements::None).unwrap();
assert!(first_peer == peer1 || first_peer == peer2);
// Pending disconnect for first_peer
fetcher.on_pending_disconnect(&first_peer);
// first_peer now isn't idle, so we should get other peer
let second_peer = fetcher.next_best_peer().unwrap();
let second_peer = fetcher.next_best_peer(BestPeerRequirements::None).unwrap();
assert!(first_peer == peer1 || first_peer == peer2);
assert_ne!(first_peer, second_peer);
// without idle peers, returns None
fetcher.on_pending_disconnect(&second_peer);
assert_eq!(fetcher.next_best_peer(), None);
assert_eq!(fetcher.next_best_peer(BestPeerRequirements::None), None);
}

#[tokio::test]
@@ -588,13 +689,13 @@ mod tests {
);

// Must always get peer1 (lowest timeout)
assert_eq!(fetcher.next_best_peer(), Some(peer1));
assert_eq!(fetcher.next_best_peer(), Some(peer1));
assert_eq!(fetcher.next_best_peer(BestPeerRequirements::None), Some(peer1));
assert_eq!(fetcher.next_best_peer(BestPeerRequirements::None), Some(peer1));
// peer2's timeout changes below peer1's
peer2_timeout.store(10, Ordering::Relaxed);
// Then we get peer 2 always (now lowest)
assert_eq!(fetcher.next_best_peer(), Some(peer2));
assert_eq!(fetcher.next_best_peer(), Some(peer2));
assert_eq!(fetcher.next_best_peer(BestPeerRequirements::None), Some(peer2));
assert_eq!(fetcher.next_best_peer(BestPeerRequirements::None), Some(peer2));
}

#[tokio::test]
@@ -684,4 +785,367 @@ mod tests {

assert!(fetcher.peers[&peer_id].state.is_idle());
}
#[test]
fn test_peer_is_better_none_requirement() {
let peer1 = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: Some(BlockRangeInfo::new(0, 100, B256::random())),
};

let peer2 = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 50,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(20)),
last_response_likely_bad: false,
range_info: None,
};

// With None requirement, is_better should always return false
assert!(!peer1.is_better(&peer2, &BestPeerRequirements::None));
assert!(!peer2.is_better(&peer1, &BestPeerRequirements::None));
}

#[test]
fn test_peer_is_better_full_block_requirement() {
// Peer with full history (earliest = 0)
let peer_full = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: Some(BlockRangeInfo::new(0, 100, B256::random())),
};

// Peer without full history (earliest = 50)
let peer_partial = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: Some(BlockRangeInfo::new(50, 100, B256::random())),
};

// Peer without range info (treated as full history)
let peer_no_range = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: None,
};

// Peer with full history is better than peer without
assert!(peer_full.is_better(&peer_partial, &BestPeerRequirements::FullBlock));
assert!(!peer_partial.is_better(&peer_full, &BestPeerRequirements::FullBlock));

// Peer without range info (full history) is better than partial
assert!(peer_no_range.is_better(&peer_partial, &BestPeerRequirements::FullBlock));
assert!(!peer_partial.is_better(&peer_no_range, &BestPeerRequirements::FullBlock));

// Both have full history - no improvement
assert!(!peer_full.is_better(&peer_no_range, &BestPeerRequirements::FullBlock));
assert!(!peer_no_range.is_better(&peer_full, &BestPeerRequirements::FullBlock));
}
#[test]
fn test_peer_is_better_full_block_range_requirement() {
let range = RangeInclusive::new(40, 60);

// Peer that covers the requested range
let peer_covers = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: Some(BlockRangeInfo::new(0, 100, B256::random())),
};

// Peer that doesn't cover the range (earliest too high)
let peer_no_cover = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: Some(BlockRangeInfo::new(70, 100, B256::random())),
};

// Peer that covers the requested range is better than one that doesn't
assert!(peer_covers
.is_better(&peer_no_cover, &BestPeerRequirements::FullBlockRange(range.clone())));
assert!(
!peer_no_cover.is_better(&peer_covers, &BestPeerRequirements::FullBlockRange(range))
);
}

#[test]
fn test_peer_is_better_both_cover_range() {
let range = RangeInclusive::new(30, 50);

// Peer with full history that covers the range
let peer_full = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: Some(BlockRangeInfo::new(0, 50, B256::random())),
};

// Peer without full history that also covers the range
let peer_partial = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: Some(BlockRangeInfo::new(30, 50, B256::random())),
};

// When both cover the range, prefer none
assert!(!peer_full
.is_better(&peer_partial, &BestPeerRequirements::FullBlockRange(range.clone())));
assert!(!peer_partial.is_better(&peer_full, &BestPeerRequirements::FullBlockRange(range)));
}

#[test]
fn test_peer_is_better_lower_start() {
let range = RangeInclusive::new(30, 60);

// Peer with full history; its range ends before the requested end
let peer_full = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: Some(BlockRangeInfo::new(0, 50, B256::random())),
};

// Peer without full history; its range also ends before the requested end
let peer_partial = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: Some(BlockRangeInfo::new(30, 50, B256::random())),
};

// When neither fully covers the range, prefer the lower start value
assert!(peer_full
.is_better(&peer_partial, &BestPeerRequirements::FullBlockRange(range.clone())));
assert!(!peer_partial.is_better(&peer_full, &BestPeerRequirements::FullBlockRange(range)));
}
#[test]
fn test_peer_is_better_neither_covers_range() {
let range = RangeInclusive::new(40, 60);

// Peer with full history that doesn't cover the range (latest too low)
let peer_full = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 30,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: Some(BlockRangeInfo::new(0, 30, B256::random())),
};

// Peer without full history that also doesn't cover the range
let peer_partial = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 30,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: Some(BlockRangeInfo::new(10, 30, B256::random())),
};

// When neither covers the range, prefer full history
assert!(peer_full
.is_better(&peer_partial, &BestPeerRequirements::FullBlockRange(range.clone())));
assert!(!peer_partial.is_better(&peer_full, &BestPeerRequirements::FullBlockRange(range)));
}

#[test]
fn test_peer_is_better_no_range_info() {
let range = RangeInclusive::new(40, 60);

// Peer with range info
let peer_with_range = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: Some(BlockRangeInfo::new(30, 100, B256::random())),
};

// Peer without range info
let peer_no_range = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: None,
};

// Peer without range info is not better (we prefer peers with known ranges)
assert!(!peer_no_range
.is_better(&peer_with_range, &BestPeerRequirements::FullBlockRange(range.clone())));

// Peer with range info is better than peer without
assert!(
peer_with_range.is_better(&peer_no_range, &BestPeerRequirements::FullBlockRange(range))
);
}

#[test]
fn test_peer_is_better_one_peer_no_range_covers() {
let range = RangeInclusive::new(40, 60);

// Peer with range info that covers the requested range
let peer_with_range_covers = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: Some(BlockRangeInfo::new(30, 100, B256::random())),
};

// Peer without range info (treated as full history with unknown latest)
let peer_no_range = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: None,
};

// Peer with range that covers is better than peer without range info
assert!(peer_with_range_covers
.is_better(&peer_no_range, &BestPeerRequirements::FullBlockRange(range.clone())));

// Peer without range info is not better when other covers
assert!(!peer_no_range
.is_better(&peer_with_range_covers, &BestPeerRequirements::FullBlockRange(range)));
}

#[test]
fn test_peer_is_better_one_peer_no_range_doesnt_cover() {
let range = RangeInclusive::new(40, 60);

// Peer with range info that does NOT cover the requested range (too high)
let peer_with_range_no_cover = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: Some(BlockRangeInfo::new(70, 100, B256::random())),
};

// Peer without range info (treated as full history)
let peer_no_range = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: None,
};

// Peer with range that doesn't cover is not better
assert!(!peer_with_range_no_cover
.is_better(&peer_no_range, &BestPeerRequirements::FullBlockRange(range.clone())));

// Peer without range info (full history) is better when other doesn't cover
assert!(peer_no_range
.is_better(&peer_with_range_no_cover, &BestPeerRequirements::FullBlockRange(range)));
}
#[test]
fn test_peer_is_better_edge_cases() {
// Test exact range boundaries
let range = RangeInclusive::new(50, 100);

// Peer that exactly covers the range
let peer_exact = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: Some(BlockRangeInfo::new(50, 100, B256::random())),
};

// Peer that's one block short at the start
let peer_short_start = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: Some(BlockRangeInfo::new(51, 100, B256::random())),
};

// Peer that's one block short at the end
let peer_short_end = Peer {
state: PeerState::Idle,
best_hash: B256::random(),
best_number: 100,
capabilities: Arc::new(Capabilities::new(vec![])),
timeout: Arc::new(AtomicU64::new(10)),
last_response_likely_bad: false,
range_info: Some(BlockRangeInfo::new(50, 99, B256::random())),
};

// Exact coverage is better than short coverage
assert!(peer_exact
.is_better(&peer_short_start, &BestPeerRequirements::FullBlockRange(range.clone())));
assert!(peer_exact
.is_better(&peer_short_end, &BestPeerRequirements::FullBlockRange(range.clone())));

// Short coverage is not better than exact coverage
assert!(!peer_short_start
.is_better(&peer_exact, &BestPeerRequirements::FullBlockRange(range.clone())));
assert!(
!peer_short_end.is_better(&peer_exact, &BestPeerRequirements::FullBlockRange(range))
);
}
}
@@ -175,6 +175,7 @@ pub use reth_network_p2p as p2p;

/// re-export types crates
pub mod types {
pub use reth_discv4::NatResolver;
pub use reth_eth_wire_types::*;
pub use reth_network_types::*;
}
@@ -232,9 +232,25 @@ impl<N: NetworkPrimitives> PeersInfo for NetworkHandle<N> {

fn local_node_record(&self) -> NodeRecord {
if let Some(discv4) = &self.inner.discv4 {
// Note: the discv4 service uses the same `nat` so we can directly return the node
// record here
discv4.node_record()
} else if let Some(record) = self.inner.discv5.as_ref().and_then(|d| d.node_record()) {
record
} else if let Some(discv5) = self.inner.discv5.as_ref() {
// for discv5 we must check if we have an external ip configured
if let Some(external) = self.inner.nat.and_then(|nat| nat.as_external_ip()) {
NodeRecord::new((external, discv5.local_port()).into(), *self.peer_id())
} else {
// use the node record that discv5 tracks or use localhost
self.inner.discv5.as_ref().and_then(|d| d.node_record()).unwrap_or_else(|| {
NodeRecord::new(
(std::net::IpAddr::V4(std::net::Ipv4Addr::LOCALHOST), discv5.local_port())
.into(),
*self.peer_id(),
)
})
}
// also use the tcp port
.with_tcp_port(self.inner.listener_address.lock().port())
} else {
let external_ip = self.inner.nat.and_then(|nat| nat.as_external_ip());
@@ -90,6 +90,8 @@ pub struct PeersManager {
net_connection_state: NetworkConnectionState,
/// How long to temporarily ban ip on an incoming connection attempt.
incoming_ip_throttle_duration: Duration,
/// IP address filter for restricting network connections to specific IP ranges.
ip_filter: reth_net_banlist::IpFilter,
}

impl PeersManager {
@@ -108,6 +110,7 @@ impl PeersManager {
basic_nodes,
max_backoff_count,
incoming_ip_throttle_duration,
ip_filter,
} = config;
let (manager_tx, handle_rx) = mpsc::unbounded_channel();
let now = Instant::now();
@@ -161,6 +164,7 @@ impl PeersManager {
max_backoff_count,
net_connection_state: NetworkConnectionState::default(),
incoming_ip_throttle_duration,
ip_filter,
}
}

@@ -243,6 +247,12 @@ impl PeersManager {
&mut self,
addr: IpAddr,
) -> Result<(), InboundConnectionError> {
// Check if the IP is in the allowed ranges (netrestrict)
if !self.ip_filter.is_allowed(&addr) {
trace!(target: "net", ?addr, "Rejecting connection from IP not in allowed ranges");
return Err(InboundConnectionError::IpBanned)
}

if self.ban_list.is_banned_ip(&addr) {
return Err(InboundConnectionError::IpBanned)
}
@@ -749,7 +759,15 @@ impl PeersManager {
addr: PeerAddr,
fork_id: Option<ForkId>,
) {
if self.ban_list.is_banned(&peer_id, &addr.tcp().ip()) {
let ip_addr = addr.tcp().ip();

// Check if the IP is in the allowed ranges (netrestrict)
if !self.ip_filter.is_allowed(&ip_addr) {
trace!(target: "net", ?peer_id, ?ip_addr, "Skipping peer from IP not in allowed ranges");
return
}

if self.ban_list.is_banned(&peer_id, &ip_addr) {
return
}

@@ -830,7 +848,15 @@ impl PeersManager {
addr: PeerAddr,
fork_id: Option<ForkId>,
) {
if self.ban_list.is_banned(&peer_id, &addr.tcp().ip()) {
let ip_addr = addr.tcp().ip();

// Check if the IP is in the allowed ranges (netrestrict)
if !self.ip_filter.is_allowed(&ip_addr) {
trace!(target: "net", ?peer_id, ?ip_addr, "Skipping outbound connection to IP not in allowed ranges");
return
}

if self.ban_list.is_banned(&peer_id, &ip_addr) {
return
}
@@ -2899,4 +2925,106 @@ mod tests {
let updated_peer = manager.peers.get(&peer_id).unwrap();
assert_eq!(updated_peer.addr.tcp().ip(), updated_ip);
}

#[tokio::test]
async fn test_ip_filter_blocks_inbound_connection() {
use reth_net_banlist::IpFilter;
use std::net::IpAddr;

// Create a filter that only allows 192.168.0.0/16
let ip_filter = IpFilter::from_cidr_string("192.168.0.0/16").unwrap();
let config = PeersConfig::test().with_ip_filter(ip_filter);
let mut peers = PeersManager::new(config);

// Try to connect from an allowed IP
let allowed_ip: IpAddr = "192.168.1.100".parse().unwrap();
assert!(peers.on_incoming_pending_session(allowed_ip).is_ok());

// Try to connect from a disallowed IP
let disallowed_ip: IpAddr = "10.0.0.1".parse().unwrap();
assert!(peers.on_incoming_pending_session(disallowed_ip).is_err());
}

#[tokio::test]
async fn test_ip_filter_blocks_outbound_connection() {
use reth_net_banlist::IpFilter;
use std::net::SocketAddr;

// Create a filter that only allows 192.168.0.0/16
let ip_filter = IpFilter::from_cidr_string("192.168.0.0/16").unwrap();
let config = PeersConfig::test().with_ip_filter(ip_filter);
let mut peers = PeersManager::new(config);

let peer_id = PeerId::new([1; 64]);

// Try to add a peer with an allowed IP
let allowed_addr: SocketAddr = "192.168.1.100:30303".parse().unwrap();
peers.add_peer(peer_id, PeerAddr::from_tcp(allowed_addr), None);
assert!(peers.peers.contains_key(&peer_id));

// Try to add a peer with a disallowed IP
let peer_id2 = PeerId::new([2; 64]);
let disallowed_addr: SocketAddr = "10.0.0.1:30303".parse().unwrap();
peers.add_peer(peer_id2, PeerAddr::from_tcp(disallowed_addr), None);
assert!(!peers.peers.contains_key(&peer_id2));
}

#[tokio::test]
async fn test_ip_filter_ipv6() {
use reth_net_banlist::IpFilter;
use std::net::IpAddr;

// Create a filter that only allows IPv6 range 2001:db8::/32
let ip_filter = IpFilter::from_cidr_string("2001:db8::/32").unwrap();
let config = PeersConfig::test().with_ip_filter(ip_filter);
let mut peers = PeersManager::new(config);

// Try to connect from an allowed IPv6 address
let allowed_ip: IpAddr = "2001:db8::1".parse().unwrap();
assert!(peers.on_incoming_pending_session(allowed_ip).is_ok());

// Try to connect from a disallowed IPv6 address
let disallowed_ip: IpAddr = "2001:db9::1".parse().unwrap();
assert!(peers.on_incoming_pending_session(disallowed_ip).is_err());
}

#[tokio::test]
async fn test_ip_filter_multiple_ranges() {
use reth_net_banlist::IpFilter;
use std::net::IpAddr;

// Create a filter that allows multiple ranges
let ip_filter = IpFilter::from_cidr_string("192.168.0.0/16,10.0.0.0/8").unwrap();
let config = PeersConfig::test().with_ip_filter(ip_filter);
let mut peers = PeersManager::new(config);

// Try IPs from both allowed ranges
let ip1: IpAddr = "192.168.1.1".parse().unwrap();
let ip2: IpAddr = "10.5.10.20".parse().unwrap();
assert!(peers.on_incoming_pending_session(ip1).is_ok());
assert!(peers.on_incoming_pending_session(ip2).is_ok());

// Try IP from disallowed range
let disallowed_ip: IpAddr = "172.16.0.1".parse().unwrap();
assert!(peers.on_incoming_pending_session(disallowed_ip).is_err());
}

#[tokio::test]
async fn test_ip_filter_no_restriction() {
use reth_net_banlist::IpFilter;
use std::net::IpAddr;

// Create a filter with no restrictions (allow all)
let ip_filter = IpFilter::allow_all();
let config = PeersConfig::test().with_ip_filter(ip_filter);
let mut peers = PeersManager::new(config);

// All IPs should be allowed
let ip1: IpAddr = "192.168.1.1".parse().unwrap();
let ip2: IpAddr = "10.0.0.1".parse().unwrap();
let ip3: IpAddr = "8.8.8.8".parse().unwrap();
assert!(peers.on_incoming_pending_session(ip1).is_ok());
assert!(peers.on_incoming_pending_session(ip2).is_ok());
assert!(peers.on_incoming_pending_session(ip3).is_ok());
}
}
@@ -3,7 +3,7 @@
//! This module provides functionality to filter out peers that don't have
//! specific required blocks (primarily used for shadowfork testing).

use alloy_primitives::B256;
use alloy_eips::BlockNumHash;
use futures::StreamExt;
use reth_eth_wire_types::{GetBlockHeaders, HeadersDirection};
use reth_network_api::{
@@ -16,11 +16,13 @@ use tracing::{debug, info, trace};
///
/// This task listens for new peer sessions and checks if they have the required
/// block hashes. Peers that don't have these blocks are banned.
///
/// This type is mainly used to connect peers on shadow forks (e.g. mainnet shadowfork).
pub struct RequiredBlockFilter<N> {
/// Network handle for listening to events and managing peer reputation.
network: N,
/// List of block hashes that peers must have to be considered valid.
block_hashes: Vec<B256>,
/// List of block number-hash pairs that peers must have to be considered valid.
block_num_hashes: Vec<BlockNumHash>,
}

impl<N> RequiredBlockFilter<N>
@@ -28,8 +30,8 @@ where
N: NetworkEventListenerProvider + Peers + Clone + Send + Sync + 'static,
{
/// Creates a new required block peer filter.
pub const fn new(network: N, block_hashes: Vec<B256>) -> Self {
Self { network, block_hashes }
pub const fn new(network: N, block_num_hashes: Vec<BlockNumHash>) -> Self {
Self { network, block_num_hashes }
}

/// Spawns the required block peer filter task.
@@ -37,12 +39,12 @@ where
/// This task will run indefinitely, monitoring new peer sessions and filtering
/// out peers that don't have the required blocks.
pub fn spawn(self) {
if self.block_hashes.is_empty() {
if self.block_num_hashes.is_empty() {
debug!(target: "net::filter", "No required block hashes configured, skipping peer filtering");
return;
}

info!(target: "net::filter", "Starting required block peer filter with {} block hashes", self.block_hashes.len());
info!(target: "net::filter", "Starting required block peer filter with {} block hashes", self.block_num_hashes.len());

tokio::spawn(async move {
self.run().await;
@@ -60,10 +62,18 @@ where

// Spawn a task to check this peer's blocks
let network = self.network.clone();
let block_hashes = self.block_hashes.clone();
let block_num_hashes = self.block_num_hashes.clone();
let peer_block_number = info.status.latest_block.unwrap_or(0);

tokio::spawn(async move {
Self::check_peer_blocks(network, peer_id, messages, block_hashes).await;
Self::check_peer_blocks(
network,
peer_id,
messages,
block_num_hashes,
peer_block_number,
)
.await;
});
}
}
@@ -74,9 +84,19 @@ where
network: N,
peer_id: reth_network_api::PeerId,
messages: reth_network_api::PeerRequestSender<PeerRequest<N::Primitives>>,
block_hashes: Vec<B256>,
block_num_hashes: Vec<BlockNumHash>,
latest_peer_block: u64,
) {
for block_hash in block_hashes {
for block_num_hash in block_num_hashes {
// Skip if peer's block number is lower than required, peer might also be syncing and
// still on the same chain.
if block_num_hash.number > 0 && latest_peer_block <= block_num_hash.number {
debug!(target: "net::filter", "Skipping check for block {} - peer {} only at block {}",
block_num_hash.number, peer_id, latest_peer_block);
continue;
}

let block_hash = block_num_hash.hash;
trace!(target: "net::filter", "Checking if peer {} has block {}", peer_id, block_hash);

// Create a request for block headers
@@ -139,28 +159,35 @@ where
#[cfg(test)]
mod tests {
use super::*;
use alloy_eips::BlockNumHash;
use alloy_primitives::{b256, B256};
use reth_network_api::noop::NoopNetwork;

#[test]
fn test_required_block_filter_creation() {
let network = NoopNetwork::default();
let block_hashes = vec![
b256!("0x1111111111111111111111111111111111111111111111111111111111111111"),
b256!("0x2222222222222222222222222222222222222222222222222222222222222222"),
let block_num_hashes = vec![
BlockNumHash::new(
0,
b256!("0x1111111111111111111111111111111111111111111111111111111111111111"),
),
BlockNumHash::new(
23115201,
b256!("0x2222222222222222222222222222222222222222222222222222222222222222"),
),
];

let filter = RequiredBlockFilter::new(network, block_hashes.clone());
assert_eq!(filter.block_hashes.len(), 2);
assert_eq!(filter.block_hashes, block_hashes);
let filter = RequiredBlockFilter::new(network, block_num_hashes.clone());
assert_eq!(filter.block_num_hashes.len(), 2);
assert_eq!(filter.block_num_hashes, block_num_hashes);
}

#[test]
fn test_required_block_filter_empty_hashes_does_not_spawn() {
let network = NoopNetwork::default();
let block_hashes = vec![];
let block_num_hashes = vec![];

let filter = RequiredBlockFilter::new(network, block_hashes);
let filter = RequiredBlockFilter::new(network, block_num_hashes);
// This should not panic and should exit early when spawn is called
filter.spawn();
}
@@ -170,10 +197,10 @@ mod tests {
// This test would require a more complex setup with mock network components
// For now, we ensure the basic structure is correct
let network = NoopNetwork::default();
let block_hashes = vec![B256::default()];
let block_num_hashes = vec![BlockNumHash::new(0, B256::default())];

let filter = RequiredBlockFilter::new(network, block_hashes);
let filter = RequiredBlockFilter::new(network, block_num_hashes);
// Verify the filter can be created and basic properties are set
assert_eq!(filter.block_hashes.len(), 1);
assert_eq!(filter.block_num_hashes.len(), 1);
}
}
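To tie it together, a hedged sketch of starting the filter with the new number+hash pairs; the trait bounds mirror the `where` clause above, while the exact import paths and the surrounding wiring are assumptions:

    use alloy_eips::BlockNumHash;
    use reth_network_api::{NetworkEventListenerProvider, Peers}; // paths assumed

    // `RequiredBlockFilter` assumed in scope (defined in the module above).
    fn start_required_block_filter<N>(network: N, required: Vec<BlockNumHash>)
    where
        N: NetworkEventListenerProvider + Peers + Clone + Send + Sync + 'static,
    {
        // Peers that cannot serve these blocks (and are not simply still below the
        // required heights) are filtered out once their session is established.
        RequiredBlockFilter::new(network, required).spawn();
    }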
@@ -237,16 +237,6 @@ impl<N: NetworkPrimitives> ActiveSession<N> {
self.try_emit_broadcast(PeerMessage::PooledTransactions(msg.into())).into()
}
EthMessage::NewPooledTransactionHashes68(msg) => {
if msg.hashes.len() != msg.types.len() || msg.hashes.len() != msg.sizes.len() {
return OnIncomingMessageOutcome::BadMessage {
error: EthStreamError::TransactionHashesInvalidLenOfFields {
hashes_len: msg.hashes.len(),
types_len: msg.types.len(),
sizes_len: msg.sizes.len(),
},
message: EthMessage::NewPooledTransactionHashes68(msg),
}
}
self.try_emit_broadcast(PeerMessage::PooledTransactions(msg.into())).into()
}
EthMessage::GetBlockHeaders(req) => {