//! Contains various benchmark output formats, either for logging or for
//! serialization to / from files.
use alloy_primitives::B256;
use csv::Writer;
use eyre::OptionExt;
use reth_primitives_traits::constants::GIGAGAS;
use serde::{ser::SerializeStruct, Deserialize, Serialize};
use std::{fs, path::Path, time::Duration};
use tracing::info;
/// This is the suffix for gas output csv files.
pub(crate) const GAS_OUTPUT_SUFFIX: &str = "total_gas.csv";
/// This is the suffix for combined output csv files.
pub(crate) const COMBINED_OUTPUT_SUFFIX: &str = "combined_latency.csv";
/// This is the suffix for new payload output csv files.
pub(crate) const NEW_PAYLOAD_OUTPUT_SUFFIX: &str = "new_payload_latency.csv";
/// Serialized format for gas ramp payloads on disk.
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct GasRampPayloadFile {
/// Engine API version (1-5).
pub(crate) version: u8,
/// The block hash for FCU.
pub(crate) block_hash: B256,
/// The params to pass to newPayload.
pub(crate) params: serde_json::Value,
/// The execution data for `reth_newPayload`.
#[serde(skip_serializing_if = "Option::is_none", default)]
pub(crate) execution_data: Option<alloy_rpc_types_engine::ExecutionData>,
}
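// A small illustrative sketch: round-trips the on-disk gas ramp payload format through
// `serde_json`. The field values below are arbitrary; `execution_data` stays `None` to
// show that it is skipped on serialization and defaulted on deserialization.
#[cfg(test)]
mod gas_ramp_payload_file_example {
    use super::*;
    #[test]
    fn json_round_trip_skips_missing_execution_data() {
        let file = GasRampPayloadFile {
            version: 3,
            block_hash: B256::ZERO,
            params: serde_json::json!({ "example": true }),
            execution_data: None,
        };
        let encoded = serde_json::to_string(&file).unwrap();
        // `skip_serializing_if = "Option::is_none"` drops the field entirely.
        assert!(!encoded.contains("execution_data"));
        let decoded: GasRampPayloadFile = serde_json::from_str(&encoded).unwrap();
        assert_eq!(decoded.version, 3);
        assert_eq!(decoded.block_hash, B256::ZERO);
        assert!(decoded.execution_data.is_none());
    }
}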
/// This represents the results of a single `newPayload` call in the benchmark, containing the gas
/// used and the `newPayload` latency.
#[derive(Debug)]
pub(crate) struct NewPayloadResult {
/// The gas used in the `newPayload` call.
pub(crate) gas_used: u64,
/// The latency of the `newPayload` call.
pub(crate) latency: Duration,
/// Time spent waiting for persistence. `None` when no persistence was in-flight.
pub(crate) persistence_wait: Option<Duration>,
/// Time spent waiting for execution cache lock.
pub(crate) execution_cache_wait: Duration,
/// Time spent waiting for sparse trie lock.
pub(crate) sparse_trie_wait: Duration,
}
impl NewPayloadResult {
/// Returns the gas per second processed in the `newPayload` call.
pub(crate) fn gas_per_second(&self) -> f64 {
self.gas_used as f64 / self.latency.as_secs_f64()
}
}
impl std::fmt::Display for NewPayloadResult {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"New payload processed at {:.4} Ggas/s, used {} total gas. Latency: {:?}",
self.gas_per_second() / GIGAGAS as f64,
self.gas_used,
self.latency
)
}
}
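// Illustrative sketch of the `gas_per_second` arithmetic with hand-picked numbers:
// 30M gas executed in 500ms works out to 60M gas per second, which the `Display` impl
// above reports scaled down to Ggas/s.
#[cfg(test)]
mod new_payload_result_example {
    use super::*;
    #[test]
    fn gas_per_second_math() {
        let result = NewPayloadResult {
            gas_used: 30_000_000,
            latency: Duration::from_millis(500),
            persistence_wait: None,
            execution_cache_wait: Duration::ZERO,
            sparse_trie_wait: Duration::ZERO,
        };
        assert!((result.gas_per_second() - 60_000_000.0).abs() < 1e-6);
        assert!(result.to_string().contains("used 30000000 total gas"));
    }
}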
/// This is a manual [`Serialize`] implementation for the [`NewPayloadResult`] struct, serializing
/// the durations as microseconds because the csv writer would fail otherwise.
impl Serialize for NewPayloadResult {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
// convert the time to microseconds
let time = self.latency.as_micros();
let mut state = serializer.serialize_struct("NewPayloadResult", 5)?;
state.serialize_field("gas_used", &self.gas_used)?;
state.serialize_field("latency", &time)?;
state.serialize_field("persistence_wait", &self.persistence_wait.map(|d| d.as_micros()))?;
state.serialize_field("execution_cache_wait", &self.execution_cache_wait.as_micros())?;
state.serialize_field("sparse_trie_wait", &self.sparse_trie_wait.as_micros())?;
state.end()
}
}
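// A sketch of what the manual `Serialize` impl produces through the csv writer: flat
// columns, all durations in microseconds, and an empty cell for a `None` persistence
// wait. The values are arbitrary.
#[cfg(test)]
mod new_payload_result_csv_example {
    use super::*;
    #[test]
    fn csv_columns_are_microseconds() {
        let result = NewPayloadResult {
            gas_used: 1_000,
            latency: Duration::from_millis(2),
            persistence_wait: None,
            execution_cache_wait: Duration::from_micros(3),
            sparse_trie_wait: Duration::from_micros(4),
        };
        let mut writer = csv::Writer::from_writer(vec![]);
        writer.serialize(result).unwrap();
        let output = String::from_utf8(writer.into_inner().unwrap()).unwrap();
        let mut lines = output.lines();
        assert_eq!(
            lines.next().unwrap(),
            "gas_used,latency,persistence_wait,execution_cache_wait,sparse_trie_wait"
        );
        assert_eq!(lines.next().unwrap(), "1000,2000,,3,4");
    }
}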
/// This represents the combined results of a `newPayload` call and a `forkchoiceUpdated` call in
/// the benchmark, containing the gas used, the `newPayload` latency, and the `forkchoiceUpdated`
/// latency.
#[derive(Debug)]
pub(crate) struct CombinedResult {
/// The block number of the block being processed.
pub(crate) block_number: u64,
/// The gas limit of the block.
pub(crate) gas_limit: u64,
/// The number of transactions in the block.
pub(crate) transaction_count: u64,
/// The `newPayload` result.
pub(crate) new_payload_result: NewPayloadResult,
/// The latency of the `forkchoiceUpdated` call.
pub(crate) fcu_latency: Duration,
/// The latency of both calls combined.
pub(crate) total_latency: Duration,
}
impl CombinedResult {
/// Returns the gas per second, including the `newPayload` _and_ `forkchoiceUpdated` duration.
pub(crate) fn combined_gas_per_second(&self) -> f64 {
self.new_payload_result.gas_used as f64 / self.total_latency.as_secs_f64()
}
}
impl std::fmt::Display for CombinedResult {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Block {} processed at {:.4} Ggas/s, used {} total gas. Combined: {:.4} Ggas/s. fcu: {:?}, newPayload: {:?}",
self.block_number,
self.new_payload_result.gas_per_second() / GIGAGAS as f64,
self.new_payload_result.gas_used,
self.combined_gas_per_second() / GIGAGAS as f64,
self.fcu_latency,
self.new_payload_result.latency
)
}
}
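// Illustrative numbers for `combined_gas_per_second`: the same gas divided by the
// combined newPayload + forkchoiceUpdated latency yields a lower rate than the
// newPayload-only figure.
#[cfg(test)]
mod combined_result_example {
    use super::*;
    #[test]
    fn combined_rate_is_lower_than_new_payload_rate() {
        let combined = CombinedResult {
            block_number: 1,
            gas_limit: 30_000_000,
            transaction_count: 50,
            new_payload_result: NewPayloadResult {
                gas_used: 10_000_000,
                latency: Duration::from_millis(100),
                persistence_wait: None,
                execution_cache_wait: Duration::ZERO,
                sparse_trie_wait: Duration::ZERO,
            },
            fcu_latency: Duration::from_millis(25),
            total_latency: Duration::from_millis(125),
        };
        // 10M gas / 0.125s = 80M gas/s combined, vs. 100M gas/s for newPayload alone.
        assert!((combined.combined_gas_per_second() - 80_000_000.0).abs() < 1e-6);
        assert!((combined.new_payload_result.gas_per_second() - 100_000_000.0).abs() < 1e-6);
    }
}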
/// This is a [`Serialize`] implementation for the [`CombinedResult`] struct, serializing the
/// durations as microseconds because the csv writer would fail otherwise.
impl Serialize for CombinedResult {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
// convert the time to microseconds
let fcu_latency = self.fcu_latency.as_micros();
let new_payload_latency = self.new_payload_result.latency.as_micros();
let total_latency = self.total_latency.as_micros();
let mut state = serializer.serialize_struct("CombinedResult", 10)?;
// flatten the new payload result because this is meant for CSV writing
state.serialize_field("block_number", &self.block_number)?;
state.serialize_field("gas_limit", &self.gas_limit)?;
state.serialize_field("transaction_count", &self.transaction_count)?;
state.serialize_field("gas_used", &self.new_payload_result.gas_used)?;
state.serialize_field("new_payload_latency", &new_payload_latency)?;
state.serialize_field("fcu_latency", &fcu_latency)?;
state.serialize_field("total_latency", &total_latency)?;
state.serialize_field(
"persistence_wait",
&self.new_payload_result.persistence_wait.map(|d| d.as_micros()),
)?;
state.serialize_field(
"execution_cache_wait",
&self.new_payload_result.execution_cache_wait.as_micros(),
)?;
state.serialize_field(
"sparse_trie_wait",
&self.new_payload_result.sparse_trie_wait.as_micros(),
)?;
state.end()
}
}
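// A sketch of the flattened CSV header produced by the manual `Serialize` impl above,
// with the nested `NewPayloadResult` fields inlined. The values are arbitrary; only the
// column order matters here.
#[cfg(test)]
mod combined_result_csv_example {
    use super::*;
    #[test]
    fn csv_header_is_flattened() {
        let combined = CombinedResult {
            block_number: 1,
            gas_limit: 30_000_000,
            transaction_count: 50,
            new_payload_result: NewPayloadResult {
                gas_used: 1_000,
                latency: Duration::from_millis(1),
                persistence_wait: None,
                execution_cache_wait: Duration::ZERO,
                sparse_trie_wait: Duration::ZERO,
            },
            fcu_latency: Duration::from_millis(1),
            total_latency: Duration::from_millis(2),
        };
        let mut writer = csv::Writer::from_writer(vec![]);
        writer.serialize(combined).unwrap();
        let output = String::from_utf8(writer.into_inner().unwrap()).unwrap();
        assert_eq!(
            output.lines().next().unwrap(),
            "block_number,gas_limit,transaction_count,gas_used,new_payload_latency,\
             fcu_latency,total_latency,persistence_wait,execution_cache_wait,sparse_trie_wait"
        );
    }
}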
/// This represents a row of total gas data in the benchmark.
#[derive(Debug)]
pub(crate) struct TotalGasRow {
/// The block number of the block being processed.
pub(crate) block_number: u64,
/// The number of transactions in the block.
pub(crate) transaction_count: u64,
/// The total gas used in the block.
pub(crate) gas_used: u64,
/// Time since the start of the benchmark.
pub(crate) time: Duration,
}
/// This represents the aggregated output of a benchmark run, meant to show gas per second metrics.
#[derive(Debug)]
pub(crate) struct TotalGasOutput {
/// The total gas used in the benchmark.
pub(crate) total_gas_used: u64,
/// The total wall-clock duration of the benchmark (includes wait times).
pub(crate) total_duration: Duration,
/// The total execution-only duration (excludes wait times).
pub(crate) execution_duration: Duration,
/// The number of blocks processed.
pub(crate) blocks_processed: u64,
}
impl TotalGasOutput {
/// Create a new [`TotalGasOutput`] from gas rows only.
///
/// Use this when execution-only timing is not available (e.g., `new_payload_only`).
/// `execution_duration` will equal `total_duration`.
pub(crate) fn new(rows: Vec<TotalGasRow>) -> eyre::Result<Self> {
let total_duration = rows.last().map(|row| row.time).ok_or_eyre("empty results")?;
let blocks_processed = rows.len() as u64;
let total_gas_used: u64 = rows.into_iter().map(|row| row.gas_used).sum();
Ok(Self {
total_gas_used,
total_duration,
execution_duration: total_duration,
blocks_processed,
})
}
/// Create a new [`TotalGasOutput`] from gas rows and combined results.
///
/// - `rows`: Used for total gas and wall-clock duration
/// - `combined_results`: Used for execution-only duration (sum of `total_latency`)
pub(crate) fn with_combined_results(
rows: Vec<TotalGasRow>,
combined_results: &[CombinedResult],
) -> eyre::Result<Self> {
let total_duration = rows.last().map(|row| row.time).ok_or_eyre("empty results")?;
let blocks_processed = rows.len() as u64;
let total_gas_used: u64 = rows.into_iter().map(|row| row.gas_used).sum();
// Sum execution-only time from combined results
let execution_duration: Duration = combined_results.iter().map(|r| r.total_latency).sum();
Ok(Self { total_gas_used, total_duration, execution_duration, blocks_processed })
}
/// Return the total gigagas per second based on wall-clock time.
pub(crate) fn total_gigagas_per_second(&self) -> f64 {
self.total_gas_used as f64 / self.total_duration.as_secs_f64() / GIGAGAS as f64
}
/// Return the execution-only gigagas per second (excludes wait times).
pub(crate) fn execution_gigagas_per_second(&self) -> f64 {
self.total_gas_used as f64 / self.execution_duration.as_secs_f64() / GIGAGAS as f64
}
}
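// Illustrative sketch of the two throughput views with hand-picked numbers: wall-clock
// duration comes from the last `TotalGasRow`, while the execution-only duration sums
// the per-block `total_latency` values, excluding wait time between blocks.
#[cfg(test)]
mod total_gas_output_example {
    use super::*;
    #[test]
    fn execution_duration_excludes_wait_time() {
        let rows = vec![TotalGasRow {
            block_number: 1,
            transaction_count: 10,
            gas_used: 1_000_000,
            time: Duration::from_secs(2),
        }];
        let combined_results = vec![CombinedResult {
            block_number: 1,
            gas_limit: 30_000_000,
            transaction_count: 10,
            new_payload_result: NewPayloadResult {
                gas_used: 1_000_000,
                latency: Duration::from_millis(400),
                persistence_wait: None,
                execution_cache_wait: Duration::ZERO,
                sparse_trie_wait: Duration::ZERO,
            },
            fcu_latency: Duration::from_millis(100),
            total_latency: Duration::from_millis(500),
        }];
        let output = TotalGasOutput::with_combined_results(rows, &combined_results).unwrap();
        assert_eq!(output.total_gas_used, 1_000_000);
        assert_eq!(output.blocks_processed, 1);
        assert_eq!(output.total_duration, Duration::from_secs(2));
        assert_eq!(output.execution_duration, Duration::from_millis(500));
        // Execution-only throughput is therefore 4x the wall-clock throughput here.
        assert!(output.execution_gigagas_per_second() > output.total_gigagas_per_second());
    }
}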
/// Write benchmark results to CSV files.
///
/// Writes two files to the output directory:
/// - `combined_latency.csv`: Per-block latency results
/// - `total_gas.csv`: Per-block gas usage over time
pub(crate) fn write_benchmark_results(
output_dir: &Path,
gas_results: &[TotalGasRow],
combined_results: &[CombinedResult],
) -> eyre::Result<()> {
fs::create_dir_all(output_dir)?;
let output_path = output_dir.join(COMBINED_OUTPUT_SUFFIX);
info!(target: "reth-bench", "Writing engine api call latency output to file: {:?}", output_path);
let mut writer = Writer::from_path(&output_path)?;
for result in combined_results {
writer.serialize(result)?;
}
writer.flush()?;
let output_path = output_dir.join(GAS_OUTPUT_SUFFIX);
info!(target: "reth-bench", "Writing total gas output to file: {:?}", output_path);
let mut writer = Writer::from_path(&output_path)?;
for row in gas_results {
writer.serialize(row)?;
}
writer.flush()?;
info!(target: "reth-bench", "Finished writing benchmark output files to {:?}.", output_dir);
Ok(())
}
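// A sketch of calling `write_benchmark_results`: both CSV files end up under the given
// output directory. The temp-dir path used here is arbitrary and only for illustration.
#[cfg(test)]
mod write_benchmark_results_example {
    use super::*;
    #[test]
    fn writes_both_csv_files() {
        let output_dir = std::env::temp_dir().join("reth_bench_output_example");
        let row = TotalGasRow {
            block_number: 1,
            transaction_count: 10,
            gas_used: 1_000,
            time: Duration::from_secs(1),
        };
        write_benchmark_results(&output_dir, &[row], &[]).unwrap();
        assert!(output_dir.join(COMBINED_OUTPUT_SUFFIX).exists());
        assert!(output_dir.join(GAS_OUTPUT_SUFFIX).exists());
        // Clean up the files created by this example.
        let _ = fs::remove_dir_all(&output_dir);
    }
}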
/// This serializes the `time` field of the [`TotalGasRow`] to microseconds.
///
/// This is essentially just for the csv writer, which needs flat, named columns for its headers.
impl Serialize for TotalGasRow {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
// convert the time to microseconds
let time = self.time.as_micros();
let mut state = serializer.serialize_struct("TotalGasRow", 4)?;
state.serialize_field("block_number", &self.block_number)?;
state.serialize_field("transaction_count", &self.transaction_count)?;
state.serialize_field("gas_used", &self.gas_used)?;
state.serialize_field("time", &time)?;
state.end()
}
}
#[cfg(test)]
mod tests {
use super::*;
use csv::Writer;
use std::io::BufRead;
#[test]
fn test_write_total_gas_row_csv() {
let row = TotalGasRow {
block_number: 1,
transaction_count: 10,
gas_used: 1_000,
time: Duration::from_secs(1),
};
let mut writer = Writer::from_writer(vec![]);
writer.serialize(row).unwrap();
let result = writer.into_inner().unwrap();
// parse into Lines
let mut result = result.as_slice().lines();
// assert header
let expected_first_line = "block_number,transaction_count,gas_used,time";
let first_line = result.next().unwrap().unwrap();
assert_eq!(first_line, expected_first_line);
let expected_second_line = "1,10,1000,1000000";
let second_line = result.next().unwrap().unwrap();
assert_eq!(second_line, expected_second_line);
}
}