chore(db-api): remove sharded_key_encode benchmark (#21215)

Co-authored-by: Amp <amp@ampcode.com>
Georgios Konstantopoulos
2026-01-20 09:01:12 -08:00
committed by GitHub
parent 80980b8e4d
commit 7371bd3f29
3 changed files with 4 additions and 152 deletions

Cargo.lock (generated, 9 lines changed)

@@ -106,9 +106,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
 [[package]]
 name = "alloy-chains"
-version = "0.2.27"
+version = "0.2.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25db5bcdd086f0b1b9610140a12c59b757397be90bd130d8d836fc8da0815a34"
+checksum = "3842d8c52fcd3378039f4703dba392dca8b546b1c8ed6183048f8dab95b2be78"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -8100,7 +8100,6 @@ dependencies = [
  "alloy-primitives",
  "arbitrary",
  "bytes",
- "codspeed-criterion-compat",
  "derive_more",
  "metrics",
  "modular-bitfield",
@@ -14607,9 +14606,9 @@ dependencies = [
 [[package]]
 name = "zmij"
-version = "1.0.15"
+version = "1.0.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94f63c051f4fe3c1509da62131a678643c5b6fbdc9273b2b79d4378ebda003d2"
+checksum = "dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65"

 [[package]]
 name = "zstd"

reth-db-api Cargo.toml

@@ -60,11 +60,6 @@ test-fuzz.workspace = true
 arbitrary = { workspace = true, features = ["derive"] }
 proptest.workspace = true
 proptest-arbitrary-interop.workspace = true
-criterion.workspace = true
-
-[[bench]]
-name = "sharded_key_encode"
-harness = false

 [features]
 test-utils = [
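(For context: `harness = false` turns off the default libtest harness so Criterion can supply its own `main`; with the benchmark gone, both the `[[bench]]` registration and the `criterion` dev-dependency can be dropped together.)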

reth-db-api benches/sharded_key_encode.rs (deleted)

@@ -1,142 +0,0 @@
//! Benchmarks for `ShardedKey` and `StorageShardedKey` encoding.
//!
//! These benchmarks measure the performance of stack-allocated vs heap-allocated key encoding,
//! inspired by Anza Labs' PR #3603, which saved ~20k allocations/sec by moving `RocksDB` keys
//! from the heap to the stack.
//!
//! Run with: `cargo bench -p reth-db-api --bench sharded_key_encode`

#![allow(missing_docs)]

use alloy_primitives::{Address, B256};
use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion, Throughput};
use reth_db_api::{
    models::{storage_sharded_key::StorageShardedKey, ShardedKey},
    table::Encode,
};

/// Number of keys to encode per iteration for throughput measurement.
const BATCH_SIZE: usize = 10_000;

fn bench_sharded_key_address_encode(c: &mut Criterion) {
    let mut group = c.benchmark_group("sharded_key_encode");
    group.throughput(Throughput::Elements(BATCH_SIZE as u64));

    // Pre-generate test data
    let keys: Vec<ShardedKey<Address>> = (0..BATCH_SIZE)
        .map(|i| {
            let mut addr_bytes = [0u8; 20];
            addr_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes());
            ShardedKey::new(Address::from(addr_bytes), i as u64)
        })
        .collect();

    group.bench_function("ShardedKey<Address>::encode", |b| {
        b.iter_batched(
            || keys.clone(),
            |keys| {
                for key in keys {
                    let encoded = black_box(key.encode());
                    black_box(encoded.as_ref());
                }
            },
            BatchSize::SmallInput,
        )
    });

    group.finish();
}

fn bench_storage_sharded_key_encode(c: &mut Criterion) {
    let mut group = c.benchmark_group("storage_sharded_key_encode");
    group.throughput(Throughput::Elements(BATCH_SIZE as u64));

    // Pre-generate test data
    let keys: Vec<StorageShardedKey> = (0..BATCH_SIZE)
        .map(|i| {
            let mut addr_bytes = [0u8; 20];
            addr_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes());
            let mut key_bytes = [0u8; 32];
            key_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes());
            StorageShardedKey::new(Address::from(addr_bytes), B256::from(key_bytes), i as u64)
        })
        .collect();

    group.bench_function("StorageShardedKey::encode", |b| {
        b.iter_batched(
            || keys.clone(),
            |keys| {
                for key in keys {
                    let encoded = black_box(key.encode());
                    black_box(encoded.as_ref());
                }
            },
            BatchSize::SmallInput,
        )
    });

    group.finish();
}

fn bench_encode_decode_roundtrip(c: &mut Criterion) {
    use reth_db_api::table::Decode;

    let mut group = c.benchmark_group("sharded_key_roundtrip");
    group.throughput(Throughput::Elements(BATCH_SIZE as u64));

    let keys: Vec<ShardedKey<Address>> = (0..BATCH_SIZE)
        .map(|i| {
            let mut addr_bytes = [0u8; 20];
            addr_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes());
            ShardedKey::new(Address::from(addr_bytes), i as u64)
        })
        .collect();

    group.bench_function("ShardedKey<Address>::encode_then_decode", |b| {
        b.iter_batched(
            || keys.clone(),
            |keys| {
                for key in keys {
                    let encoded = key.encode();
                    let decoded = black_box(ShardedKey::<Address>::decode(&encoded).unwrap());
                    black_box(decoded);
                }
            },
            BatchSize::SmallInput,
        )
    });

    let storage_keys: Vec<StorageShardedKey> = (0..BATCH_SIZE)
        .map(|i| {
            let mut addr_bytes = [0u8; 20];
            addr_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes());
            let mut key_bytes = [0u8; 32];
            key_bytes[..8].copy_from_slice(&(i as u64).to_be_bytes());
            StorageShardedKey::new(Address::from(addr_bytes), B256::from(key_bytes), i as u64)
        })
        .collect();

    group.bench_function("StorageShardedKey::encode_then_decode", |b| {
        b.iter_batched(
            || storage_keys.clone(),
            |keys| {
                for key in keys {
                    let encoded = key.encode();
                    let decoded = black_box(StorageShardedKey::decode(&encoded).unwrap());
                    black_box(decoded);
                }
            },
            BatchSize::SmallInput,
        )
    });

    group.finish();
}

criterion_group!(
    benches,
    bench_sharded_key_address_encode,
    bench_storage_sharded_key_encode,
    bench_encode_decode_roundtrip,
);
criterion_main!(benches);
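For readers wondering what the removed benchmark was comparing: a `ShardedKey<Address>` encodes to the 20 address bytes followed by the big-endian `u64` highest block number, 28 bytes in total. Below is a minimal sketch of the heap-vs-stack trade-off the module doc comment refers to; `encode_heap` and `encode_stack` are hypothetical helpers written for illustration, not reth's actual `Encode` implementation.

use alloy_primitives::Address;

// Heap variant: every call allocates a fresh `Vec` for the 28-byte key.
fn encode_heap(addr: &Address, highest_block_number: u64) -> Vec<u8> {
    let mut buf = Vec::with_capacity(28);
    buf.extend_from_slice(addr.as_slice());
    buf.extend_from_slice(&highest_block_number.to_be_bytes());
    buf
}

// Stack variant: the encoded key is a fixed-size array, so encoding
// performs no heap allocation at all.
fn encode_stack(addr: &Address, highest_block_number: u64) -> [u8; 28] {
    let mut buf = [0u8; 28];
    buf[..20].copy_from_slice(addr.as_slice());
    buf[20..].copy_from_slice(&highest_block_number.to_be_bytes());
    buf
}

The saving is one small allocation per encoded key, which is precisely the effect the deleted `iter_batched` loops over 10,000 pre-generated keys were meant to make measurable.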