Compare commits

...

24 Commits

Author SHA1 Message Date
Ben-PH
800a0e0571 clean up test params 2025-05-15 18:43:32 +02:00
Ben-PH
113e1bb409 more redundant size-input clearing 2025-05-15 17:20:42 +02:00
Ben-PH
756119cf42 a/b bench the hashers in proof-gen 2025-05-15 17:20:33 +02:00
Ben-PH
eb950b64a0 Compare backing hashers with verifications 2025-05-15 17:20:25 +02:00
Ben-PH
36c5af39d3 cargo-fmt 2025-05-15 17:20:14 +02:00
Ben-PH
bdf899bdcd Clear redundant use of inputs in benchies 2025-05-15 17:19:13 +02:00
Ben-PH
20cfed53f2 More thorough black-box use 2025-05-15 17:18:47 +02:00
Ben-PH
ee6fb5dd12 set tree sized from data sizes 2025-05-14 22:00:50 +02:00
Ben-PH
0061def388 cleanup and cross-wired fixups 2025-05-14 18:46:21 +02:00
Ben-PH
810472f6dd Add zk benchies 2025-05-14 18:05:20 +02:00
Ben-PH
e94e019f18 Clean up benchies 2025-05-14 18:02:58 +02:00
Ben-PH
d07649a225 Clean up benchies 2025-05-14 17:58:45 +02:00
Ben-PH
610703ea30 Run a tree/hash benchmark shootout 2025-05-13 19:02:59 +02:00
Ben-PH
27eb9d78d7 fix: hashed -> lean imt tree (oops) 2025-05-13 16:16:16 +02:00
Ben-PH
7fd9291d1e Include ift optimal tree 2025-05-13 13:45:05 +02:00
Ben-PH
a701bda92a IFT optimal vs IMT benched 2025-05-13 13:36:18 +02:00
Ben-PH
9f4da7f319 use less unecissary allocation inside the bench 2025-05-12 19:14:21 +02:00
Ben-PH
702ac11a7c Add batch setup to the imt setup group 2025-05-12 18:56:47 +02:00
Ben-PH
a9838697c2 Add first merkle tree benchmark 2025-05-12 16:56:03 +02:00
Ben-PH
fc9e84b0fa use deterministic+portable PRNG for tree nodes 2025-05-12 15:52:35 +02:00
Ben-PH
3692bf5626 fix: Use typical data in merkle tree bench 2025-05-12 15:49:55 +02:00
Ben-PH
601f2d8e78 feat: Add zk-kit-lean-imt as dev-dependency 2025-05-06 15:49:49 +02:00
markoburcul
e12ab776b3 makefile: install wasm-pack with cargo 2025-05-05 09:52:40 +02:00
markoburcul
6d8a60c310 flake: add rust overlay and shell dependencies 2025-05-05 09:52:33 +02:00
7 changed files with 908 additions and 23 deletions

59
Cargo.lock generated
View File

@@ -1226,6 +1226,18 @@ version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa"
[[package]]
name = "light-poseidon"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39e3d87542063daaccbfecd78b60f988079b6ec4e089249658b9455075c78d42"
dependencies = [
"ark-bn254",
"ark-ff 0.5.0",
"num-bigint",
"thiserror 1.0.69",
]
[[package]]
name = "litrs"
version = "0.4.1"
@@ -1415,7 +1427,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "198db74531d58c70a361c42201efde7e2591e976d518caf7662a47dc5720e7b6"
dependencies = [
"memchr",
"thiserror",
"thiserror 2.0.12",
"ucd-trie",
]
@@ -1564,13 +1576,12 @@ dependencies = [
[[package]]
name = "rand"
version = "0.9.0"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94"
checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97"
dependencies = [
"rand_chacha 0.9.0",
"rand_core 0.9.3",
"zerocopy 0.8.24",
]
[[package]]
@@ -1706,7 +1717,7 @@ dependencies = [
"serde",
"serde_json",
"sled",
"thiserror",
"thiserror 2.0.12",
"tiny-keccak",
"zerokit_utils",
]
@@ -1770,7 +1781,7 @@ dependencies = [
"primitive-types",
"proptest",
"rand 0.8.5",
"rand 0.9.0",
"rand 0.9.1",
"rlp",
"ruint-macro",
"serde",
@@ -2008,13 +2019,33 @@ version = "0.16.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057"
[[package]]
name = "thiserror"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
dependencies = [
"thiserror-impl 1.0.69",
]
[[package]]
name = "thiserror"
version = "2.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708"
dependencies = [
"thiserror-impl",
"thiserror-impl 2.0.12",
]
[[package]]
name = "thiserror-impl"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.100",
]
[[package]]
@@ -2537,10 +2568,24 @@ dependencies = [
"hex",
"hex-literal",
"lazy_static",
"light-poseidon",
"num-bigint",
"num-traits",
"rand 0.9.1",
"rand_chacha 0.9.0",
"rand_core 0.9.3",
"rln",
"serde",
"sled",
"tiny-keccak",
"vacp2p_pmtree",
"zk-kit-lean-imt",
]
[[package]]
name = "zk-kit-lean-imt"
version = "0.1.1"
source = "git+https://github.com/privacy-scaling-explorations/zk-kit.rust#1014994e0505befc33d51e854c73ab8717b64d90"
dependencies = [
"thiserror 2.0.12",
]

View File

@@ -26,7 +26,7 @@ endif
[ -s "$$NVM_DIR/nvm.sh" ] && \. "$$NVM_DIR/nvm.sh" && \
nvm install 22.14.0 && \
nvm use 22.14.0'
@curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
@cargo install wasm-pack
@echo "\033[1;32m>>> Now run this command to activate Node.js 22.14.0: \033[1;33msource $$HOME/.nvm/nvm.sh && nvm use 22.14.0\033[0m"
build: .pre-build

23
flake.lock generated
View File

@@ -18,7 +18,28 @@
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs"
"nixpkgs": "nixpkgs",
"rust-overlay": "rust-overlay"
}
},
"rust-overlay": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1745289264,
"narHash": "sha256-7nt+UJ7qaIUe2J7BdnEEph9n2eKEwxUwKS/QIr091uA=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "3b7171858c20d5293360042936058fb0c4cb93a9",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
}
},

View File

@@ -4,9 +4,13 @@
inputs = {
# Version 24.11
nixpkgs.url = "github:NixOS/nixpkgs?rev=f44bd8ca21e026135061a0a57dcf3d0775b67a49";
rust-overlay = {
url = "github:oxalica/rust-overlay";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = { self, nixpkgs }:
outputs = { self, nixpkgs, rust-overlay }:
let
stableSystems = [
"x86_64-linux" "aarch64-linux"
@@ -15,7 +19,8 @@
"i686-windows"
];
forAllSystems = nixpkgs.lib.genAttrs stableSystems;
pkgsFor = forAllSystems (system: import nixpkgs { inherit system; });
overlays = [ (import rust-overlay) ];
pkgsFor = forAllSystems (system: import nixpkgs { inherit system overlays; });
in rec
{
packages = forAllSystems (system: let
@@ -29,9 +34,21 @@
pkgs = pkgsFor.${system};
in {
default = pkgs.mkShell {
inputsFrom = [
packages.${system}.default
buildInputs = with pkgs; [
git
cmake
cargo-make
gnuplot
rustup
xz
wasm-pack
rust-bin.stable.latest.default
];
# Shared library liblzma.so.5 used by wasm-pack
shellHook = ''
xz_lib=$(nix-store -q --references $(which xz) | grep xz)
export LD_LIBRARY_PATH=$xz_lib/lib:$LD_LIBRARY_PATH
'';
};
});
};

View File

@@ -25,12 +25,22 @@ serde = "1.0"
lazy_static = "1.5.0"
hex = "0.4"
[dev-dependencies]
ark-bn254 = { version = "0.5.0", features = ["std"] }
num-traits = "0.2.19"
hex-literal = "1.0.0"
tiny-keccak = { version = "2.0.2", features = ["keccak"] }
criterion = { version = "0.4.0", features = ["html_reports"] }
rand = "0.9.1"
rand_chacha = "0.9.0"
rand_core = "0.9.3"
# dev artifact for benching zk-kit lean-imt
light-poseidon = "0.3.0"
zk-kit-lean-imt = { git = "https://github.com/privacy-scaling-explorations/zk-kit.rust", package = "zk-kit-lean-imt" }
rln = { path = "../rln", default-features = false }
lazy_static = "1.5.0"
[features]
default = []
@@ -43,3 +53,6 @@ harness = false
[[bench]]
name = "poseidon_benchmark"
harness = false
[[bench]]
name = "imt_benchy"
harness = false

767
utils/benches/imt_benchy.rs Normal file
View File

@@ -0,0 +1,767 @@
use std::{hint::black_box, str::FromStr, time::Duration};
use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion};
use light_poseidon::{
Poseidon as LtPoseidon, PoseidonBytesHasher as LtPoseidonBytesHasher, PoseidonHasher as _,
};
use rand::RngCore;
use rand_chacha::ChaCha8Rng;
use rand_core::SeedableRng;
use rln::{
circuit::Fr,
hashers::PoseidonHash,
utils::{bytes_le_to_fr, fr_to_bytes_le},
};
use zerokit_utils::{
FullMerkleConfig, FullMerkleTree, Hasher as ZKitUtilsHasher, OptimalMerkleConfig,
OptimalMerkleTree, ZerokitMerkleTree,
};
use zk_kit_lean_imt::{hashed_tree::LeanIMTHasher, lean_imt::LeanIMT};
// ChaCha8Rng is chosen for its portable determinism
struct HashMockStream {
rng: ChaCha8Rng,
}
impl HashMockStream {
fn seeded_stream(seed: u64) -> Self {
let rng = ChaCha8Rng::seed_from_u64(seed);
Self { rng }
}
}
impl Iterator for HashMockStream {
type Item = [u8; 32];
fn next(&mut self) -> Option<Self::Item> {
let mut res = [0; 32];
self.rng.fill_bytes(&mut res);
Some(res)
}
}
#[derive(Debug)]
/// To benchmark the data structure absent of the hashing overhead
struct BenchyNoOpHasher;
/// Backs benches with the IFT (zerokit/rln) Poseidon hasher
struct BenchyIFTHasher;
/// Backs benches with the light-poseidon crate's hasher
struct BenchyLightPosHasher;
// =====
// Shims to the IFT Hasher interface.
// The IFT poseidon hasher already implements it, so it needs no shim here.
// =====
/// No-op "hash" for the zerokit `Hasher` interface: passes the first input
/// element through unchanged, so only tree bookkeeping is measured.
impl ZKitUtilsHasher for BenchyNoOpHasher {
    type Fr = Fr;
    fn default_leaf() -> Self::Fr {
        Fr::default()
    }
    fn hash(input: &[Self::Fr]) -> Self::Fr {
        // Empty input degrades to the default leaf.
        match input.first() {
            Some(first) => *first,
            None => Self::default_leaf(),
        }
    }
}
/// zerokit `Hasher` backed by the light-poseidon implementation.
impl ZKitUtilsHasher for BenchyLightPosHasher {
    type Fr = Fr;
    fn default_leaf() -> Self::Fr {
        Self::Fr::default()
    }
    fn hash(input: &[Self::Fr]) -> Self::Fr {
        // A circom-compatible Poseidon instance is parameterised by arity,
        // so it has to be rebuilt for the current input width.
        let mut hasher = LtPoseidon::<Fr>::new_circom(input.len())
            .expect("light-poseidon supports this arity");
        hasher.hash(input).expect("hashing Fr elements cannot fail here")
    }
}
// =====
// shims for lean imt interface
// =====
/// No-op "hash" for the LeanIMT interface: returns the first node's bytes
/// unchanged, so only tree bookkeeping is measured.
impl<const N: usize> LeanIMTHasher<N> for BenchyNoOpHasher {
    fn hash(input: &[u8]) -> [u8; N] {
        // Take the first node-width chunk. The original hard-coded
        // `input[0..32]`, which makes `try_into` panic for any `N != 32`;
        // slicing by `N` keeps the generic impl correct for every node width.
        input[..N].try_into().unwrap()
    }
}
/// LeanIMT hasher backed by the light-poseidon implementation.
impl LeanIMTHasher<32> for BenchyLightPosHasher {
    fn hash(input: &[u8]) -> [u8; 32] {
        // LeanIMT hands us the concatenated child nodes as one byte slice;
        // split it back into 32-byte field-element chunks for the hasher.
        let chunks = input.chunks(32).collect::<Vec<&[u8]>>();
        LtPoseidon::<Fr>::new_circom(chunks.len())
            .unwrap()
            .hash_bytes_le(&chunks)
            .unwrap()
    }
}
impl LeanIMTHasher<32> for BenchyIFTHasher {
fn hash(input: &[u8]) -> [u8; 32] {
let chunks: Vec<&[u8]> = input.chunks(32).collect();
let mut lt_hasher = LtPoseidon::<Fr>::new_circom(chunks.len()).unwrap();
lt_hasher.hash_bytes_le(&chunks).unwrap()
}
}
/// We start with the data to be hashed, and make the changes needed for them
/// to be valid Fr bytes, just needing raw reinterpretation.
/// Needed for LeanIMT because it processes &[u8], not &[Fr]
/// and we want to do away with that mapping as a performance variable
fn lean_data_prep(raw_vec: &[[u8; 32]]) -> Vec<[u8; 32]> {
    raw_vec
        .iter()
        .cloned()
        // take raw bytes and Fr-ize it (reduced into the field, so the bytes
        // round-trip cleanly from here on)
        .map(|chunk| bytes_le_to_fr(&chunk).0)
        // turn it back into a byte collection
        .map(|bytes| fr_to_bytes_le(&bytes))
        // coerce it into the [u8; 32]s needed for the lean-imt hash signature
        .map(|bytes| std::convert::TryInto::<[u8; 32]>::try_into(bytes).unwrap())
        .collect()
}
/// Build the two input tables shared by the benchmark groups, sized to the
/// largest requested tree.
///
/// Both tables come from the same fixed seed (42) so runs are reproducible:
/// - `data_table`: raw 32-byte leaves for the LeanIMT (`&[u8]`) interface.
/// - `fr_table`: field elements for the zerokit (`&[Fr]`) interface, parsed
///   from a decimal string built by reducing each stream byte mod 10, so
///   `Fr::from_str` always succeeds.
fn spawn_inputs(size_group: &[u32]) -> (Vec<[u8; 32]>, Vec<Fr>) {
    let max = *size_group.iter().max().unwrap() as usize;
    let data_table: Vec<[u8; 32]> = HashMockStream::seeded_stream(42).take(max).collect();
    let fr_table = HashMockStream::seeded_stream(42)
        .take(max)
        .map(|bytes: [u8; 32]| {
            Fr::from_str(
                bytes
                    .iter()
                    .map(|b| format!("{}", b % 10))
                    .collect::<String>()
                    .as_str(),
            )
        })
        .collect::<Result<Vec<_>, _>>()
        .unwrap();
    (data_table, fr_table)
}
/// Benchmark per-leaf (iterative) tree construction with the no-op hasher,
/// so only the data-structure overhead of each tree kind is measured.
pub fn hashless_setup_iterative(c: &mut Criterion) {
    let mut group = c.benchmark_group("hashless tree iterative setup");
    group.measurement_time(Duration::from_secs(10));
    let size_group = [7u32, 13, 17, 40];
    let (data_table, fr_table) = spawn_inputs(&size_group);
    for size in size_group {
        let data_source = &data_table[0..size as usize];
        let fr_source = &fr_table[0..size as usize];
        group.bench_function(BenchmarkId::new("Lean IMT iterative", size), |b| {
            b.iter_batched(
                // Setup: create values for each benchmark iteration
                || LeanIMT::<32>::new(&[], <BenchyNoOpHasher as LeanIMTHasher<32>>::hash).unwrap(),
                // Actual benchmark
                |mut tree| {
                    for d in data_source.iter() {
                        #[allow(clippy::unit_arg)]
                        black_box(tree.insert(d, <BenchyNoOpHasher as LeanIMTHasher<32>>::hash))
                    }
                },
                BatchSize::SmallInput,
            )
        });
        group.bench_function(
            BenchmarkId::new("IFT optimal iterative", size),
            |b| {
                b.iter_batched(
                    // Setup: create values for each benchmark iteration
                    || {
                        OptimalMerkleTree::<BenchyNoOpHasher>::new(
                            (size.ilog2() + 1) as usize,
                            Fr::default(),
                            OptimalMerkleConfig::default(),
                        )
                        .unwrap()
                    },
                    // Actual benchmark
                    |mut tree| {
                        for (i, d) in fr_source.iter().enumerate() {
                            black_box(tree.set(i, *d)).unwrap();
                        }
                    },
                    BatchSize::SmallInput,
                )
            },
        );
        group.bench_function(
            BenchmarkId::new("IFT full iterative", size),
            |b| {
                b.iter_batched(
                    // Setup: create values for each benchmark iteration
                    || {
                        FullMerkleTree::<BenchyNoOpHasher>::new(
                            (size.ilog2() + 1) as usize,
                            Fr::default(),
                            FullMerkleConfig::default(),
                        )
                        .unwrap()
                    },
                    // Actual benchmark
                    |mut tree| {
                        for (i, d) in fr_source.iter().enumerate() {
                            black_box(tree.set(i, *d)).unwrap();
                        }
                    },
                    BatchSize::SmallInput,
                )
            },
        );
    }
    // Was missing: every sibling bench fn closes its group explicitly.
    group.finish();
}
/// Benchmark batch tree construction (`insert_many` / `set_range`) with the
/// no-op hasher, so only the data-structure overhead of each tree kind is
/// measured.
pub fn hashless_setup_batch(c: &mut Criterion) {
    let mut group = c.benchmark_group("hashless tree batch setup");
    group.measurement_time(Duration::from_secs(10));
    let size_group = [7u32, 13, 17, 40];
    let (data_table, fr_table) = spawn_inputs(&size_group);
    for size in size_group {
        let data_source = &data_table[0..size as usize];
        group.bench_function(BenchmarkId::new("Lean IMT batch", size), |b| {
            b.iter_batched(
                // Setup: create values for each benchmark iteration
                || LeanIMT::<32>::new(&[], <BenchyNoOpHasher as LeanIMTHasher<32>>::hash).unwrap(),
                // Actual benchmark
                |mut tree| {
                    black_box(tree.insert_many(
                        data_source,
                        black_box(<BenchyNoOpHasher as LeanIMTHasher<32>>::hash),
                    ))
                },
                BatchSize::SmallInput,
            )
        });
        group.bench_function(
            BenchmarkId::new("IFT optimal batch", size),
            |b| {
                b.iter_batched(
                    // Setup: create values for each benchmark iteration
                    || {
                        let data_source = &fr_table[0..size as usize];
                        let data_source = data_source.iter().copied();
                        let tree = OptimalMerkleTree::<BenchyNoOpHasher>::new(
                            (size.ilog2() + 1) as usize,
                            Fr::default(),
                            OptimalMerkleConfig::default(),
                        )
                        .unwrap();
                        (tree, data_source)
                    },
                    // Actual benchmark
                    |(mut tree, data_source)| black_box(tree.set_range(0, data_source)),
                    BatchSize::SmallInput,
                )
            },
        );
        group.bench_function(
            BenchmarkId::new("IFT full batch", size),
            |b| {
                b.iter_batched(
                    // Setup: create values for each benchmark iteration
                    || {
                        let data_source = &fr_table[0..size as usize];
                        let data_source = data_source.iter().copied();
                        let tree = FullMerkleTree::<BenchyNoOpHasher>::new(
                            (size.ilog2() + 1) as usize,
                            Fr::default(),
                            FullMerkleConfig::default(),
                        )
                        .unwrap();
                        (tree, data_source)
                    },
                    // Actual benchmark
                    |(mut tree, data_source)| black_box(tree.set_range(0, data_source)),
                    BatchSize::SmallInput,
                )
            },
        );
    }
    group.finish();
}
/// Head-to-head benchmark of batch tree construction with real hashing:
/// each tree kind (LeanIMT, IFT optimal, IFT full) is paired with each
/// Poseidon backend (IFT/rln "poseidon" vs "light-poseidon").
fn tree_hash_batch_setup_shootout(c: &mut Criterion) {
    let mut group = c.benchmark_group("hash+tree batch shootout");
    group.measurement_time(Duration::from_secs(10));
    let size_group = [7u32, 13, 17, 40];
    let (data_table, fr_table) = spawn_inputs(&size_group);
    for size in size_group {
        let data_source = &data_table[0..size as usize];
        group.bench_function(BenchmarkId::new("Lean IMT batch poseidon", size), |b| {
            b.iter_batched(
                // Setup: create values for each benchmark iteration
                || {
                    let byte_form = lean_data_prep(data_source);
                    let tree =
                        LeanIMT::<32>::new(&[], <BenchyIFTHasher as LeanIMTHasher<32>>::hash)
                            .unwrap();
                    (tree, byte_form)
                },
                // Actual benchmark
                |(mut tree, byte_form)| {
                    black_box(tree.insert_many(
                        &byte_form,
                        black_box(<BenchyIFTHasher as LeanIMTHasher<32>>::hash),
                    ))
                },
                BatchSize::SmallInput,
            )
        });
        group.bench_function(
            BenchmarkId::new("Lean IMT batch light-poseidon", size),
            |b| {
                b.iter_batched(
                    // Setup: create values for each benchmark iteration
                    || {
                        let byte_form = lean_data_prep(data_source);
                        let tree = LeanIMT::<32>::new(
                            &[],
                            <BenchyLightPosHasher as LeanIMTHasher<32>>::hash,
                        )
                        .unwrap();
                        (tree, byte_form)
                    },
                    // Actual benchmark
                    |(mut tree, data_source)| {
                        black_box(tree.insert_many(
                            &data_source,
                            black_box(<BenchyLightPosHasher as LeanIMTHasher<32>>::hash),
                        ))
                    },
                    BatchSize::SmallInput,
                )
            },
        );
        group.bench_function(
            BenchmarkId::new("IFT optimal batch poseidon", size),
            |b| {
                b.iter_batched(
                    // Setup: create values for each benchmark iteration
                    || {
                        let fr_slice = &fr_table[0..size as usize];
                        let fr_iter = fr_slice.iter().copied();
                        let tree = OptimalMerkleTree::<PoseidonHash>::new(
                            (size.ilog2() + 1) as usize,
                            Fr::default(),
                            OptimalMerkleConfig::default(),
                        )
                        .unwrap();
                        (tree, fr_iter)
                    },
                    // Actual benchmark
                    |(mut tree, data_source)| black_box(tree.set_range(0, data_source)),
                    BatchSize::SmallInput,
                )
            },
        );
        group.bench_function(
            BenchmarkId::new("IFT optimal batch light-poseidon", size),
            |b| {
                b.iter_batched(
                    // Setup: create values for each benchmark iteration
                    || {
                        let fr_slice = &fr_table[0..size as usize];
                        let fr_iter = fr_slice.iter().copied();
                        let tree = OptimalMerkleTree::<BenchyLightPosHasher>::new(
                            (size.ilog2() + 1) as usize,
                            Fr::default(),
                            OptimalMerkleConfig::default(),
                        )
                        .unwrap();
                        (tree, fr_iter)
                    },
                    // Actual benchmark
                    |(mut tree, fr_iter)| black_box(tree.set_range(0, fr_iter)),
                    BatchSize::SmallInput,
                )
            },
        );
        group.bench_function(
            BenchmarkId::new("IFT full batch poseidon", size),
            |b| {
                b.iter_batched(
                    // Setup: create values for each benchmark iteration
                    || {
                        let fr_slice = &fr_table[0..size as usize];
                        let fr_iter = fr_slice.iter().copied();
                        let tree = FullMerkleTree::<PoseidonHash>::new(
                            (size.ilog2() + 1) as usize,
                            Fr::default(),
                            FullMerkleConfig::default(),
                        )
                        .unwrap();
                        (tree, fr_iter)
                    },
                    // Actual benchmark
                    |(mut tree, fr_iter)| black_box(tree.set_range(0, fr_iter)),
                    BatchSize::SmallInput,
                )
            },
        );
        group.bench_function(
            BenchmarkId::new("IFT full batch light-poseidon", size),
            |b| {
                b.iter_batched(
                    // Setup: create values for each benchmark iteration
                    || {
                        let fr_slice = &fr_table[0..size as usize];
                        let fr_iter = fr_slice.iter().copied();
                        let tree = FullMerkleTree::<BenchyLightPosHasher>::new(
                            (size.ilog2() + 1) as usize,
                            Fr::default(),
                            FullMerkleConfig::default(),
                        )
                        .unwrap();
                        (tree, fr_iter)
                    },
                    // Actual benchmark
                    |(mut tree, fr_iter)| black_box(tree.set_range(0, fr_iter)),
                    BatchSize::SmallInput,
                )
            },
        );
    }
    group.finish();
}
/// Head-to-head benchmark of Merkle proof *generation*: each tree kind is
/// paired with each Poseidon backend. The tree is built fully in the setup
/// closure, so only `generate_proof` / `proof` for leaf 0 is timed.
pub fn proof_gen_shootout(c: &mut Criterion) {
    let mut group = c.benchmark_group("MTree proof-gen shootout");
    group.measurement_time(Duration::from_secs(15));
    let size_group = [7u32, 13, 17, 40];
    let (data_table, fr_table) = spawn_inputs(&size_group);
    for size in size_group {
        let data_source = &data_table[0..size as usize];
        group.bench_function(
            BenchmarkId::new("Lean IMT + ift_pos proof generation", size),
            |b| {
                b.iter_batched(
                    // Setup: build the whole tree outside the timed section
                    || {
                        let frd_byte_chunks = lean_data_prep(data_source);
                        LeanIMT::<32>::new(
                            &frd_byte_chunks,
                            <BenchyIFTHasher as LeanIMTHasher<32>>::hash,
                        )
                        .unwrap()
                    },
                    // Actual benchmark
                    |tree| black_box(tree.generate_proof(0)),
                    BatchSize::SmallInput,
                )
            },
        );
        group.bench_function(
            BenchmarkId::new("Lean IMT + light-pos proof generation", size),
            |b| {
                b.iter_batched(
                    // Setup: build the whole tree outside the timed section
                    || {
                        let frd_byte_chunks = lean_data_prep(data_source);
                        LeanIMT::<32>::new(
                            &frd_byte_chunks,
                            <BenchyLightPosHasher as LeanIMTHasher<32>>::hash,
                        )
                        .unwrap()
                    },
                    // Actual benchmark
                    |tree| black_box(tree.generate_proof(0)),
                    BatchSize::SmallInput,
                )
            },
        );
        group.bench_function(
            BenchmarkId::new("IFT full + ift-pos proof generation", size),
            |b| {
                b.iter_batched(
                    // Setup: build the whole tree outside the timed section
                    || {
                        let fr_form: Vec<Fr> = data_source
                            .iter()
                            .cloned()
                            // take raw bytes and Fr-ize it
                            .map(|chunk| bytes_le_to_fr(&chunk).0)
                            .collect();
                        let mut tree = FullMerkleTree::<PoseidonHash>::new(
                            size.ilog2() as usize + 1,
                            Fr::default(),
                            FullMerkleConfig::default(),
                        )
                        .unwrap();
                        tree.set_range(0, fr_form.into_iter()).unwrap();
                        tree
                    },
                    // Actual benchmark
                    |tree| black_box(tree.proof(0)),
                    BatchSize::SmallInput,
                )
            },
        );
        group.bench_function(
            BenchmarkId::new("IFT full + light-pos proof generation", size),
            |b| {
                b.iter_batched(
                    // Setup: build the whole tree outside the timed section
                    || {
                        let fr_form: Vec<Fr> = data_source
                            .iter()
                            .cloned()
                            // take raw bytes and Fr-ize it
                            .map(|chunk| bytes_le_to_fr(&chunk).0)
                            .collect();
                        let mut tree = FullMerkleTree::<BenchyLightPosHasher>::new(
                            size.ilog2() as usize + 1,
                            Fr::default(),
                            FullMerkleConfig::default(),
                        )
                        .unwrap();
                        tree.set_range(0, fr_form.into_iter()).unwrap();
                        tree
                    },
                    // Actual benchmark
                    |tree| black_box(tree.proof(0)),
                    BatchSize::SmallInput,
                )
            },
        );
        group.bench_function(
            BenchmarkId::new("IFT optimal + ift-pos proof generation", size),
            |b| {
                b.iter_batched(
                    // Setup: build the whole tree outside the timed section
                    || {
                        let fr_slice = &fr_table[0..size as usize];
                        let fr_iter = fr_slice.iter().copied();
                        let mut tree = OptimalMerkleTree::<PoseidonHash>::new(
                            size.ilog2() as usize + 1,
                            Fr::default(),
                            OptimalMerkleConfig::default(),
                        )
                        .unwrap();
                        tree.set_range(0, fr_iter).unwrap();
                        tree
                    },
                    // Actual benchmark
                    |tree| black_box(tree.proof(0)),
                    BatchSize::SmallInput,
                )
            },
        );
        group.bench_function(
            BenchmarkId::new("IFT optimal + light-pos proof generation", size),
            |b| {
                b.iter_batched(
                    // Setup: build the whole tree outside the timed section
                    || {
                        let fr_slice = &fr_table[0..size as usize];
                        let fr_iter = fr_slice.iter().copied();
                        let mut tree = OptimalMerkleTree::<BenchyLightPosHasher>::new(
                            size.ilog2() as usize + 1,
                            Fr::default(),
                            OptimalMerkleConfig::default(),
                        )
                        .unwrap();
                        tree.set_range(0, fr_iter).unwrap();
                        tree
                    },
                    // Actual benchmark
                    |tree| black_box(tree.proof(0)),
                    BatchSize::SmallInput,
                )
            },
        );
    }
    // Was missing: close the group explicitly like the other bench fns.
    group.finish();
}
pub fn verification_shootout(c: &mut Criterion) {
let mut group = c.benchmark_group("MTree verification shootout");
group.measurement_time(Duration::from_secs(15));
let size_group = [7u32, 17, 40];
for size in size_group {
let data_stream = HashMockStream::seeded_stream(size as u64);
let data_source = data_stream.take(size as usize).collect::<Vec<[u8; 32]>>();
group.bench_function(
BenchmarkId::new("Lean IMT + ift-pos verification", size),
|b| {
b.iter_batched(
// Setup: create values for each benchmark iteration
|| {
let frd_bytes = lean_data_prep(&data_source);
let tree = LeanIMT::<32>::new(
&frd_bytes,
<BenchyIFTHasher as LeanIMTHasher<32>>::hash,
)
.unwrap();
let proof = tree.generate_proof(0).unwrap();
// assert!(LeanIMT::verify_proof(&proof, <BenchyIFTHasher as LeanIMTHasher<32>>::hash));
proof
},
// Actual benchmark
|proof| {
black_box(LeanIMT::verify_proof(
&proof,
<BenchyIFTHasher as LeanIMTHasher<32>>::hash,
))
},
BatchSize::SmallInput,
)
},
);
group.bench_function(
BenchmarkId::new("Lean IMT + light-pos verification", size),
|b| {
b.iter_batched(
// Setup: create values for each benchmark iteration
|| {
let frd_bytes = lean_data_prep(&data_source);
let tree = LeanIMT::<32>::new(
&frd_bytes,
<BenchyLightPosHasher as LeanIMTHasher<32>>::hash,
)
.unwrap();
let proof = tree.generate_proof(0).unwrap();
// assert!(LeanIMT::verify_proof(&proof, <BenchyIFTHasher as LeanIMTHasher<32>>::hash));
proof
},
// Actual benchmark
|proof| {
black_box(LeanIMT::verify_proof(
&proof,
<BenchyLightPosHasher as LeanIMTHasher<32>>::hash,
))
},
BatchSize::SmallInput,
)
},
);
group.bench_function(
BenchmarkId::new("IFT full + light-pos verification", size),
|b| {
b.iter_batched(
// Setup: create values for each benchmark iteration
|| {
let fr_form: Vec<Fr> = data_source
.iter()
.cloned()
// take raw bytes and Fr-ize it
.map(|chunk| bytes_le_to_fr(&chunk).0)
.collect();
let mut tree = FullMerkleTree::<BenchyLightPosHasher>::new(
size.ilog2() as usize + 1,
Fr::default(),
FullMerkleConfig::default(),
)
.unwrap();
let first_leaf = *fr_form.first().unwrap();
tree.set_range(0, fr_form.into_iter()).unwrap();
let proof = tree.proof(0).unwrap();
// assert!(tree.verify(&first_leaf, &proof).unwrap());
(first_leaf, tree, proof)
},
// Actual benchmark
|(first_leaf, tree, proof)| black_box(tree.verify(&first_leaf, &proof)),
BatchSize::SmallInput,
)
},
);
group.bench_function(
BenchmarkId::new("IFT full + ift-pos verification", size),
|b| {
b.iter_batched(
// Setup: create values for each benchmark iteration
|| {
let fr_form: Vec<Fr> = data_source
.iter()
.cloned()
// take raw bytes and Fr-ize it
.map(|chunk| bytes_le_to_fr(&chunk).0)
.collect();
let mut tree = FullMerkleTree::<PoseidonHash>::new(
size.ilog2() as usize + 1,
Fr::default(),
FullMerkleConfig::default(),
)
.unwrap();
let first_leaf = *fr_form.first().unwrap();
tree.set_range(0, fr_form.into_iter()).unwrap();
let proof = tree.proof(0).unwrap();
// assert!(tree.verify(&first_leaf, &proof).unwrap());
(first_leaf, tree, proof)
},
// Actual benchmark
|(first_leaf, tree, proof)| black_box(tree.verify(&first_leaf, &proof)),
BatchSize::SmallInput,
)
},
);
group.bench_function(
BenchmarkId::new("IFT optimal + light-pos verification", size),
|b| {
b.iter_batched(
// Setup: create values for each benchmark iteration
|| {
let fr_form: Vec<Fr> = data_source
.iter()
.cloned()
// take raw bytes and Fr-ize it
.map(|chunk| bytes_le_to_fr(&chunk).0)
.collect();
let mut tree = OptimalMerkleTree::<BenchyLightPosHasher>::new(
size.ilog2() as usize + 1,
Fr::default(),
OptimalMerkleConfig::default(),
)
.unwrap();
let first_leaf = *fr_form.first().unwrap();
tree.set_range(0, fr_form.into_iter()).unwrap();
let proof = tree.proof(0).unwrap();
// assert!(tree.verify(&first_leaf, &proof).unwrap());
(first_leaf, tree, proof)
},
// Actual benchmark
|(first_leaf, tree, proof)| black_box(tree.verify(&first_leaf, &proof)),
BatchSize::SmallInput,
)
},
);
group.bench_function(
BenchmarkId::new("IFT optimal + ift-pos verification", size),
|b| {
b.iter_batched(
// Setup: create values for each benchmark iteration
|| {
let fr_form: Vec<Fr> = data_source
.iter()
.cloned()
// take raw bytes and Fr-ize it
.map(|chunk| bytes_le_to_fr(&chunk).0)
.collect();
let mut tree = OptimalMerkleTree::<PoseidonHash>::new(
size.ilog2() as usize + 1,
Fr::default(),
OptimalMerkleConfig::default(),
)
.unwrap();
let first_leaf = *fr_form.first().unwrap();
tree.set_range(0, fr_form.into_iter()).unwrap();
let proof = tree.proof(0).unwrap();
// assert!(tree.verify(&first_leaf, &proof).unwrap());
(first_leaf, tree, proof)
},
// Actual benchmark
|(first_leaf, tree, proof)| black_box(tree.verify(&first_leaf, &proof)),
BatchSize::SmallInput,
)
},
);
}
}
// Register the benchmark group, then hand it to the criterion entry point.
// (Macro item order is irrelevant to rustc; group-before-main is merely the
// conventional reading order.)
criterion_group! {
    name = tree_benchies;
    config = Criterion::default();
    targets =
        hashless_setup_batch,
        hashless_setup_iterative,
        tree_hash_batch_setup_shootout,
        proof_gen_shootout,
        verification_shootout
}
criterion_main!(tree_benchies);

View File

@@ -1,6 +1,8 @@
use criterion::{criterion_group, criterion_main, Criterion};
use hex_literal::hex;
use lazy_static::lazy_static;
use rand::RngCore;
use rand_chacha::ChaCha8Rng;
use rand_core::SeedableRng;
use std::{fmt::Display, str::FromStr};
use tiny_keccak::{Hasher as _, Keccak};
use zerokit_utils::{
@@ -14,6 +16,28 @@ struct Keccak256;
#[derive(Clone, Copy, Eq, PartialEq, Debug, Default)]
struct TestFr([u8; 32]);
// ChaCha8Rng is chosen for its portable determinism
struct FrRngStream {
rng: ChaCha8Rng,
}
impl FrRngStream {
fn seeded_stream(seed: u64) -> Self {
let rng = ChaCha8Rng::seed_from_u64(seed);
Self { rng }
}
}
impl Iterator for FrRngStream {
type Item = TestFr;
fn next(&mut self) -> Option<Self::Item> {
let mut res = [0; 32];
self.rng.fill_bytes(&mut res);
Some(TestFr(res))
}
}
impl Hasher for Keccak256 {
type Fr = TestFr;
@@ -47,13 +71,11 @@ impl FromStr for TestFr {
}
lazy_static! {
static ref LEAVES: [TestFr; 4] = [
hex!("0000000000000000000000000000000000000000000000000000000000000001"),
hex!("0000000000000000000000000000000000000000000000000000000000000002"),
hex!("0000000000000000000000000000000000000000000000000000000000000003"),
hex!("0000000000000000000000000000000000000000000000000000000000000004"),
]
.map(TestFr);
static ref LEAVES: [TestFr; 40] = FrRngStream::seeded_stream(42)
.take(40)
.collect::<Vec<_>>()
.try_into()
.unwrap();
}
pub fn optimal_merkle_tree_benchmark(c: &mut Criterion) {
@@ -75,7 +97,7 @@ pub fn optimal_merkle_tree_benchmark(c: &mut Criterion) {
c.bench_function("OptimalMerkleTree::override_range", |b| {
b.iter(|| {
tree.override_range(0, LEAVES.into_iter(), [0, 1, 2, 3].into_iter())
tree.override_range(0, LEAVES.into_iter().take(4), [0, 1, 2, 3].into_iter())
.unwrap();
})
});
@@ -124,7 +146,7 @@ pub fn full_merkle_tree_benchmark(c: &mut Criterion) {
c.bench_function("FullMerkleTree::override_range", |b| {
b.iter(|| {
tree.override_range(0, LEAVES.into_iter(), [0, 1, 2, 3].into_iter())
tree.override_range(0, LEAVES.into_iter().take(4), [0, 1, 2, 3].into_iter())
.unwrap();
})
});