Mirror of https://github.com/vacp2p/zerokit.git, synced 2026-01-10 06:58:03 -05:00
feat(rln): extend error handling for rln module (#358)
Changes:
- Unified error types (`PoseidonError`, `HashError`, etc.) across hashing, keygen, witness calculation, and serialization for consistent and descriptive error handling.
- Refactored tests and examples to use `unwrap()` where safe, and limited `expect()` in library code to non-panicking cases with clear messaging.
- Improved witness and proof generation by removing panicking code paths and enforcing proper error propagation.
- Cleaned up outdated imports, removed unused operations in `graph.rs`, and updated the public API documentation.
- Updated the C, Nim, and WASM FFI bindings with more robust serialization and clearer error log messages.
- Added keywords to package.json and updated dependencies in Makefile.toml and Nightly CI.
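For orientation, here is a minimal sketch (not part of this commit) of what implementing the reworked `Hasher` trait looks like after this change: `hash` is now fallible and returns the unified `HashError` introduced here, which converts into `ZerokitMerkleTreeError` inside the tree code. `DemoFr` and `XorHasher` are hypothetical, illustration-only names; the import paths follow the new module layout shown in this diff.

```rust
use std::{fmt, str::FromStr};

use zerokit_utils::{error::HashError, merkle_tree::Hasher};

/// Hypothetical 32-byte node type (mirrors the `TestFr` used by the tests in this diff).
#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)]
struct DemoFr([u8; 32]);

impl fmt::Display for DemoFr {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:02x?}", self.0)
    }
}

impl FromStr for DemoFr {
    type Err = std::array::TryFromSliceError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(DemoFr(s.as_bytes().try_into()?))
    }
}

/// Hypothetical hasher that XOR-folds its inputs; real code would use Keccak or Poseidon.
#[derive(Clone, Copy, PartialEq, Eq)]
struct XorHasher;

impl Hasher for XorHasher {
    type Fr = DemoFr;
    // Any error type convertible into `ZerokitMerkleTreeError` works;
    // the `HashError` added by this commit is the natural choice.
    type Error = HashError;

    fn default_leaf() -> Self::Fr {
        DemoFr([0; 32])
    }

    fn hash(inputs: &[Self::Fr]) -> Result<Self::Fr, HashError> {
        if inputs.is_empty() {
            // Failure is reported as a typed error instead of a panic.
            return Err(HashError::Generic("no inputs to hash".to_string()));
        }
        let mut out = [0u8; 32];
        for input in inputs {
            for (acc, byte) in out.iter_mut().zip(input.0.iter()) {
                *acc ^= byte;
            }
        }
        Ok(DemoFr(out))
    }
}
```

With such an implementation, tree operations (`new`, `set`, `proof`, `verify`, `compute_root_from`) propagate hash failures as `Err(ZerokitMerkleTreeError::HashError(..))` rather than panicking, which is the behavior the hunks below introduce.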
@@ -3,8 +3,11 @@ use std::{fmt::Display, str::FromStr, sync::LazyLock};
use criterion::{criterion_group, criterion_main, Criterion};
use tiny_keccak::{Hasher as _, Keccak};
use zerokit_utils::{
FullMerkleConfig, FullMerkleTree, Hasher, OptimalMerkleConfig, OptimalMerkleTree,
ZerokitMerkleTree,
error::HashError,
merkle_tree::{
FullMerkleConfig, FullMerkleTree, Hasher, OptimalMerkleConfig, OptimalMerkleTree,
ZerokitMerkleTree,
},
};

#[derive(Clone, Copy, Eq, PartialEq)]

@@ -15,19 +18,20 @@ struct TestFr([u8; 32]);

impl Hasher for Keccak256 {
type Fr = TestFr;
type Error = HashError;

fn default_leaf() -> Self::Fr {
TestFr([0; 32])
}

fn hash(inputs: &[Self::Fr]) -> Self::Fr {
fn hash(inputs: &[Self::Fr]) -> Result<Self::Fr, HashError> {
let mut output = [0; 32];
let mut hasher = Keccak::v256();
for element in inputs {
hasher.update(element.0.as_slice());
}
hasher.finalize(&mut output);
TestFr(output)
Ok(TestFr(output))
}
}

@@ -2,7 +2,7 @@ use std::hint::black_box;

use ark_bn254::Fr;
use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, Throughput};
use zerokit_utils::Poseidon;
use zerokit_utils::poseidon::Poseidon;

const ROUND_PARAMS: [(usize, usize, usize, usize); 8] = [
(2, 8, 56, 0),
utils/src/error.rs (new file, +11)
@@ -0,0 +1,11 @@
use super::poseidon::error::PoseidonError;
pub use crate::merkle_tree::{FromConfigError, ZerokitMerkleTreeError};

/// Errors that can occur during hashing operations.
#[derive(Debug, thiserror::Error)]
pub enum HashError {
#[error("Poseidon hash error: {0}")]
Poseidon(#[from] PoseidonError),
#[error("Generic hash error: {0}")]
Generic(String),
}

@@ -1,10 +1,4 @@
pub mod poseidon;
pub use self::poseidon::*;

pub mod error;
pub mod merkle_tree;
pub use self::merkle_tree::*;

#[cfg(feature = "pmtree-ft")]
pub mod pm_tree;
#[cfg(feature = "pmtree-ft")]
pub use self::pm_tree::*;
pub mod poseidon;

@@ -1,8 +1,12 @@
#[derive(thiserror::Error, Debug)]
use crate::error::HashError;

/// Errors that can occur during Merkle tree operations
#[derive(Debug, thiserror::Error)]
pub enum ZerokitMerkleTreeError {
#[error("Invalid index")]
InvalidIndex,
// InvalidProof,
#[error("Invalid indices")]
InvalidIndices,
#[error("Leaf index out of bounds")]
InvalidLeaf,
#[error("Level exceeds tree depth")]

@@ -20,8 +24,11 @@ pub enum ZerokitMerkleTreeError {
#[cfg(feature = "pmtree-ft")]
#[error("Pmtree error: {0}")]
PmtreeErrorKind(#[from] pmtree::PmtreeErrorKind),
#[error("Hash error: {0}")]
HashError(#[from] HashError),
}

/// Errors that can occur while creating Merkle tree from config
#[derive(Debug, thiserror::Error)]
pub enum FromConfigError {
#[error("Error while reading pmtree config: {0}")]

@@ -30,4 +37,6 @@ pub enum FromConfigError {
MissingPath,
#[error("Error while creating pmtree config: path already exists")]
PathExists,
#[error("Error while creating pmtree default temp path: {0}")]
IoError(#[from] std::io::Error),
}
@@ -7,9 +7,9 @@ use std::{

use rayon::iter::{IntoParallelIterator, ParallelIterator};

use crate::merkle_tree::{
use super::{
error::{FromConfigError, ZerokitMerkleTreeError},
FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES,
merkle_tree::{FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES},
};

// Full Merkle Tree Implementation

@@ -40,7 +40,7 @@ where

/// Element of a Merkle proof
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum FullMerkleBranch<H: Hasher> {
pub(crate) enum FullMerkleBranch<H: Hasher> {
/// Left branch taken, value is the right sibling hash.
Left(H::Fr),

@@ -50,7 +50,7 @@ pub enum FullMerkleBranch<H: Hasher> {

/// Merkle proof path, bottom to top.
#[derive(Clone, PartialEq, Eq)]
pub struct FullMerkleProof<H: Hasher>(pub Vec<FullMerkleBranch<H>>);
pub struct FullMerkleProof<H: Hasher>(Vec<FullMerkleBranch<H>>);

#[derive(Default)]
pub struct FullMerkleConfig(());

@@ -87,7 +87,7 @@ where
let mut cached_nodes: Vec<H::Fr> = Vec::with_capacity(depth + 1);
cached_nodes.push(default_leaf);
for i in 0..depth {
cached_nodes.push(H::hash(&[cached_nodes[i]; 2]));
cached_nodes.push(H::hash(&[cached_nodes[i]; 2]).map_err(Into::into)?);
}
cached_nodes.reverse();

@@ -164,13 +164,14 @@ where
let mut idx = self.capacity() + index - 1;
let mut nd = self.depth;
loop {
let parent = self.parent(idx).expect("parent should exist");
let parent = self
.parent(idx)
.ok_or(ZerokitMerkleTreeError::InvalidIndex)?;
nd -= 1;
if nd == n {
return Ok(self.nodes[parent]);
} else {
idx = parent;
continue;
}
}
}

@@ -225,7 +226,10 @@ where
J: ExactSizeIterator<Item = usize>,
{
let indices = indices.into_iter().collect::<Vec<_>>();
let min_index = *indices.first().expect("indices should not be empty");
if indices.is_empty() {
return Err(ZerokitMerkleTreeError::InvalidIndices);
}
let min_index = indices[0];
let leaves_vec = leaves.into_iter().collect::<Vec<_>>();

let max_index = start + leaves_vec.len();

@@ -291,7 +295,7 @@ where
hash: &FrOf<Self::Hasher>,
merkle_proof: &FullMerkleProof<H>,
) -> Result<bool, ZerokitMerkleTreeError> {
Ok(merkle_proof.compute_root_from(hash) == self.root())
Ok(merkle_proof.compute_root_from(hash)? == self.root())
}

fn set_metadata(&mut self, metadata: &[u8]) -> Result<(), ZerokitMerkleTreeError> {

@@ -351,17 +355,20 @@ where
{
// Use parallel processing when the number of pairs exceeds the threshold
if end_parent - start_parent + 1 >= MIN_PARALLEL_NODES {
let updates: Vec<(usize, H::Fr)> = (start_parent..=end_parent)
#[allow(clippy::type_complexity)]
let updates: Result<Vec<(usize, H::Fr)>, ZerokitMerkleTreeError> = (start_parent
..=end_parent)
.into_par_iter()
.map(|parent| {
let left_child = self.first_child(parent);
let right_child = left_child + 1;
let hash = H::hash(&[self.nodes[left_child], self.nodes[right_child]]);
(parent, hash)
let hash = H::hash(&[self.nodes[left_child], self.nodes[right_child]])
.map_err(Into::into)?;
Ok((parent, hash))
})
.collect();

for (parent, hash) in updates {
for (parent, hash) in updates? {
self.nodes[parent] = hash;
}
} else {

@@ -370,7 +377,8 @@ where
let left_child = self.first_child(parent);
let right_child = left_child + 1;
self.nodes[parent] =
H::hash(&[self.nodes[left_child], self.nodes[right_child]]);
H::hash(&[self.nodes[left_child], self.nodes[right_child]])
.map_err(Into::into)?;
}
}

@@ -421,10 +429,13 @@ impl<H: Hasher> ZerokitMerkleProof for FullMerkleProof<H> {
}

/// Computes the Merkle root corresponding by iteratively hashing a Merkle proof with a given input leaf
fn compute_root_from(&self, hash: &FrOf<Self::Hasher>) -> FrOf<Self::Hasher> {
self.0.iter().fold(*hash, |hash, branch| match branch {
FullMerkleBranch::Left(sibling) => H::hash(&[hash, *sibling]),
FullMerkleBranch::Right(sibling) => H::hash(&[*sibling, hash]),
fn compute_root_from(
&self,
hash: &FrOf<Self::Hasher>,
) -> Result<FrOf<Self::Hasher>, ZerokitMerkleTreeError> {
self.0.iter().try_fold(*hash, |hash, branch| match branch {
FullMerkleBranch::Left(sibling) => H::hash(&[hash, *sibling]).map_err(Into::into),
FullMerkleBranch::Right(sibling) => H::hash(&[*sibling, hash]).map_err(Into::into),
})
}
}
@@ -7,33 +7,30 @@
// Merkle tree implementations are adapted from https://github.com/kilic/rln/blob/master/src/merkle.rs
// and https://github.com/worldcoin/semaphore-rs/blob/d462a4372f1fd9c27610f2acfe4841fab1d396aa/src/merkle_tree.rs

//!
//! # TODO
//!
//! * Disk based storage backend (using mmaped files should be easy)
//! * Implement serialization for tree and Merkle proof

use std::{
fmt::{Debug, Display},
str::FromStr,
};

use crate::merkle_tree::error::ZerokitMerkleTreeError;
use super::error::ZerokitMerkleTreeError;

/// Enables parallel hashing when there are at least 8 nodes (4 pairs to hash), justifying the overhead.
pub const MIN_PARALLEL_NODES: usize = 8;

/// In the Hasher trait we define the node type, the default leaf
/// and the hash function used to initialize a Merkle Tree implementation
/// In the Hasher trait we define the node type, the default leaf,
/// and the hash function used to initialize a Merkle Tree implementation.
pub trait Hasher {
/// Type of the leaf and tree node
type Fr: Clone + Copy + Eq + Default + Debug + Display + FromStr + Send + Sync;

/// Error type for hash operations - must be convertible to ZerokitMerkleTreeError
type Error: Into<ZerokitMerkleTreeError> + std::error::Error + Send + Sync + 'static;

/// Returns the default tree leaf
fn default_leaf() -> Self::Fr;

/// Utility to compute the hash of an intermediate node
fn hash(input: &[Self::Fr]) -> Self::Fr;
fn hash(input: &[Self::Fr]) -> Result<Self::Fr, Self::Error>;
}

pub type FrOf<H> = <H as Hasher>::Fr;

@@ -101,5 +98,8 @@ pub trait ZerokitMerkleProof {
fn leaf_index(&self) -> usize;
fn get_path_elements(&self) -> Vec<FrOf<Self::Hasher>>;
fn get_path_index(&self) -> Vec<Self::Index>;
fn compute_root_from(&self, leaf: &FrOf<Self::Hasher>) -> FrOf<Self::Hasher>;
fn compute_root_from(
&self,
leaf: &FrOf<Self::Hasher>,
) -> Result<FrOf<Self::Hasher>, ZerokitMerkleTreeError>;
}
@@ -4,8 +4,7 @@ pub mod full_merkle_tree;
pub mod merkle_tree;
pub mod optimal_merkle_tree;

pub use self::{
full_merkle_tree::{FullMerkleConfig, FullMerkleProof, FullMerkleTree},
merkle_tree::{FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES},
optimal_merkle_tree::{OptimalMerkleConfig, OptimalMerkleProof, OptimalMerkleTree},
};
pub use error::{FromConfigError, ZerokitMerkleTreeError};
pub use full_merkle_tree::{FullMerkleConfig, FullMerkleProof, FullMerkleTree};
pub use merkle_tree::{FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES};
pub use optimal_merkle_tree::{OptimalMerkleConfig, OptimalMerkleProof, OptimalMerkleTree};
@@ -2,9 +2,9 @@ use std::{cmp::max, collections::HashMap, fmt::Debug, str::FromStr};

use rayon::iter::{IntoParallelIterator, ParallelIterator};

use crate::merkle_tree::{
use super::{
error::{FromConfigError, ZerokitMerkleTreeError},
FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES,
merkle_tree::{FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES},
};

// Optimal Merkle Tree Implementation

@@ -79,7 +79,7 @@ where
let mut cached_nodes: Vec<H::Fr> = Vec::with_capacity(depth + 1);
cached_nodes.push(default_leaf);
for i in 0..depth {
cached_nodes.push(H::hash(&[cached_nodes[i]; 2]));
cached_nodes.push(H::hash(&[cached_nodes[i]; 2]).map_err(Into::into)?);
}
cached_nodes.reverse();

@@ -197,7 +197,10 @@ where
J: ExactSizeIterator<Item = usize>,
{
let indices = indices.into_iter().collect::<Vec<_>>();
let min_index = *indices.first().expect("indices should not be empty");
if indices.is_empty() {
return Err(ZerokitMerkleTreeError::InvalidIndices);
}
let min_index = indices[0];
let leaves_vec = leaves.into_iter().collect::<Vec<_>>();

let max_index = start + leaves_vec.len();

@@ -248,10 +251,7 @@ where
let mut depth = self.depth;
loop {
i ^= 1;
witness.push((
self.get_node(depth, i),
(1 - (i & 1)).try_into().expect("0 or 1 expected"),
));
witness.push((self.get_node(depth, i), (1 - (i & 1)) as u8));
i >>= 1;
depth -= 1;
if depth == 0 {

@@ -274,7 +274,7 @@ where
if merkle_proof.length() != self.depth {
return Err(ZerokitMerkleTreeError::InvalidMerkleProof);
}
let expected_root = merkle_proof.compute_root_from(leaf);
let expected_root = merkle_proof.compute_root_from(leaf)?;
Ok(expected_root.eq(&self.root()))
}

@@ -304,9 +304,9 @@ where

/// Computes the hash of a node’s two children at the given depth.
/// If the index is odd, it is rounded down to the nearest even index.
fn hash_couple(&self, depth: usize, index: usize) -> H::Fr {
fn hash_couple(&self, depth: usize, index: usize) -> Result<H::Fr, ZerokitMerkleTreeError> {
let b = index & !1;
H::hash(&[self.get_node(depth, b), self.get_node(depth, b + 1)])
H::hash(&[self.get_node(depth, b), self.get_node(depth, b + 1)]).map_err(Into::into)
}

/// Updates parent hashes after modifying a range of leaf nodes.

@@ -330,25 +330,29 @@ where

// Use parallel processing when the number of pairs exceeds the threshold
if current_index_max - current_index >= MIN_PARALLEL_NODES {
let updates: Vec<((usize, usize), H::Fr)> = (current_index..current_index_max)
#[allow(clippy::type_complexity)]
let updates: Result<
Vec<((usize, usize), H::Fr)>,
ZerokitMerkleTreeError,
> = (current_index..current_index_max)
.step_by(2)
.collect::<Vec<_>>()
.into_par_iter()
.map(|index| {
// Hash two child nodes at positions (current_depth, index) and (current_depth, index + 1)
let hash = self.hash_couple(current_depth, index);
let hash = self.hash_couple(current_depth, index)?;
// Return the computed parent hash and its position at
((parent_depth, index >> 1), hash)
Ok(((parent_depth, index >> 1), hash))
})
.collect();

for (parent, hash) in updates {
for (parent, hash) in updates? {
self.nodes.insert(parent, hash);
}
} else {
// Otherwise, fallback to sequential update for small ranges
for index in (current_index..current_index_max).step_by(2) {
let hash = self.hash_couple(current_depth, index);
let hash = self.hash_couple(current_depth, index)?;
self.nodes.insert((parent_depth, index >> 1), hash);
}
}

@@ -396,16 +400,16 @@ where
}

/// Computes the Merkle root corresponding by iteratively hashing a Merkle proof with a given input leaf
fn compute_root_from(&self, leaf: &H::Fr) -> H::Fr {
fn compute_root_from(&self, leaf: &H::Fr) -> Result<H::Fr, ZerokitMerkleTreeError> {
let mut acc: H::Fr = *leaf;
for w in self.0.iter() {
if w.1 == 0 {
acc = H::hash(&[acc, w.0]);
acc = H::hash(&[acc, w.0]).map_err(Into::into)?;
} else {
acc = H::hash(&[w.0, acc]);
acc = H::hash(&[w.0, acc]).map_err(Into::into)?;
}
}
acc
Ok(acc)
}
}
@@ -1,5 +1,7 @@
#![cfg(feature = "pmtree-ft")]

pub mod sled_adapter;

pub use pmtree;
pub use sled::{Config, Mode};

pub use self::sled_adapter::SledDB;
pub use sled_adapter::SledDB;
utils/src/poseidon/error.rs (new file, +8)
@@ -0,0 +1,8 @@
/// Errors that can occur during Poseidon hash computations
#[derive(Debug, thiserror::Error)]
pub enum PoseidonError {
#[error("No parameters found for input length {0}")]
NoParametersForInputLength(usize),
#[error("Empty input provided")]
EmptyInput,
}

@@ -1,4 +1,5 @@
pub mod poseidon_hash;
pub use poseidon_hash::Poseidon;

pub mod error;
pub mod poseidon_constants;
pub mod poseidon_hash;

pub use self::poseidon_hash::Poseidon;
@@ -12,14 +12,14 @@
use ark_ff::PrimeField;
use num_bigint::BigUint;

pub struct PoseidonGrainLFSR {
struct PoseidonGrainLFSR {
pub prime_num_bits: u64,
pub state: [bool; 80],
pub head: usize,
}

impl PoseidonGrainLFSR {
pub fn new(
fn new(
is_field: u64,
is_sbox_an_inverse: u64,
prime_num_bits: u64,

@@ -92,7 +92,7 @@ impl PoseidonGrainLFSR {
res
}

pub fn get_bits(&mut self, num_bits: usize) -> Vec<bool> {
fn get_bits(&mut self, num_bits: usize) -> Vec<bool> {
let mut res = Vec::new();

for _ in 0..num_bits {

@@ -114,10 +114,7 @@ impl PoseidonGrainLFSR {
res
}

pub fn get_field_elements_rejection_sampling<F: PrimeField>(
&mut self,
num_elems: usize,
) -> Vec<F> {
fn get_field_elements_rejection_sampling<F: PrimeField>(&mut self, num_elems: usize) -> Vec<F> {
assert_eq!(F::MODULUS_BIT_SIZE as u64, self.prime_num_bits);
let modulus: BigUint = F::MODULUS.into();

@@ -151,7 +148,7 @@ impl PoseidonGrainLFSR {
res
}

pub fn get_field_elements_mod_p<F: PrimeField>(&mut self, num_elems: usize) -> Vec<F> {
fn get_field_elements_mod_p<F: PrimeField>(&mut self, num_elems: usize) -> Vec<F> {
assert_eq!(F::MODULUS_BIT_SIZE as u64, self.prime_num_bits);

let mut res = Vec::new();

@@ -253,7 +250,10 @@ pub fn find_poseidon_ark_and_mds<F: PrimeField>(

for i in 0..(rate) {
for (j, ys_item) in ys.iter().enumerate().take(rate) {
mds[i][j] = (xs[i] + ys_item).inverse().unwrap();
// Poseidon algorithm guarantees xs[i] + ys[j] != 0
mds[i][j] = (xs[i] + ys_item)
.inverse()
.expect("MDS matrix inverse must be valid");
}
}
@@ -5,7 +5,7 @@

use ark_ff::PrimeField;

use crate::poseidon_constants::find_poseidon_ark_and_mds;
use super::{error::PoseidonError, poseidon_constants::find_poseidon_ark_and_mds};

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RoundParameters<F: PrimeField> {

@@ -20,6 +20,7 @@ pub struct RoundParameters<F: PrimeField> {
pub struct Poseidon<F: PrimeField> {
round_params: Vec<RoundParameters<F>>,
}

impl<F: PrimeField> Poseidon<F> {
// Loads round parameters and generates round constants
// poseidon_params is a vector containing tuples (t, RF, RP, skip_matrices)

@@ -93,18 +94,20 @@ impl<F: PrimeField> Poseidon<F> {
}
}

pub fn hash(&self, inp: &[F]) -> Result<F, String> {
pub fn hash(&self, inp: &[F]) -> Result<F, PoseidonError> {
// Note that the rate t becomes input length + 1; hence for length N we pick parameters with T = N + 1
let t = inp.len() + 1;

// We seek the index (Poseidon's round_params is an ordered vector) for the parameters corresponding to t
let param_index = self.round_params.iter().position(|el| el.t == t);

if inp.is_empty() || param_index.is_none() {
return Err("No parameters found for inputs length".to_string());
if inp.is_empty() {
return Err(PoseidonError::EmptyInput);
}

let param_index = param_index.unwrap();
// We seek the index (Poseidon's round_params is an ordered vector) for the parameters corresponding to t
let param_index = self
.round_params
.iter()
.position(|el| el.t == t)
.ok_or(PoseidonError::NoParametersForInputLength(inp.len()))?;

let mut state = vec![F::ZERO; t];
let mut state_2 = state.clone();
@@ -6,8 +6,11 @@ mod test {
use hex_literal::hex;
use tiny_keccak::{Hasher as _, Keccak};
use zerokit_utils::{
FullMerkleConfig, FullMerkleTree, Hasher, OptimalMerkleConfig, OptimalMerkleTree,
ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES,
error::HashError,
merkle_tree::{
FullMerkleConfig, FullMerkleTree, Hasher, OptimalMerkleConfig, OptimalMerkleTree,
ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES,
},
};
#[derive(Clone, Copy, Eq, PartialEq)]
struct Keccak256;

@@ -17,19 +20,20 @@ mod test {

impl Hasher for Keccak256 {
type Fr = TestFr;
type Error = HashError;

fn default_leaf() -> Self::Fr {
TestFr([0; 32])
}

fn hash(inputs: &[Self::Fr]) -> Self::Fr {
fn hash(inputs: &[Self::Fr]) -> Result<Self::Fr, HashError> {
let mut output = [0; 32];
let mut hasher = Keccak::v256();
for element in inputs {
hasher.update(element.0.as_slice());
}
hasher.finalize(&mut output);
TestFr(output)
Ok(TestFr(output))
}
}

@@ -43,7 +47,7 @@ mod test {
type Err = std::string::FromUtf8Error;

fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(TestFr(s.as_bytes().try_into().expect("Invalid length")))
Ok(TestFr(s.as_bytes().try_into().unwrap()))
}
}

@@ -51,7 +55,7 @@ mod test {
fn from(value: u32) -> Self {
let mut bytes: Vec<u8> = vec![0; 28];
bytes.extend_from_slice(&value.to_be_bytes());
TestFr(bytes.as_slice().try_into().expect("Invalid length"))
TestFr(bytes.as_slice().try_into().unwrap())
}
}

@@ -59,12 +63,12 @@ mod test {

fn default_full_merkle_tree(depth: usize) -> FullMerkleTree<Keccak256> {
FullMerkleTree::<Keccak256>::new(depth, TestFr([0; 32]), FullMerkleConfig::default())
.expect("Failed to create FullMerkleTree")
.unwrap()
}

fn default_optimal_merkle_tree(depth: usize) -> OptimalMerkleTree<Keccak256> {
OptimalMerkleTree::<Keccak256>::new(depth, TestFr([0; 32]), OptimalMerkleConfig::default())
.expect("Failed to create OptimalMerkleTree")
.unwrap()
}

#[test]
@@ -87,14 +91,14 @@ mod test {
let mut tree_full = default_full_merkle_tree(DEFAULT_DEPTH);
assert_eq!(tree_full.root(), default_tree_root);
for i in 0..nof_leaves {
tree_full.set(i, leaves[i]).expect("Failed to set leaf");
tree_full.set(i, leaves[i]).unwrap();
assert_eq!(tree_full.root(), roots[i]);
}

let mut tree_opt = default_optimal_merkle_tree(DEFAULT_DEPTH);
assert_eq!(tree_opt.root(), default_tree_root);
for i in 0..nof_leaves {
tree_opt.set(i, leaves[i]).expect("Failed to set leaf");
tree_opt.set(i, leaves[i]).unwrap();
assert_eq!(tree_opt.root(), roots[i]);
}
}

@@ -106,17 +110,13 @@ mod test {

let mut tree_full = default_full_merkle_tree(depth);
let root_before = tree_full.root();
tree_full
.set_range(0, leaves.iter().cloned())
.expect("Failed to set leaves");
tree_full.set_range(0, leaves.iter().cloned()).unwrap();
let root_after = tree_full.root();
assert_ne!(root_before, root_after);

let mut tree_opt = default_optimal_merkle_tree(depth);
let root_before = tree_opt.root();
tree_opt
.set_range(0, leaves.iter().cloned())
.expect("Failed to set leaves");
tree_opt.set_range(0, leaves.iter().cloned()).unwrap();
let root_after = tree_opt.root();
assert_ne!(root_before, root_after);
}

@@ -128,10 +128,10 @@ mod test {

for i in 0..4 {
let leaf = TestFr::from(i as u32);
tree_full.update_next(leaf).expect("Failed to update leaf");
tree_opt.update_next(leaf).expect("Failed to update leaf");
assert_eq!(tree_full.get(i).expect("Failed to get leaf"), leaf);
assert_eq!(tree_opt.get(i).expect("Failed to get leaf"), leaf);
tree_full.update_next(leaf).unwrap();
tree_opt.update_next(leaf).unwrap();
assert_eq!(tree_full.get(i).unwrap(), leaf);
assert_eq!(tree_opt.get(i).unwrap(), leaf);
}

assert_eq!(tree_full.leaves_set(), 4);

@@ -145,38 +145,34 @@ mod test {
let new_leaf = TestFr::from(99);

let mut tree_full = default_full_merkle_tree(DEFAULT_DEPTH);
tree_full
.set(index, original_leaf)
.expect("Failed to set leaf");
tree_full.set(index, original_leaf).unwrap();
let root_with_original = tree_full.root();

tree_full.delete(index).expect("Failed to delete leaf");
tree_full.delete(index).unwrap();
let root_after_delete = tree_full.root();
assert_ne!(root_with_original, root_after_delete);

tree_full.set(index, new_leaf).expect("Failed to set leaf");
tree_full.set(index, new_leaf).unwrap();
let root_after_reset = tree_full.root();

assert_ne!(root_after_delete, root_after_reset);
assert_ne!(root_with_original, root_after_reset);
assert_eq!(tree_full.get(index).expect("Failed to get leaf"), new_leaf);
assert_eq!(tree_full.get(index).unwrap(), new_leaf);

let mut tree_opt = default_optimal_merkle_tree(DEFAULT_DEPTH);
tree_opt
.set(index, original_leaf)
.expect("Failed to set leaf");
tree_opt.set(index, original_leaf).unwrap();
let root_with_original = tree_opt.root();

tree_opt.delete(index).expect("Failed to delete leaf");
tree_opt.delete(index).unwrap();
let root_after_delete = tree_opt.root();
assert_ne!(root_with_original, root_after_delete);

tree_opt.set(index, new_leaf).expect("Failed to set leaf");
tree_opt.set(index, new_leaf).unwrap();
let root_after_reset = tree_opt.root();

assert_ne!(root_after_delete, root_after_reset);
assert_ne!(root_with_original, root_after_reset);
assert_eq!(tree_opt.get(index).expect("Failed to get leaf"), new_leaf);
assert_eq!(tree_opt.get(index).unwrap(), new_leaf);
}

#[test]
@@ -207,24 +203,24 @@ mod test {
// check situation when the number of items to insert is less than the number of items to delete
tree_full
.override_range(0, leaves_2.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
.unwrap();

// check if the indexes for write and delete are the same
tree_full
.override_range(0, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
.unwrap();
assert_eq!(tree_full.get_empty_leaves_indices(), Vec::<usize>::new());

// check if indexes for deletion are before indexes for overwriting
tree_full
.override_range(4, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
.unwrap();
assert_eq!(tree_full.get_empty_leaves_indices(), vec![0, 1, 2, 3]);

// check if the indices for write and delete do not overlap completely
tree_full
.override_range(2, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
.unwrap();
assert_eq!(tree_full.get_empty_leaves_indices(), vec![0, 1]);

let mut tree_opt = default_optimal_merkle_tree(depth);

@@ -246,24 +242,24 @@ mod test {
// check situation when the number of items to insert is less than the number of items to delete
tree_opt
.override_range(0, leaves_2.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
.unwrap();

// check if the indexes for write and delete are the same
tree_opt
.override_range(0, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
.unwrap();
assert_eq!(tree_opt.get_empty_leaves_indices(), Vec::<usize>::new());

// check if indexes for deletion are before indexes for overwriting
tree_opt
.override_range(4, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
.unwrap();
assert_eq!(tree_opt.get_empty_leaves_indices(), vec![0, 1, 2, 3]);

// check if the indices for write and delete do not overlap completely
tree_opt
.override_range(2, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
.unwrap();
assert_eq!(tree_opt.get_empty_leaves_indices(), vec![0, 1]);
}
@@ -279,19 +275,12 @@ mod test {
for i in 0..nof_leaves {
// check leaves
assert_eq!(
tree_full.get(i).expect("Failed to get leaf"),
tree_full
.get_subtree_root(depth, i)
.expect("Failed to get subtree root")
tree_full.get(i).unwrap(),
tree_full.get_subtree_root(depth, i).unwrap()
);

// check root
assert_eq!(
tree_full.root(),
tree_full
.get_subtree_root(0, i)
.expect("Failed to get subtree root")
);
assert_eq!(tree_full.root(), tree_full.get_subtree_root(0, i).unwrap());
}

// check intermediate nodes

@@ -301,18 +290,12 @@ mod test {
let idx_r = (i + 1) * (1 << (depth - n));
let idx_sr = idx_l;

let prev_l = tree_full
.get_subtree_root(n, idx_l)
.expect("Failed to get subtree root");
let prev_r = tree_full
.get_subtree_root(n, idx_r)
.expect("Failed to get subtree root");
let subroot = tree_full
.get_subtree_root(n - 1, idx_sr)
.expect("Failed to get subtree root");
let prev_l = tree_full.get_subtree_root(n, idx_l).unwrap();
let prev_r = tree_full.get_subtree_root(n, idx_r).unwrap();
let subroot = tree_full.get_subtree_root(n - 1, idx_sr).unwrap();

// check intermediate nodes
assert_eq!(Keccak256::hash(&[prev_l, prev_r]), subroot);
assert_eq!(Keccak256::hash(&[prev_l, prev_r]).unwrap(), subroot);
}
}

@@ -322,18 +305,11 @@ mod test {
for i in 0..nof_leaves {
// check leaves
assert_eq!(
tree_opt.get(i).expect("Failed to get leaf"),
tree_opt
.get_subtree_root(depth, i)
.expect("Failed to get subtree root")
tree_opt.get(i).unwrap(),
tree_opt.get_subtree_root(depth, i).unwrap()
);
// check root
assert_eq!(
tree_opt.root(),
tree_opt
.get_subtree_root(0, i)
.expect("Failed to get subtree root")
);
assert_eq!(tree_opt.root(), tree_opt.get_subtree_root(0, i).unwrap());
}

// check intermediate nodes

@@ -343,18 +319,12 @@ mod test {
let idx_r = (i + 1) * (1 << (depth - n));
let idx_sr = idx_l;

let prev_l = tree_opt
.get_subtree_root(n, idx_l)
.expect("Failed to get subtree root");
let prev_r = tree_opt
.get_subtree_root(n, idx_r)
.expect("Failed to get subtree root");
let subroot = tree_opt
.get_subtree_root(n - 1, idx_sr)
.expect("Failed to get subtree root");
let prev_l = tree_opt.get_subtree_root(n, idx_l).unwrap();
let prev_r = tree_opt.get_subtree_root(n, idx_r).unwrap();
let subroot = tree_opt.get_subtree_root(n - 1, idx_sr).unwrap();

// check intermediate nodes
assert_eq!(Keccak256::hash(&[prev_l, prev_r]), subroot);
assert_eq!(Keccak256::hash(&[prev_l, prev_r]).unwrap(), subroot);
}
}
}
@@ -368,52 +338,54 @@ mod test {
let mut tree_full = default_full_merkle_tree(DEFAULT_DEPTH);
for i in 0..nof_leaves {
// We set the leaves
tree_full.set(i, leaves[i]).expect("Failed to set leaf");
tree_full.set(i, leaves[i]).unwrap();

// We compute a merkle proof
let proof = tree_full.proof(i).expect("Failed to compute proof");
let proof = tree_full.proof(i).unwrap();

// We verify if the merkle proof corresponds to the right leaf index
assert_eq!(proof.leaf_index(), i);

// We verify the proof
assert!(tree_full
.verify(&leaves[i], &proof)
.expect("Failed to verify proof"));
assert!(tree_full.verify(&leaves[i], &proof).unwrap());

// We ensure that the Merkle proof and the leaf generate the same root as the tree
assert_eq!(proof.compute_root_from(&leaves[i]), tree_full.root());
assert_eq!(
proof.compute_root_from(&leaves[i]).unwrap(),
tree_full.root()
);

// We check that the proof is not valid for another leaf
assert!(!tree_full
.verify(&leaves[(i + 1) % nof_leaves], &proof)
.expect("Failed to verify proof"));
.unwrap());
}

// We test the OptimalMerkleTree implementation
let mut tree_opt = default_optimal_merkle_tree(DEFAULT_DEPTH);
for i in 0..nof_leaves {
// We set the leaves
tree_opt.set(i, leaves[i]).expect("Failed to set leaf");
tree_opt.set(i, leaves[i]).unwrap();

// We compute a merkle proof
let proof = tree_opt.proof(i).expect("Failed to compute proof");
let proof = tree_opt.proof(i).unwrap();

// We verify if the merkle proof corresponds to the right leaf index
assert_eq!(proof.leaf_index(), i);

// We verify the proof
assert!(tree_opt
.verify(&leaves[i], &proof)
.expect("Failed to verify proof"));
assert!(tree_opt.verify(&leaves[i], &proof).unwrap());

// We ensure that the Merkle proof and the leaf generate the same root as the tree
assert_eq!(proof.compute_root_from(&leaves[i]), tree_opt.root());
assert_eq!(
proof.compute_root_from(&leaves[i]).unwrap(),
tree_opt.root()
);

// We check that the proof is not valid for another leaf
assert!(!tree_opt
.verify(&leaves[(i + 1) % nof_leaves], &proof)
.expect("Failed to verify proof"));
.unwrap());
}
}
@@ -424,16 +396,12 @@ mod test {

let invalid_leaf = TestFr::from(12345);

let proof_full = tree_full.proof(0).expect("Failed to compute proof");
let proof_opt = tree_opt.proof(0).expect("Failed to compute proof");
let proof_full = tree_full.proof(0).unwrap();
let proof_opt = tree_opt.proof(0).unwrap();

// Should fail because no leaf was set
assert!(!tree_full
.verify(&invalid_leaf, &proof_full)
.expect("Failed to verify proof"));
assert!(!tree_opt
.verify(&invalid_leaf, &proof_opt)
.expect("Failed to verify proof"));
assert!(!tree_full.verify(&invalid_leaf, &proof_full).unwrap());
assert!(!tree_opt.verify(&invalid_leaf, &proof_opt).unwrap());
}

#[test]
@@ -450,9 +418,7 @@ mod test {
let to_delete_indices: [usize; 2] = [0, 1];

let mut tree_full = default_full_merkle_tree(DEFAULT_DEPTH);
tree_full
.set_range(0, leaves.iter().cloned())
.expect("Failed to set leaves");
tree_full.set_range(0, leaves.iter().cloned()).unwrap();

tree_full
.override_range(

@@ -460,16 +426,14 @@ mod test {
new_leaves.iter().cloned(),
to_delete_indices.iter().cloned(),
)
.expect("Failed to override range");
.unwrap();

for (i, &new_leaf) in new_leaves.iter().enumerate() {
assert_eq!(tree_full.get(i).expect("Failed to get leaf"), new_leaf);
assert_eq!(tree_full.get(i).unwrap(), new_leaf);
}

let mut tree_opt = default_optimal_merkle_tree(DEFAULT_DEPTH);
tree_opt
.set_range(0, leaves.iter().cloned())
.expect("Failed to set leaves");
tree_opt.set_range(0, leaves.iter().cloned()).unwrap();

tree_opt
.override_range(

@@ -477,10 +441,10 @@ mod test {
new_leaves.iter().cloned(),
to_delete_indices.iter().cloned(),
)
.expect("Failed to override range");
.unwrap();

for (i, &new_leaf) in new_leaves.iter().enumerate() {
assert_eq!(tree_opt.get(i).expect("Failed to get leaf"), new_leaf);
assert_eq!(tree_opt.get(i).unwrap(), new_leaf);
}
}

@@ -499,20 +463,20 @@ mod test {

tree_full
.override_range(0, leaves.iter().cloned(), indices.iter().cloned())
.expect("Failed to override range");
.unwrap();

for (i, &leaf) in leaves.iter().enumerate() {
assert_eq!(tree_full.get(i).expect("Failed to get leaf"), leaf);
assert_eq!(tree_full.get(i).unwrap(), leaf);
}

let mut tree_opt = default_optimal_merkle_tree(depth);

tree_opt
.override_range(0, leaves.iter().cloned(), indices.iter().cloned())
.expect("Failed to override range");
.unwrap();

for (i, &leaf) in leaves.iter().enumerate() {
assert_eq!(tree_opt.get(i).expect("Failed to get leaf"), leaf);
assert_eq!(tree_opt.get(i).unwrap(), leaf);
}
}
}
@@ -3,7 +3,7 @@ mod test {
use ark_bn254::Fr;
use num_bigint::BigUint;
use num_traits::Num;
use zerokit_utils::poseidon_hash::Poseidon;
use zerokit_utils::poseidon::Poseidon;

const ROUND_PARAMS: [(usize, usize, usize, usize); 8] = [
(2, 8, 56, 0),

@@ -4,7 +4,7 @@ mod test {

use ark_bn254::Fr;
use ark_ff::{AdditiveGroup, Field};
use zerokit_utils::poseidon_hash::Poseidon;
use zerokit_utils::poseidon::Poseidon;

const ROUND_PARAMS: [(usize, usize, usize, usize); 8] = [
(2, 8, 56, 0),