Mirror of https://github.com/vacp2p/zerokit.git (synced 2026-01-09 14:38:01 -05:00)

Eyre removal 2 (#311)
Co-authored-by: Ekaterina Broslavskaya <seemenkina@gmail.com>
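This commit continues replacing color-eyre's dynamic Report errors with typed thiserror enums across the workspace. As a rough illustration only (not part of the commit; `DataError` and `read_data` are hypothetical names), the pattern applied throughout the diff below looks like this:

```rust
// Hypothetical sketch of the eyre -> thiserror migration pattern.
// Before: fn read_data(..) -> color_eyre::Result<Vec<u8>> with Report::msg("...").
// After: a typed enum that callers can match on; `?` still works via #[from].
#[derive(Debug, thiserror::Error)]
pub enum DataError {
    #[error("no data")]
    Empty,
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),
}

pub fn read_data(bytes: &[u8]) -> Result<Vec<u8>, DataError> {
    if bytes.is_empty() {
        return Err(DataError::Empty);
    }
    Ok(bytes.to_vec())
}
```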
Cargo.lock | 14 (generated)
@@ -573,9 +573,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"

[[package]]
name = "cc"
version = "1.2.24"
version = "1.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16595d3be041c03b09d08d0858631facccee9221e579704070e6e9e4915d3bc7"
checksum = "d0fc897dc1e865cc67c0e05a836d9d3f1df3cbe442aa4a9473b18e12624a4951"
dependencies = [
"shlex",
]
@@ -1183,9 +1183,9 @@ checksum = "b4ce301924b7887e9d637144fdade93f9dfff9b60981d4ac161db09720d39aa5"

[[package]]
name = "lock_api"
version = "0.4.12"
version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765"
dependencies = [
"autocfg",
"scopeguard",
@@ -1639,7 +1639,6 @@ dependencies = [
"ark-std 0.5.0",
"byteorder",
"cfg-if",
"color-eyre",
"criterion",
"document-features",
"lazy_static",
@@ -1662,7 +1661,6 @@ name = "rln-cli"
version = "0.4.0"
dependencies = [
"clap",
"clap_derive",
"color-eyre",
"rln",
"serde",
@@ -2477,7 +2475,6 @@ version = "0.5.2"
dependencies = [
"ark-bn254",
"ark-ff 0.5.0",
"color-eyre",
"criterion",
"hex",
"hex-literal",
@@ -2485,8 +2482,9 @@ dependencies = [
"num-bigint",
"num-traits",
"rayon",
"serde",
"serde_json",
"sled",
"thiserror",
"tiny-keccak",
"vacp2p_pmtree",
]
@@ -15,8 +15,7 @@ required-features = ["stateless"]

[dependencies]
rln = { path = "../rln", default-features = true }
zerokit_utils = { path = "../utils" }
clap = { version = "4.5.39", features = ["cargo", "derive", "env"] }
clap_derive = { version = "4.5.32" }
clap = { version = "4.5.38", features = ["cargo", "derive", "env"] }
color-eyre = "0.6.4"
serde_json = "1.0"
serde = { version = "1.0", features = ["derive"] }

@@ -6,7 +6,7 @@ use std::{
};

use clap::{Parser, Subcommand};
use color_eyre::{eyre::eyre, Result};
use color_eyre::{eyre::eyre, Report, Result};
use rln::{
circuit::Fr,
hashers::{hash_to_field, poseidon_hash},
@@ -182,7 +182,7 @@ impl RLNSystem {
Ok(false) => {
println!("Verification failed: message_id must be unique within the epoch and satisfy 0 <= message_id < MESSAGE_LIMIT: {MESSAGE_LIMIT}");
}
Err(err) => return Err(err),
Err(err) => return Err(Report::new(err)),
}
Ok(())
}
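The CLI keeps color-eyre at the binary boundary and wraps the library's typed errors with Report::new, as in the hunk above. A minimal sketch of that pattern (`run` and `LibError` are hypothetical names):

```rust
use color_eyre::{Report, Result};

#[derive(Debug, thiserror::Error)]
#[error("library failure: {0}")]
struct LibError(String);

fn fallible() -> Result<(), LibError> {
    Err(LibError("example".into()))
}

fn run() -> Result<()> {
    // Wrap a typed library error into an eyre Report at the application boundary.
    fallible().map_err(Report::new)?;
    Ok(())
}
```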
@@ -40,7 +40,6 @@ ark-serialize = { version = "0.5.0", default-features = false, features = [
] }

# error handling
color-eyre = "0.6.4"
thiserror = "2.0.12"

# utilities
rln/src/circuit/error.rs | 7 (new file)
@@ -0,0 +1,7 @@
#[derive(Debug, thiserror::Error)]
pub enum ZKeyReadError {
#[error("No proving key found!")]
EmptyBytes,
#[error("{0}")]
SerializationError(#[from] ark_serialize::SerializationError),
}
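With the #[from] attribute above, ark-serialize failures convert into ZKeyReadError automatically through `?`. A minimal sketch, assuming a hypothetical `load_key` helper:

```rust
use ark_serialize::CanonicalDeserialize;

fn load_key<T: CanonicalDeserialize>(bytes: &[u8]) -> Result<T, ZKeyReadError> {
    if bytes.is_empty() {
        return Err(ZKeyReadError::EmptyBytes);
    }
    // SerializationError -> ZKeyReadError::SerializationError via #[from]
    Ok(T::deserialize_uncompressed_unchecked(bytes)?)
}
```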
@@ -1,5 +1,6 @@
// This crate provides interfaces for the zero-knowledge circuit and keys

pub mod error;
pub mod iden3calc;
pub mod qap;
pub mod zkey;
@@ -12,15 +13,12 @@ use ark_bn254::{
use ark_groth16::ProvingKey;
use ark_relations::r1cs::ConstraintMatrices;
use cfg_if::cfg_if;
use color_eyre::{Report, Result};

use crate::circuit::error::ZKeyReadError;
use crate::circuit::iden3calc::calc_witness;

#[cfg(feature = "arkzkey")]
use {
ark_ff::Field, ark_serialize::CanonicalDeserialize, ark_serialize::CanonicalSerialize,
color_eyre::eyre::WrapErr,
};
use {ark_ff::Field, ark_serialize::CanonicalDeserialize, ark_serialize::CanonicalSerialize};

#[cfg(not(feature = "arkzkey"))]
use {crate::circuit::zkey::read_zkey, std::io::Cursor};
@@ -60,9 +58,11 @@ pub type G2Affine = ArkG2Affine;
pub type G2Projective = ArkG2Projective;

// Loads the proving key using a bytes vector
pub fn zkey_from_raw(zkey_data: &[u8]) -> Result<(ProvingKey<Curve>, ConstraintMatrices<Fr>)> {
pub fn zkey_from_raw(
zkey_data: &[u8],
) -> Result<(ProvingKey<Curve>, ConstraintMatrices<Fr>), ZKeyReadError> {
if zkey_data.is_empty() {
return Err(Report::msg("No proving key found!"));
return Err(ZKeyReadError::EmptyBytes);
}

let proving_key_and_matrices = match () {
@@ -128,20 +128,18 @@ pub struct SerializableMatrix<F: Field> {
#[cfg(feature = "arkzkey")]
pub fn read_arkzkey_from_bytes_uncompressed(
arkzkey_data: &[u8],
) -> Result<(ProvingKey<Curve>, ConstraintMatrices<Fr>)> {
) -> Result<(ProvingKey<Curve>, ConstraintMatrices<Fr>), ZKeyReadError> {
if arkzkey_data.is_empty() {
return Err(Report::msg("No proving key found!"));
return Err(ZKeyReadError::EmptyBytes);
}

let mut cursor = std::io::Cursor::new(arkzkey_data);

let serialized_proving_key =
SerializableProvingKey::deserialize_uncompressed_unchecked(&mut cursor)
.wrap_err("Failed to deserialize proving key")?;
SerializableProvingKey::deserialize_uncompressed_unchecked(&mut cursor)?;

let serialized_constraint_matrices =
SerializableConstraintMatrices::deserialize_uncompressed_unchecked(&mut cursor)
.wrap_err("Failed to deserialize constraint matrices")?;
SerializableConstraintMatrices::deserialize_uncompressed_unchecked(&mut cursor)?;

// Get on right form for API
let proving_key: ProvingKey<Bn254> = serialized_proving_key.0;
rln/src/error.rs | 70 (new file)
@@ -0,0 +1,70 @@
use crate::circuit::error::ZKeyReadError;
use ark_bn254::Fr;
use ark_relations::r1cs::SynthesisError;
use ark_serialize::SerializationError;
use num_bigint::{BigInt, ParseBigIntError};
use std::array::TryFromSliceError;
use std::num::TryFromIntError;
use std::string::FromUtf8Error;
use thiserror::Error;
use utils::error::{FromConfigError, ZerokitMerkleTreeError};

#[derive(Debug, thiserror::Error)]
pub enum ConversionError {
#[error("Expected radix 10 or 16")]
WrongRadix,
#[error("{0}")]
ParseBigInt(#[from] ParseBigIntError),
#[error("{0}")]
ToUsize(#[from] TryFromIntError),
#[error("{0}")]
FromSlice(#[from] TryFromSliceError),
}

#[derive(Error, Debug)]
pub enum ProofError {
#[error("{0}")]
ProtocolError(#[from] ProtocolError),
#[error("Error producing proof: {0}")]
SynthesisError(#[from] SynthesisError),
}

#[derive(Debug, thiserror::Error)]
pub enum ProtocolError {
#[error("{0}")]
Conversion(#[from] ConversionError),
#[error("Expected to read {0} bytes but read only {1} bytes")]
InvalidReadLen(usize, usize),
#[error("Cannot convert bigint {0:?} to biguint")]
BigUintConversion(BigInt),
#[error("{0}")]
JsonError(#[from] serde_json::Error),
#[error("Message id ({0}) is not within user_message_limit ({1})")]
InvalidMessageId(Fr, Fr),
}

#[derive(Debug, thiserror::Error)]
pub enum RLNError {
#[error("I/O error: {0}")]
IO(#[from] std::io::Error),
#[error("Utf8 error: {0}")]
Utf8(#[from] FromUtf8Error),
#[error("Serde json error: {0}")]
JSON(#[from] serde_json::Error),
#[error("Config error: {0}")]
Config(#[from] FromConfigError),
#[error("Serialization error: {0}")]
Serialization(#[from] SerializationError),
#[error("Merkle tree error: {0}")]
MerkleTree(#[from] ZerokitMerkleTreeError),
#[error("ZKey error: {0}")]
ZKey(#[from] ZKeyReadError),
#[error("Conversion error: {0}")]
Conversion(#[from] ConversionError),
#[error("Protocol error: {0}")]
Protocol(#[from] ProtocolError),
#[error("Proof error: {0}")]
Proof(#[from] ProofError),
#[error("Unable to extract secret")]
RecoverSecret,
}
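Since every fallible RLNError variant carries #[from], lower-level errors bubble up through `?` without manual mapping. A small sketch with a hypothetical helper:

```rust
fn example_pipeline(bytes: &[u8]) -> Result<(), RLNError> {
    let text = String::from_utf8(bytes.to_vec())?;                // FromUtf8Error -> RLNError::Utf8
    let _json: serde_json::Value = serde_json::from_str(&text)?;  // serde_json::Error -> RLNError::JSON
    Ok(())
}
```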
@@ -1,4 +1,5 @@
pub mod circuit;
pub mod error;
#[cfg(not(target_arch = "wasm32"))]
pub mod ffi;
pub mod hashers;
@@ -1,17 +1,15 @@
use serde_json::Value;
use std::fmt::Debug;
use std::path::PathBuf;
use std::str::FromStr;

use color_eyre::{Report, Result};
use serde_json::Value;

use utils::pmtree::tree::Key;
use utils::pmtree::{Database, Hasher};
use utils::*;

use crate::circuit::Fr;
use crate::hashers::{poseidon_hash, PoseidonHash};
use crate::utils::{bytes_le_to_fr, fr_to_bytes_le};
use utils::error::{FromConfigError, ZerokitMerkleTreeError};
use utils::pmtree::tree::Key;
use utils::pmtree::{Database, Hasher, PmtreeErrorKind};
use utils::*;

const METADATA_KEY: [u8; 8] = *b"metadata";

@@ -63,9 +61,9 @@ fn get_tmp() -> bool {
pub struct PmtreeConfig(Config);

impl FromStr for PmtreeConfig {
type Err = Report;
type Err = FromConfigError;

fn from_str(s: &str) -> Result<Self> {
fn from_str(s: &str) -> Result<Self, Self::Err> {
let config: Value = serde_json::from_str(s)?;

let path = config["path"].as_str();
@@ -85,10 +83,7 @@ impl FromStr for PmtreeConfig {
&& temporary.unwrap()
&& path.as_ref().unwrap().exists()
{
return Err(Report::msg(format!(
"Path {:?} already exists, cannot use temporary",
path.unwrap()
)));
return Err(FromConfigError::PathExists);
}

let config = Config::new()
@@ -133,12 +128,16 @@ impl ZerokitMerkleTree for PmTree {
type Hasher = PoseidonHash;
type Config = PmtreeConfig;

fn default(depth: usize) -> Result<Self> {
fn default(depth: usize) -> Result<Self, ZerokitMerkleTreeError> {
let default_config = PmtreeConfig::default();
PmTree::new(depth, Self::Hasher::default_leaf(), default_config)
}

fn new(depth: usize, _default_leaf: FrOf<Self::Hasher>, config: Self::Config) -> Result<Self> {
fn new(
depth: usize,
_default_leaf: FrOf<Self::Hasher>,
config: Self::Config,
) -> Result<Self, ZerokitMerkleTreeError> {
let tree_loaded = pmtree::MerkleTree::load(config.clone().0);
let tree = match tree_loaded {
Ok(tree) => tree,
@@ -168,10 +167,12 @@ impl ZerokitMerkleTree for PmTree {
self.tree.root()
}

fn set(&mut self, index: usize, leaf: FrOf<Self::Hasher>) -> Result<()> {
self.tree
.set(index, leaf)
.map_err(|e| Report::msg(e.to_string()))?;
fn set(
&mut self,
index: usize,
leaf: FrOf<Self::Hasher>,
) -> Result<(), ZerokitMerkleTreeError> {
self.tree.set(index, leaf)?;
self.cached_leaves_indices[index] = 1;
Ok(())
}
@@ -180,27 +181,31 @@ impl ZerokitMerkleTree for PmTree {
&mut self,
start: usize,
values: I,
) -> Result<()> {
) -> Result<(), ZerokitMerkleTreeError> {
let v = values.into_iter().collect::<Vec<_>>();
self.tree
.set_range(start, v.clone().into_iter())
.map_err(|e| Report::msg(e.to_string()))?;
self.tree.set_range(start, v.clone().into_iter())?;
for i in start..v.len() {
self.cached_leaves_indices[i] = 1
}
Ok(())
}

fn get(&self, index: usize) -> Result<FrOf<Self::Hasher>> {
self.tree.get(index).map_err(|e| Report::msg(e.to_string()))
fn get(&self, index: usize) -> Result<FrOf<Self::Hasher>, ZerokitMerkleTreeError> {
self.tree
.get(index)
.map_err(ZerokitMerkleTreeError::PmtreeErrorKind)
}

fn get_subtree_root(&self, n: usize, index: usize) -> Result<FrOf<Self::Hasher>> {
fn get_subtree_root(
&self,
n: usize,
index: usize,
) -> Result<FrOf<Self::Hasher>, ZerokitMerkleTreeError> {
if n > self.depth() {
return Err(Report::msg("level exceeds depth size"));
return Err(ZerokitMerkleTreeError::InvalidLevel);
}
if index >= self.capacity() {
return Err(Report::msg("index exceeds set size"));
return Err(ZerokitMerkleTreeError::InvalidLeaf);
}
if n == 0 {
Ok(self.root())
@@ -231,55 +236,66 @@ impl ZerokitMerkleTree for PmTree {
start: usize,
leaves: I,
indices: J,
) -> Result<()> {
) -> Result<(), ZerokitMerkleTreeError> {
let leaves = leaves.into_iter().collect::<Vec<_>>();
let mut indices = indices.into_iter().collect::<Vec<_>>();
indices.sort();

match (leaves.len(), indices.len()) {
(0, 0) => Err(Report::msg("no leaves or indices to be removed")),
(0, 0) => Err(ZerokitMerkleTreeError::InvalidLeaf),
(1, 0) => self.set(start, leaves[0]),
(0, 1) => self.delete(indices[0]),
(_, 0) => self.set_range(start, leaves.into_iter()),
(0, _) => self.remove_indices(&indices),
(_, _) => self.remove_indices_and_set_leaves(start, leaves, &indices),
(0, _) => self
.remove_indices(&indices)
.map_err(ZerokitMerkleTreeError::PmtreeErrorKind),
(_, _) => self
.remove_indices_and_set_leaves(start, leaves, &indices)
.map_err(ZerokitMerkleTreeError::PmtreeErrorKind),
}
}

fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<()> {
fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<(), ZerokitMerkleTreeError> {
self.tree
.update_next(leaf)
.map_err(|e| Report::msg(e.to_string()))
.map_err(ZerokitMerkleTreeError::PmtreeErrorKind)
}

fn delete(&mut self, index: usize) -> Result<()> {
fn delete(&mut self, index: usize) -> Result<(), ZerokitMerkleTreeError> {
self.tree
.delete(index)
.map_err(|e| Report::msg(e.to_string()))?;
.map_err(ZerokitMerkleTreeError::PmtreeErrorKind)?;
self.cached_leaves_indices[index] = 0;
Ok(())
}

fn proof(&self, index: usize) -> Result<Self::Proof> {
fn proof(&self, index: usize) -> Result<Self::Proof, ZerokitMerkleTreeError> {
let proof = self.tree.proof(index)?;
Ok(PmTreeProof { proof })
}

fn verify(&self, leaf: &FrOf<Self::Hasher>, witness: &Self::Proof) -> Result<bool> {
fn verify(
&self,
leaf: &FrOf<Self::Hasher>,
witness: &Self::Proof,
) -> Result<bool, ZerokitMerkleTreeError> {
if self.tree.verify(leaf, &witness.proof) {
Ok(true)
} else {
Err(Report::msg("verify failed"))
Err(ZerokitMerkleTreeError::InvalidWitness)
}
}

fn set_metadata(&mut self, metadata: &[u8]) -> Result<()> {
self.tree.db.put(METADATA_KEY, metadata.to_vec())?;
fn set_metadata(&mut self, metadata: &[u8]) -> Result<(), ZerokitMerkleTreeError> {
self.tree
.db
.put(METADATA_KEY, metadata.to_vec())
.map_err(ZerokitMerkleTreeError::PmtreeErrorKind)?;
self.metadata = metadata.to_vec();
Ok(())
}

fn metadata(&self) -> Result<Vec<u8>> {
fn metadata(&self) -> Result<Vec<u8>, ZerokitMerkleTreeError> {
if !self.metadata.is_empty() {
return Ok(self.metadata.clone());
}
@@ -293,8 +309,11 @@ impl ZerokitMerkleTree for PmTree {
Ok(data.unwrap())
}

fn close_db_connection(&mut self) -> Result<()> {
self.tree.db.close().map_err(|e| Report::msg(e.to_string()))
fn close_db_connection(&mut self) -> Result<(), ZerokitMerkleTreeError> {
self.tree
.db
.close()
.map_err(ZerokitMerkleTreeError::PmtreeErrorKind)
}
}

@@ -302,15 +321,13 @@ type PmTreeHasher = <PmTree as ZerokitMerkleTree>::Hasher;
type FrOfPmTreeHasher = FrOf<PmTreeHasher>;

impl PmTree {
fn remove_indices(&mut self, indices: &[usize]) -> Result<()> {
fn remove_indices(&mut self, indices: &[usize]) -> Result<(), PmtreeErrorKind> {
let start = indices[0];
let end = indices.last().unwrap() + 1;

let new_leaves = (start..end).map(|_| PmTreeHasher::default_leaf());

self.tree
.set_range(start, new_leaves)
.map_err(|e| Report::msg(e.to_string()))?;
self.tree.set_range(start, new_leaves)?;

for i in start..end {
self.cached_leaves_indices[i] = 0
@@ -323,7 +340,7 @@ impl PmTree {
start: usize,
leaves: Vec<FrOfPmTreeHasher>,
indices: &[usize],
) -> Result<()> {
) -> Result<(), PmtreeErrorKind> {
let min_index = *indices.first().unwrap();
let max_index = start + leaves.len();

@@ -340,9 +357,7 @@ impl PmTree {
set_values[start - min_index + i] = leaf;
}

self.tree
.set_range(start, set_values)
.map_err(|e| Report::msg(e.to_string()))?;
self.tree.set_range(start, set_values)?;

for i in indices {
self.cached_leaves_indices[*i] = 0;
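Parsing a PmtreeConfig now yields the typed FromConfigError seen above instead of an eyre Report. A sketch of how a caller might use it (`load_config` is a hypothetical name, and any FromConfigError variants beyond PathExists are assumed):

```rust
use std::str::FromStr;

fn load_config(json: &str) {
    match PmtreeConfig::from_str(json) {
        Ok(_cfg) => println!("config loaded"),
        Err(FromConfigError::PathExists) => eprintln!("temporary path already exists"),
        Err(e) => eprintln!("invalid config: {e}"),
    }
}
```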
@@ -2,26 +2,24 @@

use ark_bn254::Fr;
use ark_groth16::{prepare_verifying_key, Groth16, Proof as ArkProof, ProvingKey, VerifyingKey};
use ark_relations::r1cs::{ConstraintMatrices, SynthesisError};
use ark_relations::r1cs::ConstraintMatrices;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_std::{rand::thread_rng, UniformRand};
use color_eyre::{Report, Result};
use num_bigint::BigInt;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha20Rng;
use serde::{Deserialize, Serialize};
#[cfg(test)]
use std::time::Instant;
use thiserror::Error;
use tiny_keccak::{Hasher as _, Keccak};

use crate::circuit::{calculate_rln_witness, qap::CircomReduction, Curve};
use crate::error::{ConversionError, ProofError, ProtocolError};
use crate::hashers::{hash_to_field, poseidon_hash};
use crate::poseidon_tree::*;
use crate::public::RLN_IDENTIFIER;
use crate::utils::*;
use utils::{ZerokitMerkleProof, ZerokitMerkleTree};

///////////////////////////////////////////////////////
// RLN Witness data structure and utility functions
///////////////////////////////////////////////////////
@@ -99,7 +97,7 @@ pub fn deserialize_identity_tuple(serialized: Vec<u8>) -> (Fr, Fr, Fr, Fr) {
///
/// Returns an error if `rln_witness.message_id` is not within `rln_witness.user_message_limit`.
/// input data is [ identity_secret<32> | user_message_limit<32> | message_id<32> | path_elements[<32>] | identity_path_index<8> | x<32> | external_nullifier<32> ]
pub fn serialize_witness(rln_witness: &RLNWitnessInput) -> Result<Vec<u8>> {
pub fn serialize_witness(rln_witness: &RLNWitnessInput) -> Result<Vec<u8>, ProtocolError> {
// Check if message_id is within user_message_limit
message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;

@@ -114,8 +112,8 @@ pub fn serialize_witness(rln_witness: &RLNWitnessInput) -> Result<Vec<u8>> {
serialized.extend_from_slice(&fr_to_bytes_le(&rln_witness.identity_secret));
serialized.extend_from_slice(&fr_to_bytes_le(&rln_witness.user_message_limit));
serialized.extend_from_slice(&fr_to_bytes_le(&rln_witness.message_id));
serialized.extend_from_slice(&vec_fr_to_bytes_le(&rln_witness.path_elements)?);
serialized.extend_from_slice(&vec_u8_to_bytes_le(&rln_witness.identity_path_index)?);
serialized.extend_from_slice(&vec_fr_to_bytes_le(&rln_witness.path_elements));
serialized.extend_from_slice(&vec_u8_to_bytes_le(&rln_witness.identity_path_index));
serialized.extend_from_slice(&fr_to_bytes_le(&rln_witness.x));
serialized.extend_from_slice(&fr_to_bytes_le(&rln_witness.external_nullifier));

@@ -127,7 +125,7 @@ pub fn serialize_witness(rln_witness: &RLNWitnessInput) -> Result<Vec<u8>> {
/// # Errors
///
/// Returns an error if `message_id` is not within `user_message_limit`.
pub fn deserialize_witness(serialized: &[u8]) -> Result<(RLNWitnessInput, usize)> {
pub fn deserialize_witness(serialized: &[u8]) -> Result<(RLNWitnessInput, usize), ProtocolError> {
let mut all_read: usize = 0;

let (identity_secret, read) = bytes_le_to_fr(&serialized[all_read..]);
@@ -154,7 +152,7 @@ pub fn deserialize_witness(serialized: &[u8]) -> Result<(RLNWitnessInput, usize)
all_read += read;

if serialized.len() != all_read {
return Err(Report::msg("serialized length is not equal to all_read"));
return Err(ProtocolError::InvalidReadLen(serialized.len(), all_read));
}

Ok((
@@ -178,15 +176,18 @@ pub fn deserialize_witness(serialized: &[u8]) -> Result<(RLNWitnessInput, usize)
pub fn proof_inputs_to_rln_witness(
tree: &mut PoseidonTree,
serialized: &[u8],
) -> Result<(RLNWitnessInput, usize)> {
) -> Result<(RLNWitnessInput, usize), ProtocolError> {
let mut all_read: usize = 0;

let (identity_secret, read) = bytes_le_to_fr(&serialized[all_read..]);
all_read += read;

let id_index = usize::try_from(u64::from_le_bytes(
serialized[all_read..all_read + 8].try_into()?,
))?;
serialized[all_read..all_read + 8]
.try_into()
.map_err(ConversionError::FromSlice)?,
))
.map_err(ConversionError::ToUsize)?;
all_read += 8;

let (user_message_limit, read) = bytes_le_to_fr(&serialized[all_read..]);
@@ -199,8 +200,11 @@ pub fn proof_inputs_to_rln_witness(
all_read += read;

let signal_len = usize::try_from(u64::from_le_bytes(
serialized[all_read..all_read + 8].try_into()?,
))?;
serialized[all_read..all_read + 8]
.try_into()
.map_err(ConversionError::FromSlice)?,
))
.map_err(ConversionError::ToUsize)?;
all_read += 8;

let signal: Vec<u8> = serialized[all_read..all_read + signal_len].to_vec();
@@ -237,7 +241,7 @@ pub fn rln_witness_from_values(
external_nullifier: Fr,
user_message_limit: Fr,
message_id: Fr,
) -> Result<RLNWitnessInput> {
) -> Result<RLNWitnessInput, ProtocolError> {
message_id_range_check(&message_id, &user_message_limit)?;

let path_elements = merkle_proof.get_path_elements();
@@ -284,7 +288,9 @@ pub fn random_rln_witness(tree_height: usize) -> RLNWitnessInput {
}
}

pub fn proof_values_from_witness(rln_witness: &RLNWitnessInput) -> Result<RLNProofValues> {
pub fn proof_values_from_witness(
rln_witness: &RLNWitnessInput,
) -> Result<RLNProofValues, ProtocolError> {
message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;

// y share
@@ -523,19 +529,9 @@ pub fn compute_id_secret(share1: (Fr, Fr), share2: (Fr, Fr)) -> Result<Fr, String
// zkSNARK utility functions
///////////////////////////////////////////////////////

#[derive(Error, Debug)]
pub enum ProofError {
#[error("Error reading circuit key: {0}")]
CircuitKeyError(#[from] Report),
#[error("Error producing witness: {0}")]
WitnessError(Report),
#[error("Error producing proof: {0}")]
SynthesisError(#[from] SynthesisError),
}

fn calculate_witness_element<E: ark_ec::pairing::Pairing>(
witness: Vec<BigInt>,
) -> Result<Vec<E::ScalarField>> {
) -> Result<Vec<E::ScalarField>, ProtocolError> {
use ark_ff::PrimeField;
let modulus = <E::ScalarField as PrimeField>::MODULUS;

@@ -548,9 +544,9 @@ fn calculate_witness_element<E: ark_ec::pairing::Pairing>(
modulus.into()
- w.abs()
.to_biguint()
.ok_or(Report::msg("not a biguint value"))?
.ok_or(ProtocolError::BigUintConversion(w))?
} else {
w.to_biguint().ok_or(Report::msg("not a biguint value"))?
w.to_biguint().ok_or(ProtocolError::BigUintConversion(w))?
};
witness_vec.push(E::ScalarField::from(w))
}
@@ -566,8 +562,7 @@ pub fn generate_proof_with_witness(
#[cfg(test)]
let now = Instant::now();

let full_assignment =
calculate_witness_element::<Curve>(witness).map_err(ProofError::WitnessError)?;
let full_assignment = calculate_witness_element::<Curve>(witness)?;

#[cfg(test)]
println!("witness generation took: {:.2?}", now.elapsed());
@@ -604,7 +599,7 @@ pub fn generate_proof_with_witness(
/// Returns an error if `rln_witness.message_id` is not within `rln_witness.user_message_limit`.
pub fn inputs_for_witness_calculation(
rln_witness: &RLNWitnessInput,
) -> Result<[(&str, Vec<Fr>); 7]> {
) -> Result<[(&str, Vec<Fr>); 7], ProtocolError> {
message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;

let mut identity_path_index = Vec::with_capacity(rln_witness.identity_path_index.len());
@@ -732,7 +727,9 @@ where
/// # Errors
///
/// Returns an error if `rln_witness.message_id` is not within `rln_witness.user_message_limit`.
pub fn rln_witness_from_json(input_json: serde_json::Value) -> Result<RLNWitnessInput> {
pub fn rln_witness_from_json(
input_json: serde_json::Value,
) -> Result<RLNWitnessInput, ProtocolError> {
let rln_witness: RLNWitnessInput = serde_json::from_value(input_json).unwrap();
message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;

@@ -744,7 +741,9 @@ pub fn rln_witness_from_json(input_json: serde_json::Value) -> Result<RLNWitness
/// # Errors
///
/// Returns an error if `message_id` is not within `user_message_limit`.
pub fn rln_witness_to_json(rln_witness: &RLNWitnessInput) -> Result<serde_json::Value> {
pub fn rln_witness_to_json(
rln_witness: &RLNWitnessInput,
) -> Result<serde_json::Value, ProtocolError> {
message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;

let rln_witness_json = serde_json::to_value(rln_witness)?;
@@ -757,13 +756,15 @@ pub fn rln_witness_to_json(rln_witness: &RLNWitnessInput) -> Result<serde_json::
/// # Errors
///
/// Returns an error if `message_id` is not within `user_message_limit`.
pub fn rln_witness_to_bigint_json(rln_witness: &RLNWitnessInput) -> Result<serde_json::Value> {
pub fn rln_witness_to_bigint_json(
rln_witness: &RLNWitnessInput,
) -> Result<serde_json::Value, ProtocolError> {
message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;

let mut path_elements = Vec::new();

for v in rln_witness.path_elements.iter() {
path_elements.push(to_bigint(v)?.to_str_radix(10));
path_elements.push(to_bigint(v).to_str_radix(10));
}

let mut identity_path_index = Vec::new();
@@ -773,22 +774,26 @@ pub fn rln_witness_to_bigint_json(rln_witness: &RLNWitnessInput) -> Result<serde
.for_each(|v| identity_path_index.push(BigInt::from(*v).to_str_radix(10)));

let inputs = serde_json::json!({
"identitySecret": to_bigint(&rln_witness.identity_secret)?.to_str_radix(10),
"userMessageLimit": to_bigint(&rln_witness.user_message_limit)?.to_str_radix(10),
"messageId": to_bigint(&rln_witness.message_id)?.to_str_radix(10),
"identitySecret": to_bigint(&rln_witness.identity_secret).to_str_radix(10),
"userMessageLimit": to_bigint(&rln_witness.user_message_limit).to_str_radix(10),
"messageId": to_bigint(&rln_witness.message_id).to_str_radix(10),
"pathElements": path_elements,
"identityPathIndex": identity_path_index,
"x": to_bigint(&rln_witness.x)?.to_str_radix(10),
"externalNullifier": to_bigint(&rln_witness.external_nullifier)?.to_str_radix(10),
"x": to_bigint(&rln_witness.x).to_str_radix(10),
"externalNullifier": to_bigint(&rln_witness.external_nullifier).to_str_radix(10),
});

Ok(inputs)
}

pub fn message_id_range_check(message_id: &Fr, user_message_limit: &Fr) -> Result<()> {
pub fn message_id_range_check(
message_id: &Fr,
user_message_limit: &Fr,
) -> Result<(), ProtocolError> {
if message_id > user_message_limit {
return Err(color_eyre::Report::msg(
"message_id is not within user_message_limit",
return Err(ProtocolError::InvalidMessageId(
*message_id,
*user_message_limit,
));
}
Ok(())
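The typed ProtocolError makes the out-of-range case above inspectable by callers. A minimal sketch (`check` is a hypothetical helper):

```rust
fn check(message_id: Fr, limit: Fr) {
    match message_id_range_check(&message_id, &limit) {
        Ok(()) => println!("message_id within limit"),
        Err(ProtocolError::InvalidMessageId(id, lim)) => {
            eprintln!("message_id {id} is not within user_message_limit {lim}")
        }
        Err(e) => eprintln!("protocol error: {e}"),
    }
}
```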
@@ -18,14 +18,14 @@ use {
std::default::Default,
};

use crate::error::{ConversionError, ProtocolError, RLNError};
use ark_groth16::{Proof as ArkProof, ProvingKey, VerifyingKey};
use ark_relations::r1cs::ConstraintMatrices;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, Write};
use color_eyre::{Report, Result};
use std::io::Cursor;

#[cfg(target_arch = "wasm32")]
use num_bigint::BigInt;
use std::io::Cursor;
use utils::error::ZerokitMerkleTreeError;

/// The application-specific RLN identifier.
///
@@ -64,7 +64,7 @@ impl RLN {
/// let mut rln = RLN::new(tree_height, input);
/// ```
#[cfg(all(not(target_arch = "wasm32"), not(feature = "stateless")))]
pub fn new<R: Read>(tree_height: usize, mut input_data: R) -> Result<RLN> {
pub fn new<R: Read>(tree_height: usize, mut input_data: R) -> Result<RLN, RLNError> {
// We read input
let mut input: Vec<u8> = Vec::new();
input_data.read_to_end(&mut input)?;
@@ -108,7 +108,7 @@ impl RLN {
/// ```
#[cfg_attr(docsrs, doc(cfg(feature = "stateless")))]
#[cfg(all(not(target_arch = "wasm32"), feature = "stateless"))]
pub fn new() -> Result<RLN> {
pub fn new() -> Result<RLN, RLNError> {
let proving_key = zkey_from_folder().to_owned();
let verification_key = proving_key.0.vk.to_owned();
let graph_data = graph_from_folder().to_owned();
@@ -162,7 +162,7 @@ impl RLN {
zkey_vec: Vec<u8>,
graph_data: Vec<u8>,
mut tree_config_input: R,
) -> Result<RLN> {
) -> Result<RLN, RLNError> {
let proving_key = zkey_from_raw(&zkey_vec)?;
let verification_key = proving_key.0.vk.to_owned();

@@ -221,7 +221,7 @@ impl RLN {
/// );
/// ```
#[cfg(all(not(target_arch = "wasm32"), feature = "stateless"))]
pub fn new_with_params(zkey_vec: Vec<u8>, graph_data: Vec<u8>) -> Result<RLN> {
pub fn new_with_params(zkey_vec: Vec<u8>, graph_data: Vec<u8>) -> Result<RLN, RLNError> {
let proving_key = zkey_from_raw(&zkey_vec)?;
let verification_key = proving_key.0.vk.to_owned();

@@ -252,7 +252,7 @@ impl RLN {
/// let mut rln = RLN::new_with_params(zkey_vec)?;
/// ```
#[cfg(all(target_arch = "wasm32", feature = "stateless"))]
pub fn new_with_params(zkey_vec: Vec<u8>) -> Result<RLN> {
pub fn new_with_params(zkey_vec: Vec<u8>) -> Result<RLN, RLNError> {
let proving_key = zkey_from_raw(&zkey_vec)?;
let verification_key = proving_key.0.vk.to_owned();

@@ -272,7 +272,7 @@ impl RLN {
/// Input values are:
/// - `tree_height`: the height of the Merkle tree.
#[cfg(not(feature = "stateless"))]
pub fn set_tree(&mut self, tree_height: usize) -> Result<()> {
pub fn set_tree(&mut self, tree_height: usize) -> Result<(), RLNError> {
// We compute a default empty tree of desired height
self.tree = PoseidonTree::default(tree_height)?;

@@ -303,7 +303,7 @@ impl RLN {
/// rln.set_leaf(id_index, &mut buffer).unwrap();
/// ```
#[cfg(not(feature = "stateless"))]
pub fn set_leaf<R: Read>(&mut self, index: usize, mut input_data: R) -> Result<()> {
pub fn set_leaf<R: Read>(&mut self, index: usize, mut input_data: R) -> Result<(), RLNError> {
// We read input
let mut leaf_byte: Vec<u8> = Vec::new();
input_data.read_to_end(&mut leaf_byte)?;
@@ -333,7 +333,7 @@ impl RLN {
/// rln.get_leaf(id_index, &mut buffer).unwrap();
/// let rate_commitment = deserialize_field_element(&buffer.into_inner()).unwrap();
#[cfg(not(feature = "stateless"))]
pub fn get_leaf<W: Write>(&self, index: usize, mut output_data: W) -> Result<()> {
pub fn get_leaf<W: Write>(&self, index: usize, mut output_data: W) -> Result<(), RLNError> {
// We get the leaf at input index
let leaf = self.tree.get(index)?;

@@ -376,7 +376,11 @@ impl RLN {
/// rln.set_leaves_from(index, &mut buffer).unwrap();
/// ```
#[cfg(not(feature = "stateless"))]
pub fn set_leaves_from<R: Read>(&mut self, index: usize, mut input_data: R) -> Result<()> {
pub fn set_leaves_from<R: Read>(
&mut self,
index: usize,
mut input_data: R,
) -> Result<(), RLNError> {
// We read input
let mut leaves_byte: Vec<u8> = Vec::new();
input_data.read_to_end(&mut leaves_byte)?;
@@ -385,8 +389,7 @@ impl RLN {

// We set the leaves
self.tree
.override_range(index, leaves.into_iter(), [].into_iter())
.map_err(|_| Report::msg("Could not set leaves"))?;
.override_range(index, leaves.into_iter(), [].into_iter())?;
Ok(())
}

@@ -397,7 +400,7 @@ impl RLN {
/// Input values are:
/// - `input_data`: a reader for the serialization of multiple leaf values (serialization done with [`rln::utils::vec_fr_to_bytes_le`](crate::utils::vec_fr_to_bytes_le))
#[cfg(not(feature = "stateless"))]
pub fn init_tree_with_leaves<R: Read>(&mut self, input_data: R) -> Result<()> {
pub fn init_tree_with_leaves<R: Read>(&mut self, input_data: R) -> Result<(), RLNError> {
// reset the tree
// NOTE: this requires the tree to be initialized with the correct height initially
// TODO: accept tree_height as a parameter and initialize the tree with that height
@@ -453,7 +456,7 @@ impl RLN {
index: usize,
mut input_leaves: R,
mut input_indices: R,
) -> Result<()> {
) -> Result<(), RLNError> {
// We read input
let mut leaves_byte: Vec<u8> = Vec::new();
input_leaves.read_to_end(&mut leaves_byte)?;
@@ -468,8 +471,7 @@ impl RLN {

// We set the leaves
self.tree
.override_range(index, leaves.into_iter(), indices.into_iter())
.map_err(|e| Report::msg(format!("Could not perform the batch operation: {e}")))?;
.override_range(index, leaves.into_iter(), indices.into_iter())?;
Ok(())
}
@@ -522,7 +524,7 @@ impl RLN {
/// rln.set_next_leaf(&mut buffer).unwrap();
/// ```
#[cfg(not(feature = "stateless"))]
pub fn set_next_leaf<R: Read>(&mut self, mut input_data: R) -> Result<()> {
pub fn set_next_leaf<R: Read>(&mut self, mut input_data: R) -> Result<(), RLNError> {
// We read input
let mut leaf_byte: Vec<u8> = Vec::new();
input_data.read_to_end(&mut leaf_byte)?;
@@ -548,7 +550,7 @@ impl RLN {
/// rln.delete_leaf(index).unwrap();
/// ```
#[cfg(not(feature = "stateless"))]
pub fn delete_leaf(&mut self, index: usize) -> Result<()> {
pub fn delete_leaf(&mut self, index: usize) -> Result<(), RLNError> {
self.tree.delete(index)?;
Ok(())
}
@@ -566,7 +568,7 @@ impl RLN {
/// rln.set_metadata(metadata).unwrap();
/// ```
#[cfg(not(feature = "stateless"))]
pub fn set_metadata(&mut self, metadata: &[u8]) -> Result<()> {
pub fn set_metadata(&mut self, metadata: &[u8]) -> Result<(), RLNError> {
self.tree.set_metadata(metadata)?;
Ok(())
}
@@ -586,7 +588,7 @@ impl RLN {
/// let metadata = buffer.into_inner();
/// ```
#[cfg(not(feature = "stateless"))]
pub fn get_metadata<W: Write>(&self, mut output_data: W) -> Result<()> {
pub fn get_metadata<W: Write>(&self, mut output_data: W) -> Result<(), RLNError> {
let metadata = self.tree.metadata()?;
output_data.write_all(&metadata)?;
Ok(())
@@ -606,10 +608,9 @@ impl RLN {
/// let (root, _) = bytes_le_to_fr(&buffer.into_inner());
/// ```
#[cfg(not(feature = "stateless"))]
pub fn get_root<W: Write>(&self, mut output_data: W) -> Result<()> {
pub fn get_root<W: Write>(&self, mut output_data: W) -> Result<(), RLNError> {
let root = self.tree.root();
output_data.write_all(&fr_to_bytes_le(&root))?;

Ok(())
}

@@ -634,7 +635,7 @@ impl RLN {
level: usize,
index: usize,
mut output_data: W,
) -> Result<()> {
) -> Result<(), RLNError> {
let subroot = self.tree.get_subtree_root(level, index)?;
output_data.write_all(&fr_to_bytes_le(&subroot))?;

@@ -663,13 +664,14 @@ impl RLN {
/// let (identity_path_index, _) = bytes_le_to_vec_u8(&buffer_inner[read..].to_vec());
/// ```
#[cfg(not(feature = "stateless"))]
pub fn get_proof<W: Write>(&self, index: usize, mut output_data: W) -> Result<()> {
pub fn get_proof<W: Write>(&self, index: usize, mut output_data: W) -> Result<(), RLNError> {
let merkle_proof = self.tree.proof(index).expect("proof should exist");
let path_elements = merkle_proof.get_path_elements();
let identity_path_index = merkle_proof.get_path_index();

output_data.write_all(&vec_fr_to_bytes_le(&path_elements)?)?;
output_data.write_all(&vec_u8_to_bytes_le(&identity_path_index)?)?;
// Note: unwrap safe - vec_fr_to_bytes_le & vec_u8_to_bytes_le are infallible
output_data.write_all(&vec_fr_to_bytes_le(&path_elements))?;
output_data.write_all(&vec_u8_to_bytes_le(&identity_path_index))?;

Ok(())
}
@@ -707,7 +709,7 @@ impl RLN {
/// assert_eq!(idxs, [0, 1, 2, 3, 4]);
/// ```
#[cfg(not(feature = "stateless"))]
pub fn get_empty_leaves_indices<W: Write>(&self, mut output_data: W) -> Result<()> {
pub fn get_empty_leaves_indices<W: Write>(&self, mut output_data: W) -> Result<(), RLNError> {
let idxs = self.tree.get_empty_leaves_indices();
idxs.serialize_compressed(&mut output_data)?;
Ok(())
@@ -742,7 +744,7 @@ impl RLN {
&mut self,
mut input_data: R,
mut output_data: W,
) -> Result<()> {
) -> Result<(), RLNError> {
// We read input RLN witness and we serialize_compressed it
let mut serialized_witness: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized_witness)?;
@@ -792,7 +794,7 @@ impl RLN {
///
/// assert!(verified);
/// ```
pub fn verify<R: Read>(&self, mut input_data: R) -> Result<bool> {
pub fn verify<R: Read>(&self, mut input_data: R) -> Result<bool, RLNError> {
// Input data is serialized for Curve as:
// serialized_proof (compressed, 4*32 bytes) || serialized_proof_values (6*32 bytes), i.e.
// [ proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32> ]
@@ -863,7 +865,7 @@ impl RLN {
&mut self,
mut input_data: R,
mut output_data: W,
) -> Result<()> {
) -> Result<(), RLNError> {
// We read input RLN witness and we serialize_compressed it
let mut witness_byte: Vec<u8> = Vec::new();
input_data.read_to_end(&mut witness_byte)?;
@@ -888,7 +890,7 @@ impl RLN {
&mut self,
mut input_data: R,
mut output_data: W,
) -> Result<()> {
) -> Result<(), RLNError> {
let mut serialized_witness: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized_witness)?;
let (rln_witness, _) = deserialize_witness(&serialized_witness)?;
@@ -912,7 +914,7 @@ impl RLN {
calculated_witness: Vec<BigInt>,
serialized_witness: Vec<u8>,
mut output_data: W,
) -> Result<()> {
) -> Result<(), RLNError> {
let (rln_witness, _) = deserialize_witness(&serialized_witness[..])?;
let proof_values = proof_values_from_witness(&rln_witness)?;

@@ -953,7 +955,7 @@ impl RLN {
/// assert!(verified);
/// ```
#[cfg(not(feature = "stateless"))]
pub fn verify_rln_proof<R: Read>(&self, mut input_data: R) -> Result<bool> {
pub fn verify_rln_proof<R: Read>(&self, mut input_data: R) -> Result<bool, RLNError> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
let mut all_read = 0;
@@ -964,8 +966,11 @@ impl RLN {
all_read += read;

let signal_len = usize::try_from(u64::from_le_bytes(
serialized[all_read..all_read + 8].try_into()?,
))?;
serialized[all_read..all_read + 8]
.try_into()
.map_err(ConversionError::FromSlice)?,
))
.map_err(ConversionError::from)?;
all_read += 8;

let signal: Vec<u8> = serialized[all_read..all_read + signal_len].to_vec();
@@ -1028,7 +1033,11 @@ impl RLN {
///
/// assert!(verified);
/// ```
pub fn verify_with_roots<R: Read>(&self, mut input_data: R, mut roots_data: R) -> Result<bool> {
pub fn verify_with_roots<R: Read>(
&self,
mut input_data: R,
mut roots_data: R,
) -> Result<bool, RLNError> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
let mut all_read = 0;
@@ -1039,8 +1048,11 @@ impl RLN {
all_read += read;

let signal_len = usize::try_from(u64::from_le_bytes(
serialized[all_read..all_read + 8].try_into()?,
))?;
serialized[all_read..all_read + 8]
.try_into()
.map_err(ConversionError::FromSlice)?,
))
.map_err(ConversionError::ToUsize)?;
all_read += 8;

let signal: Vec<u8> = serialized[all_read..all_read + signal_len].to_vec();
@@ -1109,7 +1121,7 @@ impl RLN {
/// // We serialize_compressed the keygen output
/// let (identity_secret_hash, id_commitment) = deserialize_identity_pair(buffer.into_inner());
/// ```
pub fn key_gen<W: Write>(&self, mut output_data: W) -> Result<()> {
pub fn key_gen<W: Write>(&self, mut output_data: W) -> Result<(), RLNError> {
let (identity_secret_hash, id_commitment) = keygen();
output_data.write_all(&fr_to_bytes_le(&identity_secret_hash))?;
output_data.write_all(&fr_to_bytes_le(&id_commitment))?;
@@ -1139,7 +1151,7 @@ impl RLN {
/// // We serialize_compressed the keygen output
/// let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) = deserialize_identity_tuple(buffer.into_inner());
/// ```
pub fn extended_key_gen<W: Write>(&self, mut output_data: W) -> Result<()> {
pub fn extended_key_gen<W: Write>(&self, mut output_data: W) -> Result<(), RLNError> {
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
extended_keygen();
output_data.write_all(&fr_to_bytes_le(&identity_trapdoor))?;
@@ -1178,7 +1190,7 @@ impl RLN {
&self,
mut input_data: R,
mut output_data: W,
) -> Result<()> {
) -> Result<(), RLNError> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;

@@ -1221,7 +1233,7 @@ impl RLN {
&self,
mut input_data: R,
mut output_data: W,
) -> Result<()> {
) -> Result<(), RLNError> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;

@@ -1274,7 +1286,7 @@ impl RLN {
mut input_proof_data_1: R,
mut input_proof_data_2: R,
mut output_data: W,
) -> Result<()> {
) -> Result<(), RLNError> {
// We serialize_compressed the two proofs, and we get the corresponding RLNProofValues objects
let mut serialized: Vec<u8> = Vec::new();
input_proof_data_1.read_to_end(&mut serialized)?;
@@ -1304,7 +1316,7 @@ impl RLN {
if let Ok(identity_secret_hash) = recovered_identity_secret_hash {
output_data.write_all(&fr_to_bytes_le(&identity_secret_hash))?;
} else {
return Err(Report::msg("could not extract secret"));
return Err(RLNError::RecoverSecret);
}
}

@@ -1318,13 +1330,16 @@ impl RLN {
///
/// The function returns the corresponding [`RLNWitnessInput`] object serialized using [`rln::protocol::serialize_witness`](crate::protocol::serialize_witness).
#[cfg(not(feature = "stateless"))]
pub fn get_serialized_rln_witness<R: Read>(&mut self, mut input_data: R) -> Result<Vec<u8>> {
pub fn get_serialized_rln_witness<R: Read>(
&mut self,
mut input_data: R,
) -> Result<Vec<u8>, RLNError> {
// We read input RLN witness and we serialize_compressed it
let mut witness_byte: Vec<u8> = Vec::new();
input_data.read_to_end(&mut witness_byte)?;
let (rln_witness, _) = proof_inputs_to_rln_witness(&mut self.tree, &witness_byte)?;

serialize_witness(&rln_witness)
serialize_witness(&rln_witness).map_err(RLNError::Protocol)
}

/// Converts a byte serialization of a [`RLNWitnessInput`] object to the corresponding JSON serialization.
@@ -1333,7 +1348,10 @@ impl RLN {
/// - `serialized_witness`: the byte serialization of a [`RLNWitnessInput`] object (serialization done with [`rln::protocol::serialize_witness`](crate::protocol::serialize_witness)).
///
/// The function returns the corresponding JSON encoding of the input [`RLNWitnessInput`] object.
pub fn get_rln_witness_json(&mut self, serialized_witness: &[u8]) -> Result<serde_json::Value> {
pub fn get_rln_witness_json(
&mut self,
serialized_witness: &[u8],
) -> Result<serde_json::Value, ProtocolError> {
let (rln_witness, _) = deserialize_witness(serialized_witness)?;
rln_witness_to_json(&rln_witness)
}
@@ -1348,7 +1366,7 @@ impl RLN {
pub fn get_rln_witness_bigint_json(
&mut self,
serialized_witness: &[u8],
) -> Result<serde_json::Value> {
) -> Result<serde_json::Value, ProtocolError> {
let (rln_witness, _) = deserialize_witness(serialized_witness)?;
rln_witness_to_bigint_json(&rln_witness)
}
@@ -1358,7 +1376,7 @@ impl RLN {
/// If not called, the connection will be closed when the RLN object is dropped.
/// This improves robustness of the tree.
#[cfg(not(feature = "stateless"))]
pub fn flush(&mut self) -> Result<()> {
pub fn flush(&mut self) -> Result<(), ZerokitMerkleTreeError> {
self.tree.close_db_connection()
}
}
@@ -1399,7 +1417,10 @@ impl Default for RLN {
/// // We serialize_compressed the keygen output
/// let field_element = deserialize_field_element(output_buffer.into_inner());
/// ```
pub fn hash<R: Read, W: Write>(mut input_data: R, mut output_data: W) -> Result<()> {
pub fn hash<R: Read, W: Write>(
mut input_data: R,
mut output_data: W,
) -> Result<(), std::io::Error> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;

@@ -1432,7 +1453,10 @@ pub fn hash<R: Read, W: Write>(mut input_data: R, mut output_data: W) -> Result<
/// // We serialize_compressed the hash output
/// let hash_result = deserialize_field_element(output_buffer.into_inner());
/// ```
pub fn poseidon_hash<R: Read, W: Write>(mut input_data: R, mut output_data: W) -> Result<()> {
pub fn poseidon_hash<R: Read, W: Write>(
mut input_data: R,
mut output_data: W,
) -> Result<(), RLNError> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
@@ -231,7 +231,7 @@ mod tree_test {
rln.set_tree(tree_height).unwrap();

// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
rln.init_tree_with_leaves(&mut buffer).unwrap();

// We check if number of leaves set is consistent
@@ -289,7 +289,7 @@ mod tree_test {
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();

// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
rln.init_tree_with_leaves(&mut buffer).unwrap();

// We check if number of leaves set is consistent
@@ -303,11 +303,11 @@ mod tree_test {
// `init_tree_with_leaves` resets the tree to the height it was initialized with, using `set_tree`

// We add leaves in a batch starting from index 0..set_index
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[0..set_index]).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[0..set_index]));
rln.init_tree_with_leaves(&mut buffer).unwrap();

// We add the remaining n leaves in a batch starting from index m
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[set_index..]).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[set_index..]));
rln.set_leaves_from(set_index, &mut buffer).unwrap();

// We check if number of leaves set is consistent
@@ -359,7 +359,7 @@ mod tree_test {
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();

// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
rln.init_tree_with_leaves(&mut buffer).unwrap();

// We check if number of leaves set is consistent
@@ -377,8 +377,8 @@ mod tree_test {
let last_leaf_index = no_of_leaves - 1;
let indices = vec![last_leaf_index as u8];
let last_leaf = vec![*last_leaf];
let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices).unwrap());
let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&last_leaf).unwrap());
let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices));
let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&last_leaf));

rln.atomic_operation(last_leaf_index, leaves_buffer, indices_buffer)
.unwrap();
@@ -408,7 +408,7 @@ mod tree_test {
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();

// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
rln.init_tree_with_leaves(&mut buffer).unwrap();

// We check if number of leaves set is consistent
@@ -422,8 +422,8 @@ mod tree_test {
let zero_index = 0;
let indices = vec![zero_index as u8];
let zero_leaf: Vec<Fr> = vec![];
let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices).unwrap());
let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&zero_leaf).unwrap());
let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices));
let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&zero_leaf));
rln.atomic_operation(0, leaves_buffer, indices_buffer)
.unwrap();

@@ -452,7 +452,7 @@ mod tree_test {
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();

// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
rln.init_tree_with_leaves(&mut buffer).unwrap();

// We check if number of leaves set is consistent
@@ -466,8 +466,8 @@ mod tree_test {
let set_index = rng.gen_range(0..no_of_leaves) as usize;
let indices = vec![set_index as u8];
let zero_leaf: Vec<Fr> = vec![];
let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices).unwrap());
let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&zero_leaf).unwrap());
let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices));
let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&zero_leaf));
rln.atomic_operation(0, leaves_buffer, indices_buffer)
.unwrap();

@@ -509,7 +509,7 @@ mod tree_test {
let (root_empty, _) = bytes_le_to_fr(&buffer.into_inner());

// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));

#[allow(unused_must_use)]
rln.set_leaves_from(bad_index, &mut buffer)
@@ -599,7 +599,7 @@ mod tree_test {
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();

// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
rln.init_tree_with_leaves(&mut buffer).unwrap();

// Generate identity pair
@@ -671,7 +671,7 @@ mod tree_test {
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();

// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
rln.init_tree_with_leaves(&mut buffer).unwrap();

// Generate identity pair
@@ -754,7 +754,7 @@ mod tree_test {
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();

// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
rln.init_tree_with_leaves(&mut buffer).unwrap();

// Generate identity pair
@@ -1,17 +1,16 @@
// This crate provides cross-module useful utilities (mainly type conversions) not necessarily specific to RLN

use crate::circuit::Fr;
use crate::error::ConversionError;
use ark_ff::PrimeField;
use color_eyre::{Report, Result};
use num_bigint::{BigInt, BigUint};
use num_traits::Num;
use serde_json::json;
use std::io::Cursor;

use crate::circuit::Fr;

#[inline(always)]
pub fn to_bigint(el: &Fr) -> Result<BigInt> {
Ok(BigUint::from(*el).into())
pub fn to_bigint(el: &Fr) -> BigInt {
BigUint::from(*el).into()
}

#[inline(always)]
@@ -21,9 +20,9 @@ pub fn fr_byte_size() -> usize {
}

#[inline(always)]
pub fn str_to_fr(input: &str, radix: u32) -> Result<Fr> {
pub fn str_to_fr(input: &str, radix: u32) -> Result<Fr, ConversionError> {
if !(radix == 10 || radix == 16) {
return Err(Report::msg("wrong radix"));
return Err(ConversionError::WrongRadix);
}

// We remove any quote present and we trim
@@ -58,7 +57,7 @@ pub fn fr_to_bytes_le(input: &Fr) -> Vec<u8> {
}

#[inline(always)]
pub fn vec_fr_to_bytes_le(input: &[Fr]) -> Result<Vec<u8>> {
pub fn vec_fr_to_bytes_le(input: &[Fr]) -> Vec<u8> {
// Calculate capacity for Vec:
// - 8 bytes for normalized vector length (usize)
// - each Fr element requires fr_byte_size() bytes (typically 32 bytes)
@@ -72,11 +71,11 @@ pub fn vec_fr_to_bytes_le(input: &[Fr]) -> Result<Vec<u8>> {
bytes.extend_from_slice(&fr_to_bytes_le(el));
}

Ok(bytes)
bytes
}

#[inline(always)]
pub fn vec_u8_to_bytes_le(input: &[u8]) -> Result<Vec<u8>> {
pub fn vec_u8_to_bytes_le(input: &[u8]) -> Vec<u8> {
// Calculate capacity for Vec:
// - 8 bytes for normalized vector length (usize)
// - variable length input data
@@ -88,11 +87,11 @@ pub fn vec_u8_to_bytes_le(input: &[u8]) -> Result<Vec<u8>> {
// We store the input
bytes.extend_from_slice(input);

Ok(bytes)
bytes
}

#[inline(always)]
pub fn bytes_le_to_vec_u8(input: &[u8]) -> Result<(Vec<u8>, usize)> {
pub fn bytes_le_to_vec_u8(input: &[u8]) -> Result<(Vec<u8>, usize), ConversionError> {
let mut read: usize = 0;

let len = usize::try_from(u64::from_le_bytes(input[0..8].try_into()?))?;
@@ -105,7 +104,7 @@ pub fn bytes_le_to_vec_u8(input: &[u8]) -> Result<(Vec<u8>, usize)> {
}

#[inline(always)]
pub fn bytes_le_to_vec_fr(input: &[u8]) -> Result<(Vec<Fr>, usize)> {
pub fn bytes_le_to_vec_fr(input: &[u8]) -> Result<(Vec<Fr>, usize), ConversionError> {
let mut read: usize = 0;
let mut res: Vec<Fr> = Vec::new();

@@ -123,7 +122,7 @@ pub fn bytes_le_to_vec_fr(input: &[u8]) -> Result<(Vec<Fr>, usize)> {
}

#[inline(always)]
pub fn bytes_le_to_vec_usize(input: &[u8]) -> Result<Vec<usize>> {
pub fn bytes_le_to_vec_usize(input: &[u8]) -> Result<Vec<usize>, ConversionError> {
let nof_elem = usize::try_from(u64::from_le_bytes(input[0..8].try_into()?))?;
if nof_elem == 0 {
Ok(vec![])
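The hunks above change the rln serialization helpers from eyre-wrapped results to either infallible return values or a typed ConversionError. A minimal caller-side sketch of the new shapes, assuming the helpers stay exposed under rln::utils, Fr under rln::circuit, and ConversionError under rln::error as the new import suggests:

use rln::circuit::Fr;
use rln::error::ConversionError;
use rln::utils::{bytes_le_to_vec_fr, str_to_fr, vec_fr_to_bytes_le};

fn roundtrip() -> Result<(), ConversionError> {
    // Serialization is now infallible, so the `.unwrap()` calls removed in the tests go away.
    let leaves = vec![Fr::from(1u64), Fr::from(2u64)];
    let bytes = vec_fr_to_bytes_le(&leaves);

    // Deserialization can still fail, but with a typed ConversionError instead of an eyre Report.
    let (decoded, _bytes_read) = bytes_le_to_vec_fr(&bytes)?;
    assert_eq!(decoded, leaves);

    // An unsupported radix now maps to ConversionError::WrongRadix.
    let _one = str_to_fr("1", 10)?;
    Ok(())
}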
@@ -27,7 +27,7 @@ mod test {
}

fn set_leaves_init(rln_pointer: &mut RLN, leaves: &[Fr]) {
let leaves_ser = vec_fr_to_bytes_le(&leaves).unwrap();
let leaves_ser = vec_fr_to_bytes_le(&leaves);
let input_buffer = &Buffer::from(leaves_ser.as_ref());
let success = init_tree_with_leaves(rln_pointer, input_buffer);
assert!(success, "init tree with leaves call failed");
@@ -170,7 +170,7 @@ mod test {
set_leaves_init(rln_pointer, &leaves[0..set_index]);

// We add the remaining n leaves in a batch starting from index set_index
let leaves_n = vec_fr_to_bytes_le(&leaves[set_index..]).unwrap();
let leaves_n = vec_fr_to_bytes_le(&leaves[set_index..]);
let buffer = &Buffer::from(leaves_n.as_ref());
let success = set_leaves_from(rln_pointer, set_index, buffer);
assert!(success, "set leaves from call failed");
@@ -220,9 +220,9 @@ mod test {
let last_leaf_index = NO_OF_LEAVES - 1;
let indices = vec![last_leaf_index as u8];
let last_leaf = vec![*last_leaf];
let indices = vec_u8_to_bytes_le(&indices).unwrap();
let indices = vec_u8_to_bytes_le(&indices);
let indices_buffer = &Buffer::from(indices.as_ref());
let leaves = vec_fr_to_bytes_le(&last_leaf).unwrap();
let leaves = vec_fr_to_bytes_le(&last_leaf);
let leaves_buffer = &Buffer::from(leaves.as_ref());

let success = atomic_operation(
@@ -253,7 +253,7 @@ mod test {
let root_empty = get_tree_root(rln_pointer);

// We add leaves in a batch into the tree
let leaves = vec_fr_to_bytes_le(&leaves).unwrap();
let leaves = vec_fr_to_bytes_le(&leaves);
let buffer = &Buffer::from(leaves.as_ref());
let success = set_leaves_from(rln_pointer, bad_index, buffer);
assert!(!success, "set leaves from call succeeded");
@@ -892,7 +892,7 @@ mod test {
for _ in 0..number_of_inputs {
inputs.push(Fr::rand(&mut rng));
}
let inputs_ser = vec_fr_to_bytes_le(&inputs).unwrap();
let inputs_ser = vec_fr_to_bytes_le(&inputs);
let input_buffer = &Buffer::from(inputs_ser.as_ref());

let expected_hash = utils_poseidon_hash(inputs.as_ref());
@@ -1433,7 +1433,7 @@ mod stateless_test {
for _ in 0..number_of_inputs {
inputs.push(Fr::rand(&mut rng));
}
let inputs_ser = vec_fr_to_bytes_le(&inputs).unwrap();
let inputs_ser = vec_fr_to_bytes_le(&inputs);
let input_buffer = &Buffer::from(inputs_ser.as_ref());

let expected_hash = utils_poseidon_hash(inputs.as_ref());
@@ -243,7 +243,7 @@ mod test {
}
let expected_hash = utils_poseidon_hash(&inputs);

let mut input_buffer = Cursor::new(vec_fr_to_bytes_le(&inputs).unwrap());
let mut input_buffer = Cursor::new(vec_fr_to_bytes_le(&inputs));
let mut output_buffer = Cursor::new(Vec::<u8>::new());

public_poseidon_hash(&mut input_buffer, &mut output_buffer).unwrap();
@@ -16,13 +16,13 @@ ark-ff = { version = "0.5.0", default-features = false, features = [
"parallel",
] }
num-bigint = { version = "0.4.6", default-features = false }
color-eyre = "0.6.4"
pmtree = { package = "vacp2p_pmtree", version = "2.0.2", optional = true }
sled = "0.34.7"
serde = "1.0"
serde_json = "1.0"
lazy_static = "1.5.0"
hex = "0.4.3"
rayon = "1.7.0"
thiserror = "2.0"

[dev-dependencies]
ark-bn254 = { version = "0.5.0", features = ["std"] }
31
utils/src/merkle_tree/error.rs
Normal file
@@ -0,0 +1,31 @@
#[derive(thiserror::Error, Debug)]
pub enum ZerokitMerkleTreeError {
#[error("Invalid index")]
InvalidIndex,
// InvalidProof,
#[error("Leaf index out of bounds")]
InvalidLeaf,
#[error("Level exceeds tree depth")]
InvalidLevel,
#[error("Subtree index out of bounds")]
InvalidSubTreeIndex,
#[error("Start level is != from end level")]
InvalidStartAndEndLevel,
#[error("set_range got too many leaves")]
TooManySet,
#[error("Unknown error while computing merkle proof")]
ComputingProofError,
#[error("Invalid witness length (!= tree depth)")]
InvalidWitness,
#[cfg(feature = "pmtree-ft")]
#[error("Pmtree error: {0}")]
PmtreeErrorKind(#[from] pmtree::PmtreeErrorKind),
}

#[derive(Debug, thiserror::Error)]
pub enum FromConfigError {
#[error("Error while reading pmtree config: {0}")]
JsonError(#[from] serde_json::Error),
#[error("Error while creating pmtree config: path already exists")]
PathExists,
}
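The new error module gives callers concrete variants to branch on instead of string-based reports. A small sketch of matching on the typed error; the import paths are assumed and the exact re-export location in zerokit_utils may differ:

use zerokit_utils::merkle_tree::error::ZerokitMerkleTreeError;
use zerokit_utils::merkle_tree::ZerokitMerkleTree;

// Returns a short description of the outcome of a leaf lookup.
fn describe_get<T: ZerokitMerkleTree>(tree: &T, index: usize) -> &'static str {
    match tree.get(index) {
        Ok(_) => "leaf read",
        Err(ZerokitMerkleTreeError::InvalidLeaf) => "leaf index out of bounds",
        Err(_) => "other tree error",
    }
}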
@@ -5,11 +5,12 @@ use std::{
str::FromStr,
};

use color_eyre::{Report, Result};
use rayon::iter::{IntoParallelIterator, ParallelIterator};

use crate::merkle_tree::{FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES};

use crate::merkle_tree::{
error::{FromConfigError, ZerokitMerkleTreeError},
FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES,
};
////////////////////////////////////////////////////////////
///// Full Merkle Tree Implementation
////////////////////////////////////////////////////////////
@@ -56,9 +57,9 @@ pub struct FullMerkleProof<H: Hasher>(pub Vec<FullMerkleBranch<H>>);
pub struct FullMerkleConfig(());

impl FromStr for FullMerkleConfig {
type Err = Report;
type Err = FromConfigError;

fn from_str(_s: &str) -> Result<Self> {
fn from_str(_s: &str) -> Result<Self, Self::Err> {
Ok(FullMerkleConfig::default())
}
}
@@ -72,13 +73,17 @@ where
type Hasher = H;
type Config = FullMerkleConfig;

fn default(depth: usize) -> Result<Self> {
fn default(depth: usize) -> Result<Self, ZerokitMerkleTreeError> {
FullMerkleTree::<H>::new(depth, Self::Hasher::default_leaf(), Self::Config::default())
}

/// Creates a new `MerkleTree`
/// depth - the height of the tree made only of hash nodes. 2^depth is the maximum number of leaves hash nodes
fn new(depth: usize, default_leaf: FrOf<Self::Hasher>, _config: Self::Config) -> Result<Self> {
fn new(
depth: usize,
default_leaf: FrOf<Self::Hasher>,
_config: Self::Config,
) -> Result<Self, ZerokitMerkleTreeError> {
// Compute cache node values, leaf to root
let mut cached_nodes: Vec<H::Fr> = Vec::with_capacity(depth + 1);
cached_nodes.push(default_leaf);
@@ -105,7 +110,7 @@ where
})
}

fn close_db_connection(&mut self) -> Result<()> {
fn close_db_connection(&mut self) -> Result<(), ZerokitMerkleTreeError> {
Ok(())
}

@@ -130,27 +135,27 @@ where
}

/// Sets a leaf at the specified tree index
fn set(&mut self, leaf: usize, hash: FrOf<Self::Hasher>) -> Result<()> {
fn set(&mut self, leaf: usize, hash: FrOf<Self::Hasher>) -> Result<(), ZerokitMerkleTreeError> {
self.set_range(leaf, once(hash))?;
self.next_index = max(self.next_index, leaf + 1);
Ok(())
}

/// Get a leaf from the specified tree index
fn get(&self, leaf: usize) -> Result<FrOf<Self::Hasher>> {
fn get(&self, leaf: usize) -> Result<FrOf<Self::Hasher>, ZerokitMerkleTreeError> {
if leaf >= self.capacity() {
return Err(Report::msg("leaf index out of bounds"));
return Err(ZerokitMerkleTreeError::InvalidLeaf);
}
Ok(self.nodes[self.capacity() + leaf - 1])
}

/// Returns the root of the subtree at level n and index
fn get_subtree_root(&self, n: usize, index: usize) -> Result<H::Fr> {
fn get_subtree_root(&self, n: usize, index: usize) -> Result<H::Fr, ZerokitMerkleTreeError> {
if n > self.depth() {
return Err(Report::msg("level exceeds depth size"));
return Err(ZerokitMerkleTreeError::InvalidIndex);
}
if index >= self.capacity() {
return Err(Report::msg("index exceeds set size"));
return Err(ZerokitMerkleTreeError::InvalidLeaf);
}
if n == 0 {
Ok(self.root())
@@ -188,14 +193,14 @@ where
&mut self,
start: usize,
leaves: I,
) -> Result<()> {
) -> Result<(), ZerokitMerkleTreeError> {
let index = self.capacity() + start - 1;
let mut count = 0;
// first count number of leaves, and check that they fit in the tree
// then insert into the tree
let leaves = leaves.into_iter().collect::<Vec<_>>();
if leaves.len() + start > self.capacity() {
return Err(Report::msg("provided leaves do not fit in the tree"));
return Err(ZerokitMerkleTreeError::TooManySet);
}
leaves.into_iter().for_each(|hash| {
self.nodes[index + count] = hash;
@@ -210,7 +215,12 @@ where
}

/// Overrides a range of leaves while resetting specified indices to default and preserving unaffected values.
fn override_range<I, J>(&mut self, start: usize, leaves: I, indices: J) -> Result<()>
fn override_range<I, J>(
&mut self,
start: usize,
leaves: I,
indices: J,
) -> Result<(), ZerokitMerkleTreeError>
where
I: ExactSizeIterator<Item = FrOf<Self::Hasher>>,
J: ExactSizeIterator<Item = usize>,
@@ -239,17 +249,16 @@ where
}

self.set_range(start, set_values.into_iter())
.map_err(|e| Report::msg(e.to_string()))
}

/// Sets a leaf at the next available index
fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<()> {
fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<(), ZerokitMerkleTreeError> {
self.set(self.next_index, leaf)?;
Ok(())
}

/// Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
fn delete(&mut self, index: usize) -> Result<()> {
fn delete(&mut self, index: usize) -> Result<(), ZerokitMerkleTreeError> {
// We reset the leaf only if we previously set a leaf at that index
if index < self.next_index {
self.set(index, H::default_leaf())?;
@@ -259,9 +268,9 @@ where
}

// Computes a merkle proof the leaf at the specified index
fn proof(&self, leaf: usize) -> Result<FullMerkleProof<H>> {
fn proof(&self, leaf: usize) -> Result<FullMerkleProof<H>, ZerokitMerkleTreeError> {
if leaf >= self.capacity() {
return Err(Report::msg("index exceeds set size"));
return Err(ZerokitMerkleTreeError::InvalidLeaf);
}
let mut index = self.capacity() + leaf - 1;
let mut path = Vec::with_capacity(self.depth + 1);
@@ -278,16 +287,20 @@ where
}

// Verifies a Merkle proof with respect to the input leaf and the tree root
fn verify(&self, hash: &FrOf<Self::Hasher>, proof: &FullMerkleProof<H>) -> Result<bool> {
fn verify(
&self,
hash: &FrOf<Self::Hasher>,
proof: &FullMerkleProof<H>,
) -> Result<bool, ZerokitMerkleTreeError> {
Ok(proof.compute_root_from(hash) == self.root())
}

fn set_metadata(&mut self, metadata: &[u8]) -> Result<()> {
fn set_metadata(&mut self, metadata: &[u8]) -> Result<(), ZerokitMerkleTreeError> {
self.metadata = metadata.to_vec();
Ok(())
}

fn metadata(&self) -> Result<Vec<u8>> {
fn metadata(&self) -> Result<Vec<u8>, ZerokitMerkleTreeError> {
Ok(self.metadata.to_vec())
}
}
@@ -323,12 +336,14 @@ where
///
/// - `start_index`: The first index at the current level that was updated.
/// - `end_index`: The last index (inclusive) at the same level that was updated.
fn update_hashes(&mut self, start_index: usize, end_index: usize) -> Result<()> {
fn update_hashes(
&mut self,
start_index: usize,
end_index: usize,
) -> Result<(), ZerokitMerkleTreeError> {
// Ensure the range is within the same tree level
if self.levels(start_index) != self.levels(end_index) {
return Err(Report::msg(
"start_index and end_index must be on the same level",
));
return Err(ZerokitMerkleTreeError::InvalidStartAndEndLevel);
}

// Compute parent indices for the range
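Construction now surfaces ZerokitMerkleTreeError directly. A minimal sketch of building a FullMerkleTree generically over a hasher; import paths are assumed, and any type implementing the crate's Hasher trait can be plugged in:

use zerokit_utils::merkle_tree::error::ZerokitMerkleTreeError;
use zerokit_utils::merkle_tree::{FullMerkleConfig, FullMerkleTree, Hasher, ZerokitMerkleTree};

// Builds a tree of the given depth, with every leaf set to the hasher's default leaf.
fn build_full_tree<H: Hasher>(depth: usize) -> Result<FullMerkleTree<H>, ZerokitMerkleTreeError> {
    FullMerkleTree::<H>::new(depth, H::default_leaf(), FullMerkleConfig::default())
}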
@@ -13,13 +13,12 @@
//! * Disk based storage backend (using mmaped files should be easy)
//! * Implement serialization for tree and Merkle proof

use crate::merkle_tree::error::ZerokitMerkleTreeError;
use std::{
fmt::{Debug, Display},
str::FromStr,
};

use color_eyre::Result;

/// Enables parallel hashing when there are at least 8 nodes (4 pairs to hash), justifying the overhead.
pub const MIN_PARALLEL_NODES: usize = 8;

@@ -45,34 +44,52 @@ pub trait ZerokitMerkleTree {
type Hasher: Hasher;
type Config: Default + FromStr;

fn default(depth: usize) -> Result<Self>
fn default(depth: usize) -> Result<Self, ZerokitMerkleTreeError>
where
Self: Sized;
fn new(depth: usize, default_leaf: FrOf<Self::Hasher>, config: Self::Config) -> Result<Self>
fn new(
depth: usize,
default_leaf: FrOf<Self::Hasher>,
config: Self::Config,
) -> Result<Self, ZerokitMerkleTreeError>
where
Self: Sized;
fn depth(&self) -> usize;
fn capacity(&self) -> usize;
fn leaves_set(&self) -> usize;
fn root(&self) -> FrOf<Self::Hasher>;
fn get_subtree_root(&self, n: usize, index: usize) -> Result<FrOf<Self::Hasher>>;
fn set(&mut self, index: usize, leaf: FrOf<Self::Hasher>) -> Result<()>;
fn set_range<I>(&mut self, start: usize, leaves: I) -> Result<()>
fn get_subtree_root(
&self,
n: usize,
index: usize,
) -> Result<FrOf<Self::Hasher>, ZerokitMerkleTreeError>;
fn set(&mut self, index: usize, leaf: FrOf<Self::Hasher>)
-> Result<(), ZerokitMerkleTreeError>;
fn set_range<I>(&mut self, start: usize, leaves: I) -> Result<(), ZerokitMerkleTreeError>
where
I: ExactSizeIterator<Item = FrOf<Self::Hasher>>;
fn get(&self, index: usize) -> Result<FrOf<Self::Hasher>>;
fn get(&self, index: usize) -> Result<FrOf<Self::Hasher>, ZerokitMerkleTreeError>;
fn get_empty_leaves_indices(&self) -> Vec<usize>;
fn override_range<I, J>(&mut self, start: usize, leaves: I, to_remove_indices: J) -> Result<()>
fn override_range<I, J>(
&mut self,
start: usize,
leaves: I,
to_remove_indices: J,
) -> Result<(), ZerokitMerkleTreeError>
where
I: ExactSizeIterator<Item = FrOf<Self::Hasher>>,
J: ExactSizeIterator<Item = usize>;
fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<()>;
fn delete(&mut self, index: usize) -> Result<()>;
fn proof(&self, index: usize) -> Result<Self::Proof>;
fn verify(&self, leaf: &FrOf<Self::Hasher>, witness: &Self::Proof) -> Result<bool>;
fn set_metadata(&mut self, metadata: &[u8]) -> Result<()>;
fn metadata(&self) -> Result<Vec<u8>>;
fn close_db_connection(&mut self) -> Result<()>;
fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<(), ZerokitMerkleTreeError>;
fn delete(&mut self, index: usize) -> Result<(), ZerokitMerkleTreeError>;
fn proof(&self, index: usize) -> Result<Self::Proof, ZerokitMerkleTreeError>;
fn verify(
&self,
leaf: &FrOf<Self::Hasher>,
witness: &Self::Proof,
) -> Result<bool, ZerokitMerkleTreeError>;
fn set_metadata(&mut self, metadata: &[u8]) -> Result<(), ZerokitMerkleTreeError>;
fn metadata(&self) -> Result<Vec<u8>, ZerokitMerkleTreeError>;
fn close_db_connection(&mut self) -> Result<(), ZerokitMerkleTreeError>;
}

pub trait ZerokitMerkleProof {
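With a single typed error across the whole trait, generic code over any ZerokitMerkleTree can thread failures with `?` instead of converting through eyre reports. A small sketch under the same assumed import paths; the Copy bound on the leaf type is an assumption made for the example only:

use zerokit_utils::merkle_tree::error::ZerokitMerkleTreeError;
use zerokit_utils::merkle_tree::{FrOf, ZerokitMerkleTree};

// Sets a leaf, produces its membership proof, and checks it against the current root.
fn set_and_check<T: ZerokitMerkleTree>(
    tree: &mut T,
    index: usize,
    leaf: FrOf<T::Hasher>,
) -> Result<bool, ZerokitMerkleTreeError>
where
    FrOf<T::Hasher>: Copy,
{
    tree.set(index, leaf)?;
    let proof = tree.proof(index)?;
    tree.verify(&leaf, &proof)
}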
@@ -1,7 +1,9 @@
pub mod error;
pub mod full_merkle_tree;
#[allow(clippy::module_inception)]
pub mod merkle_tree;
pub mod optimal_merkle_tree;

pub use self::full_merkle_tree::*;
pub use self::merkle_tree::*;
pub use self::optimal_merkle_tree::*;
@@ -1,10 +1,11 @@
use std::{cmp::max, collections::HashMap, fmt::Debug, str::FromStr};

use color_eyre::{Report, Result};
use rayon::iter::{IntoParallelIterator, ParallelIterator};

use crate::merkle_tree::{FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES};

use crate::merkle_tree::{
error::{FromConfigError, ZerokitMerkleTreeError},
FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES,
};
////////////////////////////////////////////////////////////
///// Optimal Merkle Tree Implementation
////////////////////////////////////////////////////////////
@@ -48,9 +49,9 @@ pub struct OptimalMerkleProof<H: Hasher>(pub Vec<(H::Fr, u8)>);
pub struct OptimalMerkleConfig(());

impl FromStr for OptimalMerkleConfig {
type Err = Report;
type Err = FromConfigError;

fn from_str(_s: &str) -> Result<Self> {
fn from_str(_s: &str) -> Result<Self, Self::Err> {
Ok(OptimalMerkleConfig::default())
}
}
@@ -64,13 +65,17 @@ where
type Hasher = H;
type Config = OptimalMerkleConfig;

fn default(depth: usize) -> Result<Self> {
fn default(depth: usize) -> Result<Self, ZerokitMerkleTreeError> {
OptimalMerkleTree::<H>::new(depth, H::default_leaf(), Self::Config::default())
}

/// Creates a new `MerkleTree`
/// depth - the height of the tree made only of hash nodes. 2^depth is the maximum number of leaves hash nodes
fn new(depth: usize, default_leaf: H::Fr, _config: Self::Config) -> Result<Self> {
fn new(
depth: usize,
default_leaf: H::Fr,
_config: Self::Config,
) -> Result<Self, ZerokitMerkleTreeError> {
// Compute cache node values, leaf to root
let mut cached_nodes: Vec<H::Fr> = Vec::with_capacity(depth + 1);
cached_nodes.push(default_leaf);
@@ -89,7 +94,7 @@ where
})
}

fn close_db_connection(&mut self) -> Result<()> {
fn close_db_connection(&mut self) -> Result<(), ZerokitMerkleTreeError> {
Ok(())
}

@@ -114,9 +119,9 @@ where
}

/// Sets a leaf at the specified tree index
fn set(&mut self, index: usize, leaf: H::Fr) -> Result<()> {
fn set(&mut self, index: usize, leaf: H::Fr) -> Result<(), ZerokitMerkleTreeError> {
if index >= self.capacity() {
return Err(Report::msg("index exceeds set size"));
return Err(ZerokitMerkleTreeError::InvalidLeaf);
}
self.nodes.insert((self.depth, index), leaf);
self.update_hashes(index, 1)?;
@@ -126,20 +131,20 @@ where
}

/// Get a leaf from the specified tree index
fn get(&self, index: usize) -> Result<H::Fr> {
fn get(&self, index: usize) -> Result<H::Fr, ZerokitMerkleTreeError> {
if index >= self.capacity() {
return Err(Report::msg("index exceeds set size"));
return Err(ZerokitMerkleTreeError::InvalidLeaf);
}
Ok(self.get_node(self.depth, index))
}

/// Returns the root of the subtree at level n and index
fn get_subtree_root(&self, n: usize, index: usize) -> Result<H::Fr> {
fn get_subtree_root(&self, n: usize, index: usize) -> Result<H::Fr, ZerokitMerkleTreeError> {
if n > self.depth() {
return Err(Report::msg("level exceeds depth size"));
return Err(ZerokitMerkleTreeError::InvalidLevel);
}
if index >= self.capacity() {
return Err(Report::msg("index exceeds set size"));
return Err(ZerokitMerkleTreeError::InvalidLeaf);
}
if n == 0 {
Ok(self.root())
@@ -166,11 +171,11 @@ where
&mut self,
start: usize,
leaves: I,
) -> Result<()> {
) -> Result<(), ZerokitMerkleTreeError> {
// check if the range is valid
let leaves_len = leaves.len();
if start + leaves_len > self.capacity() {
return Err(Report::msg("provided range exceeds set size"));
return Err(ZerokitMerkleTreeError::TooManySet);
}
for (i, leaf) in leaves.enumerate() {
self.nodes.insert((self.depth, start + i), leaf);
@@ -182,7 +187,12 @@ where
}

/// Overrides a range of leaves while resetting specified indices to default and preserving unaffected values.
fn override_range<I, J>(&mut self, start: usize, leaves: I, indices: J) -> Result<()>
fn override_range<I, J>(
&mut self,
start: usize,
leaves: I,
indices: J,
) -> Result<(), ZerokitMerkleTreeError>
where
I: ExactSizeIterator<Item = FrOf<Self::Hasher>>,
J: ExactSizeIterator<Item = usize>,
@@ -211,17 +221,16 @@ where
}

self.set_range(start, set_values.into_iter())
.map_err(|e| Report::msg(e.to_string()))
}

/// Sets a leaf at the next available index
fn update_next(&mut self, leaf: H::Fr) -> Result<()> {
fn update_next(&mut self, leaf: H::Fr) -> Result<(), ZerokitMerkleTreeError> {
self.set(self.next_index, leaf)?;
Ok(())
}

/// Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
fn delete(&mut self, index: usize) -> Result<()> {
fn delete(&mut self, index: usize) -> Result<(), ZerokitMerkleTreeError> {
// We reset the leaf only if we previously set a leaf at that index
if index < self.next_index {
self.set(index, H::default_leaf())?;
@@ -231,9 +240,9 @@ where
}

/// Computes a merkle proof the leaf at the specified index
fn proof(&self, index: usize) -> Result<Self::Proof> {
fn proof(&self, index: usize) -> Result<Self::Proof, ZerokitMerkleTreeError> {
if index >= self.capacity() {
return Err(Report::msg("index exceeds set size"));
return Err(ZerokitMerkleTreeError::InvalidLeaf);
}
let mut witness = Vec::<(H::Fr, u8)>::with_capacity(self.depth);
let mut i = index;
@@ -251,27 +260,27 @@ where
}
}
if i != 0 {
Err(Report::msg("i != 0"))
Err(ZerokitMerkleTreeError::ComputingProofError)
} else {
Ok(OptimalMerkleProof(witness))
}
}

/// Verifies a Merkle proof with respect to the input leaf and the tree root
fn verify(&self, leaf: &H::Fr, witness: &Self::Proof) -> Result<bool> {
fn verify(&self, leaf: &H::Fr, witness: &Self::Proof) -> Result<bool, ZerokitMerkleTreeError> {
if witness.length() != self.depth {
return Err(Report::msg("witness length doesn't match tree depth"));
return Err(ZerokitMerkleTreeError::InvalidWitness);
}
let expected_root = witness.compute_root_from(leaf);
Ok(expected_root.eq(&self.root()))
}

fn set_metadata(&mut self, metadata: &[u8]) -> Result<()> {
fn set_metadata(&mut self, metadata: &[u8]) -> Result<(), ZerokitMerkleTreeError> {
self.metadata = metadata.to_vec();
Ok(())
}

fn metadata(&self) -> Result<Vec<u8>> {
fn metadata(&self) -> Result<Vec<u8>, ZerokitMerkleTreeError> {
Ok(self.metadata.to_vec())
}
}
@@ -301,7 +310,7 @@ where
///
/// - `start`: Starting leaf index that was updated.
/// - `length`: Number of consecutive leaves that were updated.
fn update_hashes(&mut self, start: usize, length: usize) -> Result<()> {
fn update_hashes(&mut self, start: usize, length: usize) -> Result<(), ZerokitMerkleTreeError> {
// Start at the leaf level
let mut current_depth = self.depth;
@@ -1,4 +1,4 @@
pub mod sled_adapter;
pub use self::sled_adapter::*;
pub use self::sled_adapter::SledDB;
pub use pmtree;
pub use sled::*;
pub use sled::{Config, Mode};
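The glob re-exports are narrowed to the items downstream code actually needs; anything else from sled must now be imported from the sled crate directly. A hedged sketch of the caller-side import change, with the module path assumed to be zerokit_utils::pm_tree:

// Before this change, a glob import re-exported the entire sled API through the adapter module.
// Now only the named items are re-exported; everything else comes from sled directly.
use zerokit_utils::pm_tree::{Config, Mode, SledDB};

fn example_config() -> Config {
    // sled's Config and Mode are still reachable through the re-export.
    Config::new().temporary(true).mode(Mode::HighThroughput)
}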
@@ -212,7 +212,7 @@ pub mod test {
tree_full
.override_range(0, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
assert_eq!(tree_full.get_empty_leaves_indices(), vec![]);
assert_eq!(tree_full.get_empty_leaves_indices(), Vec::<usize>::new());

// check if indexes for deletion are before indexes for overwriting
tree_full
@@ -251,7 +251,7 @@ pub mod test {
tree_opt
.override_range(0, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
assert_eq!(tree_opt.get_empty_leaves_indices(), vec![]);
assert_eq!(tree_opt.get_empty_leaves_indices(), Vec::<usize>::new());

// check if indexes for deletion are before indexes for overwriting
tree_opt