Compare commits

...

6 Commits

Author SHA1 Message Date
Sydhds
4077357e3f Add merkle tree glossary + mermaid graph example (#298) 2025-04-18 15:11:55 +02:00
Sydhds
84d9799d09 Add poseidon hash benchmark + optim (#300) 2025-04-18 15:08:43 +02:00
Sydhds
c576af8e62 fix(tree): fix OptimalMerkleTree set_range & override_range performance issue (#295) 2025-04-16 17:40:58 +02:00
Sydhds
81470b9678 Add poseidon hash unit test (against ref values) (#299) 2025-04-16 16:23:58 +02:00
Vinh Trịnh
9d4198c205 feat(rln-wasm): bring back wasm support for zerokit
# Bring Back WebAssembly Support for ZeroKit

- Update minor versions of all dependencies.
- Update documentation to reflect these changes.
- ~~Vendor `wasmer` v4.4.0 in [my git
repository](https://github.com/vinhtc27/wasmer) for `ark-circom`
v0.5.0.~~
- Resolve `wasm-pack` build failures (`os error 2`) caused by a Node.js
version mismatch.
- Restore the previous CI pipeline for the `rln-wasm` feature and update
to the stable toolchain.
- ~~Use `ark-circom` with the `wasm` feature for WebAssembly
compatibility and the `rln.wasm` file for witness calculation.~~
- ~~Fix dependency issues related to `ark-circom` v0.5.0, which
currently uses `wasmer` v4.4.0 and is affected by this
[issue](https://github.com/rust-lang/rust/issues/91632#issuecomment-1477914703).~~
- Install WABT with `brew` and `apt-get` instead of cloning the repository, fixing the `wasm-strip not found` issue in the CI workflow.
- Install `wasm-pack` with `curl` instead of using `wasm-pack-action` to fix a parse exception error in the CI workflow.
- Use the `.wasm` file, generated by [`iden3/circom`](https://github.com/iden3/circom) during circuit compilation, together with its JS bindings for witness calculation. This allows witness computation outside the RLN instance (see the sketch after this list).
- Refactor the `rln` module by moving circuit-related files to the
`src/circuit` folder for better organization.
- Remove `ark-circom` and `wasmer` by cloning the
[CircomReduction](3c95ed98e2/src/circom/qap.rs (L12))
struct and the
[read_zkey](3c95ed98e2/src/zkey.rs (L53))
function into the `rln` module, which reduces the repository's build
size and shortens both compilation time and CI workflow duration.
- These changes also address
[#282](https://github.com/vacp2p/zerokit/issues/282) by removing
`wasmer` and `wasmer-wasix`, which lack x32 system support.
- Benchmark `rln-wasm` with `wasm_bindgen_test`, covering RLN instance
creation, key generation, witness calculation, proving, and
verification. Also add the same benchmarks to `v0.6.1` in
[benchmark-v0.6.1](https://github.com/vacp2p/zerokit/tree/benchmark-v0.6.1)
for comparison.
- Add `arkzkey` feature for rln-wasm, including tests, benchmarks, CI
workflow updates, and related documentation.
- Benchmark `rln-wasm` in the browser using an HTML page, covering
initialization, RLN instance creation, proving, and verification; this
is kept on the
[`benchmark-v0.7.0`](https://github.com/vacp2p/zerokit/tree/benchmark-v0.7.0)
branch for later use.
- Fix the clippy error "this `repeat().take()` can be written more
concisely" in the CI workflow for the `utils` module
([error](https://github.com/vacp2p/zerokit/actions/runs/14258579070/job/39965568013)).
- Update `Makefile.toml` so that `make build`, `make test`, and
`make bench` can be run from the root and inside each module.
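
A minimal Node.js sketch of the standalone witness calculation mentioned above, mirroring `rln-wasm/src/utils.js` (file paths here are illustrative):

```js
// Sketch: compute a witness outside the RLN instance, assuming illustrative paths.
const fs = require("fs");
const builder = require("./rln-wasm/resources/witness_calculator.js");

async function calculateWitness(circomWasmPath, inputs) {
  // Load the circom-generated rln.wasm and instantiate the witness calculator
  const code = fs.readFileSync(circomWasmPath);
  const witnessCalculator = await builder(code);

  // `inputs` maps circuit signal names to bigints (or arrays of bigints)
  const witness = await witnessCalculator.calculateWitness(inputs, false);

  // Stringify bigints so the result can later be parsed and handed to
  // generateRLNProofWithWitness on the wasm side
  return JSON.stringify(witness, (_, value) =>
    typeof value === "bigint" ? value.toString() : value
  );
}
```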
2025-04-08 13:37:18 +07:00
markoburcul
c60e0c33fc nix: add flake and derivation for android-arm64 arch
Referenced issue: https://github.com/waku-org/nwaku/issues/3232
2025-04-04 10:50:26 +02:00
55 changed files with 2512 additions and 3918 deletions


@@ -5,6 +5,7 @@ on:
paths-ignore:
- "**.md"
- "!.github/workflows/*.yml"
- "!rln-wasm/**"
- "!rln/src/**"
- "!rln/resources/**"
- "!utils/src/**"
@@ -12,6 +13,7 @@ on:
paths-ignore:
- "**.md"
- "!.github/workflows/*.yml"
- "!rln-wasm/**"
- "!rln/src/**"
- "!rln/resources/**"
- "!utils/src/**"
@@ -76,12 +78,49 @@ jobs:
fi
working-directory: ${{ matrix.crate }}
rln-wasm:
strategy:
matrix:
platform: [ ubuntu-latest, macos-latest ]
feature: [ "default", "arkzkey" ]
runs-on: ${{ matrix.platform }}
timeout-minutes: 60
name: test - rln-wasm - ${{ matrix.platform }} - ${{ matrix.feature }}
steps:
- uses: actions/checkout@v3
- name: Install stable toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v2
- name: Install Dependencies
run: make installdeps
- name: cargo-make build
run: |
if [ ${{ matrix.feature }} == default ]; then
cargo make build
else
cargo make build_${{ matrix.feature }}
fi
working-directory: rln-wasm
- name: cargo-make test
run: |
if [ ${{ matrix.feature }} == default ]; then
cargo make test --release
else
cargo make test_${{ matrix.feature }} --release
fi
working-directory: rln-wasm
lint:
strategy:
matrix:
# we run lint tests only on ubuntu
platform: [ ubuntu-latest ]
crate: [ rln, utils ]
crate: [ rln, rln-wasm, utils ]
runs-on: ${{ matrix.platform }}
timeout-minutes: 60
@@ -106,7 +145,7 @@ jobs:
- name: cargo clippy
if: success() || failure()
run: |
cargo clippy --release -- -D warnings
cargo clippy --release
working-directory: ${{ matrix.crate }}
benchmark-utils:


@@ -84,9 +84,39 @@ jobs:
path: ${{ matrix.target }}-${{ matrix.feature }}-rln.tar.gz
retention-days: 2
browser-rln-wasm:
name: Browser build (RLN WASM)
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Install stable toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
run: make installdeps
- name: cross make build
run: |
cross make build
mkdir release
cp pkg/** release/
tar -czvf browser-rln-wasm.tar.gz release/
working-directory: rln-wasm
- name: Upload archive artifact
uses: actions/upload-artifact@v4
with:
name: browser-rln-wasm-archive
path: rln-wasm/browser-rln-wasm.tar.gz
retention-days: 2
prepare-prerelease:
name: Prepare pre-release
needs: [ linux, macos ]
needs: [ linux, macos, browser-rln-wasm ]
runs-on: ubuntu-latest
steps:
- name: Checkout code

4
.gitignore vendored

@@ -9,7 +9,9 @@ rln-cli/database
# will have compiled files and executables
debug/
target/
wabt/
# Generated by Nix
result/
# These are backup files generated by rustfmt
**/*.rs.bk


@@ -1,3 +1,5 @@
# CHANGE LOG
## 2023-02-28 v0.2
This release contains:
@@ -10,7 +12,6 @@ This release contains:
- Dual License under Apache 2.0 and MIT
- RLN compiles as a static library, which can be consumed through a C FFI
## 2022-09-19 v0.1
Initial beta release.

3820
Cargo.lock generated

File diff suppressed because it is too large.


@@ -1,6 +1,6 @@
[workspace]
members = ["rln", "rln-cli", "utils"]
default-members = ["rln", "rln-cli", "utils"]
members = ["rln", "rln-cli", "rln-wasm", "utils"]
default-members = ["rln", "rln-cli", "rln-wasm", "utils"]
resolver = "2"
# Compilation profile for any non-workspace member.


@@ -1,4 +1,4 @@
.PHONY: all installdeps build test clean
.PHONY: all installdeps build test bench clean
all: .pre-build build
@@ -13,20 +13,21 @@ endif
installdeps: .pre-build
ifeq ($(shell uname),Darwin)
# commented due to https://github.com/orgs/Homebrew/discussions/4612
# @brew update
@brew update
@brew install cmake ninja
@git -C "wabt" pull || git clone --recursive https://github.com/WebAssembly/wabt.git "wabt"
@cd wabt && mkdir -p build && make
else ifeq ($(shell uname),Linux)
@sudo apt-get update
@sudo apt-get install -y cmake ninja-build
@git -C "wabt" pull || git clone --recursive https://github.com/WebAssembly/wabt.git "wabt"
@cd wabt && mkdir -p build && cd build && cmake .. -GNinja && ninja && sudo ninja install
endif
# nvm already checks if it's installed, and no-ops if it is
@curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash
@. ${HOME}/.nvm/nvm.sh && nvm install 18.20.2 && nvm use 18.20.2;
@if [ ! -d "$$HOME/.nvm" ]; then \
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.2/install.sh | bash; \
fi
@bash -c 'export NVM_DIR="$$HOME/.nvm" && \
[ -s "$$NVM_DIR/nvm.sh" ] && \. "$$NVM_DIR/nvm.sh" && \
nvm install 22.14.0 && \
nvm use 22.14.0'
@curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
@echo "\033[1;32m>>> Now run this command to activate Node.js 22.14.0: \033[1;33msource $$HOME/.nvm/nvm.sh && nvm use 22.14.0\033[0m"
build: .pre-build
@cargo make build
@@ -34,5 +35,8 @@ build: .pre-build
test: .pre-build
@cargo make test
bench: .pre-build
@cargo make bench
clean:
@cargo clean
@cargo clean

27
flake.lock generated Normal file

@@ -0,0 +1,27 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1740603184,
"narHash": "sha256-t+VaahjQAWyA+Ctn2idyo1yxRIYpaDxMgHkgCNiMJa4=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "f44bd8ca21e026135061a0a57dcf3d0775b67a49",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "f44bd8ca21e026135061a0a57dcf3d0775b67a49",
"type": "github"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

38
flake.nix Normal file

@@ -0,0 +1,38 @@
{
description = "A flake for building zerokit";
inputs = {
# Version 24.11
nixpkgs.url = "github:NixOS/nixpkgs?rev=f44bd8ca21e026135061a0a57dcf3d0775b67a49";
};
outputs = { self, nixpkgs }:
let
stableSystems = [
"x86_64-linux" "aarch64-linux"
"x86_64-darwin" "aarch64-darwin"
"x86_64-windows" "i686-linux"
"i686-windows"
];
forAllSystems = nixpkgs.lib.genAttrs stableSystems;
pkgsFor = forAllSystems (system: import nixpkgs { inherit system; });
in rec
{
packages = forAllSystems (system: let
pkgs = pkgsFor.${system};
in rec {
zerokit-android-arm64 = pkgs.callPackage ./nix/default.nix { target-platform="aarch64-android-prebuilt"; rust-target= "aarch64-linux-android"; };
default = zerokit-android-arm64;
});
devShells = forAllSystems (system: let
pkgs = pkgsFor.${system};
in {
default = pkgs.mkShell {
inputsFrom = [
packages.${system}.default
];
};
});
};
}

35
nix/default.nix Normal file

@@ -0,0 +1,35 @@
{
pkgs,
target-platform ? "aarch64-android-prebuilt",
rust-target ? "aarch64-linux-android",
}:
pkgs.pkgsCross.${target-platform}.rustPlatform.buildRustPackage {
pname = "zerokit";
version = "nightly";
src = ../.;
cargoLock = {
lockFile = ../Cargo.lock;
allowBuiltinFetchGit = true;
};
CARGO_HOME = "/tmp";
buildPhase = ''
pushd rln
cargo rustc --crate-type=cdylib --release --lib --target=${rust-target}
popd
'';
installPhase = ''
mkdir -p $out/
cp ./target/${rust-target}/release/librln.so $out/
'';
meta = with pkgs.lib; {
description = "Zerokit";
license = licenses.mit;
};
}


@@ -13,14 +13,15 @@ path = "src/examples/stateless.rs"
required-features = ["stateless"]
[dependencies]
rln = { path = "../rln", default-features = true, features = ["pmtree-ft"] }
rln = { path = "../rln", default-features = false }
zerokit_utils = { path = "../utils" }
clap = { version = "4.5.29", features = ["cargo", "derive", "env"] }
clap_derive = { version = "4.5.28" }
color-eyre = "0.6.2"
serde_json = "1.0.138"
serde = { version = "1.0.217", features = ["derive"] }
clap = { version = "4.5.35", features = ["cargo", "derive", "env"] }
clap_derive = { version = "4.5.32" }
color-eyre = "0.6.3"
serde_json = "1.0"
serde = { version = "1.0", features = ["derive"] }
[features]
default = []
arkzkey = ["rln/arkzkey"]
stateless = ["rln/stateless"]


@@ -1,8 +1,9 @@
[tasks.build]
command = "cargo"
args = ["build", "--release"]
args = ["build"]
[tasks.test]
command = "cargo"
args = ["test", "--release"]
disabled = true
[tasks.bench]
disabled = true


@@ -127,11 +127,12 @@ impl RLNSystem {
};
let merkle_proof = self.tree.proof(user_index)?;
let x = hash_to_field(signal.as_bytes());
let rln_witness = rln_witness_from_values(
identity.identity_secret_hash,
&merkle_proof,
hash_to_field(signal.as_bytes()),
x,
external_nullifier,
Fr::from(MESSAGE_LIMIT),
Fr::from(message_id),
@@ -157,12 +158,12 @@ impl RLNSystem {
let mut input_buffer = Cursor::new(proof_with_signal);
let root = self.tree.root();
let root_serialized = fr_to_bytes_le(&root);
let mut root_buffer = Cursor::new(root_serialized);
let roots_serialized = fr_to_bytes_le(&root);
let mut roots_buffer = Cursor::new(roots_serialized);
match self
.rln
.verify_with_roots(&mut input_buffer, &mut root_buffer)
.verify_with_roots(&mut input_buffer, &mut roots_buffer)
{
Ok(true) => {
let nullifier = &proof_data[256..288];

6
rln-wasm/.gitignore vendored Normal file

@@ -0,0 +1,6 @@
/target
**/*.rs.bk
Cargo.lock
bin/
pkg/
wasm-pack.log

39
rln-wasm/Cargo.toml Normal file

@@ -0,0 +1,39 @@
[package]
name = "rln-wasm"
version = "0.1.0"
edition = "2021"
license = "MIT or Apache2"
[lib]
crate-type = ["cdylib", "rlib"]
required-features = ["stateless"]
[dependencies]
rln = { path = "../rln", default-features = false }
num-bigint = { version = "0.4.6", default-features = false, features = [
"rand",
"serde",
] }
wasm-bindgen = "0.2.100"
serde-wasm-bindgen = "0.6.5"
js-sys = "0.3.77"
serde_json = "1.0"
# The `console_error_panic_xhook` crate provides better debugging of panics by
# logging them with `console.error`. This is great for development, but requires
# all the `std::fmt` and `std::panicking` infrastructure, so isn't great for
# code size when deploying.
console_error_panic_hook = { version = "0.1.7", optional = true }
zerokit_utils = { path = "../utils" }
[target.'cfg(target_arch = "wasm32")'.dependencies]
getrandom = { version = "0.2.15", features = ["js"] }
[dev-dependencies]
wasm-bindgen-test = "0.3.50"
wasm-bindgen-futures = "0.4.50"
[features]
default = ["console_error_panic_hook"]
stateless = ["rln/stateless"]
arkzkey = ["rln/arkzkey"]

58
rln-wasm/Makefile.toml Normal file

@@ -0,0 +1,58 @@
[tasks.build]
clear = true
dependencies = ["pack_build", "pack_rename"]
[tasks.build_arkzkey]
clear = true
dependencies = ["pack_build_arkzkey", "pack_rename"]
[tasks.pack_build]
command = "wasm-pack"
args = ["build", "--release", "--target", "web", "--scope", "waku"]
env = { "RUSTFLAGS" = "--cfg feature=\"stateless\"" }
[tasks.pack_build_arkzkey]
command = "wasm-pack"
args = ["build", "--release", "--target", "web", "--scope", "waku"]
env = { "RUSTFLAGS" = "--cfg feature=\"stateless\" --cfg feature=\"arkzkey\"" }
[tasks.pack_rename]
script = "sed -i.bak 's/rln-wasm/zerokit-rln-wasm/g' pkg/package.json && rm pkg/package.json.bak"
[tasks.test]
command = "wasm-pack"
args = [
"test",
"--release",
"--node",
"--target",
"wasm32-unknown-unknown",
"--",
"--nocapture",
]
env = { "RUSTFLAGS" = "--cfg feature=\"stateless\"" }
[tasks.test_arkzkey]
command = "wasm-pack"
args = [
"test",
"--release",
"--node",
"--target",
"wasm32-unknown-unknown",
"--",
"--nocapture",
]
env = { "RUSTFLAGS" = "--cfg feature=\"stateless\" --cfg feature=\"arkzkey\"" }
dependencies = ["build_arkzkey"]
[tasks.bench]
disabled = true
[tasks.login]
command = "wasm-pack"
args = ["login"]
[tasks.publish]
command = "wasm-pack"
args = ["publish", "--access", "public", "--target", "web"]

55
rln-wasm/README.md Normal file

@@ -0,0 +1,55 @@
# RLN for WASM
This library is used in [waku-org/js-rln](https://github.com/waku-org/js-rln/)
> **Note**: This project requires `wasm-pack` for compiling Rust to WebAssembly and `cargo-make` for running the build commands. Make sure both are installed before proceeding.
Install `wasm-pack`:
```bash
curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
```
Install `cargo-make`
```bash
cargo install cargo-make
```
Or install everything needed for `zerokit` at the root of the repository:
```bash
make installdeps
```
## Building the library
First, navigate to the rln-wasm directory:
```bash
cd rln-wasm
```
Compile zerokit for `wasm32-unknown-unknown`:
```bash
cargo make build
```
Or compile with the **arkzkey** feature enabled
```bash
cargo make build_arkzkey
```
## Running tests and benchmarks
```bash
cargo make test
```
Or test with the **arkzkey** feature enabled
```bash
cargo make test_arkzkey
```
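
A minimal sketch of consuming the built package from a browser, assuming wasm-pack's standard `--target web` output layout (`pkg/rln_wasm.js` with a default `init` export) and an illustrative zkey path:

```js
// Browser-side sketch; assumes the wasm-pack --target web output in ./pkg
// and a proving key served next to the page (path is illustrative).
import init, { initPanicHook, newRLN, generateMembershipKey } from "./pkg/rln_wasm.js";

async function setup() {
  await init(); // load and instantiate the wasm module
  initPanicHook(); // route Rust panics to console.error

  // Fetch the proving key and create a stateless RLN instance
  const zkeyBytes = new Uint8Array(await (await fetch("./rln_final.zkey")).arrayBuffer());
  const rln = newRLN(zkeyBytes);

  // 64 bytes: identity secret hash (LE) followed by identity commitment (LE)
  const keys = generateMembershipKey(rln);
  console.log("membership key bytes:", keys.length);
}

setup();
```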


@@ -0,0 +1,324 @@
module.exports = async function builder(code, options) {
options = options || {};
let wasmModule;
try {
wasmModule = await WebAssembly.compile(code);
} catch (err) {
console.log(err);
console.log(
"\nTry to run circom --c in order to generate c++ code instead\n"
);
throw new Error(err);
}
let wc;
let errStr = "";
let msgStr = "";
const instance = await WebAssembly.instantiate(wasmModule, {
runtime: {
exceptionHandler: function (code) {
let err;
if (code == 1) {
err = "Signal not found.\n";
} else if (code == 2) {
err = "Too many signals set.\n";
} else if (code == 3) {
err = "Signal already set.\n";
} else if (code == 4) {
err = "Assert Failed.\n";
} else if (code == 5) {
err = "Not enough memory.\n";
} else if (code == 6) {
err = "Input signal array access exceeds the size.\n";
} else {
err = "Unknown error.\n";
}
throw new Error(err + errStr);
},
printErrorMessage: function () {
errStr += getMessage() + "\n";
// console.error(getMessage());
},
writeBufferMessage: function () {
const msg = getMessage();
// Any calls to `log()` will always end with a `\n`, so that's when we print and reset
if (msg === "\n") {
console.log(msgStr);
msgStr = "";
} else {
// If we've buffered other content, put a space in between the items
if (msgStr !== "") {
msgStr += " ";
}
// Then append the message to the message we are creating
msgStr += msg;
}
},
showSharedRWMemory: function () {
printSharedRWMemory();
},
},
});
const sanityCheck = options;
// options &&
// (
// options.sanityCheck ||
// options.logGetSignal ||
// options.logSetSignal ||
// options.logStartComponent ||
// options.logFinishComponent
// );
wc = new WitnessCalculator(instance, sanityCheck);
return wc;
function getMessage() {
var message = "";
var c = instance.exports.getMessageChar();
while (c != 0) {
message += String.fromCharCode(c);
c = instance.exports.getMessageChar();
}
return message;
}
function printSharedRWMemory() {
const shared_rw_memory_size = instance.exports.getFieldNumLen32();
const arr = new Uint32Array(shared_rw_memory_size);
for (let j = 0; j < shared_rw_memory_size; j++) {
arr[shared_rw_memory_size - 1 - j] =
instance.exports.readSharedRWMemory(j);
}
// If we've buffered other content, put a space in between the items
if (msgStr !== "") {
msgStr += " ";
}
// Then append the value to the message we are creating
msgStr += fromArray32(arr).toString();
}
};
class WitnessCalculator {
constructor(instance, sanityCheck) {
this.instance = instance;
this.version = this.instance.exports.getVersion();
this.n32 = this.instance.exports.getFieldNumLen32();
this.instance.exports.getRawPrime();
const arr = new Uint32Array(this.n32);
for (let i = 0; i < this.n32; i++) {
arr[this.n32 - 1 - i] = this.instance.exports.readSharedRWMemory(i);
}
this.prime = fromArray32(arr);
this.witnessSize = this.instance.exports.getWitnessSize();
this.sanityCheck = sanityCheck;
}
circom_version() {
return this.instance.exports.getVersion();
}
async _doCalculateWitness(input, sanityCheck) {
//input is assumed to be a map from signals to arrays of bigints
this.instance.exports.init(this.sanityCheck || sanityCheck ? 1 : 0);
const keys = Object.keys(input);
var input_counter = 0;
keys.forEach((k) => {
const h = fnvHash(k);
const hMSB = parseInt(h.slice(0, 8), 16);
const hLSB = parseInt(h.slice(8, 16), 16);
const fArr = flatArray(input[k]);
let signalSize = this.instance.exports.getInputSignalSize(hMSB, hLSB);
if (signalSize < 0) {
throw new Error(`Signal ${k} not found\n`);
}
if (fArr.length < signalSize) {
throw new Error(`Not enough values for input signal ${k}\n`);
}
if (fArr.length > signalSize) {
throw new Error(`Too many values for input signal ${k}\n`);
}
for (let i = 0; i < fArr.length; i++) {
const arrFr = toArray32(BigInt(fArr[i]) % this.prime, this.n32);
for (let j = 0; j < this.n32; j++) {
this.instance.exports.writeSharedRWMemory(j, arrFr[this.n32 - 1 - j]);
}
try {
this.instance.exports.setInputSignal(hMSB, hLSB, i);
input_counter++;
} catch (err) {
// console.log(`After adding signal ${i} of ${k}`)
throw new Error(err);
}
}
});
if (input_counter < this.instance.exports.getInputSize()) {
throw new Error(
`Not all inputs have been set. Only ${input_counter} out of ${this.instance.exports.getInputSize()}`
);
}
}
async calculateWitness(input, sanityCheck) {
const w = [];
await this._doCalculateWitness(input, sanityCheck);
for (let i = 0; i < this.witnessSize; i++) {
this.instance.exports.getWitness(i);
const arr = new Uint32Array(this.n32);
for (let j = 0; j < this.n32; j++) {
arr[this.n32 - 1 - j] = this.instance.exports.readSharedRWMemory(j);
}
w.push(fromArray32(arr));
}
return w;
}
async calculateBinWitness(input, sanityCheck) {
const buff32 = new Uint32Array(this.witnessSize * this.n32);
const buff = new Uint8Array(buff32.buffer);
await this._doCalculateWitness(input, sanityCheck);
for (let i = 0; i < this.witnessSize; i++) {
this.instance.exports.getWitness(i);
const pos = i * this.n32;
for (let j = 0; j < this.n32; j++) {
buff32[pos + j] = this.instance.exports.readSharedRWMemory(j);
}
}
return buff;
}
async calculateWTNSBin(input, sanityCheck) {
const buff32 = new Uint32Array(this.witnessSize * this.n32 + this.n32 + 11);
const buff = new Uint8Array(buff32.buffer);
await this._doCalculateWitness(input, sanityCheck);
//"wtns"
buff[0] = "w".charCodeAt(0);
buff[1] = "t".charCodeAt(0);
buff[2] = "n".charCodeAt(0);
buff[3] = "s".charCodeAt(0);
//version 2
buff32[1] = 2;
//number of sections: 2
buff32[2] = 2;
//id section 1
buff32[3] = 1;
const n8 = this.n32 * 4;
//id section 1 length in 64bytes
const idSection1length = 8 + n8;
const idSection1lengthHex = idSection1length.toString(16);
buff32[4] = parseInt(idSection1lengthHex.slice(0, 8), 16);
buff32[5] = parseInt(idSection1lengthHex.slice(8, 16), 16);
//this.n32
buff32[6] = n8;
//prime number
this.instance.exports.getRawPrime();
var pos = 7;
for (let j = 0; j < this.n32; j++) {
buff32[pos + j] = this.instance.exports.readSharedRWMemory(j);
}
pos += this.n32;
// witness size
buff32[pos] = this.witnessSize;
pos++;
//id section 2
buff32[pos] = 2;
pos++;
// section 2 length
const idSection2length = n8 * this.witnessSize;
const idSection2lengthHex = idSection2length.toString(16);
buff32[pos] = parseInt(idSection2lengthHex.slice(0, 8), 16);
buff32[pos + 1] = parseInt(idSection2lengthHex.slice(8, 16), 16);
pos += 2;
for (let i = 0; i < this.witnessSize; i++) {
this.instance.exports.getWitness(i);
for (let j = 0; j < this.n32; j++) {
buff32[pos + j] = this.instance.exports.readSharedRWMemory(j);
}
pos += this.n32;
}
return buff;
}
}
function toArray32(rem, size) {
const res = []; //new Uint32Array(size); //has no unshift
const radix = BigInt(0x100000000);
while (rem) {
res.unshift(Number(rem % radix));
rem = rem / radix;
}
if (size) {
var i = size - res.length;
while (i > 0) {
res.unshift(0);
i--;
}
}
return res;
}
function fromArray32(arr) {
//returns a BigInt
var res = BigInt(0);
const radix = BigInt(0x100000000);
for (let i = 0; i < arr.length; i++) {
res = res * radix + BigInt(arr[i]);
}
return res;
}
function flatArray(a) {
var res = [];
fillArray(res, a);
return res;
function fillArray(res, a) {
if (Array.isArray(a)) {
for (let i = 0; i < a.length; i++) {
fillArray(res, a[i]);
}
} else {
res.push(a);
}
}
}
function fnvHash(str) {
const uint64_max = BigInt(2) ** BigInt(64);
let hash = BigInt("0xCBF29CE484222325");
for (var i = 0; i < str.length; i++) {
hash ^= BigInt(str[i].charCodeAt());
hash *= BigInt(0x100000001b3);
hash %= uint64_max;
}
let shash = hash.toString(16);
let n = 16 - shash.length;
shash = "0".repeat(n).concat(shash);
return shash;
}

294
rln-wasm/src/lib.rs Normal file

@@ -0,0 +1,294 @@
#![cfg(target_arch = "wasm32")]
use js_sys::{BigInt as JsBigInt, Object, Uint8Array};
use num_bigint::BigInt;
use rln::public::{hash, poseidon_hash, RLN};
use std::vec::Vec;
use wasm_bindgen::prelude::*;
#[wasm_bindgen(js_name = initPanicHook)]
pub fn init_panic_hook() {
console_error_panic_hook::set_once();
}
#[wasm_bindgen(js_name = RLN)]
pub struct RLNWrapper {
// The purpose of this wrapper is to hold a RLN instance with the 'static lifetime
// because wasm_bindgen does not allow returning elements with lifetimes
instance: RLN,
}
// Macro to call methods with arbitrary amount of arguments,
// which have the last argument is output buffer pointer
// First argument to the macro is context,
// second is the actual method on `RLN`
// third is the aforementioned output buffer argument
// rest are all other arguments to the method
macro_rules! call_with_output_and_error_msg {
// this variant is needed for the case when
// there are zero other arguments
($instance:expr, $method:ident, $error_msg:expr) => {
{
let mut output_data: Vec<u8> = Vec::new();
let new_instance = $instance.process();
if let Err(err) = new_instance.instance.$method(&mut output_data) {
std::mem::forget(output_data);
Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
};
($instance:expr, $method:ident, $error_msg:expr, $( $arg:expr ),* ) => {
{
let mut output_data: Vec<u8> = Vec::new();
let new_instance = $instance.process();
if let Err(err) = new_instance.instance.$method($($arg.process()),*, &mut output_data) {
std::mem::forget(output_data);
Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
};
}
macro_rules! call {
($instance:expr, $method:ident $(, $arg:expr)*) => {
{
let new_instance: &mut RLNWrapper = $instance.process();
new_instance.instance.$method($($arg.process()),*)
}
}
}
macro_rules! call_bool_method_with_error_msg {
($instance:expr, $method:ident, $error_msg:expr $(, $arg:expr)*) => {
{
let new_instance: &RLNWrapper = $instance.process();
new_instance.instance.$method($($arg.process()),*).map_err(|err| format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
}
}
}
// Macro to execute a function with arbitrary amount of arguments,
// First argument is the function to execute
// Rest are all other arguments to the method
macro_rules! fn_call_with_output_and_error_msg {
// this variant is needed for the case when
// there are zero other arguments
($func:ident, $error_msg:expr) => {
{
let mut output_data: Vec<u8> = Vec::new();
if let Err(err) = $func(&mut output_data) {
std::mem::forget(output_data);
Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
};
($func:ident, $error_msg:expr, $( $arg:expr ),* ) => {
{
let mut output_data: Vec<u8> = Vec::new();
if let Err(err) = $func($($arg.process()),*, &mut output_data) {
std::mem::forget(output_data);
Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
};
}
trait ProcessArg {
type ReturnType;
fn process(self) -> Self::ReturnType;
}
impl ProcessArg for usize {
type ReturnType = usize;
fn process(self) -> Self::ReturnType {
self
}
}
impl<T> ProcessArg for Vec<T> {
type ReturnType = Vec<T>;
fn process(self) -> Self::ReturnType {
self
}
}
impl ProcessArg for *const RLN {
type ReturnType = &'static RLN;
fn process(self) -> Self::ReturnType {
unsafe { &*self }
}
}
impl ProcessArg for *const RLNWrapper {
type ReturnType = &'static RLNWrapper;
fn process(self) -> Self::ReturnType {
unsafe { &*self }
}
}
impl ProcessArg for *mut RLNWrapper {
type ReturnType = &'static mut RLNWrapper;
fn process(self) -> Self::ReturnType {
unsafe { &mut *self }
}
}
impl<'a> ProcessArg for &'a [u8] {
type ReturnType = &'a [u8];
fn process(self) -> Self::ReturnType {
self
}
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = newRLN)]
pub fn wasm_new(zkey: Uint8Array) -> Result<*mut RLNWrapper, String> {
let instance = RLN::new_with_params(zkey.to_vec()).map_err(|err| format!("{:#?}", err))?;
let wrapper = RLNWrapper { instance };
Ok(Box::into_raw(Box::new(wrapper)))
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = rlnWitnessToJson)]
pub fn wasm_rln_witness_to_json(
ctx: *mut RLNWrapper,
serialized_witness: Uint8Array,
) -> Result<Object, String> {
let inputs = call!(
ctx,
get_rln_witness_bigint_json,
&serialized_witness.to_vec()[..]
)
.map_err(|err| err.to_string())?;
let js_value = serde_wasm_bindgen::to_value(&inputs).map_err(|err| err.to_string())?;
Object::from_entries(&js_value).map_err(|err| format!("{:#?}", err))
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateRLNProofWithWitness)]
pub fn wasm_generate_rln_proof_with_witness(
ctx: *mut RLNWrapper,
calculated_witness: Vec<JsBigInt>,
serialized_witness: Uint8Array,
) -> Result<Uint8Array, String> {
let mut witness_vec: Vec<BigInt> = vec![];
for v in calculated_witness {
witness_vec.push(
v.to_string(10)
.map_err(|err| format!("{:#?}", err))?
.as_string()
.ok_or("not a string error")?
.parse::<BigInt>()
.map_err(|err| format!("{:#?}", err))?,
);
}
call_with_output_and_error_msg!(
ctx,
generate_rln_proof_with_witness,
"could not generate proof",
witness_vec,
serialized_witness.to_vec()
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateMembershipKey)]
pub fn wasm_key_gen(ctx: *const RLNWrapper) -> Result<Uint8Array, String> {
call_with_output_and_error_msg!(ctx, key_gen, "could not generate membership keys")
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateExtendedMembershipKey)]
pub fn wasm_extended_key_gen(ctx: *const RLNWrapper) -> Result<Uint8Array, String> {
call_with_output_and_error_msg!(ctx, extended_key_gen, "could not generate membership keys")
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateSeededMembershipKey)]
pub fn wasm_seeded_key_gen(ctx: *const RLNWrapper, seed: Uint8Array) -> Result<Uint8Array, String> {
call_with_output_and_error_msg!(
ctx,
seeded_key_gen,
"could not generate membership key",
&seed.to_vec()[..]
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateSeededExtendedMembershipKey)]
pub fn wasm_seeded_extended_key_gen(
ctx: *const RLNWrapper,
seed: Uint8Array,
) -> Result<Uint8Array, String> {
call_with_output_and_error_msg!(
ctx,
seeded_extended_key_gen,
"could not generate membership key",
&seed.to_vec()[..]
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = recovedIDSecret)]
pub fn wasm_recover_id_secret(
ctx: *const RLNWrapper,
input_proof_data_1: Uint8Array,
input_proof_data_2: Uint8Array,
) -> Result<Uint8Array, String> {
call_with_output_and_error_msg!(
ctx,
recover_id_secret,
"could not recover id secret",
&input_proof_data_1.to_vec()[..],
&input_proof_data_2.to_vec()[..]
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = verifyWithRoots)]
pub fn wasm_verify_with_roots(
ctx: *const RLNWrapper,
proof: Uint8Array,
roots: Uint8Array,
) -> Result<bool, String> {
call_bool_method_with_error_msg!(
ctx,
verify_with_roots,
"error while verifying proof with roots".to_string(),
&proof.to_vec()[..],
&roots.to_vec()[..]
)
}
#[wasm_bindgen(js_name = hash)]
pub fn wasm_hash(input: Uint8Array) -> Result<Uint8Array, String> {
fn_call_with_output_and_error_msg!(hash, "could not generate hash", &input.to_vec()[..])
}
#[wasm_bindgen(js_name = poseidonHash)]
pub fn wasm_poseidon_hash(input: Uint8Array) -> Result<Uint8Array, String> {
fn_call_with_output_and_error_msg!(
poseidon_hash,
"could not generate poseidon hash",
&input.to_vec()[..]
)
}

26
rln-wasm/src/utils.js Normal file

@@ -0,0 +1,26 @@
const fs = require("fs");
// Utils functions for loading circom witness calculator and reading files from test
module.exports = {
read_file: function (path) {
return fs.readFileSync(path);
},
calculateWitness: async function (circom_path, inputs) {
const wc = require("../resources/witness_calculator.js");
const wasmFile = fs.readFileSync(circom_path);
const wasmFileBuffer = wasmFile.slice(
wasmFile.byteOffset,
wasmFile.byteOffset + wasmFile.byteLength
);
const witnessCalculator = await wc(wasmFileBuffer);
const calculatedWitness = await witnessCalculator.calculateWitness(
inputs,
false
);
return JSON.stringify(calculatedWitness, (key, value) =>
typeof value === "bigint" ? value.toString() : value
);
},
};

289
rln-wasm/tests/rln-wasm.rs Normal file

@@ -0,0 +1,289 @@
#![cfg(target_arch = "wasm32")]
#[cfg(test)]
mod tests {
use js_sys::{BigInt as JsBigInt, Date, Object, Uint8Array};
use rln::circuit::{Fr, TEST_TREE_HEIGHT};
use rln::hashers::{hash_to_field, poseidon_hash};
use rln::poseidon_tree::PoseidonTree;
use rln::protocol::{prepare_verify_input, rln_witness_from_values, serialize_witness};
use rln::utils::{bytes_le_to_fr, fr_to_bytes_le};
use rln_wasm::*;
use wasm_bindgen::{prelude::*, JsValue};
use wasm_bindgen_test::wasm_bindgen_test;
use zerokit_utils::merkle_tree::merkle_tree::ZerokitMerkleTree;
#[wasm_bindgen(module = "src/utils.js")]
extern "C" {
#[wasm_bindgen(catch)]
fn read_file(path: &str) -> Result<Uint8Array, JsValue>;
#[wasm_bindgen(catch)]
async fn calculateWitness(circom_path: &str, input: Object) -> Result<JsValue, JsValue>;
}
#[cfg(feature = "arkzkey")]
const ZKEY_PATH: &str = "../rln/resources/tree_height_20/rln_final.arkzkey";
#[cfg(not(feature = "arkzkey"))]
const ZKEY_PATH: &str = "../rln/resources/tree_height_20/rln_final.zkey";
const CIRCOM_PATH: &str = "../rln/resources/tree_height_20/rln.wasm";
#[wasm_bindgen_test]
pub async fn rln_wasm_benchmark() {
let mut results = String::from("\nbenchmarks:\n");
let iterations = 10;
let zkey = read_file(&ZKEY_PATH).expect("Failed to read zkey file");
// Benchmark wasm_new
let start_wasm_new = Date::now();
for _ in 0..iterations {
let _ = wasm_new(zkey.clone()).expect("Failed to create RLN instance");
}
let wasm_new_result = Date::now() - start_wasm_new;
// Create RLN instance for other benchmarks
let rln_instance = wasm_new(zkey).expect("Failed to create RLN instance");
let mut tree = PoseidonTree::default(TEST_TREE_HEIGHT).expect("Failed to create tree");
// Benchmark wasm_key_gen
let start_wasm_key_gen = Date::now();
for _ in 0..iterations {
let _ = wasm_key_gen(rln_instance).expect("Failed to generate keys");
}
let wasm_key_gen_result = Date::now() - start_wasm_key_gen;
// Generate identity pair for other benchmarks
let mem_keys = wasm_key_gen(rln_instance).expect("Failed to generate keys");
let id_key = mem_keys.subarray(0, 32);
let (identity_secret_hash, _) = bytes_le_to_fr(&id_key.to_vec());
let (id_commitment, _) = bytes_le_to_fr(&mem_keys.subarray(32, 64).to_vec());
let epoch = hash_to_field(b"test-epoch");
let rln_identifier = hash_to_field(b"test-rln-identifier");
let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);
let identity_index = tree.leaves_set();
let user_message_limit = Fr::from(100);
let rate_commitment = poseidon_hash(&[id_commitment, user_message_limit]);
tree.update_next(rate_commitment)
.expect("Failed to update tree");
let message_id = Fr::from(0);
let signal: [u8; 32] = [0; 32];
let x = hash_to_field(&signal);
let merkle_proof = tree
.proof(identity_index)
.expect("Failed to generate merkle proof");
let rln_witness = rln_witness_from_values(
identity_secret_hash,
&merkle_proof,
x,
external_nullifier,
user_message_limit,
message_id,
)
.expect("Failed to create RLN witness");
let serialized_witness =
serialize_witness(&rln_witness).expect("Failed to serialize witness");
let witness_buffer = Uint8Array::from(&serialized_witness[..]);
let json_inputs = wasm_rln_witness_to_json(rln_instance, witness_buffer.clone())
.expect("Failed to convert witness to JSON");
// Benchmark calculateWitness
let start_calculate_witness = Date::now();
for _ in 0..iterations {
let _ = calculateWitness(&CIRCOM_PATH, json_inputs.clone())
.await
.expect("Failed to calculate witness");
}
let calculate_witness_result = Date::now() - start_calculate_witness;
// Calculate witness for other benchmarks
let calculated_witness_json = calculateWitness(&CIRCOM_PATH, json_inputs)
.await
.expect("Failed to calculate witness")
.as_string()
.expect("Failed to convert calculated witness to string");
let calculated_witness_vec_str: Vec<String> =
serde_json::from_str(&calculated_witness_json).expect("Failed to parse JSON");
let calculated_witness: Vec<JsBigInt> = calculated_witness_vec_str
.iter()
.map(|x| JsBigInt::new(&x.into()).expect("Failed to create JsBigInt"))
.collect();
// Benchmark wasm_generate_rln_proof_with_witness
let start_wasm_generate_rln_proof_with_witness = Date::now();
for _ in 0..iterations {
let _ = wasm_generate_rln_proof_with_witness(
rln_instance,
calculated_witness.clone(),
witness_buffer.clone(),
)
.expect("Failed to generate proof");
}
let wasm_generate_rln_proof_with_witness_result =
Date::now() - start_wasm_generate_rln_proof_with_witness;
// Generate a proof for other benchmarks
let proof =
wasm_generate_rln_proof_with_witness(rln_instance, calculated_witness, witness_buffer)
.expect("Failed to generate proof");
let proof_data = proof.to_vec();
let verify_input = prepare_verify_input(proof_data, &signal);
let input_buffer = Uint8Array::from(&verify_input[..]);
let root = tree.root();
let roots_serialized = fr_to_bytes_le(&root);
let roots_buffer = Uint8Array::from(&roots_serialized[..]);
// Benchmark wasm_verify_with_roots
let start_wasm_verify_with_roots = Date::now();
for _ in 0..iterations {
let _ =
wasm_verify_with_roots(rln_instance, input_buffer.clone(), roots_buffer.clone())
.expect("Failed to verify proof");
}
let wasm_verify_with_roots_result = Date::now() - start_wasm_verify_with_roots;
// Verify the proof with the root
let is_proof_valid = wasm_verify_with_roots(rln_instance, input_buffer, roots_buffer)
.expect("Failed to verify proof");
assert!(is_proof_valid, "verification failed");
// Format and display results
let format_duration = |duration_ms: f64| -> String {
let avg_ms = duration_ms / (iterations as f64);
if avg_ms >= 1000.0 {
format!("{:.3} s", avg_ms / 1000.0)
} else {
format!("{:.3} ms", avg_ms)
}
};
results.push_str(&format!("wasm_new: {}\n", format_duration(wasm_new_result)));
results.push_str(&format!(
"wasm_key_gen: {}\n",
format_duration(wasm_key_gen_result)
));
results.push_str(&format!(
"calculateWitness: {}\n",
format_duration(calculate_witness_result)
));
results.push_str(&format!(
"wasm_generate_rln_proof_with_witness: {}\n",
format_duration(wasm_generate_rln_proof_with_witness_result)
));
results.push_str(&format!(
"wasm_verify_with_roots: {}\n",
format_duration(wasm_verify_with_roots_result)
));
// Log the results
wasm_bindgen_test::console_log!("{results}");
}
#[wasm_bindgen_test]
pub async fn rln_wasm_test() {
// Read the zkey file
let zkey = read_file(&ZKEY_PATH).expect("Failed to read zkey file");
// Create RLN instance and separated tree
let rln_instance = wasm_new(zkey).expect("Failed to create RLN instance");
let mut tree = PoseidonTree::default(TEST_TREE_HEIGHT).expect("Failed to create tree");
// Setting up the epoch and rln_identifier
let epoch = hash_to_field(b"test-epoch");
let rln_identifier = hash_to_field(b"test-rln-identifier");
let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);
// Generate identity pair
let mem_keys = wasm_key_gen(rln_instance).expect("Failed to generate keys");
let (identity_secret_hash, _) = bytes_le_to_fr(&mem_keys.subarray(0, 32).to_vec());
let (id_commitment, _) = bytes_le_to_fr(&mem_keys.subarray(32, 64).to_vec());
// Get index of the identity
let identity_index = tree.leaves_set();
// Setting up the user message limit
let user_message_limit = Fr::from(100);
// Updating the tree with the rate commitment
let rate_commitment = poseidon_hash(&[id_commitment, user_message_limit]);
tree.update_next(rate_commitment)
.expect("Failed to update tree");
// Generate merkle proof
let merkle_proof = tree
.proof(identity_index)
.expect("Failed to generate merkle proof");
// Create message id and signal
let message_id = Fr::from(0);
let signal: [u8; 32] = [0; 32];
let x = hash_to_field(&signal);
// Prepare input for witness calculation
let rln_witness = rln_witness_from_values(
identity_secret_hash,
&merkle_proof,
x,
external_nullifier,
user_message_limit,
message_id,
)
.expect("Failed to create RLN witness");
// Serialize the rln witness
let serialized_witness =
serialize_witness(&rln_witness).expect("Failed to serialize witness");
// Convert the serialized witness to a Uint8Array
let witness_buffer = Uint8Array::from(&serialized_witness[..]);
// Obtaining inputs that should be sent to circom witness calculator
let json_inputs = wasm_rln_witness_to_json(rln_instance, witness_buffer.clone())
.expect("Failed to convert witness to JSON");
// Calculating witness with JS
// (Using a JSON since wasm_bindgen does not like Result<Vec<JsBigInt>,JsValue>)
let calculated_witness_json = calculateWitness(&CIRCOM_PATH, json_inputs)
.await
.expect("Failed to calculate witness")
.as_string()
.expect("Failed to convert calculated witness to string");
let calculated_witness_vec_str: Vec<String> =
serde_json::from_str(&calculated_witness_json).expect("Failed to parse JSON");
let calculated_witness: Vec<JsBigInt> = calculated_witness_vec_str
.iter()
.map(|x| JsBigInt::new(&x.into()).expect("Failed to create JsBigInt"))
.collect();
// Generate a proof from the calculated witness
let proof =
wasm_generate_rln_proof_with_witness(rln_instance, calculated_witness, witness_buffer)
.expect("Failed to generate proof");
// Prepare the root for verification
let root = tree.root();
let roots_serialized = fr_to_bytes_le(&root);
let roots_buffer = Uint8Array::from(&roots_serialized[..]);
// Prepare input for proof verification
let proof_data = proof.to_vec();
let verify_input = prepare_verify_input(proof_data, &signal);
let input_buffer = Uint8Array::from(&verify_input[..]);
// Verify the proof with the root
let is_proof_valid = wasm_verify_with_roots(rln_instance, input_buffer, roots_buffer)
.expect("Failed to verify proof");
assert!(is_proof_valid, "verification failed");
}
}


@@ -18,78 +18,80 @@ doctest = false
[dependencies]
# ZKP Generation
ark-bn254 = { version = "0.5.0", features = ["std"] }
ark-ff = { version = "0.5.0", features = ["std", "asm"] }
ark-serialize = { version = "0.5.0", features = ["derive"] }
ark-ec = { version = "0.5.0", default-features = false }
ark-std = { version = "0.5.0", default-features = false }
ark-groth16 = { version = "0.5.0", features = [
ark-relations = { version = "0.5.1", features = ["std"] }
ark-ff = { version = "0.5.0", default-features = false, features = [
"parallel",
] }
ark-ec = { version = "0.5.0", default-features = false, features = [
"parallel",
] }
ark-std = { version = "0.5.0", default-features = false, features = [
"parallel",
] }
ark-poly = { version = "0.5.0", default-features = false, features = [
"parallel",
] }
ark-groth16 = { version = "0.5.0", default-features = false, features = [
"parallel",
] }
ark-serialize = { version = "0.5.0", default-features = false, features = [
"parallel",
], default-features = false }
ark-relations = { version = "0.5.0", default-features = false, features = [
"std",
] }
ark-circom = { version = "0.5.0" }
ark-r1cs-std = { version = "0.5.0" }
# error handling
color-eyre = "0.6.2"
thiserror = "2.0.11"
color-eyre = "0.6.3"
thiserror = "2.0.12"
# utilities
byteorder = "1.4.3"
byteorder = "1.5.0"
cfg-if = "1.0"
num-bigint = { version = "0.4.6", default-features = false, features = [
"rand",
"std",
] }
num-traits = "0.2.19"
once_cell = "1.19.0"
lazy_static = "1.4.0"
once_cell = "1.21.3"
lazy_static = "1.5.0"
rand = "0.8.5"
rand_chacha = "0.3.1"
ruint = { version = "1.12.4", features = ["rand", "serde", "ark-ff-04"] }
ruint = { version = "1.14.0", features = ["rand", "serde", "ark-ff-04"] }
tiny-keccak = { version = "2.0.2", features = ["keccak"] }
utils = { package = "zerokit_utils", version = "0.5.2", path = "../utils/", default-features = false }
utils = { package = "zerokit_utils", version = "0.5.2", path = "../utils", default-features = false }
# serialization
prost = "0.13.1"
prost = "0.13.5"
serde_json = "1.0"
serde = { version = "1.0", features = ["derive"] }
document-features = { version = "=0.2.10", optional = true }
document-features = { version = "0.2.11", optional = true }
[dev-dependencies]
sled = "0.34.7"
criterion = { version = "0.4.0", features = ["html_reports"] }
[features]
default = ["parallel", "pmtree-ft"]
parallel = [
"ark-ec/parallel",
"ark-ff/parallel",
"ark-std/parallel",
"ark-groth16/parallel",
"utils/parallel",
]
default = ["pmtree-ft"]
fullmerkletree = ["default"]
arkzkey = []
stateless = []
arkzkey = []
# Note: pmtree feature is still experimental
pmtree-ft = ["utils/pmtree-ft"]
[[bench]]
name = "pmtree_benchmark"
harness = false
[[bench]]
name = "circuit_loading_benchmark"
harness = false
[[bench]]
name = "circuit_loading_arkzkey_benchmark"
harness = false
required-features = ["arkzkey"]
[[bench]]
name = "circuit_loading_benchmark"
harness = false
[[bench]]
name = "pmtree_benchmark"
harness = false
[[bench]]
name = "poseidon_tree_benchmark"
harness = false


@@ -34,9 +34,9 @@ use std::io::Cursor;
use rln::{
circuit::Fr,
hashers::{hash_to_field, poseidon_hash},
protocol::{keygen, prepare_verify_input},
protocol::{keygen, prepare_prove_input, prepare_verify_input},
public::RLN,
utils::{fr_to_bytes_le, normalize_usize},
utils::fr_to_bytes_le,
};
use serde_json::json;
@@ -53,8 +53,8 @@ fn main() {
// 3. Add a rate commitment to the Merkle tree
let id_index = 10;
let user_message_limit = 10;
let rate_commitment = poseidon_hash(&[id_commitment, Fr::from(user_message_limit)]);
let user_message_limit = Fr::from(10);
let rate_commitment = poseidon_hash(&[id_commitment, user_message_limit]);
let mut buffer = Cursor::new(fr_to_bytes_le(&rate_commitment));
rln.set_leaf(id_index, &mut buffer).unwrap();
@@ -65,25 +65,28 @@ fn main() {
// We generate rln_identifier from a date seed and we ensure is
// mapped to a field element by hashing-to-field its content
let rln_identifier = hash_to_field(b"test-rln-identifier");
// We generate a external nullifier
let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);
// We choose a message_id satisfy 0 <= message_id < user_message_limit
let message_id = Fr::from(1);
// 5. Generate and verify a proof for a message
let signal = b"RLN is awesome";
// 6. Prepare input for generate_rln_proof API
// input_data is [ identity_secret<32> | id_index<8> | external_nullifier<32> | user_message_limit<32> | message_id<32> | signal_len<8> | signal<var> ]
let mut serialized: Vec<u8> = Vec::new();
serialized.append(&mut fr_to_bytes_le(&identity_secret_hash));
serialized.append(&mut normalize_usize(id_index));
serialized.append(&mut fr_to_bytes_le(&Fr::from(user_message_limit)));
serialized.append(&mut fr_to_bytes_le(&Fr::from(1)));
serialized.append(&mut fr_to_bytes_le(&external_nullifier));
serialized.append(&mut normalize_usize(signal.len()));
serialized.append(&mut signal.to_vec());
let prove_input = prepare_prove_input(
identity_secret_hash,
id_index,
user_message_limit,
message_id,
external_nullifier,
signal,
);
// 7. Generate a RLN proof
// We generate a RLN proof for proof_input
let mut input_buffer = Cursor::new(serialized);
let mut input_buffer = Cursor::new(prove_input);
let mut output_buffer = Cursor::new(Vec::<u8>::new());
rln.generate_rln_proof(&mut input_buffer, &mut output_buffer)
.unwrap();


@@ -1,5 +1,5 @@
use ark_circom::read_zkey;
use criterion::{criterion_group, criterion_main, Criterion};
use rln::circuit::zkey::read_zkey;
use std::io::Cursor;
pub fn zkey_load_benchmark(c: &mut Criterion) {


@@ -21,7 +21,7 @@ pub fn pmtree_benchmark(c: &mut Criterion) {
c.bench_function("Pmtree::override_range", |b| {
b.iter(|| {
tree.override_range(0, leaves.clone(), [0, 1, 2, 3])
tree.override_range(0, leaves.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
})
});

Binary file not shown.


@@ -13,8 +13,8 @@ use std::{
ops::{BitAnd, BitOr, BitXor, Deref, Shl, Shr},
};
use crate::circuit::iden3calc::proto;
use crate::circuit::Fr;
use crate::iden3calc::proto;
pub const M: U256 =
uint!(21888242871839275222246405745257275088548364400416034343698204186575808495617_U256);


@@ -7,7 +7,7 @@ use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use prost::Message;
use std::io::{Read, Write};
use crate::iden3calc::{
use crate::circuit::iden3calc::{
graph,
graph::{Operation, TresOperation, UnoOperation},
proto, InputSignalsInfo,


@@ -1,16 +1,20 @@
// This crate provides interfaces for the zero-knowledge circuit and keys
pub mod iden3calc;
pub mod qap;
pub mod zkey;
use ::lazy_static::lazy_static;
use ark_bn254::{
Bn254, Fq as ArkFq, Fq2 as ArkFq2, Fr as ArkFr, G1Affine as ArkG1Affine,
G1Projective as ArkG1Projective, G2Affine as ArkG2Affine, G2Projective as ArkG2Projective,
};
use ark_groth16::{ProvingKey, VerifyingKey};
use ark_groth16::ProvingKey;
use ark_relations::r1cs::ConstraintMatrices;
use cfg_if::cfg_if;
use color_eyre::{Report, Result};
use crate::iden3calc::calc_witness;
use crate::circuit::iden3calc::calc_witness;
#[cfg(feature = "arkzkey")]
use {
@@ -19,13 +23,15 @@ use {
};
#[cfg(not(feature = "arkzkey"))]
use {ark_circom::read_zkey, std::io::Cursor};
use {crate::circuit::zkey::read_zkey, std::io::Cursor};
#[cfg(feature = "arkzkey")]
pub const ARKZKEY_BYTES: &[u8] = include_bytes!("../resources/tree_height_20/rln_final.arkzkey");
pub const ARKZKEY_BYTES: &[u8] = include_bytes!("../../resources/tree_height_20/rln_final.arkzkey");
pub const ZKEY_BYTES: &[u8] = include_bytes!("../resources/tree_height_20/rln_final.zkey");
const GRAPH_BYTES: &[u8] = include_bytes!("../resources/tree_height_20/graph.bin");
pub const ZKEY_BYTES: &[u8] = include_bytes!("../../resources/tree_height_20/rln_final.zkey");
#[cfg(not(target_arch = "wasm32"))]
const GRAPH_BYTES: &[u8] = include_bytes!("../../resources/tree_height_20/graph.bin");
lazy_static! {
static ref ZKEY: (ProvingKey<Curve>, ConstraintMatrices<Fr>) = {
@@ -73,30 +79,11 @@ pub fn zkey_from_raw(zkey_data: &[u8]) -> Result<(ProvingKey<Curve>, ConstraintM
}
// Loads the proving key
#[cfg(not(target_arch = "wasm32"))]
pub fn zkey_from_folder() -> &'static (ProvingKey<Curve>, ConstraintMatrices<Fr>) {
&ZKEY
}
// Loads the verification key from a bytes vector
pub fn vk_from_raw(zkey_data: &[u8]) -> Result<VerifyingKey<Curve>> {
if !zkey_data.is_empty() {
let (proving_key, _matrices) = zkey_from_raw(zkey_data)?;
return Ok(proving_key.vk);
}
Err(Report::msg("No proving/verification key found!"))
}
// Checks verification key to be correct with respect to proving key
pub fn check_vk_from_zkey(verifying_key: VerifyingKey<Curve>) -> Result<()> {
let (proving_key, _matrices) = zkey_from_folder();
if proving_key.vk == verifying_key {
Ok(())
} else {
Err(Report::msg("verifying_keys are not equal"))
}
}
pub fn calculate_rln_witness<I: IntoIterator<Item = (String, Vec<Fr>)>>(
inputs: I,
graph_data: &[u8],
@@ -104,6 +91,7 @@ pub fn calculate_rln_witness<I: IntoIterator<Item = (String, Vec<Fr>)>>(
calc_witness(inputs, graph_data)
}
#[cfg(not(target_arch = "wasm32"))]
pub fn graph_from_folder() -> &'static [u8] {
GRAPH_BYTES
}

114
rln/src/circuit/qap.rs Normal file

@@ -0,0 +1,114 @@
// This file is based on the code by arkworks. Its preimage can be found here:
// https://github.com/arkworks-rs/circom-compat/blob/3c95ed98e23a408b4d99a53e483a9bba39685a4e/src/circom/qap.rs
use ark_ff::PrimeField;
use ark_groth16::r1cs_to_qap::{evaluate_constraint, LibsnarkReduction, R1CSToQAP};
use ark_poly::EvaluationDomain;
use ark_relations::r1cs::{ConstraintMatrices, ConstraintSystemRef, SynthesisError};
use ark_std::{cfg_into_iter, cfg_iter, cfg_iter_mut, vec};
/// Implements the witness map used by snarkjs. The arkworks witness map calculates the
/// coefficients of H through computing (AB-C)/Z in the evaluation domain and going back to the
/// coefficients domain. snarkjs instead precomputes the Lagrange form of the powers of tau bases
/// in a domain twice as large and the witness map is computed as the odd coefficients of (AB-C)
/// in that domain. This serves as HZ when computing the C proof element.
pub struct CircomReduction;
impl R1CSToQAP for CircomReduction {
#[allow(clippy::type_complexity)]
fn instance_map_with_evaluation<F: PrimeField, D: EvaluationDomain<F>>(
cs: ConstraintSystemRef<F>,
t: &F,
) -> Result<(Vec<F>, Vec<F>, Vec<F>, F, usize, usize), SynthesisError> {
LibsnarkReduction::instance_map_with_evaluation::<F, D>(cs, t)
}
fn witness_map_from_matrices<F: PrimeField, D: EvaluationDomain<F>>(
matrices: &ConstraintMatrices<F>,
num_inputs: usize,
num_constraints: usize,
full_assignment: &[F],
) -> Result<Vec<F>, SynthesisError> {
let zero = F::zero();
let domain =
D::new(num_constraints + num_inputs).ok_or(SynthesisError::PolynomialDegreeTooLarge)?;
let domain_size = domain.size();
let mut a = vec![zero; domain_size];
let mut b = vec![zero; domain_size];
#[allow(unexpected_cfgs)]
cfg_iter_mut!(a[..num_constraints])
.zip(cfg_iter_mut!(b[..num_constraints]))
.zip(cfg_iter!(&matrices.a))
.zip(cfg_iter!(&matrices.b))
.for_each(|(((a, b), at_i), bt_i)| {
*a = evaluate_constraint(at_i, full_assignment);
*b = evaluate_constraint(bt_i, full_assignment);
});
{
let start = num_constraints;
let end = start + num_inputs;
a[start..end].clone_from_slice(&full_assignment[..num_inputs]);
}
let mut c = vec![zero; domain_size];
#[allow(unexpected_cfgs)]
cfg_iter_mut!(c[..num_constraints])
.zip(&a)
.zip(&b)
.for_each(|((c_i, &a), &b)| {
*c_i = a * b;
});
domain.ifft_in_place(&mut a);
domain.ifft_in_place(&mut b);
let root_of_unity = {
let domain_size_double = 2 * domain_size;
let domain_double =
D::new(domain_size_double).ok_or(SynthesisError::PolynomialDegreeTooLarge)?;
domain_double.element(1)
};
D::distribute_powers_and_mul_by_const(&mut a, root_of_unity, F::one());
D::distribute_powers_and_mul_by_const(&mut b, root_of_unity, F::one());
domain.fft_in_place(&mut a);
domain.fft_in_place(&mut b);
let mut ab = domain.mul_polynomials_in_evaluation_domain(&a, &b);
drop(a);
drop(b);
domain.ifft_in_place(&mut c);
D::distribute_powers_and_mul_by_const(&mut c, root_of_unity, F::one());
domain.fft_in_place(&mut c);
#[allow(unexpected_cfgs)]
cfg_iter_mut!(ab)
.zip(c)
.for_each(|(ab_i, c_i)| *ab_i -= &c_i);
Ok(ab)
}
fn h_query_scalars<F: PrimeField, D: EvaluationDomain<F>>(
max_power: usize,
t: F,
_: F,
delta_inverse: F,
) -> Result<Vec<F>, SynthesisError> {
// the usual H query has domain-1 powers. Z has domain powers. So HZ has 2*domain-1 powers.
#[allow(unexpected_cfgs)]
let mut scalars = cfg_into_iter!(0..2 * max_power + 1)
.map(|i| delta_inverse * t.pow([i as u64]))
.collect::<Vec<_>>();
let domain_size = scalars.len();
let domain = D::new(domain_size).ok_or(SynthesisError::PolynomialDegreeTooLarge)?;
// generate the lagrange coefficients
domain.ifft_in_place(&mut scalars);
#[allow(unexpected_cfgs)]
Ok(cfg_into_iter!(scalars).skip(1).step_by(2).collect())
}
}

371
rln/src/circuit/zkey.rs Normal file

@@ -0,0 +1,371 @@
// This file is based on the code by arkworks. Its preimage can be found here:
// https://github.com/arkworks-rs/circom-compat/blob/3c95ed98e23a408b4d99a53e483a9bba39685a4e/src/zkey.rs
//! ZKey Parsing
//!
//! Each ZKey file is broken into sections:
//! Header(1)
//! Prover Type 1 Groth
//! HeaderGroth(2)
//! n8q
//! q
//! n8r
//! r
//! NVars
//! NPub
//! DomainSize (multiple of 2
//! alpha1
//! beta1
//! delta1
//! beta2
//! gamma2
//! delta2
//! IC(3)
//! Coefs(4)
//! PointsA(5)
//! PointsB1(6)
//! PointsB2(7)
//! PointsC(8)
//! PointsH(9)
//! Contributions(10)
use ark_ff::{BigInteger256, PrimeField};
use ark_relations::r1cs::ConstraintMatrices;
use ark_serialize::{CanonicalDeserialize, SerializationError};
use ark_std::log2;
use byteorder::{LittleEndian, ReadBytesExt};
use std::{
collections::HashMap,
io::{Read, Seek, SeekFrom},
};
use ark_bn254::{Bn254, Fq, Fq2, Fr, G1Affine, G2Affine};
use ark_groth16::{ProvingKey, VerifyingKey};
use num_traits::Zero;
type IoResult<T> = Result<T, SerializationError>;
#[derive(Clone, Debug)]
struct Section {
position: u64,
#[allow(dead_code)]
size: usize,
}
/// Reads a SnarkJS ZKey file into an Arkworks ProvingKey.
pub fn read_zkey<R: Read + Seek>(
reader: &mut R,
) -> IoResult<(ProvingKey<Bn254>, ConstraintMatrices<Fr>)> {
let mut binfile = BinFile::new(reader)?;
let proving_key = binfile.proving_key()?;
let matrices = binfile.matrices()?;
Ok((proving_key, matrices))
}
#[derive(Debug)]
struct BinFile<'a, R> {
#[allow(dead_code)]
ftype: String,
#[allow(dead_code)]
version: u32,
sections: HashMap<u32, Vec<Section>>,
reader: &'a mut R,
}
impl<'a, R: Read + Seek> BinFile<'a, R> {
fn new(reader: &'a mut R) -> IoResult<Self> {
let mut magic = [0u8; 4];
reader.read_exact(&mut magic)?;
let version = reader.read_u32::<LittleEndian>()?;
let num_sections = reader.read_u32::<LittleEndian>()?;
let mut sections = HashMap::new();
for _ in 0..num_sections {
let section_id = reader.read_u32::<LittleEndian>()?;
let section_length = reader.read_u64::<LittleEndian>()?;
let section = sections.entry(section_id).or_insert_with(Vec::new);
section.push(Section {
position: reader.stream_position()?,
size: section_length as usize,
});
reader.seek(SeekFrom::Current(section_length as i64))?;
}
Ok(Self {
ftype: std::str::from_utf8(&magic[..]).unwrap().to_string(),
version,
sections,
reader,
})
}
fn proving_key(&mut self) -> IoResult<ProvingKey<Bn254>> {
let header = self.groth_header()?;
let ic = self.ic(header.n_public)?;
let a_query = self.a_query(header.n_vars)?;
let b_g1_query = self.b_g1_query(header.n_vars)?;
let b_g2_query = self.b_g2_query(header.n_vars)?;
let l_query = self.l_query(header.n_vars - header.n_public - 1)?;
let h_query = self.h_query(header.domain_size as usize)?;
let vk = VerifyingKey::<Bn254> {
alpha_g1: header.verifying_key.alpha_g1,
beta_g2: header.verifying_key.beta_g2,
gamma_g2: header.verifying_key.gamma_g2,
delta_g2: header.verifying_key.delta_g2,
gamma_abc_g1: ic,
};
let pk = ProvingKey::<Bn254> {
vk,
beta_g1: header.verifying_key.beta_g1,
delta_g1: header.verifying_key.delta_g1,
a_query,
b_g1_query,
b_g2_query,
h_query,
l_query,
};
Ok(pk)
}
fn get_section(&self, id: u32) -> Section {
self.sections.get(&id).unwrap()[0].clone()
}
fn groth_header(&mut self) -> IoResult<HeaderGroth> {
let section = self.get_section(2);
let header = HeaderGroth::new(&mut self.reader, &section)?;
Ok(header)
}
fn ic(&mut self, n_public: usize) -> IoResult<Vec<G1Affine>> {
// the range is non-inclusive so we do +1 to get all inputs
self.g1_section(n_public + 1, 3)
}
/// Returns the [`ConstraintMatrices`] corresponding to the zkey
pub fn matrices(&mut self) -> IoResult<ConstraintMatrices<Fr>> {
let header = self.groth_header()?;
let section = self.get_section(4);
self.reader.seek(SeekFrom::Start(section.position))?;
let num_coeffs: u32 = self.reader.read_u32::<LittleEndian>()?;
// instantiate AB
let mut matrices = vec![vec![vec![]; header.domain_size as usize]; 2];
let mut max_constraint_index = 0;
for _ in 0..num_coeffs {
let matrix: u32 = self.reader.read_u32::<LittleEndian>()?;
let constraint: u32 = self.reader.read_u32::<LittleEndian>()?;
let signal: u32 = self.reader.read_u32::<LittleEndian>()?;
let value: Fr = deserialize_field_fr(&mut self.reader)?;
max_constraint_index = std::cmp::max(max_constraint_index, constraint);
matrices[matrix as usize][constraint as usize].push((value, signal as usize));
}
let num_constraints = max_constraint_index as usize - header.n_public;
// Remove the public input constraints, Arkworks adds them later
matrices.iter_mut().for_each(|m| {
m.truncate(num_constraints);
});
// This is taken from Arkworks' to_matrices() function
let a = matrices[0].clone();
let b = matrices[1].clone();
let a_num_non_zero: usize = a.iter().map(|lc| lc.len()).sum();
let b_num_non_zero: usize = b.iter().map(|lc| lc.len()).sum();
let matrices = ConstraintMatrices {
num_instance_variables: header.n_public + 1,
num_witness_variables: header.n_vars - header.n_public,
num_constraints,
a_num_non_zero,
b_num_non_zero,
c_num_non_zero: 0,
a,
b,
c: vec![],
};
Ok(matrices)
}
fn a_query(&mut self, n_vars: usize) -> IoResult<Vec<G1Affine>> {
self.g1_section(n_vars, 5)
}
fn b_g1_query(&mut self, n_vars: usize) -> IoResult<Vec<G1Affine>> {
self.g1_section(n_vars, 6)
}
fn b_g2_query(&mut self, n_vars: usize) -> IoResult<Vec<G2Affine>> {
self.g2_section(n_vars, 7)
}
fn l_query(&mut self, n_vars: usize) -> IoResult<Vec<G1Affine>> {
self.g1_section(n_vars, 8)
}
fn h_query(&mut self, n_vars: usize) -> IoResult<Vec<G1Affine>> {
self.g1_section(n_vars, 9)
}
fn g1_section(&mut self, num: usize, section_id: usize) -> IoResult<Vec<G1Affine>> {
let section = self.get_section(section_id as u32);
self.reader.seek(SeekFrom::Start(section.position))?;
deserialize_g1_vec(self.reader, num as u32)
}
fn g2_section(&mut self, num: usize, section_id: usize) -> IoResult<Vec<G2Affine>> {
let section = self.get_section(section_id as u32);
self.reader.seek(SeekFrom::Start(section.position))?;
deserialize_g2_vec(self.reader, num as u32)
}
}
#[derive(Default, Clone, Debug, CanonicalDeserialize)]
pub struct ZVerifyingKey {
alpha_g1: G1Affine,
beta_g1: G1Affine,
beta_g2: G2Affine,
gamma_g2: G2Affine,
delta_g1: G1Affine,
delta_g2: G2Affine,
}
impl ZVerifyingKey {
fn new<R: Read>(reader: &mut R) -> IoResult<Self> {
let alpha_g1 = deserialize_g1(reader)?;
let beta_g1 = deserialize_g1(reader)?;
let beta_g2 = deserialize_g2(reader)?;
let gamma_g2 = deserialize_g2(reader)?;
let delta_g1 = deserialize_g1(reader)?;
let delta_g2 = deserialize_g2(reader)?;
Ok(Self {
alpha_g1,
beta_g1,
beta_g2,
gamma_g2,
delta_g1,
delta_g2,
})
}
}
#[derive(Clone, Debug)]
struct HeaderGroth {
#[allow(dead_code)]
n8q: u32,
#[allow(dead_code)]
q: BigInteger256,
#[allow(dead_code)]
n8r: u32,
#[allow(dead_code)]
r: BigInteger256,
n_vars: usize,
n_public: usize,
domain_size: u32,
#[allow(dead_code)]
power: u32,
verifying_key: ZVerifyingKey,
}
impl HeaderGroth {
fn new<R: Read + Seek>(reader: &mut R, section: &Section) -> IoResult<Self> {
reader.seek(SeekFrom::Start(section.position))?;
Self::read(reader)
}
fn read<R: Read>(mut reader: &mut R) -> IoResult<Self> {
// TODO: Impl From<u32> in Arkworks
let n8q: u32 = u32::deserialize_uncompressed(&mut reader)?;
// base field modulus q of Bn254
let q = BigInteger256::deserialize_uncompressed(&mut reader)?;
let n8r: u32 = u32::deserialize_uncompressed(&mut reader)?;
// scalar field modulus r of Bn254 (the group order)
let r = BigInteger256::deserialize_uncompressed(&mut reader)?;
let n_vars = u32::deserialize_uncompressed(&mut reader)? as usize;
let n_public = u32::deserialize_uncompressed(&mut reader)? as usize;
let domain_size: u32 = u32::deserialize_uncompressed(&mut reader)?;
let power = log2(domain_size as usize);
let verifying_key = ZVerifyingKey::new(&mut reader)?;
Ok(Self {
n8q,
q,
n8r,
r,
n_vars,
n_public,
domain_size,
power,
verifying_key,
})
}
}
// need to divide by R, since snarkjs outputs the zkey with coefficients
// multiplied by R^2
fn deserialize_field_fr<R: Read>(reader: &mut R) -> IoResult<Fr> {
let bigint = BigInteger256::deserialize_uncompressed(reader)?;
Ok(Fr::new_unchecked(Fr::new_unchecked(bigint).into_bigint()))
}
// skips the multiplication by R because Circom points are already in Montgomery form
fn deserialize_field<R: Read>(reader: &mut R) -> IoResult<Fq> {
let bigint = BigInteger256::deserialize_uncompressed(reader)?;
// if you use Fq::new it multiplies by R
Ok(Fq::new_unchecked(bigint))
}
pub fn deserialize_field2<R: Read>(reader: &mut R) -> IoResult<Fq2> {
let c0 = deserialize_field(reader)?;
let c1 = deserialize_field(reader)?;
Ok(Fq2::new(c0, c1))
}
fn deserialize_g1<R: Read>(reader: &mut R) -> IoResult<G1Affine> {
let x = deserialize_field(reader)?;
let y = deserialize_field(reader)?;
let infinity = x.is_zero() && y.is_zero();
if infinity {
Ok(G1Affine::identity())
} else {
Ok(G1Affine::new(x, y))
}
}
fn deserialize_g2<R: Read>(reader: &mut R) -> IoResult<G2Affine> {
let f1 = deserialize_field2(reader)?;
let f2 = deserialize_field2(reader)?;
let infinity = f1.is_zero() && f2.is_zero();
if infinity {
Ok(G2Affine::identity())
} else {
Ok(G2Affine::new(f1, f2))
}
}
fn deserialize_g1_vec<R: Read>(reader: &mut R, n_vars: u32) -> IoResult<Vec<G1Affine>> {
(0..n_vars).map(|_| deserialize_g1(reader)).collect()
}
fn deserialize_g2_vec<R: Read>(reader: &mut R, n_vars: u32) -> IoResult<Vec<G2Affine>> {
(0..n_vars).map(|_| deserialize_g2(reader)).collect()
}

View File

@@ -23,7 +23,7 @@ static POSEIDON: Lazy<Poseidon<Fr>> = Lazy::new(|| Poseidon::<Fr>::from(&ROUND_P
pub fn poseidon_hash(input: &[Fr]) -> Fr {
POSEIDON
.hash(input.to_vec())
.hash(input)
.expect("hash with fixed input size can't fail")
}
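The hunk above changes `poseidon_hash` to borrow a slice instead of taking an owned `Vec<Fr>`. A hypothetical call site under the new signature (assuming `poseidon_hash` from the `hashers` module is in scope) could look like:

```rust
use ark_bn254::Fr;

// Hypothetical call site; not part of the diff.
fn hash_two_inputs() -> Fr {
    let inputs = [Fr::from(1u64), Fr::from(2u64)];
    // A slice borrow is enough now; no per-call Vec allocation.
    poseidon_hash(&inputs)
}
```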

View File

@@ -1,8 +1,7 @@
#![allow(dead_code)]
pub mod circuit;
#[cfg(not(target_arch = "wasm32"))]
pub mod ffi;
pub mod hashers;
pub mod iden3calc;
#[cfg(feature = "pmtree-ft")]
pub mod pm_tree_adapter;
pub mod poseidon_tree;
@@ -11,5 +10,3 @@ pub mod public;
#[cfg(test)]
pub mod public_api_tests;
pub mod utils;
pub mod ffi;

View File

@@ -244,7 +244,7 @@ impl ZerokitMerkleTree for PmTree {
(0, 0) => Err(Report::msg("no leaves or indices to be removed")),
(1, 0) => self.set(start, leaves[0]),
(0, 1) => self.delete(indices[0]),
(_, 0) => self.set_range(start, leaves),
(_, 0) => self.set_range(start, leaves.into_iter()),
(0, _) => self.remove_indices(&indices),
(_, _) => self.remove_indices_and_set_leaves(start, leaves, &indices),
}

View File

@@ -1,7 +1,6 @@
// This crate collects all the underlying primitives used to implement RLN
use ark_bn254::Fr;
use ark_circom::CircomReduction;
use ark_groth16::{prepare_verifying_key, Groth16, Proof as ArkProof, ProvingKey, VerifyingKey};
use ark_relations::r1cs::{ConstraintMatrices, SynthesisError};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
@@ -16,7 +15,7 @@ use std::time::Instant;
use thiserror::Error;
use tiny_keccak::{Hasher as _, Keccak};
use crate::circuit::{calculate_rln_witness, Curve};
use crate::circuit::{calculate_rln_witness, qap::CircomReduction, Curve};
use crate::hashers::{hash_to_field, poseidon_hash};
use crate::poseidon_tree::*;
use crate::public::RLN_IDENTIFIER;

View File

@@ -8,16 +8,24 @@ use {
utils::{Hasher, ZerokitMerkleProof, ZerokitMerkleTree},
};
use crate::circuit::{graph_from_folder, vk_from_raw, zkey_from_folder, zkey_from_raw, Curve, Fr};
use crate::circuit::{zkey_from_raw, Curve, Fr};
use crate::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash};
use crate::protocol::*;
use crate::utils::*;
#[cfg(not(target_arch = "wasm32"))]
use {
crate::circuit::{graph_from_folder, zkey_from_folder},
std::default::Default,
};
use ark_groth16::{Proof as ArkProof, ProvingKey, VerifyingKey};
use ark_relations::r1cs::ConstraintMatrices;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, Write};
use color_eyre::{Report, Result};
use std::{default::Default, io::Cursor};
use std::io::Cursor;
#[cfg(target_arch = "wasm32")]
use num_bigint::BigInt;
/// The application-specific RLN identifier.
///
@@ -32,6 +40,7 @@ pub const RLN_IDENTIFIER: &[u8] = b"zerokit/rln/010203040506070809";
pub struct RLN {
proving_key: (ProvingKey<Curve>, ConstraintMatrices<Fr>),
pub(crate) verification_key: VerifyingKey<Curve>,
#[cfg(not(target_arch = "wasm32"))]
pub(crate) graph_data: Vec<u8>,
#[cfg(not(feature = "stateless"))]
pub(crate) tree: PoseidonTree,
@@ -54,7 +63,7 @@ impl RLN {
/// // We create a new RLN instance
/// let mut rln = RLN::new(tree_height, input);
/// ```
#[cfg(not(feature = "stateless"))]
#[cfg(all(not(target_arch = "wasm32"), not(feature = "stateless")))]
pub fn new<R: Read>(tree_height: usize, mut input_data: R) -> Result<RLN> {
// We read input
let mut input: Vec<u8> = Vec::new();
@@ -63,9 +72,9 @@ impl RLN {
let rln_config: Value = serde_json::from_str(&String::from_utf8(input)?)?;
let tree_config = rln_config["tree_config"].to_string();
let proving_key = zkey_from_folder();
let verification_key = &proving_key.0.vk;
let graph_data = graph_from_folder();
let proving_key = zkey_from_folder().to_owned();
let verification_key = proving_key.0.vk.to_owned();
let graph_data = graph_from_folder().to_owned();
let tree_config: <PoseidonTree as ZerokitMerkleTree>::Config = if tree_config.is_empty() {
<PoseidonTree as ZerokitMerkleTree>::Config::default()
@@ -81,9 +90,9 @@ impl RLN {
)?;
Ok(RLN {
proving_key: proving_key.to_owned(),
verification_key: verification_key.to_owned(),
graph_data: graph_data.to_vec(),
proving_key,
verification_key,
graph_data,
#[cfg(not(feature = "stateless"))]
tree,
})
@@ -98,16 +107,16 @@ impl RLN {
/// let mut rln = RLN::new();
/// ```
#[cfg_attr(docsrs, doc(cfg(feature = "stateless")))]
#[cfg(feature = "stateless")]
#[cfg(all(not(target_arch = "wasm32"), feature = "stateless"))]
pub fn new() -> Result<RLN> {
let proving_key = zkey_from_folder();
let verification_key = &proving_key.0.vk;
let graph_data = graph_from_folder();
let proving_key = zkey_from_folder().to_owned();
let verification_key = proving_key.0.vk.to_owned();
let graph_data = graph_from_folder().to_owned();
Ok(RLN {
proving_key: proving_key.to_owned(),
verification_key: verification_key.to_owned(),
graph_data: graph_data.to_vec(),
proving_key,
verification_key,
graph_data,
})
}
@@ -116,6 +125,7 @@ impl RLN {
/// Input parameters are
/// - `tree_height`: the height of the internal Merkle tree
    /// - `zkey_vec`: a byte vector containing the proving key (`rln_final.zkey`) or (`rln_final.arkzkey`) as a binary file
    /// - `graph_data`: a byte vector containing the graph data (`graph.bin`) as a binary file
/// - `tree_config_input`: a reader for a string containing a json with the merkle tree configuration
///
/// Example:
@@ -143,11 +153,10 @@ impl RLN {
/// tree_height,
/// resources[0].clone(),
/// resources[1].clone(),
/// resources[2].clone(),
/// tree_config_buffer,
/// );
/// ```
#[cfg(not(feature = "stateless"))]
#[cfg(all(not(target_arch = "wasm32"), not(feature = "stateless")))]
pub fn new_with_params<R: Read>(
tree_height: usize,
zkey_vec: Vec<u8>,
@@ -155,7 +164,7 @@ impl RLN {
mut tree_config_input: R,
) -> Result<RLN> {
let proving_key = zkey_from_raw(&zkey_vec)?;
let verification_key = vk_from_raw(&zkey_vec)?;
let verification_key = proving_key.0.vk.to_owned();
let mut tree_config_vec: Vec<u8> = Vec::new();
tree_config_input.read_to_end(&mut tree_config_vec)?;
@@ -187,6 +196,7 @@ impl RLN {
///
/// Input parameters are
    /// - `zkey_vec`: a byte vector containing the proving key (`rln_final.zkey`) or (`rln_final.arkzkey`) as a binary file
    /// - `graph_data`: a byte vector containing the graph data (`graph.bin`) as a binary file
///
/// Example:
/// ```
@@ -208,13 +218,12 @@ impl RLN {
/// let mut rln = RLN::new_with_params(
/// resources[0].clone(),
/// resources[1].clone(),
/// resources[2].clone(),
/// );
/// ```
#[cfg(feature = "stateless")]
#[cfg(all(not(target_arch = "wasm32"), feature = "stateless"))]
pub fn new_with_params(zkey_vec: Vec<u8>, graph_data: Vec<u8>) -> Result<RLN> {
let proving_key = zkey_from_raw(&zkey_vec)?;
let verification_key = vk_from_raw(&zkey_vec)?;
let verification_key = proving_key.0.vk.to_owned();
Ok(RLN {
proving_key,
@@ -223,6 +232,36 @@ impl RLN {
})
}
/// Creates a new stateless RLN object by passing circuit resources as a byte vector.
///
/// Input parameters are
    /// - `zkey_vec`: a byte vector containing the proving key (`rln_final.zkey`) or (`rln_final.arkzkey`) as a binary file
///
/// Example:
/// ```
/// use std::fs::File;
/// use std::io::Read;
///
/// let zkey_path = "./resources/tree_height_20/rln_final.zkey";
///
/// let mut file = File::open(zkey_path).expect("Failed to open file");
/// let metadata = std::fs::metadata(zkey_path).expect("Failed to read metadata");
/// let mut zkey_vec = vec![0; metadata.len() as usize];
/// file.read_exact(&mut zkey_vec).expect("Failed to read file");
///
/// let mut rln = RLN::new_with_params(zkey_vec)?;
/// ```
#[cfg(all(target_arch = "wasm32", feature = "stateless"))]
pub fn new_with_params(zkey_vec: Vec<u8>) -> Result<RLN> {
let proving_key = zkey_from_raw(&zkey_vec)?;
let verification_key = proving_key.0.vk.to_owned();
Ok(RLN {
proving_key,
verification_key,
})
}
////////////////////////////////////////////////////////
// Merkle-tree APIs
////////////////////////////////////////////////////////
@@ -346,7 +385,7 @@ impl RLN {
// We set the leaves
self.tree
.override_range(index, leaves, [])
.override_range(index, leaves.into_iter(), [].into_iter())
.map_err(|_| Report::msg("Could not set leaves"))?;
Ok(())
}
@@ -429,7 +468,7 @@ impl RLN {
// We set the leaves
self.tree
.override_range(index, leaves, indices)
.override_range(index, leaves.into_iter(), indices.into_iter())
.map_err(|e| Report::msg(format!("Could not perform the batch operation: {e}")))?;
Ok(())
}
@@ -698,15 +737,16 @@ impl RLN {
/// rln.prove(&mut input_buffer, &mut output_buffer).unwrap();
/// let zk_proof = output_buffer.into_inner();
/// ```
#[cfg(not(target_arch = "wasm32"))]
pub fn prove<R: Read, W: Write>(
&mut self,
mut input_data: R,
mut output_data: W,
) -> Result<()> {
// We read input RLN witness and we serialize_compressed it
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
let (rln_witness, _) = deserialize_witness(&serialized)?;
let mut serialized_witness: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized_witness)?;
let (rln_witness, _) = deserialize_witness(&serialized_witness)?;
let proof = generate_proof(&self.proving_key, &rln_witness, &self.graph_data)?;
@@ -818,7 +858,7 @@ impl RLN {
/// // proof_data is [ proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32>]
/// let mut proof_data = output_buffer.into_inner();
/// ```
#[cfg(not(feature = "stateless"))]
#[cfg(all(not(target_arch = "wasm32"), not(feature = "stateless")))]
pub fn generate_rln_proof<R: Read, W: Write>(
&mut self,
mut input_data: R,
@@ -840,18 +880,18 @@ impl RLN {
Ok(())
}
// Generate RLN Proof using a witness calculated from outside zerokit
//
// output_data is [ proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32>]
// we skip it from documentation for now
/// Generate RLN Proof using a witness calculated from outside zerokit
///
/// output_data is [ proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32>]
#[cfg(not(target_arch = "wasm32"))]
pub fn generate_rln_proof_with_witness<R: Read, W: Write>(
&mut self,
mut input_data: R,
mut output_data: W,
) -> Result<()> {
let mut witness_byte: Vec<u8> = Vec::new();
input_data.read_to_end(&mut witness_byte)?;
let (rln_witness, _) = deserialize_witness(&witness_byte)?;
let mut serialized_witness: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized_witness)?;
let (rln_witness, _) = deserialize_witness(&serialized_witness)?;
let proof_values = proof_values_from_witness(&rln_witness)?;
let proof = generate_proof(&self.proving_key, &rln_witness, &self.graph_data)?;
@@ -863,6 +903,28 @@ impl RLN {
Ok(())
}
/// Generate RLN Proof using a witness calculated from outside zerokit
///
/// output_data is [ proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32>]
#[cfg(target_arch = "wasm32")]
pub fn generate_rln_proof_with_witness<W: Write>(
&mut self,
calculated_witness: Vec<BigInt>,
serialized_witness: Vec<u8>,
mut output_data: W,
) -> Result<()> {
let (rln_witness, _) = deserialize_witness(&serialized_witness[..])?;
let proof_values = proof_values_from_witness(&rln_witness)?;
let proof = generate_proof_with_witness(calculated_witness, &self.proving_key).unwrap();
// Note: we export a serialization of ark-groth16::Proof not semaphore::Proof
// This proof is compressed, i.e. 128 bytes long
proof.serialize_compressed(&mut output_data)?;
output_data.write_all(&serialize_proof_values(&proof_values))?;
Ok(())
}
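Since a compressed ark-groth16 proof is 128 bytes and each appended value is a 32-byte field element, the serialized output described above is 288 bytes. A small sketch of slicing it back apart, derived only from the layout comment (this helper is not part of the crate's API):

```rust
// Layout: proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32>
// Hypothetical helper, shown only to make the offsets concrete.
fn split_proof_output(bytes: &[u8]) -> (&[u8], Vec<&[u8]>) {
    assert_eq!(bytes.len(), 128 + 5 * 32, "expected 288 bytes of proof output");
    let (compressed_proof, rest) = bytes.split_at(128);
    // field_elements[0..5] = root, external_nullifier, x, y, nullifier
    let field_elements: Vec<&[u8]> = rest.chunks_exact(32).collect();
    (compressed_proof, field_elements)
}
```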
/// Verifies a zkSNARK RLN proof against the provided proof values and the state of the internal Merkle tree.
///
/// Input values are:
@@ -1301,6 +1363,7 @@ impl RLN {
}
}
#[cfg(not(target_arch = "wasm32"))]
impl Default for RLN {
fn default() -> Self {
#[cfg(not(feature = "stateless"))]

View File

@@ -486,7 +486,6 @@ mod tree_test {
assert_eq!(received_leaf, Fr::from(0));
}
#[allow(unused_must_use)]
#[test]
// This test checks if `set_leaves_from` throws an error when the index is out of bounds
fn test_set_leaves_bad_index() {
@@ -511,6 +510,8 @@ mod tree_test {
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
#[allow(unused_must_use)]
rln.set_leaves_from(bad_index, &mut buffer)
.expect_err("Should throw an error");

View File

@@ -136,9 +136,15 @@ pub fn bytes_le_to_vec_usize(input: &[u8]) -> Result<Vec<usize>> {
}
}
/// Normalizes a `usize` into an 8-byte array, ensuring consistency across architectures.
/// On 32-bit systems, the result is zero-padded to 8 bytes.
/// On 64-bit systems, it directly represents the `usize` value.
#[inline(always)]
pub fn normalize_usize(input: usize) -> [u8; 8] {
input.to_le_bytes()
let mut bytes = [0u8; 8];
let input_bytes = input.to_le_bytes();
bytes[..input_bytes.len()].copy_from_slice(&input_bytes);
bytes
}
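As a quick illustration of the behaviour described in the doc comment (a sketch, not part of the diff): the result is always 8 bytes, so serialized data stays byte-compatible between 32-bit and 64-bit targets.

```rust
// Hypothetical check using the normalize_usize defined above.
fn normalize_usize_example() {
    let n: usize = 300; // 0x012C
    let bytes = normalize_usize(n);
    // Always 8 bytes, regardless of the target's pointer width.
    assert_eq!(bytes, [0x2C, 0x01, 0, 0, 0, 0, 0, 0]);
    assert_eq!(u64::from_le_bytes(bytes), 300);
}
```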
#[inline(always)] // used in tests

View File

@@ -156,6 +156,7 @@ mod test {
// random number between 0..no_of_leaves
let mut rng = thread_rng();
let set_index = rng.gen_range(0..NO_OF_LEAVES) as usize;
println!("set_index: {}", set_index);
// We add leaves in a batch into the tree
set_leaves_init(rln_pointer, &leaves);
@@ -176,7 +177,10 @@ mod test {
// We get the root of the tree obtained adding leaves in batch
let root_batch_with_custom_index = get_tree_root(rln_pointer);
assert_eq!(root_batch_with_init, root_batch_with_custom_index);
assert_eq!(
root_batch_with_init, root_batch_with_custom_index,
"root batch !="
);
// We reset the tree to default
let success = set_tree(rln_pointer, TEST_TREE_HEIGHT);
@@ -192,7 +196,10 @@ mod test {
// We get the root of the tree obtained adding leaves using the internal index
let root_single_additions = get_tree_root(rln_pointer);
assert_eq!(root_batch_with_init, root_single_additions);
assert_eq!(
root_batch_with_init, root_single_additions,
"root single additions !="
);
}
#[test]

View File

@@ -23,6 +23,7 @@ mod test {
assert_eq!(proof.leaf_index(), i);
tree_opt.set(i, leaves[i]).unwrap();
assert_eq!(tree_opt.root(), tree_full.root());
let proof = tree_opt.proof(i).expect("index should be set");
assert_eq!(proof.leaf_index(), i);
}
@@ -37,11 +38,11 @@ mod test {
#[test]
fn test_subtree_root() {
const DEPTH: usize = 3;
const LEAVES_LEN: usize = 6;
const LEAVES_LEN: usize = 8;
let mut tree = PoseidonTree::default(DEPTH).unwrap();
let leaves: Vec<Fr> = (0..LEAVES_LEN).map(|s| Fr::from(s as i32)).collect();
let _ = tree.set_range(0, leaves);
let _ = tree.set_range(0, leaves.into_iter());
for i in 0..LEAVES_LEN {
// check leaves
@@ -78,7 +79,7 @@ mod test {
let leaves: Vec<Fr> = (0..nof_leaves).map(|s| Fr::from(s as i32)).collect();
// check set_range
let _ = tree.set_range(0, leaves.clone());
let _ = tree.set_range(0, leaves.clone().into_iter());
assert!(tree.get_empty_leaves_indices().is_empty());
let mut vec_idxs = Vec::new();
@@ -98,26 +99,28 @@ mod test {
// check remove_indices_and_set_leaves inside override_range function
assert!(tree.get_empty_leaves_indices().is_empty());
let leaves_2: Vec<Fr> = (0..2).map(|s| Fr::from(s as i32)).collect();
tree.override_range(0, leaves_2.clone(), [0, 1, 2, 3])
tree.override_range(0, leaves_2.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
assert_eq!(tree.get_empty_leaves_indices(), vec![2, 3]);
// check remove_indices inside override_range function
tree.override_range(0, [], [0, 1]).unwrap();
tree.override_range(0, [].into_iter(), [0, 1].into_iter())
.unwrap();
assert_eq!(tree.get_empty_leaves_indices(), vec![0, 1, 2, 3]);
// check set_range inside override_range function
tree.override_range(0, leaves_2.clone(), []).unwrap();
tree.override_range(0, leaves_2.clone().into_iter(), [].into_iter())
.unwrap();
assert_eq!(tree.get_empty_leaves_indices(), vec![2, 3]);
let leaves_4: Vec<Fr> = (0..4).map(|s| Fr::from(s as i32)).collect();
// check if the indexes for write and delete are the same
tree.override_range(0, leaves_4.clone(), [0, 1, 2, 3])
tree.override_range(0, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
assert!(tree.get_empty_leaves_indices().is_empty());
// check if indexes for deletion are before indexes for overwriting
tree.override_range(4, leaves_4.clone(), [0, 1, 2, 3])
tree.override_range(4, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
// The result will be like this, because in the set_range function in pmtree
// the next_index value is increased not by the number of elements to insert,
@@ -128,7 +131,7 @@ mod test {
);
// check if the indices for write and delete do not overlap completely
tree.override_range(2, leaves_4.clone(), [0, 1, 2, 3])
tree.override_range(2, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
// The result will be like this, because in the set_range function in pmtree
// the next_index value is increased not by the number of elements to insert,

View File

@@ -12,29 +12,34 @@ repository = "https://github.com/vacp2p/zerokit"
bench = false
[dependencies]
ark-ff = { version = "0.5.0", features = ["asm"] }
ark-ff = { version = "0.5.0", default-features = false, features = [
"parallel",
] }
num-bigint = { version = "0.4.6", default-features = false, features = [
"rand",
] }
color-eyre = "0.6.2"
pmtree = { package = "vacp2p_pmtree", version = "=2.0.2", optional = true }
color-eyre = "0.6.3"
pmtree = { package = "vacp2p_pmtree", version = "2.0.2", optional = true }
sled = "0.34.7"
serde = "1.0"
lazy_static = "1.4.0"
lazy_static = "1.5.0"
hex = "0.4"
[dev-dependencies]
ark-bn254 = { version = "0.5.0", features = ["std"] }
num-traits = "0.2.19"
hex-literal = "0.3.4"
hex-literal = "1.0.0"
tiny-keccak = { version = "2.0.2", features = ["keccak"] }
criterion = { version = "0.4.0", features = ["html_reports"] }
[features]
default = ["parallel"]
parallel = ["ark-ff/parallel"]
default = []
pmtree-ft = ["pmtree"]
[[bench]]
name = "merkle_tree_benchmark"
harness = false
[[bench]]
name = "poseidon_benchmark"
harness = false

View File

@@ -8,4 +8,4 @@ args = ["test", "--release"]
[tasks.bench]
command = "cargo"
args = ["bench"]
args = ["bench"]

View File

@@ -20,6 +20,40 @@ The crate supports two interchangeable Merkle tree implementations:
- **OptimalMerkleTree**
- Only stores nodes used to prove accumulation of set leaves
### Implementation notes
Glossary:
* depth: the level of the leaves, counting levels from 0
* number of levels: depth + 1
* capacity (== number of leaves): 1 << depth
* total number of nodes: (1 << (depth + 1)) - 1
So for instance:
* depth: 3
* number of levels: 4
* capacity (number of leaves): 8
* total number of nodes: 15
```mermaid
flowchart TD
A[Root] --> N1
A[Root] --> N2
N1 --> N3
N1 --> N4
N2 --> N5
N2 --> N6
N3 -->|Leaf| L1
N3 -->|Leaf| L2
N4 -->|Leaf| L3
N4 -->|Leaf| L4
N5 -->|Leaf| L5
N5 -->|Leaf| L6
N6 -->|Leaf| L7
N6 -->|Leaf| L8
```
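The same relationships can be written out as a small sketch (a hypothetical helper, not part of the crate API):

```rust
// Hypothetical helper, not part of the crate API.
fn tree_dimensions(depth: usize) -> (usize, usize, usize) {
    let levels = depth + 1;                        // number of levels
    let capacity = 1usize << depth;                // number of leaves
    let total_nodes = (1usize << (depth + 1)) - 1; // total number of nodes
    (levels, capacity, total_nodes)
}
// tree_dimensions(3) == (4, 8, 15), matching the example above.
```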
## Poseidon Hash Implementation
This crate provides an implementation to compute the Poseidon hash round constants and MDS matrices:
@@ -56,13 +90,13 @@ zerokit-utils = "0.5.1"
```bash
# Build the crate
cargo build
cargo make build
# Run tests
cargo test
cargo make test
# Run benchmarks
cargo bench
cargo make bench
```
To view the benchmark results, open the `target/criterion/report/index.html` file generated after running the benchmarks.

View File

@@ -75,7 +75,8 @@ pub fn optimal_merkle_tree_benchmark(c: &mut Criterion) {
c.bench_function("OptimalMerkleTree::override_range", |b| {
b.iter(|| {
tree.override_range(0, *LEAVES, [0, 1, 2, 3]).unwrap();
tree.override_range(0, LEAVES.into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
})
});
@@ -123,7 +124,8 @@ pub fn full_merkle_tree_benchmark(c: &mut Criterion) {
c.bench_function("FullMerkleTree::override_range", |b| {
b.iter(|| {
tree.override_range(0, *LEAVES, [0, 1, 2, 3]).unwrap();
tree.override_range(0, LEAVES.into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
})
});

View File

@@ -0,0 +1,65 @@
use ark_bn254::Fr;
use criterion::{
black_box, criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, Throughput,
};
use zerokit_utils::Poseidon;
const ROUND_PARAMS: [(usize, usize, usize, usize); 8] = [
(2, 8, 56, 0),
(3, 8, 57, 0),
(4, 8, 56, 0),
(5, 8, 60, 0),
(6, 8, 60, 0),
(7, 8, 63, 0),
(8, 8, 64, 0),
(9, 8, 63, 0),
];
pub fn poseidon_benchmark(c: &mut Criterion) {
let hasher = Poseidon::<Fr>::from(&ROUND_PARAMS);
let mut group = c.benchmark_group("poseidon Fr");
for size in [10u32, 100, 1000].iter() {
group.throughput(Throughput::Elements(*size as u64));
group.bench_with_input(BenchmarkId::new("Array hash", size), size, |b, &size| {
b.iter_batched(
// Setup: create values for each benchmark iteration
|| {
let mut values = Vec::with_capacity(size as usize);
for i in 0..size {
values.push([Fr::from(i)]);
}
values
},
// Actual benchmark
|values| {
for v in values.iter() {
let _ = hasher.hash(black_box(&v[..]));
}
},
BatchSize::SmallInput,
)
});
}
// Benchmark single hash operation separately
group.bench_function("Single hash", |b| {
let input = [Fr::from(u64::MAX)];
b.iter(|| {
let _ = hasher.hash(black_box(&input[..]));
})
});
group.finish();
}
criterion_group! {
name = benches;
config = Criterion::default()
.warm_up_time(std::time::Duration::from_millis(500))
.measurement_time(std::time::Duration::from_secs(4))
.sample_size(20);
targets = poseidon_benchmark
}
criterion_main!(benches);

View File

@@ -3,7 +3,7 @@ use color_eyre::{Report, Result};
use std::{
cmp::max,
fmt::Debug,
iter::{once, repeat, successors},
iter::{once, repeat_n, successors},
str::FromStr,
};
@@ -89,7 +89,7 @@ where
.iter()
.rev()
.enumerate()
.flat_map(|(levels, hash)| repeat(hash).take(1 << levels))
.flat_map(|(levels, hash)| repeat_n(hash, 1 << levels))
.cloned()
.collect::<Vec<_>>();
debug_assert!(nodes.len() == (1 << (depth + 1)) - 1);
@@ -125,7 +125,6 @@ where
self.next_index
}
#[must_use]
// Returns the root of the tree
fn root(&self) -> FrOf<Self::Hasher> {
self.nodes[0]
@@ -237,7 +236,7 @@ where
self.cached_leaves_indices[i] = 0;
}
self.set_range(start, set_values)
self.set_range(start, set_values.into_iter())
.map_err(|e| Report::msg(e.to_string()))
}
@@ -341,14 +340,12 @@ impl<H: Hasher> ZerokitMerkleProof for FullMerkleProof<H> {
type Index = u8;
type Hasher = H;
#[must_use]
// Returns the length of a Merkle proof
fn length(&self) -> usize {
self.0.len()
}
/// Computes the leaf index corresponding to a Merkle proof
#[must_use]
fn leaf_index(&self) -> usize {
self.0.iter().rev().fold(0, |index, branch| match branch {
FullMerkleBranch::Left(_) => index << 1,
@@ -356,7 +353,6 @@ impl<H: Hasher> ZerokitMerkleProof for FullMerkleProof<H> {
})
}
#[must_use]
/// Returns the path elements forming a Merkle proof
fn get_path_elements(&self) -> Vec<FrOf<Self::Hasher>> {
self.0
@@ -368,7 +364,6 @@ impl<H: Hasher> ZerokitMerkleProof for FullMerkleProof<H> {
}
/// Returns the path indexes forming a Merkle proof
#[must_use]
fn get_path_index(&self) -> Vec<Self::Index> {
self.0
.iter()
@@ -380,7 +375,6 @@ impl<H: Hasher> ZerokitMerkleProof for FullMerkleProof<H> {
}
/// Computes the Merkle root corresponding by iteratively hashing a Merkle proof with a given input leaf
#[must_use]
fn compute_root_from(&self, hash: &FrOf<Self::Hasher>) -> FrOf<Self::Hasher> {
self.0.iter().fold(*hash, |hash, branch| match branch {
FullMerkleBranch::Left(sibling) => H::hash(&[hash, *sibling]),

View File

@@ -54,13 +54,13 @@ pub trait ZerokitMerkleTree {
fn set(&mut self, index: usize, leaf: FrOf<Self::Hasher>) -> Result<()>;
fn set_range<I>(&mut self, start: usize, leaves: I) -> Result<()>
where
I: IntoIterator<Item = FrOf<Self::Hasher>>;
I: ExactSizeIterator<Item = FrOf<Self::Hasher>>;
fn get(&self, index: usize) -> Result<FrOf<Self::Hasher>>;
fn get_empty_leaves_indices(&self) -> Vec<usize>;
fn override_range<I, J>(&mut self, start: usize, leaves: I, to_remove_indices: J) -> Result<()>
where
I: IntoIterator<Item = FrOf<Self::Hasher>>,
J: IntoIterator<Item = usize>;
I: ExactSizeIterator<Item = FrOf<Self::Hasher>>,
J: ExactSizeIterator<Item = usize>;
fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<()>;
fn delete(&mut self, index: usize) -> Result<()>;
fn proof(&self, index: usize) -> Result<Self::Proof>;
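With this change, callers must pass an `ExactSizeIterator` (for example a `Vec`'s `into_iter()`) rather than any `IntoIterator`, as the updated call sites in this PR do. A minimal sketch of the new calling convention, with the assumed imports and bounds spelled out:

```rust
// Sketch only; assumes `ZerokitMerkleTree` and `FrOf` are imported from the utils crate
// and that the leaf field type is Clone (true for the ark-ff field types used here).
use color_eyre::Result;

fn fill_and_override<T>(tree: &mut T, leaves: Vec<FrOf<T::Hasher>>) -> Result<()>
where
    T: ZerokitMerkleTree,
    FrOf<T::Hasher>: Clone,
{
    // Vec's IntoIter is an ExactSizeIterator, so it satisfies the new bound.
    tree.set_range(0, leaves.clone().into_iter())?;
    // override_range now takes two ExactSizeIterators: leaves to write and indices to clear.
    tree.override_range(0, leaves.into_iter(), [0usize, 1usize].into_iter())
}
```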

View File

@@ -1,10 +1,10 @@
use crate::merkle_tree::{Hasher, ZerokitMerkleProof, ZerokitMerkleTree};
use crate::FrOf;
use color_eyre::{Report, Result};
use std::cmp::min;
use std::collections::HashMap;
use std::str::FromStr;
use std::{cmp::max, fmt::Debug};
////////////////////////////////////////////////////////////
///// Optimal Merkle Tree Implementation
////////////////////////////////////////////////////////////
@@ -81,9 +81,9 @@ where
}
cached_nodes.reverse();
Ok(OptimalMerkleTree {
cached_nodes: cached_nodes.clone(),
cached_nodes,
depth,
nodes: HashMap::new(),
nodes: HashMap::with_capacity(1 << depth),
cached_leaves_indices: vec![0; 1 << depth],
next_index: 0,
metadata: Vec::new(),
@@ -109,7 +109,6 @@ where
self.next_index
}
#[must_use]
// Returns the root of the tree
fn root(&self) -> H::Fr {
self.get_node(0, 0)
@@ -137,7 +136,7 @@ where
return Err(Report::msg("index exceeds set size"));
}
self.nodes.insert((self.depth, index), leaf);
self.recalculate_from(index)?;
self.update_hashes(index, 1)?;
self.next_index = max(self.next_index, index + 1);
self.cached_leaves_indices[index] = 1;
Ok(())
@@ -162,25 +161,29 @@ where
}
// Sets multiple leaves from the specified tree index
fn set_range<I: IntoIterator<Item = H::Fr>>(&mut self, start: usize, leaves: I) -> Result<()> {
let leaves = leaves.into_iter().collect::<Vec<_>>();
fn set_range<I: ExactSizeIterator<Item = H::Fr>>(
&mut self,
start: usize,
leaves: I,
) -> Result<()> {
// check if the range is valid
if start + leaves.len() > self.capacity() {
let leaves_len = leaves.len();
if start + leaves_len > self.capacity() {
return Err(Report::msg("provided range exceeds set size"));
}
for (i, leaf) in leaves.iter().enumerate() {
self.nodes.insert((self.depth, start + i), *leaf);
for (i, leaf) in leaves.enumerate() {
self.nodes.insert((self.depth, start + i), leaf);
self.cached_leaves_indices[start + i] = 1;
self.recalculate_from(start + i)?;
}
self.next_index = max(self.next_index, start + leaves.len());
self.update_hashes(start, leaves_len)?;
self.next_index = max(self.next_index, start + leaves_len);
Ok(())
}
fn override_range<I, J>(&mut self, start: usize, leaves: I, indices: J) -> Result<()>
where
I: IntoIterator<Item = FrOf<Self::Hasher>>,
J: IntoIterator<Item = usize>,
I: ExactSizeIterator<Item = FrOf<Self::Hasher>>,
J: ExactSizeIterator<Item = usize>,
{
let indices = indices.into_iter().collect::<Vec<_>>();
let min_index = *indices.first().unwrap();
@@ -205,7 +208,7 @@ where
self.cached_leaves_indices[i] = 0;
}
self.set_range(start, set_values)
self.set_range(start, set_values.into_iter())
.map_err(|e| Report::msg(e.to_string()))
}
@@ -317,6 +320,60 @@ where
}
Ok(())
}
/// Update hashes after some leaves have been set or updated
/// index - first leaf index (which has been set or updated)
/// length - number of elements set or updated
fn update_hashes(&mut self, index: usize, length: usize) -> Result<()> {
// parent depth & index (used to store in the tree)
let mut parent_depth = self.depth - 1; // tree depth (or leaves depth) - 1
let mut parent_index = index >> 1;
let mut parent_index_bak = parent_index;
// maximum index at this depth
let parent_max_index_0 = (1 << parent_depth) / 2;
// Based on given length (number of elements we will update)
// we could restrict the parent_max_index
let current_index_max = if (index + length) % 2 == 0 {
index + length + 2
} else {
index + length + 1
};
let mut parent_max_index = min(current_index_max >> 1, parent_max_index_0);
// current depth & index (used to compute the hash)
// current depth initially == tree depth (or leaves depth)
let mut current_depth = self.depth;
let mut current_index = if index % 2 == 0 { index } else { index - 1 };
let mut current_index_bak = current_index;
loop {
// Hash 2 values at (current depth, current_index) & (current_depth, current_index + 1)
let n_hash = self.hash_couple(current_depth, current_index);
// Insert this hash at (parent_depth, parent_index)
self.nodes.insert((parent_depth, parent_index), n_hash);
if parent_depth == 0 {
// We just set the root hash of the tree - nothing to do anymore
break;
}
// Incr parent index
parent_index += 1;
// Incr current index (+2 because we've just hashed current index & current_index + 1)
current_index += 2;
if parent_index >= parent_max_index {
// reset (aka decr depth & reset indexes)
parent_depth -= 1;
parent_index = parent_index_bak >> 1;
parent_index_bak = parent_index;
parent_max_index >>= 1;
current_depth -= 1;
current_index = current_index_bak >> 1;
current_index_bak = current_index;
}
}
Ok(())
}
}
impl<H: Hasher> ZerokitMerkleProof for OptimalMerkleProof<H>
@@ -326,14 +383,12 @@ where
type Index = u8;
type Hasher = H;
#[must_use]
// Returns the length of a Merkle proof
fn length(&self) -> usize {
self.0.len()
}
/// Computes the leaf index corresponding to a Merkle proof
#[must_use]
fn leaf_index(&self) -> usize {
// In current implementation the path indexes in a proof correspond to the binary representation of the leaf index
let mut binary_repr = self.get_path_index();
@@ -343,19 +398,16 @@ where
.fold(0, |acc, digit| (acc << 1) + usize::from(digit))
}
#[must_use]
/// Returns the path elements forming a Merkle proof
fn get_path_elements(&self) -> Vec<H::Fr> {
self.0.iter().map(|x| x.0).collect()
}
/// Returns the path indexes forming a Merkle proof
#[must_use]
fn get_path_index(&self) -> Vec<u8> {
self.0.iter().map(|x| x.1).collect()
}
#[must_use]
/// Computes the Merkle root corresponding by iteratively hashing a Merkle proof with a given input leaf
fn compute_root_from(&self, leaf: &H::Fr) -> H::Fr {
let mut acc: H::Fr = *leaf;

View File

@@ -9,8 +9,6 @@
// The following implementation was adapted from https://github.com/arkworks-rs/sponge/blob/7d9b3a474c9ddb62890014aeaefcb142ac2b3776/src/poseidon/grain_lfsr.rs
#![allow(dead_code)]
use ark_ff::PrimeField;
use num_bigint::BigUint;
@@ -20,7 +18,6 @@ pub struct PoseidonGrainLFSR {
pub head: usize,
}
#[allow(unused_variables)]
impl PoseidonGrainLFSR {
pub fn new(
is_field: u64,

View File

@@ -7,7 +7,7 @@ use crate::poseidon_constants::find_poseidon_ark_and_mds;
use ark_ff::PrimeField;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RoundParamenters<F: PrimeField> {
pub struct RoundParameters<F: PrimeField> {
pub t: usize,
pub n_rounds_f: usize,
pub n_rounds_p: usize,
@@ -17,16 +17,16 @@ pub struct RoundParamenters<F: PrimeField> {
}
pub struct Poseidon<F: PrimeField> {
round_params: Vec<RoundParamenters<F>>,
round_params: Vec<RoundParameters<F>>,
}
impl<F: PrimeField> Poseidon<F> {
// Loads round parameters and generates round constants
// poseidon_params is a vector containing tuples (t, RF, RP, skip_matrices)
// where: t is the rate (input lenght + 1), RF is the number of full rounds, RP is the number of partial rounds
// where: t is the rate (input length + 1), RF is the number of full rounds, RP is the number of partial rounds
// and skip_matrices is a (temporary) parameter used to generate secure MDS matrices (see comments in the description of find_poseidon_ark_and_mds)
// TODO: implement automatic generation of round parameters
pub fn from(poseidon_params: &[(usize, usize, usize, usize)]) -> Self {
let mut read_params = Vec::<RoundParamenters<F>>::new();
let mut read_params = Vec::<RoundParameters<F>>::with_capacity(poseidon_params.len());
for &(t, n_rounds_f, n_rounds_p, skip_matrices) in poseidon_params {
let (ark, mds) = find_poseidon_ark_and_mds::<F>(
@@ -38,7 +38,7 @@ impl<F: PrimeField> Poseidon<F> {
n_rounds_p as u64,
skip_matrices,
);
let rp = RoundParamenters {
let rp = RoundParameters {
t,
n_rounds_p,
n_rounds_f,
@@ -54,24 +54,24 @@ impl<F: PrimeField> Poseidon<F> {
}
}
pub fn get_parameters(&self) -> Vec<RoundParamenters<F>> {
self.round_params.clone()
pub fn get_parameters(&self) -> &Vec<RoundParameters<F>> {
&self.round_params
}
pub fn ark(&self, state: &mut [F], c: &[F], it: usize) {
for i in 0..state.len() {
state[i] += c[it + i];
}
state.iter_mut().enumerate().for_each(|(i, elem)| {
*elem += c[it + i];
});
}
pub fn sbox(&self, n_rounds_f: usize, n_rounds_p: usize, state: &mut [F], i: usize) {
if (i < n_rounds_f / 2) || (i >= n_rounds_f / 2 + n_rounds_p) {
for current_state in &mut state.iter_mut() {
state.iter_mut().for_each(|current_state| {
let aux = *current_state;
*current_state *= *current_state;
*current_state *= *current_state;
*current_state *= aux;
}
})
} else {
let aux = state[0];
state[0] *= state[0];
@@ -80,21 +80,20 @@ impl<F: PrimeField> Poseidon<F> {
}
}
pub fn mix(&self, state: &[F], m: &[Vec<F>]) -> Vec<F> {
let mut new_state: Vec<F> = Vec::new();
pub fn mix_2(&self, state: &[F], m: &[Vec<F>], state_2: &mut [F]) {
for i in 0..state.len() {
new_state.push(F::zero());
for (j, state_item) in state.iter().enumerate() {
let mut mij = m[i][j];
mij *= state_item;
new_state[i] += mij;
// Cache the row reference
let row = &m[i];
let mut acc = F::ZERO;
for j in 0..state.len() {
acc += row[j] * state[j];
}
state_2[i] = acc;
}
new_state.clone()
}
pub fn hash(&self, inp: Vec<F>) -> Result<F, String> {
// Note that the rate t becomes input lenght + 1, hence for lenght N we pick parameters with T = N + 1
pub fn hash(&self, inp: &[F]) -> Result<F, String> {
// Note that the rate t becomes input length + 1; hence for length N we pick parameters with T = N + 1
let t = inp.len() + 1;
// We seek the index (Poseidon's round_params is an ordered vector) for the parameters corresponding to t
@@ -106,8 +105,9 @@ impl<F: PrimeField> Poseidon<F> {
let param_index = param_index.unwrap();
let mut state = vec![F::zero(); t];
state[1..].clone_from_slice(&inp);
let mut state = vec![F::ZERO; t];
let mut state_2 = state.clone();
state[1..].clone_from_slice(inp);
for i in 0..(self.round_params[param_index].n_rounds_f
+ self.round_params[param_index].n_rounds_p)
@@ -123,7 +123,8 @@ impl<F: PrimeField> Poseidon<F> {
&mut state,
i,
);
state = self.mix(&state, &self.round_params[param_index].m);
self.mix_2(&state, &self.round_params[param_index].m, &mut state_2);
std::mem::swap(&mut state, &mut state_2);
}
Ok(state[0])
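A short sketch of the rate rule described in the comment above, using the `ROUND_PARAMS` table that appears in this PR's benchmarks and tests: a two-element input selects the parameter set with t = 3.

```rust
use ark_bn254::Fr;
use zerokit_utils::Poseidon;

const ROUND_PARAMS: [(usize, usize, usize, usize); 8] = [
    (2, 8, 56, 0), (3, 8, 57, 0), (4, 8, 56, 0), (5, 8, 60, 0),
    (6, 8, 60, 0), (7, 8, 63, 0), (8, 8, 64, 0), (9, 8, 63, 0),
];

fn example() {
    let hasher = Poseidon::<Fr>::from(&ROUND_PARAMS);
    // Two inputs -> t = 3, so the (3, 8, 57, 0) parameter set is selected.
    let digest = hasher.hash(&[Fr::from(1u64), Fr::from(2u64)]).unwrap();
    let _ = digest;
}
```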

View File

@@ -107,7 +107,7 @@ pub mod test {
let leaves_4: Vec<TestFr> = (0u32..4).map(TestFr::from).collect();
let mut tree_full = default_full_merkle_tree(depth);
let _ = tree_full.set_range(0, leaves.clone());
let _ = tree_full.set_range(0, leaves.clone().into_iter());
assert!(tree_full.get_empty_leaves_indices().is_empty());
let mut vec_idxs = Vec::new();
@@ -125,31 +125,31 @@ pub mod test {
// Check situation when the number of items to insert is less than the number of items to delete
tree_full
.override_range(0, leaves_2.clone(), [0, 1, 2, 3])
.override_range(0, leaves_2.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
// check if the indexes for write and delete are the same
tree_full
.override_range(0, leaves_4.clone(), [0, 1, 2, 3])
.override_range(0, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
assert_eq!(tree_full.get_empty_leaves_indices(), vec![]);
// check if indexes for deletion are before indexes for overwriting
tree_full
.override_range(4, leaves_4.clone(), [0, 1, 2, 3])
.override_range(4, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
assert_eq!(tree_full.get_empty_leaves_indices(), vec![0, 1, 2, 3]);
// check if the indices for write and delete do not overlap completely
tree_full
.override_range(2, leaves_4.clone(), [0, 1, 2, 3])
.override_range(2, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
assert_eq!(tree_full.get_empty_leaves_indices(), vec![0, 1]);
//// Optimal Merkle Tree Test
let mut tree_opt = default_optimal_merkle_tree(depth);
let _ = tree_opt.set_range(0, leaves.clone());
let _ = tree_opt.set_range(0, leaves.clone().into_iter());
assert!(tree_opt.get_empty_leaves_indices().is_empty());
let mut vec_idxs = Vec::new();
@@ -166,24 +166,24 @@ pub mod test {
// Check situation when the number of items to insert is less than the number of items to delete
tree_opt
.override_range(0, leaves_2.clone(), [0, 1, 2, 3])
.override_range(0, leaves_2.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
// check if the indexes for write and delete are the same
tree_opt
.override_range(0, leaves_4.clone(), [0, 1, 2, 3])
.override_range(0, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
assert_eq!(tree_opt.get_empty_leaves_indices(), vec![]);
// check if indexes for deletion are before indexes for overwriting
tree_opt
.override_range(4, leaves_4.clone(), [0, 1, 2, 3])
.override_range(4, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
assert_eq!(tree_opt.get_empty_leaves_indices(), vec![0, 1, 2, 3]);
// check if the indices for write and delete do not overlap completely
tree_opt
.override_range(2, leaves_4.clone(), [0, 1, 2, 3])
.override_range(2, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
assert_eq!(tree_opt.get_empty_leaves_indices(), vec![0, 1]);
}
@@ -191,7 +191,7 @@ pub mod test {
#[test]
fn test_subtree_root() {
let depth = 3;
let nof_leaves: usize = 6;
let nof_leaves: usize = 4;
let leaves: Vec<TestFr> = (0..nof_leaves as u32).map(TestFr::from).collect();
let mut tree_full = default_optimal_merkle_tree(depth);

View File

@@ -3530,7 +3530,8 @@ mod test {
{
// We check if the round constants and matrices correspond to the one generated when instantiating Poseidon with ROUND_PARAMS
let (loaded_c, loaded_m) = load_constants();
let poseidon_parameters = Poseidon::<Fr>::from(&ROUND_PARAMS).get_parameters();
let poseidon_hasher = Poseidon::<Fr>::from(&ROUND_PARAMS);
let poseidon_parameters = poseidon_hasher.get_parameters();
for i in 0..poseidon_parameters.len() {
assert_eq!(loaded_c[i], poseidon_parameters[i].c);
assert_eq!(loaded_m[i], poseidon_parameters[i].m);

View File

@@ -0,0 +1,131 @@
#[cfg(test)]
mod test {
use ark_bn254::Fr;
use ark_ff::{AdditiveGroup, Field};
use std::collections::HashMap;
use std::str::FromStr;
use zerokit_utils::poseidon_hash::Poseidon;
const ROUND_PARAMS: [(usize, usize, usize, usize); 8] = [
(2, 8, 56, 0),
(3, 8, 57, 0),
(4, 8, 56, 0),
(5, 8, 60, 0),
(6, 8, 60, 0),
(7, 8, 63, 0),
(8, 8, 64, 0),
(9, 8, 63, 0),
];
#[test]
fn test_poseidon_hash_basic() {
let map = HashMap::from([
(
Fr::ZERO,
Fr::from_str(
"19014214495641488759237505126948346942972912379615652741039992445865937985820",
)
.unwrap(),
),
(
Fr::ONE,
Fr::from_str(
"18586133768512220936620570745912940619677854269274689475585506675881198879027",
)
.unwrap(),
),
(
Fr::from(255),
Fr::from_str(
"20026131459732984724454933360292530547665726761019872861025481903072111625788",
)
.unwrap(),
),
(
Fr::from(u16::MAX),
Fr::from_str(
"12358868638722666642632413418981275677998688723398440898957566982787708451243",
)
.unwrap(),
),
(
Fr::from(u64::MAX),
Fr::from_str(
"17449307747295017006142981453320720946812828330895590310359634430146721583189",
)
.unwrap(),
),
]);
// map (key: what to hash, value: expected value)
for (k, v) in map.into_iter() {
let hasher = Poseidon::<Fr>::from(&ROUND_PARAMS);
let h = hasher.hash(&[k]);
assert_eq!(h.unwrap(), v);
}
}
#[test]
fn test_poseidon_hash_multi() {
// All hashes done in a merkle tree (with leaves: [0, 1, 2, 3, 4, 5, 6, 7])
// ~ leaves
let fr_0 = Fr::ZERO;
let fr_1 = Fr::ONE;
let fr_2 = Fr::from(2);
let fr_3 = Fr::from(3);
let fr_4 = Fr::from(4);
let fr_5 = Fr::from(5);
let fr_6 = Fr::from(6);
let fr_7 = Fr::from(7);
let fr_0_1 = Fr::from_str(
"12583541437132735734108669866114103169564651237895298778035846191048104863326",
)
.unwrap();
let fr_2_3 = Fr::from_str(
"17197790661637433027297685226742709599380837544520340689137581733613433332983",
)
.unwrap();
let fr_4_5 = Fr::from_str(
"756592041685769348226045093946546956867261766023639881791475046640232555043",
)
.unwrap();
let fr_6_7 = Fr::from_str(
"5558359459771725727593826278265342308584225092343962757289948761260561575479",
)
.unwrap();
let fr_0_3 = Fr::from_str(
"3720616653028013822312861221679392249031832781774563366107458835261883914924",
)
.unwrap();
let fr_4_7 = Fr::from_str(
"7960741062684589801276390367952372418815534638314682948141519164356522829957",
)
.unwrap();
// ~ root
let fr_0_7 = Fr::from_str(
"11780650233517635876913804110234352847867393797952240856403268682492028497284",
)
.unwrap();
// map (key: what to hash, value: expected value)
let map = HashMap::from([
((fr_0, fr_1), fr_0_1),
((fr_2, fr_3), fr_2_3),
((fr_4, fr_5), fr_4_5),
((fr_6, fr_7), fr_6_7),
((fr_0_1, fr_2_3), fr_0_3),
((fr_4_5, fr_6_7), fr_4_7),
((fr_0_3, fr_4_7), fr_0_7),
]);
for (k, v) in map.into_iter() {
let hasher = Poseidon::<Fr>::from(&ROUND_PARAMS);
let h = hasher.hash(&[k.0, k.1]);
assert_eq!(h.unwrap(), v);
}
}
}