Compare commits

...

40 Commits

Author SHA1 Message Date
vinhtc27
9cb4ff7571 refactor: update tree depth to 10 and adjust resource paths accordingly 2025-09-19 19:49:01 +07:00
vinhtc27
b5afb847f1 refactor: update tree depth to 30 and adjust resource paths accordingly 2025-09-19 19:39:17 +07:00
vinhtc27
a7d58926e4 chore: update Cargo.lock 2025-09-17 20:08:21 +07:00
vinhtc27
e160ac2524 Merge remote-tracking branch 'origin/master' into fix-cross-build-and-update-deps 2025-09-17 20:02:00 +07:00
0xc1c4da
eb8eedfdb4 Allow flake to be consumed, and nix build .#rln (#340)
I had been trying to consume zerokit (specifically rln on x86_64) to
build libwaku (nwaku) and was having issues; this PR at least allows a
build to occur.

```bash
$ nix flake show github:vacp2p/zerokit
error: syntax error, unexpected '=', expecting ';'
       at «github:vacp2p/zerokit/0b00c639a059a2cfde74bcf68fdf75db3b6898a4»/flake.nix:36:25:
           35|
           36|         rln-linux-arm64 = buildRln {
             |                         ^
           37|           target-platform = "aarch64-multiplatform";
```

`Cargo.lock` is required in the repo for this to be possible; otherwise:
```bash
$ nix build .#rln --show-trace
warning: Git tree '/home/j/experiments/zerokit' is dirty
error:
       … while calling the 'derivationStrict' builtin
         at <nix/derivation-internal.nix>:37:12:
           36|
           37|   strict = derivationStrict drvAttrs;
             |            ^
           38|

       … while evaluating derivation 'zerokit-nightly'
         whose name attribute is located at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/stdenv/generic/make-derivation.nix:336:7

       … while evaluating attribute 'cargoDeps' of derivation 'zerokit-nightly'
         at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/build-support/rust/build-rust-package/default.nix:157:5:
          156|   // {
          157|     cargoDeps = cargoDeps';
             |     ^
          158|     inherit buildAndTestSubdir;

       … while calling the 'getAttr' builtin
         at <nix/derivation-internal.nix>:50:17:
           49|     value = commonAttrs // {
           50|       outPath = builtins.getAttr outputName strict;
             |                 ^
           51|       drvPath = strict.drvPath;

       … while calling the 'derivationStrict' builtin
         at <nix/derivation-internal.nix>:37:12:
           36|
           37|   strict = derivationStrict drvAttrs;
             |            ^
           38|

       … while evaluating derivation 'cargo-vendor-dir'
         whose name attribute is located at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/stdenv/generic/make-derivation.nix:336:7

       … while evaluating attribute 'buildCommand' of derivation 'cargo-vendor-dir'
         at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/build-support/trivial-builders/default.nix:59:17:
           58|         enableParallelBuilding = true;
           59|         inherit buildCommand name;
             |                 ^
           60|         passAsFile = [ "buildCommand" ]

       … while calling the 'toString' builtin
         at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/build-support/rust/import-cargo-lock.nix:264:20:
          263|
          264|     for crate in ${toString depCrates}; do
             |                    ^
          265|       # Link the crate directory, removing the output path hash from the destination.

       … while calling the 'deepSeq' builtin
         at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/build-support/rust/import-cargo-lock.nix:68:15:
           67|   # being evaluated otherwise, since there could be no git dependencies.
           68|   depCrates = builtins.deepSeq gitShaOutputHash (builtins.map mkCrate depPackages);
             |               ^
           69|

       … while calling the 'map' builtin
         at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/build-support/rust/import-cargo-lock.nix:68:50:
           67|   # being evaluated otherwise, since there could be no git dependencies.
           68|   depCrates = builtins.deepSeq gitShaOutputHash (builtins.map mkCrate depPackages);
             |                                                  ^
           69|

       … while calling the 'filter' builtin
         at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/build-support/rust/import-cargo-lock.nix:61:17:
           60|   # safely skip it.
           61|   depPackages = builtins.filter (p: p ? "source") packages;
             |                 ^
           62|

       … while calling the 'fromTOML' builtin
         at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/build-support/rust/import-cargo-lock.nix:50:20:
           49|
           50|   parsedLockFile = builtins.fromTOML lockFileContents;
             |                    ^
           51|

       … while evaluating the argument passed to builtins.fromTOML

       … while calling the 'readFile' builtin
         at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/build-support/rust/import-cargo-lock.nix:47:10:
           46|     if lockFile != null
           47|     then builtins.readFile lockFile
             |          ^
           48|     else args.lockFileContents;

       error: opening file '/nix/store/qh8gf0sl8znhnjwc1ksif7pwik26dsyd-source/Cargo.lock': No such file or directory
```
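A minimal way to satisfy that requirement, assuming `Cargo.lock` was previously untracked or gitignored (a sketch, not part of this PR):

```bash
# See which ignore rule (if any) hides the lockfile, then force-track it:
git check-ignore -v Cargo.lock
git add -f Cargo.lock
git commit -m "chore: track Cargo.lock"
```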

The PR allows for a successful build:
```bash
$ ls -R result
result:
target

result/target:
release

result/target/release:
librln.a  librln.d  librln.rlib  librln.so
```

---------

Co-authored-by: Jarrad Hope <jarrad@logos.co>
Co-authored-by: Vinh Trịnh <108657096+vinhtc27@users.noreply.github.com>
2025-09-17 14:57:06 +02:00
vinhtc27
c78f1f1534 chore: remove duplicate feature flag 2025-09-16 15:08:38 +07:00
Vinh Trịnh
4d62a4d60d Merge branch 'master' into fix-cross-build-and-update-deps 2025-09-16 14:57:46 +07:00
Vinh Trịnh
57b694db5d chore(rln-wasm): remove wasm-bindgen-cli installation (#341)
Currently, the new wasm-bindgen-cli version [causes CI to
fail](https://github.com/vacp2p/zerokit/actions/runs/17699917161/job/50313998747),
and it isn't needed for the parallel feature anymore.
So it's better to remove it from the codebase.
2025-09-16 14:55:18 +07:00
vinhtc27
fd568c17b3 chore(rln-wasm-utils): update wasm-bindgen-test version 2025-09-14 16:31:56 +07:00
vinhtc27
cf845c6a74 docs: update contributing guidelines to include rln-wasm-utils and improve clippy command 2025-09-14 16:24:01 +07:00
vinhtc27
fe566b3314 fix: exclude rln-cli, adjust pmtree-ft feature flags to avoid feature conflicts when building with --no-default-features --features optimalmerkletree | fullmerkletree 2025-09-14 16:16:59 +07:00
Vinh Trịnh
0b00c639a0 feat(rln): improve the PmTreeConfig initialization process with builder pattern (#334) 2025-09-03 18:54:08 +07:00
Vinh Trịnh
7c801a804e chore: remove cmake due to CI error and skip tests and benchmarks on draft pull requests (#339) 2025-09-03 15:56:09 +07:00
Joe Wanga
9da80dd807 docs: add comprehensive CONTRIBUTING.md with contributing guidelines (#331)
## Description
Adds a comprehensive CONTRIBUTING.md document that addresses all
requirements from the issue #309 .

---------

Co-authored-by: Ekaterina Broslavskaya <seemenkina@gmail.com>
2025-08-19 11:56:05 +03:00
Vinh Trịnh
bcbd6a97af chore: consistent naming and update docs for merkle trees (#333) 2025-08-18 21:37:28 +07:00
Ekaterina Broslavskaya
6965cf2852 feat(rln-wasm-utils): extracting identity generation and hash functions into a separate module (#332)
- extracted all identity generation functions as standalone functions
rather than RLN methods
- added BE support - only for these functions so far
- covered the functions with tests, as well as conversion to big endian
- prepared for publication, but is actually awaiting the initial
publication of the RLN module

@vinhtc27, please check that everything is correct from the wasm point
of view. This module does not require parallel computing, so if there
are any unnecessary dependencies, builds, etc., please let me know.

---------

Co-authored-by: vinhtc27 <vinhtc27@gmail.com>
2025-07-31 16:05:46 +03:00
Vinh Trịnh
578e0507b3 feat: add wasm parallel testcase and simplify the witness_calculator.js (#328)
- Tested the parallel feature for rln-wasm on this branch:
https://github.com/vacp2p/zerokit/tree/benchmark-v0.9.0
- Simplified the test case by using the default generated
witness_calculator.js file for both Node and browser tests
- Added a WASM parallel test case using the latest wasm-bindgen-rayon
version 1.3.0
- [Successful CI
run](https://github.com/vacp2p/zerokit/actions/runs/16570298449) with
Cargo.lock included; the build fails if the file is ignored in the codebase.
- Requires publishing new pmtree version [on this
PR](https://github.com/vacp2p/pmtree/pull/4) before merging this branch.
2025-07-30 19:18:30 +07:00
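For reference, these are the cargo-make tasks the new tests run through (the same commands appear in the updated CI workflow later in this diff):

```bash
# From the rln-wasm crate directory:
cargo make test --release           # Node tests
cargo make test_browser --release   # headless browser tests
cargo make test_parallel --release  # parallel (wasm-bindgen-rayon) tests
```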
Vinh Trịnh
bf1e184da9 feat: resolve overlap between stateless and merkletree feature flags (#329)
- Resolved overlap between stateless and merkletree feature flags.
- Updated every testcase related to stateless feature.
- Added compile-time feature check to avoid feature overlap.
- Added --no-default-features for all builds in nightly-release.yml
[(tested)](https://github.com/vacp2p/zerokit/actions/runs/16525062203).

---------

Co-authored-by: Ekaterina Broslavskaya <seemenkina@gmail.com>
2025-07-28 16:52:45 +07:00
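The nightly-release workflow below runs one such build per feature set; illustrative invocations (the target shown is one of the matrix entries):

```bash
# Default features are disabled so the tree backends cannot overlap:
cross build --release --target x86_64-unknown-linux-gnu \
  --no-default-features --features stateless --workspace
cross build --release --target x86_64-unknown-linux-gnu \
  --no-default-features --features optimalmerkletree,parallel --workspace
```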
Vinh Trịnh
4473688efa feat: support feature-specific binary generation and make arkzkey the default (#326)
- Integrated missing options for generating feature-specific binaries
[(tested)](https://github.com/vacp2p/zerokit/actions/runs/16408191766).
- Made arkzkey the default feature for improved consistency.
- Created a script to convert arkzkey from zkey.
- Updated nightly-release.yaml file.
- Updated documentation.
2025-07-28 15:11:41 +07:00
Vinh Trịnh
c80569d518 feat: restore parallel flag, improve CI, resolve clippy warnings, bump deps (#325) 2025-07-14 15:00:24 +07:00
Sydhds
fd99b6af74 Add pmtree delete function docstring (#324) 2025-07-10 08:25:10 +02:00
Sydhds
65f53e3da3 Initial impl for IdSecret (#320) 2025-07-08 09:48:04 +02:00
Vinh Trịnh
042f8a9739 feat: use stateless as default feature for rln in wasm module (#322) 2025-07-04 13:50:26 +07:00
Sydhds
baf474e747 Use Vec::with_capacity for bytes_le_to_vec_fr (#321) 2025-06-23 10:13:39 +02:00
Ekaterina Broslavskaya
dc0b31752c release v0.8.0 (#315) 2025-06-05 12:23:06 +03:00
Sydhds
36013bf4ba Remove not explicit use statement (#317) 2025-06-05 10:32:43 +02:00
Sydhds
211b2d4830 Add error for return type of compute_id_secret function (#316) 2025-06-04 09:00:27 +02:00
Sydhds
5f4bcb74ce Eyre removal 2 (#311)
Co-authored-by: Ekaterina Broslavskaya <seemenkina@gmail.com>
2025-06-02 10:32:13 +02:00
Jakub Sokołowski
de5fd36add nix: add RLN targets for different platforms
Wanted to be able to build `wakucanary` without having to build `zerokit` manually.
Also adds the `release` flag which can be set to `false` for a debug build.

Signed-off-by: Jakub Sokołowski <jakub@status.im>
2025-05-29 10:30:02 +02:00
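For reference, the flake outputs these targets define (see flake.nix later in this diff) can be built with:

```bash
# Default release build, plus two of the cross-compiled variants:
nix build .#rln
nix build .#rln-linux-arm64
nix build .#rln-android-arm64
```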
Jakub Sokołowski
19c0f551c8 nix: use rust tooling from rust-overlay for builds
Noticed the builds in `nix/default.nix` were not using the tooling
from `rust-overlay` but instead an older one from `pkgs`.

This also removes the need to compile LLVM before building Zerokit.

Signed-off-by: Jakub Sokołowski <jakub@status.im>
2025-05-29 09:56:31 +02:00
vinhtc27
4133f1f8c3 fix: bumps deps, downgrade hex-literal to avoid Rust edition 2024 issue
Signed-off-by: Jakub Sokołowski <jakub@status.im>
2025-05-29 09:56:30 +02:00
markoburcul
149096f7a6 flake: add rust overlay and shell dependencies 2025-05-15 11:51:55 +02:00
Vinh Trịnh
7023e85fce Enable parallel execution for Merkle Tree (#306) 2025-05-14 12:19:37 +07:00
Vinh Trịnh
a4cafa6adc Enable parallel execution for rln-wasm module (#296)
## Changes

- Enabled parallelism in the browser for `rln-wasm` with the
`multithread` feature flag.
- Added browser tests for both single-threaded and multi-threaded modes.
- Enabled browser tests in the CI workflow.
- Pending: resolving hanging issue with `wasm-bindgen-rayon`
([comment](https://github.com/RReverser/wasm-bindgen-rayon/issues/6#issuecomment-2814372940)).
- Forked [this
commit](42887c80e6)
into a separate
[branch](https://github.com/vacp2p/zerokit/tree/benchmark-v0.8.0), which
includes an HTML benchmark file and a test case for the multithreaded
feature in `rln-wasm`.
- The test case still has the known issue above, so it's temporarily
disabled in this PR and will be addressed in the future.
- Improved `make installdeps`, which resolves the issue of NVM not
enabling Node.js in the current terminal session.
- Reduced the build size of the `.wasm` blob using the `wasm-opt` tool
from [Binaryen](https://github.com/WebAssembly/binaryen).
- Maybe we can close this draft
[PR](https://github.com/vacp2p/zerokit/pull/226), which is already very
outdated?
2025-05-13 13:15:05 +07:00
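The size reduction mentioned above is the same `wasm-opt` pass that later lands in the nightly-release workflow:

```bash
# Shrink the generated .wasm blob (flags as used in CI):
wasm-opt pkg/rln_wasm_bg.wasm -Oz --strip-debug --strip-dwarf \
  --remove-unused-module-elements --vacuum -o pkg/rln_wasm_bg.wasm
```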
Sydhds
4077357e3f Add merkle tree glossary + mermaid graph example (#298) 2025-04-18 15:11:55 +02:00
Sydhds
84d9799d09 Add poseidon hash benchmark + optim (#300) 2025-04-18 15:08:43 +02:00
Sydhds
c576af8e62 fix(tree): fix OptimalMerkleTree set_range & override_range performance issue (#295) 2025-04-16 17:40:58 +02:00
Sydhds
81470b9678 Add poseidon hash unit test (against ref values) (#299) 2025-04-16 16:23:58 +02:00
Vinh Trịnh
9d4198c205 feat(rln-wasm): bring back wasm support for zerokit
# Bring Back WebAssembly Support for ZeroKit

- Update minor versions of all dependencies.
- Update documentation to reflect these changes.
- ~~Vendor `wasmer` v4.4.0 in [my git
repository](https://github.com/vinhtc27/wasmer) for `ark-circom`
v0.5.0.~~
- Resolve `wasm-pack` build failures (`os error 2`) caused by a Node.js
version mismatch.
- Restore the previous CI pipeline for the `rln-wasm` feature and update
to the stable toolchain.
- ~~Use `ark-circom` with the `wasm` feature for WebAssembly
compatibility and the `rln.wasm` file for witness calculation.~~
- ~~Fix dependency issues related to `ark-circom` v0.5.0, which
currently uses `wasmer` v4.4.0 and is affected by this
[issue](https://github.com/rust-lang/rust/issues/91632#issuecomment-1477914703).~~
- Install WABT with `brew` and `apt-get` instead of cloning to fix
`wasm-strip not found` issue in the CI workflow.
- Install `wasm-pack` with `curl` instead of using `wasm-pack-action` to
fix parse exception error in the CI workflow.
- Use the `.wasm` file with JS bindings for witness calculation, which
is generated from [`iden3/circom`](https://github.com/iden3/circom)
during circuit compilation. This allows witness computation outside the
RLN instance.
- Refactor the `rln` module by moving circuit-related files to the
`src/circuit` folder for better organization.
- Remove `ark-circom` and `wasmer` by cloning the
[CircomReduction](3c95ed98e2/src/circom/qap.rs (L12))
struct and the
[read_zkey](3c95ed98e2/src/zkey.rs (L53))
function into the `rln` module, which reduces the repository's build
size and speeds up compilation time and the CI workflow duration.
- These changes also address
[#282](https://github.com/vacp2p/zerokit/issues/282) by removing
`wasmer` and `wasmer-wasix`, which lack x32 system support.
- Benchmark `rln-wasm` with `wasm_bindgen_test`, covering RLN instance
creation, key generation, witness calculation, proving, and
verification. Also, add them to `v0.6.1` in
[benchmark-v0.6.1](https://github.com/vacp2p/zerokit/tree/benchmark-v0.6.1)
for comparison.
- Add `arkzkey` feature for rln-wasm, including tests, benchmarks, CI
workflow updates, and related documentation.
- Benchmark rln-wasm in the browser using HTML, covering initialization,
RLN instance creation, proving, and verification; fork to the
`benchmark-v0.7.0` branch for later use
[here](https://github.com/vacp2p/zerokit/tree/benchmark-v0.7.0).
- Fix clippy error: "this `repeat().take()` can be written more
concisely" on CI workflow for `utils` module.
([error](https://github.com/vacp2p/zerokit/actions/runs/14258579070/job/39965568013))
- Update Makefile.toml to be able to run `make build`, `make test`, and
`make bench` from the root and inside each module.
2025-04-08 13:37:18 +07:00
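The install changes above reduce to the following commands (the `wasm-pack` installer line matches the updated CI; the `wabt` package names are assumed from Homebrew/APT):

```bash
# Install wasm-pack via its upstream installer script:
curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
# Install WABT from a package manager instead of cloning and building it:
brew install wabt             # macOS
sudo apt-get install -y wabt  # Debian/Ubuntu
```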
markoburcul
c60e0c33fc nix: add flake and derivation for android-arm64 arch
Referenced issue: https://github.com/waku-org/nwaku/issues/3232
2025-04-04 10:50:26 +02:00
94 changed files with 6895 additions and 6198 deletions


@@ -5,69 +5,70 @@ on:
paths-ignore:
- "**.md"
- "!.github/workflows/*.yml"
- "!rln-wasm/**"
- "!rln/src/**"
- "!rln/resources/**"
- "!utils/src/**"
- "!rln-wasm-utils/**"
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
paths-ignore:
- "**.md"
- "!.github/workflows/*.yml"
- "!rln-wasm/**"
- "!rln/src/**"
- "!rln/resources/**"
- "!utils/src/**"
- "!rln-wasm-utils/**"
name: Tests
name: CI
jobs:
utils-test:
# skip tests on draft PRs
if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
strategy:
matrix:
platform: [ ubuntu-latest, macos-latest ]
crate: [ utils ]
platform: [ubuntu-latest, macos-latest]
crate: [utils]
runs-on: ${{ matrix.platform }}
timeout-minutes: 60
name: test - ${{ matrix.crate }} - ${{ matrix.platform }}
name: Test - ${{ matrix.crate }} - ${{ matrix.platform }}
steps:
- name: Checkout sources
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Install stable toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
run: make installdeps
- name: cargo-make test
- name: Test utils
run: |
cargo make test --release
working-directory: ${{ matrix.crate }}
rln-test:
# skip tests on draft PRs
if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
strategy:
matrix:
platform: [ ubuntu-latest, macos-latest ]
crate: [ rln ]
feature: [ "default", "arkzkey", "stateless" ]
platform: [ubuntu-latest, macos-latest]
crate: [rln]
feature: ["default", "stateless"]
runs-on: ${{ matrix.platform }}
timeout-minutes: 60
name: test - ${{ matrix.crate }} - ${{ matrix.platform }} - ${{ matrix.feature }}
name: Test - ${{ matrix.crate }} - ${{ matrix.platform }} - ${{ matrix.feature }}
steps:
- name: Checkout sources
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Install stable toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
run: make installdeps
- name: cargo-make test
- name: Test rln
run: |
if [ ${{ matrix.feature }} == default ]; then
cargo make test --release
@@ -76,54 +77,133 @@ jobs:
fi
working-directory: ${{ matrix.crate }}
rln-wasm-test:
# skip tests on draft PRs
if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
strategy:
matrix:
platform: [ubuntu-latest, macos-latest]
crate: [rln-wasm]
feature: ["default"]
runs-on: ${{ matrix.platform }}
timeout-minutes: 60
name: Test - ${{ matrix.crate }} - ${{ matrix.platform }} - ${{ matrix.feature }}
steps:
- uses: actions/checkout@v4
- name: Install stable toolchain
uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
run: make installdeps
- name: Build rln-wasm
run: cargo make build
working-directory: ${{ matrix.crate }}
- name: Test rln-wasm on node
run: cargo make test --release
working-directory: ${{ matrix.crate }}
- name: Test rln-wasm on browser
run: cargo make test_browser --release
working-directory: ${{ matrix.crate }}
rln-wasm-parallel-test:
# skip tests on draft PRs
if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
strategy:
matrix:
platform: [ubuntu-latest, macos-latest]
crate: [rln-wasm]
feature: ["parallel"]
runs-on: ${{ matrix.platform }}
timeout-minutes: 60
name: Test - ${{ matrix.crate }} - ${{ matrix.platform }} - ${{ matrix.feature }}
steps:
- uses: actions/checkout@v4
- name: Install nightly toolchain
uses: dtolnay/rust-toolchain@nightly
with:
components: rust-src
targets: wasm32-unknown-unknown
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
run: make installdeps
- name: Build rln-wasm in parallel mode
run: cargo make build_parallel
working-directory: ${{ matrix.crate }}
- name: Test rln-wasm in parallel mode on browser
run: cargo make test_parallel --release
working-directory: ${{ matrix.crate }}
rln-wasm-utils-test:
# skip tests on draft PRs
if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
strategy:
matrix:
platform: [ubuntu-latest, macos-latest]
crate: [rln-wasm-utils]
runs-on: ${{ matrix.platform }}
timeout-minutes: 60
name: Test - ${{ matrix.crate }} - ${{ matrix.platform }}
steps:
- uses: actions/checkout@v4
- name: Install stable toolchain
uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
run: make installdeps
- name: Test rln-wasm-utils
run: cargo make test --release
working-directory: ${{ matrix.crate }}
lint:
# run on both ready and draft PRs
if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
strategy:
matrix:
# we run lint tests only on ubuntu
platform: [ ubuntu-latest ]
crate: [ rln, utils ]
platform: [ubuntu-latest]
crate: [rln, rln-wasm, rln-wasm-utils, utils]
runs-on: ${{ matrix.platform }}
timeout-minutes: 60
name: lint - ${{ matrix.crate }} - ${{ matrix.platform }}
name: Lint - ${{ matrix.crate }} - ${{ matrix.platform }}
steps:
- name: Checkout sources
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Install stable toolchain
uses: actions-rs/toolchain@v1
uses: dtolnay/rust-toolchain@stable
with:
profile: minimal
toolchain: stable
override: true
components: rustfmt, clippy
- uses: Swatinem/rust-cache@v2
- name: Install Dependencies
- name: Install dependencies
run: make installdeps
- name: cargo fmt
- name: Check formatting
if: success() || failure()
run: cargo fmt -- --check
working-directory: ${{ matrix.crate }}
- name: cargo clippy
- name: Check clippy
if: success() || failure()
run: |
cargo clippy --release -- -D warnings
cargo clippy --all-targets --release -- -D warnings
working-directory: ${{ matrix.crate }}
benchmark-utils:
# run only in pull requests
if: github.event_name == 'pull_request'
# run only on ready pull requests
if: github.event_name == 'pull_request' && !github.event.pull_request.draft
strategy:
matrix:
# we run benchmark tests only on ubuntu
platform: [ ubuntu-latest ]
crate: [ utils ]
platform: [ubuntu-latest]
crate: [utils]
runs-on: ${{ matrix.platform }}
timeout-minutes: 60
name: benchmark - ${{ matrix.platform }} - ${{ matrix.crate }}
name: Benchmark - ${{ matrix.crate }} - ${{ matrix.platform }}
steps:
- name: Checkout sources
uses: actions/checkout@v3
uses: actions/checkout@v4
- uses: Swatinem/rust-cache@v2
- uses: boa-dev/criterion-compare-action@v3
with:
@@ -131,24 +211,24 @@ jobs:
cwd: ${{ matrix.crate }}
benchmark-rln:
# run only in pull requests
if: github.event_name == 'pull_request'
# run only on ready pull requests
if: github.event_name == 'pull_request' && !github.event.pull_request.draft
strategy:
matrix:
# we run benchmark tests only on ubuntu
platform: [ ubuntu-latest ]
crate: [ rln ]
feature: [ "default", "arkzkey" ]
platform: [ubuntu-latest]
crate: [rln]
feature: ["default"]
runs-on: ${{ matrix.platform }}
timeout-minutes: 60
name: benchmark - ${{ matrix.platform }} - ${{ matrix.crate }} - ${{ matrix.feature }}
name: Benchmark - ${{ matrix.crate }} - ${{ matrix.platform }} - ${{ matrix.feature }}
steps:
- name: Checkout sources
uses: actions/checkout@v3
uses: actions/checkout@v4
- uses: Swatinem/rust-cache@v2
- uses: boa-dev/criterion-compare-action@v3
with:
branchName: ${{ github.base_ref }}
cwd: ${{ matrix.crate }}
features: ${{ matrix.feature }}
features: ${{ matrix.feature }}


@@ -6,38 +6,38 @@ on:
jobs:
linux:
name: Linux build
runs-on: ubuntu-latest
strategy:
matrix:
feature: [ "default", "arkzkey", "stateless" ]
feature:
- "stateless"
- "stateless,parallel"
- "pmtree-ft"
- "pmtree-ft,parallel"
- "fullmerkletree"
- "fullmerkletree,parallel"
- "optimalmerkletree"
- "optimalmerkletree,parallel"
target:
- x86_64-unknown-linux-gnu
- aarch64-unknown-linux-gnu
# - i686-unknown-linux-gnu
include:
- feature: stateless
cargo_args: --exclude rln-cli
name: Linux build
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Install stable toolchain
uses: actions-rs/toolchain@v1
uses: dtolnay/rust-toolchain@stable
with:
profile: minimal
toolchain: stable
override: true
target: ${{ matrix.target }}
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
run: make installdeps
- name: cross build
- name: Cross build
run: |
cross build --release --target ${{ matrix.target }} --features ${{ matrix.feature }} --workspace ${{ matrix.cargo_args }}
cross build --release --target ${{ matrix.target }} --no-default-features --features ${{ matrix.feature }} --workspace
mkdir release
cp target/${{ matrix.target }}/release/librln* release/
tar -czvf ${{ matrix.target }}-${{ matrix.feature }}-rln.tar.gz release/
- name: Upload archive artifact
uses: actions/upload-artifact@v4
with:
@@ -50,33 +50,34 @@ jobs:
runs-on: macos-latest
strategy:
matrix:
feature: [ "default", "arkzkey", "stateless" ]
feature:
- "stateless"
- "stateless,parallel"
- "pmtree-ft"
- "pmtree-ft,parallel"
- "fullmerkletree"
- "fullmerkletree,parallel"
- "optimalmerkletree"
- "optimalmerkletree,parallel"
target:
- x86_64-apple-darwin
- aarch64-apple-darwin
include:
- feature: stateless
cargo_args: --exclude rln-cli
steps:
- name: Checkout sources
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Install stable toolchain
uses: actions-rs/toolchain@v1
uses: dtolnay/rust-toolchain@stable
with:
profile: minimal
toolchain: stable
override: true
target: ${{ matrix.target }}
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
run: make installdeps
- name: cross build
- name: Cross build
run: |
cross build --release --target ${{ matrix.target }} --features ${{ matrix.feature }} --workspace ${{ matrix.cargo_args }}
cross build --release --target ${{ matrix.target }} --no-default-features --features ${{ matrix.feature }} --workspace
mkdir release
cp target/${{ matrix.target }}/release/librln* release/
tar -czvf ${{ matrix.target }}-${{ matrix.feature }}-rln.tar.gz release/
- name: Upload archive artifact
uses: actions/upload-artifact@v4
with:
@@ -84,18 +85,120 @@ jobs:
path: ${{ matrix.target }}-${{ matrix.feature }}-rln.tar.gz
retention-days: 2
rln-wasm:
name: Build rln-wasm
runs-on: ubuntu-latest
strategy:
matrix:
feature:
- "default"
- "parallel"
steps:
- name: Checkout sources
uses: actions/checkout@v4
- name: Install stable toolchain
uses: dtolnay/rust-toolchain@stable
with:
targets: wasm32-unknown-unknown
- name: Install nightly toolchain
uses: dtolnay/rust-toolchain@nightly
with:
targets: wasm32-unknown-unknown
components: rust-src
- uses: Swatinem/rust-cache@v2
with:
key: rln-wasm-${{ matrix.feature }}
- name: Install dependencies
run: make installdeps
- name: Install wasm-pack
run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
- name: Install binaryen
run: |
sudo apt-get update
sudo apt-get install -y binaryen
- name: Build rln-wasm package
run: |
if [[ ${{ matrix.feature }} == *parallel* ]]; then
env RUSTFLAGS="-C target-feature=+atomics,+bulk-memory,+mutable-globals" \
rustup run nightly wasm-pack build --release --target web --scope waku \
--features ${{ matrix.feature }} -Z build-std=panic_abort,std
else
wasm-pack build --release --target web --scope waku
fi
sed -i.bak 's/rln-wasm/zerokit-rln-wasm/g' pkg/package.json && rm pkg/package.json.bak
wasm-opt pkg/rln_wasm_bg.wasm -Oz --strip-debug --strip-dwarf \
--remove-unused-module-elements --vacuum -o pkg/rln_wasm_bg.wasm
mkdir release
cp -r pkg/* release/
tar -czvf rln-wasm-${{ matrix.feature }}.tar.gz release/
working-directory: rln-wasm
- name: Upload archive artifact
uses: actions/upload-artifact@v4
with:
name: rln-wasm-${{ matrix.feature }}-archive
path: rln-wasm/rln-wasm-${{ matrix.feature }}.tar.gz
retention-days: 2
rln-wasm-utils:
name: Build rln-wasm-utils
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v4
- name: Install stable toolchain
uses: dtolnay/rust-toolchain@stable
with:
targets: wasm32-unknown-unknown
- name: Install nightly toolchain
uses: dtolnay/rust-toolchain@nightly
with:
targets: wasm32-unknown-unknown
components: rust-src
- uses: Swatinem/rust-cache@v2
with:
key: rln-wasm-utils
- name: Install dependencies
run: make installdeps
- name: Install wasm-pack
run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
- name: Install binaryen
run: |
sudo apt-get update
sudo apt-get install -y binaryen
- name: Build rln-wasm-utils package
run: |
wasm-pack build --release --target web --scope waku
sed -i.bak 's/rln-wasm-utils/zerokit-rln-wasm-utils/g' pkg/package.json && rm pkg/package.json.bak
wasm-opt pkg/rln_wasm_utils_bg.wasm -Oz --strip-debug --strip-dwarf \
--remove-unused-module-elements --vacuum -o pkg/rln_wasm_utils_bg.wasm
mkdir release
cp -r pkg/* release/
tar -czvf rln-wasm-utils.tar.gz release/
working-directory: rln-wasm-utils
- name: Upload archive artifact
uses: actions/upload-artifact@v4
with:
name: rln-wasm-utils-archive
path: rln-wasm-utils/rln-wasm-utils.tar.gz
retention-days: 2
prepare-prerelease:
name: Prepare pre-release
needs: [ linux, macos ]
needs: [linux, macos, rln-wasm, rln-wasm-utils]
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
ref: master
- name: Download artifacts
uses: actions/download-artifact@v4
- name: Delete tag
uses: dev-drprasad/delete-tag-and-release@v0.2.1
with:
@@ -103,7 +206,6 @@ jobs:
tag_name: nightly
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Create prerelease
run: |
start_tag=$(gh release list -L 2 --exclude-drafts | grep -v nightly | cut -d$'\t' -f3 | sed -n '1p')
@@ -115,7 +217,6 @@ jobs:
*-archive/*.tar.gz \
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Delete artifacts
uses: geekyeggo/delete-artifact@v5
with:


@@ -9,7 +9,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- uses: micnncim/action-label-syncer@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.gitignore vendored

@@ -1,15 +1,17 @@
# Common files to ignore in Rust projects
.DS_Store
.idea
*.log
tmp/
rln/pmtree_db
# Generated by Cargo will have compiled files and executables
/target
# Generated by rln-cli
rln-cli/database
# Generated by Cargo
# will have compiled files and executables
debug/
target/
wabt/
# Generated by Nix
result
# These are backup files generated by rustfmt
**/*.rs.bk


@@ -1,3 +1,5 @@
# CHANGE LOG
## 2023-02-28 v0.2
This release contains:
@@ -10,7 +12,6 @@ This release contains:
- Dual License under Apache 2.0 and MIT
- RLN compiles as a static library, which can be consumed through a C FFI
## 2022-09-19 v0.1
Initial beta release.

CONTRIBUTING.md Normal file

@@ -0,0 +1,205 @@
# Contributing to Zerokit
Thank you for your interest in contributing to Zerokit!
This guide will discuss how the Zerokit team handles [Commits](#commits),
[Pull Requests](#pull-requests) and [Merging](#merging).
**Note:** We won't force external contributors to follow this verbatim.
Following these guidelines definitely helps us in accepting your contributions.
## Getting Started
1. Fork the repository
2. Create a feature branch: `git checkout -b fix/your-bug-fix` or `git checkout -b feat/your-feature-name`
3. Make your changes following our guidelines
4. Ensure relevant tests pass (see [testing guidelines](#building-and-testing))
5. Commit your changes (signed commits are highly encouraged - see [commit guidelines](#commits))
6. Push and create a Pull Request
## Development Setup
### Prerequisites
Install the required dependencies:
```bash
make installdeps
```
Or use Nix:
```bash
nix develop
```
### Building and Testing
```bash
# Build all crates
make build
# Run standard tests
make test
# Module-specific testing
cd rln && cargo make test_stateless # Test stateless features
cd rln-wasm && cargo make test_browser # Test in browser headless mode
cd rln-wasm && cargo make test_parallel # Test parallel features
```
Choose the appropriate test commands based on your changes:
- Core RLN changes: `make test`
- Stateless features: `cargo make test_stateless`
- WASM/browser features: `cargo make test_browser`
- Parallel computation: `cargo make test_parallel`
### Tools
We recommend using the [markdownlint extension](https://marketplace.visualstudio.com/items?itemName=DavidAnson.vscode-markdownlint)
for VS Code to maintain consistent documentation formatting.
## Commits
We want to keep our commits small and focused.
This allows for easily reviewing individual commits and/or
splitting up pull requests when they grow too big.
Additionally, this allows us to merge smaller changes quicker and release more often.
**All commits must be GPG signed.**
This ensures the authenticity and integrity of contributions.
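For example, to sign commits with GPG:

```bash
# Enable GPG signing for all commits in this repository:
git config commit.gpgsign true
# Or sign a single commit explicitly:
git commit -S -m "feat(rln): add parallel witness calculation support"
```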
### Conventional Commits
When making the commit, write the commit message
following the [Conventional Commits (v1.0.0)](https://www.conventionalcommits.org/en/v1.0.0/) specification.
Following this convention allows us to provide an automated release process
that also generates a detailed Changelog.
As described by the specification, our commit messages should be written as:
```markdown
<type>[optional scope]: <description>
[optional body]
[optional footer(s)]
```
Some examples of this pattern include:
```markdown
feat(rln): add parallel witness calculation support
```
```markdown
fix(rln-wasm): resolve memory leak in browser threading
```
```markdown
docs: update RLN protocol flow documentation
```
#### Scopes
Use scopes to improve the Changelog:
- `rln` - Core RLN implementation
- `rln-cli` - Command-line interface
- `rln-wasm` - WebAssembly bindings
- `rln-wasm-utils` - WebAssembly utilities
- `utils` - Cryptographic utilities (Merkle trees, Poseidon hash)
- `ci` - Continuous integration
#### Breaking Changes
Mark breaking changes by adding `!` after the type:
```markdown
feat(rln)!: change proof generation API
```
## Pull Requests
Before creating a pull request, search for related issues.
If none exist, create an issue describing the problem you're solving.
### CI Flow
Our continuous integration automatically runs when you create a Pull Request:
- **Build verification**: All crates compile successfully
- **Test execution**: Comprehensive testing across all modules and feature combinations
- **Code formatting**: `cargo fmt` compliance
- **Linting**: `cargo clippy` checks
- **Cross-platform builds**: Testing on multiple platforms
Ensure the following commands pass before submitting:
```bash
# Format code
cargo fmt --all
# Check for common mistakes
cargo clippy --all-targets
# Run all tests
make test
```
### Adding Tests
Include tests for new functionality:
- **Unit tests** for specific functions
- **Integration tests** for broader functionality
- **WASM tests** for browser compatibility
### Typos and Small Changes
For minor fixes like typos, please report them as issues instead of opening PRs.
This helps us manage resources effectively and ensures meaningful contributions.
## Merging
We use "squash merging" for all pull requests.
This combines all commits into one commit, so keep pull requests small and focused.
### Requirements
- CI checks must pass
- At least one maintainer review and approval
- All review feedback addressed
### Squash Guidelines
When squashing, update the commit title to be a proper Conventional Commit and
include any other relevant commits in the body:
```markdown
feat(rln): implement parallel witness calculation (#123)
fix(tests): resolve memory leak in test suite
chore(ci): update rust toolchain version
```
## Roadmap Alignment
Please refer to our [project roadmap](https://roadmap.vac.dev/) for current development priorities.
Consider how your changes align with these strategic goals when contributing.
## Getting Help
- **Issues**: Create a GitHub issue for bugs or feature requests
- **Discussions**: Use GitHub Discussions for questions
- **Documentation**: Check existing docs and unit tests for examples
## License
By contributing to Zerokit, you agree that your contributions will be licensed under both MIT and
Apache 2.0 licenses, consistent with the project's dual licensing.
## Additional Resources
- [Conventional Commits Guide](https://www.conventionalcommits.org/en/v1.0.0/)
- [Project GitHub Repository](https://github.com/vacp2p/zerokit)

Cargo.lock generated

File diff suppressed because it is too large


@@ -1,6 +1,6 @@
[workspace]
members = ["rln", "rln-cli", "utils"]
default-members = ["rln", "rln-cli", "utils"]
members = ["rln", "utils"]
exclude = ["rln-cli", "rln-wasm", "rln-wasm-utils"]
resolver = "2"
# Compilation profile for any non-workspace member.


@@ -1,6 +1,6 @@
.PHONY: all installdeps build test clean
.PHONY: all installdeps build test bench clean
all: .pre-build build
all: installdeps build
.fetch-submodules:
@git submodule update --init --recursive
@@ -13,26 +13,26 @@ endif
installdeps: .pre-build
ifeq ($(shell uname),Darwin)
# commented due to https://github.com/orgs/Homebrew/discussions/4612
# @brew update
@brew install cmake ninja
@git -C "wabt" pull || git clone --recursive https://github.com/WebAssembly/wabt.git "wabt"
@cd wabt && mkdir -p build && make
@brew install ninja binaryen
else ifeq ($(shell uname),Linux)
@sudo apt-get update
@sudo apt-get install -y cmake ninja-build
@git -C "wabt" pull || git clone --recursive https://github.com/WebAssembly/wabt.git "wabt"
@cd wabt && mkdir -p build && cd build && cmake .. -GNinja && ninja && sudo ninja install
@if [ -f /etc/os-release ] && grep -q "ID=nixos" /etc/os-release; then \
echo "Detected NixOS, skipping apt-get installation."; \
else \
sudo apt-get install -y cmake ninja-build binaryen; \
fi
endif
# nvm already checks if it's installed, and no-ops if it is
@curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash
@. ${HOME}/.nvm/nvm.sh && nvm install 18.20.2 && nvm use 18.20.2;
@which wasm-pack > /dev/null && wasm-pack --version | grep -q "0.13.1" || cargo install wasm-pack --version=0.13.1
@test -s "$$HOME/.nvm/nvm.sh" || curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.2/install.sh | bash
@bash -c '. "$$HOME/.nvm/nvm.sh"; [ "$$(node -v 2>/dev/null)" = "v22.14.0" ] || nvm install 22.14.0; nvm use 22.14.0; nvm alias default 22.14.0'
build: .pre-build
build: installdeps
@cargo make build
test: .pre-build
test: build
@cargo make test
bench: build
@cargo make bench
clean:
@cargo clean


@@ -12,7 +12,8 @@ A collection of Zero Knowledge modules written in Rust and designed to be used i
Zerokit provides zero-knowledge cryptographic primitives with a focus on performance, security, and usability.
The current focus is on Rate-Limiting Nullifier [RLN](https://github.com/Rate-Limiting-Nullifier) implementation.
Current implementation is based on the following [specification](https://github.com/vacp2p/rfc-index/blob/main/vac/raw/rln-v2.md)
Current implementation is based on the following
[specification](https://github.com/vacp2p/rfc-index/blob/main/vac/raw/rln-v2.md)
and focused on RLNv2, which allows setting a rate limit for the number of messages that can be sent by a user.
## Features
@@ -24,19 +25,23 @@ and focused on RLNv2 which allows to set a rate limit for the number of messages
## Architecture
Zerokit currently focuses on RLN (Rate-Limiting Nullifier) implementation using [Circom](https://iden3.io/circom) circuits through ark-circom, providing an alternative to existing native Rust implementations.
Zerokit currently focuses on RLN (Rate-Limiting Nullifier) implementation using [Circom](https://iden3.io/circom)
circuits through ark-circom, providing an alternative to existing native Rust implementations.
## Build and Test
> [!IMPORTANT]
> For WASM support or x32 architecture builds, use version `0.6.1`. The current version has dependency issues for these platforms. WASM support will return in a future release.
### Install Dependencies
```bash
make installdeps
```
#### Use Nix to install dependencies
```bash
nix develop
```
### Build and Test All Crates
```bash
@@ -69,8 +74,8 @@ The execution graph file used by this code has been generated by means of the sa
> [!IMPORTANT]
> The circom-witnesscalc code fragments have been borrowed instead of depending on this crate,
because its types of input and output data were incompatible with the corresponding zerokit code fragments,
and circom-witnesscalc has some dependencies, which are redundant for our purpose.
> because its types of input and output data were incompatible with the corresponding zerokit code fragments,
> and circom-witnesscalc has some dependencies, which are redundant for our purpose.
## Documentation

flake.lock generated Normal file

@@ -0,0 +1,48 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1740603184,
"narHash": "sha256-t+VaahjQAWyA+Ctn2idyo1yxRIYpaDxMgHkgCNiMJa4=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "f44bd8ca21e026135061a0a57dcf3d0775b67a49",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "f44bd8ca21e026135061a0a57dcf3d0775b67a49",
"type": "github"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs",
"rust-overlay": "rust-overlay"
}
},
"rust-overlay": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1748399823,
"narHash": "sha256-kahD8D5hOXOsGbNdoLLnqCL887cjHkx98Izc37nDjlA=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "d68a69dc71bc19beb3479800392112c2f6218159",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

flake.nix Normal file

@@ -0,0 +1,69 @@
{
description = "A flake for building zerokit";
inputs = {
# Version 24.11
nixpkgs.url = "github:NixOS/nixpkgs?rev=f44bd8ca21e026135061a0a57dcf3d0775b67a49";
rust-overlay = {
url = "github:oxalica/rust-overlay";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = { self, nixpkgs, rust-overlay }:
let
stableSystems = [
"x86_64-linux" "aarch64-linux"
"x86_64-darwin" "aarch64-darwin"
"x86_64-windows" "i686-linux"
"i686-windows"
];
forAllSystems = nixpkgs.lib.genAttrs stableSystems;
overlays = [
(import rust-overlay)
(f: p: { inherit rust-overlay; })
];
pkgsFor = forAllSystems (system: import nixpkgs { inherit system overlays; });
in rec
{
packages = forAllSystems (system: let
pkgs = pkgsFor.${system};
buildPackage = pkgs.callPackage ./nix/default.nix;
buildRln = (buildPackage { src = self; project = "rln"; }).override;
in rec {
rln = buildRln { };
rln-linux-arm64 = buildRln {
target-platform = "aarch64-multiplatform";
rust-target = "aarch64-unknown-linux-gnu";
};
rln-android-arm64 = buildRln {
target-platform = "aarch64-android-prebuilt";
rust-target = "aarch64-linux-android";
};
rln-ios-arm64 = buildRln {
target-platform = "aarch64-darwin";
rust-target = "aarch64-apple-ios";
};
# TODO: Remove legacy name for RLN android library
zerokit-android-arm64 = rln-android-arm64;
default = rln;
});
devShells = forAllSystems (system: let
pkgs = pkgsFor.${system};
in {
default = pkgs.mkShell {
buildInputs = with pkgs; [
git cmake cargo-make rustup
binaryen ninja gnuplot
rust-bin.stable.latest.default
];
};
});
};
}
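For reference, the outputs defined above can be smoke-tested with:

```bash
# Evaluate the flake (this is what previously failed with a syntax error):
nix flake show .
# Enter the dev shell and build the default RLN package:
nix develop
nix build .#rln
```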

nix/default.nix Normal file

@@ -0,0 +1,64 @@
{
pkgs,
rust-overlay,
project,
src ? ../.,
release ? true,
target-platform ? null,
rust-target ? null,
features ? null,
}:
let
# Use cross-compilation if target-platform is specified.
targetPlatformPkgs = if target-platform != null
then pkgs.pkgsCross.${target-platform}
else pkgs;
rust-bin = rust-overlay.lib.mkRustBin { } targetPlatformPkgs.buildPackages;
# Use Rust and Cargo versions from rust-overlay.
rustPlatform = targetPlatformPkgs.makeRustPlatform {
cargo = rust-bin.stable.latest.minimal;
rustc = rust-bin.stable.latest.minimal;
};
in rustPlatform.buildRustPackage {
pname = "zerokit";
version = if src ? rev then src.rev else "nightly";
# Improve caching of sources
src = builtins.path { path = src; name = "zerokit"; };
cargoLock = {
lockFile = src + "/Cargo.lock";
allowBuiltinFetchGit = true;
};
nativeBuildInputs = [ pkgs.rust-cbindgen ];
doCheck = false;
CARGO_HOME = "/tmp";
buildPhase = ''
cargo build --lib \
${if release then "--release" else ""} \
${if rust-target != null then "--target=${rust-target}" else ""} \
${if features != null then "--features=${features}" else ""} \
--manifest-path ${project}/Cargo.toml
'';
installPhase = ''
set -eu
mkdir -p $out/lib
find target -type f -name 'librln.*' -not -path '*/deps/*' -exec cp -v '{}' "$out/lib/" \;
mkdir -p $out/include
cbindgen ${src}/rln -l c > "$out/include/rln.h"
'';
meta = with pkgs.lib; {
description = "Zerokit";
license = licenses.mit;
};
}
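Because the derivation exposes `release`, `target-platform`, `rust-target`, and `features` as arguments, variants can be built by overriding them. A sketch, assuming the `.override` attribute provided by `callPackage` (`--impure` is needed for `builtins.currentSystem` and the local flake path):

```bash
# Build a debug, stateless-only variant by overriding the arguments above:
nix build --impure --expr \
  '(builtins.getFlake (toString ./.)).packages.${builtins.currentSystem}.rln.override {
     release = false;
     features = "stateless";
   }'
```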


@@ -13,14 +13,13 @@ path = "src/examples/stateless.rs"
required-features = ["stateless"]
[dependencies]
rln = { path = "../rln", default-features = true, features = ["pmtree-ft"] }
zerokit_utils = { path = "../utils" }
clap = { version = "4.5.29", features = ["cargo", "derive", "env"] }
clap_derive = { version = "4.5.28" }
color-eyre = "0.6.2"
serde_json = "1.0.138"
serde = { version = "1.0.217", features = ["derive"] }
rln = { path = "../rln", version = "0.8.0", default-features = false }
zerokit_utils = { path = "../utils", version = "0.6.0", default-features = false }
clap = { version = "4.5.41", features = ["cargo", "derive", "env"] }
color-eyre = "0.6.5"
serde_json = "1.0.141"
serde = { version = "1.0", features = ["derive"] }
[features]
arkzkey = ["rln/arkzkey"]
stateless = ["rln/stateless"]
default = ["rln/pmtree-ft", "rln/parallel"]
stateless = ["rln/stateless", "rln/parallel"]


@@ -1,8 +1,9 @@
[tasks.build]
command = "cargo"
args = ["build", "--release"]
args = ["build"]
[tasks.test]
command = "cargo"
args = ["test", "--release"]
disabled = true
[tasks.bench]
disabled = true


@@ -27,16 +27,6 @@ If the configuration file is empty, default settings will be used, but the tree
We recommend using the example config, as all commands (except `new` and `create-with-params`) require an initialized RLN instance.
## Feature Flags
The CLI supports optional features. To enable the **arkzkey** feature, run:
```bash
cargo run --features arkzkey -- <SUBCOMMAND> [OPTIONS]
```
For more details, refer to the [Zerokit RLN Module](../rln/README.md) documentation.
## Relay Example
The following [Example](src/examples/relay.rs) demonstrates how RLN enables spam prevention in anonymous environments for multiple users.
@@ -47,15 +37,9 @@ You can run the example using the following command:
cargo run --example relay
```
or with the **arkzkey** feature flag:
You can also change **MESSAGE_LIMIT** and **TREE_DEPTH** in the [relay.rs](src/examples/relay.rs) file to see how the RLN instance behaves with different parameters.
```bash
cargo run --example relay --features arkzkey
```
You can also change **MESSAGE_LIMIT** and **TREEE_HEIGHT** in the [relay.rs](src/examples/relay.rs) file to see how the RLN instance behaves with different parameters.
The customize **TREEE_HEIGHT** constant differs from the default value of `20` should follow [Custom Circuit Compilation](../rln/README.md#advanced-custom-circuit-compilation) instructions.
Customizing the **TREE_DEPTH** constant to differ from the default value of `20` requires following the [Custom Circuit Compilation](../rln/README.md#advanced-custom-circuit-compilation) instructions.
## Stateless Example
@@ -66,13 +50,7 @@ This example functions similarly to the [Relay Example](#relay-example) but uses
You can run the example using the following command:
```bash
cargo run --example stateless --features stateless
```
or with the **arkzkey** feature flag:
```bash
cargo run --example stateless --features stateless,arkzkey
cargo run --example stateless --no-default-features --features stateless
```
## CLI Commands
@@ -82,19 +60,19 @@ cargo run --example stateless --features stateless,arkzkey
To initialize a new RLN instance:
```bash
cargo run new --tree-height <HEIGHT>
cargo run new --tree-depth <DEPTH>
```
To initialize an RLN instance with custom parameters:
```bash
cargo run new-with-params --resources-path <PATH> --tree-height <HEIGHT>
cargo run new-with-params --resources-path <PATH> --tree-depth <DEPTH>
```
To update the Merkle tree height:
To update the Merkle tree depth:
```bash
cargo run set-tree --tree-height <HEIGHT>
cargo run set-tree --tree-depth <DEPTH>
```
### Leaf Operations


@@ -1,11 +1,9 @@
{
"tree_config": {
"path": "database",
"temporary": false,
"cache_capacity": 150000,
"flush_every_ms": 12000,
"mode": "HighThroughput",
"use_compression": false
},
"tree_height": 20
"path": "database",
"temporary": false,
"cache_capacity": 1073741824,
"flush_every_ms": 500,
"mode": "HighThroughput",
"use_compression": false,
"tree_depth": 20
}


@@ -1,23 +1,23 @@
use std::path::PathBuf;
use clap::Subcommand;
use rln::circuit::TEST_TREE_HEIGHT;
use rln::circuit::TEST_TREE_DEPTH;
#[derive(Subcommand)]
pub(crate) enum Commands {
New {
#[arg(short, long, default_value_t = TEST_TREE_HEIGHT)]
tree_height: usize,
#[arg(short, long, default_value_t = TEST_TREE_DEPTH)]
tree_depth: usize,
},
NewWithParams {
#[arg(short, long, default_value_t = TEST_TREE_HEIGHT)]
tree_height: usize,
#[arg(short, long, default_value = "../rln/resources/tree_height_20")]
#[arg(short, long, default_value_t = TEST_TREE_DEPTH)]
tree_depth: usize,
#[arg(short, long, default_value = "../rln/resources/tree_depth_10")]
resources_path: PathBuf,
},
SetTree {
#[arg(short, long, default_value_t = TEST_TREE_HEIGHT)]
tree_height: usize,
#[arg(short, long, default_value_t = TEST_TREE_DEPTH)]
tree_depth: usize,
},
SetLeaf {
#[arg(short, long)]


@@ -6,15 +6,9 @@ use serde_json::Value;
pub const RLN_CONFIG_PATH: &str = "RLN_CONFIG_PATH";
#[derive(Default, Serialize, Deserialize)]
#[derive(Serialize, Deserialize)]
pub(crate) struct Config {
pub inner: Option<InnerConfig>,
}
#[derive(Default, Serialize, Deserialize)]
pub(crate) struct InnerConfig {
pub tree_height: usize,
pub tree_config: Value,
pub tree_config: Option<String>,
}
impl Config {
@@ -25,14 +19,13 @@ impl Config {
let mut file = File::open(path)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let inner: InnerConfig = serde_json::from_str(&contents)?;
Ok(Config { inner: Some(inner) })
let tree_config: Value = serde_json::from_str(&contents)?;
println!("Initializing RLN with custom config");
Ok(Config {
tree_config: Some(tree_config.to_string()),
})
}
Err(_) => Ok(Config::default()),
Err(_) => Ok(Config { tree_config: None }),
}
}
pub(crate) fn as_bytes(&self) -> Vec<u8> {
serde_json::to_string(&self.inner).unwrap().into_bytes()
}
}


@@ -6,18 +6,18 @@ use std::{
};
use clap::{Parser, Subcommand};
use color_eyre::{eyre::eyre, Result};
use color_eyre::{eyre::eyre, Report, Result};
use rln::{
circuit::Fr,
hashers::{hash_to_field, poseidon_hash},
hashers::{hash_to_field_le, poseidon_hash},
protocol::{keygen, prepare_prove_input, prepare_verify_input},
public::RLN,
utils::{bytes_le_to_fr, fr_to_bytes_le, generate_input_buffer},
utils::{fr_to_bytes_le, generate_input_buffer, IdSecret},
};
const MESSAGE_LIMIT: u32 = 1;
const TREEE_HEIGHT: usize = 20;
const TREE_DEPTH: usize = 10;
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
@@ -44,7 +44,7 @@ enum Commands {
#[derive(Debug, Clone)]
struct Identity {
identity_secret_hash: Fr,
identity_secret_hash: IdSecret,
id_commitment: Fr,
}
@@ -67,11 +67,8 @@ struct RLNSystem {
impl RLNSystem {
fn new() -> Result<Self> {
let mut resources: Vec<Vec<u8>> = Vec::new();
let resources_path: PathBuf = format!("../rln/resources/tree_height_{TREEE_HEIGHT}").into();
#[cfg(feature = "arkzkey")]
let resources_path: PathBuf = format!("../rln/resources/tree_depth_{TREE_DEPTH}").into();
let filenames = ["rln_final.arkzkey", "graph.bin"];
#[cfg(not(feature = "arkzkey"))]
let filenames = ["rln_final.zkey", "graph.bin"];
for filename in filenames {
let fullpath = resources_path.join(Path::new(filename));
let mut file = File::open(&fullpath)?;
@@ -81,7 +78,7 @@ impl RLNSystem {
resources.push(output_buffer);
}
let rln = RLN::new_with_params(
TREEE_HEIGHT,
TREE_DEPTH,
resources[0].clone(),
resources[1].clone(),
generate_input_buffer(),
@@ -103,7 +100,7 @@ impl RLNSystem {
println!("Registered users:");
for (index, identity) in &self.local_identities {
println!("User Index: {index}");
println!("+ Identity Secret Hash: {}", identity.identity_secret_hash);
println!("+ Identity Secret Hash: {}", *identity.identity_secret_hash);
println!("+ Identity Commitment: {}", identity.id_commitment);
println!();
}
@@ -118,12 +115,12 @@ impl RLNSystem {
match self.rln.set_next_leaf(&mut buffer) {
Ok(_) => {
println!("Registered User Index: {index}");
println!("+ Identity secret hash: {}", identity.identity_secret_hash);
println!("+ Identity secret hash: {}", *identity.identity_secret_hash);
println!("+ Identity commitment: {},", identity.id_commitment);
self.local_identities.insert(index, identity);
}
Err(_) => {
println!("Maximum user limit reached: 2^{TREEE_HEIGHT}");
println!("Maximum user limit reached: 2^{TREE_DEPTH}");
}
};
@@ -143,7 +140,7 @@ impl RLNSystem {
};
let serialized = prepare_prove_input(
identity.identity_secret_hash,
identity.identity_secret_hash.clone(),
user_index,
Fr::from(MESSAGE_LIMIT),
Fr::from(message_id),
@@ -182,7 +179,7 @@ impl RLNSystem {
Ok(false) => {
println!("Verification failed: message_id must be unique within the epoch and satisfy 0 <= message_id < MESSAGE_LIMIT: {MESSAGE_LIMIT}");
}
Err(err) => return Err(err),
Err(err) => return Err(Report::new(err)),
}
Ok(())
}
@@ -211,7 +208,7 @@ impl RLNSystem {
{
Ok(_) => {
let output_data = output.into_inner();
let (leaked_identity_secret_hash, _) = bytes_le_to_fr(&output_data);
let (leaked_identity_secret_hash, _) = IdSecret::from_bytes_le(&output_data);
if let Some((user_index, identity)) = self
.local_identities
@@ -221,20 +218,21 @@ impl RLNSystem {
})
.map(|(index, identity)| (*index, identity))
{
let real_identity_secret_hash = identity.identity_secret_hash;
let real_identity_secret_hash = identity.identity_secret_hash.clone();
if leaked_identity_secret_hash != real_identity_secret_hash {
Err(eyre!("identity secret hash mismatch {leaked_identity_secret_hash} != {real_identity_secret_hash}"))
Err(eyre!("identity secret hash mismatch: leaked_identity_secret_hash != real_identity_secret_hash"))
} else {
println!("DUPLICATE message ID detected! Reveal identity secret hash: {leaked_identity_secret_hash}");
println!(
"DUPLICATE message ID detected! Reveal identity secret hash: {}",
*leaked_identity_secret_hash
);
self.local_identities.remove(&user_index);
self.rln.delete_leaf(user_index)?;
println!("User index {user_index} has been SLASHED");
Ok(())
}
} else {
Err(eyre!(
"user identity secret hash {leaked_identity_secret_hash} not found"
))
Err(eyre!("user identity secret hash ******** not found"))
}
}
Err(err) => Err(eyre!("Failed to recover identity secret: {err}")),
@@ -246,8 +244,8 @@ fn main() -> Result<()> {
println!("Initializing RLN instance...");
print!("\x1B[2J\x1B[1;1H");
let mut rln_system = RLNSystem::new()?;
let rln_epoch = hash_to_field(b"epoch");
let rln_identifier = hash_to_field(b"rln-identifier");
let rln_epoch = hash_to_field_le(b"epoch");
let rln_identifier = hash_to_field_le(b"rln-identifier");
let external_nullifier = poseidon_hash(&[rln_epoch, rln_identifier]);
println!("RLN Relay Example:");
println!("Message Limit: {MESSAGE_LIMIT}");


@@ -1,4 +1,5 @@
#![cfg(feature = "stateless")]
use std::{
collections::HashMap,
io::{stdin, stdout, Cursor, Write},
@@ -7,14 +8,13 @@ use std::{
use clap::{Parser, Subcommand};
use color_eyre::{eyre::eyre, Result};
use rln::{
circuit::{Fr, TEST_TREE_HEIGHT},
hashers::{hash_to_field, poseidon_hash},
poseidon_tree::PoseidonTree,
circuit::{Fr, TEST_TREE_DEPTH},
hashers::{hash_to_field_le, poseidon_hash, PoseidonHash},
protocol::{keygen, prepare_verify_input, rln_witness_from_values, serialize_witness},
public::RLN,
utils::{bytes_le_to_fr, fr_to_bytes_le},
utils::{fr_to_bytes_le, IdSecret},
};
use zerokit_utils::ZerokitMerkleTree;
use zerokit_utils::{OptimalMerkleTree, ZerokitMerkleProof, ZerokitMerkleTree};
const MESSAGE_LIMIT: u32 = 1;
@@ -45,7 +45,7 @@ enum Commands {
#[derive(Debug, Clone)]
struct Identity {
identity_secret_hash: Fr,
identity_secret_hash: IdSecret,
id_commitment: Fr,
}
@@ -61,7 +61,7 @@ impl Identity {
struct RLNSystem {
rln: RLN,
tree: PoseidonTree,
tree: OptimalMerkleTree<PoseidonHash>,
used_nullifiers: HashMap<[u8; 32], Vec<u8>>,
local_identities: HashMap<usize, Identity>,
}
@@ -70,11 +70,12 @@ impl RLNSystem {
fn new() -> Result<Self> {
let rln = RLN::new()?;
let default_leaf = Fr::from(0);
let tree = PoseidonTree::new(
TEST_TREE_HEIGHT,
let tree: OptimalMerkleTree<PoseidonHash> = OptimalMerkleTree::new(
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<PoseidonTree>::default(),
)?;
ConfigOf::<OptimalMerkleTree<PoseidonHash>>::default(),
)
.unwrap();
Ok(RLNSystem {
rln,
@@ -93,7 +94,7 @@ impl RLNSystem {
println!("Registered users:");
for (index, identity) in &self.local_identities {
println!("User Index: {index}");
println!("+ Identity Secret Hash: {}", identity.identity_secret_hash);
println!("+ Identity Secret Hash: {}", *identity.identity_secret_hash);
println!("+ Identity Commitment: {}", identity.id_commitment);
println!();
}
@@ -107,7 +108,7 @@ impl RLNSystem {
self.tree.update_next(rate_commitment)?;
println!("Registered User Index: {index}");
println!("+ Identity secret hash: {}", identity.identity_secret_hash);
println!("+ Identity secret hash: {}", *identity.identity_secret_hash);
println!("+ Identity commitment: {}", identity.id_commitment);
self.local_identities.insert(index, identity);
@@ -127,11 +128,13 @@ impl RLNSystem {
};
let merkle_proof = self.tree.proof(user_index)?;
let x = hash_to_field_le(signal.as_bytes());
let rln_witness = rln_witness_from_values(
identity.identity_secret_hash,
&merkle_proof,
hash_to_field(signal.as_bytes()),
identity.identity_secret_hash.clone(),
merkle_proof.get_path_elements(),
merkle_proof.get_path_index(),
x,
external_nullifier,
Fr::from(MESSAGE_LIMIT),
Fr::from(message_id),
@@ -157,12 +160,12 @@ impl RLNSystem {
let mut input_buffer = Cursor::new(proof_with_signal);
let root = self.tree.root();
let root_serialized = fr_to_bytes_le(&root);
let mut root_buffer = Cursor::new(root_serialized);
let roots_serialized = fr_to_bytes_le(&root);
let mut roots_buffer = Cursor::new(roots_serialized);
match self
.rln
.verify_with_roots(&mut input_buffer, &mut root_buffer)
.verify_with_roots(&mut input_buffer, &mut roots_buffer)
{
Ok(true) => {
let nullifier = &proof_data[256..288];
@@ -207,7 +210,7 @@ impl RLNSystem {
{
Ok(_) => {
let output_data = output.into_inner();
let (leaked_identity_secret_hash, _) = bytes_le_to_fr(&output_data);
let (leaked_identity_secret_hash, _) = IdSecret::from_bytes_le(&output_data);
if let Some((user_index, identity)) = self
.local_identities
@@ -217,19 +220,19 @@ impl RLNSystem {
})
.map(|(index, identity)| (*index, identity))
{
let real_identity_secret_hash = identity.identity_secret_hash;
let real_identity_secret_hash = identity.identity_secret_hash.clone();
if leaked_identity_secret_hash != real_identity_secret_hash {
Err(eyre!("identity secret hash mismatch {leaked_identity_secret_hash} != {real_identity_secret_hash}"))
Err(eyre!("identity secret hash mismatch: leaked_identity_secret_hash != real_identity_secret_hash"))
} else {
println!("DUPLICATE message ID detected! Reveal identity secret hash: {leaked_identity_secret_hash}");
println!(
"DUPLICATE message ID detected! Reveal identity secret hash: ********"
);
self.local_identities.remove(&user_index);
println!("User index {user_index} has been SLASHED");
Ok(())
}
} else {
Err(eyre!(
"user identity secret hash {leaked_identity_secret_hash} not found"
))
Err(eyre!("user identity secret hash ******** not found"))
}
}
Err(err) => Err(eyre!("Failed to recover identity secret: {err}")),
@@ -241,8 +244,8 @@ fn main() -> Result<()> {
println!("Initializing RLN instance...");
print!("\x1B[2J\x1B[1;1H");
let mut rln_system = RLNSystem::new()?;
let rln_epoch = hash_to_field(b"epoch");
let rln_identifier = hash_to_field(b"rln-identifier");
let rln_epoch = hash_to_field_le(b"epoch");
let rln_identifier = hash_to_field_le(b"rln-identifier");
let external_nullifier = poseidon_hash(&[rln_epoch, rln_identifier]);
println!("RLN Stateless Relay Example:");
println!("Message Limit: {MESSAGE_LIMIT}");

View File

@@ -7,7 +7,7 @@ use std::{
use clap::Parser;
use color_eyre::{eyre::Report, Result};
use commands::Commands;
use config::{Config, InnerConfig};
use config::Config;
use rln::{
public::RLN,
utils::{bytes_le_to_fr, bytes_le_to_vec_fr},
@@ -35,43 +35,36 @@ fn main() -> Result<()> {
};
match cli.command {
Some(Commands::New { tree_height }) => {
Some(Commands::New { tree_depth }) => {
let config = Config::load_config()?;
state.rln = if let Some(InnerConfig { tree_height, .. }) = config.inner {
state.rln = if let Some(tree_config) = config.tree_config {
println!("Initializing RLN with custom config");
Some(RLN::new(tree_height, Cursor::new(config.as_bytes()))?)
Some(RLN::new(tree_depth, Cursor::new(tree_config.as_bytes()))?)
} else {
println!("Initializing RLN with default config");
Some(RLN::new(tree_height, Cursor::new(json!({}).to_string()))?)
Some(RLN::new(tree_depth, Cursor::new(json!({}).to_string()))?)
};
Ok(())
}
Some(Commands::NewWithParams {
tree_height,
tree_depth,
resources_path,
}) => {
let mut resources: Vec<Vec<u8>> = Vec::new();
#[cfg(feature = "arkzkey")]
let filenames = ["rln_final.arkzkey", "graph.bin"];
#[cfg(not(feature = "arkzkey"))]
let filenames = ["rln_final.zkey", "graph.bin"];
for filename in filenames {
let fullpath = resources_path.join(Path::new(filename));
let mut file = File::open(&fullpath)?;
let metadata = std::fs::metadata(&fullpath)?;
let mut output_buffer = vec![0; metadata.len() as usize];
file.read_exact(&mut output_buffer)?;
resources.push(output_buffer);
let mut buffer = vec![0; metadata.len() as usize];
file.read_exact(&mut buffer)?;
resources.push(buffer);
}
let config = Config::load_config()?;
if let Some(InnerConfig {
tree_height,
tree_config,
}) = config.inner
{
if let Some(tree_config) = config.tree_config {
println!("Initializing RLN with custom config");
state.rln = Some(RLN::new_with_params(
tree_height,
tree_depth,
resources[0].clone(),
resources[1].clone(),
Cursor::new(tree_config.to_string().as_bytes()),
@@ -79,7 +72,7 @@ fn main() -> Result<()> {
} else {
println!("Initializing RLN with default config");
state.rln = Some(RLN::new_with_params(
tree_height,
tree_depth,
resources[0].clone(),
resources[1].clone(),
Cursor::new(json!({}).to_string()),
@@ -87,11 +80,11 @@ fn main() -> Result<()> {
};
Ok(())
}
Some(Commands::SetTree { tree_height }) => {
Some(Commands::SetTree { tree_depth }) => {
state
.rln
.ok_or(Report::msg("no RLN instance initialized"))?
.set_tree(tree_height)?;
.set_tree(tree_depth)?;
Ok(())
}
Some(Commands::SetLeaf { index, input }) => {
@@ -141,7 +134,7 @@ fn main() -> Result<()> {
.ok_or(Report::msg("no RLN instance initialized"))?
.prove(input_data, &mut output_buffer)?;
let proof = output_buffer.into_inner();
println!("proof: {:?}", proof);
println!("proof: {proof:?}");
Ok(())
}
Some(Commands::Verify { input }) => {
@@ -150,7 +143,7 @@ fn main() -> Result<()> {
.rln
.ok_or(Report::msg("no RLN instance initialized"))?
.verify(input_data)?;
println!("verified: {:?}", verified);
println!("verified: {verified:?}");
Ok(())
}
Some(Commands::GenerateProof { input }) => {
@@ -161,7 +154,7 @@ fn main() -> Result<()> {
.ok_or(Report::msg("no RLN instance initialized"))?
.generate_rln_proof(input_data, &mut output_buffer)?;
let proof = output_buffer.into_inner();
println!("proof: {:?}", proof);
println!("proof: {proof:?}");
Ok(())
}
Some(Commands::VerifyWithRoots { input, roots }) => {
@@ -193,7 +186,7 @@ fn main() -> Result<()> {
let output_buffer_inner = output_buffer.into_inner();
let (path_elements, _) = bytes_le_to_vec_fr(&output_buffer_inner)?;
for (index, element) in path_elements.iter().enumerate() {
println!("path element {}: {}", index, element);
println!("path element {index}: {element}");
}
Ok(())
}

View File

@@ -1,9 +1,10 @@
use std::io::Cursor;
use color_eyre::Result;
use rln::public::RLN;
use rln::{circuit::TEST_TREE_DEPTH, public::RLN};
use serde_json::Value;
use crate::config::{Config, InnerConfig};
use crate::config::Config;
#[derive(Default)]
pub(crate) struct State {
@@ -13,8 +14,15 @@ pub(crate) struct State {
impl State {
pub(crate) fn load_state() -> Result<State> {
let config = Config::load_config()?;
let rln = if let Some(InnerConfig { tree_height, .. }) = config.inner {
Some(RLN::new(tree_height, Cursor::new(config.as_bytes()))?)
let rln = if let Some(tree_config) = config.tree_config {
let config_json: Value = serde_json::from_str(&tree_config)?;
let tree_depth = config_json["tree_depth"]
.as_u64()
.unwrap_or(TEST_TREE_DEPTH as u64);
Some(RLN::new(
tree_depth as usize,
Cursor::new(tree_config.as_bytes()),
)?)
} else {
None
};
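The fallback above implies a config shape worth spelling out: `tree_config` is a JSON string whose optional `tree_depth` field overrides `TEST_TREE_DEPTH`. A minimal sketch of that parse, with `tree_depth_from` as an illustrative helper name:

```rust
use rln::circuit::TEST_TREE_DEPTH;
use serde_json::{json, Value};

// Read `tree_depth` from a JSON config string, defaulting to TEST_TREE_DEPTH.
fn tree_depth_from(tree_config: &str) -> serde_json::Result<usize> {
    let config_json: Value = serde_json::from_str(tree_config)?;
    Ok(config_json["tree_depth"]
        .as_u64()
        .unwrap_or(TEST_TREE_DEPTH as u64) as usize)
}

fn demo() -> serde_json::Result<()> {
    assert_eq!(tree_depth_from(&json!({"tree_depth": 10}).to_string())?, 10);
    assert_eq!(tree_depth_from("{}")?, TEST_TREE_DEPTH); // field absent: default
    Ok(())
}
```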

21 rln-wasm-utils/.gitignore vendored Normal file
View File

@@ -0,0 +1,21 @@
# Common files to ignore in Rust projects
.DS_Store
.idea
*.log
tmp/
# Generated by Cargo will have compiled files and executables
/target
Cargo.lock
# Generated by rln-wasm
pkg/
# Generated by Nix
result
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb

35 rln-wasm-utils/Cargo.toml Normal file
View File

@@ -0,0 +1,35 @@
[package]
name = "rln-wasm-utils"
version = "0.1.0"
edition = "2024"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
# TODO: remove this once we have a proper release
rln = { path = "../rln", default-features = false, features = ["stateless"] }
js-sys = "0.3.77"
wasm-bindgen = "0.2.100"
rand = "0.8.5"
# The `console_error_panic_hook` crate provides better debugging of panics by
# logging them with `console.error`. This is great for development, but requires
# all the `std::fmt` and `std::panicking` infrastructure, so isn't great for
# code size when deploying.
console_error_panic_hook = { version = "0.1.7", optional = true }
[target.'cfg(target_arch = "wasm32")'.dependencies]
getrandom = { version = "0.2.16", features = ["js"] }
[dev-dependencies]
wasm-bindgen-test = "0.3.50"
web-sys = { version = "0.3.77", features = ["console"] }
ark-std = { version = "0.5.0", default-features = false }
[features]
default = ["console_error_panic_hook"]
[package.metadata.docs.rs]
all-features = true

View File

@@ -0,0 +1,36 @@
[tasks.build]
clear = true
dependencies = ["pack_build", "pack_rename", "pack_resize"]
[tasks.pack_build]
command = "wasm-pack"
args = ["build", "--release", "--target", "web", "--scope", "waku"]
[tasks.pack_rename]
script = "sed -i.bak 's/rln-wasm-utils/zerokit-rln-wasm-utils/g' pkg/package.json && rm pkg/package.json.bak"
[tasks.pack_resize]
command = "wasm-opt"
args = [
"pkg/rln_wasm_utils_bg.wasm",
"-Oz",
"--strip-debug",
"--strip-dwarf",
"--remove-unused-module-elements",
"--vacuum",
"-o",
"pkg/rln_wasm_utils_bg.wasm",
]
[tasks.test]
command = "wasm-pack"
args = [
"test",
"--release",
"--node",
"--target",
"wasm32-unknown-unknown",
"--",
"--nocapture",
]
dependencies = ["build"]

206 rln-wasm-utils/README.md Normal file
View File

@@ -0,0 +1,206 @@
# RLN WASM Utils
[![npm version](https://badge.fury.io/js/@waku%2Fzerokit-rln-wasm-utils.svg)](https://badge.fury.io/js/@waku%2Fzerokit-rln-wasm-utils)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![License: Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
The Zerokit RLN WASM Utils Module provides WebAssembly bindings for Rate-Limiting Nullifier [RLN](https://rfc.vac.dev/spec/32/) cryptographic primitives.
This module offers comprehensive functionality for identity generation and hashing needed for RLN applications.
## Features
### Identity Generation
- **Random Identity Generation**: Generate cryptographically secure random identities
- **Seeded Identity Generation**: Generate deterministic identities from seeds
- **Extended Identity Generation**: Generate extended identities with additional parameters
- **Seeded Extended Identity Generation**: Generate deterministic extended identities from seeds
- **Endianness Support**: Both little-endian and big-endian serialization support
### Hashing
- **Standard Hashing**: Hash arbitrary data to field elements
- **Poseidon Hashing**: Advanced cryptographic hashing using Poseidon hash function
- **Endianness Support**: Both little-endian and big-endian serialization support
## API Reference
### Identity Generation Functions
#### `generateMembershipKey(isLittleEndian: boolean): Uint8Array`
Generates a random membership key pair (identity secret and commitment).
**Inputs:**
- `isLittleEndian`: Boolean indicating endianness for serialization
**Outputs:** Serialized identity pair as `Uint8Array` in corresponding endianness
#### `generateExtendedMembershipKey(isLittleEndian: boolean): Uint8Array`
Generates an extended membership key with additional parameters.
**Inputs:**
- `isLittleEndian`: Boolean indicating endianness for serialization
**Outputs:** Serialized extended identity tuple as `Uint8Array` in corresponding endianness
#### `generateSeededMembershipKey(seed: Uint8Array, isLittleEndian: boolean): Uint8Array`
Generates a deterministic membership key from a seed.
**Inputs:**
- `seed`: Seed data as `Uint8Array`
- `isLittleEndian`: Boolean indicating endianness for serialization
**Outputs:** Serialized identity pair as `Uint8Array` in corresponding endianness
#### `generateSeededExtendedMembershipKey(seed: Uint8Array, isLittleEndian: boolean): Uint8Array`
Generates a deterministic extended membership key from a seed.
**Inputs:**
- `seed`: Seed data as `Uint8Array`
- `isLittleEndian`: Boolean indicating endianness for serialization
**Outputs:** Serialized extended identity tuple as `Uint8Array` in corresponding endianness
### Hashing Functions
#### `hash(input: Uint8Array, isLittleEndian: boolean): Uint8Array`
Hashes input data to a field element.
**Inputs:**
- `input`: Input data as `Uint8Array`
- `isLittleEndian`: Boolean indicating endianness for serialization
**Outputs:** Serialized hash result as `Uint8Array` in corresponding endianness
#### `poseidonHash(input: Uint8Array, isLittleEndian: boolean): Uint8Array`
Computes Poseidon hash of input field elements.
**Inputs:**
- `input`: Serialized field elements as `Uint8Array` (format: length + field elements)
- `isLittleEndian`: Boolean indicating endianness for serialization
**Outputs:** Serialized hash result as `Uint8Array` in corresponding endianness
## Usage Examples
### JavaScript/TypeScript
```javascript
import init, {
generateMembershipKey,
generateSeededMembershipKey,
hash,
poseidonHash
} from '@waku/zerokit-rln-wasm-utils';
// Initialize the WASM module
await init();
// Generate a random membership key
const membershipKey = generateMembershipKey(true); // little-endian
console.log('Membership key:', membershipKey);
// Generate a deterministic membership key from seed
const seed = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
const seededKey = generateSeededMembershipKey(seed, true);
console.log('Seeded key:', seededKey);
// Hash some data
const input = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
const hashResult = hash(input, true);
console.log('Hash result:', hashResult);
// Poseidon hash with field elements
const fieldElements = new Uint8Array([
// Length (8 bytes) + field elements (32 bytes each)
1, 0, 0, 0, 0, 0, 0, 0, // length = 1
// field element data...
]);
const poseidonResult = poseidonHash(fieldElements, true);
console.log('Poseidon hash:', poseidonResult);
```
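The length-prefixed layout sketched in the comment above (an 8-byte little-endian count, then 32 bytes per field element) is what the `rln` crate's `vec_fr_to_bytes_le` produces, and it is how the Rust tests in this PR build the input. A minimal wasm32-only sketch, with `poseidon_of_pair` as an illustrative name:

```rust
use js_sys::Uint8Array;
use rln::circuit::Fr;
use rln::utils::{bytes_le_to_fr, vec_fr_to_bytes_le};
use rln_wasm_utils::wasm_poseidon_hash;

// Hash two field elements: serialize them with the 8-byte little-endian
// count prefix, call the binding, then deserialize the 32-byte result.
fn poseidon_of_pair() -> Fr {
    let inputs = vec![Fr::from(1u64), Fr::from(2u64)];
    let serialized = vec_fr_to_bytes_le(&inputs);
    let input = Uint8Array::from(&serialized[..]);
    let output = wasm_poseidon_hash(input, true).expect("poseidon hash failed");
    let (hash, _) = bytes_le_to_fr(&output.to_vec());
    hash
}
```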
## Install Dependencies
> [!NOTE]
> This project requires the following tools:
>
> - `wasm-pack` - for compiling Rust to WebAssembly
> - `cargo-make` - for running build commands
> - `nvm` - to install and manage Node.js
>
> Ensure all dependencies are installed before proceeding.
### Manually
#### Install `wasm-pack`
```bash
cargo install wasm-pack --version=0.13.1
```
#### Install `cargo-make`
```bash
cargo install cargo-make
```
#### Install `Node.js`
If you don't have `nvm` (Node Version Manager), install it by following
the [installation instructions](https://github.com/nvm-sh/nvm?tab=readme-ov-file#install--update-script).
After installing `nvm`, install and use Node.js `v22.14.0`:
```bash
nvm install 22.14.0
nvm use 22.14.0
nvm alias default 22.14.0
```
If you already have Node.js installed,
check your version with the `node -v` command; the version must be strictly greater than 22.
### Or install everything
You can run the following command from the root of the repository to install all required dependencies for `zerokit`:
```bash
make installdeps
```
## Building the library
First, navigate to the rln-wasm-utils directory:
```bash
cd rln-wasm-utils
```
Compile rln-wasm-utils for `wasm32-unknown-unknown`:
```bash
cargo make build
```
## Running tests
```bash
cargo make test
```
## License
This project is licensed under both MIT and Apache 2.0 licenses. See the LICENSE files for details.

112 rln-wasm-utils/src/lib.rs Normal file
View File

@@ -0,0 +1,112 @@
#![cfg(target_arch = "wasm32")]
use js_sys::Uint8Array;
use rln::public::{
extended_key_gen, hash, key_gen, poseidon_hash, seeded_extended_key_gen, seeded_key_gen,
};
use std::vec::Vec;
use wasm_bindgen::prelude::*;
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateMembershipKey)]
pub fn wasm_key_gen(is_little_endian: bool) -> Result<Uint8Array, String> {
let mut output_data: Vec<u8> = Vec::new();
if let Err(err) = key_gen(&mut output_data, is_little_endian) {
std::mem::forget(output_data);
Err(format!(
"Msg: could not generate membership keys, Error: {:#?}",
err
))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateExtendedMembershipKey)]
pub fn wasm_extended_key_gen(is_little_endian: bool) -> Result<Uint8Array, String> {
let mut output_data: Vec<u8> = Vec::new();
if let Err(err) = extended_key_gen(&mut output_data, is_little_endian) {
std::mem::forget(output_data);
Err(format!(
"Msg: could not generate membership keys, Error: {:#?}",
err
))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateSeededMembershipKey)]
pub fn wasm_seeded_key_gen(seed: Uint8Array, is_little_endian: bool) -> Result<Uint8Array, String> {
let mut output_data: Vec<u8> = Vec::new();
let input_data = &seed.to_vec()[..];
if let Err(err) = seeded_key_gen(input_data, &mut output_data, is_little_endian) {
std::mem::forget(output_data);
Err(format!(
"Msg: could not generate membership key, Error: {:#?}",
err
))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateSeededExtendedMembershipKey)]
pub fn wasm_seeded_extended_key_gen(
seed: Uint8Array,
is_little_endian: bool,
) -> Result<Uint8Array, String> {
let mut output_data: Vec<u8> = Vec::new();
let input_data = &seed.to_vec()[..];
if let Err(err) = seeded_extended_key_gen(input_data, &mut output_data, is_little_endian) {
std::mem::forget(output_data);
Err(format!(
"Msg: could not generate membership key, Error: {:#?}",
err
))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
#[wasm_bindgen(js_name = hash)]
pub fn wasm_hash(input: Uint8Array, is_little_endian: bool) -> Result<Uint8Array, String> {
let mut output_data: Vec<u8> = Vec::new();
let input_data = &input.to_vec()[..];
if let Err(err) = hash(input_data, &mut output_data, is_little_endian) {
std::mem::forget(output_data);
Err(format!("Msg: could not generate hash, Error: {:#?}", err))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
#[wasm_bindgen(js_name = poseidonHash)]
pub fn wasm_poseidon_hash(input: Uint8Array, is_little_endian: bool) -> Result<Uint8Array, String> {
let mut output_data: Vec<u8> = Vec::new();
let input_data = &input.to_vec()[..];
if let Err(err) = poseidon_hash(input_data, &mut output_data, is_little_endian) {
std::mem::forget(output_data);
Err(format!(
"Msg: could not generate poseidon hash, Error: {:#?}",
err
))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
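For reference, the `Uint8Array` returned by `wasm_key_gen` (`generateMembershipKey` in JS) concatenates the identity secret and the commitment, 32 bytes each in the chosen endianness; the tests later in this PR rely on exactly this split. A minimal wasm32-only sketch of consuming it from Rust:

```rust
use js_sys::Uint8Array;
use rln::circuit::Fr;
use rln::utils::{bytes_le_to_fr, IdSecret};
use rln_wasm_utils::wasm_key_gen;

// Split the 64-byte little-endian key-gen output into (secret, commitment).
fn split_membership_key() -> (IdSecret, Fr) {
    let keys: Uint8Array = wasm_key_gen(true).expect("key generation failed");
    let (identity_secret_hash, _) = IdSecret::from_bytes_le(&keys.subarray(0, 32).to_vec());
    let (id_commitment, _) = bytes_le_to_fr(&keys.subarray(32, 64).to_vec());
    (identity_secret_hash, id_commitment)
}
```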

View File

@@ -0,0 +1,114 @@
#![cfg(target_arch = "wasm32")]
#[cfg(test)]
mod test {
use ark_std::{UniformRand, rand::thread_rng};
use rand::Rng;
use rln::circuit::Fr;
use rln::hashers::{ROUND_PARAMS, hash_to_field_le, poseidon_hash};
use rln::protocol::{
deserialize_identity_pair_be, deserialize_identity_pair_le, deserialize_identity_tuple_be,
deserialize_identity_tuple_le,
};
use rln::utils::{bytes_le_to_fr, vec_fr_to_bytes_le};
use rln_wasm_utils::{
wasm_extended_key_gen, wasm_hash, wasm_key_gen, wasm_poseidon_hash,
wasm_seeded_extended_key_gen, wasm_seeded_key_gen,
};
use wasm_bindgen_test::*;
#[wasm_bindgen_test]
fn test_wasm_key_gen() {
let result_le = wasm_key_gen(true);
assert!(result_le.is_ok());
deserialize_identity_pair_le(result_le.unwrap().to_vec());
let result_be = wasm_key_gen(false);
assert!(result_be.is_ok());
deserialize_identity_pair_be(result_be.unwrap().to_vec());
}
#[wasm_bindgen_test]
fn test_wasm_extended_key_gen() {
let result_le = wasm_extended_key_gen(true);
assert!(result_le.is_ok());
deserialize_identity_tuple_le(result_le.unwrap().to_vec());
let result_be = wasm_extended_key_gen(false);
assert!(result_be.is_ok());
deserialize_identity_tuple_be(result_be.unwrap().to_vec());
}
#[wasm_bindgen_test]
fn test_wasm_seeded_key_gen() {
// Create a test seed
let seed_data = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let seed = js_sys::Uint8Array::from(&seed_data[..]);
let result_le = wasm_seeded_key_gen(seed.clone(), true);
assert!(result_le.is_ok());
let fr_le = deserialize_identity_pair_le(result_le.unwrap().to_vec());
let result_be = wasm_seeded_key_gen(seed, false);
assert!(result_be.is_ok());
let fr_be = deserialize_identity_pair_be(result_be.unwrap().to_vec());
assert_eq!(fr_le, fr_be);
}
#[wasm_bindgen_test]
fn test_wasm_seeded_extended_key_gen() {
// Create a test seed
let seed_data = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let seed = js_sys::Uint8Array::from(&seed_data[..]);
let result_le = wasm_seeded_extended_key_gen(seed.clone(), true);
assert!(result_le.is_ok());
let fr_le = deserialize_identity_tuple_le(result_le.unwrap().to_vec());
let result_be = wasm_seeded_extended_key_gen(seed, false);
assert!(result_be.is_ok());
let fr_be = deserialize_identity_tuple_be(result_be.unwrap().to_vec());
assert_eq!(fr_le, fr_be);
}
#[wasm_bindgen_test]
fn test_wasm_hash() {
// Create test input data
let signal: [u8; 32] = [0; 32];
let input = js_sys::Uint8Array::from(&signal[..]);
let result_le = wasm_hash(input.clone(), true);
assert!(result_le.is_ok());
let serialized_hash = result_le.unwrap().to_vec();
let (hash1, _) = bytes_le_to_fr(&serialized_hash);
let hash2 = hash_to_field_le(&signal);
assert_eq!(hash1, hash2);
}
#[wasm_bindgen_test]
fn test_wasm_poseidon_hash() {
let mut rng = thread_rng();
let number_of_inputs = rng.gen_range(1..ROUND_PARAMS.len());
let mut inputs = Vec::with_capacity(number_of_inputs);
for _ in 0..number_of_inputs {
inputs.push(Fr::rand(&mut rng));
}
let inputs_ser = vec_fr_to_bytes_le(&inputs);
let input = js_sys::Uint8Array::from(&inputs_ser[..]);
let expected_hash = poseidon_hash(inputs.as_ref());
let result_le = wasm_poseidon_hash(input.clone(), true);
assert!(result_le.is_ok());
let serialized_hash = result_le.unwrap().to_vec();
let (received_hash, _) = bytes_le_to_fr(&serialized_hash);
assert_eq!(received_hash, expected_hash);
}
}

21 rln-wasm/.gitignore vendored Normal file
View File

@@ -0,0 +1,21 @@
# Common files to ignore in Rust projects
.DS_Store
.idea
*.log
tmp/
# Generated by Cargo will have compiled files and executables
/target
Cargo.lock
# Generated by rln-wasm
pkg/
# Generated by Nix
result
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb

47 rln-wasm/Cargo.toml Normal file
View File

@@ -0,0 +1,47 @@
[package]
name = "rln-wasm"
version = "0.2.0"
edition = "2021"
license = "MIT or Apache2"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
rln = { path = "../rln", version = "0.8.0", default-features = false, features = [
"stateless",
] }
rln-wasm-utils = { path = "../rln-wasm-utils", version = "0.1.0", default-features = false }
zerokit_utils = { path = "../utils", version = "0.6.0", default-features = false }
num-bigint = { version = "0.4.6", default-features = false }
js-sys = "0.3.77"
wasm-bindgen = "0.2.100"
serde-wasm-bindgen = "0.6.5"
wasm-bindgen-rayon = { version = "1.3.0", features = [
"no-bundler",
], optional = true }
# The `console_error_panic_hook` crate provides better debugging of panics by
# logging them with `console.error`. This is great for development, but requires
# all the `std::fmt` and `std::panicking` infrastructure, so isn't great for
# code size when deploying.
console_error_panic_hook = { version = "0.1.7", optional = true }
[target.'cfg(target_arch = "wasm32")'.dependencies]
getrandom = { version = "0.2.16", features = ["js"] }
[dev-dependencies]
serde_json = "1.0.141"
wasm-bindgen-test = "0.3.50"
wasm-bindgen-futures = "0.4.50"
[dev-dependencies.web-sys]
version = "0.3.77"
features = ["Window", "Navigator"]
[features]
default = ["console_error_panic_hook"]
parallel = ["rln/parallel", "wasm-bindgen-rayon"]
[package.metadata.docs.rs]
all-features = true

99 rln-wasm/Makefile.toml Normal file
View File

@@ -0,0 +1,99 @@
[tasks.build]
clear = true
dependencies = ["pack_build", "pack_rename", "pack_resize"]
[tasks.build_parallel]
clear = true
dependencies = ["pack_build_parallel", "pack_rename", "pack_resize"]
[tasks.pack_build]
command = "wasm-pack"
args = ["build", "--release", "--target", "web", "--scope", "waku"]
[tasks.pack_build_parallel]
command = "env"
args = [
"RUSTFLAGS=-C target-feature=+atomics,+bulk-memory,+mutable-globals",
"rustup",
"run",
"nightly",
"wasm-pack",
"build",
"--release",
"--target",
"web",
"--scope",
"waku",
"--features",
"parallel",
"-Z",
"build-std=panic_abort,std",
]
[tasks.pack_rename]
script = "sed -i.bak 's/rln-wasm/zerokit-rln-wasm/g' pkg/package.json && rm pkg/package.json.bak"
[tasks.pack_resize]
command = "wasm-opt"
args = [
"pkg/rln_wasm_bg.wasm",
"-Oz",
"--strip-debug",
"--strip-dwarf",
"--remove-unused-module-elements",
"--vacuum",
"-o",
"pkg/rln_wasm_bg.wasm",
]
[tasks.test]
command = "wasm-pack"
args = [
"test",
"--release",
"--node",
"--target",
"wasm32-unknown-unknown",
"--",
"--nocapture",
]
dependencies = ["build"]
[tasks.test_browser]
command = "wasm-pack"
args = [
"test",
"--release",
"--chrome",
"--headless",
"--target",
"wasm32-unknown-unknown",
"--",
"--nocapture",
]
dependencies = ["build"]
[tasks.test_parallel]
command = "env"
args = [
"RUSTFLAGS=-C target-feature=+atomics,+bulk-memory,+mutable-globals",
"rustup",
"run",
"nightly",
"wasm-pack",
"test",
"--release",
"--chrome",
"--headless",
"--target",
"wasm32-unknown-unknown",
"--features",
"parallel",
"-Z",
"build-std=panic_abort,std",
"--",
"--nocapture",
]
dependencies = ["build_parallel"]
[tasks.bench]
disabled = true

146 rln-wasm/README.md Normal file
View File

@@ -0,0 +1,146 @@
# RLN for WASM
[![npm version](https://badge.fury.io/js/@waku%2Fzerokit-rln-wasm.svg)](https://badge.fury.io/js/@waku%2Fzerokit-rln-wasm)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![License: Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
The Zerokit RLN WASM Module provides WebAssembly bindings for working with
Rate-Limiting Nullifier [RLN](https://rfc.vac.dev/spec/32/) zkSNARK proofs and primitives.
This module is used by [waku-org/js-rln](https://github.com/waku-org/js-rln/) to enable
RLN functionality in JavaScript/TypeScript applications.
## Install Dependencies
> [!NOTE]
> This project requires the following tools:
>
> - `wasm-pack` - for compiling Rust to WebAssembly
> - `cargo-make` - for running build commands
> - `nvm` - to install and manage Node.js
>
> Ensure all dependencies are installed before proceeding.
### Manually
#### Install `wasm-pack`
```bash
cargo install wasm-pack --version=0.13.1
```
#### Install `cargo-make`
```bash
cargo install cargo-make
```
#### Install `Node.js`
If you don't have `nvm` (Node Version Manager), install it by following
the [installation instructions](https://github.com/nvm-sh/nvm?tab=readme-ov-file#install--update-script).
After installing `nvm`, install and use Node.js `v22.14.0`:
```bash
nvm install 22.14.0
nvm use 22.14.0
nvm alias default 22.14.0
```
If you already have Node.js installed,
check your version with the `node -v` command; the version must be strictly greater than 22.
### Or install everything
You can run the following command from the root of the repository to install all required dependencies for `zerokit`:
```bash
make installdeps
```
## Building the library
First, navigate to the rln-wasm directory:
```bash
cd rln-wasm
```
Compile rln-wasm for `wasm32-unknown-unknown`:
```bash
cargo make build
```
## Running tests and benchmarks
```bash
cargo make test
```
If you want to run the tests in browser headless mode, you can use the following command:
```bash
cargo make test_browser
```
## Parallel computation
The library supports parallel computation using the `wasm-bindgen-rayon` crate,
enabling multi-threaded execution in the browser.
> [!NOTE]
> Parallel support is not enabled by default due to WebAssembly and browser limitations. \
> Compiling this feature requires `nightly` Rust.
### Build Setup
#### Install `nightly` Rust
```bash
rustup install nightly
```
### Build Commands
To enable parallel computation for WebAssembly threads, you can use the following command:
```bash
cargo make build_parallel
```
### WebAssembly Threading Support
Most modern browsers support WebAssembly threads,
but they require the following headers to enable `SharedArrayBuffer`, which is necessary for multithreading:
- Cross-Origin-Opener-Policy: same-origin
- Cross-Origin-Embedder-Policy: require-corp
Without these, the application will fall back to single-threaded mode.
## Feature detection
If you're targeting [older browser versions that didn't support WebAssembly threads yet](https://webassembly.org/roadmap/),
you'll likely want to create two builds - one with thread support and one without -
and use feature detection to choose the right one on the JavaScript side.
You can use [wasm-feature-detect](https://github.com/GoogleChromeLabs/wasm-feature-detect) library for this purpose.
For example, your code might look like this:
```js
import { threads } from 'wasm-feature-detect';
let wasmPkg;
if (await threads()) {
wasmPkg = await import('./pkg-with-threads/index.js');
await wasmPkg.default();
await wasmPkg.initThreadPool(navigator.hardwareConcurrency);
} else {
wasmPkg = await import('./pkg-without-threads/index.js');
await wasmPkg.default();
}
wasmPkg.nowCallAnyExportedFuncs();
```

View File

@@ -0,0 +1,328 @@
// File generated with https://github.com/iden3/circom
// following the instructions from:
// https://github.com/vacp2p/zerokit/tree/master/rln#advanced-custom-circuit-compilation
export async function builder(code, options) {
options = options || {};
let wasmModule;
try {
wasmModule = await WebAssembly.compile(code);
} catch (err) {
console.log(err);
console.log(
"\nTry to run circom --c in order to generate c++ code instead\n"
);
throw new Error(err);
}
let wc;
let errStr = "";
let msgStr = "";
const instance = await WebAssembly.instantiate(wasmModule, {
runtime: {
exceptionHandler: function (code) {
let err;
if (code == 1) {
err = "Signal not found.\n";
} else if (code == 2) {
err = "Too many signals set.\n";
} else if (code == 3) {
err = "Signal already set.\n";
} else if (code == 4) {
err = "Assert Failed.\n";
} else if (code == 5) {
err = "Not enough memory.\n";
} else if (code == 6) {
err = "Input signal array access exceeds the size.\n";
} else {
err = "Unknown error.\n";
}
throw new Error(err + errStr);
},
printErrorMessage: function () {
errStr += getMessage() + "\n";
// console.error(getMessage());
},
writeBufferMessage: function () {
const msg = getMessage();
// Any calls to `log()` will always end with a `\n`, so that's when we print and reset
if (msg === "\n") {
console.log(msgStr);
msgStr = "";
} else {
// If we've buffered other content, put a space in between the items
if (msgStr !== "") {
msgStr += " ";
}
// Then append the message to the message we are creating
msgStr += msg;
}
},
showSharedRWMemory: function () {
printSharedRWMemory();
},
},
});
const sanityCheck = options;
// options &&
// (
// options.sanityCheck ||
// options.logGetSignal ||
// options.logSetSignal ||
// options.logStartComponent ||
// options.logFinishComponent
// );
wc = new WitnessCalculator(instance, sanityCheck);
return wc;
function getMessage() {
var message = "";
var c = instance.exports.getMessageChar();
while (c != 0) {
message += String.fromCharCode(c);
c = instance.exports.getMessageChar();
}
return message;
}
function printSharedRWMemory() {
const shared_rw_memory_size = instance.exports.getFieldNumLen32();
const arr = new Uint32Array(shared_rw_memory_size);
for (let j = 0; j < shared_rw_memory_size; j++) {
arr[shared_rw_memory_size - 1 - j] =
instance.exports.readSharedRWMemory(j);
}
// If we've buffered other content, put a space in between the items
if (msgStr !== "") {
msgStr += " ";
}
// Then append the value to the message we are creating
msgStr += fromArray32(arr).toString();
}
}
class WitnessCalculator {
constructor(instance, sanityCheck) {
this.instance = instance;
this.version = this.instance.exports.getVersion();
this.n32 = this.instance.exports.getFieldNumLen32();
this.instance.exports.getRawPrime();
const arr = new Uint32Array(this.n32);
for (let i = 0; i < this.n32; i++) {
arr[this.n32 - 1 - i] = this.instance.exports.readSharedRWMemory(i);
}
this.prime = fromArray32(arr);
this.witnessSize = this.instance.exports.getWitnessSize();
this.sanityCheck = sanityCheck;
}
circom_version() {
return this.instance.exports.getVersion();
}
async _doCalculateWitness(input, sanityCheck) {
//input is assumed to be a map from signals to arrays of bigints
this.instance.exports.init(this.sanityCheck || sanityCheck ? 1 : 0);
const keys = Object.keys(input);
var input_counter = 0;
keys.forEach((k) => {
const h = fnvHash(k);
const hMSB = parseInt(h.slice(0, 8), 16);
const hLSB = parseInt(h.slice(8, 16), 16);
const fArr = flatArray(input[k]);
let signalSize = this.instance.exports.getInputSignalSize(hMSB, hLSB);
if (signalSize < 0) {
throw new Error(`Signal ${k} not found\n`);
}
if (fArr.length < signalSize) {
throw new Error(`Not enough values for input signal ${k}\n`);
}
if (fArr.length > signalSize) {
throw new Error(`Too many values for input signal ${k}\n`);
}
for (let i = 0; i < fArr.length; i++) {
const arrFr = toArray32(BigInt(fArr[i]) % this.prime, this.n32);
for (let j = 0; j < this.n32; j++) {
this.instance.exports.writeSharedRWMemory(j, arrFr[this.n32 - 1 - j]);
}
try {
this.instance.exports.setInputSignal(hMSB, hLSB, i);
input_counter++;
} catch (err) {
// console.log(`After adding signal ${i} of ${k}`)
throw new Error(err);
}
}
});
if (input_counter < this.instance.exports.getInputSize()) {
throw new Error(
`Not all inputs have been set. Only ${input_counter} out of ${this.instance.exports.getInputSize()}`
);
}
}
async calculateWitness(input, sanityCheck) {
const w = [];
await this._doCalculateWitness(input, sanityCheck);
for (let i = 0; i < this.witnessSize; i++) {
this.instance.exports.getWitness(i);
const arr = new Uint32Array(this.n32);
for (let j = 0; j < this.n32; j++) {
arr[this.n32 - 1 - j] = this.instance.exports.readSharedRWMemory(j);
}
w.push(fromArray32(arr));
}
return w;
}
async calculateBinWitness(input, sanityCheck) {
const buff32 = new Uint32Array(this.witnessSize * this.n32);
const buff = new Uint8Array(buff32.buffer);
await this._doCalculateWitness(input, sanityCheck);
for (let i = 0; i < this.witnessSize; i++) {
this.instance.exports.getWitness(i);
const pos = i * this.n32;
for (let j = 0; j < this.n32; j++) {
buff32[pos + j] = this.instance.exports.readSharedRWMemory(j);
}
}
return buff;
}
async calculateWTNSBin(input, sanityCheck) {
const buff32 = new Uint32Array(this.witnessSize * this.n32 + this.n32 + 11);
const buff = new Uint8Array(buff32.buffer);
await this._doCalculateWitness(input, sanityCheck);
//"wtns"
buff[0] = "w".charCodeAt(0);
buff[1] = "t".charCodeAt(0);
buff[2] = "n".charCodeAt(0);
buff[3] = "s".charCodeAt(0);
//version 2
buff32[1] = 2;
//number of sections: 2
buff32[2] = 2;
//id section 1
buff32[3] = 1;
const n8 = this.n32 * 4;
//id section 1 length in 64bytes
const idSection1length = 8 + n8;
const idSection1lengthHex = idSection1length.toString(16);
buff32[4] = parseInt(idSection1lengthHex.slice(0, 8), 16);
buff32[5] = parseInt(idSection1lengthHex.slice(8, 16), 16);
//this.n32
buff32[6] = n8;
//prime number
this.instance.exports.getRawPrime();
var pos = 7;
for (let j = 0; j < this.n32; j++) {
buff32[pos + j] = this.instance.exports.readSharedRWMemory(j);
}
pos += this.n32;
// witness size
buff32[pos] = this.witnessSize;
pos++;
//id section 2
buff32[pos] = 2;
pos++;
// section 2 length
const idSection2length = n8 * this.witnessSize;
const idSection2lengthHex = idSection2length.toString(16);
buff32[pos] = parseInt(idSection2lengthHex.slice(0, 8), 16);
buff32[pos + 1] = parseInt(idSection2lengthHex.slice(8, 16), 16);
pos += 2;
for (let i = 0; i < this.witnessSize; i++) {
this.instance.exports.getWitness(i);
for (let j = 0; j < this.n32; j++) {
buff32[pos + j] = this.instance.exports.readSharedRWMemory(j);
}
pos += this.n32;
}
return buff;
}
}
function toArray32(rem, size) {
const res = []; //new Uint32Array(size); //has no unshift
const radix = BigInt(0x100000000);
while (rem) {
res.unshift(Number(rem % radix));
rem = rem / radix;
}
if (size) {
var i = size - res.length;
while (i > 0) {
res.unshift(0);
i--;
}
}
return res;
}
function fromArray32(arr) {
//returns a BigInt
var res = BigInt(0);
const radix = BigInt(0x100000000);
for (let i = 0; i < arr.length; i++) {
res = res * radix + BigInt(arr[i]);
}
return res;
}
function flatArray(a) {
var res = [];
fillArray(res, a);
return res;
function fillArray(res, a) {
if (Array.isArray(a)) {
for (let i = 0; i < a.length; i++) {
fillArray(res, a[i]);
}
} else {
res.push(a);
}
}
}
function fnvHash(str) {
const uint64_max = BigInt(2) ** BigInt(64);
let hash = BigInt("0xCBF29CE484222325");
for (var i = 0; i < str.length; i++) {
hash ^= BigInt(str[i].charCodeAt());
hash *= BigInt(0x100000001b3);
hash %= uint64_max;
}
let shash = hash.toString(16);
let n = 16 - shash.length;
shash = "0".repeat(n).concat(shash);
return shash;
}

212 rln-wasm/src/lib.rs Normal file
View File

@@ -0,0 +1,212 @@
#![cfg(target_arch = "wasm32")]
use js_sys::{BigInt as JsBigInt, Object, Uint8Array};
use num_bigint::BigInt;
use rln::public::RLN;
use std::vec::Vec;
use wasm_bindgen::prelude::*;
#[cfg(feature = "parallel")]
pub use wasm_bindgen_rayon::init_thread_pool;
#[wasm_bindgen(js_name = initPanicHook)]
pub fn init_panic_hook() {
console_error_panic_hook::set_once();
}
#[wasm_bindgen(js_name = RLN)]
pub struct RLNWrapper {
// The purpose of this wrapper is to hold an RLN instance with the 'static lifetime
// because wasm_bindgen does not allow returning elements with lifetimes
instance: RLN,
}
// Macro to call methods with an arbitrary number of arguments
// whose last argument is an output buffer.
// The first argument to the macro is the context,
// the second is the actual method on `RLN`,
// the third is the error message reported on failure,
// and the rest are forwarded as the method's remaining arguments
macro_rules! call_with_output_and_error_msg {
// this variant is needed for the case when
// there are zero other arguments
($instance:expr, $method:ident, $error_msg:expr) => {
{
let mut output_data: Vec<u8> = Vec::new();
let new_instance = $instance.process();
if let Err(err) = new_instance.instance.$method(&mut output_data) {
std::mem::forget(output_data);
Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
};
($instance:expr, $method:ident, $error_msg:expr, $( $arg:expr ),* ) => {
{
let mut output_data: Vec<u8> = Vec::new();
let new_instance = $instance.process();
if let Err(err) = new_instance.instance.$method($($arg.process()),*, &mut output_data) {
std::mem::forget(output_data);
Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
};
}
macro_rules! call {
($instance:expr, $method:ident $(, $arg:expr)*) => {
{
let new_instance: &mut RLNWrapper = $instance.process();
new_instance.instance.$method($($arg.process()),*)
}
}
}
macro_rules! call_bool_method_with_error_msg {
($instance:expr, $method:ident, $error_msg:expr $(, $arg:expr)*) => {
{
let new_instance: &RLNWrapper = $instance.process();
new_instance.instance.$method($($arg.process()),*).map_err(|err| format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
}
}
}
trait ProcessArg {
type ReturnType;
fn process(self) -> Self::ReturnType;
}
impl ProcessArg for usize {
type ReturnType = usize;
fn process(self) -> Self::ReturnType {
self
}
}
impl<T> ProcessArg for Vec<T> {
type ReturnType = Vec<T>;
fn process(self) -> Self::ReturnType {
self
}
}
impl ProcessArg for *const RLN {
type ReturnType = &'static RLN;
fn process(self) -> Self::ReturnType {
unsafe { &*self }
}
}
impl ProcessArg for *const RLNWrapper {
type ReturnType = &'static RLNWrapper;
fn process(self) -> Self::ReturnType {
unsafe { &*self }
}
}
impl ProcessArg for *mut RLNWrapper {
type ReturnType = &'static mut RLNWrapper;
fn process(self) -> Self::ReturnType {
unsafe { &mut *self }
}
}
impl<'a> ProcessArg for &'a [u8] {
type ReturnType = &'a [u8];
fn process(self) -> Self::ReturnType {
self
}
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = newRLN)]
pub fn wasm_new(zkey: Uint8Array) -> Result<*mut RLNWrapper, String> {
let instance = RLN::new_with_params(zkey.to_vec()).map_err(|err| format!("{:#?}", err))?;
let wrapper = RLNWrapper { instance };
Ok(Box::into_raw(Box::new(wrapper)))
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = rlnWitnessToJson)]
pub fn wasm_rln_witness_to_json(
ctx: *mut RLNWrapper,
serialized_witness: Uint8Array,
) -> Result<Object, String> {
let inputs = call!(
ctx,
get_rln_witness_bigint_json,
&serialized_witness.to_vec()[..]
)
.map_err(|err| err.to_string())?;
let js_value = serde_wasm_bindgen::to_value(&inputs).map_err(|err| err.to_string())?;
Object::from_entries(&js_value).map_err(|err| format!("{:#?}", err))
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateRLNProofWithWitness)]
pub fn wasm_generate_rln_proof_with_witness(
ctx: *mut RLNWrapper,
calculated_witness: Vec<JsBigInt>,
serialized_witness: Uint8Array,
) -> Result<Uint8Array, String> {
let mut witness_vec: Vec<BigInt> = vec![];
for v in calculated_witness {
witness_vec.push(
v.to_string(10)
.map_err(|err| format!("{:#?}", err))?
.as_string()
.ok_or("not a string error")?
.parse::<BigInt>()
.map_err(|err| format!("{:#?}", err))?,
);
}
call_with_output_and_error_msg!(
ctx,
generate_rln_proof_with_witness,
"could not generate proof",
witness_vec,
serialized_witness.to_vec()
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = recoverIDSecret)]
pub fn wasm_recover_id_secret(
ctx: *const RLNWrapper,
input_proof_data_1: Uint8Array,
input_proof_data_2: Uint8Array,
) -> Result<Uint8Array, String> {
call_with_output_and_error_msg!(
ctx,
recover_id_secret,
"could not recover id secret",
&input_proof_data_1.to_vec()[..],
&input_proof_data_2.to_vec()[..]
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = verifyWithRoots)]
pub fn wasm_verify_with_roots(
ctx: *const RLNWrapper,
proof: Uint8Array,
roots: Uint8Array,
) -> Result<bool, String> {
call_bool_method_with_error_msg!(
ctx,
verify_with_roots,
"error while verifying proof with roots".to_string(),
&proof.to_vec()[..],
&roots.to_vec()[..]
)
}

266 rln-wasm/tests/browser.rs Normal file
View File

@@ -0,0 +1,266 @@
#![cfg(target_arch = "wasm32")]
#[cfg(test)]
mod tests {
use js_sys::{BigInt as JsBigInt, Date, Object, Uint8Array};
use rln::circuit::{Fr, TEST_TREE_DEPTH};
use rln::hashers::{hash_to_field_le, poseidon_hash, PoseidonHash};
use rln::protocol::{prepare_verify_input, rln_witness_from_values, serialize_witness};
use rln::utils::{bytes_le_to_fr, fr_to_bytes_le, IdSecret};
use rln_wasm::{
wasm_generate_rln_proof_with_witness, wasm_new, wasm_rln_witness_to_json,
wasm_verify_with_roots,
};
use rln_wasm_utils::wasm_key_gen;
use wasm_bindgen::{prelude::wasm_bindgen, JsValue};
use wasm_bindgen_test::{console_log, wasm_bindgen_test, wasm_bindgen_test_configure};
use zerokit_utils::{
OptimalMerkleProof, OptimalMerkleTree, ZerokitMerkleProof, ZerokitMerkleTree,
};
#[cfg(feature = "parallel")]
use {rln_wasm::init_thread_pool, wasm_bindgen_futures::JsFuture, web_sys::window};
#[wasm_bindgen(inline_js = r#"
export function isThreadpoolSupported() {
return typeof SharedArrayBuffer !== 'undefined' &&
typeof Atomics !== 'undefined' &&
typeof crossOriginIsolated !== 'undefined' &&
crossOriginIsolated;
}
export function initWitnessCalculator(jsCode) {
const processedCode = jsCode
.replace(/export\s+async\s+function\s+builder/, 'async function builder')
.replace(/export\s*\{\s*builder\s*\};?/g, '');
const moduleFunc = new Function(processedCode + '\nreturn { builder };');
const witnessCalculatorModule = moduleFunc();
window.witnessCalculatorBuilder = witnessCalculatorModule.builder;
if (typeof window.witnessCalculatorBuilder !== 'function') {
return false;
}
return true;
}
export function readFile(data) {
return new Uint8Array(data);
}
export async function calculateWitness(circom_data, inputs) {
const wasmBuffer = circom_data instanceof Uint8Array ? circom_data : new Uint8Array(circom_data);
const witnessCalculator = await window.witnessCalculatorBuilder(wasmBuffer);
const calculatedWitness = await witnessCalculator.calculateWitness(inputs, false);
return JSON.stringify(calculatedWitness, (key, value) =>
typeof value === "bigint" ? value.toString() : value
);
}
"#)]
extern "C" {
#[wasm_bindgen(catch)]
fn isThreadpoolSupported() -> Result<bool, JsValue>;
#[wasm_bindgen(catch)]
fn initWitnessCalculator(js: &str) -> Result<bool, JsValue>;
#[wasm_bindgen(catch)]
fn readFile(data: &[u8]) -> Result<Uint8Array, JsValue>;
#[wasm_bindgen(catch)]
async fn calculateWitness(circom_data: &[u8], inputs: Object) -> Result<JsValue, JsValue>;
}
const WITNESS_CALCULATOR_JS: &str = include_str!("../resources/witness_calculator.js");
const ARKZKEY_BYTES: &[u8] =
include_bytes!("../../rln/resources/tree_depth_10/rln_final.arkzkey");
const CIRCOM_BYTES: &[u8] = include_bytes!("../../rln/resources/tree_depth_10/rln.wasm");
wasm_bindgen_test_configure!(run_in_browser);
#[wasm_bindgen_test]
pub async fn rln_wasm_benchmark() {
// Check if thread pool is supported
#[cfg(feature = "parallel")]
if !isThreadpoolSupported().expect("Failed to check thread pool support") {
panic!("Thread pool is NOT supported");
} else {
// Initialize thread pool
let cpu_count = window()
.expect("Failed to get window")
.navigator()
.hardware_concurrency() as usize;
JsFuture::from(init_thread_pool(cpu_count))
.await
.expect("Failed to initialize thread pool");
}
// Initialize witness calculator
initWitnessCalculator(WITNESS_CALCULATOR_JS)
.expect("Failed to initialize witness calculator");
let mut results = String::from("\nbenchmarks:\n");
let iterations = 10;
let zkey = readFile(&ARKZKEY_BYTES).expect("Failed to read zkey file");
// Benchmark wasm_new
let start_wasm_new = Date::now();
for _ in 0..iterations {
let _ = wasm_new(zkey.clone()).expect("Failed to create RLN instance");
}
let wasm_new_result = Date::now() - start_wasm_new;
// Create RLN instance for other benchmarks
let rln_instance = wasm_new(zkey).expect("Failed to create RLN instance");
let mut tree: OptimalMerkleTree<PoseidonHash> =
OptimalMerkleTree::default(TEST_TREE_DEPTH).expect("Failed to create tree");
// Benchmark wasm_key_gen
let start_wasm_key_gen = Date::now();
for _ in 0..iterations {
let _ = wasm_key_gen(true).expect("Failed to generate keys");
}
let wasm_key_gen_result = Date::now() - start_wasm_key_gen;
// Generate identity pair for other benchmarks
let mem_keys = wasm_key_gen(true).expect("Failed to generate keys");
let id_key = mem_keys.subarray(0, 32);
let (identity_secret_hash, _) = IdSecret::from_bytes_le(&id_key.to_vec());
let (id_commitment, _) = bytes_le_to_fr(&mem_keys.subarray(32, 64).to_vec());
let epoch = hash_to_field_le(b"test-epoch");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);
let identity_index = tree.leaves_set();
let user_message_limit = Fr::from(100);
let rate_commitment = poseidon_hash(&[id_commitment, user_message_limit]);
tree.update_next(rate_commitment)
.expect("Failed to update tree");
let message_id = Fr::from(0);
let signal: [u8; 32] = [0; 32];
let x = hash_to_field_le(&signal);
let merkle_proof: OptimalMerkleProof<PoseidonHash> = tree
.proof(identity_index)
.expect("Failed to generate merkle proof");
let rln_witness = rln_witness_from_values(
identity_secret_hash,
merkle_proof.get_path_elements(),
merkle_proof.get_path_index(),
x,
external_nullifier,
user_message_limit,
message_id,
)
.expect("Failed to create RLN witness");
let serialized_witness =
serialize_witness(&rln_witness).expect("Failed to serialize witness");
let witness_buffer = Uint8Array::from(&serialized_witness[..]);
let json_inputs = wasm_rln_witness_to_json(rln_instance, witness_buffer.clone())
.expect("Failed to convert witness to JSON");
// Benchmark calculateWitness
let start_calculate_witness = Date::now();
for _ in 0..iterations {
let _ = calculateWitness(&CIRCOM_BYTES, json_inputs.clone())
.await
.expect("Failed to calculate witness");
}
let calculate_witness_result = Date::now() - start_calculate_witness;
// Calculate witness for other benchmarks
let calculated_witness_json = calculateWitness(&CIRCOM_BYTES, json_inputs)
.await
.expect("Failed to calculate witness")
.as_string()
.expect("Failed to convert calculated witness to string");
let calculated_witness_vec_str: Vec<String> =
serde_json::from_str(&calculated_witness_json).expect("Failed to parse JSON");
let calculated_witness: Vec<JsBigInt> = calculated_witness_vec_str
.iter()
.map(|x| JsBigInt::new(&x.into()).expect("Failed to create JsBigInt"))
.collect();
// Benchmark wasm_generate_rln_proof_with_witness
let start_wasm_generate_rln_proof_with_witness = Date::now();
for _ in 0..iterations {
let _ = wasm_generate_rln_proof_with_witness(
rln_instance,
calculated_witness.clone(),
witness_buffer.clone(),
)
.expect("Failed to generate proof");
}
let wasm_generate_rln_proof_with_witness_result =
Date::now() - start_wasm_generate_rln_proof_with_witness;
// Generate a proof for other benchmarks
let proof =
wasm_generate_rln_proof_with_witness(rln_instance, calculated_witness, witness_buffer)
.expect("Failed to generate proof");
let proof_data = proof.to_vec();
let verify_input = prepare_verify_input(proof_data, &signal);
let input_buffer = Uint8Array::from(&verify_input[..]);
let root = tree.root();
let roots_serialized = fr_to_bytes_le(&root);
let roots_buffer = Uint8Array::from(&roots_serialized[..]);
// Benchmark wasm_verify_with_roots
let start_wasm_verify_with_roots = Date::now();
for _ in 0..iterations {
let _ =
wasm_verify_with_roots(rln_instance, input_buffer.clone(), roots_buffer.clone())
.expect("Failed to verify proof");
}
let wasm_verify_with_roots_result = Date::now() - start_wasm_verify_with_roots;
// Verify the proof with the root
let is_proof_valid = wasm_verify_with_roots(rln_instance, input_buffer, roots_buffer)
.expect("Failed to verify proof");
assert!(is_proof_valid, "verification failed");
// Format and display results
let format_duration = |duration_ms: f64| -> String {
let avg_ms = duration_ms / (iterations as f64);
if avg_ms >= 1000.0 {
format!("{:.3} s", avg_ms / 1000.0)
} else {
format!("{:.3} ms", avg_ms)
}
};
results.push_str(&format!("wasm_new: {}\n", format_duration(wasm_new_result)));
results.push_str(&format!(
"wasm_key_gen: {}\n",
format_duration(wasm_key_gen_result)
));
results.push_str(&format!(
"calculateWitness: {}\n",
format_duration(calculate_witness_result)
));
results.push_str(&format!(
"wasm_generate_rln_proof_with_witness: {}\n",
format_duration(wasm_generate_rln_proof_with_witness_result)
));
results.push_str(&format!(
"wasm_verify_with_roots: {}\n",
format_duration(wasm_verify_with_roots_result)
));
// Log the results
console_log!("{results}");
}
}

247 rln-wasm/tests/node.rs Normal file
View File

@@ -0,0 +1,247 @@
#![cfg(not(feature = "parallel"))]
#![cfg(target_arch = "wasm32")]
#[cfg(test)]
mod tests {
use js_sys::{BigInt as JsBigInt, Date, Object, Uint8Array};
use rln::circuit::{Fr, TEST_TREE_DEPTH};
use rln::hashers::{hash_to_field_le, poseidon_hash, PoseidonHash};
use rln::protocol::{prepare_verify_input, rln_witness_from_values, serialize_witness};
use rln::utils::{bytes_le_to_fr, fr_to_bytes_le, IdSecret};
use rln_wasm::{
wasm_generate_rln_proof_with_witness, wasm_new, wasm_rln_witness_to_json,
wasm_verify_with_roots,
};
use rln_wasm_utils::wasm_key_gen;
use wasm_bindgen::{prelude::wasm_bindgen, JsValue};
use wasm_bindgen_test::{console_log, wasm_bindgen_test};
use zerokit_utils::{
OptimalMerkleProof, OptimalMerkleTree, ZerokitMerkleProof, ZerokitMerkleTree,
};
const WITNESS_CALCULATOR_JS: &str = include_str!("../resources/witness_calculator.js");
#[wasm_bindgen(inline_js = r#"
const fs = require("fs");
let witnessCalculatorModule = null;
module.exports = {
initWitnessCalculator: function(code) {
const processedCode = code
.replace(/export\s+async\s+function\s+builder/, 'async function builder')
.replace(/export\s*\{\s*builder\s*\};?/g, '');
const moduleFunc = new Function(processedCode + '\nreturn { builder };');
witnessCalculatorModule = moduleFunc();
if (typeof witnessCalculatorModule.builder !== 'function') {
return false;
}
return true;
},
readFile: function (path) {
return fs.readFileSync(path);
},
calculateWitness: async function (circom_path, inputs) {
const wasmFile = fs.readFileSync(circom_path);
const wasmFileBuffer = wasmFile.slice(
wasmFile.byteOffset,
wasmFile.byteOffset + wasmFile.byteLength
);
const witnessCalculator = await witnessCalculatorModule.builder(wasmFileBuffer);
const calculatedWitness = await witnessCalculator.calculateWitness(
inputs,
false
);
return JSON.stringify(calculatedWitness, (key, value) =>
typeof value === "bigint" ? value.toString() : value
);
},
};
"#)]
extern "C" {
#[wasm_bindgen(catch)]
fn initWitnessCalculator(code: &str) -> Result<bool, JsValue>;
#[wasm_bindgen(catch)]
fn readFile(path: &str) -> Result<Uint8Array, JsValue>;
#[wasm_bindgen(catch)]
async fn calculateWitness(circom_path: &str, input: Object) -> Result<JsValue, JsValue>;
}
const ARKZKEY_PATH: &str = "../rln/resources/tree_depth_10/rln_final.arkzkey";
const CIRCOM_PATH: &str = "../rln/resources/tree_depth_10/rln.wasm";
#[wasm_bindgen_test]
pub async fn rln_wasm_benchmark() {
// Initialize witness calculator
initWitnessCalculator(WITNESS_CALCULATOR_JS)
.expect("Failed to initialize witness calculator");
let mut results = String::from("\nbenchmarks:\n");
let iterations = 10;
let zkey = readFile(&ARKZKEY_PATH).expect("Failed to read zkey file");
// Benchmark wasm_new
let start_wasm_new = Date::now();
for _ in 0..iterations {
let _ = wasm_new(zkey.clone()).expect("Failed to create RLN instance");
}
let wasm_new_result = Date::now() - start_wasm_new;
// Create RLN instance for other benchmarks
let rln_instance = wasm_new(zkey).expect("Failed to create RLN instance");
let mut tree: OptimalMerkleTree<PoseidonHash> =
OptimalMerkleTree::default(TEST_TREE_DEPTH).expect("Failed to create tree");
// Benchmark wasm_key_gen
let start_wasm_key_gen = Date::now();
for _ in 0..iterations {
let _ = wasm_key_gen(true).expect("Failed to generate keys");
}
let wasm_key_gen_result = Date::now() - start_wasm_key_gen;
// Generate identity pair for other benchmarks
let mem_keys = wasm_key_gen(true).expect("Failed to generate keys");
let id_key = mem_keys.subarray(0, 32);
let (identity_secret_hash, _) = IdSecret::from_bytes_le(&id_key.to_vec());
let (id_commitment, _) = bytes_le_to_fr(&mem_keys.subarray(32, 64).to_vec());
let epoch = hash_to_field_le(b"test-epoch");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);
let identity_index = tree.leaves_set();
let user_message_limit = Fr::from(100);
let rate_commitment = poseidon_hash(&[id_commitment, user_message_limit]);
tree.update_next(rate_commitment)
.expect("Failed to update tree");
let message_id = Fr::from(0);
let signal: [u8; 32] = [0; 32];
let x = hash_to_field_le(&signal);
let merkle_proof: OptimalMerkleProof<PoseidonHash> = tree
.proof(identity_index)
.expect("Failed to generate merkle proof");
let rln_witness = rln_witness_from_values(
identity_secret_hash,
merkle_proof.get_path_elements(),
merkle_proof.get_path_index(),
x,
external_nullifier,
user_message_limit,
message_id,
)
.expect("Failed to create RLN witness");
let serialized_witness =
serialize_witness(&rln_witness).expect("Failed to serialize witness");
let witness_buffer = Uint8Array::from(&serialized_witness[..]);
let json_inputs = wasm_rln_witness_to_json(rln_instance, witness_buffer.clone())
.expect("Failed to convert witness to JSON");
// Benchmark calculateWitness
let start_calculate_witness = Date::now();
for _ in 0..iterations {
let _ = calculateWitness(&CIRCOM_PATH, json_inputs.clone())
.await
.expect("Failed to calculate witness");
}
let calculate_witness_result = Date::now() - start_calculate_witness;
// Calculate witness for other benchmarks
let calculated_witness_json = calculateWitness(&CIRCOM_PATH, json_inputs)
.await
.expect("Failed to calculate witness")
.as_string()
.expect("Failed to convert calculated witness to string");
let calculated_witness_vec_str: Vec<String> =
serde_json::from_str(&calculated_witness_json).expect("Failed to parse JSON");
let calculated_witness: Vec<JsBigInt> = calculated_witness_vec_str
.iter()
.map(|x| JsBigInt::new(&x.into()).expect("Failed to create JsBigInt"))
.collect();
// Benchmark wasm_generate_rln_proof_with_witness
let start_wasm_generate_rln_proof_with_witness = Date::now();
for _ in 0..iterations {
let _ = wasm_generate_rln_proof_with_witness(
rln_instance,
calculated_witness.clone(),
witness_buffer.clone(),
)
.expect("Failed to generate proof");
}
let wasm_generate_rln_proof_with_witness_result =
Date::now() - start_wasm_generate_rln_proof_with_witness;
// Generate a proof for other benchmarks
let proof =
wasm_generate_rln_proof_with_witness(rln_instance, calculated_witness, witness_buffer)
.expect("Failed to generate proof");
let proof_data = proof.to_vec();
let verify_input = prepare_verify_input(proof_data, &signal);
let input_buffer = Uint8Array::from(&verify_input[..]);
let root = tree.root();
let roots_serialized = fr_to_bytes_le(&root);
let roots_buffer = Uint8Array::from(&roots_serialized[..]);
// Benchmark wasm_verify_with_roots
let start_wasm_verify_with_roots = Date::now();
for _ in 0..iterations {
let _ =
wasm_verify_with_roots(rln_instance, input_buffer.clone(), roots_buffer.clone())
.expect("Failed to verify proof");
}
let wasm_verify_with_roots_result = Date::now() - start_wasm_verify_with_roots;
// Verify the proof with the root
let is_proof_valid = wasm_verify_with_roots(rln_instance, input_buffer, roots_buffer)
.expect("Failed to verify proof");
assert!(is_proof_valid, "verification failed");
// Format and display results
let format_duration = |duration_ms: f64| -> String {
let avg_ms = duration_ms / (iterations as f64);
if avg_ms >= 1000.0 {
format!("{:.3} s", avg_ms / 1000.0)
} else {
format!("{:.3} ms", avg_ms)
}
};
results.push_str(&format!("wasm_new: {}\n", format_duration(wasm_new_result)));
results.push_str(&format!(
"wasm_key_gen: {}\n",
format_duration(wasm_key_gen_result)
));
results.push_str(&format!(
"calculate_witness: {}\n",
format_duration(calculate_witness_result)
));
results.push_str(&format!(
"wasm_generate_rln_proof_with_witness: {}\n",
format_duration(wasm_generate_rln_proof_with_witness_result)
));
results.push_str(&format!(
"wasm_verify_with_roots: {}\n",
format_duration(wasm_verify_with_roots_result)
));
// Log the results
console_log!("{results}");
}
}

View File

@@ -1,6 +1,6 @@
[package]
name = "rln"
version = "0.7.0"
version = "0.8.0"
edition = "2021"
license = "MIT OR Apache-2.0"
description = "APIs to manage, compute and verify zkSNARK proofs and RLN primitives"
@@ -9,7 +9,7 @@ homepage = "https://vac.dev"
repository = "https://github.com/vacp2p/zerokit"
[lib]
crate-type = ["rlib", "staticlib"]
crate-type = ["rlib", "staticlib", "cdylib"]
bench = false
# This flag disables cargo doctests, i.e. testing example code snippets in documentation
@@ -18,77 +18,64 @@ doctest = false
[dependencies]
# ZKP Generation
ark-bn254 = { version = "0.5.0", features = ["std"] }
ark-ff = { version = "0.5.0", features = ["std", "asm"] }
ark-serialize = { version = "0.5.0", features = ["derive"] }
ark-relations = { version = "0.5.1", features = ["std"] }
ark-ff = { version = "0.5.0", default-features = false }
ark-ec = { version = "0.5.0", default-features = false }
ark-std = { version = "0.5.0", default-features = false }
ark-groth16 = { version = "0.5.0", features = [
"parallel",
], default-features = false }
ark-relations = { version = "0.5.0", default-features = false, features = [
"std",
] }
ark-circom = { version = "0.5.0" }
ark-r1cs-std = { version = "0.5.0" }
ark-poly = { version = "0.5.0", default-features = false }
ark-groth16 = { version = "0.5.0", default-features = false }
ark-serialize = { version = "0.5.0", default-features = false }
# error handling
color-eyre = "0.6.2"
thiserror = "2.0.11"
thiserror = "2.0.12"
# utilities
byteorder = "1.4.3"
rayon = { version = "1.10.0", optional = true }
byteorder = "1.5.0"
cfg-if = "1.0"
num-bigint = { version = "0.4.6", default-features = false, features = [
"rand",
] }
num-bigint = { version = "0.4.6", default-features = false, features = ["std"] }
num-traits = "0.2.19"
once_cell = "1.19.0"
lazy_static = "1.4.0"
once_cell = "1.21.3"
lazy_static = "1.5.0"
rand = "0.8.5"
rand_chacha = "0.3.1"
ruint = { version = "1.12.4", features = ["rand", "serde", "ark-ff-04"] }
ruint = { version = "1.15.0", features = ["rand", "serde", "ark-ff-04"] }
tiny-keccak = { version = "2.0.2", features = ["keccak"] }
utils = { package = "zerokit_utils", version = "0.5.2", path = "../utils/", default-features = false }
zeroize = "1.8"
tempfile = "3.21.0"
utils = { package = "zerokit_utils", version = "0.6.0", path = "../utils", default-features = false }
# serialization
prost = "0.13.1"
serde_json = "1.0"
prost = "0.14.1"
serde_json = "1.0.141"
serde = { version = "1.0", features = ["derive"] }
document-features = { version = "=0.2.10", optional = true }
document-features = { version = "0.2.11", optional = true }
[dev-dependencies]
sled = "0.34.7"
criterion = { version = "0.4.0", features = ["html_reports"] }
criterion = { version = "0.7.0", features = ["html_reports"] }
[features]
default = ["parallel", "pmtree-ft"]
parallel = [
"ark-ec/parallel",
"ark-ff/parallel",
"ark-std/parallel",
"ark-groth16/parallel",
"utils/parallel",
]
fullmerkletree = ["default"]
arkzkey = []
stateless = []
# Note: pmtree feature is still experimental
pmtree-ft = ["utils/pmtree-ft"]
parallel = [
"rayon",
"utils/parallel",
"ark-ff/parallel",
"ark-ec/parallel",
"ark-std/parallel",
"ark-poly/parallel",
"ark-groth16/parallel",
"ark-serialize/parallel",
]
fullmerkletree = [] # Pre-allocated tree, fastest access
optimalmerkletree = [] # Sparse storage, memory efficient
pmtree-ft = ["utils/pmtree-ft"] # Persistent storage, disk-based
[[bench]]
name = "pmtree_benchmark"
harness = false
[[bench]]
name = "circuit_loading_benchmark"
harness = false
[[bench]]
name = "circuit_loading_arkzkey_benchmark"
harness = false
required-features = ["arkzkey"]
required-features = ["pmtree-ft"]
[[bench]]
name = "poseidon_tree_benchmark"

View File

@@ -8,11 +8,7 @@ args = ["test", "--release", "--", "--nocapture"]
[tasks.test_stateless]
command = "cargo"
args = ["test", "--release", "--features", "stateless"]
[tasks.test_arkzkey]
command = "cargo"
args = ["test", "--release", "--features", "arkzkey"]
args = ["test", "--release", "--no-default-features", "--features", "stateless"]
[tasks.bench]
command = "cargo"

View File

@@ -1,8 +1,12 @@
# Zerokit RLN Module
[![Crates.io](https://img.shields.io/crates/v/rln.svg)](https://crates.io/crates/rln)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![License: Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
The Zerokit RLN Module provides a Rust implementation for working with Rate-Limiting Nullifier [RLN](https://rfc.vac.dev/spec/32/) zkSNARK proofs and primitives. This module allows you to:
The Zerokit RLN Module provides a Rust implementation for working with
Rate-Limiting Nullifier [RLN](https://rfc.vac.dev/spec/32/) zkSNARK proofs and primitives.
This module allows you to:
- Generate and verify RLN proofs
- Work with Merkle trees for commitment storage
@@ -11,7 +15,8 @@ The Zerokit RLN Module provides a Rust implementation for working with Rate-Limi
## Quick Start
> [!IMPORTANT]
> Version 0.6.1 is required for WASM support or x32 architecture. Current version doesn't support these platforms due to dependency issues. WASM support will return in a future release.
> Version 0.7.0 is the only version that does not support WASM or the x32 architecture.
> WASM support is available in version 0.8.0 and above.
### Add RLN as dependency
@@ -24,9 +29,15 @@ rln = { git = "https://github.com/vacp2p/zerokit" }
## Basic Usage Example
Note that we need to pass to RLN object constructor the path where the graph file (`graph.bin`, built for the input tree size), the corresponding proving key (`rln_final.zkey`) or (`rln_final_uncompr.arkzkey`) and verification key (`verification_key.arkvkey`, optional) are found.
The RLN object constructor requires the following files:
In the following we will use [cursors](https://doc.rust-lang.org/std/io/struct.Cursor.html) as readers/writers for interfacing with RLN public APIs.
- `rln_final.arkzkey`: The proving key in arkzkey format.
- `graph.bin`: The graph file built for the input tree size.
Additionally, `rln.wasm` is used for testing in the rln-wasm module.
In the following we will use [cursors](https://doc.rust-lang.org/std/io/struct.Cursor.html)
as readers/writers for interfacing with RLN public APIs.
```rust
use std::io::Cursor;
@@ -34,27 +45,27 @@ use std::io::Cursor;
use rln::{
circuit::Fr,
hashers::{hash_to_field, poseidon_hash},
protocol::{keygen, prepare_verify_input},
protocol::{keygen, prepare_prove_input, prepare_verify_input},
public::RLN,
utils::{fr_to_bytes_le, normalize_usize},
utils::fr_to_bytes_le,
};
use serde_json::json;
fn main() {
// 1. Initialize RLN with parameters:
// - the tree height;
// - the tree depth;
// - the tree config, if it is not defined, the default value will be set
let tree_height = 20;
let tree_depth = 10;
let input = Cursor::new(json!({}).to_string());
let mut rln = RLN::new(tree_height, input).unwrap();
let mut rln = RLN::new(tree_depth, input).unwrap();
// 2. Generate an identity keypair
let (identity_secret_hash, id_commitment) = keygen();
// 3. Add a rate commitment to the Merkle tree
let id_index = 10;
let user_message_limit = 10;
let rate_commitment = poseidon_hash(&[id_commitment, Fr::from(user_message_limit)]);
let user_message_limit = Fr::from(10);
let rate_commitment = poseidon_hash(&[id_commitment, user_message_limit]);
let mut buffer = Cursor::new(fr_to_bytes_le(&rate_commitment));
rln.set_leaf(id_index, &mut buffer).unwrap();
@@ -65,35 +76,41 @@ fn main() {
// We generate rln_identifier from a date seed and we ensure it is
// mapped to a field element by hashing-to-field its content
let rln_identifier = hash_to_field(b"test-rln-identifier");
// We generate an external nullifier
let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);
// We choose a message_id satisfying 0 <= message_id < user_message_limit
let message_id = Fr::from(1);
// 5. Generate and verify a proof for a message
let signal = b"RLN is awesome";
// 6. Prepare input for generate_rln_proof API
// input_data is [ identity_secret<32> | id_index<8> | external_nullifier<32> | user_message_limit<32> | message_id<32> | signal_len<8> | signal<var> ]
let mut serialized: Vec<u8> = Vec::new();
serialized.append(&mut fr_to_bytes_le(&identity_secret_hash));
serialized.append(&mut normalize_usize(id_index));
serialized.append(&mut fr_to_bytes_le(&Fr::from(user_message_limit)));
serialized.append(&mut fr_to_bytes_le(&Fr::from(1)));
serialized.append(&mut fr_to_bytes_le(&external_nullifier));
serialized.append(&mut normalize_usize(signal.len()));
serialized.append(&mut signal.to_vec());
// input_data is [ identity_secret<32> | id_index<8> | external_nullifier<32>
// | user_message_limit<32> | message_id<32> | signal_len<8> | signal<var> ]
let prove_input = prepare_prove_input(
identity_secret_hash,
id_index,
user_message_limit,
message_id,
external_nullifier,
signal,
);
// 7. Generate a RLN proof
// We generate a RLN proof for proof_input
let mut input_buffer = Cursor::new(serialized);
let mut input_buffer = Cursor::new(prove_input);
let mut output_buffer = Cursor::new(Vec::<u8>::new());
rln.generate_rln_proof(&mut input_buffer, &mut output_buffer)
.unwrap();
// We get the public outputs returned by the circuit evaluation
// The byte vector `proof_data` is serialized as `[ zk-proof | tree_root | external_nullifier | share_x | share_y | nullifier ]`.
// The byte vector `proof_data` is serialized as
// `[ zk-proof | tree_root | external_nullifier | share_x | share_y | nullifier ]`.
let proof_data = output_buffer.into_inner();
// 8. Verify a RLN proof
// Input buffer is serialized as `[proof_data | signal_len | signal ]`, where `proof_data` is (computed as) the output obtained by `generate_rln_proof`.
// Input buffer is serialized as `[proof_data | signal_len | signal ]`,
// where `proof_data` is (computed as) the output obtained by `generate_rln_proof`.
let verify_data = prepare_verify_input(proof_data, signal);
// We verify the zk-proof against the provided proof values
@@ -110,16 +127,24 @@ fn main() {
The `external nullifier` includes two parameters.
The first one is `epoch` and it's used to identify messages received in a certain time frame.
It usually corresponds to the current UNIX time but can also be set to a random value or generated by a seed, provided that it corresponds to a field element.
It usually corresponds to the current UNIX time but can also be set to a random value or generated by a seed,
provided that it corresponds to a field element.
The second one is `rln_identifier` and it's used to prevent a RLN ZK proof generated for one application to be re-used in another one.
The second one is `rln_identifier` and it's used to prevent an RLN ZK proof generated
for one application from being re-used in another one.
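As a rough sketch of how these two values combine (reusing the `hash_to_field` and `poseidon_hash` helpers from the example above; note the hashers module now also exposes explicit `hash_to_field_le`/`hash_to_field_be` variants, and the seed strings below are placeholders):

```rust
use rln::hashers::{hash_to_field, poseidon_hash};

fn main() {
    // Both components must be field elements, so raw seeds are hashed-to-field.
    let epoch = hash_to_field(b"unix-time-or-random-seed"); // placeholder seed
    let rln_identifier = hash_to_field(b"my-app-identifier"); // placeholder seed
    // The external nullifier binds a proof to this epoch and this application.
    let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);
    println!("external_nullifier = {external_nullifier}");
}
```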
### Features
- **Multiple Backend Support**: Choose between different zkey formats with feature flags
- `arkzkey`: Use the optimized Arkworks-compatible zkey format (faster loading)
- `stateless`: For stateless proof verification
- **Pre-compiled Circuits**: Ready-to-use circuits with Merkle tree height of 20
- **Stateless Mode**: Allows the use of RLN without maintaining the state of the Merkle tree.
- **Pre-compiled Circuits**: Ready-to-use circuits with Merkle tree depth of 20
- **Wasm Support**: WebAssembly bindings via rln-wasm crate with features like:
- Browser and Node.js compatibility
- Optional parallel feature support using [wasm-bindgen-rayon](https://github.com/RReverser/wasm-bindgen-rayon)
- Headless browser testing capabilities
- **Merkle Tree Implementations**: Multiple tree variants optimized for different use cases:
- **Full Merkle Tree**: Fastest access with complete pre-allocated tree in memory. Best for frequent random access (enable with `fullmerkletree` feature).
- **Optimal Merkle Tree**: Memory-efficient sparse storage using HashMap. Ideal for partially populated trees (enable with `optimalmerkletree` feature).
- **Persistent Merkle Tree**: Disk-based storage with [sled](https://github.com/spacejam/sled) for persistence across application restarts and large datasets (enable with `pmtree-ft` feature).
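As a minimal sketch of constructing one of the variants above directly (mirroring the benchmark code in this repository; assumes the corresponding tree feature is enabled):

```rust
use rln::circuit::{Fr, TEST_TREE_DEPTH};
use rln::hashers::PoseidonHash;
use utils::{FullMerkleTree, ZerokitMerkleTree};

fn main() {
    // Pre-allocated tree at the depth used by the bundled circuit.
    let mut tree = FullMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
    tree.set(0, Fr::from(42)).unwrap();
    println!("root = {}", tree.root());
}
```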
## Building and Testing
@@ -140,20 +165,22 @@ cargo make build
# Test with default features
cargo make test
# Test with specific features
cargo make test_arkzkey # For arkzkey feature
cargo make test_stateless # For stateless feature
# Test with stateless features
cargo make test_stateless
```
## Advanced: Custom Circuit Compilation
The `rln` (<https://github.com/rate-limiting-nullifier/circom-rln>) repository, which contains the RLN circuit implementation is using for pre-compiled RLN circuit for zerokit RLN.
The `rln` (<https://github.com/rate-limiting-nullifier/circom-rln>) repository,
which contains the RLN circuit implementation, provides the pre-compiled RLN circuit used by zerokit RLN.
If you want to compile your own RLN circuit, you can follow the instructions below.
### 1. Compile ZK Circuits for getting the zkey and verification key files
### 1. Compile ZK Circuits for getting the zkey file
This script actually generates not only the zkey and verification key files for the RLN circuit, but also the execution wasm file used for witness calculation.
However, the wasm file is not needed for the `rln` module, because current implementation uses the iden3 graph file for witness calculation.
This script actually generates not only the zkey file for the RLN circuit,
but also the execution wasm file used for witness calculation.
However, the wasm file is not needed for the `rln` module,
because the current implementation uses the iden3 graph file for witness calculation.
This graph file is generated by the `circom-witnesscalc` tool in [step 2](#2-generate-witness-calculation-graph).
To customize the circuit parameters, modify `circom-rln/circuits/rln.circom`:
@@ -166,19 +193,27 @@ component main { public [x, externalNullifier] } = RLN(N, M);
Where:
- `N`: Merkle tree height, determining the maximum membership capacity (2^N members).
- `N`: Merkle tree depth, determining the maximum membership capacity (2^N members).
- `M`: Bit size for range checks, setting an upper bound for the number of messages per epoch (2^M messages).
> [!NOTE]
> However, if `N` is too big, this might require a larger Powers of Tau ceremony than the one hardcoded in `./scripts/build-circuits.sh`, which is `2^14`. \
> In such case, we refer to the official [Circom documentation](https://docs.circom.io/getting-started/proving-circuits/#powers-of-tau) for instructions on how to run an appropriate Powers of Tau ceremony and Phase 2 in order to compile the desired circuit. \
> Additionally, while `M` sets an upper bound on the number of messages per epoch (`2^M`), you can configure lower message limit for your use case, as long as it satisfies `user_message_limit ≤ 2^M`. \
> Currently, the `rln` module comes with a [pre-compiled](https://github.com/vacp2p/zerokit/tree/master/rln/resources) RLN circuit with a Merkle tree of height `20` and a bit size of `16`, allowing up to `2^20` registered members and a `2^16` message limit per epoch.
> However, if `N` is too big, this might require a larger Powers of Tau ceremony
> than the one hardcoded in `./scripts/build-circuits.sh`, which is `2^14`.
> In such case, we refer to the official
> [Circom documentation](https://docs.circom.io/getting-started/proving-circuits/#powers-of-tau)
> for instructions on how to run an appropriate Powers of Tau ceremony and Phase 2 in order to compile the desired circuit. \
> Additionally, while `M` sets an upper bound on the number of messages per epoch (`2^M`),
> you can configure a lower message limit for your use case, as long as it satisfies `user_message_limit ≤ 2^M`. \
> Currently, the `rln` module comes with a [pre-compiled](https://github.com/vacp2p/zerokit/tree/master/rln/resources)
> RLN circuit with a Merkle tree of depth `20` and a bit size of `16`,
> allowing up to `2^20` registered members and a `2^16` message limit per epoch.
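To make the capacity arithmetic concrete, here is a toy check (not part of the build) for the bundled parameters:

```rust
fn main() {
    let n = 20u32; // Merkle tree depth of the pre-compiled circuit
    let m = 16u32; // bit size for range checks
    assert_eq!(2u64.pow(n), 1_048_576); // up to 2^20 registered members
    assert_eq!(2u64.pow(m), 65_536); // up to 2^16 messages per epoch
}
```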
#### Install circom compiler
You can follow the instructions below or refer to the [installing Circom](https://docs.circom.io/getting-started/installation/#installing-circom) guide for more details, but make sure to use the specific version `v2.1.0`.
You can follow the instructions below or refer to the
[installing Circom](https://docs.circom.io/getting-started/installation/#installing-circom) guide for more details,
but make sure to use the specific version `v2.1.0`.
```sh
# Clone the circom repository
@@ -215,7 +250,8 @@ cp zkeyFiles/rln/final.zkey <path_to_rln_final.zkey>
### 2. Generate Witness Calculation Graph
The execution graph file used for witness calculation can be compiled following instructions in the [circom-witnesscalc](https://github.com/iden3/circom-witnesscalc) repository.
The execution graph file used for witness calculation can be compiled following instructions
in the [circom-witnesscalc](https://github.com/iden3/circom-witnesscalc) repository.
As mentioned in step 1, we should use `rln.circom` file from `circom-rln` repository.
```sh
@@ -232,11 +268,14 @@ cargo build
cargo run --package circom_witnesscalc --bin build-circuit ../circom-rln/circuits/rln.circom <path_to_graph.bin>
```
The `rln` module comes with [pre-compiled](https://github.com/vacp2p/zerokit/tree/master/rln/resources) execution graph files for the RLN circuit.
The `rln` module comes with [pre-compiled](https://github.com/vacp2p/zerokit/tree/master/rln/resources)
execution graph files for the RLN circuit.
### 3. Generate Arkzkey Representation for zkey and verification key files
### 3. Generate Arkzkey Representation for zkey file
For faster loading, compile the zkey file into the arkzkey format using [ark-zkey](https://github.com/seemenkina/ark-zkey). This is fork of the [original](https://github.com/zkmopro/ark-zkey) repository with the uncompressed zkey support.
For faster loading, compile the zkey file into the arkzkey format using
[ark-zkey](https://github.com/seemenkina/ark-zkey).
This is a fork of the [original](https://github.com/zkmopro/ark-zkey) repository with uncompressed arkzkey support.
```sh
# Clone the ark-zkey repository
@@ -249,7 +288,21 @@ cd ark-zkey && cargo build
cargo run --bin arkzkey-util <path_to_rln_final.zkey>
```
Currently, the `rln` module comes with [pre-compiled](https://github.com/vacp2p/zerokit/tree/master/rln/resources) arkzkey keys for the RLN circuit.
This will generate the `rln_final.arkzkey` file, which is used by the `rln` module.
Currently, the `rln` module comes with
[pre-compiled](https://github.com/vacp2p/zerokit/tree/master/rln/resources) arkzkey keys for the RLN circuit.
> [!NOTE]
> You can use this [convert_zkey.sh](./convert_zkey.sh) script
> to automate the process of generating the arkzkey file from any zkey file.
Run the script as follows:
```sh
chmod +x ./convert_zkey.sh
./convert_zkey.sh <path_to_rln_final.zkey>
```
## Get involved

View File

@@ -1,25 +0,0 @@
use criterion::{criterion_group, criterion_main, Criterion};
use rln::circuit::{read_arkzkey_from_bytes_uncompressed, ARKZKEY_BYTES};
pub fn uncompressed_bench(c: &mut Criterion) {
let arkzkey = ARKZKEY_BYTES.to_vec();
let size = arkzkey.len() as f32;
println!(
"Size of uncompressed arkzkey: {:.2?} MB",
size / 1024.0 / 1024.0
);
c.bench_function("arkzkey::arkzkey_from_raw_uncompressed", |b| {
b.iter(|| {
let r = read_arkzkey_from_bytes_uncompressed(&arkzkey);
assert_eq!(r.is_ok(), true);
})
});
}
criterion_group! {
name = benches;
config = Criterion::default().sample_size(10);
targets = uncompressed_bench
}
criterion_main!(benches);

View File

@@ -1,24 +0,0 @@
use ark_circom::read_zkey;
use criterion::{criterion_group, criterion_main, Criterion};
use std::io::Cursor;
pub fn zkey_load_benchmark(c: &mut Criterion) {
let zkey = rln::circuit::ZKEY_BYTES.to_vec();
let size = zkey.len() as f32;
println!("Size of zkey: {:.2?} MB", size / 1024.0 / 1024.0);
c.bench_function("zkey::zkey_from_raw", |b| {
b.iter(|| {
let mut reader = Cursor::new(zkey.clone());
let r = read_zkey(&mut reader);
assert_eq!(r.is_ok(), true);
})
});
}
criterion_group! {
name = benches;
config = Criterion::default().sample_size(10);
targets = zkey_load_benchmark
}
criterion_main!(benches);

View File

@@ -5,7 +5,7 @@ use utils::ZerokitMerkleTree;
pub fn pmtree_benchmark(c: &mut Criterion) {
let mut tree = PmTree::default(2).unwrap();
let leaves: Vec<Fr> = (0..4).map(|s| Fr::from(s)).collect();
let leaves: Vec<Fr> = (0..4).map(Fr::from).collect();
c.bench_function("Pmtree::set", |b| {
b.iter(|| {
@@ -21,17 +21,11 @@ pub fn pmtree_benchmark(c: &mut Criterion) {
c.bench_function("Pmtree::override_range", |b| {
b.iter(|| {
tree.override_range(0, leaves.clone(), [0, 1, 2, 3])
tree.override_range(0, leaves.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
})
});
c.bench_function("Pmtree::compute_root", |b| {
b.iter(|| {
tree.compute_root().unwrap();
})
});
c.bench_function("Pmtree::get", |b| {
b.iter(|| {
tree.get(0).unwrap();

View File

@@ -1,18 +1,18 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use rln::{
circuit::{Fr, TEST_TREE_HEIGHT},
circuit::{Fr, TEST_TREE_DEPTH},
hashers::PoseidonHash,
};
use utils::{FullMerkleTree, OptimalMerkleTree, ZerokitMerkleTree};
pub fn get_leaves(n: u32) -> Vec<Fr> {
(0..n).map(|s| Fr::from(s)).collect()
(0..n).map(Fr::from).collect()
}
pub fn optimal_merkle_tree_poseidon_benchmark(c: &mut Criterion) {
c.bench_function("OptimalMerkleTree::<Poseidon>::full_height_gen", |b| {
c.bench_function("OptimalMerkleTree::<Poseidon>::full_depth_gen", |b| {
b.iter(|| {
OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
})
});
@@ -20,7 +20,7 @@ pub fn optimal_merkle_tree_poseidon_benchmark(c: &mut Criterion) {
for &n in [1u32, 10, 100].iter() {
let leaves = get_leaves(n);
let mut tree = OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
let mut tree = OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
group.bench_function(
BenchmarkId::new("OptimalMerkleTree::<Poseidon>::set", n),
|b| {
@@ -41,9 +41,9 @@ pub fn optimal_merkle_tree_poseidon_benchmark(c: &mut Criterion) {
}
pub fn full_merkle_tree_poseidon_benchmark(c: &mut Criterion) {
c.bench_function("FullMerkleTree::<Poseidon>::full_height_gen", |b| {
c.bench_function("FullMerkleTree::<Poseidon>::full_depth_gen", |b| {
b.iter(|| {
FullMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
FullMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
})
});
@@ -51,7 +51,7 @@ pub fn full_merkle_tree_poseidon_benchmark(c: &mut Criterion) {
for &n in [1u32, 10, 100].iter() {
let leaves = get_leaves(n);
let mut tree = FullMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
let mut tree = FullMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
group.bench_function(
BenchmarkId::new("FullMerkleTree::<Poseidon>::set", n),
|b| {

rln/convert_zkey.sh Executable file (53 lines)
View File

@@ -0,0 +1,53 @@
#!/bin/bash
# Convert zkey to arkzkey using /tmp directory
# Usage: ./convert_zkey.sh <path_to_zkey_file>
set -e
# Check input
if [ $# -eq 0 ]; then
echo "Usage: $0 <path_to_zkey_file>"
exit 1
fi
ZKEY_FILE="$1"
if [ ! -f "$ZKEY_FILE" ]; then
echo "Error: File '$ZKEY_FILE' does not exist"
exit 1
fi
# Get absolute path before changing directories
ZKEY_ABSOLUTE_PATH=$(realpath "$ZKEY_FILE")
# Create temp directory in /tmp
TEMP_DIR="/tmp/ark-zkey-$$"
echo "Using temp directory: $TEMP_DIR"
# Cleanup function
cleanup() {
echo "Cleaning up temp directory: $TEMP_DIR"
rm -rf "$TEMP_DIR"
}
# Setup cleanup trap
trap cleanup EXIT
# Create temp directory and clone ark-zkey
mkdir -p "$TEMP_DIR"
cd "$TEMP_DIR"
git clone https://github.com/seemenkina/ark-zkey.git
cd ark-zkey
cargo build
# Convert
cargo run --bin arkzkey-util "$ZKEY_ABSOLUTE_PATH"
# Check if arkzkey file was created (tool creates it in same directory as input)
ARKZKEY_FILE="${ZKEY_ABSOLUTE_PATH%.zkey}.arkzkey"
if [ ! -f "$ARKZKEY_FILE" ]; then
echo "Could not find generated .arkzkey file at $ARKZKEY_FILE"
exit 1
fi

7 binary resource files changed (not shown)

rln/src/circuit/error.rs Normal file (7 lines)
View File

@@ -0,0 +1,7 @@
#[derive(Debug, thiserror::Error)]
pub enum ZKeyReadError {
#[error("No proving key found!")]
EmptyBytes,
#[error("{0}")]
SerializationError(#[from] ark_serialize::SerializationError),
}

View File

@@ -8,19 +8,33 @@ pub mod storage;
use ruint::aliases::U256;
use std::collections::HashMap;
use storage::deserialize_witnesscalc_graph;
use zeroize::zeroize_flat_type;
use crate::circuit::iden3calc::graph::fr_to_u256;
use crate::circuit::Fr;
use graph::{fr_to_u256, Node};
use crate::utils::FrOrSecret;
use graph::Node;
pub type InputSignalsInfo = HashMap<String, (usize, usize)>;
pub fn calc_witness<I: IntoIterator<Item = (String, Vec<Fr>)>>(
pub fn calc_witness<I: IntoIterator<Item = (String, Vec<FrOrSecret>)>>(
inputs: I,
graph_data: &[u8],
) -> Vec<Fr> {
let inputs: HashMap<String, Vec<U256>> = inputs
let mut inputs: HashMap<String, Vec<U256>> = inputs
.into_iter()
.map(|(key, value)| (key, value.iter().map(fr_to_u256).collect()))
.map(|(key, value)| {
(
key,
value
.iter()
.map(|f_| match f_ {
FrOrSecret::IdSecret(s) => s.to_u256(),
FrOrSecret::Fr(f) => fr_to_u256(f),
})
.collect(),
)
})
.collect();
let (nodes, signals, input_mapping): (Vec<Node>, Vec<usize>, InputSignalsInfo) =
@@ -28,8 +42,15 @@ pub fn calc_witness<I: IntoIterator<Item = (String, Vec<Fr>)>>(
let mut inputs_buffer = get_inputs_buffer(get_inputs_size(&nodes));
populate_inputs(&inputs, &input_mapping, &mut inputs_buffer);
graph::evaluate(&nodes, inputs_buffer.as_slice(), &signals)
if let Some(v) = inputs.get_mut("identitySecret") {
// Roughly equivalent to: v[0] = U256::ZERO;
unsafe { zeroize_flat_type(v) };
}
let res = graph::evaluate(&nodes, inputs_buffer.as_slice(), &signals);
inputs_buffer.iter_mut().for_each(|i| {
unsafe { zeroize_flat_type(i) };
});
res
}
fn get_inputs_size(nodes: &[Node]) -> usize {
@@ -56,7 +77,7 @@ fn populate_inputs(
for (key, value) in input_list {
let (offset, len) = inputs_info[key];
if len != value.len() {
panic!("Invalid input length for {}", key);
panic!("Invalid input length for {key}");
}
for (i, v) in value.iter().enumerate() {

View File

@@ -10,11 +10,11 @@ use std::{
cmp::Ordering,
collections::HashMap,
error::Error,
ops::{BitAnd, BitOr, BitXor, Deref, Shl, Shr},
ops::{Deref, Shl, Shr},
};
use crate::circuit::iden3calc::proto;
use crate::circuit::Fr;
use crate::iden3calc::proto;
pub const M: U256 =
uint!(21888242871839275222246405745257275088548364400416034343698204186575808495617_U256);
@@ -944,14 +944,14 @@ mod tests {
let x = M.div(uint!(2_U256));
println!("x: {:?}", x.as_limbs());
println!("x: {}", M);
println!("x: {M}");
}
#[test]
fn test_2() {
let nodes: Vec<Node> = vec![];
// let node = nodes[0];
let node = nodes.get(0);
println!("{:?}", node);
let node = nodes.first();
println!("{node:?}");
}
}

View File

@@ -7,7 +7,7 @@ use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use prost::Message;
use std::io::{Read, Write};
use crate::iden3calc::{
use crate::circuit::iden3calc::{
graph,
graph::{Operation, TresOperation, UnoOperation},
proto, InputSignalsInfo,
@@ -419,13 +419,13 @@ mod tests {
let mut r = WriteBackReader::new(std::io::Cursor::new(&data));
let buf = &mut [0u8; 5];
r.read(buf).unwrap();
r.read_exact(buf).unwrap();
assert_eq!(buf, &[1, 2, 3, 4, 5]);
// return [4, 5] to reader
r.write(&buf[3..]).unwrap();
r.write_all(&buf[3..]).unwrap();
// return [2, 3] to reader
r.write(&buf[1..3]).unwrap();
r.write_all(&buf[1..3]).unwrap();
buf.fill(0);

View File

@@ -1,46 +1,35 @@
// This crate provides interfaces for the zero-knowledge circuit and keys
pub mod error;
pub mod iden3calc;
pub mod qap;
use ::lazy_static::lazy_static;
use ark_bn254::{
Bn254, Fq as ArkFq, Fq2 as ArkFq2, Fr as ArkFr, G1Affine as ArkG1Affine,
G1Projective as ArkG1Projective, G2Affine as ArkG2Affine, G2Projective as ArkG2Projective,
};
use ark_groth16::{ProvingKey, VerifyingKey};
use ark_groth16::ProvingKey;
use ark_relations::r1cs::ConstraintMatrices;
use cfg_if::cfg_if;
use color_eyre::{Report, Result};
use crate::iden3calc::calc_witness;
use crate::circuit::error::ZKeyReadError;
use crate::circuit::iden3calc::calc_witness;
#[cfg(feature = "arkzkey")]
use {
ark_ff::Field, ark_serialize::CanonicalDeserialize, ark_serialize::CanonicalSerialize,
color_eyre::eyre::WrapErr,
};
use {ark_ff::Field, ark_serialize::CanonicalDeserialize, ark_serialize::CanonicalSerialize};
#[cfg(not(feature = "arkzkey"))]
use {ark_circom::read_zkey, std::io::Cursor};
use crate::utils::FrOrSecret;
#[cfg(feature = "arkzkey")]
pub const ARKZKEY_BYTES: &[u8] = include_bytes!("../resources/tree_height_20/rln_final.arkzkey");
pub const ARKZKEY_BYTES: &[u8] = include_bytes!("../../resources/tree_depth_10/rln_final.arkzkey");
pub const ZKEY_BYTES: &[u8] = include_bytes!("../resources/tree_height_20/rln_final.zkey");
const GRAPH_BYTES: &[u8] = include_bytes!("../resources/tree_height_20/graph.bin");
#[cfg(not(target_arch = "wasm32"))]
const GRAPH_BYTES: &[u8] = include_bytes!("../../resources/tree_depth_10/graph.bin");
lazy_static! {
static ref ZKEY: (ProvingKey<Curve>, ConstraintMatrices<Fr>) = {
cfg_if! {
if #[cfg(feature = "arkzkey")] {
read_arkzkey_from_bytes_uncompressed(ARKZKEY_BYTES).expect("Failed to read arkzkey")
} else {
let mut reader = Cursor::new(ZKEY_BYTES);
read_zkey(&mut reader).expect("Failed to read zkey")
}
}
};
static ref ARKZKEY: (ProvingKey<Curve>, ConstraintMatrices<Fr>) =
read_arkzkey_from_bytes_uncompressed(ARKZKEY_BYTES).expect("Failed to read arkzkey");
}
pub const TEST_TREE_HEIGHT: usize = 20;
pub const TEST_TREE_DEPTH: usize = 10;
// The following types define the pairing friendly elliptic curve, the underlying finite fields and groups default to this module
// Note that proofs are serialized assuming Fr to be 4x8 = 32 bytes in size. Hence, changing to a curve with different encoding will make proof verification to fail
@@ -54,56 +43,32 @@ pub type G2Affine = ArkG2Affine;
pub type G2Projective = ArkG2Projective;
// Loads the proving key using a bytes vector
pub fn zkey_from_raw(zkey_data: &[u8]) -> Result<(ProvingKey<Curve>, ConstraintMatrices<Fr>)> {
pub fn zkey_from_raw(
zkey_data: &[u8],
) -> Result<(ProvingKey<Curve>, ConstraintMatrices<Fr>), ZKeyReadError> {
if zkey_data.is_empty() {
return Err(Report::msg("No proving key found!"));
return Err(ZKeyReadError::EmptyBytes);
}
let proving_key_and_matrices = match () {
#[cfg(feature = "arkzkey")]
() => read_arkzkey_from_bytes_uncompressed(zkey_data)?,
#[cfg(not(feature = "arkzkey"))]
() => {
let mut reader = Cursor::new(zkey_data);
read_zkey(&mut reader)?
}
};
let proving_key_and_matrices = read_arkzkey_from_bytes_uncompressed(zkey_data)?;
Ok(proving_key_and_matrices)
}
// Loads the proving key
#[cfg(not(target_arch = "wasm32"))]
pub fn zkey_from_folder() -> &'static (ProvingKey<Curve>, ConstraintMatrices<Fr>) {
&ZKEY
&ARKZKEY
}
// Loads the verification key from a bytes vector
pub fn vk_from_raw(zkey_data: &[u8]) -> Result<VerifyingKey<Curve>> {
if !zkey_data.is_empty() {
let (proving_key, _matrices) = zkey_from_raw(zkey_data)?;
return Ok(proving_key.vk);
}
Err(Report::msg("No proving/verification key found!"))
}
// Checks verification key to be correct with respect to proving key
pub fn check_vk_from_zkey(verifying_key: VerifyingKey<Curve>) -> Result<()> {
let (proving_key, _matrices) = zkey_from_folder();
if proving_key.vk == verifying_key {
Ok(())
} else {
Err(Report::msg("verifying_keys are not equal"))
}
}
pub fn calculate_rln_witness<I: IntoIterator<Item = (String, Vec<Fr>)>>(
pub fn calculate_rln_witness<I: IntoIterator<Item = (String, Vec<FrOrSecret>)>>(
inputs: I,
graph_data: &[u8],
) -> Vec<Fr> {
calc_witness(inputs, graph_data)
}
#[cfg(not(target_arch = "wasm32"))]
pub fn graph_from_folder() -> &'static [u8] {
GRAPH_BYTES
}
@@ -113,11 +78,9 @@ pub fn graph_from_folder() -> &'static [u8] {
// without print and allow to choose between compressed and uncompressed arkzkey
////////////////////////////////////////////////////////
#[cfg(feature = "arkzkey")]
#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq)]
pub struct SerializableProvingKey(pub ProvingKey<Bn254>);
#[cfg(feature = "arkzkey")]
#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq)]
pub struct SerializableConstraintMatrices<F: Field> {
pub num_instance_variables: usize,
@@ -131,29 +94,25 @@ pub struct SerializableConstraintMatrices<F: Field> {
pub c: SerializableMatrix<F>,
}
#[cfg(feature = "arkzkey")]
#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq)]
pub struct SerializableMatrix<F: Field> {
pub data: Vec<Vec<(F, usize)>>,
}
#[cfg(feature = "arkzkey")]
pub fn read_arkzkey_from_bytes_uncompressed(
arkzkey_data: &[u8],
) -> Result<(ProvingKey<Curve>, ConstraintMatrices<Fr>)> {
) -> Result<(ProvingKey<Curve>, ConstraintMatrices<Fr>), ZKeyReadError> {
if arkzkey_data.is_empty() {
return Err(Report::msg("No proving key found!"));
return Err(ZKeyReadError::EmptyBytes);
}
let mut cursor = std::io::Cursor::new(arkzkey_data);
let serialized_proving_key =
SerializableProvingKey::deserialize_uncompressed_unchecked(&mut cursor)
.wrap_err("Failed to deserialize proving key")?;
SerializableProvingKey::deserialize_uncompressed_unchecked(&mut cursor)?;
let serialized_constraint_matrices =
SerializableConstraintMatrices::deserialize_uncompressed_unchecked(&mut cursor)
.wrap_err("Failed to deserialize constraint matrices")?;
SerializableConstraintMatrices::deserialize_uncompressed_unchecked(&mut cursor)?;
// Get on right form for API
let proving_key: ProvingKey<Bn254> = serialized_proving_key.0;

rln/src/circuit/qap.rs Normal file (120 lines)
View File

@@ -0,0 +1,120 @@
// This file is based on the code by arkworks. Its preimage can be found here:
// https://github.com/arkworks-rs/circom-compat/blob/3c95ed98e23a408b4d99a53e483a9bba39685a4e/src/circom/qap.rs
use ark_ff::PrimeField;
use ark_groth16::r1cs_to_qap::{evaluate_constraint, LibsnarkReduction, R1CSToQAP};
use ark_poly::EvaluationDomain;
use ark_relations::r1cs::{ConstraintMatrices, ConstraintSystemRef, SynthesisError};
use ark_std::{cfg_into_iter, cfg_iter, cfg_iter_mut, vec};
#[cfg(feature = "parallel")]
use rayon::iter::{
IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator,
IntoParallelRefMutIterator, ParallelIterator,
};
/// Implements the witness map used by snarkjs. The arkworks witness map calculates the
/// coefficients of H through computing (AB-C)/Z in the evaluation domain and going back to the
/// coefficients domain. snarkjs instead precomputes the Lagrange form of the powers of tau bases
/// in a domain twice as large and the witness map is computed as the odd coefficients of (AB-C)
/// in that domain. This serves as HZ when computing the C proof element.
pub struct CircomReduction;
impl R1CSToQAP for CircomReduction {
#[allow(clippy::type_complexity)]
fn instance_map_with_evaluation<F: PrimeField, D: EvaluationDomain<F>>(
cs: ConstraintSystemRef<F>,
t: &F,
) -> Result<(Vec<F>, Vec<F>, Vec<F>, F, usize, usize), SynthesisError> {
LibsnarkReduction::instance_map_with_evaluation::<F, D>(cs, t)
}
fn witness_map_from_matrices<F: PrimeField, D: EvaluationDomain<F>>(
matrices: &ConstraintMatrices<F>,
num_inputs: usize,
num_constraints: usize,
full_assignment: &[F],
) -> Result<Vec<F>, SynthesisError> {
let zero = F::zero();
let domain =
D::new(num_constraints + num_inputs).ok_or(SynthesisError::PolynomialDegreeTooLarge)?;
let domain_size = domain.size();
let mut a = vec![zero; domain_size];
let mut b = vec![zero; domain_size];
#[allow(unexpected_cfgs)]
cfg_iter_mut!(a[..num_constraints])
.zip(cfg_iter_mut!(b[..num_constraints]))
.zip(cfg_iter!(&matrices.a))
.zip(cfg_iter!(&matrices.b))
.for_each(|(((a, b), at_i), bt_i)| {
*a = evaluate_constraint(at_i, full_assignment);
*b = evaluate_constraint(bt_i, full_assignment);
});
{
let start = num_constraints;
let end = start + num_inputs;
a[start..end].clone_from_slice(&full_assignment[..num_inputs]);
}
let mut c = vec![zero; domain_size];
#[allow(unexpected_cfgs)]
cfg_iter_mut!(c[..num_constraints])
.zip(&a)
.zip(&b)
.for_each(|((c_i, &a), &b)| {
*c_i = a * b;
});
domain.ifft_in_place(&mut a);
domain.ifft_in_place(&mut b);
let root_of_unity = {
let domain_size_double = 2 * domain_size;
let domain_double =
D::new(domain_size_double).ok_or(SynthesisError::PolynomialDegreeTooLarge)?;
domain_double.element(1)
};
D::distribute_powers_and_mul_by_const(&mut a, root_of_unity, F::one());
D::distribute_powers_and_mul_by_const(&mut b, root_of_unity, F::one());
domain.fft_in_place(&mut a);
domain.fft_in_place(&mut b);
let mut ab = domain.mul_polynomials_in_evaluation_domain(&a, &b);
drop(a);
drop(b);
domain.ifft_in_place(&mut c);
D::distribute_powers_and_mul_by_const(&mut c, root_of_unity, F::one());
domain.fft_in_place(&mut c);
#[allow(unexpected_cfgs)]
cfg_iter_mut!(ab)
.zip(c)
.for_each(|(ab_i, c_i)| *ab_i -= &c_i);
Ok(ab)
}
fn h_query_scalars<F: PrimeField, D: EvaluationDomain<F>>(
max_power: usize,
t: F,
_: F,
delta_inverse: F,
) -> Result<Vec<F>, SynthesisError> {
// the usual H query has domain-1 powers. Z has domain powers. So HZ has 2*domain-1 powers.
#[allow(unexpected_cfgs)]
let mut scalars = cfg_into_iter!(0..2 * max_power + 1)
.map(|i| delta_inverse * t.pow([i as u64]))
.collect::<Vec<_>>();
let domain_size = scalars.len();
let domain = D::new(domain_size).ok_or(SynthesisError::PolynomialDegreeTooLarge)?;
// generate the lagrange coefficients
domain.ifft_in_place(&mut scalars);
#[allow(unexpected_cfgs)]
Ok(cfg_into_iter!(scalars).skip(1).step_by(2).collect())
}
}

rln/src/error.rs Normal file (79 lines)
View File

@@ -0,0 +1,79 @@
use crate::circuit::error::ZKeyReadError;
use ark_bn254::Fr;
use ark_relations::r1cs::SynthesisError;
use ark_serialize::SerializationError;
use num_bigint::{BigInt, ParseBigIntError};
use std::array::TryFromSliceError;
use std::num::TryFromIntError;
use std::string::FromUtf8Error;
use thiserror::Error;
use utils::error::{FromConfigError, ZerokitMerkleTreeError};
#[derive(Debug, thiserror::Error)]
pub enum ConversionError {
#[error("Expected radix 10 or 16")]
WrongRadix,
#[error("{0}")]
ParseBigInt(#[from] ParseBigIntError),
#[error("{0}")]
ToUsize(#[from] TryFromIntError),
#[error("{0}")]
FromSlice(#[from] TryFromSliceError),
#[error("Input data too short: expected at least {expected} bytes, got {actual} bytes")]
InsufficientData { expected: usize, actual: usize },
}
#[derive(Error, Debug)]
pub enum ProofError {
#[error("{0}")]
ProtocolError(#[from] ProtocolError),
#[error("Error producing proof: {0}")]
SynthesisError(#[from] SynthesisError),
}
#[derive(Debug, thiserror::Error)]
pub enum ProtocolError {
#[error("{0}")]
Conversion(#[from] ConversionError),
#[error("Expected to read {0} bytes but read only {1} bytes")]
InvalidReadLen(usize, usize),
#[error("Cannot convert bigint {0:?} to biguint")]
BigUintConversion(BigInt),
#[error("{0}")]
JsonError(#[from] serde_json::Error),
#[error("Message id ({0}) is not within user_message_limit ({1})")]
InvalidMessageId(Fr, Fr),
}
#[derive(Debug, thiserror::Error)]
pub enum ComputeIdSecretError {
/// Usually it means that the same signal is used to recover the user secret hash
#[error("Cannot recover secret: division by zero")]
DivisionByZero,
}
#[derive(Debug, thiserror::Error)]
pub enum RLNError {
#[error("I/O error: {0}")]
IO(#[from] std::io::Error),
#[error("Utf8 error: {0}")]
Utf8(#[from] FromUtf8Error),
#[error("Serde json error: {0}")]
JSON(#[from] serde_json::Error),
#[error("Config error: {0}")]
Config(#[from] FromConfigError),
#[error("Serialization error: {0}")]
Serialization(#[from] SerializationError),
#[error("Merkle tree error: {0}")]
MerkleTree(#[from] ZerokitMerkleTreeError),
#[error("ZKey error: {0}")]
ZKey(#[from] ZKeyReadError),
#[error("Conversion error: {0}")]
Conversion(#[from] ConversionError),
#[error("Protocol error: {0}")]
Protocol(#[from] ProtocolError),
#[error("Proof error: {0}")]
Proof(#[from] ProofError),
#[error("Unable to extract secret")]
RecoverSecret(#[from] ComputeIdSecretError),
}
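// Hypothetical usage sketch (illustration only, not part of the original file):
// because every lower-level error converts into `RLNError` via `#[from]`,
// fallible callers can simply propagate with `?`.
fn _read_zkey_bytes_sketch(path: &str) -> Result<Vec<u8>, RLNError> {
    let bytes = std::fs::read(path)?; // std::io::Error -> RLNError::IO
    Ok(bytes)
}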

View File

@@ -2,7 +2,12 @@
use std::slice;
use crate::public::{hash as public_hash, poseidon_hash as public_poseidon_hash, RLN};
use crate::public::{
extended_key_gen as public_extended_key_gen, hash as public_hash, key_gen as public_key_gen,
poseidon_hash as public_poseidon_hash,
seeded_extended_key_gen as public_seeded_extended_key_gen,
seeded_key_gen as public_seeded_key_gen, RLN,
};
// Macro to call methods with arbitrary amount of arguments,
// First argument to the macro is context,
@@ -80,23 +85,48 @@ macro_rules! call_with_output_arg {
// Second argument is the output buffer argument
// The remaining arguments are all other inputs to the method
macro_rules! no_ctx_call_with_output_arg {
($method:ident, $output_arg:expr, $( $arg:expr ),* ) => {
{
let mut output_data: Vec<u8> = Vec::new();
match $method($($arg.process()),*, &mut output_data) {
Ok(()) => {
unsafe { *$output_arg = Buffer::from(&output_data[..]) };
std::mem::forget(output_data);
true
}
Err(err) => {
std::mem::forget(output_data);
eprintln!("execution error: {err}");
false
}
($method:ident, $output_arg:expr, $input_arg:expr, $endianness_arg:expr) => {{
let mut output_data: Vec<u8> = Vec::new();
match $method(
$input_arg.process(),
&mut output_data,
$endianness_arg.process(),
) {
Ok(()) => {
unsafe { *$output_arg = Buffer::from(&output_data[..]) };
std::mem::forget(output_data);
true
}
Err(err) => {
std::mem::forget(output_data);
eprintln!("execution error: {err}");
false
}
}
}
}};
}
// Macro to call methods with arbitrary amount of arguments,
// which are not implemented in a ctx RLN object
// First argument is the method to call
// Second argument is the output buffer argument
// The remaining arguments are all other inputs to the method
macro_rules! no_ctx_call_with_output_arg_and_endianness {
($method:ident, $output_arg:expr, $endianness_arg:expr) => {{
let mut output_data: Vec<u8> = Vec::new();
match $method(&mut output_data, $endianness_arg.process()) {
Ok(()) => {
unsafe { *$output_arg = Buffer::from(&output_data[..]) };
std::mem::forget(output_data);
true
}
Err(err) => {
std::mem::forget(output_data);
eprintln!("execution error: {err}");
false
}
}
}};
}
// Macro to call methods with arbitrary amount of arguments,
@@ -158,6 +188,13 @@ impl ProcessArg for *mut RLN {
}
}
impl ProcessArg for bool {
type ReturnType = bool;
fn process(self) -> Self::ReturnType {
self
}
}
///// Buffer struct is taken from
///// <https://github.com/celo-org/celo-threshold-bls-rs/blob/master/crates/threshold-bls-ffi/src/ffi.rs>
/////
@@ -195,8 +232,8 @@ impl<'a> From<&Buffer> for &'a [u8] {
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[cfg(not(feature = "stateless"))]
#[no_mangle]
pub extern "C" fn new(tree_height: usize, input_buffer: *const Buffer, ctx: *mut *mut RLN) -> bool {
match RLN::new(tree_height, input_buffer.process()) {
pub extern "C" fn new(tree_depth: usize, input_buffer: *const Buffer, ctx: *mut *mut RLN) -> bool {
match RLN::new(tree_depth, input_buffer.process()) {
Ok(rln) => {
unsafe { *ctx = Box::into_raw(Box::new(rln)) };
true
@@ -228,14 +265,14 @@ pub extern "C" fn new(ctx: *mut *mut RLN) -> bool {
#[cfg(not(feature = "stateless"))]
#[no_mangle]
pub extern "C" fn new_with_params(
tree_height: usize,
tree_depth: usize,
zkey_buffer: *const Buffer,
graph_data: *const Buffer,
tree_config: *const Buffer,
ctx: *mut *mut RLN,
) -> bool {
match RLN::new_with_params(
tree_height,
tree_depth,
zkey_buffer.process().to_vec(),
graph_data.process().to_vec(),
tree_config.process(),
@@ -280,8 +317,8 @@ pub extern "C" fn new_with_params(
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
#[cfg(not(feature = "stateless"))]
pub extern "C" fn set_tree(ctx: *mut RLN, tree_height: usize) -> bool {
call!(ctx, set_tree, tree_height)
pub extern "C" fn set_tree(ctx: *mut RLN, tree_depth: usize) -> bool {
call!(ctx, set_tree, tree_depth)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
@@ -460,38 +497,6 @@ pub extern "C" fn verify_with_roots(
////////////////////////////////////////////////////////
// Utils
////////////////////////////////////////////////////////
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn key_gen(ctx: *const RLN, output_buffer: *mut Buffer) -> bool {
call_with_output_arg!(ctx, key_gen, output_buffer)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn seeded_key_gen(
ctx: *const RLN,
input_buffer: *const Buffer,
output_buffer: *mut Buffer,
) -> bool {
call_with_output_arg!(ctx, seeded_key_gen, output_buffer, input_buffer)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn extended_key_gen(ctx: *const RLN, output_buffer: *mut Buffer) -> bool {
call_with_output_arg!(ctx, extended_key_gen, output_buffer)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn seeded_extended_key_gen(
ctx: *const RLN,
input_buffer: *const Buffer,
output_buffer: *mut Buffer,
) -> bool {
call_with_output_arg!(ctx, seeded_extended_key_gen, output_buffer, input_buffer)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn recover_id_secret(
@@ -534,14 +539,77 @@ pub extern "C" fn flush(ctx: *mut RLN) -> bool {
call!(ctx, flush)
}
////////////////////////////////////////////////////////
// Utils APIs
////////////////////////////////////////////////////////
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn hash(input_buffer: *const Buffer, output_buffer: *mut Buffer) -> bool {
no_ctx_call_with_output_arg!(public_hash, output_buffer, input_buffer)
pub extern "C" fn hash(
input_buffer: *const Buffer,
output_buffer: *mut Buffer,
is_little_endian: bool,
) -> bool {
no_ctx_call_with_output_arg!(public_hash, output_buffer, input_buffer, is_little_endian)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn poseidon_hash(input_buffer: *const Buffer, output_buffer: *mut Buffer) -> bool {
no_ctx_call_with_output_arg!(public_poseidon_hash, output_buffer, input_buffer)
pub extern "C" fn poseidon_hash(
input_buffer: *const Buffer,
output_buffer: *mut Buffer,
is_little_endian: bool,
) -> bool {
no_ctx_call_with_output_arg!(
public_poseidon_hash,
output_buffer,
input_buffer,
is_little_endian
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn key_gen(output_buffer: *mut Buffer, is_little_endian: bool) -> bool {
no_ctx_call_with_output_arg_and_endianness!(public_key_gen, output_buffer, is_little_endian)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn seeded_key_gen(
input_buffer: *const Buffer,
output_buffer: *mut Buffer,
is_little_endian: bool,
) -> bool {
no_ctx_call_with_output_arg!(
public_seeded_key_gen,
output_buffer,
input_buffer,
is_little_endian
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn extended_key_gen(output_buffer: *mut Buffer, is_little_endian: bool) -> bool {
no_ctx_call_with_output_arg_and_endianness!(
public_extended_key_gen,
output_buffer,
is_little_endian
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn seeded_extended_key_gen(
input_buffer: *const Buffer,
output_buffer: *mut Buffer,
is_little_endian: bool,
) -> bool {
no_ctx_call_with_output_arg!(
public_seeded_extended_key_gen,
output_buffer,
input_buffer,
is_little_endian
)
}

View File

@@ -1,5 +1,8 @@
/// This crate instantiates the Poseidon hash algorithm.
use crate::{circuit::Fr, utils::bytes_le_to_fr};
use crate::{
circuit::Fr,
utils::{bytes_be_to_fr, bytes_le_to_fr},
};
use once_cell::sync::Lazy;
use tiny_keccak::{Hasher, Keccak};
use utils::poseidon::Poseidon;
@@ -23,7 +26,7 @@ static POSEIDON: Lazy<Poseidon<Fr>> = Lazy::new(|| Poseidon::<Fr>::from(&ROUND_P
pub fn poseidon_hash(input: &[Fr]) -> Fr {
POSEIDON
.hash(input.to_vec())
.hash(input)
.expect("hash with fixed input size can't fail")
}
@@ -45,7 +48,7 @@ impl utils::merkle_tree::Hasher for PoseidonHash {
}
/// Hashes arbitrary signal to the underlying prime field.
pub fn hash_to_field(signal: &[u8]) -> Fr {
pub fn hash_to_field_le(signal: &[u8]) -> Fr {
// We hash the input signal using Keccak256
let mut hash = [0; 32];
let mut hasher = Keccak::v256();
@@ -56,3 +59,19 @@ pub fn hash_to_field(signal: &[u8]) -> Fr {
let (el, _) = bytes_le_to_fr(hash.as_ref());
el
}
/// Hashes arbitrary signal to the underlying prime field.
pub fn hash_to_field_be(signal: &[u8]) -> Fr {
// We hash the input signal using Keccak256
let mut hash = [0; 32];
let mut hasher = Keccak::v256();
hasher.update(signal);
hasher.finalize(&mut hash);
// Reverse the bytes to get big endian representation
hash.reverse();
// We export the hash as a field element
let (el, _) = bytes_be_to_fr(hash.as_ref());
el
}
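// Sketch contrasting the two variants (illustration only, not part of the
// original file): the same Keccak256 digest, read in little- vs big-endian
// order, generally maps to two different field elements.
fn _endianness_sketch() {
    let signal = b"RLN is awesome";
    let le = hash_to_field_le(signal);
    let be = hash_to_field_be(signal);
    // Holds unless the 32-byte digest happens to be byte-palindromic.
    assert_ne!(le, be);
}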

View File

@@ -1,10 +1,11 @@
#![allow(dead_code)]
pub mod circuit;
pub mod error;
#[cfg(not(target_arch = "wasm32"))]
pub mod ffi;
pub mod hashers;
pub mod iden3calc;
#[cfg(feature = "pmtree-ft")]
pub mod pm_tree_adapter;
#[cfg(not(feature = "stateless"))]
pub mod poseidon_tree;
pub mod protocol;
pub mod public;
@@ -12,4 +13,23 @@ pub mod public;
pub mod public_api_tests;
pub mod utils;
pub mod ffi;
// Ensure that only one Merkle tree feature is enabled at a time
#[cfg(any(
all(feature = "fullmerkletree", feature = "optimalmerkletree"),
all(feature = "fullmerkletree", feature = "pmtree-ft"),
all(feature = "optimalmerkletree", feature = "pmtree-ft"),
))]
compile_error!(
"Only one of `fullmerkletree`, `optimalmerkletree`, or `pmtree-ft` can be enabled at a time."
);
// Ensure that the `stateless` feature is not enabled with any Merkle tree features
#[cfg(all(
feature = "stateless",
any(
feature = "fullmerkletree",
feature = "optimalmerkletree",
feature = "pmtree-ft"
)
))]
compile_error!("Cannot enable any Merkle tree features with stateless");

View File

@@ -1,17 +1,16 @@
use serde_json::Value;
use std::fmt::Debug;
use std::path::PathBuf;
use std::str::FromStr;
use color_eyre::{Report, Result};
use serde_json::Value;
use utils::pmtree::tree::Key;
use utils::pmtree::{Database, Hasher};
use utils::*;
use tempfile::Builder;
use crate::circuit::Fr;
use crate::hashers::{poseidon_hash, PoseidonHash};
use crate::utils::{bytes_le_to_fr, fr_to_bytes_le};
use utils::error::{FromConfigError, ZerokitMerkleTreeError};
use utils::pmtree::tree::Key;
use utils::pmtree::{Database, Hasher, PmtreeErrorKind};
use utils::{pmtree, Config, Mode, SledDB, ZerokitMerkleProof, ZerokitMerkleTree};
const METADATA_KEY: [u8; 8] = *b"metadata";
@@ -52,20 +51,104 @@ impl Hasher for PoseidonHash {
}
}
fn get_tmp_path() -> PathBuf {
std::env::temp_dir().join(format!("pmtree-{}", rand::random::<u64>()))
fn default_tmp_path() -> PathBuf {
Builder::new()
.prefix("pmtree-")
.tempfile()
.expect("Failed to create temp file")
.into_temp_path()
.to_path_buf()
}
fn get_tmp() -> bool {
true
const DEFAULT_TEMPORARY: bool = true;
const DEFAULT_CACHE_CAPACITY: u64 = 1073741824; // 1 Gigabyte
const DEFAULT_FLUSH_EVERY_MS: u64 = 500; // 500 Milliseconds
const DEFAULT_MODE: Mode = Mode::HighThroughput;
const DEFAULT_USE_COMPRESSION: bool = false;
pub struct PmtreeConfigBuilder {
path: Option<PathBuf>,
temporary: bool,
cache_capacity: u64,
flush_every_ms: u64,
mode: Mode,
use_compression: bool,
}
impl PmtreeConfigBuilder {
fn new() -> Self {
PmtreeConfigBuilder {
path: None,
temporary: DEFAULT_TEMPORARY,
cache_capacity: DEFAULT_CACHE_CAPACITY,
flush_every_ms: DEFAULT_FLUSH_EVERY_MS,
mode: DEFAULT_MODE,
use_compression: DEFAULT_USE_COMPRESSION,
}
}
pub fn path<P: Into<PathBuf>>(mut self, path: P) -> Self {
self.path = Some(path.into());
self
}
pub fn temporary(mut self, temporary: bool) -> Self {
self.temporary = temporary;
self
}
pub fn cache_capacity(mut self, capacity: u64) -> Self {
self.cache_capacity = capacity;
self
}
pub fn flush_every_ms(mut self, ms: u64) -> Self {
self.flush_every_ms = ms;
self
}
pub fn mode(mut self, mode: Mode) -> Self {
self.mode = mode;
self
}
pub fn use_compression(mut self, compression: bool) -> Self {
self.use_compression = compression;
self
}
pub fn build(self) -> Result<PmtreeConfig, FromConfigError> {
let path = match (self.temporary, self.path) {
(true, None) => default_tmp_path(),
(false, None) => return Err(FromConfigError::MissingPath),
(true, Some(path)) if path.exists() => return Err(FromConfigError::PathExists),
(_, Some(path)) => path,
};
let config = Config::new()
.temporary(self.temporary)
.path(path)
.cache_capacity(self.cache_capacity)
.flush_every_ms(Some(self.flush_every_ms))
.mode(self.mode)
.use_compression(self.use_compression);
Ok(PmtreeConfig(config))
}
}
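For orientation, here is a minimal sketch of how the builder above might be used; the import paths and the concrete values are illustrative assumptions, not part of this diff:

```rust
use utils::error::FromConfigError;
use utils::Mode;

// Hypothetical helper: a persistent, compressed pmtree configuration.
fn make_config() -> Result<PmtreeConfig, FromConfigError> {
    PmtreeConfig::builder()
        .path("/tmp/my-pmtree-db") // persistent DB location (illustrative)
        .temporary(false) // keep the database on disk
        .cache_capacity(64 * 1024 * 1024) // 64 MiB sled cache
        .flush_every_ms(1_000) // flush to disk once per second
        .mode(Mode::HighThroughput)
        .use_compression(true)
        .build()
}
```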
pub struct PmtreeConfig(Config);
impl FromStr for PmtreeConfig {
type Err = Report;
impl PmtreeConfig {
pub fn builder() -> PmtreeConfigBuilder {
PmtreeConfigBuilder::new()
}
}
fn from_str(s: &str) -> Result<Self> {
impl FromStr for PmtreeConfig {
type Err = FromConfigError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let config: Value = serde_json::from_str(s)?;
let path = config["path"].as_str();
@@ -80,21 +163,16 @@ impl FromStr for PmtreeConfig {
};
let use_compression = config["use_compression"].as_bool();
if temporary.is_some()
&& path.is_some()
&& temporary.unwrap()
&& path.as_ref().unwrap().exists()
{
return Err(Report::msg(format!(
"Path {:?} already exists, cannot use temporary",
path.unwrap()
)));
if let (Some(true), Some(path)) = (temporary, path.as_ref()) {
if path.exists() {
return Err(FromConfigError::PathExists);
}
}
let config = Config::new()
.temporary(temporary.unwrap_or(get_tmp()))
.path(path.unwrap_or(get_tmp_path()))
.cache_capacity(cache_capacity.unwrap_or(1024 * 1024 * 1024))
.temporary(temporary.unwrap_or(DEFAULT_TEMPORARY))
.path(path.unwrap_or(default_tmp_path()))
.cache_capacity(cache_capacity.unwrap_or(DEFAULT_CACHE_CAPACITY))
.flush_every_ms(flush_every_ms)
.mode(mode)
.use_compression(use_compression.unwrap_or(false));
@@ -104,16 +182,9 @@ impl FromStr for PmtreeConfig {
impl Default for PmtreeConfig {
fn default() -> Self {
let tmp_path = get_tmp_path();
PmtreeConfig(
Config::new()
.temporary(true)
.path(tmp_path)
.cache_capacity(150_000)
.mode(Mode::HighThroughput)
.use_compression(false)
.flush_every_ms(Some(12_000)),
)
Self::builder()
.build()
.expect("Default configuration should never fail")
}
}
impl Debug for PmtreeConfig {
@@ -133,12 +204,16 @@ impl ZerokitMerkleTree for PmTree {
type Hasher = PoseidonHash;
type Config = PmtreeConfig;
fn default(depth: usize) -> Result<Self> {
fn default(depth: usize) -> Result<Self, ZerokitMerkleTreeError> {
let default_config = PmtreeConfig::default();
PmTree::new(depth, Self::Hasher::default_leaf(), default_config)
}
fn new(depth: usize, _default_leaf: FrOf<Self::Hasher>, config: Self::Config) -> Result<Self> {
fn new(
depth: usize,
_default_leaf: FrOf<Self::Hasher>,
config: Self::Config,
) -> Result<Self, ZerokitMerkleTreeError> {
let tree_loaded = pmtree::MerkleTree::load(config.clone().0);
let tree = match tree_loaded {
Ok(tree) => tree,
@@ -168,14 +243,12 @@ impl ZerokitMerkleTree for PmTree {
self.tree.root()
}
fn compute_root(&mut self) -> Result<FrOf<Self::Hasher>> {
Ok(self.tree.root())
}
fn set(&mut self, index: usize, leaf: FrOf<Self::Hasher>) -> Result<()> {
self.tree
.set(index, leaf)
.map_err(|e| Report::msg(e.to_string()))?;
fn set(
&mut self,
index: usize,
leaf: FrOf<Self::Hasher>,
) -> Result<(), ZerokitMerkleTreeError> {
self.tree.set(index, leaf)?;
self.cached_leaves_indices[index] = 1;
Ok(())
}
@@ -184,27 +257,31 @@ impl ZerokitMerkleTree for PmTree {
&mut self,
start: usize,
values: I,
) -> Result<()> {
) -> Result<(), ZerokitMerkleTreeError> {
let v = values.into_iter().collect::<Vec<_>>();
self.tree
.set_range(start, v.clone().into_iter())
.map_err(|e| Report::msg(e.to_string()))?;
self.tree.set_range(start, v.clone().into_iter())?;
for i in start..start + v.len() {
self.cached_leaves_indices[i] = 1
}
Ok(())
}
fn get(&self, index: usize) -> Result<FrOf<Self::Hasher>> {
self.tree.get(index).map_err(|e| Report::msg(e.to_string()))
fn get(&self, index: usize) -> Result<FrOf<Self::Hasher>, ZerokitMerkleTreeError> {
self.tree
.get(index)
.map_err(ZerokitMerkleTreeError::PmtreeErrorKind)
}
fn get_subtree_root(&self, n: usize, index: usize) -> Result<FrOf<Self::Hasher>> {
fn get_subtree_root(
&self,
n: usize,
index: usize,
) -> Result<FrOf<Self::Hasher>, ZerokitMerkleTreeError> {
if n > self.depth() {
return Err(Report::msg("level exceeds depth size"));
return Err(ZerokitMerkleTreeError::InvalidLevel);
}
if index >= self.capacity() {
return Err(Report::msg("index exceeds set size"));
return Err(ZerokitMerkleTreeError::InvalidLeaf);
}
if n == 0 {
Ok(self.root())
@@ -235,55 +312,71 @@ impl ZerokitMerkleTree for PmTree {
start: usize,
leaves: I,
indices: J,
) -> Result<()> {
) -> Result<(), ZerokitMerkleTreeError> {
let leaves = leaves.into_iter().collect::<Vec<_>>();
let mut indices = indices.into_iter().collect::<Vec<_>>();
indices.sort();
match (leaves.len(), indices.len()) {
(0, 0) => Err(Report::msg("no leaves or indices to be removed")),
(0, 0) => Err(ZerokitMerkleTreeError::InvalidLeaf),
(1, 0) => self.set(start, leaves[0]),
(0, 1) => self.delete(indices[0]),
(_, 0) => self.set_range(start, leaves),
(0, _) => self.remove_indices(&indices),
(_, _) => self.remove_indices_and_set_leaves(start, leaves, &indices),
(_, 0) => self.set_range(start, leaves.into_iter()),
(0, _) => self
.remove_indices(&indices)
.map_err(ZerokitMerkleTreeError::PmtreeErrorKind),
(_, _) => self
.remove_indices_and_set_leaves(start, leaves, &indices)
.map_err(ZerokitMerkleTreeError::PmtreeErrorKind),
}
}
fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<()> {
fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<(), ZerokitMerkleTreeError> {
self.tree
.update_next(leaf)
.map_err(|e| Report::msg(e.to_string()))
.map_err(ZerokitMerkleTreeError::PmtreeErrorKind)
}
fn delete(&mut self, index: usize) -> Result<()> {
/// Delete a leaf in the Merkle tree given its index
///
/// Deleting a leaf is done by resetting it to its default value. Note that the next_index
/// field is not changed: a previously used index cannot be reused, which avoids replay
/// attacks and other subtle, hard-to-debug issues.
fn delete(&mut self, index: usize) -> Result<(), ZerokitMerkleTreeError> {
self.tree
.delete(index)
.map_err(|e| Report::msg(e.to_string()))?;
.map_err(ZerokitMerkleTreeError::PmtreeErrorKind)?;
self.cached_leaves_indices[index] = 0;
Ok(())
}
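A sketch of the semantics documented above, assuming the ZerokitMerkleTree trait is in scope; the depth and leaf values are hypothetical:

```rust
use crate::circuit::Fr;
use utils::error::ZerokitMerkleTreeError;
use utils::ZerokitMerkleTree; // provides default/update_next/delete/leaves_set

// Hypothetical: a deleted slot is zeroed, but next_index keeps advancing,
// so a previously used index is never handed out again.
fn delete_does_not_recycle_indices() -> Result<(), ZerokitMerkleTreeError> {
    let mut tree = PmTree::default(10)?; // depth-10 tree, default config
    tree.update_next(Fr::from(1u64))?; // occupies index 0; next_index -> 1
    tree.delete(0)?; // index 0 is reset to the default leaf
    tree.update_next(Fr::from(2u64))?; // lands at index 1, not back at 0
    assert_eq!(tree.leaves_set(), 2);
    Ok(())
}
```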
fn proof(&self, index: usize) -> Result<Self::Proof> {
fn proof(&self, index: usize) -> Result<Self::Proof, ZerokitMerkleTreeError> {
let proof = self.tree.proof(index)?;
Ok(PmTreeProof { proof })
}
fn verify(&self, leaf: &FrOf<Self::Hasher>, witness: &Self::Proof) -> Result<bool> {
fn verify(
&self,
leaf: &FrOf<Self::Hasher>,
witness: &Self::Proof,
) -> Result<bool, ZerokitMerkleTreeError> {
if self.tree.verify(leaf, &witness.proof) {
Ok(true)
} else {
Err(Report::msg("verify failed"))
Err(ZerokitMerkleTreeError::InvalidWitness)
}
}
fn set_metadata(&mut self, metadata: &[u8]) -> Result<()> {
self.tree.db.put(METADATA_KEY, metadata.to_vec())?;
fn set_metadata(&mut self, metadata: &[u8]) -> Result<(), ZerokitMerkleTreeError> {
self.tree
.db
.put(METADATA_KEY, metadata.to_vec())
.map_err(ZerokitMerkleTreeError::PmtreeErrorKind)?;
self.metadata = metadata.to_vec();
Ok(())
}
fn metadata(&self) -> Result<Vec<u8>> {
fn metadata(&self) -> Result<Vec<u8>, ZerokitMerkleTreeError> {
if !self.metadata.is_empty() {
return Ok(self.metadata.clone());
}
@@ -297,8 +390,11 @@ impl ZerokitMerkleTree for PmTree {
Ok(data.unwrap())
}
fn close_db_connection(&mut self) -> Result<()> {
self.tree.db.close().map_err(|e| Report::msg(e.to_string()))
fn close_db_connection(&mut self) -> Result<(), ZerokitMerkleTreeError> {
self.tree
.db
.close()
.map_err(ZerokitMerkleTreeError::PmtreeErrorKind)
}
}
@@ -306,15 +402,13 @@ type PmTreeHasher = <PmTree as ZerokitMerkleTree>::Hasher;
type FrOfPmTreeHasher = FrOf<PmTreeHasher>;
impl PmTree {
fn remove_indices(&mut self, indices: &[usize]) -> Result<()> {
fn remove_indices(&mut self, indices: &[usize]) -> Result<(), PmtreeErrorKind> {
let start = indices[0];
let end = indices.last().unwrap() + 1;
let new_leaves = (start..end).map(|_| PmTreeHasher::default_leaf());
self.tree
.set_range(start, new_leaves)
.map_err(|e| Report::msg(e.to_string()))?;
self.tree.set_range(start, new_leaves)?;
for i in start..end {
self.cached_leaves_indices[i] = 0
@@ -327,7 +421,7 @@ impl PmTree {
start: usize,
leaves: Vec<FrOfPmTreeHasher>,
indices: &[usize],
) -> Result<()> {
) -> Result<(), PmtreeErrorKind> {
let min_index = *indices.first().unwrap();
let max_index = start + leaves.len();
@@ -344,9 +438,7 @@ impl PmTree {
set_values[start - min_index + i] = leaf;
}
self.tree
.set_range(start, set_values)
.map_err(|e| Report::msg(e.to_string()))?;
self.tree.set_range(start, set_values)?;
for i in indices {
self.cached_leaves_indices[*i] = 0;
@@ -382,3 +474,32 @@ impl ZerokitMerkleProof for PmTreeProof {
self.proof.compute_root_from(leaf)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_pmtree_json_config() {
let json = r#"
{
"path": "pmtree-123456",
"temporary": false,
"cache_capacity": 1073741824,
"flush_every_ms": 500,
"mode": "HighThroughput",
"use_compression": false
}"#;
let _: PmtreeConfig = json.parse().expect("Failed to parse JSON config");
let _ = PmtreeConfig::builder()
.path(default_tmp_path())
.temporary(DEFAULT_TEMPORARY)
.cache_capacity(DEFAULT_CACHE_CAPACITY)
.mode(DEFAULT_MODE)
.use_compression(DEFAULT_USE_COMPRESSION)
.build()
.expect("Failed to build config");
}
}

View File

@@ -1,30 +1,32 @@
// This crate defines the RLN module default Merkle tree implementation and its Hasher
// Implementation inspired by https://github.com/worldcoin/semaphore-rs/blob/d462a4372f1fd9c27610f2acfe4841fab1d396aa/src/poseidon_tree.rs
// Implementation inspired by https://github.com/worldcoin/semaphore-rs/blob/d462a4372f1fd9c27610f2acfe4841fab1d396aa/src/poseidon_tree.rs (no differences)
#![cfg(not(feature = "stateless"))]
use cfg_if::cfg_if;
cfg_if! {
if #[cfg(feature = "pmtree-ft")] {
use crate::pm_tree_adapter::*;
} else {
use crate::hashers::{PoseidonHash};
use utils::merkle_tree::*;
}
}
// The zerokit RLN default Merkle tree implementation is the OptimalMerkleTree.
// To switch to FullMerkleTree implementation, it is enough to enable the fullmerkletree feature
// The zerokit RLN default Merkle tree implementation is the PMTree from the vacp2p_pmtree crate
// To switch to FullMerkleTree or OptimalMerkleTree, enable the corresponding feature in the Cargo.toml file
cfg_if! {
if #[cfg(feature = "fullmerkletree")] {
use utils::{FullMerkleTree, FullMerkleProof};
use crate::hashers::PoseidonHash;
pub type PoseidonTree = FullMerkleTree<PoseidonHash>;
pub type MerkleProof = FullMerkleProof<PoseidonHash>;
} else if #[cfg(feature = "pmtree-ft")] {
pub type PoseidonTree = PmTree;
pub type MerkleProof = PmTreeProof;
} else {
} else if #[cfg(feature = "optimalmerkletree")] {
use utils::{OptimalMerkleTree, OptimalMerkleProof};
use crate::hashers::PoseidonHash;
pub type PoseidonTree = OptimalMerkleTree<PoseidonHash>;
pub type MerkleProof = OptimalMerkleProof<PoseidonHash>;
} else if #[cfg(feature = "pmtree-ft")] {
use crate::pm_tree_adapter::{PmTree, PmTreeProof};
pub type PoseidonTree = PmTree;
pub type MerkleProof = PmTreeProof;
} else {
compile_error!("One of the features `fullmerkletree`, `optimalmerkletree`, or `pmtree-ft` must be enabled.");
}
}
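Whichever feature is selected, downstream code sees the same PoseidonTree/MerkleProof aliases. A hypothetical, backend-agnostic sketch (imports and depth are illustrative):

```rust
use crate::circuit::Fr;
use utils::error::ZerokitMerkleTreeError;
use utils::ZerokitMerkleTree; // provides default/update_next/proof/verify

// Hypothetical: the aliases hide which tree backend the feature flags selected.
fn insert_and_verify(leaf: Fr) -> Result<bool, ZerokitMerkleTreeError> {
    let mut tree = PoseidonTree::default(10)?; // depth 10, backend's default config
    tree.update_next(leaf)?;
    let proof: MerkleProof = tree.proof(0)?;
    tree.verify(&leaf, &proof) // Ok(true) when the witness matches the leaf
}
```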

View File

@@ -1,28 +1,35 @@
// This crate collects all the underlying primitives used to implement RLN
use ark_bn254::Fr;
use ark_circom::CircomReduction;
#[cfg(not(feature = "stateless"))]
use {
crate::error::ConversionError,
crate::poseidon_tree::PoseidonTree,
utils::{ZerokitMerkleProof, ZerokitMerkleTree},
};
use crate::circuit::{calculate_rln_witness, qap::CircomReduction, Curve};
use crate::error::{ComputeIdSecretError, ProofError, ProtocolError};
use crate::hashers::{hash_to_field_le, poseidon_hash};
use crate::public::RLN_IDENTIFIER;
use crate::utils::{
bytes_be_to_fr, bytes_le_to_fr, bytes_le_to_vec_fr, bytes_le_to_vec_u8, fr_byte_size,
fr_to_bytes_le, normalize_usize_le, to_bigint, vec_fr_to_bytes_le, vec_u8_to_bytes_le,
FrOrSecret, IdSecret,
};
use ark_bn254::{Fr, FrConfig};
use ark_ff::{AdditiveGroup, Fp, MontBackend};
use ark_groth16::{prepare_verifying_key, Groth16, Proof as ArkProof, ProvingKey, VerifyingKey};
use ark_relations::r1cs::{ConstraintMatrices, SynthesisError};
use ark_relations::r1cs::ConstraintMatrices;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_std::{rand::thread_rng, UniformRand};
use color_eyre::{Report, Result};
use num_bigint::BigInt;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha20Rng;
use serde::{Deserialize, Serialize};
#[cfg(test)]
use std::time::Instant;
use thiserror::Error;
use tiny_keccak::{Hasher as _, Keccak};
use crate::circuit::{calculate_rln_witness, Curve};
use crate::hashers::{hash_to_field, poseidon_hash};
use crate::poseidon_tree::*;
use crate::public::RLN_IDENTIFIER;
use crate::utils::*;
use utils::{ZerokitMerkleProof, ZerokitMerkleTree};
use zeroize::Zeroize;
///////////////////////////////////////////////////////
// RLN Witness data structure and utility functions
///////////////////////////////////////////////////////
@@ -30,7 +37,7 @@ use utils::{ZerokitMerkleProof, ZerokitMerkleTree};
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct RLNWitnessInput {
#[serde(serialize_with = "ark_se", deserialize_with = "ark_de")]
identity_secret: Fr,
identity_secret: IdSecret,
#[serde(serialize_with = "ark_se", deserialize_with = "ark_de")]
user_message_limit: Fr,
#[serde(serialize_with = "ark_se", deserialize_with = "ark_de")]
@@ -65,14 +72,21 @@ pub fn deserialize_field_element(serialized: Vec<u8>) -> Fr {
element
}
pub fn deserialize_identity_pair(serialized: Vec<u8>) -> (Fr, Fr) {
pub fn deserialize_identity_pair_le(serialized: Vec<u8>) -> (Fr, Fr) {
let (identity_secret_hash, read) = bytes_le_to_fr(&serialized);
let (id_commitment, _) = bytes_le_to_fr(&serialized[read..]);
(identity_secret_hash, id_commitment)
}
pub fn deserialize_identity_tuple(serialized: Vec<u8>) -> (Fr, Fr, Fr, Fr) {
pub fn deserialize_identity_pair_be(serialized: Vec<u8>) -> (Fr, Fr) {
let (identity_secret_hash, read) = bytes_be_to_fr(&serialized);
let (id_commitment, _) = bytes_be_to_fr(&serialized[read..]);
(identity_secret_hash, id_commitment)
}
pub fn deserialize_identity_tuple_le(serialized: Vec<u8>) -> (Fr, Fr, Fr, Fr) {
let mut all_read = 0;
let (identity_trapdoor, read) = bytes_le_to_fr(&serialized[all_read..]);
@@ -94,13 +108,35 @@ pub fn deserialize_identity_tuple(serialized: Vec<u8>) -> (Fr, Fr, Fr, Fr) {
)
}
pub fn deserialize_identity_tuple_be(serialized: Vec<u8>) -> (Fr, Fr, Fr, Fr) {
let mut all_read = 0;
let (identity_trapdoor, read) = bytes_be_to_fr(&serialized[all_read..]);
all_read += read;
let (identity_nullifier, read) = bytes_be_to_fr(&serialized[all_read..]);
all_read += read;
let (identity_secret_hash, read) = bytes_be_to_fr(&serialized[all_read..]);
all_read += read;
let (identity_commitment, _) = bytes_be_to_fr(&serialized[all_read..]);
(
identity_trapdoor,
identity_nullifier,
identity_secret_hash,
identity_commitment,
)
}
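As a quick illustration of the byte layout these helpers expect (consecutive fr_byte_size()-byte field elements), a hypothetical round trip:

```rust
// Hypothetical round trip: serialize a (secret_hash, commitment) pair, read it back.
let secret_hash = Fr::from(1234u64);
let commitment = Fr::from(5678u64);
let mut serialized = fr_to_bytes_le(&secret_hash);
serialized.extend_from_slice(&fr_to_bytes_le(&commitment));
let (s, c) = deserialize_identity_pair_le(serialized);
assert_eq!((s, c), (secret_hash, commitment));
```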
/// Serializes witness
///
/// # Errors
///
/// Returns an error if `rln_witness.message_id` is not within `rln_witness.user_message_limit`.
/// input data is [ identity_secret<32> | user_message_limit<32> | message_id<32> | path_elements[<32>] | identity_path_index<8> | x<32> | external_nullifier<32> ]
pub fn serialize_witness(rln_witness: &RLNWitnessInput) -> Result<Vec<u8>> {
/// input data is [ identity_secret<32> | user_message_limit<32> | message_id<32> | path_elements<32> | identity_path_index<8> | x<32> | external_nullifier<32> ]
pub fn serialize_witness(rln_witness: &RLNWitnessInput) -> Result<Vec<u8>, ProtocolError> {
// Check if message_id is within user_message_limit
message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;
@@ -112,11 +148,11 @@ pub fn serialize_witness(rln_witness: &RLNWitnessInput) -> Result<Vec<u8>> {
fr_byte_size() * (5 + rln_witness.path_elements.len())
+ rln_witness.identity_path_index.len(),
);
serialized.extend_from_slice(&fr_to_bytes_le(&rln_witness.identity_secret));
serialized.extend_from_slice(&rln_witness.identity_secret.to_bytes_le());
serialized.extend_from_slice(&fr_to_bytes_le(&rln_witness.user_message_limit));
serialized.extend_from_slice(&fr_to_bytes_le(&rln_witness.message_id));
serialized.extend_from_slice(&vec_fr_to_bytes_le(&rln_witness.path_elements)?);
serialized.extend_from_slice(&vec_u8_to_bytes_le(&rln_witness.identity_path_index)?);
serialized.extend_from_slice(&vec_fr_to_bytes_le(&rln_witness.path_elements));
serialized.extend_from_slice(&vec_u8_to_bytes_le(&rln_witness.identity_path_index));
serialized.extend_from_slice(&fr_to_bytes_le(&rln_witness.x));
serialized.extend_from_slice(&fr_to_bytes_le(&rln_witness.external_nullifier));
@@ -128,10 +164,10 @@ pub fn serialize_witness(rln_witness: &RLNWitnessInput) -> Result<Vec<u8>> {
/// # Errors
///
/// Returns an error if `message_id` is not within `user_message_limit`.
pub fn deserialize_witness(serialized: &[u8]) -> Result<(RLNWitnessInput, usize)> {
pub fn deserialize_witness(serialized: &[u8]) -> Result<(RLNWitnessInput, usize), ProtocolError> {
let mut all_read: usize = 0;
let (identity_secret, read) = bytes_le_to_fr(&serialized[all_read..]);
let (identity_secret, read) = IdSecret::from_bytes_le(&serialized[all_read..]);
all_read += read;
let (user_message_limit, read) = bytes_le_to_fr(&serialized[all_read..]);
@@ -155,7 +191,7 @@ pub fn deserialize_witness(serialized: &[u8]) -> Result<(RLNWitnessInput, usize)
all_read += read;
if serialized.len() != all_read {
return Err(Report::msg("serialized length is not equal to all_read"));
return Err(ProtocolError::InvalidReadLen(serialized.len(), all_read));
}
Ok((
@@ -176,18 +212,22 @@ pub fn deserialize_witness(serialized: &[u8]) -> Result<(RLNWitnessInput, usize)
// https://github.com/kilic/rln/blob/7ac74183f8b69b399e3bc96c1ae8ab61c026dc43/src/public.rs#L148
// input_data is [ identity_secret<32> | id_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal<var> ]
// return value is a rln witness populated according to this information
#[cfg(not(feature = "stateless"))]
pub fn proof_inputs_to_rln_witness(
tree: &mut PoseidonTree,
serialized: &[u8],
) -> Result<(RLNWitnessInput, usize)> {
) -> Result<(RLNWitnessInput, usize), ProtocolError> {
let mut all_read: usize = 0;
let (identity_secret, read) = bytes_le_to_fr(&serialized[all_read..]);
let (identity_secret, read) = IdSecret::from_bytes_le(&serialized[all_read..]);
all_read += read;
let id_index = usize::try_from(u64::from_le_bytes(
serialized[all_read..all_read + 8].try_into()?,
))?;
serialized[all_read..all_read + 8]
.try_into()
.map_err(ConversionError::FromSlice)?,
))
.map_err(ConversionError::ToUsize)?;
all_read += 8;
let (user_message_limit, read) = bytes_le_to_fr(&serialized[all_read..]);
@@ -200,8 +240,11 @@ pub fn proof_inputs_to_rln_witness(
all_read += read;
let signal_len = usize::try_from(u64::from_le_bytes(
serialized[all_read..all_read + 8].try_into()?,
))?;
serialized[all_read..all_read + 8]
.try_into()
.map_err(ConversionError::FromSlice)?,
))
.map_err(ConversionError::ToUsize)?;
all_read += 8;
let signal: Vec<u8> = serialized[all_read..all_read + signal_len].to_vec();
@@ -210,7 +253,7 @@ pub fn proof_inputs_to_rln_witness(
let path_elements = merkle_proof.get_path_elements();
let identity_path_index = merkle_proof.get_path_index();
let x = hash_to_field(&signal);
let x = hash_to_field_le(&signal);
Ok((
RLNWitnessInput {
@@ -232,18 +275,16 @@ pub fn proof_inputs_to_rln_witness(
///
/// Returns an error if `message_id` is not within `user_message_limit`.
pub fn rln_witness_from_values(
identity_secret: Fr,
merkle_proof: &MerkleProof,
identity_secret: IdSecret,
path_elements: Vec<Fp<MontBackend<FrConfig, 4>, 4>>,
identity_path_index: Vec<u8>,
x: Fr,
external_nullifier: Fr,
user_message_limit: Fr,
message_id: Fr,
) -> Result<RLNWitnessInput> {
) -> Result<RLNWitnessInput, ProtocolError> {
message_id_range_check(&message_id, &user_message_limit)?;
let path_elements = merkle_proof.get_path_elements();
let identity_path_index = merkle_proof.get_path_index();
Ok(RLNWitnessInput {
identity_secret,
path_elements,
@@ -255,19 +296,19 @@ pub fn rln_witness_from_values(
})
}
pub fn random_rln_witness(tree_height: usize) -> RLNWitnessInput {
pub fn random_rln_witness(tree_depth: usize) -> RLNWitnessInput {
let mut rng = thread_rng();
let identity_secret = hash_to_field(&rng.gen::<[u8; 32]>());
let x = hash_to_field(&rng.gen::<[u8; 32]>());
let epoch = hash_to_field(&rng.gen::<[u8; 32]>());
let rln_identifier = hash_to_field(RLN_IDENTIFIER); //hash_to_field(&rng.gen::<[u8; 32]>());
let identity_secret = IdSecret::rand(&mut rng);
let x = hash_to_field_le(&rng.gen::<[u8; 32]>());
let epoch = hash_to_field_le(&rng.gen::<[u8; 32]>());
let rln_identifier = hash_to_field_le(RLN_IDENTIFIER);
let mut path_elements: Vec<Fr> = Vec::new();
let mut identity_path_index: Vec<u8> = Vec::new();
for _ in 0..tree_height {
path_elements.push(hash_to_field(&rng.gen::<[u8; 32]>()));
for _ in 0..tree_depth {
path_elements.push(hash_to_field_le(&rng.gen::<[u8; 32]>()));
identity_path_index.push(rng.gen_range(0..2) as u8);
}
@@ -285,16 +326,24 @@ pub fn random_rln_witness(tree_height: usize) -> RLNWitnessInput {
}
}
pub fn proof_values_from_witness(rln_witness: &RLNWitnessInput) -> Result<RLNProofValues> {
pub fn proof_values_from_witness(
rln_witness: &RLNWitnessInput,
) -> Result<RLNProofValues, ProtocolError> {
message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;
// y share
let a_0 = rln_witness.identity_secret;
let a_1 = poseidon_hash(&[a_0, rln_witness.external_nullifier, rln_witness.message_id]);
let y = a_0 + rln_witness.x * a_1;
let a_0 = &rln_witness.identity_secret;
let mut to_hash = [
*(a_0.clone()),
rln_witness.external_nullifier,
rln_witness.message_id,
];
let a_1 = poseidon_hash(&to_hash);
let y = *(a_0.clone()) + rln_witness.x * a_1;
// Nullifier
let nullifier = poseidon_hash(&[a_1]);
to_hash[0].zeroize();
// Merkle tree root computations
let root = compute_tree_root(
@@ -362,7 +411,7 @@ pub fn deserialize_proof_values(serialized: &[u8]) -> (RLNProofValues, usize) {
// input_data is [ identity_secret<32> | id_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal<var> ]
pub fn prepare_prove_input(
identity_secret: Fr,
identity_secret: IdSecret,
id_index: usize,
user_message_limit: Fr,
message_id: Fr,
@@ -375,12 +424,12 @@ pub fn prepare_prove_input(
// - variable length signal data
let mut serialized = Vec::with_capacity(fr_byte_size() * 4 + 16 + signal.len()); // length of 4 fr elements + 16 bytes (id_index + len) + signal length
serialized.extend_from_slice(&fr_to_bytes_le(&identity_secret));
serialized.extend_from_slice(&normalize_usize(id_index));
serialized.extend_from_slice(&identity_secret.to_bytes_le());
serialized.extend_from_slice(&normalize_usize_le(id_index));
serialized.extend_from_slice(&fr_to_bytes_le(&user_message_limit));
serialized.extend_from_slice(&fr_to_bytes_le(&message_id));
serialized.extend_from_slice(&fr_to_bytes_le(&external_nullifier));
serialized.extend_from_slice(&normalize_usize(signal.len()));
serialized.extend_from_slice(&normalize_usize_le(signal.len()));
serialized.extend_from_slice(signal);
serialized
@@ -395,7 +444,7 @@ pub fn prepare_verify_input(proof_data: Vec<u8>, signal: &[u8]) -> Vec<u8> {
let mut serialized = Vec::with_capacity(proof_data.len() + 8 + signal.len());
serialized.extend(proof_data);
serialized.extend_from_slice(&normalize_usize(signal.len()));
serialized.extend_from_slice(&normalize_usize_le(signal.len()));
serialized.extend_from_slice(signal);
serialized
@@ -406,12 +455,15 @@ pub fn prepare_verify_input(proof_data: Vec<u8>, signal: &[u8]) -> Vec<u8> {
///////////////////////////////////////////////////////
pub fn compute_tree_root(
identity_secret: &Fr,
identity_secret: &IdSecret,
user_message_limit: &Fr,
path_elements: &[Fr],
identity_path_index: &[u8],
) -> Fr {
let id_commitment = poseidon_hash(&[*identity_secret]);
let mut to_hash = [*identity_secret.clone()];
let id_commitment = poseidon_hash(&to_hash);
to_hash[0].zeroize();
let mut root = poseidon_hash(&[id_commitment, *user_message_limit]);
for i in 0..identity_path_index.len() {
@@ -432,10 +484,12 @@ pub fn compute_tree_root(
// Generates a tuple (identity_secret_hash, id_commitment) where
// identity_secret_hash is random and id_commitment = PoseidonHash(identity_secret_hash)
// RNG is instantiated using thread_rng()
pub fn keygen() -> (Fr, Fr) {
pub fn keygen() -> (IdSecret, Fr) {
let mut rng = thread_rng();
let identity_secret_hash = Fr::rand(&mut rng);
let id_commitment = poseidon_hash(&[identity_secret_hash]);
let identity_secret_hash = IdSecret::rand(&mut rng);
let mut to_hash = [*identity_secret_hash.clone()];
let id_commitment = poseidon_hash(&to_hash);
to_hash[0].zeroize();
(identity_secret_hash, id_commitment)
}
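A sketch of how the keygen output typically feeds the membership tree; the rate-commitment construction mirrors the tests later in this diff, and the limit value is illustrative:

```rust
// Hypothetical: derive the tree leaf (rate commitment) from a fresh identity.
let (identity_secret_hash, id_commitment) = keygen();
let user_message_limit = Fr::from(100u64);
let rate_commitment = poseidon_hash(&[id_commitment, user_message_limit]);
// rate_commitment is what gets inserted into the tree,
// e.g. tree.update_next(rate_commitment)
```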
@@ -503,7 +557,10 @@ pub fn extended_seeded_keygen(signal: &[u8]) -> (Fr, Fr, Fr, Fr) {
)
}
pub fn compute_id_secret(share1: (Fr, Fr), share2: (Fr, Fr)) -> Result<Fr, String> {
pub fn compute_id_secret(
share1: (Fr, Fr),
share2: (Fr, Fr),
) -> Result<IdSecret, ComputeIdSecretError> {
// Assuming a0 is the identity secret and a1 = poseidonHash([a0, external_nullifier]),
// a (x,y) share satisfies the following relation
// y = a_0 + x * a_1
@@ -513,30 +570,26 @@ pub fn compute_id_secret(share1: (Fr, Fr), share2: (Fr, Fr)) -> Result<Fr, Strin
// If the two input shares were computed for the same external_nullifier and identity secret, we can recover the latter
// y1 = a_0 + x1 * a_1
// y2 = a_0 + x2 * a_1
let a_1 = (y1 - y2) / (x1 - x2);
let a_0 = y1 - x1 * a_1;
// If shares come from the same polynomial, a0 is correctly recovered and a1 = poseidonHash([a0, external_nullifier])
Ok(a_0)
if (x1 - x2) != Fr::ZERO {
let a_1 = (y1 - y2) / (x1 - x2);
let mut a_0 = y1 - x1 * a_1;
// If shares come from the same polynomial, a0 is correctly recovered and a1 = poseidonHash([a0, external_nullifier])
let id_secret = IdSecret::from(&mut a_0);
Ok(id_secret)
} else {
Err(ComputeIdSecretError::DivisionByZero)
}
}
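The recovery above is Lagrange interpolation on a degree-one polynomial. A toy check of the algebra with hypothetical values (in the protocol, a_1 comes from the Poseidon hash rather than being chosen freely):

```rust
// Hypothetical sanity check: two shares on the same line recover a_0.
let a_0 = Fr::from(7u64); // the secret
let a_1 = Fr::from(3u64); // the slope
let (x1, x2) = (Fr::from(11u64), Fr::from(42u64));
let share1 = (x1, a_0 + x1 * a_1);
let share2 = (x2, a_0 + x2 * a_1);
let recovered = compute_id_secret(share1, share2).unwrap();
assert_eq!(*recovered, a_0); // IdSecret dereferences to the underlying Fr
```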
///////////////////////////////////////////////////////
// zkSNARK utility functions
///////////////////////////////////////////////////////
#[derive(Error, Debug)]
pub enum ProofError {
#[error("Error reading circuit key: {0}")]
CircuitKeyError(#[from] Report),
#[error("Error producing witness: {0}")]
WitnessError(Report),
#[error("Error producing proof: {0}")]
SynthesisError(#[from] SynthesisError),
}
fn calculate_witness_element<E: ark_ec::pairing::Pairing>(
witness: Vec<BigInt>,
) -> Result<Vec<E::ScalarField>> {
) -> Result<Vec<E::ScalarField>, ProtocolError> {
use ark_ff::PrimeField;
let modulus = <E::ScalarField as PrimeField>::MODULUS;
@@ -549,9 +602,9 @@ fn calculate_witness_element<E: ark_ec::pairing::Pairing>(
modulus.into()
- w.abs()
.to_biguint()
.ok_or(Report::msg("not a biguint value"))?
.ok_or(ProtocolError::BigUintConversion(w))?
} else {
w.to_biguint().ok_or(Report::msg("not a biguint value"))?
w.to_biguint().ok_or(ProtocolError::BigUintConversion(w))?
};
witness_vec.push(E::ScalarField::from(w))
}
@@ -567,8 +620,7 @@ pub fn generate_proof_with_witness(
#[cfg(test)]
let now = Instant::now();
let full_assignment =
calculate_witness_element::<Curve>(witness).map_err(ProofError::WitnessError)?;
let full_assignment = calculate_witness_element::<Curve>(witness)?;
#[cfg(test)]
println!("witness generation took: {:.2?}", now.elapsed());
@@ -605,7 +657,7 @@ pub fn generate_proof_with_witness(
/// Returns an error if `rln_witness.message_id` is not within `rln_witness.user_message_limit`.
pub fn inputs_for_witness_calculation(
rln_witness: &RLNWitnessInput,
) -> Result<[(&str, Vec<Fr>); 7]> {
) -> Result<[(&str, Vec<FrOrSecret>); 7], ProtocolError> {
message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;
let mut identity_path_index = Vec::with_capacity(rln_witness.identity_path_index.len());
@@ -615,13 +667,33 @@ pub fn inputs_for_witness_calculation(
.for_each(|v| identity_path_index.push(Fr::from(*v)));
Ok([
("identitySecret", vec![rln_witness.identity_secret]),
("userMessageLimit", vec![rln_witness.user_message_limit]),
("messageId", vec![rln_witness.message_id]),
("pathElements", rln_witness.path_elements.clone()),
("identityPathIndex", identity_path_index),
("x", vec![rln_witness.x]),
("externalNullifier", vec![rln_witness.external_nullifier]),
(
"identitySecret",
vec![rln_witness.identity_secret.clone().into()],
),
(
"userMessageLimit",
vec![rln_witness.user_message_limit.into()],
),
("messageId", vec![rln_witness.message_id.into()]),
(
"pathElements",
rln_witness
.path_elements
.iter()
.cloned()
.map(Into::into)
.collect(),
),
(
"identityPathIndex",
identity_path_index.into_iter().map(Into::into).collect(),
),
("x", vec![rln_witness.x.into()]),
(
"externalNullifier",
vec![rln_witness.external_nullifier.into()],
),
])
}
@@ -733,7 +805,9 @@ where
/// # Errors
///
/// Returns an error if `rln_witness.message_id` is not within `rln_witness.user_message_limit`.
pub fn rln_witness_from_json(input_json: serde_json::Value) -> Result<RLNWitnessInput> {
pub fn rln_witness_from_json(
input_json: serde_json::Value,
) -> Result<RLNWitnessInput, ProtocolError> {
let rln_witness: RLNWitnessInput = serde_json::from_value(input_json).unwrap();
message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;
@@ -745,7 +819,9 @@ pub fn rln_witness_from_json(input_json: serde_json::Value) -> Result<RLNWitness
/// # Errors
///
/// Returns an error if `message_id` is not within `user_message_limit`.
pub fn rln_witness_to_json(rln_witness: &RLNWitnessInput) -> Result<serde_json::Value> {
pub fn rln_witness_to_json(
rln_witness: &RLNWitnessInput,
) -> Result<serde_json::Value, ProtocolError> {
message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;
let rln_witness_json = serde_json::to_value(rln_witness)?;
@@ -758,13 +834,15 @@ pub fn rln_witness_to_json(rln_witness: &RLNWitnessInput) -> Result<serde_json::
/// # Errors
///
/// Returns an error if `message_id` is not within `user_message_limit`.
pub fn rln_witness_to_bigint_json(rln_witness: &RLNWitnessInput) -> Result<serde_json::Value> {
pub fn rln_witness_to_bigint_json(
rln_witness: &RLNWitnessInput,
) -> Result<serde_json::Value, ProtocolError> {
message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;
let mut path_elements = Vec::new();
for v in rln_witness.path_elements.iter() {
path_elements.push(to_bigint(v)?.to_str_radix(10));
path_elements.push(to_bigint(v).to_str_radix(10));
}
let mut identity_path_index = Vec::new();
@@ -774,22 +852,26 @@ pub fn rln_witness_to_bigint_json(rln_witness: &RLNWitnessInput) -> Result<serde
.for_each(|v| identity_path_index.push(BigInt::from(*v).to_str_radix(10)));
let inputs = serde_json::json!({
"identitySecret": to_bigint(&rln_witness.identity_secret)?.to_str_radix(10),
"userMessageLimit": to_bigint(&rln_witness.user_message_limit)?.to_str_radix(10),
"messageId": to_bigint(&rln_witness.message_id)?.to_str_radix(10),
"identitySecret": to_bigint(&rln_witness.identity_secret).to_str_radix(10),
"userMessageLimit": to_bigint(&rln_witness.user_message_limit).to_str_radix(10),
"messageId": to_bigint(&rln_witness.message_id).to_str_radix(10),
"pathElements": path_elements,
"identityPathIndex": identity_path_index,
"x": to_bigint(&rln_witness.x)?.to_str_radix(10),
"externalNullifier": to_bigint(&rln_witness.external_nullifier)?.to_str_radix(10),
"x": to_bigint(&rln_witness.x).to_str_radix(10),
"externalNullifier": to_bigint(&rln_witness.external_nullifier).to_str_radix(10),
});
Ok(inputs)
}
pub fn message_id_range_check(message_id: &Fr, user_message_limit: &Fr) -> Result<()> {
pub fn message_id_range_check(
message_id: &Fr,
user_message_limit: &Fr,
) -> Result<(), ProtocolError> {
if message_id > user_message_limit {
return Err(color_eyre::Report::msg(
"message_id is not within user_message_limit",
return Err(ProtocolError::InvalidMessageId(
*message_id,
*user_message_limit,
));
}
Ok(())
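As written, the check rejects only strictly greater values, so equality passes; a hypothetical illustration:

```rust
// Hypothetical: the range check errors only when message_id exceeds the limit.
let limit = Fr::from(10u64);
assert!(message_id_range_check(&Fr::from(5u64), &limit).is_ok());
assert!(message_id_range_check(&Fr::from(10u64), &limit).is_ok()); // equality allowed
assert!(message_id_range_check(&Fr::from(11u64), &limit).is_err());
```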

File diff suppressed because it is too large

View File

@@ -1,16 +1,21 @@
use crate::circuit::TEST_TREE_HEIGHT;
use crate::protocol::*;
use crate::circuit::TEST_TREE_DEPTH;
use crate::protocol::{
proof_values_from_witness, random_rln_witness, serialize_proof_values, serialize_witness,
verify_proof, RLNProofValues,
};
use crate::public::RLN;
use crate::utils::*;
use crate::utils::str_to_fr;
use ark_groth16::Proof as ArkProof;
use ark_serialize::CanonicalDeserialize;
use serde_json::{json, Value};
use std::io::Cursor;
use std::str::FromStr;
use serde_json::{json, Value};
#[cfg(not(feature = "stateless"))]
use crate::utils::generate_input_buffer;
fn fq_from_str(s: &str) -> ark_bn254::Fq {
ark_bn254::Fq::from_str(&s).unwrap()
ark_bn254::Fq::from_str(s).unwrap()
}
fn g1_from_str(g1: &[String]) -> ark_bn254::G1Affine {
@@ -40,7 +45,7 @@ fn value_to_string_vec(value: &Value) -> Vec<String> {
value
.as_array()
.unwrap()
.into_iter()
.iter()
.map(|val| val.as_str().unwrap().to_string())
.collect()
}
@@ -48,7 +53,7 @@ fn value_to_string_vec(value: &Value) -> Vec<String> {
#[test]
fn test_groth16_proof_hardcoded() {
#[cfg(not(feature = "stateless"))]
let rln = RLN::new(TEST_TREE_HEIGHT, generate_input_buffer()).unwrap();
let rln = RLN::new(TEST_TREE_DEPTH, generate_input_buffer()).unwrap();
#[cfg(feature = "stateless")]
let rln = RLN::new().unwrap();
@@ -87,7 +92,7 @@ fn test_groth16_proof_hardcoded() {
.as_array()
.unwrap()
.iter()
.map(|item| value_to_string_vec(item))
.map(value_to_string_vec)
.collect::<Vec<Vec<String>>>(),
),
c: g1_from_str(&value_to_string_vec(&valid_snarkjs_proof["pi_c"])),
@@ -128,15 +133,15 @@ fn test_groth16_proof_hardcoded() {
#[test]
// This test is similar to the one in lib, but uses only the public API
fn test_groth16_proof() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
#[cfg(not(feature = "stateless"))]
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
#[cfg(feature = "stateless")]
let mut rln = RLN::new().unwrap();
// Note: we only test Groth16 proof generation, so we ignore setting the tree in the RLN object
let rln_witness = random_rln_witness(tree_height);
let rln_witness = random_rln_witness(tree_depth);
let proof_values = proof_values_from_witness(&rln_witness).unwrap();
// We compute a Groth16 proof
@@ -166,12 +171,14 @@ fn test_groth16_proof() {
#[cfg(not(feature = "stateless"))]
mod tree_test {
use crate::circuit::{Fr, TEST_TREE_HEIGHT};
use crate::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash};
use crate::circuit::{Fr, TEST_TREE_DEPTH};
use crate::hashers::{hash_to_field_le, poseidon_hash as utils_poseidon_hash};
use crate::pm_tree_adapter::PmtreeConfig;
use crate::protocol::*;
use crate::public::RLN;
use crate::public::{TreeConfigInput, RLN};
use crate::utils::*;
use ark_serialize::Read;
use serde_json::json;
use std::io::Cursor;
use utils::ZerokitMerkleTree;
@@ -181,7 +188,7 @@ mod tree_test {
#[test]
// We test merkle batch Merkle tree additions
fn test_merkle_operations() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -192,14 +199,14 @@ mod tree_test {
}
// We create a new tree
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We first add leaves one by one specifying the index
for (i, leaf) in leaves.iter().enumerate() {
// We check if the number of leaves set is consistent
assert_eq!(rln.tree.leaves_set(), i);
let mut buffer = Cursor::new(fr_to_bytes_le(&leaf));
let mut buffer = Cursor::new(fr_to_bytes_le(leaf));
rln.set_leaf(i, &mut buffer).unwrap();
}
@@ -209,11 +216,11 @@ mod tree_test {
let (root_single, _) = bytes_le_to_fr(&buffer.into_inner());
// We reset the tree to default
rln.set_tree(tree_height).unwrap();
rln.set_tree(tree_depth).unwrap();
// We add leaves one by one using the internal index (new leaves go in the next available position)
for leaf in &leaves {
let mut buffer = Cursor::new(fr_to_bytes_le(&leaf));
let mut buffer = Cursor::new(fr_to_bytes_le(leaf));
rln.set_next_leaf(&mut buffer).unwrap();
}
@@ -228,10 +235,10 @@ mod tree_test {
assert_eq!(root_single, root_next);
// We reset the tree to default
rln.set_tree(tree_height).unwrap();
rln.set_tree(tree_depth).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
rln.init_tree_with_leaves(&mut buffer).unwrap();
// We check if the number of leaves set is consistent
@@ -258,7 +265,7 @@ mod tree_test {
let (root_delete, _) = bytes_le_to_fr(&buffer.into_inner());
// We reset the tree to default
rln.set_tree(tree_height).unwrap();
rln.set_tree(tree_depth).unwrap();
let mut buffer = Cursor::new(Vec::<u8>::new());
rln.get_root(&mut buffer).unwrap();
@@ -271,7 +278,7 @@ mod tree_test {
// We test leaf setting with a custom index, to enable batch updates to the root
// Uses `set_leaves_from` to set leaves in a batch, from index `start_index`
fn test_leaf_setting_with_index() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -286,10 +293,10 @@ mod tree_test {
let set_index = rng.gen_range(0..no_of_leaves) as usize;
// We create a new tree
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
rln.init_tree_with_leaves(&mut buffer).unwrap();
// We check if the number of leaves set is consistent
@@ -300,14 +307,14 @@ mod tree_test {
rln.get_root(&mut buffer).unwrap();
let (root_batch_with_init, _) = bytes_le_to_fr(&buffer.into_inner());
// `init_tree_with_leaves` resets the tree to the height it was initialized with, using `set_tree`
// `init_tree_with_leaves` resets the tree to the depth it was initialized with, using `set_tree`
// We add leaves in a batch starting from index 0..set_index
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[0..set_index]).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[0..set_index]));
rln.init_tree_with_leaves(&mut buffer).unwrap();
// We add the remaining n leaves in a batch starting from index m
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[set_index..]).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[set_index..]));
rln.set_leaves_from(set_index, &mut buffer).unwrap();
// We check if the number of leaves set is consistent
@@ -321,11 +328,11 @@ mod tree_test {
assert_eq!(root_batch_with_init, root_batch_with_custom_index);
// We reset the tree to default
rln.set_tree(tree_height).unwrap();
rln.set_tree(tree_depth).unwrap();
// We add leaves one by one using the internal index (new leaves go in the next available position)
for leaf in &leaves {
let mut buffer = Cursor::new(fr_to_bytes_le(&leaf));
let mut buffer = Cursor::new(fr_to_bytes_le(leaf));
rln.set_next_leaf(&mut buffer).unwrap();
}
@@ -345,7 +352,7 @@ mod tree_test {
#[test]
// Tests the atomic_operation fn, which set_leaves_from uses internally
fn test_atomic_operation() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -356,10 +363,10 @@ mod tree_test {
}
// We create a new tree
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
rln.init_tree_with_leaves(&mut buffer).unwrap();
// We check if the number of leaves set is consistent
@@ -377,8 +384,8 @@ mod tree_test {
let last_leaf_index = no_of_leaves - 1;
let indices = vec![last_leaf_index as u8];
let last_leaf = vec![*last_leaf];
let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices).unwrap());
let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&last_leaf).unwrap());
let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices));
let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&last_leaf));
rln.atomic_operation(last_leaf_index, leaves_buffer, indices_buffer)
.unwrap();
@@ -394,7 +401,7 @@ mod tree_test {
#[test]
fn test_atomic_operation_zero_indexed() {
// Test duplicated from https://github.com/waku-org/go-zerokit-rln/pull/12/files
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -405,10 +412,10 @@ mod tree_test {
}
// We create a new tree
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
rln.init_tree_with_leaves(&mut buffer).unwrap();
// We check if the number of leaves set is consistent
@@ -422,8 +429,8 @@ mod tree_test {
let zero_index = 0;
let indices = vec![zero_index as u8];
let zero_leaf: Vec<Fr> = vec![];
let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices).unwrap());
let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&zero_leaf).unwrap());
let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices));
let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&zero_leaf));
rln.atomic_operation(0, leaves_buffer, indices_buffer)
.unwrap();
@@ -438,7 +445,7 @@ mod tree_test {
#[test]
fn test_atomic_operation_consistency() {
// Test duplicated from https://github.com/waku-org/go-zerokit-rln/pull/12/files
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -449,10 +456,10 @@ mod tree_test {
}
// We create a new tree
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
rln.init_tree_with_leaves(&mut buffer).unwrap();
// We check if the number of leaves set is consistent
@@ -466,8 +473,8 @@ mod tree_test {
let set_index = rng.gen_range(0..no_of_leaves) as usize;
let indices = vec![set_index as u8];
let zero_leaf: Vec<Fr> = vec![];
let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices).unwrap());
let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&zero_leaf).unwrap());
let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices));
let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&zero_leaf));
rln.atomic_operation(0, leaves_buffer, indices_buffer)
.unwrap();
@@ -486,11 +493,10 @@ mod tree_test {
assert_eq!(received_leaf, Fr::from(0));
}
#[allow(unused_must_use)]
#[test]
// This test checks that `set_leaves_from` returns an error when the index is out of bounds
fn test_set_leaves_bad_index() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -499,10 +505,10 @@ mod tree_test {
for _ in 0..no_of_leaves {
leaves.push(Fr::rand(&mut rng));
}
let bad_index = (1 << tree_height) - rng.gen_range(0..no_of_leaves) as usize;
let bad_index = (1 << tree_depth) - rng.gen_range(0..no_of_leaves) as usize;
// We create a new tree
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// Get root of empty tree
let mut buffer = Cursor::new(Vec::<u8>::new());
@@ -510,7 +516,9 @@ mod tree_test {
let (root_empty, _) = bytes_le_to_fr(&buffer.into_inner());
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
#[allow(unused_must_use)]
rln.set_leaves_from(bad_index, &mut buffer)
.expect_err("Should throw an error");
@@ -528,9 +536,9 @@ mod tree_test {
#[test]
fn test_get_leaf() {
// We generate a random tree
let tree_height = 10;
let tree_depth = 10;
let mut rng = thread_rng();
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We generate a random leaf
let leaf = Fr::rand(&mut rng);
@@ -553,9 +561,9 @@ mod tree_test {
#[test]
fn test_valid_metadata() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
let arbitrary_metadata: &[u8] = b"block_number:200000";
rln.set_metadata(arbitrary_metadata).unwrap();
@@ -569,9 +577,9 @@ mod tree_test {
#[test]
fn test_empty_metadata() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
let mut buffer = Cursor::new(Vec::<u8>::new());
rln.get_metadata(&mut buffer).unwrap();
@@ -582,7 +590,7 @@ mod tree_test {
#[test]
fn test_rln_proof() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -595,10 +603,10 @@ mod tree_test {
}
// We create a new RLN instance
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
rln.init_tree_with_leaves(&mut buffer).unwrap();
// Generate identity pair
@@ -616,9 +624,9 @@ mod tree_test {
let signal: [u8; 32] = rng.gen();
// We generate an epoch from a fixed test seed
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate an rln_identifier from a fixed test seed
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
// We generate an external nullifier
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We choose a message_id that satisfies 0 <= message_id < MESSAGE_LIMIT
@@ -656,7 +664,7 @@ mod tree_test {
#[test]
fn test_rln_with_witness() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -667,10 +675,10 @@ mod tree_test {
}
// We create a new RLN instance
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
rln.init_tree_with_leaves(&mut buffer).unwrap();
// Generate identity pair
@@ -688,9 +696,9 @@ mod tree_test {
let signal: [u8; 32] = rng.gen();
// We generate an epoch from a fixed test seed
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate an rln_identifier from a fixed test seed
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
// We generate an external nullifier
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We choose a message_id that satisfies 0 <= message_id < MESSAGE_LIMIT
@@ -739,7 +747,7 @@ mod tree_test {
#[test]
fn proof_verification_with_roots() {
// The first part is similar to test_rln_with_witness
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -750,10 +758,10 @@ mod tree_test {
}
// We create a new RLN instance
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
rln.init_tree_with_leaves(&mut buffer).unwrap();
// Generate identity pair
@@ -771,9 +779,9 @@ mod tree_test {
let signal: [u8; 32] = rng.gen();
// We generate an epoch from a fixed test seed
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate an rln_identifier from a fixed test seed
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
// We generate an external nullifier
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We choose a message_id that satisfies 0 <= message_id < MESSAGE_LIMIT
@@ -822,7 +830,7 @@ mod tree_test {
.verify_with_roots(&mut input_buffer.clone(), &mut roots_buffer)
.unwrap();
assert_eq!(verified, false);
assert!(!verified);
// We get the root of the tree obtained by adding one leaf at a time
let mut buffer = Cursor::new(Vec::<u8>::new());
@@ -841,10 +849,10 @@ mod tree_test {
#[test]
fn test_recover_id_secret() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
// We create a new RLN instance
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// Generate identity pair
let (identity_secret_hash, id_commitment) = keygen();
@@ -863,9 +871,9 @@ mod tree_test {
let signal2: [u8; 32] = rng.gen();
// We generate an epoch from a fixed test seed
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate an rln_identifier from a fixed test seed
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
// We generate an external nullifier
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We choose a message_id that satisfies 0 <= message_id < MESSAGE_LIMIT
@@ -876,7 +884,7 @@ mod tree_test {
// We prepare input for generate_rln_proof API
// input_data is [ identity_secret<32> | id_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal<var> ]
let prove_input1 = prepare_prove_input(
identity_secret_hash,
identity_secret_hash.clone(),
identity_index,
user_message_limit,
message_id,
@@ -885,7 +893,7 @@ mod tree_test {
);
let prove_input2 = prepare_prove_input(
identity_secret_hash,
identity_secret_hash.clone(),
identity_index,
user_message_limit,
message_id,
@@ -928,7 +936,7 @@ mod tree_test {
// We check if the recovered identity secret hash corresponds to the original one
let (recovered_identity_secret_hash, _) = bytes_le_to_fr(&serialized_identity_secret_hash);
assert_eq!(recovered_identity_secret_hash, identity_secret_hash);
assert_eq!(recovered_identity_secret_hash, *identity_secret_hash);
// We now test that computing identity_secret_hash fails if the shares passed were computed from two different identity secret hashes within the same epoch
@@ -978,20 +986,59 @@ mod tree_test {
// ensure that the recovered secret does not match either of the
// secrets used in proof generation
assert_ne!(recovered_identity_secret_hash_new, identity_secret_hash_new);
assert_ne!(
recovered_identity_secret_hash_new,
*identity_secret_hash_new
);
}
#[test]
fn test_tree_config_input_trait() {
let empty_json_input = generate_input_buffer();
let rln_with_empty_json_config = RLN::new(TEST_TREE_DEPTH, empty_json_input);
assert!(rln_with_empty_json_config.is_ok());
let json_config = json!({
"tree_config": {
"path": "pmtree-123456",
"temporary": false,
"cache_capacity": 1073741824,
"flush_every_ms": 500,
"mode": "HighThroughput",
"use_compression": false
}
});
let json_input = Cursor::new(json_config.to_string());
let rln_with_json_config = RLN::new(TEST_TREE_DEPTH, json_input.clone());
assert!(rln_with_json_config.is_ok());
let json_to_tree_config = json_input.into_tree_config();
assert!(json_to_tree_config.is_ok());
let rln_with_json_to_tree_config = RLN::new(TEST_TREE_DEPTH, json_to_tree_config.unwrap());
assert!(rln_with_json_to_tree_config.is_ok());
let default_pmtree_config = PmtreeConfig::default();
let rln_with_default_tree_config = RLN::new(TEST_TREE_DEPTH, default_pmtree_config);
assert!(rln_with_default_tree_config.is_ok());
let custom_pmtree_config = PmtreeConfig::builder()
.temporary(true)
.use_compression(false)
.build();
let rln_with_custom_tree_config = RLN::new(TEST_TREE_DEPTH, custom_pmtree_config.unwrap());
assert!(rln_with_custom_tree_config.is_ok());
}
}
#[cfg(feature = "stateless")]
mod stateless_test {
use crate::circuit::{Fr, TEST_TREE_HEIGHT};
use crate::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash};
use crate::poseidon_tree::PoseidonTree;
use crate::circuit::{Fr, TEST_TREE_DEPTH};
use crate::hashers::{hash_to_field_le, poseidon_hash as utils_poseidon_hash, PoseidonHash};
use crate::protocol::*;
use crate::public::RLN;
use crate::utils::*;
use std::io::Cursor;
use utils::ZerokitMerkleTree;
use utils::{OptimalMerkleTree, ZerokitMerkleProof, ZerokitMerkleTree};
use ark_std::{rand::thread_rng, UniformRand};
use rand::Rng;
@@ -1004,10 +1051,10 @@ mod stateless_test {
let mut rln = RLN::new().unwrap();
let default_leaf = Fr::from(0);
let mut tree = PoseidonTree::new(
TEST_TREE_HEIGHT,
let mut tree: OptimalMerkleTree<PoseidonHash> = OptimalMerkleTree::new(
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<PoseidonTree>::default(),
ConfigOf::<OptimalMerkleTree<PoseidonHash>>::default(),
)
.unwrap();
@@ -1025,20 +1072,21 @@ mod stateless_test {
let signal: [u8; 32] = rng.gen();
// We generate an epoch from a fixed test seed
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate an rln_identifier from a fixed test seed
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We prepare input for generate_rln_proof API
// input_data is [ identity_secret<32> | id_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal<var> ]
let x = hash_to_field(&signal);
let x = hash_to_field_le(&signal);
let merkle_proof = tree.proof(identity_index).expect("proof should exist");
let rln_witness = rln_witness_from_values(
identity_secret_hash,
&merkle_proof,
merkle_proof.get_path_elements(),
merkle_proof.get_path_index(),
x,
external_nullifier,
user_message_limit,
@@ -1080,7 +1128,7 @@ mod stateless_test {
.verify_with_roots(&mut input_buffer.clone(), &mut roots_buffer)
.unwrap();
assert_eq!(verified, false);
assert!(!verified);
// We get the root of the tree obtained by adding one leaf at a time
let root = tree.root();
@@ -1101,10 +1149,10 @@ mod stateless_test {
let mut rln = RLN::new().unwrap();
let default_leaf = Fr::from(0);
let mut tree = PoseidonTree::new(
TEST_TREE_HEIGHT,
let mut tree: OptimalMerkleTree<PoseidonHash> = OptimalMerkleTree::new(
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<PoseidonTree>::default(),
ConfigOf::<OptimalMerkleTree<PoseidonHash>>::default(),
)
.unwrap();
@@ -1115,25 +1163,26 @@ mod stateless_test {
tree.update_next(rate_commitment).unwrap();
// We generate an epoch from a fixed test seed
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate an rln_identifier from a fixed test seed
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We generate a random signal
let mut rng = thread_rng();
let signal1: [u8; 32] = rng.gen();
let x1 = hash_to_field(&signal1);
let x1 = hash_to_field_le(&signal1);
let signal2: [u8; 32] = rng.gen();
let x2 = hash_to_field(&signal2);
let x2 = hash_to_field_le(&signal2);
let identity_index = tree.leaves_set();
let merkle_proof = tree.proof(identity_index).expect("proof should exist");
let rln_witness1 = rln_witness_from_values(
identity_secret_hash,
&merkle_proof,
identity_secret_hash.clone(),
merkle_proof.get_path_elements(),
merkle_proof.get_path_index(),
x1,
external_nullifier,
user_message_limit,
@@ -1142,8 +1191,9 @@ mod stateless_test {
.unwrap();
let rln_witness2 = rln_witness_from_values(
identity_secret_hash,
&merkle_proof,
identity_secret_hash.clone(),
merkle_proof.get_path_elements(),
merkle_proof.get_path_index(),
x2,
external_nullifier,
user_message_limit,
@@ -1182,7 +1232,7 @@ mod stateless_test {
// We check if the recovered identity secret hash corresponds to the original one
let (recovered_identity_secret_hash, _) = bytes_le_to_fr(&serialized_identity_secret_hash);
assert_eq!(recovered_identity_secret_hash, identity_secret_hash);
assert_eq!(recovered_identity_secret_hash, *identity_secret_hash);
// We now test that computing identity_secret_hash is unsuccessful if shares computed from two different identity secret hashes but within the same epoch are passed
@@ -1192,14 +1242,15 @@ mod stateless_test {
tree.update_next(rate_commitment_new).unwrap();
let signal3: [u8; 32] = rng.gen();
let x3 = hash_to_field(&signal3);
let x3 = hash_to_field_le(&signal3);
let identity_index_new = tree.leaves_set();
let merkle_proof_new = tree.proof(identity_index_new).expect("proof should exist");
let rln_witness3 = rln_witness_from_values(
identity_secret_hash_new,
&merkle_proof_new,
identity_secret_hash_new.clone(),
merkle_proof_new.get_path_elements(),
merkle_proof_new.get_path_index(),
x3,
external_nullifier,
user_message_limit,
@@ -1226,7 +1277,7 @@ mod stateless_test {
let serialized_identity_secret_hash = output_buffer.into_inner();
let (recovered_identity_secret_hash_new, _) =
bytes_le_to_fr(&serialized_identity_secret_hash);
IdSecret::from_bytes_le(&serialized_identity_secret_hash);
// ensure that the recovered secret does not match either of the
// secrets used in proof generation

View File

@@ -1,29 +1,34 @@
// This crate provides useful cross-module utilities (mainly type conversions), not necessarily specific to RLN
use crate::circuit::Fr;
use crate::error::ConversionError;
use ark_ff::PrimeField;
use color_eyre::{Report, Result};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_std::UniformRand;
use num_bigint::{BigInt, BigUint};
use num_traits::Num;
use rand::Rng;
use ruint::aliases::U256;
use serde_json::json;
use std::io::Cursor;
use crate::circuit::Fr;
use std::ops::Deref;
use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
#[inline(always)]
pub fn to_bigint(el: &Fr) -> Result<BigInt> {
Ok(BigUint::from(*el).into())
pub fn to_bigint(el: &Fr) -> BigInt {
BigUint::from(*el).into()
}
#[inline(always)]
pub fn fr_byte_size() -> usize {
pub const fn fr_byte_size() -> usize {
let mbs = <Fr as PrimeField>::MODULUS_BIT_SIZE;
((mbs + 64 - (mbs % 64)) / 8) as usize
}
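As a quick worked example (assuming the BN254 scalar field used throughout this crate, whose modulus is 254 bits): rounding up to a whole number of 64-bit limbs gives 256 bits, i.e. 32 bytes.

```rust
fn main() {
    // BN254: MODULUS_BIT_SIZE = 254; pad up to a whole number of 64-bit limbs.
    // (254 + 64 - (254 % 64)) / 8 = (254 + 64 - 62) / 8 = 256 / 8 = 32 bytes.
    let mbs: u32 = 254;
    assert_eq!(((mbs + 64 - (mbs % 64)) / 8) as usize, 32);
}
```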
#[inline(always)]
pub fn str_to_fr(input: &str, radix: u32) -> Result<Fr> {
pub fn str_to_fr(input: &str, radix: u32) -> Result<Fr, ConversionError> {
if !(radix == 10 || radix == 16) {
return Err(Report::msg("wrong radix"));
return Err(ConversionError::WrongRadix);
}
// We remove any quote present and we trim
@@ -48,6 +53,15 @@ pub fn bytes_le_to_fr(input: &[u8]) -> (Fr, usize) {
)
}
#[inline(always)]
pub fn bytes_be_to_fr(input: &[u8]) -> (Fr, usize) {
let el_size = fr_byte_size();
(
Fr::from(BigUint::from_bytes_be(&input[0..el_size])),
el_size,
)
}
#[inline(always)]
pub fn fr_to_bytes_le(input: &Fr) -> Vec<u8> {
let input_biguint: BigUint = (*input).into();
@@ -58,78 +72,205 @@ pub fn fr_to_bytes_le(input: &Fr) -> Vec<u8> {
}
#[inline(always)]
pub fn vec_fr_to_bytes_le(input: &[Fr]) -> Result<Vec<u8>> {
pub fn fr_to_bytes_be(input: &Fr) -> Vec<u8> {
let input_biguint: BigUint = (*input).into();
let mut res = input_biguint.to_bytes_be();
// For BE, insert 0 at the start of the Vec (see also fr_to_bytes_le comments)
let to_insert_count = fr_byte_size().saturating_sub(res.len());
if to_insert_count > 0 {
// Insert multiple zeros at index 0
res.splice(0..0, std::iter::repeat_n(0, to_insert_count));
}
res
}
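A minimal round-trip sketch for the new big-endian path, assuming these utils are public as shown in this diff:

```rust
use rln::circuit::Fr;
use rln::utils::{bytes_be_to_fr, fr_byte_size, fr_to_bytes_be};

fn main() {
    // The encoding is always fr_byte_size() (32) bytes, zero-padded at the front.
    let el = Fr::from(42u64);
    let bytes = fr_to_bytes_be(&el);
    assert_eq!(bytes.len(), fr_byte_size());
    let (decoded, read) = bytes_be_to_fr(&bytes);
    assert_eq!(decoded, el);
    assert_eq!(read, fr_byte_size());
}
```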
#[inline(always)]
pub fn vec_fr_to_bytes_le(input: &[Fr]) -> Vec<u8> {
// Calculate capacity for Vec:
// - 8 bytes for normalized vector length (usize)
// - each Fr element requires fr_byte_size() bytes (typically 32 bytes)
let mut bytes = Vec::with_capacity(8 + input.len() * fr_byte_size());
// We store the vector length
bytes.extend_from_slice(&normalize_usize(input.len()));
bytes.extend_from_slice(&normalize_usize_le(input.len()));
// We store each element
for el in input {
bytes.extend_from_slice(&fr_to_bytes_le(el));
}
Ok(bytes)
bytes
}
#[inline(always)]
pub fn vec_u8_to_bytes_le(input: &[u8]) -> Result<Vec<u8>> {
pub fn vec_fr_to_bytes_be(input: &[Fr]) -> Vec<u8> {
// Calculate capacity for Vec:
// - 8 bytes for normalized vector length (usize)
// - each Fr element requires fr_byte_size() bytes (typically 32 bytes)
let mut bytes = Vec::with_capacity(8 + input.len() * fr_byte_size());
// We store the vector length
bytes.extend_from_slice(&normalize_usize_be(input.len()));
// We store each element
for el in input {
bytes.extend_from_slice(&fr_to_bytes_be(el));
}
bytes
}
#[inline(always)]
pub fn vec_u8_to_bytes_le(input: &[u8]) -> Vec<u8> {
// Calculate capacity for Vec:
// - 8 bytes for normalized vector length (usize)
// - variable length input data
let mut bytes = Vec::with_capacity(8 + input.len());
// We store the vector length
bytes.extend_from_slice(&normalize_usize(input.len()));
bytes.extend_from_slice(&normalize_usize_le(input.len()));
// We store the input
bytes.extend_from_slice(input);
Ok(bytes)
bytes
}
#[inline(always)]
pub fn bytes_le_to_vec_u8(input: &[u8]) -> Result<(Vec<u8>, usize)> {
let mut read: usize = 0;
pub fn vec_u8_to_bytes_be(input: &[u8]) -> Vec<u8> {
// Calculate capacity for Vec:
// - 8 bytes for normalized vector length (usize)
// - variable length input data
let mut bytes = Vec::with_capacity(8 + input.len());
// We store the vector length
bytes.extend_from_slice(&normalize_usize_be(input.len()));
// We store the input
bytes.extend_from_slice(input);
bytes
}
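To make the wire format concrete, a small sketch (assuming the public helpers in this file): the layout is an 8-byte length prefix followed by the raw payload.

```rust
use rln::utils::{normalize_usize_le, vec_u8_to_bytes_le};

fn main() {
    // [ len<8, little-endian> | payload<var> ]
    let payload = [1u8, 2, 3];
    let bytes = vec_u8_to_bytes_le(&payload);
    let prefix = normalize_usize_le(payload.len());
    assert_eq!(&bytes[0..8], &prefix[..]);
    assert_eq!(&bytes[8..], &payload[..]);
}
```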
#[inline(always)]
pub fn bytes_le_to_vec_u8(input: &[u8]) -> Result<(Vec<u8>, usize), ConversionError> {
let mut read: usize = 0;
if input.len() < 8 {
return Err(ConversionError::InsufficientData {
expected: 8,
actual: input.len(),
});
}
let len = usize::try_from(u64::from_le_bytes(input[0..8].try_into()?))?;
read += 8;
if input.len() < 8 + len {
return Err(ConversionError::InsufficientData {
expected: 8 + len,
actual: input.len(),
});
}
let res = input[8..8 + len].to_vec();
read += res.len();
Ok((res, read))
}
#[inline(always)]
pub fn bytes_le_to_vec_fr(input: &[u8]) -> Result<(Vec<Fr>, usize)> {
pub fn bytes_be_to_vec_u8(input: &[u8]) -> Result<(Vec<u8>, usize), ConversionError> {
let mut read: usize = 0;
let mut res: Vec<Fr> = Vec::new();
if input.len() < 8 {
return Err(ConversionError::InsufficientData {
expected: 8,
actual: input.len(),
});
}
let len = usize::try_from(u64::from_be_bytes(input[0..8].try_into()?))?;
read += 8;
if input.len() < 8 + len {
return Err(ConversionError::InsufficientData {
expected: 8 + len,
actual: input.len(),
});
}
let res = input[8..8 + len].to_vec();
read += res.len();
Ok((res, read))
}
#[inline(always)]
pub fn bytes_le_to_vec_fr(input: &[u8]) -> Result<(Vec<Fr>, usize), ConversionError> {
let mut read: usize = 0;
if input.len() < 8 {
return Err(ConversionError::InsufficientData {
expected: 8,
actual: input.len(),
});
}
let len = usize::try_from(u64::from_le_bytes(input[0..8].try_into()?))?;
read += 8;
let el_size = fr_byte_size();
if input.len() < 8 + len * el_size {
return Err(ConversionError::InsufficientData {
expected: 8 + len * el_size,
actual: input.len(),
});
}
let mut res: Vec<Fr> = Vec::with_capacity(len);
for i in 0..len {
let (curr_el, _) = bytes_le_to_fr(&input[8 + el_size * i..8 + el_size * (i + 1)]);
res.push(curr_el);
read += el_size;
}
Ok((res, read))
}
#[inline(always)]
pub fn bytes_le_to_vec_usize(input: &[u8]) -> Result<Vec<usize>> {
pub fn bytes_be_to_vec_fr(input: &[u8]) -> Result<(Vec<Fr>, usize), ConversionError> {
let mut read: usize = 0;
if input.len() < 8 {
return Err(ConversionError::InsufficientData {
expected: 8,
actual: input.len(),
});
}
let len = usize::try_from(u64::from_be_bytes(input[0..8].try_into()?))?;
read += 8;
let el_size = fr_byte_size();
if input.len() < 8 + len * el_size {
return Err(ConversionError::InsufficientData {
expected: 8 + len * el_size,
actual: input.len(),
});
}
let mut res: Vec<Fr> = Vec::with_capacity(len);
for i in 0..len {
let (curr_el, _) = bytes_be_to_fr(&input[8 + el_size * i..8 + el_size * (i + 1)]);
res.push(curr_el);
read += el_size;
}
Ok((res, read))
}
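The InsufficientData guards above replace slice panics with typed errors. A small sketch of the failure mode, assuming `ConversionError` derives Debug and is reachable at the path used in this file:

```rust
use rln::error::ConversionError;
use rln::utils::bytes_le_to_vec_fr;

fn main() {
    // A buffer shorter than the 8-byte length prefix is rejected up front.
    let too_short = [0u8; 4];
    match bytes_le_to_vec_fr(&too_short) {
        Err(ConversionError::InsufficientData { expected, actual }) => {
            assert_eq!((expected, actual), (8, 4));
        }
        other => panic!("expected InsufficientData, got {other:?}"),
    }
}
```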
#[inline(always)]
pub fn bytes_le_to_vec_usize(input: &[u8]) -> Result<Vec<usize>, ConversionError> {
if input.len() < 8 {
return Err(ConversionError::InsufficientData {
expected: 8,
actual: input.len(),
});
}
let nof_elem = usize::try_from(u64::from_le_bytes(input[0..8].try_into()?))?;
if nof_elem == 0 {
Ok(vec![])
} else {
if input.len() < 8 + nof_elem * 8 {
return Err(ConversionError::InsufficientData {
expected: 8 + nof_elem * 8,
actual: input.len(),
});
}
let elements: Vec<usize> = input[8..]
.chunks(8)
.take(nof_elem)
.map(|ch| usize::from_le_bytes(ch[0..8].try_into().unwrap()))
.collect();
Ok(elements)
@@ -137,11 +278,144 @@ pub fn bytes_le_to_vec_usize(input: &[u8]) -> Result<Vec<usize>> {
}
#[inline(always)]
pub fn normalize_usize(input: usize) -> [u8; 8] {
input.to_le_bytes()
pub fn bytes_be_to_vec_usize(input: &[u8]) -> Result<Vec<usize>, ConversionError> {
if input.len() < 8 {
return Err(ConversionError::InsufficientData {
expected: 8,
actual: input.len(),
});
}
let nof_elem = usize::try_from(u64::from_be_bytes(input[0..8].try_into()?))?;
if nof_elem == 0 {
Ok(vec![])
} else {
if input.len() < 8 + nof_elem * 8 {
return Err(ConversionError::InsufficientData {
expected: 8 + nof_elem * 8,
actual: input.len(),
});
}
let elements: Vec<usize> = input[8..]
.chunks(8)
.take(nof_elem)
.map(|ch| usize::from_be_bytes(ch[0..8].try_into().unwrap()))
.collect();
Ok(elements)
}
}
/// Normalizes a `usize` into an 8-byte array, ensuring consistency across architectures.
/// On 32-bit systems, the result is zero-padded to 8 bytes.
/// On 64-bit systems, it directly represents the `usize` value.
#[inline(always)]
pub fn normalize_usize_le(input: usize) -> [u8; 8] {
let mut bytes = [0u8; 8];
let input_bytes = input.to_le_bytes();
bytes[..input_bytes.len()].copy_from_slice(&input_bytes);
bytes
}
/// Normalizes a `usize` into an 8-byte array, ensuring consistency across architectures.
/// On 32-bit systems, the result is zero-padded to 8 bytes.
/// On 64-bit systems, it directly represents the `usize` value.
#[inline(always)]
pub fn normalize_usize_be(input: usize) -> [u8; 8] {
let mut bytes = [0u8; 8];
let input_bytes = input.to_be_bytes();
bytes[..input_bytes.len()].copy_from_slice(&input_bytes);
bytes
}
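A tiny check of the normalization, which is what makes the serialized length prefix identical on 32-bit and 64-bit targets:

```rust
use rln::utils::{normalize_usize_be, normalize_usize_le};

fn main() {
    // Always exactly 8 bytes, regardless of the platform's usize width.
    assert_eq!(normalize_usize_le(5), [5, 0, 0, 0, 0, 0, 0, 0]);
    assert_eq!(normalize_usize_be(5), [0, 0, 0, 0, 0, 0, 0, 5]);
}
```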
#[inline(always)] // used for tests
pub fn generate_input_buffer() -> Cursor<String> {
Cursor::new(json!({}).to_string())
}
#[derive(
Debug, Zeroize, ZeroizeOnDrop, Clone, PartialEq, CanonicalSerialize, CanonicalDeserialize,
)]
pub struct IdSecret(ark_bn254::Fr);
impl IdSecret {
pub fn rand<R: Rng + ?Sized>(rng: &mut R) -> Self {
let mut fr = Fr::rand(rng);
let res = Self::from(&mut fr);
// No need to zeroize fr (already zeroized in the From implementation)
#[allow(clippy::let_and_return)]
res
}
pub fn from_bytes_le(input: &[u8]) -> (Self, usize) {
let el_size = fr_byte_size();
let b_uint = BigUint::from_bytes_le(&input[0..el_size]);
let mut fr = Fr::from(b_uint);
let res = IdSecret::from(&mut fr);
// Note: no zeroize on b_uint as it has been moved
(res, el_size)
}
pub(crate) fn to_bytes_le(&self) -> Zeroizing<Vec<u8>> {
let input_biguint: BigUint = self.0.into();
let mut res = input_biguint.to_bytes_le();
res.resize(fr_byte_size(), 0);
Zeroizing::new(res)
}
pub(crate) fn to_bytes_be(&self) -> Zeroizing<Vec<u8>> {
let input_biguint: BigUint = self.0.into();
let mut res = input_biguint.to_bytes_be();
let to_insert_count = fr_byte_size().saturating_sub(res.len());
if to_insert_count > 0 {
// Insert multiple zeros at index 0
res.splice(0..0, std::iter::repeat_n(0, to_insert_count));
}
Zeroizing::new(res)
}
/// Warning: this can leak the secret value
/// Warning: the leaked value is of type 'U256', which implements Copy (copies will not be zeroized)
pub(crate) fn to_u256(&self) -> U256 {
let mut big_int = self.0.into_bigint();
let res = U256::from_limbs(big_int.0);
big_int.zeroize();
res
}
}
impl From<&mut Fr> for IdSecret {
fn from(value: &mut Fr) -> Self {
let id_secret = Self(*value);
value.zeroize();
id_secret
}
}
impl Deref for IdSecret {
type Target = Fr;
/// Deref to &Fr
///
/// Warning: this can leak the secret value
/// Warning: the leaked value is of type 'Fr', which implements Copy (copies will not be zeroized)
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[derive(Debug, Zeroize, ZeroizeOnDrop)]
pub enum FrOrSecret {
IdSecret(IdSecret),
Fr(Fr),
}
impl From<Fr> for FrOrSecret {
fn from(value: Fr) -> Self {
FrOrSecret::Fr(value)
}
}
impl From<IdSecret> for FrOrSecret {
fn from(value: IdSecret) -> Self {
FrOrSecret::IdSecret(value)
}
}
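A short usage sketch of the new IdSecret wrapper, assuming `rand` matches the version the crate depends on; note that `to_bytes_le`/`to_bytes_be` are `pub(crate)`, so external callers interact through `from_bytes_le` and `Deref`.

```rust
use rand::thread_rng;
use rln::utils::{fr_byte_size, IdSecret};

fn main() {
    // The inner Fr is zeroized when the IdSecret is dropped.
    let secret = IdSecret::rand(&mut thread_rng());

    // Construction consumes a fixed-size little-endian encoding.
    let encoded = vec![7u8; fr_byte_size()];
    let (decoded, read) = IdSecret::from_bytes_le(&encoded);
    assert_eq!(read, fr_byte_size());

    // Deref to &Fr is available but leaks a Copy value out of the zeroizing
    // wrapper, hence the warnings on the impl.
    assert_ne!(*decoded, *secret);
}
```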

View File

@@ -3,10 +3,10 @@
mod test {
use ark_std::{rand::thread_rng, UniformRand};
use rand::Rng;
use rln::circuit::*;
use rln::ffi::{hash as ffi_hash, poseidon_hash as ffi_poseidon_hash, *};
use rln::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS};
use rln::protocol::*;
use rln::circuit::{Fr, TEST_TREE_DEPTH};
use rln::ffi::*;
use rln::hashers::{hash_to_field_le, poseidon_hash as utils_poseidon_hash};
use rln::protocol::{deserialize_identity_tuple_le, *};
use rln::public::RLN;
use rln::utils::*;
use serde_json::json;
@@ -14,6 +14,7 @@ mod test {
use std::io::Read;
use std::mem::MaybeUninit;
use std::time::{Duration, Instant};
use zeroize::Zeroize;
const NO_OF_LEAVES: usize = 256;
@@ -21,13 +22,13 @@ mod test {
let mut rln_pointer = MaybeUninit::<*mut RLN>::uninit();
let input_config = json!({}).to_string();
let input_buffer = &Buffer::from(input_config.as_bytes());
let success = new(TEST_TREE_HEIGHT, input_buffer, rln_pointer.as_mut_ptr());
let success = new(TEST_TREE_DEPTH, input_buffer, rln_pointer.as_mut_ptr());
assert!(success, "RLN object creation failed");
unsafe { &mut *rln_pointer.assume_init() }
}
fn set_leaves_init(rln_pointer: &mut RLN, leaves: &[Fr]) {
let leaves_ser = vec_fr_to_bytes_le(&leaves).unwrap();
let leaves_ser = vec_fr_to_bytes_le(leaves);
let input_buffer = &Buffer::from(leaves_ser.as_ref());
let success = init_tree_with_leaves(rln_pointer, input_buffer);
assert!(success, "init tree with leaves call failed");
@@ -49,14 +50,14 @@ mod test {
root
}
fn identity_pair_gen(rln_pointer: &mut RLN) -> (Fr, Fr) {
fn identity_pair_gen() -> (IdSecret, Fr) {
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = key_gen(rln_pointer, output_buffer.as_mut_ptr());
let success = key_gen(output_buffer.as_mut_ptr(), true);
assert!(success, "key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_secret_hash, read) = bytes_le_to_fr(&result_data);
let (id_commitment, _) = bytes_le_to_fr(&result_data[read..].to_vec());
let (identity_secret_hash, read) = IdSecret::from_bytes_le(&result_data);
let (id_commitment, _) = bytes_le_to_fr(&result_data[read..]);
(identity_secret_hash, id_commitment)
}
@@ -80,7 +81,7 @@ mod test {
// We first add leaves one by one specifying the index
for (i, leaf) in leaves.iter().enumerate() {
// We prepare the rate_commitment and we set the leaf at provided index
let leaf_ser = fr_to_bytes_le(&leaf);
let leaf_ser = fr_to_bytes_le(leaf);
let input_buffer = &Buffer::from(leaf_ser.as_ref());
let success = set_leaf(rln_pointer, i, input_buffer);
assert!(success, "set leaf call failed");
@@ -90,12 +91,12 @@ mod test {
let root_single = get_tree_root(rln_pointer);
// We reset the tree to default
let success = set_tree(rln_pointer, TEST_TREE_HEIGHT);
let success = set_tree(rln_pointer, TEST_TREE_DEPTH);
assert!(success, "set tree call failed");
// We add leaves one by one using the internal index (new leaves go in the next available position)
for leaf in &leaves {
let leaf_ser = fr_to_bytes_le(&leaf);
let leaf_ser = fr_to_bytes_le(leaf);
let input_buffer = &Buffer::from(leaf_ser.as_ref());
let success = set_next_leaf(rln_pointer, input_buffer);
assert!(success, "set next leaf call failed");
@@ -108,7 +109,7 @@ mod test {
assert_eq!(root_single, root_next);
// We reset the tree to default
let success = set_tree(rln_pointer, TEST_TREE_HEIGHT);
let success = set_tree(rln_pointer, TEST_TREE_DEPTH);
assert!(success, "set tree call failed");
// We add leaves in a batch into the tree
@@ -131,7 +132,7 @@ mod test {
let root_delete = get_tree_root(rln_pointer);
// We reset the tree to default
let success = set_tree(rln_pointer, TEST_TREE_HEIGHT);
let success = set_tree(rln_pointer, TEST_TREE_DEPTH);
assert!(success, "set tree call failed");
// We get the root of the empty tree
@@ -156,6 +157,7 @@ mod test {
// random number between 0..no_of_leaves
let mut rng = thread_rng();
let set_index = rng.gen_range(0..NO_OF_LEAVES) as usize;
println!("set_index: {set_index}");
// We add leaves in a batch into the tree
set_leaves_init(rln_pointer, &leaves);
@@ -163,28 +165,31 @@ mod test {
// We get the root of the tree obtained by adding leaves in a batch
let root_batch_with_init = get_tree_root(rln_pointer);
// `init_tree_with_leaves` resets the tree to the height it was initialized with, using `set_tree`
// `init_tree_with_leaves` resets the tree to the depth it was initialized with, using `set_tree`
// We add leaves in a batch starting from index 0..set_index
set_leaves_init(rln_pointer, &leaves[0..set_index]);
// We add the remaining n leaves in a batch starting from index set_index
let leaves_n = vec_fr_to_bytes_le(&leaves[set_index..]).unwrap();
let leaves_n = vec_fr_to_bytes_le(&leaves[set_index..]);
let buffer = &Buffer::from(leaves_n.as_ref());
let success = set_leaves_from(rln_pointer, set_index, buffer);
assert!(success, "set leaves from call failed");
// We get the root of the tree obtained by adding leaves in a batch
let root_batch_with_custom_index = get_tree_root(rln_pointer);
assert_eq!(root_batch_with_init, root_batch_with_custom_index);
assert_eq!(
root_batch_with_init, root_batch_with_custom_index,
"root batch !="
);
// We reset the tree to default
let success = set_tree(rln_pointer, TEST_TREE_HEIGHT);
let success = set_tree(rln_pointer, TEST_TREE_DEPTH);
assert!(success, "set tree call failed");
// We add leaves one by one using the internal index (new leaves go in the next available position)
for leaf in &leaves {
let leaf_ser = fr_to_bytes_le(&leaf);
let leaf_ser = fr_to_bytes_le(leaf);
let input_buffer = &Buffer::from(leaf_ser.as_ref());
let success = set_next_leaf(rln_pointer, input_buffer);
assert!(success, "set next leaf call failed");
@@ -192,7 +197,10 @@ mod test {
// We get the root of the tree obtained by adding leaves using the internal index
let root_single_additions = get_tree_root(rln_pointer);
assert_eq!(root_batch_with_init, root_single_additions);
assert_eq!(
root_batch_with_init, root_single_additions,
"root single additions !="
);
}
#[test]
@@ -213,17 +221,12 @@ mod test {
let last_leaf_index = NO_OF_LEAVES - 1;
let indices = vec![last_leaf_index as u8];
let last_leaf = vec![*last_leaf];
let indices = vec_u8_to_bytes_le(&indices).unwrap();
let indices = vec_u8_to_bytes_le(&indices);
let indices_buffer = &Buffer::from(indices.as_ref());
let leaves = vec_fr_to_bytes_le(&last_leaf).unwrap();
let leaves = vec_fr_to_bytes_le(&last_leaf);
let leaves_buffer = &Buffer::from(leaves.as_ref());
let success = atomic_operation(
rln_pointer,
last_leaf_index as usize,
leaves_buffer,
indices_buffer,
);
let success = atomic_operation(rln_pointer, last_leaf_index, leaves_buffer, indices_buffer);
assert!(success, "atomic operation call failed");
// We get the root of the tree obtained after a no-op
@@ -240,13 +243,13 @@ mod test {
let rln_pointer = create_rln_instance();
let mut rng = thread_rng();
let bad_index = (1 << TEST_TREE_HEIGHT) - rng.gen_range(0..NO_OF_LEAVES) as usize;
let bad_index = (1 << TEST_TREE_DEPTH) - rng.gen_range(0..NO_OF_LEAVES) as usize;
// Get root of empty tree
let root_empty = get_tree_root(rln_pointer);
// We add leaves in a batch into the tree
let leaves = vec_fr_to_bytes_le(&leaves).unwrap();
let leaves = vec_fr_to_bytes_le(&leaves);
let buffer = &Buffer::from(leaves.as_ref());
let success = set_leaves_from(rln_pointer, bad_index, buffer);
assert!(!success, "set leaves from call succeeded");
@@ -264,8 +267,11 @@ mod test {
let rln_pointer = create_rln_instance();
// generate identity
let identity_secret_hash = hash_to_field(b"test-merkle-proof");
let id_commitment = utils_poseidon_hash(&[identity_secret_hash]);
let mut identity_secret_hash_ = hash_to_field_le(b"test-merkle-proof");
let identity_secret_hash = IdSecret::from(&mut identity_secret_hash_);
let mut to_hash = [*identity_secret_hash.clone()];
let id_commitment = utils_poseidon_hash(&to_hash);
to_hash[0].zeroize();
let user_message_limit = Fr::from(100);
let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);
@@ -298,7 +304,7 @@ mod test {
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (path_elements, read) = bytes_le_to_vec_fr(&result_data).unwrap();
let (identity_path_index, _) = bytes_le_to_vec_u8(&result_data[read..].to_vec()).unwrap();
let (identity_path_index, _) = bytes_le_to_vec_u8(&result_data[read..]).unwrap();
// We check correct computation of the path and indexes
let expected_path_elements: Vec<Fr> = [
@@ -358,7 +364,7 @@ mod test {
for _ in 0..sample_size {
// We generate random witness instances and relative proof values
let rln_witness = random_rln_witness(TEST_TREE_HEIGHT);
let rln_witness = random_rln_witness(TEST_TREE_DEPTH);
let proof_values = proof_values_from_witness(&rln_witness).unwrap();
// We prepare id_commitment and we set the leaf at provided index
@@ -386,7 +392,7 @@ mod test {
let success = verify(rln_pointer, input_buffer, proof_is_valid_ptr);
verify_time += now.elapsed().as_nanos();
assert!(success, "verify call failed");
assert_eq!(proof_is_valid, true);
assert!(proof_is_valid);
}
println!(
@@ -408,12 +414,9 @@ mod test {
// We obtain the root from the RLN instance
let root_rln_folder = get_tree_root(rln_pointer);
#[cfg(feature = "arkzkey")]
let zkey_path = "./resources/tree_height_20/rln_final.arkzkey";
#[cfg(not(feature = "arkzkey"))]
let zkey_path = "./resources/tree_height_20/rln_final.zkey";
let mut zkey_file = File::open(&zkey_path).expect("no file found");
let metadata = std::fs::metadata(&zkey_path).expect("unable to read metadata");
let zkey_path = "./resources/tree_depth_10/rln_final.arkzkey";
let mut zkey_file = File::open(zkey_path).expect("no file found");
let metadata = std::fs::metadata(zkey_path).expect("unable to read metadata");
let mut zkey_buffer = vec![0; metadata.len() as usize];
zkey_file
.read_exact(&mut zkey_buffer)
@@ -421,9 +424,9 @@ mod test {
let zkey_data = &Buffer::from(&zkey_buffer[..]);
let graph_data = "./resources/tree_height_20/graph.bin";
let mut graph_file = File::open(&graph_data).expect("no file found");
let metadata = std::fs::metadata(&graph_data).expect("unable to read metadata");
let graph_data = "./resources/tree_depth_10/graph.bin";
let mut graph_file = File::open(graph_data).expect("no file found");
let metadata = std::fs::metadata(graph_data).expect("unable to read metadata");
let mut graph_buffer = vec![0; metadata.len() as usize];
graph_file
.read_exact(&mut graph_buffer)
@@ -436,7 +439,7 @@ mod test {
let tree_config = "".to_string();
let tree_config_buffer = &Buffer::from(tree_config.as_bytes());
let success = new_with_params(
TEST_TREE_HEIGHT,
TEST_TREE_DEPTH,
zkey_data,
graph_data,
tree_config_buffer,
@@ -469,7 +472,7 @@ mod test {
set_leaves_init(rln_pointer, &leaves);
// We generate a new identity pair
let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);
let (identity_secret_hash, id_commitment) = identity_pair_gen();
let identity_index: usize = NO_OF_LEAVES;
// We generate a random signal
@@ -477,9 +480,9 @@ mod test {
let signal: [u8; 32] = rng.gen();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate a random rln_identifier
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
// We generate an external nullifier
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We choose a message_id satisfying 0 <= message_id < MESSAGE_LIMIT
@@ -518,7 +521,7 @@ mod test {
let proof_is_valid_ptr = &mut proof_is_valid as *mut bool;
let success = verify_rln_proof(rln_pointer, input_buffer, proof_is_valid_ptr);
assert!(success, "verify call failed");
assert_eq!(proof_is_valid, true);
assert!(proof_is_valid);
}
#[test]
@@ -536,7 +539,7 @@ mod test {
set_leaves_init(rln_pointer, &leaves);
// We generate a new identity pair
let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);
let (identity_secret_hash, id_commitment) = identity_pair_gen();
let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);
let identity_index: usize = NO_OF_LEAVES;
@@ -545,9 +548,9 @@ mod test {
let signal: [u8; 32] = rng.gen();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate a random rln_identifier
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
// We generate an external nullifier
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We choose a message_id satisfying 0 <= message_id < MESSAGE_LIMIT
@@ -593,7 +596,7 @@ mod test {
verify_with_roots(rln_pointer, input_buffer, roots_buffer, proof_is_valid_ptr);
assert!(success, "verify call failed");
// Proof should be valid
assert_eq!(proof_is_valid, true);
assert!(proof_is_valid);
// We then try to verify against some random values not containing the correct one.
for _ in 0..5 {
@@ -607,7 +610,7 @@ mod test {
verify_with_roots(rln_pointer, input_buffer, roots_buffer, proof_is_valid_ptr);
assert!(success, "verify call failed");
// Proof should be invalid.
assert_eq!(proof_is_valid, false);
assert!(!proof_is_valid);
// We finally include the correct root
// We get the root of the tree obtained by adding one leaf at a time
@@ -623,7 +626,7 @@ mod test {
verify_with_roots(rln_pointer, input_buffer, roots_buffer, proof_is_valid_ptr);
assert!(success, "verify call failed");
// Proof should be valid.
assert_eq!(proof_is_valid, true);
assert!(proof_is_valid);
}
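For reference, the roots_buffer consumed above appears to be a plain concatenation of little-endian field elements with no length prefix, judging by the serialization helpers in this PR; a hedged sketch (`serialize_roots` is a hypothetical helper, not the crate's API):

```rust
use rln::circuit::Fr;
use rln::utils::fr_to_bytes_le;

// Hypothetical helper: candidate roots are concatenated 32-byte
// little-endian field elements, with no length prefix.
fn serialize_roots(roots: &[Fr]) -> Vec<u8> {
    let mut buf = Vec::with_capacity(roots.len() * 32);
    for root in roots {
        buf.extend_from_slice(&fr_to_bytes_le(root));
    }
    buf
}
```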
#[test]
@@ -633,7 +636,7 @@ mod test {
let rln_pointer = create_rln_instance();
// We generate a new identity pair
let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);
let (identity_secret_hash, id_commitment) = identity_pair_gen();
let user_message_limit = Fr::from(100);
let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);
@@ -656,9 +659,9 @@ mod test {
let signal2: [u8; 32] = rng.gen();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate a random rln_identifier
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
// We generate an external nullifier
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We choose a message_id satisfying 0 <= message_id < MESSAGE_LIMIT
@@ -667,7 +670,7 @@ mod test {
// We prepare input for generate_rln_proof API
// input_data is [ identity_secret<32> | id_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal<var> ]
let prove_input1 = prepare_prove_input(
identity_secret_hash,
identity_secret_hash.clone(),
identity_index,
user_message_limit,
message_id,
@@ -676,7 +679,7 @@ mod test {
);
let prove_input2 = prepare_prove_input(
identity_secret_hash,
identity_secret_hash.clone(),
identity_index,
user_message_limit,
message_id,
@@ -711,12 +714,12 @@ mod test {
// We check if the recovered identity secret hash corresponds to the original one
let (recovered_identity_secret_hash, _) = bytes_le_to_fr(&serialized_identity_secret_hash);
assert_eq!(recovered_identity_secret_hash, identity_secret_hash);
assert_eq!(recovered_identity_secret_hash, *identity_secret_hash);
// We now test that computing identity_secret_hash is unsuccessful if shares computed from two different identity secret hashes but within the same epoch are passed
// We generate a new identity pair
let (identity_secret_hash_new, id_commitment_new) = identity_pair_gen(rln_pointer);
let (identity_secret_hash_new, id_commitment_new) = identity_pair_gen();
let rate_commitment_new = utils_poseidon_hash(&[id_commitment_new, user_message_limit]);
// We set the new commitment as the next leaf; its index is 1, since index 0 holds the first id_commitment
@@ -765,146 +768,16 @@ mod test {
// ensure that the recovered secret does not match either of the
// secrets used in proof generation
assert_ne!(recovered_identity_secret_hash_new, identity_secret_hash_new);
}
#[test]
// Tests hash to field using FFI APIs
fn test_seeded_keygen_ffi() {
// We create a RLN instance
let rln_pointer = create_rln_instance();
// We generate a new identity pair from an input seed
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = seeded_key_gen(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_secret_hash, read) = bytes_le_to_fr(&result_data);
let (id_commitment, _) = bytes_le_to_fr(&result_data[read..].to_vec());
// We check against expected values
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
assert_ne!(
recovered_identity_secret_hash_new,
*identity_secret_hash_new
);
let expected_id_commitment_seed_bytes = str_to_fr(
"0xbf16d2b5c0d6f9d9d561e05bfca16a81b4b873bb063508fae360d8c74cef51f",
16,
);
assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes.unwrap()
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}
#[test]
// Tests hash to field using FFI APIs
fn test_seeded_extended_keygen_ffi() {
// We create a RLN instance
let rln_pointer = create_rln_instance();
// We generate a new identity tuple from an input seed
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success =
seeded_extended_key_gen(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
deserialize_identity_tuple(result_data);
// We check against expected values
let expected_identity_trapdoor_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
);
let expected_identity_nullifier_seed_bytes = str_to_fr(
"0x1f18714c7bc83b5bca9e89d404cf6f2f585bc4c0f7ed8b53742b7e2b298f50b4",
16,
);
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x2aca62aaa7abaf3686fff2caf00f55ab9462dc12db5b5d4bcf3994e671f8e521",
16,
);
let expected_id_commitment_seed_bytes = str_to_fr(
"0x68b66aa0a8320d2e56842581553285393188714c48f9b17acd198b4f1734c5c",
16,
);
assert_eq!(
identity_trapdoor,
expected_identity_trapdoor_seed_bytes.unwrap()
);
assert_eq!(
identity_nullifier,
expected_identity_nullifier_seed_bytes.unwrap()
);
assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes.unwrap()
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}
#[test]
// Tests hash to field using FFI APIs
fn test_hash_to_field_ffi() {
let mut rng = rand::thread_rng();
let signal: [u8; 32] = rng.gen();
// We prepare id_commitment and we set the leaf at provided index
let input_buffer = &Buffer::from(signal.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_hash(input_buffer, output_buffer.as_mut_ptr());
assert!(success, "hash call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
// We read the returned proof and we append proof values for verify
let serialized_hash = <&[u8]>::from(&output_buffer).to_vec();
let (hash1, _) = bytes_le_to_fr(&serialized_hash);
let hash2 = hash_to_field(&signal);
assert_eq!(hash1, hash2);
}
#[test]
// Test Poseidon hash FFI
fn test_poseidon_hash_ffi() {
// generate random number between 1..ROUND_PARAMS.len()
let mut rng = thread_rng();
let number_of_inputs = rng.gen_range(1..ROUND_PARAMS.len());
let mut inputs = Vec::with_capacity(number_of_inputs);
for _ in 0..number_of_inputs {
inputs.push(Fr::rand(&mut rng));
}
let inputs_ser = vec_fr_to_bytes_le(&inputs).unwrap();
let input_buffer = &Buffer::from(inputs_ser.as_ref());
let expected_hash = utils_poseidon_hash(inputs.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_poseidon_hash(input_buffer, output_buffer.as_mut_ptr());
assert!(success, "poseidon hash call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (received_hash, _) = bytes_le_to_fr(&result_data);
assert_eq!(received_hash, expected_hash);
}
#[test]
fn test_get_leaf_ffi() {
// We create a RLN instance
let no_of_leaves = 1 << TEST_TREE_HEIGHT;
let no_of_leaves = 1 << TEST_TREE_DEPTH;
// We create a RLN instance
let rln_pointer = create_rln_instance();
@@ -913,13 +786,12 @@ mod test {
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success =
seeded_extended_key_gen(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
let success = seeded_extended_key_gen(input_buffer, output_buffer.as_mut_ptr(), true);
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (_, _, _, id_commitment) = deserialize_identity_tuple(result_data);
let (_, _, _, id_commitment) = deserialize_identity_tuple_le(result_data);
// We insert the id_commitment into the tree at a random index
let mut rng = thread_rng();
@@ -983,15 +855,14 @@ mod stateless_test {
use rand::Rng;
use rln::circuit::*;
use rln::ffi::generate_rln_proof_with_witness;
use rln::ffi::{hash as ffi_hash, poseidon_hash as ffi_poseidon_hash, *};
use rln::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS};
use rln::poseidon_tree::PoseidonTree;
use rln::ffi::*;
use rln::hashers::{hash_to_field_le, poseidon_hash as utils_poseidon_hash, PoseidonHash};
use rln::protocol::*;
use rln::public::RLN;
use rln::utils::*;
use std::mem::MaybeUninit;
use std::time::{Duration, Instant};
use utils::ZerokitMerkleTree;
use utils::{OptimalMerkleTree, ZerokitMerkleProof, ZerokitMerkleTree};
type ConfigOf<T> = <T as ZerokitMerkleTree>::Config;
@@ -1002,14 +873,14 @@ mod stateless_test {
unsafe { &mut *rln_pointer.assume_init() }
}
fn identity_pair_gen(rln_pointer: &mut RLN) -> (Fr, Fr) {
fn identity_pair_gen() -> (IdSecret, Fr) {
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = key_gen(rln_pointer, output_buffer.as_mut_ptr());
let success = key_gen(output_buffer.as_mut_ptr(), true);
assert!(success, "key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_secret_hash, read) = bytes_le_to_fr(&result_data);
let (id_commitment, _) = bytes_le_to_fr(&result_data[read..].to_vec());
let (identity_secret_hash, read) = IdSecret::from_bytes_le(&result_data);
let (id_commitment, _) = bytes_le_to_fr(&result_data[read..]);
(identity_secret_hash, id_commitment)
}
@@ -1026,43 +897,44 @@ mod stateless_test {
#[test]
fn test_recover_id_secret_stateless_ffi() {
let default_leaf = Fr::from(0);
let mut tree = PoseidonTree::new(
TEST_TREE_HEIGHT,
let mut tree: OptimalMerkleTree<PoseidonHash> = OptimalMerkleTree::new(
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<PoseidonTree>::default(),
ConfigOf::<OptimalMerkleTree<PoseidonHash>>::default(),
)
.unwrap();
let rln_pointer = create_rln_instance();
// We generate a new identity pair
let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);
let (identity_secret_hash, id_commitment) = identity_pair_gen();
let user_message_limit = Fr::from(100);
let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);
tree.update_next(rate_commitment).unwrap();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let rln_identifier = hash_to_field(b"test-rln-identifier");
let epoch = hash_to_field_le(b"test-epoch");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We generate two proofs using the same epoch but different signals.
// We generate a random signal
let mut rng = thread_rng();
let signal1: [u8; 32] = rng.gen();
let x1 = hash_to_field(&signal1);
let x1 = hash_to_field_le(&signal1);
let signal2: [u8; 32] = rng.gen();
let x2 = hash_to_field(&signal2);
let x2 = hash_to_field_le(&signal2);
let identity_index = tree.leaves_set();
let merkle_proof = tree.proof(identity_index).expect("proof should exist");
// We prepare input for generate_rln_proof API
let rln_witness1 = rln_witness_from_values(
identity_secret_hash,
&merkle_proof,
identity_secret_hash.clone(),
merkle_proof.get_path_elements(),
merkle_proof.get_path_index(),
x1,
external_nullifier,
user_message_limit,
@@ -1072,8 +944,9 @@ mod stateless_test {
let serialized1 = serialize_witness(&rln_witness1).unwrap();
let rln_witness2 = rln_witness_from_values(
identity_secret_hash,
&merkle_proof,
identity_secret_hash.clone(),
merkle_proof.get_path_elements(),
merkle_proof.get_path_index(),
x2,
external_nullifier,
user_message_limit,
@@ -1108,26 +981,28 @@ mod stateless_test {
assert!(!serialized_identity_secret_hash.is_empty());
// We check if the recovered identity secret hash corresponds to the original one
let (recovered_identity_secret_hash, _) = bytes_le_to_fr(&serialized_identity_secret_hash);
let (recovered_identity_secret_hash, _) =
IdSecret::from_bytes_le(&serialized_identity_secret_hash);
assert_eq!(recovered_identity_secret_hash, identity_secret_hash);
// We now test that computing identity_secret_hash is unsuccessful if shares computed from two different identity secret hashes but within the same epoch are passed
// We generate a new identity pair
let (identity_secret_hash_new, id_commitment_new) = identity_pair_gen(rln_pointer);
let (identity_secret_hash_new, id_commitment_new) = identity_pair_gen();
let rate_commitment_new = utils_poseidon_hash(&[id_commitment_new, user_message_limit]);
tree.update_next(rate_commitment_new).unwrap();
// We generate a random signal
let signal3: [u8; 32] = rng.gen();
let x3 = hash_to_field(&signal3);
let x3 = hash_to_field_le(&signal3);
let identity_index_new = tree.leaves_set();
let merkle_proof_new = tree.proof(identity_index_new).expect("proof should exist");
let rln_witness3 = rln_witness_from_values(
identity_secret_hash_new,
&merkle_proof_new,
identity_secret_hash_new.clone(),
merkle_proof_new.get_path_elements(),
merkle_proof_new.get_path_index(),
x3,
external_nullifier,
user_message_limit,
@@ -1159,23 +1034,26 @@ mod stateless_test {
// ensure that the recovered secret does not match either of the
// secrets used in proof generation
assert_ne!(recovered_identity_secret_hash_new, identity_secret_hash_new);
assert_ne!(
recovered_identity_secret_hash_new,
*identity_secret_hash_new
);
}
#[test]
fn test_verify_with_roots_stateless_ffi() {
let default_leaf = Fr::from(0);
let mut tree = PoseidonTree::new(
TEST_TREE_HEIGHT,
let mut tree: OptimalMerkleTree<PoseidonHash> = OptimalMerkleTree::new(
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<PoseidonTree>::default(),
ConfigOf::<OptimalMerkleTree<PoseidonHash>>::default(),
)
.unwrap();
let rln_pointer = create_rln_instance();
// We generate a new identity pair
let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);
let (identity_secret_hash, id_commitment) = identity_pair_gen();
let identity_index = tree.leaves_set();
let user_message_limit = Fr::from(100);
@@ -1183,22 +1061,23 @@ mod stateless_test {
tree.update_next(rate_commitment).unwrap();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let rln_identifier = hash_to_field(b"test-rln-identifier");
let epoch = hash_to_field_le(b"test-epoch");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We generate two proofs using the same epoch but different signals.
// We generate a random signal
let mut rng = thread_rng();
let signal: [u8; 32] = rng.gen();
let x = hash_to_field(&signal);
let x = hash_to_field_le(&signal);
let merkle_proof = tree.proof(identity_index).expect("proof should exist");
// We prepare input for generate_rln_proof API
let rln_witness = rln_witness_from_values(
identity_secret_hash,
&merkle_proof,
merkle_proof.get_path_elements(),
merkle_proof.get_path_index(),
x,
external_nullifier,
user_message_limit,
@@ -1222,7 +1101,7 @@ mod stateless_test {
verify_with_roots(rln_pointer, input_buffer, roots_buffer, proof_is_valid_ptr);
assert!(success, "verify call failed");
// Proof should be valid
assert_eq!(proof_is_valid, true);
assert!(proof_is_valid);
// We serialize some random values into the roots buffer and check that the proof is not verified, since the buffer doesn't contain the correct root the proof refers to
for _ in 0..5 {
@@ -1236,7 +1115,7 @@ mod stateless_test {
verify_with_roots(rln_pointer, input_buffer, roots_buffer, proof_is_valid_ptr);
assert!(success, "verify call failed");
// Proof should be invalid.
assert_eq!(proof_is_valid, false);
assert!(!proof_is_valid);
// We get the root of the tree obtained by adding one leaf at a time
let root = tree.root();
@@ -1251,7 +1130,7 @@ mod stateless_test {
verify_with_roots(rln_pointer, input_buffer, roots_buffer, proof_is_valid_ptr);
assert!(success, "verify call failed");
// Proof should be valid.
assert_eq!(proof_is_valid, true);
assert!(proof_is_valid);
}
#[test]
@@ -1268,7 +1147,7 @@ mod stateless_test {
for _ in 0..sample_size {
// We generate random witness instances and relative proof values
let rln_witness = random_rln_witness(TEST_TREE_HEIGHT);
let rln_witness = random_rln_witness(TEST_TREE_DEPTH);
let proof_values = proof_values_from_witness(&rln_witness).unwrap();
// We prepare id_commitment and we set the leaf at provided index
@@ -1296,7 +1175,7 @@ mod stateless_test {
let success = verify(rln_pointer, input_buffer, proof_is_valid_ptr);
verify_time += now.elapsed().as_nanos();
assert!(success, "verify call failed");
assert_eq!(proof_is_valid, true);
assert!(proof_is_valid);
}
println!(
@@ -1308,23 +1187,34 @@ mod stateless_test {
Duration::from_nanos((verify_time / sample_size).try_into().unwrap())
);
}
}
#[cfg(test)]
mod general_tests {
use ark_std::{rand::thread_rng, UniformRand};
use rand::Rng;
use rln::circuit::*;
use rln::ffi::{hash as ffi_hash, poseidon_hash as ffi_poseidon_hash, *};
use rln::hashers::{
hash_to_field_be, hash_to_field_le, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS,
};
use rln::protocol::*;
use rln::utils::*;
use std::mem::MaybeUninit;
#[test]
// Tests hash to field using FFI APIs
fn test_seeded_keygen_stateless_ffi() {
// We create a RLN instance
let rln_pointer = create_rln_instance();
// We generate a new identity pair from an input seed
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = seeded_key_gen(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
let success = seeded_key_gen(input_buffer, output_buffer.as_mut_ptr(), true);
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_secret_hash, read) = bytes_le_to_fr(&result_data);
let (id_commitment, _) = bytes_le_to_fr(&result_data[read..].to_vec());
let (id_commitment, _) = bytes_le_to_fr(&result_data[read..]);
// We check against expected values
let expected_identity_secret_hash_seed_bytes = str_to_fr(
@@ -1343,23 +1233,47 @@ mod stateless_test {
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}
#[test]
fn test_seeded_keygen_big_endian_ffi() {
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = seeded_key_gen(input_buffer, output_buffer.as_mut_ptr(), false);
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_secret_hash, read) = bytes_be_to_fr(&result_data);
let (id_commitment, _) = bytes_be_to_fr(&result_data[read..]);
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
);
let expected_id_commitment_seed_bytes = str_to_fr(
"0xbf16d2b5c0d6f9d9d561e05bfca16a81b4b873bb063508fae360d8c74cef51f",
16,
);
assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes.unwrap()
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}
#[test]
// Tests hash to field using FFI APIs
fn test_seeded_extended_keygen_stateless_ffi() {
// We create a RLN instance
let rln_pointer = create_rln_instance();
// We generate a new identity tuple from an input seed
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success =
seeded_extended_key_gen(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
let success = seeded_extended_key_gen(input_buffer, output_buffer.as_mut_ptr(), true);
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
deserialize_identity_tuple(result_data);
deserialize_identity_tuple_le(result_data);
// We check against expected values
let expected_identity_trapdoor_seed_bytes = str_to_fr(
@@ -1394,6 +1308,50 @@ mod stateless_test {
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}
#[test]
fn test_seeded_extended_keygen_big_endian_ffi() {
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = seeded_extended_key_gen(input_buffer, output_buffer.as_mut_ptr(), false);
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
deserialize_identity_tuple_be(result_data);
let expected_identity_trapdoor_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
);
let expected_identity_nullifier_seed_bytes = str_to_fr(
"0x1f18714c7bc83b5bca9e89d404cf6f2f585bc4c0f7ed8b53742b7e2b298f50b4",
16,
);
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x2aca62aaa7abaf3686fff2caf00f55ab9462dc12db5b5d4bcf3994e671f8e521",
16,
);
let expected_id_commitment_seed_bytes = str_to_fr(
"0x68b66aa0a8320d2e56842581553285393188714c48f9b17acd198b4f1734c5c",
16,
);
assert_eq!(
identity_trapdoor,
expected_identity_trapdoor_seed_bytes.unwrap()
);
assert_eq!(
identity_nullifier,
expected_identity_nullifier_seed_bytes.unwrap()
);
assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes.unwrap()
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}
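For readers following the tuple (de)serialization in these tests, a hedged sketch of the layout the deserialize_identity_tuple_le/_be helpers consume: four consecutive field elements. `split_identity_tuple_be` is a hypothetical illustration, not the crate's implementation.

```rust
use rln::circuit::Fr;
use rln::utils::{bytes_be_to_fr, fr_byte_size};

// Hypothetical illustration of the expected layout:
// [ trapdoor | nullifier | secret_hash | commitment ], each fr_byte_size() bytes.
fn split_identity_tuple_be(data: &[u8]) -> (Fr, Fr, Fr, Fr) {
    let sz = fr_byte_size();
    let (trapdoor, _) = bytes_be_to_fr(&data[0..sz]);
    let (nullifier, _) = bytes_be_to_fr(&data[sz..2 * sz]);
    let (secret_hash, _) = bytes_be_to_fr(&data[2 * sz..3 * sz]);
    let (commitment, _) = bytes_be_to_fr(&data[3 * sz..4 * sz]);
    (trapdoor, nullifier, secret_hash, commitment)
}
```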
#[test]
// Tests hash to field using FFI APIs
fn test_hash_to_field_stateless_ffi() {
@@ -1403,7 +1361,7 @@ mod stateless_test {
// We prepare id_commitment and we set the leaf at provided index
let input_buffer = &Buffer::from(signal.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_hash(input_buffer, output_buffer.as_mut_ptr());
let success = ffi_hash(input_buffer, output_buffer.as_mut_ptr(), true);
assert!(success, "hash call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
@@ -1411,7 +1369,25 @@ mod stateless_test {
let serialized_hash = <&[u8]>::from(&output_buffer).to_vec();
let (hash1, _) = bytes_le_to_fr(&serialized_hash);
let hash2 = hash_to_field(&signal);
let hash2 = hash_to_field_le(&signal);
assert_eq!(hash1, hash2);
}
#[test]
fn test_hash_to_field_big_endian_ffi() {
let mut rng = rand::thread_rng();
let signal: [u8; 32] = rng.gen();
let input_buffer = &Buffer::from(signal.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_hash(input_buffer, output_buffer.as_mut_ptr(), false);
assert!(success, "hash call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let serialized_hash = <&[u8]>::from(&output_buffer).to_vec();
let (hash1, _) = bytes_be_to_fr(&serialized_hash);
let hash2 = hash_to_field_be(&signal);
assert_eq!(hash1, hash2);
}
@@ -1426,13 +1402,13 @@ mod stateless_test {
for _ in 0..number_of_inputs {
inputs.push(Fr::rand(&mut rng));
}
let inputs_ser = vec_fr_to_bytes_le(&inputs).unwrap();
let inputs_ser = vec_fr_to_bytes_le(&inputs);
let input_buffer = &Buffer::from(inputs_ser.as_ref());
let expected_hash = utils_poseidon_hash(inputs.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_poseidon_hash(input_buffer, output_buffer.as_mut_ptr());
let success = ffi_poseidon_hash(input_buffer, output_buffer.as_mut_ptr(), true);
assert!(success, "poseidon hash call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
@@ -1441,4 +1417,28 @@ mod stateless_test {
assert_eq!(received_hash, expected_hash);
}
#[test]
fn test_poseidon_hash_big_endian_ffi() {
let mut rng = thread_rng();
let number_of_inputs = rng.gen_range(1..ROUND_PARAMS.len());
let mut inputs = Vec::with_capacity(number_of_inputs);
for _ in 0..number_of_inputs {
inputs.push(Fr::rand(&mut rng));
}
let inputs_ser = vec_fr_to_bytes_be(&inputs);
let input_buffer = &Buffer::from(inputs_ser.as_ref());
let expected_hash = utils_poseidon_hash(inputs.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_poseidon_hash(input_buffer, output_buffer.as_mut_ptr(), false);
assert!(success, "poseidon hash call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (received_hash, _) = bytes_be_to_fr(&result_data);
assert_eq!(received_hash, expected_hash);
}
}

View File

@@ -1,28 +1,38 @@
////////////////////////////////////////////////////////////
/// Tests
// Tests
////////////////////////////////////////////////////////////
#![cfg(not(feature = "stateless"))]
#[cfg(test)]
mod test {
use rln::hashers::{poseidon_hash, PoseidonHash};
use rln::{circuit::*, poseidon_tree::PoseidonTree};
use rln::{
circuit::{Fr, TEST_TREE_DEPTH},
poseidon_tree::PoseidonTree,
};
use utils::{FullMerkleTree, OptimalMerkleTree, ZerokitMerkleProof, ZerokitMerkleTree};
#[test]
// The test is checked correctness for `FullMerkleTree` and `OptimalMerkleTree` with Poseidon hash
// The test checks correctness of `FullMerkleTree` and `OptimalMerkleTree` with the Poseidon hash
fn test_zerokit_merkle_implementations() {
let sample_size = 100;
let leaves: Vec<Fr> = (0..sample_size).map(|s| Fr::from(s)).collect();
let leaves: Vec<Fr> = (0..sample_size).map(Fr::from).collect();
let mut tree_full = FullMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
let mut tree_opt = OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
let mut tree_full = FullMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
let mut tree_opt = OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
for i in 0..sample_size.try_into().unwrap() {
tree_full.set(i, leaves[i]).unwrap();
for (i, leave) in leaves
.into_iter()
.enumerate()
.take(sample_size.try_into().unwrap())
{
tree_full.set(i, leave).unwrap();
let proof = tree_full.proof(i).expect("index should be set");
assert_eq!(proof.leaf_index(), i);
tree_opt.set(i, leaves[i]).unwrap();
tree_opt.set(i, leave).unwrap();
assert_eq!(tree_opt.root(), tree_full.root());
let proof = tree_opt.proof(i).expect("index should be set");
assert_eq!(proof.leaf_index(), i);
}
@@ -37,11 +47,11 @@ mod test {
#[test]
fn test_subtree_root() {
const DEPTH: usize = 3;
const LEAVES_LEN: usize = 6;
const LEAVES_LEN: usize = 8;
let mut tree = PoseidonTree::default(DEPTH).unwrap();
let leaves: Vec<Fr> = (0..LEAVES_LEN).map(|s| Fr::from(s as i32)).collect();
let _ = tree.set_range(0, leaves);
let _ = tree.set_range(0, leaves.into_iter());
for i in 0..LEAVES_LEN {
// check leaves
@@ -78,7 +88,7 @@ mod test {
let leaves: Vec<Fr> = (0..nof_leaves).map(|s| Fr::from(s as i32)).collect();
// check set_range
let _ = tree.set_range(0, leaves.clone());
let _ = tree.set_range(0, leaves.clone().into_iter());
assert!(tree.get_empty_leaves_indices().is_empty());
let mut vec_idxs = Vec::new();
@@ -97,27 +107,29 @@ mod test {
// check remove_indices_and_set_leaves inside override_range function
assert!(tree.get_empty_leaves_indices().is_empty());
let leaves_2: Vec<Fr> = (0..2).map(|s| Fr::from(s as i32)).collect();
tree.override_range(0, leaves_2.clone(), [0, 1, 2, 3])
let leaves_2: Vec<Fr> = (0..2).map(Fr::from).collect();
tree.override_range(0, leaves_2.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
assert_eq!(tree.get_empty_leaves_indices(), vec![2, 3]);
// check remove_indices inside override_range function
tree.override_range(0, [], [0, 1]).unwrap();
tree.override_range(0, [].into_iter(), [0, 1].into_iter())
.unwrap();
assert_eq!(tree.get_empty_leaves_indices(), vec![0, 1, 2, 3]);
// check set_range inside override_range function
tree.override_range(0, leaves_2.clone(), []).unwrap();
tree.override_range(0, leaves_2.clone().into_iter(), [].into_iter())
.unwrap();
assert_eq!(tree.get_empty_leaves_indices(), vec![2, 3]);
let leaves_4: Vec<Fr> = (0..4).map(|s| Fr::from(s as i32)).collect();
let leaves_4: Vec<Fr> = (0..4).map(Fr::from).collect();
// check if the indexes for write and delete are the same
tree.override_range(0, leaves_4.clone(), [0, 1, 2, 3])
tree.override_range(0, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
assert!(tree.get_empty_leaves_indices().is_empty());
// check if indexes for deletion are before indexes for overwriting
tree.override_range(4, leaves_4.clone(), [0, 1, 2, 3])
tree.override_range(4, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
// The result will be like this, because in the set_range function in pmtree
// the next_index value is increased not by the number of elements to insert,
@@ -128,7 +140,7 @@ mod test {
);
// check if the indices for write and delete do not overlap completely
tree.override_range(2, leaves_4.clone(), [0, 1, 2, 3])
tree.override_range(2, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
// The result will be like this, because in the set_range function in pmtree
// the next_index value is increased not by the number of elements to insert,

View File

@@ -1,11 +1,18 @@
#![cfg(not(feature = "stateless"))]
#[cfg(test)]
mod test {
use ark_ff::BigInt;
use rln::circuit::{graph_from_folder, zkey_from_folder};
use rln::circuit::{Fr, TEST_TREE_HEIGHT};
use rln::hashers::{hash_to_field, poseidon_hash};
use rln::circuit::{Fr, TEST_TREE_DEPTH};
use rln::hashers::{hash_to_field_le, poseidon_hash};
use rln::poseidon_tree::PoseidonTree;
use rln::protocol::*;
use rln::protocol::{
deserialize_proof_values, deserialize_witness, generate_proof, keygen,
proof_values_from_witness, rln_witness_from_json, rln_witness_from_values,
rln_witness_to_json, seeded_keygen, serialize_proof_values, serialize_witness,
verify_proof, RLNWitnessInput,
};
use rln::utils::str_to_fr;
use utils::{ZerokitMerkleProof, ZerokitMerkleTree};
@@ -17,19 +24,19 @@ mod test {
let leaf_index = 3;
// generate identity
let identity_secret_hash = hash_to_field(b"test-merkle-proof");
let identity_secret_hash = hash_to_field_le(b"test-merkle-proof");
let id_commitment = poseidon_hash(&[identity_secret_hash]);
let rate_commitment = poseidon_hash(&[id_commitment, 100.into()]);
// generate merkle tree
let default_leaf = Fr::from(0);
let mut tree = PoseidonTree::new(
TEST_TREE_HEIGHT,
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<PoseidonTree>::default(),
)
.unwrap();
tree.set(leaf_index, rate_commitment.into()).unwrap();
tree.set(leaf_index, rate_commitment).unwrap();
// We check correct computation of the root
let root = tree.root();
@@ -95,26 +102,27 @@ mod test {
//// generate merkle tree
let default_leaf = Fr::from(0);
let mut tree = PoseidonTree::new(
TEST_TREE_HEIGHT,
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<PoseidonTree>::default(),
)
.unwrap();
tree.set(leaf_index, rate_commitment.into()).unwrap();
tree.set(leaf_index, rate_commitment).unwrap();
let merkle_proof = tree.proof(leaf_index).expect("proof should exist");
let signal = b"hey hey";
let x = hash_to_field(signal);
let x = hash_to_field_le(signal);
// We set the remaining values to random ones
let epoch = hash_to_field(b"test-epoch");
let rln_identifier = hash_to_field(b"test-rln-identifier");
let epoch = hash_to_field_le(b"test-epoch");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);
rln_witness_from_values(
identity_secret_hash,
&merkle_proof,
merkle_proof.get_path_elements(),
merkle_proof.get_path_index(),
x,
external_nullifier,
user_message_limit,
@@ -137,11 +145,11 @@ mod test {
assert_eq!(rln_witness_deser, rln_witness);
// Let's generate a zkSNARK proof
let proof = generate_proof(&proving_key, &rln_witness_deser, &graph_data).unwrap();
let proof = generate_proof(proving_key, &rln_witness_deser, graph_data).unwrap();
let proof_values = proof_values_from_witness(&rln_witness_deser).unwrap();
// Let's verify the proof
let verified = verify_proof(&verification_key, &proof, &proof_values);
let verified = verify_proof(verification_key, &proof, &proof_values);
assert!(verified.unwrap());
}
@@ -160,12 +168,12 @@ mod test {
let graph_data = graph_from_folder();
// Let's generate a zkSNARK proof
let proof = generate_proof(&proving_key, &rln_witness_deser, &graph_data).unwrap();
let proof = generate_proof(proving_key, &rln_witness_deser, graph_data).unwrap();
let proof_values = proof_values_from_witness(&rln_witness_deser).unwrap();
// Let's verify the proof
let success = verify_proof(&verification_key, &proof, &proof_values).unwrap();
let success = verify_proof(verification_key, &proof, &proof_values).unwrap();
assert!(success);
}


@@ -3,16 +3,35 @@ mod test {
#[cfg(not(feature = "stateless"))]
use {
ark_ff::BigInt,
rln::{circuit::TEST_TREE_HEIGHT, protocol::compute_tree_root},
rln::{
circuit::TEST_TREE_DEPTH,
protocol::compute_tree_root,
public::RLN,
utils::{
bytes_le_to_vec_fr, bytes_le_to_vec_u8, bytes_le_to_vec_usize, fr_to_bytes_le,
generate_input_buffer, IdSecret,
},
},
zeroize::Zeroize,
};
use ark_std::{rand::thread_rng, UniformRand};
use rand::Rng;
use rln::circuit::Fr;
use rln::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS};
use rln::protocol::deserialize_identity_tuple;
use rln::public::{hash as public_hash, poseidon_hash as public_poseidon_hash, RLN};
use rln::utils::*;
use rln::hashers::{
hash_to_field_be, hash_to_field_le, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS,
};
use rln::protocol::{
deserialize_identity_pair_be, deserialize_identity_pair_le, deserialize_identity_tuple_be,
deserialize_identity_tuple_le,
};
use rln::public::{
hash as public_hash, poseidon_hash as public_poseidon_hash, seeded_extended_key_gen,
seeded_key_gen,
};
use rln::utils::{
bytes_be_to_fr, bytes_le_to_fr, str_to_fr, vec_fr_to_bytes_be, vec_fr_to_bytes_le,
};
use std::io::Cursor;
#[test]
@@ -22,11 +41,16 @@ mod test {
let leaf_index = 3;
let user_message_limit = 1;
let mut rln = RLN::new(TEST_TREE_HEIGHT, generate_input_buffer()).unwrap();
let mut rln = RLN::new(TEST_TREE_DEPTH, generate_input_buffer()).unwrap();
// generate identity
let identity_secret_hash = hash_to_field(b"test-merkle-proof");
let id_commitment = utils_poseidon_hash(&vec![identity_secret_hash]);
let mut identity_secret_hash_ = hash_to_field_le(b"test-merkle-proof");
let identity_secret_hash = IdSecret::from(&mut identity_secret_hash_);
let mut to_hash = [*identity_secret_hash.clone()];
let id_commitment = utils_poseidon_hash(&to_hash);
to_hash[0].zeroize();
let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit.into()]);
// check that leaves indices is empty
@@ -66,7 +90,7 @@ mod test {
let buffer_inner = buffer.into_inner();
let (path_elements, read) = bytes_le_to_vec_fr(&buffer_inner).unwrap();
let (identity_path_index, _) = bytes_le_to_vec_u8(&buffer_inner[read..].to_vec()).unwrap();
let (identity_path_index, _) = bytes_le_to_vec_u8(&buffer_inner[read..]).unwrap();
// We check correct computation of the path and indexes
let expected_path_elements: Vec<Fr> = [
@@ -102,9 +126,9 @@ mod test {
// check subtree root computation for leaf 0 for all corresponding node until the root
let l_idx = 0;
for n in (1..=TEST_TREE_HEIGHT).rev() {
let idx_l = l_idx * (1 << (TEST_TREE_HEIGHT - n));
let idx_r = (l_idx + 1) * (1 << (TEST_TREE_HEIGHT - n));
for n in (1..=TEST_TREE_DEPTH).rev() {
let idx_l = l_idx * (1 << (TEST_TREE_DEPTH - n));
let idx_r = (l_idx + 1) * (1 << (TEST_TREE_DEPTH - n));
let idx_sr = idx_l;
let mut buffer = Cursor::new(Vec::<u8>::new());
@@ -136,19 +160,46 @@ mod test {
#[test]
fn test_seeded_keygen() {
let rln = RLN::default();
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let mut input_buffer = Cursor::new(&seed_bytes);
let mut output_buffer = Cursor::new(Vec::<u8>::new());
rln.seeded_key_gen(&mut input_buffer, &mut output_buffer)
.unwrap();
seeded_key_gen(&mut input_buffer, &mut output_buffer, true).unwrap();
let serialized_output = output_buffer.into_inner();
let (identity_secret_hash, read) = bytes_le_to_fr(&serialized_output);
let (id_commitment, _) = bytes_le_to_fr(&serialized_output[read..].to_vec());
let (identity_secret_hash, id_commitment) = deserialize_identity_pair_le(serialized_output);
// We check against expected values
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
)
.unwrap();
let expected_id_commitment_seed_bytes = str_to_fr(
"0xbf16d2b5c0d6f9d9d561e05bfca16a81b4b873bb063508fae360d8c74cef51f",
16,
)
.unwrap();
assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes);
}
#[test]
fn test_seeded_keygen_big_endian() {
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let mut input_buffer = Cursor::new(&seed_bytes);
let mut output_buffer = Cursor::new(Vec::<u8>::new());
seeded_key_gen(&mut input_buffer, &mut output_buffer, false).unwrap();
let serialized_output = output_buffer.into_inner();
let (identity_secret_hash, id_commitment) = deserialize_identity_pair_be(serialized_output);
// We check against expected values
let expected_identity_secret_hash_seed_bytes = str_to_fr(
@@ -171,19 +222,60 @@ mod test {
#[test]
fn test_seeded_extended_keygen() {
let rln = RLN::default();
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let mut input_buffer = Cursor::new(&seed_bytes);
let mut output_buffer = Cursor::new(Vec::<u8>::new());
rln.seeded_extended_key_gen(&mut input_buffer, &mut output_buffer)
.unwrap();
seeded_extended_key_gen(&mut input_buffer, &mut output_buffer, true).unwrap();
let serialized_output = output_buffer.into_inner();
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
deserialize_identity_tuple(serialized_output);
deserialize_identity_tuple_le(serialized_output);
// We check against expected values
let expected_identity_trapdoor_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
)
.unwrap();
let expected_identity_nullifier_seed_bytes = str_to_fr(
"0x1f18714c7bc83b5bca9e89d404cf6f2f585bc4c0f7ed8b53742b7e2b298f50b4",
16,
)
.unwrap();
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x2aca62aaa7abaf3686fff2caf00f55ab9462dc12db5b5d4bcf3994e671f8e521",
16,
)
.unwrap();
let expected_id_commitment_seed_bytes = str_to_fr(
"0x68b66aa0a8320d2e56842581553285393188714c48f9b17acd198b4f1734c5c",
16,
)
.unwrap();
assert_eq!(identity_trapdoor, expected_identity_trapdoor_seed_bytes);
assert_eq!(identity_nullifier, expected_identity_nullifier_seed_bytes);
assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes);
}
#[test]
fn test_seeded_extended_keygen_big_endian() {
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let mut input_buffer = Cursor::new(&seed_bytes);
let mut output_buffer = Cursor::new(Vec::<u8>::new());
seeded_extended_key_gen(&mut input_buffer, &mut output_buffer, false).unwrap();
let serialized_output = output_buffer.into_inner();
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
deserialize_identity_tuple_be(serialized_output);
// We check against expected values
let expected_identity_trapdoor_seed_bytes = str_to_fr(
@@ -224,11 +316,28 @@ mod test {
let mut input_buffer = Cursor::new(&signal);
let mut output_buffer = Cursor::new(Vec::<u8>::new());
public_hash(&mut input_buffer, &mut output_buffer).unwrap();
public_hash(&mut input_buffer, &mut output_buffer, true).unwrap();
let serialized_hash = output_buffer.into_inner();
let (hash1, _) = bytes_le_to_fr(&serialized_hash);
let hash2 = hash_to_field(&signal);
let hash2 = hash_to_field_le(&signal);
assert_eq!(hash1, hash2);
}
#[test]
fn test_hash_to_field_big_endian() {
let mut rng = thread_rng();
let signal: [u8; 32] = rng.gen();
let mut input_buffer = Cursor::new(&signal);
let mut output_buffer = Cursor::new(Vec::<u8>::new());
public_hash(&mut input_buffer, &mut output_buffer, false).unwrap();
let serialized_hash = output_buffer.into_inner();
let (hash1, _) = bytes_be_to_fr(&serialized_hash);
let hash2 = hash_to_field_be(&signal);
assert_eq!(hash1, hash2);
}
@@ -243,13 +352,33 @@ mod test {
}
let expected_hash = utils_poseidon_hash(&inputs);
let mut input_buffer = Cursor::new(vec_fr_to_bytes_le(&inputs).unwrap());
let mut input_buffer = Cursor::new(vec_fr_to_bytes_le(&inputs));
let mut output_buffer = Cursor::new(Vec::<u8>::new());
public_poseidon_hash(&mut input_buffer, &mut output_buffer).unwrap();
public_poseidon_hash(&mut input_buffer, &mut output_buffer, true).unwrap();
let serialized_hash = output_buffer.into_inner();
let (hash, _) = bytes_le_to_fr(&serialized_hash);
assert_eq!(hash, expected_hash);
}
#[test]
fn test_poseidon_hash_big_endian() {
let mut rng = thread_rng();
let number_of_inputs = rng.gen_range(1..ROUND_PARAMS.len());
let mut inputs = Vec::with_capacity(number_of_inputs);
for _ in 0..number_of_inputs {
inputs.push(Fr::rand(&mut rng));
}
let expected_hash = utils_poseidon_hash(&inputs);
let mut input_buffer = Cursor::new(vec_fr_to_bytes_be(&inputs));
let mut output_buffer = Cursor::new(Vec::<u8>::new());
public_poseidon_hash(&mut input_buffer, &mut output_buffer, false).unwrap();
let serialized_hash = output_buffer.into_inner();
let (hash, _) = bytes_be_to_fr(&serialized_hash);
assert_eq!(hash, expected_hash);
}
}

rln/tests/utils.rs (new file, 411 lines)

@@ -0,0 +1,411 @@
#[cfg(test)]
mod test {
use rln::utils::{
bytes_be_to_fr, bytes_be_to_vec_fr, bytes_be_to_vec_u8, bytes_be_to_vec_usize,
bytes_le_to_fr, bytes_le_to_vec_fr, bytes_le_to_vec_u8, bytes_le_to_vec_usize,
fr_to_bytes_be, fr_to_bytes_le, normalize_usize_be, normalize_usize_le, str_to_fr,
vec_fr_to_bytes_be, vec_fr_to_bytes_le, vec_u8_to_bytes_be, vec_u8_to_bytes_le,
};
use ark_std::{rand::thread_rng, UniformRand};
use rln::circuit::Fr;
#[test]
fn test_normalize_usize_le() {
// Test basic cases
assert_eq!(normalize_usize_le(0), [0, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_le(1), [1, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_le(255), [255, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_le(256), [0, 1, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_le(65535), [255, 255, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_le(65536), [0, 0, 1, 0, 0, 0, 0, 0]);
// Test 32-bit boundary
assert_eq!(
normalize_usize_le(4294967295),
[255, 255, 255, 255, 0, 0, 0, 0]
);
assert_eq!(normalize_usize_le(4294967296), [0, 0, 0, 0, 1, 0, 0, 0]);
// Test maximum value
assert_eq!(
normalize_usize_le(usize::MAX),
[255, 255, 255, 255, 255, 255, 255, 255]
);
// Test that result is always 8 bytes
assert_eq!(normalize_usize_le(0).len(), 8);
assert_eq!(normalize_usize_le(usize::MAX).len(), 8);
}
#[test]
fn test_normalize_usize_be() {
// Test basic cases
assert_eq!(normalize_usize_be(0), [0, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_be(1), [0, 0, 0, 0, 0, 0, 0, 1]);
assert_eq!(normalize_usize_be(255), [0, 0, 0, 0, 0, 0, 0, 255]);
assert_eq!(normalize_usize_be(256), [0, 0, 0, 0, 0, 0, 1, 0]);
assert_eq!(normalize_usize_be(65535), [0, 0, 0, 0, 0, 0, 255, 255]);
assert_eq!(normalize_usize_be(65536), [0, 0, 0, 0, 0, 1, 0, 0]);
// Test 32-bit boundary
assert_eq!(
normalize_usize_be(4294967295),
[0, 0, 0, 0, 255, 255, 255, 255]
);
assert_eq!(normalize_usize_be(4294967296), [0, 0, 0, 1, 0, 0, 0, 0]);
// Test maximum value
assert_eq!(
normalize_usize_be(usize::MAX),
[255, 255, 255, 255, 255, 255, 255, 255]
);
// Test that result is always 8 bytes
assert_eq!(normalize_usize_be(0).len(), 8);
assert_eq!(normalize_usize_be(usize::MAX).len(), 8);
}
#[test]
fn test_normalize_usize_endianness() {
// Test that little-endian and big-endian produce different results for non-zero values
let test_values = vec![1, 255, 256, 65535, 65536, 4294967295, 4294967296];
for &value in &test_values {
let le_result = normalize_usize_le(value);
let be_result = normalize_usize_be(value);
// For non-zero values, LE and BE should be different
assert_ne!(
le_result, be_result,
"LE and BE should differ for value {value}"
);
// Both should be 8 bytes
assert_eq!(le_result.len(), 8);
assert_eq!(be_result.len(), 8);
}
// Zero should be the same in both endianness
assert_eq!(normalize_usize_le(0), normalize_usize_be(0));
}
#[test]
fn test_normalize_usize_roundtrip() {
// Test that we can reconstruct the original value from the normalized bytes
let test_values = vec![
0,
1,
255,
256,
65535,
65536,
4294967295,
4294967296,
usize::MAX,
];
for &value in &test_values {
let le_bytes = normalize_usize_le(value);
let be_bytes = normalize_usize_be(value);
// Reconstruct from little-endian bytes
let reconstructed_le = usize::from_le_bytes(le_bytes);
assert_eq!(
reconstructed_le, value,
"LE roundtrip failed for value {value}"
);
// Reconstruct from big-endian bytes
let reconstructed_be = usize::from_be_bytes(be_bytes);
assert_eq!(
reconstructed_be, value,
"BE roundtrip failed for value {value}"
);
}
}
#[test]
fn test_normalize_usize_edge_cases() {
// Test edge cases and boundary values
let edge_cases = vec![
0,
1,
255,
256,
65535,
65536,
16777215, // 2^24 - 1
16777216, // 2^24
4294967295, // 2^32 - 1
4294967296, // 2^32
1099511627775, // 2^40 - 1
1099511627776, // 2^40
281474976710655, // 2^48 - 1
281474976710656, // 2^48
72057594037927935, // 2^56 - 1
72057594037927936, // 2^56
usize::MAX,
];
for &value in &edge_cases {
let le_result = normalize_usize_le(value);
let be_result = normalize_usize_be(value);
// Both should be 8 bytes
assert_eq!(le_result.len(), 8);
assert_eq!(be_result.len(), 8);
// Roundtrip should work
assert_eq!(usize::from_le_bytes(le_result), value);
assert_eq!(usize::from_be_bytes(be_result), value);
}
}
#[test]
fn test_normalize_usize_architecture_independence() {
// Test that the functions work consistently regardless of the underlying architecture
// This test ensures that the functions provide consistent 8-byte output
// even on 32-bit systems where usize might be 4 bytes
let test_values = vec![0, 1, 255, 256, 65535, 65536, 4294967295, 4294967296];
for &value in &test_values {
let le_result = normalize_usize_le(value);
let be_result = normalize_usize_be(value);
// Always 8 bytes regardless of architecture
assert_eq!(le_result.len(), 8);
assert_eq!(be_result.len(), 8);
// The result should be consistent with the original value
assert_eq!(usize::from_le_bytes(le_result), value);
assert_eq!(usize::from_be_bytes(be_result), value);
}
}
#[test]
fn test_fr_serialization_roundtrip() {
let mut rng = thread_rng();
// Test multiple random Fr values
for _ in 0..10 {
let fr = Fr::rand(&mut rng);
// Test little-endian roundtrip
let le_bytes = fr_to_bytes_le(&fr);
let (reconstructed_le, _) = bytes_le_to_fr(&le_bytes);
assert_eq!(fr, reconstructed_le);
// Test big-endian roundtrip
let be_bytes = fr_to_bytes_be(&fr);
let (reconstructed_be, _) = bytes_be_to_fr(&be_bytes);
assert_eq!(fr, reconstructed_be);
}
}
#[test]
fn test_vec_fr_serialization_roundtrip() {
let mut rng = thread_rng();
// Test with different vector sizes
for size in [0, 1, 5, 10] {
let fr_vec: Vec<Fr> = (0..size).map(|_| Fr::rand(&mut rng)).collect();
// Test little-endian roundtrip
let le_bytes = vec_fr_to_bytes_le(&fr_vec);
let (reconstructed_le, _) = bytes_le_to_vec_fr(&le_bytes).unwrap();
assert_eq!(fr_vec, reconstructed_le);
// Test big-endian roundtrip
let be_bytes = vec_fr_to_bytes_be(&fr_vec);
let (reconstructed_be, _) = bytes_be_to_vec_fr(&be_bytes).unwrap();
assert_eq!(fr_vec, reconstructed_be);
}
}
#[test]
fn test_vec_u8_serialization_roundtrip() {
// Test with different vector sizes and content
let test_cases = vec![
vec![],
vec![0],
vec![255],
vec![1, 2, 3, 4, 5],
vec![0, 255, 128, 64, 32, 16, 8, 4, 2, 1],
(0..100).collect::<Vec<u8>>(),
];
for test_case in test_cases {
// Test little-endian roundtrip
let le_bytes = vec_u8_to_bytes_le(&test_case);
let (reconstructed_le, _) = bytes_le_to_vec_u8(&le_bytes).unwrap();
assert_eq!(test_case, reconstructed_le);
// Test big-endian roundtrip
let be_bytes = vec_u8_to_bytes_be(&test_case);
let (reconstructed_be, _) = bytes_be_to_vec_u8(&be_bytes).unwrap();
assert_eq!(test_case, reconstructed_be);
}
}
#[test]
fn test_vec_usize_serialization_roundtrip() {
// Test with different vector sizes and content
let test_cases = vec![
vec![],
vec![0],
vec![usize::MAX],
vec![1, 2, 3, 4, 5],
vec![0, 255, 65535, 4294967295, usize::MAX],
(0..10).collect::<Vec<usize>>(),
];
for test_case in test_cases {
// Test little-endian roundtrip
let le_bytes = {
let mut bytes = Vec::new();
bytes.extend_from_slice(&normalize_usize_le(test_case.len()));
for &value in &test_case {
bytes.extend_from_slice(&normalize_usize_le(value));
}
bytes
};
let reconstructed_le = bytes_le_to_vec_usize(&le_bytes).unwrap();
assert_eq!(test_case, reconstructed_le);
// Test big-endian roundtrip
let be_bytes = {
let mut bytes = Vec::new();
bytes.extend_from_slice(&normalize_usize_be(test_case.len()));
for &value in &test_case {
bytes.extend_from_slice(&normalize_usize_be(value));
}
bytes
};
let reconstructed_be = bytes_be_to_vec_usize(&be_bytes).unwrap();
assert_eq!(test_case, reconstructed_be);
}
}
#[test]
fn test_str_to_fr() {
// Test valid hex strings
let test_cases = vec![
("0x0", 16, Fr::from(0u64)),
("0x1", 16, Fr::from(1u64)),
("0xff", 16, Fr::from(255u64)),
("0x100", 16, Fr::from(256u64)),
];
for (input, radix, expected) in test_cases {
let result = str_to_fr(input, radix).unwrap();
assert_eq!(result, expected);
}
// Test invalid inputs
assert!(str_to_fr("invalid", 16).is_err());
assert!(str_to_fr("0x", 16).is_err());
}
#[test]
fn test_endianness_differences() {
let mut rng = thread_rng();
let fr = Fr::rand(&mut rng);
// Test that LE and BE produce different byte representations
let le_bytes = fr_to_bytes_le(&fr);
let be_bytes = fr_to_bytes_be(&fr);
// They should be different (unless the value is symmetric)
if le_bytes != be_bytes {
// Verify they can both be reconstructed correctly
let (reconstructed_le, _) = bytes_le_to_fr(&le_bytes);
let (reconstructed_be, _) = bytes_be_to_fr(&be_bytes);
assert_eq!(fr, reconstructed_le);
assert_eq!(fr, reconstructed_be);
}
}
#[test]
fn test_error_handling() {
// Test a zero length prefix with no trailing element data (valid: nothing to read)
let valid_length_invalid_data = vec![0u8; 8]; // Length 0, so no element data is required
assert!(bytes_le_to_vec_u8(&valid_length_invalid_data).is_ok());
assert!(bytes_be_to_vec_u8(&valid_length_invalid_data).is_ok());
assert!(bytes_le_to_vec_fr(&valid_length_invalid_data).is_ok());
assert!(bytes_be_to_vec_fr(&valid_length_invalid_data).is_ok());
assert!(bytes_le_to_vec_usize(&valid_length_invalid_data).is_ok());
assert!(bytes_be_to_vec_usize(&valid_length_invalid_data).is_ok());
// Test with reasonable length but insufficient data for vector deserialization
let reasonable_length = {
let mut bytes = vec![0u8; 8];
bytes[0] = 1; // Length 1 when read as LE (an enormous length as BE)
bytes
};
// This should fail because we don't have enough data for the vector elements
assert!(bytes_le_to_vec_u8(&reasonable_length).is_err());
assert!(bytes_be_to_vec_u8(&reasonable_length).is_err());
assert!(bytes_le_to_vec_fr(&reasonable_length).is_err());
assert!(bytes_be_to_vec_fr(&reasonable_length).is_err());
assert!(bytes_le_to_vec_usize(&reasonable_length).is_err());
assert!(bytes_be_to_vec_usize(&reasonable_length).is_err());
// Test with valid data for u8 vector
let valid_u8_data_le = {
let mut bytes = vec![0u8; 9];
bytes[..8].copy_from_slice(&(1u64.to_le_bytes())); // Length 1, little-endian
bytes[8] = 42; // One byte of data
bytes
};
let valid_u8_data_be = {
let mut bytes = vec![0u8; 9];
bytes[..8].copy_from_slice(&(1u64.to_be_bytes())); // Length 1, big-endian
bytes[8] = 42; // One byte of data
bytes
};
assert!(bytes_le_to_vec_u8(&valid_u8_data_le).is_ok());
assert!(bytes_be_to_vec_u8(&valid_u8_data_be).is_ok());
}
#[test]
fn test_empty_vectors() {
// Test empty vector serialization/deserialization
let empty_fr: Vec<Fr> = vec![];
let empty_u8: Vec<u8> = vec![];
let empty_usize: Vec<usize> = vec![];
// Test Fr vectors
let le_fr_bytes = vec_fr_to_bytes_le(&empty_fr);
let be_fr_bytes = vec_fr_to_bytes_be(&empty_fr);
let (reconstructed_le_fr, _) = bytes_le_to_vec_fr(&le_fr_bytes).unwrap();
let (reconstructed_be_fr, _) = bytes_be_to_vec_fr(&be_fr_bytes).unwrap();
assert_eq!(empty_fr, reconstructed_le_fr);
assert_eq!(empty_fr, reconstructed_be_fr);
// Test u8 vectors
let le_u8_bytes = vec_u8_to_bytes_le(&empty_u8);
let be_u8_bytes = vec_u8_to_bytes_be(&empty_u8);
let (reconstructed_le_u8, _) = bytes_le_to_vec_u8(&le_u8_bytes).unwrap();
let (reconstructed_be_u8, _) = bytes_be_to_vec_u8(&be_u8_bytes).unwrap();
assert_eq!(empty_u8, reconstructed_le_u8);
assert_eq!(empty_u8, reconstructed_be_u8);
// Test usize vectors
let le_usize_bytes = {
let mut bytes = Vec::new();
bytes.extend_from_slice(&normalize_usize_le(0));
bytes
};
let be_usize_bytes = {
let mut bytes = Vec::new();
bytes.extend_from_slice(&normalize_usize_be(0));
bytes
};
let reconstructed_le_usize = bytes_le_to_vec_usize(&le_usize_bytes).unwrap();
let reconstructed_be_usize = bytes_be_to_vec_usize(&be_usize_bytes).unwrap();
assert_eq!(empty_usize, reconstructed_le_usize);
assert_eq!(empty_usize, reconstructed_be_usize);
}
}


@@ -1,6 +1,6 @@
[package]
name = "zerokit_utils"
version = "0.5.2"
version = "0.6.0"
edition = "2021"
license = "MIT OR Apache-2.0"
description = "Various utilities for Zerokit"
@@ -12,29 +12,35 @@ repository = "https://github.com/vacp2p/zerokit"
bench = false
[dependencies]
ark-ff = { version = "0.5.0", features = ["asm"] }
num-bigint = { version = "0.4.6", default-features = false, features = [
"rand",
] }
color-eyre = "0.6.2"
pmtree = { package = "vacp2p_pmtree", version = "=2.0.2", optional = true }
ark-ff = { version = "0.5.0", default-features = false }
num-bigint = { version = "0.4.6", default-features = false }
pmtree = { package = "vacp2p_pmtree", version = "2.0.3", optional = true }
sled = "0.34.7"
serde = "1.0"
lazy_static = "1.4.0"
hex = "0.4"
serde_json = "1.0.141"
lazy_static = "1.5.0"
hex = "0.4.3"
rayon = "1.10.0"
thiserror = "2.0"
[dev-dependencies]
ark-bn254 = { version = "0.5.0", features = ["std"] }
num-traits = "0.2.19"
hex-literal = "0.3.4"
hex-literal = "0.4.1"
tiny-keccak = { version = "2.0.2", features = ["keccak"] }
criterion = { version = "0.4.0", features = ["html_reports"] }
criterion = { version = "0.7.0", features = ["html_reports"] }
[features]
default = ["parallel"]
default = []
parallel = ["ark-ff/parallel"]
pmtree-ft = ["pmtree"]
[[bench]]
name = "merkle_tree_benchmark"
harness = false
[[bench]]
name = "poseidon_benchmark"
harness = false
[package.metadata.docs.rs]
all-features = true


@@ -8,4 +8,4 @@ args = ["test", "--release"]
[tasks.bench]
command = "cargo"
args = ["bench"]
args = ["bench"]


@@ -1,68 +1,124 @@
# Zerokit Utils Crate
[![Crates.io](https://img.shields.io/crates/v/zerokit_utils.svg)](https://crates.io/crates/zerokit_utils)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![License: Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
Cryptographic primitives for zero-knowledge applications, featuring efficient Merkle tree implementations and a Poseidon hash function.
**Zerokit Utils** provides essential cryptographic primitives optimized for zero-knowledge applications.
This crate features efficient Merkle tree implementations and a Poseidon hash function,
designed to be robust and performant.
## Overview
This crate provides core cryptographic components optimized for zero-knowledge proof systems:
1. Multiple Merkle tree implementations with different space/time tradeoffs
2. A Poseidon hash implementation
- **Multiple Merkle Trees**: Various implementations optimized for different space/time trade-offs.
- **Poseidon Hash Function**: An efficient hashing algorithm suitable for ZK contexts, with customizable parameters.
- **Parallel Performance**: Leverages Rayon for significant speed-ups in Merkle tree computations.
- **Arkworks Compatibility**: Poseidon hash implementation is designed to work seamlessly
with Arkworks field traits and data structures.
## Merkle Tree Implementations
The crate supports two interchangeable Merkle tree implementations:
Merkle trees are fundamental data structures for verifying data integrity and set membership.
Zerokit Utils offers two interchangeable implementations:
### Understanding Merkle Tree Terminology
To better understand the structure and parameters of our Merkle trees, here's a quick glossary:
- **Depth (`depth`)**: the level of the leaves, counting from the root.
If the root is at level 0, the leaves are at level `depth`.
- **Number of Levels**: `depth + 1`.
- **Capacity (Number of Leaves)**: $2^{\text{depth}}$. This is the maximum number of leaves the tree can hold.
- **Total Number of Nodes**: $2^{(\text{depth} + 1)} - 1$ for a full binary tree.
**Example for a tree with `depth: 3`**:
- Number of Levels: 4 (levels 0, 1, 2, 3)
- Capacity (Number of Leaves): $2^3 = 8$
- Total Number of Nodes: $2^{(3+1)} - 1 = 15$
Visual representation of a Merkle tree with `depth: 3`:
```mermaid
flowchart TD
A[Root] --> N1
A[Root] --> N2
N1 --> N3
N1 --> N4
N2 --> N5
N2 --> N6
N3 -->|Leaf| L1
N3 -->|Leaf| L2
N4 -->|Leaf| L3
N4 -->|Leaf| L4
N5 -->|Leaf| L5
N5 -->|Leaf| L6
N6 -->|Leaf| L7
N6 -->|Leaf| L8
```
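For a concrete feel of the API, here is a minimal sketch that drives a `depth: 3` tree through the `ZerokitMerkleTree` trait. It is generic over the hasher, since the concrete hasher type is an application choice (the benchmarks in this repository use a Keccak-based test hasher):

```rust
use zerokit_utils::{FullMerkleConfig, FullMerkleTree, Hasher, ZerokitMerkleTree};

// A minimal sketch, assuming an application-supplied `H: Hasher`.
fn depth_three_tree<H: Hasher>() {
    let depth = 3;
    let mut tree =
        FullMerkleTree::<H>::new(depth, H::default_leaf(), FullMerkleConfig::default()).unwrap();

    // Capacity is 2^depth = 8 leaves; the full tree stores 2^(depth + 1) - 1 = 15 nodes.
    assert_eq!(tree.capacity(), 8);

    // Setting a leaf re-hashes its authentication path up to the root.
    tree.set(0, H::default_leaf()).unwrap();
    let proof = tree.proof(0).unwrap();
    assert!(tree.verify(&H::default_leaf(), &proof).unwrap());
}
```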
### Available Implementations
- **FullMerkleTree**
- Stores each tree node in memory
- Stores all tree nodes in memory.
- Use Case: Use when memory is abundant and operation speed is critical.
- **OptimalMerkleTree**
- Only stores nodes used to prove accumulation of set leaves
- Stores only the nodes required to prove the accumulation of set leaves (i.e., authentication paths).
- Use Case: Suited for environments where memory efficiency is a higher priority than raw speed.
#### Parallel Processing with Rayon
Both `OptimalMerkleTree` and `FullMerkleTree` internally utilize the Rayon crate
to accelerate computations through data parallelism.
This can lead to significant performance improvements, particularly during updates to large Merkle trees.
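Note that in this release the crate's `parallel` feature (which enables `ark-ff/parallel`) is no longer part of the default feature set, while Rayon itself remains an unconditional dependency. Downstream users who also want parallel field arithmetic can opt in explicitly:

```toml
[dependencies]
zerokit_utils = { version = "0.6.0", features = ["parallel"] }
```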
## Poseidon Hash Implementation
This crate provides an implementation to compute the Poseidon hash round constants and MDS matrices:
This crate provides an implementation for computing Poseidon hash round constants and MDS matrices.
Key characteristics include:
- **Customizable parameters**: Supports different security levels and input sizes
- **Arkworks-friendly**: Adapted to work over arkworks field traits and custom data structures
- **Customizable parameters**: Supports various security levels and input sizes,
allowing you to tailor the hash function to your specific needs.
- **Arkworks-friendly**: Adapted to integrate smoothly with Arkworks field traits and custom data structures.
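As an illustration, the sketch below hashes two field elements with the hardcoded parameters, mirroring this crate's Poseidon benchmark (included later in this diff). The tuple layout `(t, full_rounds, partial_rounds, skip_matrices)` and the `Result` return of `hash` are assumptions inferred from that benchmark code:

```rust
use ark_bn254::Fr;
use zerokit_utils::Poseidon;

// First two hardcoded parameter sets from the benchmark; assumed layout is
// (width t, full rounds, partial rounds, skip_matrices), i.e. 1- and 2-input hashes.
const ROUND_PARAMS: [(usize, usize, usize, usize); 2] = [(2, 8, 56, 0), (3, 8, 57, 0)];

fn main() {
    let hasher = Poseidon::<Fr>::from(&ROUND_PARAMS);
    // Hashing two inputs selects the t = 3 parameter set.
    let digest = hasher.hash(&[Fr::from(1u64), Fr::from(2u64)]).unwrap();
    println!("poseidon(1, 2) = {digest}");
}
```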
### Security Note
### ⚠️ Security Note
The MDS matrices are generated iteratively using the Grain LFSR until certain criteria are met.
According to the paper, such matrices must respect specific conditions which are checked by 3 different algorithms in the reference implementation.
The MDS matrices used in the Poseidon hash function are generated iteratively
using the Grain LFSR (Linear Feedback Shift Register) algorithm until specific cryptographic criteria are met.
These validation algorithms are not currently implemented in this crate.
For the hardcoded parameters, the first random matrix generated satisfies these conditions.
If using different parameters, you should check against the reference implementation how many matrices are generated before outputting the correct one,
and pass this number to the `skip_matrices` parameter of the `find_poseidon_ark_and_mds` function.
- The reference Poseidon implementation includes validation algorithms to ensure these criteria are satisfied.
These validation algorithms are not currently implemented in this crate.
- For the hardcoded parameters provided within this crate,
the initially generated random matrix has been verified to meet these conditions.
- If you intend to use custom parameters, it is crucial to verify your generated MDS matrix.
You should consult the Poseidon reference implementation to determine
how many matrices are typically skipped before a valid one is found.
This count should then be passed as the `skip_matrices` parameter to the
`find_poseidon_ark_and_mds` function in this crate.
## Installation
Add Zerokit Utils to your Rust project:
Add `zerokit_utils` as a dependency in your `Cargo.toml`:
```toml
[dependencies]
zerokit-utils = "0.5.1"
zerokit_utils = "0.6.0"
```
## Performance Considerations
- **FullMerkleTree**: Use when memory is abundant and operation speed is critical
- **OptimalMerkleTree**: Use when memory efficiency is more important than raw speed
- **Poseidon**: Offers a good balance between security and performance for ZK applications
## Building and Testing
```bash
# Build the crate
cargo build
cargo make build
# Run tests
cargo test
cargo make test
# Run benchmarks
cargo bench
cargo make bench
```
To view the benchmark results, open the `target/criterion/report/index.html` file generated after the benchmarks complete.


@@ -1,7 +1,7 @@
use criterion::{criterion_group, criterion_main, Criterion};
use hex_literal::hex;
use lazy_static::lazy_static;
use std::{fmt::Display, str::FromStr};
use criterion::{criterion_group, criterion_main, Criterion};
use lazy_static::lazy_static;
use tiny_keccak::{Hasher as _, Keccak};
use zerokit_utils::{
FullMerkleConfig, FullMerkleTree, Hasher, OptimalMerkleConfig, OptimalMerkleTree,
@@ -47,54 +47,78 @@ impl FromStr for TestFr {
}
lazy_static! {
static ref LEAVES: [TestFr; 4] = [
hex!("0000000000000000000000000000000000000000000000000000000000000001"),
hex!("0000000000000000000000000000000000000000000000000000000000000002"),
hex!("0000000000000000000000000000000000000000000000000000000000000003"),
hex!("0000000000000000000000000000000000000000000000000000000000000004"),
]
.map(TestFr);
static ref LEAVES: Vec<TestFr> = {
let mut leaves = Vec::with_capacity(1 << 20);
for i in 0..(1 << 20) {
let mut bytes = [0u8; 32];
bytes[28..].copy_from_slice(&(i as u32).to_be_bytes());
leaves.push(TestFr(bytes));
}
leaves
};
static ref INDICES: Vec<usize> = (0..(1 << 20)).collect();
}
const NOF_LEAVES: usize = 8192;
pub fn optimal_merkle_tree_benchmark(c: &mut Criterion) {
let mut tree =
OptimalMerkleTree::<Keccak256>::new(2, TestFr([0; 32]), OptimalMerkleConfig::default())
OptimalMerkleTree::<Keccak256>::new(20, TestFr([0; 32]), OptimalMerkleConfig::default())
.unwrap();
for i in 0..NOF_LEAVES {
tree.set(i, LEAVES[i % LEAVES.len()]).unwrap();
}
c.bench_function("OptimalMerkleTree::set", |b| {
let mut index = NOF_LEAVES;
b.iter(|| {
tree.set(0, LEAVES[0]).unwrap();
tree.set(index % (1 << 20), LEAVES[index % LEAVES.len()])
.unwrap();
index = (index + 1) % (1 << 20);
})
});
c.bench_function("OptimalMerkleTree::delete", |b| {
let mut index = 0;
b.iter(|| {
tree.delete(0).unwrap();
tree.delete(index % NOF_LEAVES).unwrap();
tree.set(index % NOF_LEAVES, LEAVES[index % LEAVES.len()])
.unwrap();
index = (index + 1) % NOF_LEAVES;
})
});
c.bench_function("OptimalMerkleTree::override_range", |b| {
let mut offset = 0;
b.iter(|| {
tree.override_range(0, *LEAVES, [0, 1, 2, 3]).unwrap();
})
});
c.bench_function("OptimalMerkleTree::compute_root", |b| {
b.iter(|| {
tree.compute_root().unwrap();
let range = offset..offset + NOF_LEAVES;
tree.override_range(
offset,
LEAVES[range.clone()].iter().cloned(),
INDICES[range.clone()].iter().cloned(),
)
.unwrap();
offset = (offset + NOF_LEAVES) % (1 << 20);
})
});
c.bench_function("OptimalMerkleTree::get", |b| {
let mut index = 0;
b.iter(|| {
tree.get(0).unwrap();
tree.get(index % NOF_LEAVES).unwrap();
index = (index + 1) % NOF_LEAVES;
})
});
// check the intermediate node getter, which requires additional computation of the subtree root index
c.bench_function("OptimalMerkleTree::get_subtree_root", |b| {
let mut level = 1;
let mut index = 0;
b.iter(|| {
tree.get_subtree_root(1, 0).unwrap();
tree.get_subtree_root(level % 20, index % (1 << (20 - (level % 20))))
.unwrap();
index = (index + 1) % (1 << (20 - (level % 20)));
level = 1 + (level % 20);
})
});
@@ -107,42 +131,61 @@ pub fn optimal_merkle_tree_benchmark(c: &mut Criterion) {
pub fn full_merkle_tree_benchmark(c: &mut Criterion) {
let mut tree =
FullMerkleTree::<Keccak256>::new(2, TestFr([0; 32]), FullMerkleConfig::default()).unwrap();
FullMerkleTree::<Keccak256>::new(20, TestFr([0; 32]), FullMerkleConfig::default()).unwrap();
for i in 0..NOF_LEAVES {
tree.set(i, LEAVES[i % LEAVES.len()]).unwrap();
}
c.bench_function("FullMerkleTree::set", |b| {
let mut index = NOF_LEAVES;
b.iter(|| {
tree.set(0, LEAVES[0]).unwrap();
tree.set(index % (1 << 20), LEAVES[index % LEAVES.len()])
.unwrap();
index = (index + 1) % (1 << 20);
})
});
c.bench_function("FullMerkleTree::delete", |b| {
let mut index = 0;
b.iter(|| {
tree.delete(0).unwrap();
tree.delete(index % NOF_LEAVES).unwrap();
tree.set(index % NOF_LEAVES, LEAVES[index % LEAVES.len()])
.unwrap();
index = (index + 1) % NOF_LEAVES;
})
});
c.bench_function("FullMerkleTree::override_range", |b| {
let mut offset = 0;
b.iter(|| {
tree.override_range(0, *LEAVES, [0, 1, 2, 3]).unwrap();
})
});
c.bench_function("FullMerkleTree::compute_root", |b| {
b.iter(|| {
tree.compute_root().unwrap();
let range = offset..offset + NOF_LEAVES;
tree.override_range(
offset,
LEAVES[range.clone()].iter().cloned(),
INDICES[range.clone()].iter().cloned(),
)
.unwrap();
offset = (offset + NOF_LEAVES) % (1 << 20);
})
});
c.bench_function("FullMerkleTree::get", |b| {
let mut index = 0;
b.iter(|| {
tree.get(0).unwrap();
tree.get(index % NOF_LEAVES).unwrap();
index = (index + 1) % NOF_LEAVES;
})
});
// check the intermediate node getter, which requires additional computation of the subtree root index
c.bench_function("FullMerkleTree::get_subtree_root", |b| {
let mut level = 1;
let mut index = 0;
b.iter(|| {
tree.get_subtree_root(1, 0).unwrap();
tree.get_subtree_root(level % 20, index % (1 << (20 - (level % 20))))
.unwrap();
index = (index + 1) % (1 << (20 - (level % 20)));
level = 1 + (level % 20);
})
});


@@ -0,0 +1,65 @@
use std::hint::black_box;
use ark_bn254::Fr;
use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, Throughput};
use zerokit_utils::Poseidon;
const ROUND_PARAMS: [(usize, usize, usize, usize); 8] = [
(2, 8, 56, 0),
(3, 8, 57, 0),
(4, 8, 56, 0),
(5, 8, 60, 0),
(6, 8, 60, 0),
(7, 8, 63, 0),
(8, 8, 64, 0),
(9, 8, 63, 0),
];
pub fn poseidon_benchmark(c: &mut Criterion) {
let hasher = Poseidon::<Fr>::from(&ROUND_PARAMS);
let mut group = c.benchmark_group("poseidon Fr");
for size in [10u32, 100, 1000].iter() {
group.throughput(Throughput::Elements(*size as u64));
group.bench_with_input(BenchmarkId::new("Array hash", size), size, |b, &size| {
b.iter_batched(
// Setup: create values for each benchmark iteration
|| {
let mut values = Vec::with_capacity(size as usize);
for i in 0..size {
values.push([Fr::from(i)]);
}
values
},
// Actual benchmark
|values| {
for v in values.iter() {
let _ = hasher.hash(black_box(&v[..]));
}
},
BatchSize::SmallInput,
)
});
}
// Benchmark single hash operation separately
group.bench_function("Single hash", |b| {
let input = [Fr::from(u64::MAX)];
b.iter(|| {
let _ = hasher.hash(black_box(&input[..]));
})
});
group.finish();
}
criterion_group! {
name = benches;
config = Criterion::default()
.warm_up_time(std::time::Duration::from_millis(500))
.measurement_time(std::time::Duration::from_secs(4))
.sample_size(20);
targets = poseidon_benchmark
}
criterion_main!(benches);


@@ -0,0 +1,33 @@
#[derive(thiserror::Error, Debug)]
pub enum ZerokitMerkleTreeError {
#[error("Invalid index")]
InvalidIndex,
// InvalidProof,
#[error("Leaf index out of bounds")]
InvalidLeaf,
#[error("Level exceeds tree depth")]
InvalidLevel,
#[error("Subtree index out of bounds")]
InvalidSubTreeIndex,
#[error("Start level is != from end level")]
InvalidStartAndEndLevel,
#[error("set_range got too many leaves")]
TooManySet,
#[error("Unknown error while computing merkle proof")]
ComputingProofError,
#[error("Invalid witness length (!= tree depth)")]
InvalidWitness,
#[cfg(feature = "pmtree-ft")]
#[error("Pmtree error: {0}")]
PmtreeErrorKind(#[from] pmtree::PmtreeErrorKind),
}
#[derive(Debug, thiserror::Error)]
pub enum FromConfigError {
#[error("Error while reading pmtree config: {0}")]
JsonError(#[from] serde_json::Error),
#[error("Error while creating pmtree config: missing path")]
MissingPath,
#[error("Error while creating pmtree config: path already exists")]
PathExists,
}


@@ -1,28 +1,29 @@
use crate::merkle_tree::{FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree};
use color_eyre::{Report, Result};
use std::{
cmp::max,
fmt::Debug,
iter::{once, repeat, successors},
iter::{once, repeat_n},
str::FromStr,
};
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use crate::merkle_tree::{
error::{FromConfigError, ZerokitMerkleTreeError},
FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES,
};
////////////////////////////////////////////////////////////
///// Full Merkle Tree Implementation
////////////////////////////////////////////////////////////
/// Merkle tree with all leaf and intermediate hashes stored
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct FullMerkleTree<H: Hasher> {
pub struct FullMerkleTree<H>
where
H: Hasher,
{
/// The depth of the tree, i.e. the number of levels from leaf to root
depth: usize,
/// The nodes cached from the empty part of the tree (where leaves are set to default).
/// Since the rightmost part of the tree is usually changed much later than its creation,
/// we can prove accumulation of elements in the leftmost part, with no need to initialize the full tree
/// and by caching a few intermediate nodes up to the root computed from default leaves
cached_nodes: Vec<H::Fr>,
/// The tree nodes
nodes: Vec<H::Fr>,
@@ -30,11 +31,11 @@ pub struct FullMerkleTree<H: Hasher> {
/// Set to 0 if the leaf is empty and to 1 otherwise.
cached_leaves_indices: Vec<u8>,
// The next available (i.e., never used) tree index. Equivalently, the number of leaves added to the tree
// (deletions leave next_index unchanged)
/// The next available (i.e., never used) tree index. Equivalently, the number of leaves added to the tree
/// (deletions leave next_index unchanged)
next_index: usize,
// metadata that an application may use to store additional information
/// metadata that an application may use to store additional information
metadata: Vec<u8>,
}
@@ -56,9 +57,9 @@ pub struct FullMerkleProof<H: Hasher>(pub Vec<FullMerkleBranch<H>>);
pub struct FullMerkleConfig(());
impl FromStr for FullMerkleConfig {
type Err = Report;
type Err = FromConfigError;
fn from_str(_s: &str) -> Result<Self> {
fn from_str(_s: &str) -> Result<Self, Self::Err> {
Ok(FullMerkleConfig::default())
}
}
@@ -72,86 +73,89 @@ where
type Hasher = H;
type Config = FullMerkleConfig;
fn default(depth: usize) -> Result<Self> {
fn default(depth: usize) -> Result<Self, ZerokitMerkleTreeError> {
FullMerkleTree::<H>::new(depth, Self::Hasher::default_leaf(), Self::Config::default())
}
/// Creates a new `MerkleTree`
/// depth - the height of the tree made only of hash nodes. 2^depth is the maximum number of leaves hash nodes
fn new(depth: usize, initial_leaf: FrOf<Self::Hasher>, _config: Self::Config) -> Result<Self> {
/// depth - the depth of the tree made only of hash nodes. 2^depth is the maximum number of leaf hash nodes
fn new(
depth: usize,
default_leaf: FrOf<Self::Hasher>,
_config: Self::Config,
) -> Result<Self, ZerokitMerkleTreeError> {
// Compute cache node values, leaf to root
let cached_nodes = successors(Some(initial_leaf), |prev| Some(H::hash(&[*prev, *prev])))
.take(depth + 1)
.collect::<Vec<_>>();
let mut cached_nodes: Vec<H::Fr> = Vec::with_capacity(depth + 1);
cached_nodes.push(default_leaf);
for i in 0..depth {
cached_nodes.push(H::hash(&[cached_nodes[i]; 2]));
}
cached_nodes.reverse();
// Compute node values
let nodes = cached_nodes
.iter()
.rev()
.enumerate()
.flat_map(|(levels, hash)| repeat(hash).take(1 << levels))
.flat_map(|(levels, hash)| repeat_n(hash, 1 << levels))
.cloned()
.collect::<Vec<_>>();
debug_assert!(nodes.len() == (1 << (depth + 1)) - 1);
let next_index = 0;
Ok(Self {
depth,
cached_nodes,
nodes,
cached_leaves_indices: vec![0; 1 << depth],
next_index,
next_index: 0,
metadata: Vec::new(),
})
}
fn close_db_connection(&mut self) -> Result<()> {
fn close_db_connection(&mut self) -> Result<(), ZerokitMerkleTreeError> {
Ok(())
}
// Returns the depth of the tree
/// Returns the depth of the tree
fn depth(&self) -> usize {
self.depth
}
// Returns the capacity of the tree, i.e. the maximum number of accumulatable leaves
/// Returns the capacity of the tree, i.e. the maximum number of accumulatable leaves
fn capacity(&self) -> usize {
1 << self.depth
}
// Returns the total number of leaves set
/// Returns the total number of leaves set
fn leaves_set(&self) -> usize {
self.next_index
}
#[must_use]
// Returns the root of the tree
/// Returns the root of the tree
fn root(&self) -> FrOf<Self::Hasher> {
self.nodes[0]
}
// Sets a leaf at the specified tree index
fn set(&mut self, leaf: usize, hash: FrOf<Self::Hasher>) -> Result<()> {
/// Sets a leaf at the specified tree index
fn set(&mut self, leaf: usize, hash: FrOf<Self::Hasher>) -> Result<(), ZerokitMerkleTreeError> {
self.set_range(leaf, once(hash))?;
self.next_index = max(self.next_index, leaf + 1);
Ok(())
}
// Get a leaf from the specified tree index
fn get(&self, leaf: usize) -> Result<FrOf<Self::Hasher>> {
/// Get a leaf from the specified tree index
fn get(&self, leaf: usize) -> Result<FrOf<Self::Hasher>, ZerokitMerkleTreeError> {
if leaf >= self.capacity() {
return Err(Report::msg("leaf index out of bounds"));
return Err(ZerokitMerkleTreeError::InvalidLeaf);
}
Ok(self.nodes[self.capacity() + leaf - 1])
}
fn get_subtree_root(&self, n: usize, index: usize) -> Result<H::Fr> {
/// Returns the root of the subtree at level n and index
fn get_subtree_root(&self, n: usize, index: usize) -> Result<H::Fr, ZerokitMerkleTreeError> {
if n > self.depth() {
return Err(Report::msg("level exceeds depth size"));
return Err(ZerokitMerkleTreeError::InvalidLevel);
}
if index >= self.capacity() {
return Err(Report::msg("index exceeds set size"));
return Err(ZerokitMerkleTreeError::InvalidSubTreeIndex);
}
if n == 0 {
Ok(self.root())
@@ -161,7 +165,7 @@ where
let mut idx = self.capacity() + index - 1;
let mut nd = self.depth;
loop {
let parent = self.parent(idx).unwrap();
let parent = self.parent(idx).expect("parent should exist");
nd -= 1;
if nd == n {
return Ok(self.nodes[parent]);
@@ -172,6 +176,8 @@ where
}
}
}
/// Returns the indices of the leaves that are empty
fn get_empty_leaves_indices(&self) -> Vec<usize> {
self.cached_leaves_indices
.iter()
@@ -182,40 +188,45 @@ where
.collect()
}
// Sets tree nodes, starting from start index
// Function proper of FullMerkleTree implementation
fn set_range<I: IntoIterator<Item = FrOf<Self::Hasher>>>(
/// Sets multiple leaves from the specified tree index
fn set_range<I: ExactSizeIterator<Item = FrOf<Self::Hasher>>>(
&mut self,
start: usize,
hashes: I,
) -> Result<()> {
leaves: I,
) -> Result<(), ZerokitMerkleTreeError> {
let index = self.capacity() + start - 1;
let mut count = 0;
// first count number of hashes, and check that they fit in the tree
// first count number of leaves, and check that they fit in the tree
// then insert into the tree
let hashes = hashes.into_iter().collect::<Vec<_>>();
if hashes.len() + start > self.capacity() {
return Err(Report::msg("provided hashes do not fit in the tree"));
let leaves = leaves.into_iter().collect::<Vec<_>>();
if leaves.len() + start > self.capacity() {
return Err(ZerokitMerkleTreeError::TooManySet);
}
hashes.into_iter().for_each(|hash| {
leaves.into_iter().for_each(|hash| {
self.nodes[index + count] = hash;
self.cached_leaves_indices[start + count] = 1;
count += 1;
});
if count != 0 {
self.update_nodes(index, index + (count - 1))?;
self.update_hashes(index, index + (count - 1))?;
self.next_index = max(self.next_index, start + count);
}
Ok(())
}
fn override_range<I, J>(&mut self, start: usize, leaves: I, indices: J) -> Result<()>
/// Overrides a range of leaves while resetting specified indices to default and preserving unaffected values.
fn override_range<I, J>(
&mut self,
start: usize,
leaves: I,
indices: J,
) -> Result<(), ZerokitMerkleTreeError>
where
I: IntoIterator<Item = FrOf<Self::Hasher>>,
J: IntoIterator<Item = usize>,
I: ExactSizeIterator<Item = FrOf<Self::Hasher>>,
J: ExactSizeIterator<Item = usize>,
{
let indices = indices.into_iter().collect::<Vec<_>>();
let min_index = *indices.first().unwrap();
let min_index = *indices.first().expect("indices should not be empty");
let leaves_vec = leaves.into_iter().collect::<Vec<_>>();
let max_index = start + leaves_vec.len();
@@ -237,18 +248,17 @@ where
self.cached_leaves_indices[i] = 0;
}
self.set_range(start, set_values)
.map_err(|e| Report::msg(e.to_string()))
self.set_range(start, set_values.into_iter())
}
// Sets a leaf at the next available index
fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<()> {
/// Sets a leaf at the next available index
fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<(), ZerokitMerkleTreeError> {
self.set(self.next_index, leaf)?;
Ok(())
}
// Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
fn delete(&mut self, index: usize) -> Result<()> {
/// Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
fn delete(&mut self, index: usize) -> Result<(), ZerokitMerkleTreeError> {
// We reset the leaf only if we previously set a leaf at that index
if index < self.next_index {
self.set(index, H::default_leaf())?;
@@ -258,9 +268,9 @@ where
}
// Computes a merkle proof the leaf at the specified index
fn proof(&self, leaf: usize) -> Result<FullMerkleProof<H>> {
fn proof(&self, leaf: usize) -> Result<FullMerkleProof<H>, ZerokitMerkleTreeError> {
if leaf >= self.capacity() {
return Err(Report::msg("index exceeds set size"));
return Err(ZerokitMerkleTreeError::InvalidLeaf);
}
let mut index = self.capacity() + leaf - 1;
let mut path = Vec::with_capacity(self.depth + 1);
@@ -277,30 +287,29 @@ where
}
// Verifies a Merkle proof with respect to the input leaf and the tree root
fn verify(&self, hash: &FrOf<Self::Hasher>, proof: &FullMerkleProof<H>) -> Result<bool> {
fn verify(
&self,
hash: &FrOf<Self::Hasher>,
proof: &FullMerkleProof<H>,
) -> Result<bool, ZerokitMerkleTreeError> {
Ok(proof.compute_root_from(hash) == self.root())
}
fn compute_root(&mut self) -> Result<FrOf<Self::Hasher>> {
Ok(self.root())
}
fn set_metadata(&mut self, metadata: &[u8]) -> Result<()> {
fn set_metadata(&mut self, metadata: &[u8]) -> Result<(), ZerokitMerkleTreeError> {
self.metadata = metadata.to_vec();
Ok(())
}
fn metadata(&self) -> Result<Vec<u8>> {
fn metadata(&self) -> Result<Vec<u8>, ZerokitMerkleTreeError> {
Ok(self.metadata.to_vec())
}
}
// Utilities for updating the tree nodes
impl<H: Hasher> FullMerkleTree<H>
where
H: Hasher,
{
// Utilities for updating the tree nodes
/// For a given node index, return the parent node index
/// Returns None if there is no parent (root node)
fn parent(&self, index: usize) -> Option<usize> {
@@ -316,23 +325,60 @@ where
(index << 1) + 1
}
/// Returns the depth level of a node based on its index in the flattened tree.
fn levels(&self, index: usize) -> usize {
// `n.next_power_of_two()` will return `n` iff `n` is a power of two.
// The extra offset corrects this.
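// For example: index 0 (root) gives (0 + 2).next_power_of_two() = 2,
// trailing_zeros() = 1, hence level 0; index 3 gives (3 + 2) -> 8,
// trailing_zeros() = 3, hence level 2.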
(index + 2).next_power_of_two().trailing_zeros() as usize - 1
}
fn update_nodes(&mut self, start: usize, end: usize) -> Result<()> {
if self.levels(start) != self.levels(end) {
return Err(Report::msg("self.levels(start) != self.levels(end)"));
/// Updates parent hashes after modifying a range of nodes at the same level.
///
/// - `start_index`: The first index at the current level that was updated.
/// - `end_index`: The last index (inclusive) at the same level that was updated.
fn update_hashes(
&mut self,
start_index: usize,
end_index: usize,
) -> Result<(), ZerokitMerkleTreeError> {
// Ensure the range is within the same tree level
if self.levels(start_index) != self.levels(end_index) {
return Err(ZerokitMerkleTreeError::InvalidStartAndEndLevel);
}
if let (Some(start), Some(end)) = (self.parent(start), self.parent(end)) {
for parent in start..=end {
let child = self.first_child(parent);
self.nodes[parent] = H::hash(&[self.nodes[child], self.nodes[child + 1]]);
// Compute parent indices for the range
if let (Some(start_parent), Some(end_parent)) =
(self.parent(start_index), self.parent(end_index))
{
// Use parallel processing when the number of pairs exceeds the threshold
if end_parent - start_parent + 1 >= MIN_PARALLEL_NODES {
let updates: Vec<(usize, H::Fr)> = (start_parent..=end_parent)
.into_par_iter()
.map(|parent| {
let left_child = self.first_child(parent);
let right_child = left_child + 1;
let hash = H::hash(&[self.nodes[left_child], self.nodes[right_child]]);
(parent, hash)
})
.collect();
for (parent, hash) in updates {
self.nodes[parent] = hash;
}
} else {
// Otherwise, fallback to sequential update for small ranges
for parent in start_parent..=end_parent {
let left_child = self.first_child(parent);
let right_child = left_child + 1;
self.nodes[parent] =
H::hash(&[self.nodes[left_child], self.nodes[right_child]]);
}
}
self.update_nodes(start, end)?;
// Recurse to update upper levels
self.update_hashes(start_parent, end_parent)?;
}
Ok(())
}
}
@@ -341,14 +387,12 @@ impl<H: Hasher> ZerokitMerkleProof for FullMerkleProof<H> {
type Index = u8;
type Hasher = H;
#[must_use]
// Returns the length of a Merkle proof
fn length(&self) -> usize {
self.0.len()
}
/// Computes the leaf index corresponding to a Merkle proof
#[must_use]
fn leaf_index(&self) -> usize {
self.0.iter().rev().fold(0, |index, branch| match branch {
FullMerkleBranch::Left(_) => index << 1,
@@ -356,7 +400,6 @@ impl<H: Hasher> ZerokitMerkleProof for FullMerkleProof<H> {
})
}
#[must_use]
/// Returns the path elements forming a Merkle proof
fn get_path_elements(&self) -> Vec<FrOf<Self::Hasher>> {
self.0
@@ -368,7 +411,6 @@ impl<H: Hasher> ZerokitMerkleProof for FullMerkleProof<H> {
}
/// Returns the path indexes forming a Merkle proof
#[must_use]
fn get_path_index(&self) -> Vec<Self::Index> {
self.0
.iter()
@@ -380,7 +422,6 @@ impl<H: Hasher> ZerokitMerkleProof for FullMerkleProof<H> {
}
/// Computes the Merkle root corresponding by iteratively hashing a Merkle proof with a given input leaf
#[must_use]
fn compute_root_from(&self, hash: &FrOf<Self::Hasher>) -> FrOf<Self::Hasher> {
self.0.iter().fold(*hash, |hash, branch| match branch {
FullMerkleBranch::Left(sibling) => H::hash(&[hash, *sibling]),


@@ -8,20 +8,25 @@
// and https://github.com/worldcoin/semaphore-rs/blob/d462a4372f1fd9c27610f2acfe4841fab1d396aa/src/merkle_tree.rs
//!
//! # To do
//! # TODO
//!
//! * Disk based storage backend (using mmaped files should be easy)
//! * Implement serialization for tree and Merkle proof
use std::str::FromStr;
use crate::merkle_tree::error::ZerokitMerkleTreeError;
use std::{
fmt::{Debug, Display},
str::FromStr,
};
use color_eyre::Result;
/// Enables parallel hashing when there are at least 8 nodes (4 pairs to hash), justifying the overhead.
pub const MIN_PARALLEL_NODES: usize = 8;
/// In the Hasher trait we define the node type, the default leaf
/// and the hash function used to initialize a Merkle Tree implementation
pub trait Hasher {
/// Type of the leaf and tree node
type Fr: Clone + Copy + Eq + Default + std::fmt::Debug + std::fmt::Display + FromStr;
type Fr: Clone + Copy + Eq + Default + Debug + Display + FromStr + Send + Sync;
/// Returns the default tree leaf
fn default_leaf() -> Self::Fr;
@@ -39,35 +44,52 @@ pub trait ZerokitMerkleTree {
type Hasher: Hasher;
type Config: Default + FromStr;
fn default(depth: usize) -> Result<Self>
fn default(depth: usize) -> Result<Self, ZerokitMerkleTreeError>
where
Self: Sized;
fn new(depth: usize, default_leaf: FrOf<Self::Hasher>, config: Self::Config) -> Result<Self>
fn new(
depth: usize,
default_leaf: FrOf<Self::Hasher>,
config: Self::Config,
) -> Result<Self, ZerokitMerkleTreeError>
where
Self: Sized;
fn depth(&self) -> usize;
fn capacity(&self) -> usize;
fn leaves_set(&self) -> usize;
fn root(&self) -> FrOf<Self::Hasher>;
fn compute_root(&mut self) -> Result<FrOf<Self::Hasher>>;
fn get_subtree_root(&self, n: usize, index: usize) -> Result<FrOf<Self::Hasher>>;
fn set(&mut self, index: usize, leaf: FrOf<Self::Hasher>) -> Result<()>;
fn set_range<I>(&mut self, start: usize, leaves: I) -> Result<()>
fn get_subtree_root(
&self,
n: usize,
index: usize,
) -> Result<FrOf<Self::Hasher>, ZerokitMerkleTreeError>;
fn set(&mut self, index: usize, leaf: FrOf<Self::Hasher>)
-> Result<(), ZerokitMerkleTreeError>;
fn set_range<I>(&mut self, start: usize, leaves: I) -> Result<(), ZerokitMerkleTreeError>
where
I: IntoIterator<Item = FrOf<Self::Hasher>>;
fn get(&self, index: usize) -> Result<FrOf<Self::Hasher>>;
I: ExactSizeIterator<Item = FrOf<Self::Hasher>>;
fn get(&self, index: usize) -> Result<FrOf<Self::Hasher>, ZerokitMerkleTreeError>;
fn get_empty_leaves_indices(&self) -> Vec<usize>;
fn override_range<I, J>(&mut self, start: usize, leaves: I, to_remove_indices: J) -> Result<()>
fn override_range<I, J>(
&mut self,
start: usize,
leaves: I,
to_remove_indices: J,
) -> Result<(), ZerokitMerkleTreeError>
where
I: IntoIterator<Item = FrOf<Self::Hasher>>,
J: IntoIterator<Item = usize>;
fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<()>;
fn delete(&mut self, index: usize) -> Result<()>;
fn proof(&self, index: usize) -> Result<Self::Proof>;
fn verify(&self, leaf: &FrOf<Self::Hasher>, witness: &Self::Proof) -> Result<bool>;
fn set_metadata(&mut self, metadata: &[u8]) -> Result<()>;
fn metadata(&self) -> Result<Vec<u8>>;
fn close_db_connection(&mut self) -> Result<()>;
I: ExactSizeIterator<Item = FrOf<Self::Hasher>>,
J: ExactSizeIterator<Item = usize>;
fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<(), ZerokitMerkleTreeError>;
fn delete(&mut self, index: usize) -> Result<(), ZerokitMerkleTreeError>;
fn proof(&self, index: usize) -> Result<Self::Proof, ZerokitMerkleTreeError>;
fn verify(
&self,
leaf: &FrOf<Self::Hasher>,
witness: &Self::Proof,
) -> Result<bool, ZerokitMerkleTreeError>;
fn set_metadata(&mut self, metadata: &[u8]) -> Result<(), ZerokitMerkleTreeError>;
fn metadata(&self) -> Result<Vec<u8>, ZerokitMerkleTreeError>;
fn close_db_connection(&mut self) -> Result<(), ZerokitMerkleTreeError>;
}
pub trait ZerokitMerkleProof {


@@ -1,7 +1,11 @@
pub mod error;
pub mod full_merkle_tree;
#[allow(clippy::module_inception)]
pub mod merkle_tree;
pub mod optimal_merkle_tree;
pub use self::full_merkle_tree::*;
pub use self::merkle_tree::*;
pub use self::optimal_merkle_tree::*;
pub use self::full_merkle_tree::{FullMerkleConfig, FullMerkleProof, FullMerkleTree};
pub use self::merkle_tree::{
FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES,
};
pub use self::optimal_merkle_tree::{OptimalMerkleConfig, OptimalMerkleProof, OptimalMerkleTree};


@@ -1,10 +1,11 @@
use crate::merkle_tree::{Hasher, ZerokitMerkleProof, ZerokitMerkleTree};
use crate::FrOf;
use color_eyre::{Report, Result};
use std::collections::HashMap;
use std::str::FromStr;
use std::{cmp::max, fmt::Debug};
use std::{cmp::max, collections::HashMap, fmt::Debug, str::FromStr};
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use crate::merkle_tree::{
error::{FromConfigError, ZerokitMerkleTreeError},
FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES,
};
////////////////////////////////////////////////////////////
///// Optimal Merkle Tree Implementation
////////////////////////////////////////////////////////////
@@ -31,11 +32,11 @@ where
/// Set to 0 if the leaf is empty and set to 1 otherwise.
cached_leaves_indices: Vec<u8>,
// The next available (i.e., never used) tree index. Equivalently, the number of leaves added to the tree
// (deletions leave next_index unchanged)
/// The next available (i.e., never used) tree index. Equivalently, the number of leaves added to the tree
/// (deletions leave next_index unchanged)
next_index: usize,
// metadata that an application may use to store additional information
/// metadata that an application may use to store additional information
metadata: Vec<u8>,
}
@@ -48,17 +49,14 @@ pub struct OptimalMerkleProof<H: Hasher>(pub Vec<(H::Fr, u8)>);
pub struct OptimalMerkleConfig(());
impl FromStr for OptimalMerkleConfig {
type Err = Report;
type Err = FromConfigError;
fn from_str(_s: &str) -> Result<Self> {
fn from_str(_s: &str) -> Result<Self, Self::Err> {
Ok(OptimalMerkleConfig::default())
}
}
////////////////////////////////////////////////////////////
///// Implementations
////////////////////////////////////////////////////////////
/// Implementations
impl<H: Hasher> ZerokitMerkleTree for OptimalMerkleTree<H>
where
H: Hasher,
@@ -67,60 +65,86 @@ where
type Hasher = H;
type Config = OptimalMerkleConfig;
fn default(depth: usize) -> Result<Self> {
fn default(depth: usize) -> Result<Self, ZerokitMerkleTreeError> {
OptimalMerkleTree::<H>::new(depth, H::default_leaf(), Self::Config::default())
}
/// Creates a new `MerkleTree`
/// depth - the height of the tree made only of hash nodes. 2^depth is the maximum number of leaves hash nodes
fn new(depth: usize, default_leaf: H::Fr, _config: Self::Config) -> Result<Self> {
/// depth - the depth of the tree made only of hash nodes. 2^depth is the maximum number of leaf hash nodes
fn new(
depth: usize,
default_leaf: H::Fr,
_config: Self::Config,
) -> Result<Self, ZerokitMerkleTreeError> {
// Compute cache node values, leaf to root
let mut cached_nodes: Vec<H::Fr> = Vec::with_capacity(depth + 1);
cached_nodes.push(default_leaf);
for i in 0..depth {
cached_nodes.push(H::hash(&[cached_nodes[i]; 2]));
}
cached_nodes.reverse();
Ok(OptimalMerkleTree {
cached_nodes: cached_nodes.clone(),
depth,
nodes: HashMap::new(),
cached_nodes,
nodes: HashMap::with_capacity(1 << depth),
cached_leaves_indices: vec![0; 1 << depth],
next_index: 0,
metadata: Vec::new(),
})
}
fn close_db_connection(&mut self) -> Result<()> {
fn close_db_connection(&mut self) -> Result<(), ZerokitMerkleTreeError> {
Ok(())
}
// Returns the depth of the tree
/// Returns the depth of the tree
fn depth(&self) -> usize {
self.depth
}
// Returns the capacity of the tree, i.e. the maximum number of accumulatable leaves
/// Returns the capacity of the tree, i.e. the maximum number of accumulatable leaves
fn capacity(&self) -> usize {
1 << self.depth
}
// Returns the total number of leaves set
/// Returns the total number of leaves set
fn leaves_set(&self) -> usize {
self.next_index
}
#[must_use]
// Returns the root of the tree
/// Returns the root of the tree
fn root(&self) -> H::Fr {
self.get_node(0, 0)
}
fn get_subtree_root(&self, n: usize, index: usize) -> Result<H::Fr> {
/// Sets a leaf at the specified tree index
fn set(&mut self, index: usize, leaf: H::Fr) -> Result<(), ZerokitMerkleTreeError> {
if index >= self.capacity() {
return Err(ZerokitMerkleTreeError::InvalidLeaf);
}
self.nodes.insert((self.depth, index), leaf);
self.update_hashes(index, 1)?;
self.next_index = max(self.next_index, index + 1);
self.cached_leaves_indices[index] = 1;
Ok(())
}
/// Get a leaf from the specified tree index
fn get(&self, index: usize) -> Result<H::Fr, ZerokitMerkleTreeError> {
if index >= self.capacity() {
return Err(ZerokitMerkleTreeError::InvalidLeaf);
}
Ok(self.get_node(self.depth, index))
}
/// Returns the root of the subtree at level n and index
fn get_subtree_root(&self, n: usize, index: usize) -> Result<H::Fr, ZerokitMerkleTreeError> {
if n > self.depth() {
return Err(Report::msg("level exceeds depth size"));
return Err(ZerokitMerkleTreeError::InvalidLevel);
}
if index >= self.capacity() {
return Err(Report::msg("index exceeds set size"));
return Err(ZerokitMerkleTreeError::InvalidLeaf);
}
if n == 0 {
Ok(self.root())
@@ -131,26 +155,7 @@ where
}
}
// Sets a leaf at the specified tree index
fn set(&mut self, index: usize, leaf: H::Fr) -> Result<()> {
if index >= self.capacity() {
return Err(Report::msg("index exceeds set size"));
}
self.nodes.insert((self.depth, index), leaf);
self.recalculate_from(index)?;
self.next_index = max(self.next_index, index + 1);
self.cached_leaves_indices[index] = 1;
Ok(())
}
// Get a leaf from the specified tree index
fn get(&self, index: usize) -> Result<H::Fr> {
if index >= self.capacity() {
return Err(Report::msg("index exceeds set size"));
}
Ok(self.get_node(self.depth, index))
}
/// Returns the indices of the leaves that are empty
fn get_empty_leaves_indices(&self) -> Vec<usize> {
self.cached_leaves_indices
.iter()
@@ -161,29 +166,39 @@ where
.collect()
}
// Sets multiple leaves from the specified tree index
fn set_range<I: IntoIterator<Item = H::Fr>>(&mut self, start: usize, leaves: I) -> Result<()> {
let leaves = leaves.into_iter().collect::<Vec<_>>();
/// Sets multiple leaves from the specified tree index
fn set_range<I: ExactSizeIterator<Item = H::Fr>>(
&mut self,
start: usize,
leaves: I,
) -> Result<(), ZerokitMerkleTreeError> {
// check if the range is valid
if start + leaves.len() > self.capacity() {
return Err(Report::msg("provided range exceeds set size"));
let leaves_len = leaves.len();
if start + leaves_len > self.capacity() {
return Err(ZerokitMerkleTreeError::TooManySet);
}
for (i, leaf) in leaves.iter().enumerate() {
self.nodes.insert((self.depth, start + i), *leaf);
for (i, leaf) in leaves.enumerate() {
self.nodes.insert((self.depth, start + i), leaf);
self.cached_leaves_indices[start + i] = 1;
self.recalculate_from(start + i)?;
}
self.next_index = max(self.next_index, start + leaves.len());
self.update_hashes(start, leaves_len)?;
self.next_index = max(self.next_index, start + leaves_len);
Ok(())
}
fn override_range<I, J>(&mut self, start: usize, leaves: I, indices: J) -> Result<()>
/// Overrides a range of leaves while resetting specified indices to default and preserving unaffected values.
fn override_range<I, J>(
&mut self,
start: usize,
leaves: I,
indices: J,
) -> Result<(), ZerokitMerkleTreeError>
where
I: IntoIterator<Item = FrOf<Self::Hasher>>,
J: IntoIterator<Item = usize>,
I: ExactSizeIterator<Item = FrOf<Self::Hasher>>,
J: ExactSizeIterator<Item = usize>,
{
let indices = indices.into_iter().collect::<Vec<_>>();
let min_index = *indices.first().unwrap();
let min_index = *indices.first().expect("indices should not be empty");
let leaves_vec = leaves.into_iter().collect::<Vec<_>>();
let max_index = start + leaves_vec.len();
@@ -192,7 +207,7 @@ where
for i in min_index..start {
if !indices.contains(&i) {
let value = self.get_leaf(i);
let value = self.get(i)?;
set_values[i - min_index] = value;
}
}
@@ -205,18 +220,17 @@ where
self.cached_leaves_indices[i] = 0;
}
self.set_range(start, set_values)
.map_err(|e| Report::msg(e.to_string()))
self.set_range(start, set_values.into_iter())
}
// Sets a leaf at the next available index
fn update_next(&mut self, leaf: H::Fr) -> Result<()> {
/// Sets a leaf at the next available index
fn update_next(&mut self, leaf: H::Fr) -> Result<(), ZerokitMerkleTreeError> {
self.set(self.next_index, leaf)?;
Ok(())
}
// Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
fn delete(&mut self, index: usize) -> Result<()> {
/// Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
fn delete(&mut self, index: usize) -> Result<(), ZerokitMerkleTreeError> {
// We reset the leaf only if we previously set a leaf at that index
if index < self.next_index {
self.set(index, H::default_leaf())?;
@@ -225,17 +239,20 @@ where
Ok(())
}
// Computes a merkle proof the leaf at the specified index
fn proof(&self, index: usize) -> Result<Self::Proof> {
/// Computes a merkle proof for the leaf at the specified index
fn proof(&self, index: usize) -> Result<Self::Proof, ZerokitMerkleTreeError> {
if index >= self.capacity() {
return Err(Report::msg("index exceeds set size"));
return Err(ZerokitMerkleTreeError::InvalidLeaf);
}
let mut witness = Vec::<(H::Fr, u8)>::with_capacity(self.depth);
let mut i = index;
let mut depth = self.depth;
loop {
i ^= 1;
witness.push((self.get_node(depth, i), (1 - (i & 1)).try_into().unwrap()));
witness.push((
self.get_node(depth, i),
(1 - (i & 1)).try_into().expect("0 or 1 expected"),
));
i >>= 1;
depth -= 1;
if depth == 0 {
@@ -243,78 +260,102 @@ where
}
}
if i != 0 {
Err(Report::msg("i != 0"))
Err(ZerokitMerkleTreeError::ComputingProofError)
} else {
Ok(OptimalMerkleProof(witness))
}
}
// Verifies a Merkle proof with respect to the input leaf and the tree root
fn verify(&self, leaf: &H::Fr, witness: &Self::Proof) -> Result<bool> {
/// Verifies a Merkle proof with respect to the input leaf and the tree root
fn verify(&self, leaf: &H::Fr, witness: &Self::Proof) -> Result<bool, ZerokitMerkleTreeError> {
if witness.length() != self.depth {
return Err(Report::msg("witness length doesn't match tree depth"));
return Err(ZerokitMerkleTreeError::InvalidWitness);
}
let expected_root = witness.compute_root_from(leaf);
Ok(expected_root.eq(&self.root()))
}
fn compute_root(&mut self) -> Result<FrOf<Self::Hasher>> {
self.recalculate_from(0)?;
Ok(self.root())
}
fn set_metadata(&mut self, metadata: &[u8]) -> Result<()> {
fn set_metadata(&mut self, metadata: &[u8]) -> Result<(), ZerokitMerkleTreeError> {
self.metadata = metadata.to_vec();
Ok(())
}
fn metadata(&self) -> Result<Vec<u8>> {
fn metadata(&self) -> Result<Vec<u8>, ZerokitMerkleTreeError> {
Ok(self.metadata.to_vec())
}
}
// Utilities for updating the tree nodes
impl<H: Hasher> OptimalMerkleTree<H>
where
H: Hasher,
{
// Utilities for updating the tree nodes
/// Returns the value of a node at a specific (depth, index).
/// Falls back to a cached default if the node hasn't been set.
fn get_node(&self, depth: usize, index: usize) -> H::Fr {
let node = *self
*self
.nodes
.get(&(depth, index))
.unwrap_or_else(|| &self.cached_nodes[depth]);
node
.unwrap_or(&self.cached_nodes[depth])
}
pub fn get_leaf(&self, index: usize) -> H::Fr {
self.get_node(self.depth, index)
}
fn hash_couple(&mut self, depth: usize, index: usize) -> H::Fr {
/// Computes the hash of a node's two children at the given depth.
/// If the index is odd, it is rounded down to the nearest even index.
fn hash_couple(&self, depth: usize, index: usize) -> H::Fr {
let b = index & !1;
H::hash(&[self.get_node(depth, b), self.get_node(depth, b + 1)])
}
fn recalculate_from(&mut self, index: usize) -> Result<()> {
let mut i = index;
let mut depth = self.depth;
loop {
let h = self.hash_couple(depth, i);
i >>= 1;
depth -= 1;
self.nodes.insert((depth, i), h);
self.cached_leaves_indices[index] = 1;
if depth == 0 {
break;
/// Updates parent hashes after modifying a range of leaf nodes.
///
/// - `start`: Starting leaf index that was updated.
/// - `length`: Number of consecutive leaves that were updated.
fn update_hashes(&mut self, start: usize, length: usize) -> Result<(), ZerokitMerkleTreeError> {
// Start at the leaf level
let mut current_depth = self.depth;
// Round down to include the left sibling in the pair (if start is odd)
let mut current_index = start & !1;
// Compute the max index at this level, rounding up to include the last updated leaf's right sibling (if start + length is odd)
let mut current_index_max = (start + length + 1) & !1;
// Traverse from the leaf level up to the root
while current_depth > 0 {
// Compute the parent level (one level above the current)
let parent_depth = current_depth - 1;
// Use parallel processing when the number of pairs exceeds the threshold
if current_index_max - current_index >= MIN_PARALLEL_NODES {
let updates: Vec<((usize, usize), H::Fr)> = (current_index..current_index_max)
.step_by(2)
.collect::<Vec<_>>()
.into_par_iter()
.map(|index| {
// Hash two child nodes at positions (current_depth, index) and (current_depth, index + 1)
let hash = self.hash_couple(current_depth, index);
// Return the computed parent hash and its position in the parent level
((parent_depth, index >> 1), hash)
})
.collect();
for (parent, hash) in updates {
self.nodes.insert(parent, hash);
}
} else {
// Otherwise, fall back to a sequential update for small ranges
for index in (current_index..current_index_max).step_by(2) {
let hash = self.hash_couple(current_depth, index);
self.nodes.insert((parent_depth, index >> 1), hash);
}
}
// Move up one level in the tree
current_index >>= 1;
current_index_max = (current_index_max + 1) >> 1;
current_depth -= 1;
}
if depth != 0 {
return Err(Report::msg("did not reach the depth"));
}
if i != 0 {
return Err(Report::msg("did not go through all indexes"));
}
Ok(())
}
}
@@ -326,14 +367,12 @@ where
type Index = u8;
type Hasher = H;
#[must_use]
// Returns the length of a Merkle proof
/// Returns the length of a Merkle proof
fn length(&self) -> usize {
self.0.len()
}
/// Computes the leaf index corresponding to a Merkle proof
#[must_use]
fn leaf_index(&self) -> usize {
// In the current implementation, the path indexes in a proof correspond to the binary representation of the leaf index
let mut binary_repr = self.get_path_index();
@@ -343,19 +382,16 @@ where
.fold(0, |acc, digit| (acc << 1) + usize::from(digit))
}
#[must_use]
/// Returns the path elements forming a Merkle proof
fn get_path_elements(&self) -> Vec<H::Fr> {
self.0.iter().map(|x| x.0).collect()
}
/// Returns the path indexes forming a Merkle proof
#[must_use]
fn get_path_index(&self) -> Vec<u8> {
self.0.iter().map(|x| x.1).collect()
}
#[must_use]
/// Computes the corresponding Merkle root by iteratively hashing a Merkle proof with a given input leaf
fn compute_root_from(&self, leaf: &H::Fr) -> H::Fr {
let mut acc: H::Fr = *leaf;

View File
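The index arithmetic in `update_hashes` above (`start & !1`, `(start + length + 1) & !1`, `index >> 1`) is easy to misread. Below is a small self-contained illustration (editor's sketch, arbitrary numbers) of how a run of touched leaves is widened to whole sibling pairs and mapped to parent indices:

```rust
fn main() {
    let (start, length) = (5usize, 4usize); // leaves 5..=8 were updated
    let lo = start & !1; // 4: rounds down to include the left sibling of 5
    let hi = (start + length + 1) & !1; // 10: rounds up past the right sibling of 8
    assert_eq!((lo, hi), (4, 10));
    // Pairs are hashed at even indices; each child maps to its parent
    // one level up via a right shift.
    let parents: Vec<usize> = (lo..hi).step_by(2).map(|i| i >> 1).collect();
    assert_eq!(parents, vec![2, 3, 4]);
}
```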

@@ -1,4 +1,4 @@
pub mod sled_adapter;
pub use self::sled_adapter::*;
pub use self::sled_adapter::SledDB;
pub use pmtree;
pub use sled::*;
pub use sled::{Config, Mode};

View File

@@ -1,5 +1,4 @@
pub mod poseidon_hash;
pub use self::poseidon_hash::*;
pub use poseidon_hash::Poseidon;
pub mod poseidon_constants;
pub use self::poseidon_constants::*;

View File

@@ -9,8 +9,6 @@
// The following implementation was adapted from https://github.com/arkworks-rs/sponge/blob/7d9b3a474c9ddb62890014aeaefcb142ac2b3776/src/poseidon/grain_lfsr.rs
#![allow(dead_code)]
use ark_ff::PrimeField;
use num_bigint::BigUint;
@@ -20,7 +18,6 @@ pub struct PoseidonGrainLFSR {
pub head: usize,
}
#[allow(unused_variables)]
impl PoseidonGrainLFSR {
pub fn new(
is_field: u64,

View File

@@ -7,7 +7,7 @@ use crate::poseidon_constants::find_poseidon_ark_and_mds;
use ark_ff::PrimeField;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RoundParamenters<F: PrimeField> {
pub struct RoundParameters<F: PrimeField> {
pub t: usize,
pub n_rounds_f: usize,
pub n_rounds_p: usize,
@@ -17,16 +17,16 @@ pub struct RoundParamenters<F: PrimeField> {
}
pub struct Poseidon<F: PrimeField> {
round_params: Vec<RoundParamenters<F>>,
round_params: Vec<RoundParameters<F>>,
}
impl<F: PrimeField> Poseidon<F> {
// Loads round parameters and generates round constants
// poseidon_params is a vector containing tuples (t, RF, RP, skip_matrices)
// where: t is the rate (input lenght + 1), RF is the number of full rounds, RP is the number of partial rounds
// where: t is the rate (input length + 1), RF is the number of full rounds, RP is the number of partial rounds
// and skip_matrices is a (temporary) parameter used to generate secure MDS matrices (see comments in the description of find_poseidon_ark_and_mds)
// TODO: implement automatic generation of round parameters
pub fn from(poseidon_params: &[(usize, usize, usize, usize)]) -> Self {
let mut read_params = Vec::<RoundParamenters<F>>::new();
let mut read_params = Vec::<RoundParameters<F>>::with_capacity(poseidon_params.len());
for &(t, n_rounds_f, n_rounds_p, skip_matrices) in poseidon_params {
let (ark, mds) = find_poseidon_ark_and_mds::<F>(
@@ -38,7 +38,7 @@ impl<F: PrimeField> Poseidon<F> {
n_rounds_p as u64,
skip_matrices,
);
let rp = RoundParamenters {
let rp = RoundParameters {
t,
n_rounds_p,
n_rounds_f,
@@ -54,24 +54,24 @@ impl<F: PrimeField> Poseidon<F> {
}
}
pub fn get_parameters(&self) -> Vec<RoundParamenters<F>> {
self.round_params.clone()
pub fn get_parameters(&self) -> &Vec<RoundParameters<F>> {
&self.round_params
}
pub fn ark(&self, state: &mut [F], c: &[F], it: usize) {
for i in 0..state.len() {
state[i] += c[it + i];
}
state.iter_mut().enumerate().for_each(|(i, elem)| {
*elem += c[it + i];
});
}
pub fn sbox(&self, n_rounds_f: usize, n_rounds_p: usize, state: &mut [F], i: usize) {
if (i < n_rounds_f / 2) || (i >= n_rounds_f / 2 + n_rounds_p) {
for current_state in &mut state.iter_mut() {
state.iter_mut().for_each(|current_state| {
let aux = *current_state;
*current_state *= *current_state;
*current_state *= *current_state;
*current_state *= aux;
}
})
} else {
let aux = state[0];
state[0] *= state[0];
@@ -80,21 +80,20 @@ impl<F: PrimeField> Poseidon<F> {
}
}
pub fn mix(&self, state: &[F], m: &[Vec<F>]) -> Vec<F> {
let mut new_state: Vec<F> = Vec::new();
pub fn mix_2(&self, state: &[F], m: &[Vec<F>], state_2: &mut [F]) {
for i in 0..state.len() {
new_state.push(F::zero());
for (j, state_item) in state.iter().enumerate() {
let mut mij = m[i][j];
mij *= state_item;
new_state[i] += mij;
// Cache the row reference
let row = &m[i];
let mut acc = F::ZERO;
for j in 0..state.len() {
acc += row[j] * state[j];
}
state_2[i] = acc;
}
new_state.clone()
}
pub fn hash(&self, inp: Vec<F>) -> Result<F, String> {
// Note that the rate t becomes input lenght + 1, hence for lenght N we pick parameters with T = N + 1
pub fn hash(&self, inp: &[F]) -> Result<F, String> {
// Note that the rate t becomes input length + 1; hence for length N we pick parameters with T = N + 1
let t = inp.len() + 1;
// We seek the index (Poseidon's round_params is an ordered vector) for the parameters corresponding to t
@@ -106,8 +105,9 @@ impl<F: PrimeField> Poseidon<F> {
let param_index = param_index.unwrap();
let mut state = vec![F::zero(); t];
state[1..].clone_from_slice(&inp);
let mut state = vec![F::ZERO; t];
let mut state_2 = state.clone();
state[1..].clone_from_slice(inp);
for i in 0..(self.round_params[param_index].n_rounds_f
+ self.round_params[param_index].n_rounds_p)
@@ -123,7 +123,8 @@ impl<F: PrimeField> Poseidon<F> {
&mut state,
i,
);
state = self.mix(&state, &self.round_params[param_index].m);
self.mix_2(&state, &self.round_params[param_index].m, &mut state_2);
std::mem::swap(&mut state, &mut state_2);
}
Ok(state[0])

View File
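A minimal sketch (editor's illustration) of the new slice-based `hash` API above, reusing one hasher across calls. It assumes the same `ROUND_PARAMS` tuple shape `(t, RF, RP, skip_matrices)` as the tests further down, of which only the `t = 2` and `t = 3` entries are needed here:

```rust
use ark_bn254::Fr;
use zerokit_utils::poseidon_hash::Poseidon;

// (t, RF, RP, skip_matrices): subset of the table used in the tests below.
const ROUND_PARAMS: [(usize, usize, usize, usize); 2] = [(2, 8, 56, 0), (3, 8, 57, 0)];

fn main() {
    let hasher = Poseidon::<Fr>::from(&ROUND_PARAMS);
    // `hash` now borrows a slice instead of consuming a Vec, so the same
    // hasher can chain calls without re-allocating inputs.
    let h1 = hasher.hash(&[Fr::from(1u64)]).expect("t = 2 params present");
    let h2 = hasher.hash(&[h1, Fr::from(2u64)]).expect("t = 3 params present");
    println!("{h1}\n{h2}");
}
```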

@@ -6,7 +6,7 @@ pub mod test {
use tiny_keccak::{Hasher as _, Keccak};
use zerokit_utils::{
FullMerkleConfig, FullMerkleTree, Hasher, OptimalMerkleConfig, OptimalMerkleTree,
ZerokitMerkleProof, ZerokitMerkleTree,
ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES,
};
#[derive(Clone, Copy, Eq, PartialEq)]
struct Keccak256;
@@ -42,7 +42,7 @@ pub mod test {
type Err = std::string::FromUtf8Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(TestFr(s.as_bytes().try_into().unwrap()))
Ok(TestFr(s.as_bytes().try_into().expect("Invalid length")))
}
}
@@ -50,7 +50,7 @@ pub mod test {
fn from(value: u32) -> Self {
let mut bytes: Vec<u8> = vec![0; 28];
bytes.extend_from_slice(&value.to_be_bytes());
TestFr(bytes.as_slice().try_into().unwrap())
TestFr(bytes.as_slice().try_into().expect("Invalid length"))
}
}
@@ -58,12 +58,12 @@ pub mod test {
fn default_full_merkle_tree(depth: usize) -> FullMerkleTree<Keccak256> {
FullMerkleTree::<Keccak256>::new(depth, TestFr([0; 32]), FullMerkleConfig::default())
.unwrap()
.expect("Failed to create FullMerkleTree")
}
fn default_optimal_merkle_tree(depth: usize) -> OptimalMerkleTree<Keccak256> {
OptimalMerkleTree::<Keccak256>::new(depth, TestFr([0; 32]), OptimalMerkleConfig::default())
.unwrap()
.expect("Failed to create OptimalMerkleTree")
}
#[test]
@@ -83,21 +83,101 @@ pub mod test {
let nof_leaves = 4;
let leaves: Vec<TestFr> = (1..=nof_leaves as u32).map(TestFr::from).collect();
let mut tree = default_full_merkle_tree(DEFAULT_DEPTH);
assert_eq!(tree.root(), default_tree_root);
let mut tree_full = default_full_merkle_tree(DEFAULT_DEPTH);
assert_eq!(tree_full.root(), default_tree_root);
for i in 0..nof_leaves {
tree.set(i, leaves[i]).unwrap();
assert_eq!(tree.root(), roots[i]);
tree_full.set(i, leaves[i]).expect("Failed to set leaf");
assert_eq!(tree_full.root(), roots[i]);
}
let mut tree = default_optimal_merkle_tree(DEFAULT_DEPTH);
assert_eq!(tree.root(), default_tree_root);
let mut tree_opt = default_optimal_merkle_tree(DEFAULT_DEPTH);
assert_eq!(tree_opt.root(), default_tree_root);
for i in 0..nof_leaves {
tree.set(i, leaves[i]).unwrap();
assert_eq!(tree.root(), roots[i]);
tree_opt.set(i, leaves[i]).expect("Failed to set leaf");
assert_eq!(tree_opt.root(), roots[i]);
}
}
#[test]
fn test_set_range() {
let depth = 4;
let leaves: Vec<TestFr> = (0..(1 << depth) as u32).map(TestFr::from).collect();
let mut tree_full = default_full_merkle_tree(depth);
let root_before = tree_full.root();
tree_full
.set_range(0, leaves.iter().cloned())
.expect("Failed to set leaves");
let root_after = tree_full.root();
assert_ne!(root_before, root_after);
let mut tree_opt = default_optimal_merkle_tree(depth);
let root_before = tree_opt.root();
tree_opt
.set_range(0, leaves.iter().cloned())
.expect("Failed to set leaves");
let root_after = tree_opt.root();
assert_ne!(root_before, root_after);
}
#[test]
fn test_update_next() {
let mut tree_full = default_full_merkle_tree(DEFAULT_DEPTH);
let mut tree_opt = default_optimal_merkle_tree(DEFAULT_DEPTH);
for i in 0..4 {
let leaf = TestFr::from(i as u32);
tree_full.update_next(leaf).expect("Failed to update leaf");
tree_opt.update_next(leaf).expect("Failed to update leaf");
assert_eq!(tree_full.get(i).expect("Failed to get leaf"), leaf);
assert_eq!(tree_opt.get(i).expect("Failed to get leaf"), leaf);
}
assert_eq!(tree_full.leaves_set(), 4);
assert_eq!(tree_opt.leaves_set(), 4);
}
#[test]
fn test_delete_and_reset() {
let index = 1;
let original_leaf = TestFr::from(42);
let new_leaf = TestFr::from(99);
let mut tree_full = default_full_merkle_tree(DEFAULT_DEPTH);
tree_full
.set(index, original_leaf)
.expect("Failed to set leaf");
let root_with_original = tree_full.root();
tree_full.delete(index).expect("Failed to delete leaf");
let root_after_delete = tree_full.root();
assert_ne!(root_with_original, root_after_delete);
tree_full.set(index, new_leaf).expect("Failed to set leaf");
let root_after_reset = tree_full.root();
assert_ne!(root_after_delete, root_after_reset);
assert_ne!(root_with_original, root_after_reset);
assert_eq!(tree_full.get(index).expect("Failed to get leaf"), new_leaf);
let mut tree_opt = default_optimal_merkle_tree(DEFAULT_DEPTH);
tree_opt
.set(index, original_leaf)
.expect("Failed to set leaf");
let root_with_original = tree_opt.root();
tree_opt.delete(index).expect("Failed to delete leaf");
let root_after_delete = tree_opt.root();
assert_ne!(root_with_original, root_after_delete);
tree_opt.set(index, new_leaf).expect("Failed to set leaf");
let root_after_reset = tree_opt.root();
assert_ne!(root_after_delete, root_after_reset);
assert_ne!(root_with_original, root_after_reset);
assert_eq!(tree_opt.get(index).expect("Failed to get leaf"), new_leaf);
}
#[test]
fn test_get_empty_leaves_indices() {
let depth = 4;
@@ -107,7 +187,7 @@ pub mod test {
let leaves_4: Vec<TestFr> = (0u32..4).map(TestFr::from).collect();
let mut tree_full = default_full_merkle_tree(depth);
let _ = tree_full.set_range(0, leaves.clone());
let _ = tree_full.set_range(0, leaves.clone().into_iter());
assert!(tree_full.get_empty_leaves_indices().is_empty());
let mut vec_idxs = Vec::new();
@@ -123,33 +203,31 @@ pub mod test {
assert_eq!(tree_full.get_empty_leaves_indices(), vec_idxs);
}
// Check situation when the number of items to insert is less than the number of items to delete
// check situation when the number of items to insert is less than the number of items to delete
tree_full
.override_range(0, leaves_2.clone(), [0, 1, 2, 3])
.unwrap();
.override_range(0, leaves_2.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
// check if the indexes for write and delete are the same
tree_full
.override_range(0, leaves_4.clone(), [0, 1, 2, 3])
.unwrap();
assert_eq!(tree_full.get_empty_leaves_indices(), vec![]);
.override_range(0, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
assert_eq!(tree_full.get_empty_leaves_indices(), Vec::<usize>::new());
// check if indexes for deletion are before indexes for overwriting
tree_full
.override_range(4, leaves_4.clone(), [0, 1, 2, 3])
.unwrap();
.override_range(4, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
assert_eq!(tree_full.get_empty_leaves_indices(), vec![0, 1, 2, 3]);
// check if the indices for write and delete do not overlap completely
tree_full
.override_range(2, leaves_4.clone(), [0, 1, 2, 3])
.unwrap();
.override_range(2, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
assert_eq!(tree_full.get_empty_leaves_indices(), vec![0, 1]);
//// Optimal Merkle Tree Test
let mut tree_opt = default_optimal_merkle_tree(depth);
let _ = tree_opt.set_range(0, leaves.clone());
let _ = tree_opt.set_range(0, leaves.clone().into_iter());
assert!(tree_opt.get_empty_leaves_indices().is_empty());
let mut vec_idxs = Vec::new();
@@ -164,48 +242,55 @@ pub mod test {
assert_eq!(tree_opt.get_empty_leaves_indices(), vec_idxs);
}
// Check situation when the number of items to insert is less than the number of items to delete
// check situation when the number of items to insert is less than the number of items to delete
tree_opt
.override_range(0, leaves_2.clone(), [0, 1, 2, 3])
.unwrap();
.override_range(0, leaves_2.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
// check if the indexes for write and delete are the same
tree_opt
.override_range(0, leaves_4.clone(), [0, 1, 2, 3])
.unwrap();
assert_eq!(tree_opt.get_empty_leaves_indices(), vec![]);
.override_range(0, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
assert_eq!(tree_opt.get_empty_leaves_indices(), Vec::<usize>::new());
// check if indexes for deletion are before indexes for overwriting
tree_opt
.override_range(4, leaves_4.clone(), [0, 1, 2, 3])
.unwrap();
.override_range(4, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
assert_eq!(tree_opt.get_empty_leaves_indices(), vec![0, 1, 2, 3]);
// check if the indices for write and delete do not overlap completely
tree_opt
.override_range(2, leaves_4.clone(), [0, 1, 2, 3])
.unwrap();
.override_range(2, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.expect("Failed to override range");
assert_eq!(tree_opt.get_empty_leaves_indices(), vec![0, 1]);
}
#[test]
fn test_subtree_root() {
let depth = 3;
let nof_leaves: usize = 6;
let nof_leaves: usize = 4;
let leaves: Vec<TestFr> = (0..nof_leaves as u32).map(TestFr::from).collect();
let mut tree_full = default_optimal_merkle_tree(depth);
let mut tree_full = default_full_merkle_tree(depth);
let _ = tree_full.set_range(0, leaves.iter().cloned());
for i in 0..nof_leaves {
// check leaves
assert_eq!(
tree_full.get(i).unwrap(),
tree_full.get_subtree_root(depth, i).unwrap()
tree_full.get(i).expect("Failed to get leaf"),
tree_full
.get_subtree_root(depth, i)
.expect("Failed to get subtree root")
);
// check root
assert_eq!(tree_full.root(), tree_full.get_subtree_root(0, i).unwrap());
assert_eq!(
tree_full.root(),
tree_full
.get_subtree_root(0, i)
.expect("Failed to get subtree root")
);
}
// check intermediate nodes
@@ -215,26 +300,39 @@ pub mod test {
let idx_r = (i + 1) * (1 << (depth - n));
let idx_sr = idx_l;
let prev_l = tree_full.get_subtree_root(n, idx_l).unwrap();
let prev_r = tree_full.get_subtree_root(n, idx_r).unwrap();
let subroot = tree_full.get_subtree_root(n - 1, idx_sr).unwrap();
let prev_l = tree_full
.get_subtree_root(n, idx_l)
.expect("Failed to get subtree root");
let prev_r = tree_full
.get_subtree_root(n, idx_r)
.expect("Failed to get subtree root");
let subroot = tree_full
.get_subtree_root(n - 1, idx_sr)
.expect("Failed to get subtree root");
// check intermediate nodes
assert_eq!(Keccak256::hash(&[prev_l, prev_r]), subroot);
}
}
let mut tree_opt = default_full_merkle_tree(depth);
let mut tree_opt = default_optimal_merkle_tree(depth);
let _ = tree_opt.set_range(0, leaves.iter().cloned());
for i in 0..nof_leaves {
// check leaves
assert_eq!(
tree_opt.get(i).unwrap(),
tree_opt.get_subtree_root(depth, i).unwrap()
tree_opt.get(i).expect("Failed to get leaf"),
tree_opt
.get_subtree_root(depth, i)
.expect("Failed to get subtree root")
);
// check root
assert_eq!(tree_opt.root(), tree_opt.get_subtree_root(0, i).unwrap());
assert_eq!(
tree_opt.root(),
tree_opt
.get_subtree_root(0, i)
.expect("Failed to get subtree root")
);
}
// check intermediate nodes
@@ -244,9 +342,15 @@ pub mod test {
let idx_r = (i + 1) * (1 << (depth - n));
let idx_sr = idx_l;
let prev_l = tree_opt.get_subtree_root(n, idx_l).unwrap();
let prev_r = tree_opt.get_subtree_root(n, idx_r).unwrap();
let subroot = tree_opt.get_subtree_root(n - 1, idx_sr).unwrap();
let prev_l = tree_opt
.get_subtree_root(n, idx_l)
.expect("Failed to get subtree root");
let prev_r = tree_opt
.get_subtree_root(n, idx_r)
.expect("Failed to get subtree root");
let subroot = tree_opt
.get_subtree_root(n - 1, idx_sr)
.expect("Failed to get subtree root");
// check intermediate nodes
assert_eq!(Keccak256::hash(&[prev_l, prev_r]), subroot);
@@ -259,61 +363,83 @@ pub mod test {
let nof_leaves = 4;
let leaves: Vec<TestFr> = (0..nof_leaves as u32).map(TestFr::from).collect();
// We thest the FullMerkleTree implementation
let mut tree = default_full_merkle_tree(DEFAULT_DEPTH);
// We test the FullMerkleTree implementation
let mut tree_full = default_full_merkle_tree(DEFAULT_DEPTH);
for i in 0..nof_leaves {
// We set the leaves
tree.set(i, leaves[i]).unwrap();
tree_full.set(i, leaves[i]).expect("Failed to set leaf");
// We compute a merkle proof
let proof = tree.proof(i).expect("index should be set");
let proof = tree_full.proof(i).expect("Failed to compute proof");
// We verify if the merkle proof corresponds to the right leaf index
assert_eq!(proof.leaf_index(), i);
// We verify the proof
assert!(tree.verify(&leaves[i], &proof).unwrap());
assert!(tree_full
.verify(&leaves[i], &proof)
.expect("Failed to verify proof"));
// We ensure that the Merkle proof and the leaf generate the same root as the tree
assert_eq!(proof.compute_root_from(&leaves[i]), tree.root());
assert_eq!(proof.compute_root_from(&leaves[i]), tree_full.root());
// We check that the proof is not valid for another leaf
assert!(!tree.verify(&leaves[(i + 1) % nof_leaves], &proof).unwrap());
assert!(!tree_full
.verify(&leaves[(i + 1) % nof_leaves], &proof)
.expect("Failed to verify proof"));
}
// We test the OptimalMerkleTree implementation
let mut tree = default_optimal_merkle_tree(DEFAULT_DEPTH);
let mut tree_opt = default_optimal_merkle_tree(DEFAULT_DEPTH);
for i in 0..nof_leaves {
// We set the leaves
tree.set(i, leaves[i]).unwrap();
tree_opt.set(i, leaves[i]).expect("Failed to set leaf");
// We compute a merkle proof
let proof = tree.proof(i).expect("index should be set");
let proof = tree_opt.proof(i).expect("Failed to compute proof");
// We verify if the merkle proof corresponds to the right leaf index
assert_eq!(proof.leaf_index(), i);
// We verify the proof
assert!(tree.verify(&leaves[i], &proof).unwrap());
assert!(tree_opt
.verify(&leaves[i], &proof)
.expect("Failed to verify proof"));
// We ensure that the Merkle proof and the leaf generate the same root as the tree
assert_eq!(proof.compute_root_from(&leaves[i]), tree.root());
assert_eq!(proof.compute_root_from(&leaves[i]), tree_opt.root());
// We check that the proof is not valid for another leaf
assert!(!tree.verify(&leaves[(i + 1) % nof_leaves], &proof).unwrap());
assert!(!tree_opt
.verify(&leaves[(i + 1) % nof_leaves], &proof)
.expect("Failed to verify proof"));
}
}
#[test]
fn test_proof_fail() {
let tree_full = default_full_merkle_tree(DEFAULT_DEPTH);
let tree_opt = default_optimal_merkle_tree(DEFAULT_DEPTH);
let invalid_leaf = TestFr::from(12345);
let proof_full = tree_full.proof(0).expect("Failed to compute proof");
let proof_opt = tree_opt.proof(0).expect("Failed to compute proof");
// Should fail because no leaf was set
assert!(!tree_full
.verify(&invalid_leaf, &proof_full)
.expect("Failed to verify proof"));
assert!(!tree_opt
.verify(&invalid_leaf, &proof_opt)
.expect("Failed to verify proof"));
}
#[test]
fn test_override_range() {
let nof_leaves = 4;
let leaves: Vec<TestFr> = (0..nof_leaves as u32).map(TestFr::from).collect();
let mut tree = default_optimal_merkle_tree(DEFAULT_DEPTH);
// We set the leaves
tree.set_range(0, leaves.iter().cloned()).unwrap();
let new_leaves = [
hex!("0000000000000000000000000000000000000000000000000000000000000005"),
hex!("0000000000000000000000000000000000000000000000000000000000000006"),
@@ -322,17 +448,70 @@ pub mod test {
let to_delete_indices: [usize; 2] = [0, 1];
// We override the leaves
tree.override_range(
0, // start from the end of the initial leaves
new_leaves.iter().cloned(),
to_delete_indices.iter().cloned(),
)
.unwrap();
let mut tree_full = default_full_merkle_tree(DEFAULT_DEPTH);
tree_full
.set_range(0, leaves.iter().cloned())
.expect("Failed to set leaves");
tree_full
.override_range(
0,
new_leaves.iter().cloned(),
to_delete_indices.iter().cloned(),
)
.expect("Failed to override range");
// ensure that the leaves are set correctly
for (i, &new_leaf) in new_leaves.iter().enumerate() {
assert_eq!(tree.get_leaf(i), new_leaf);
assert_eq!(tree_full.get(i).expect("Failed to get leaf"), new_leaf);
}
let mut tree_opt = default_optimal_merkle_tree(DEFAULT_DEPTH);
tree_opt
.set_range(0, leaves.iter().cloned())
.expect("Failed to set leaves");
tree_opt
.override_range(
0,
new_leaves.iter().cloned(),
to_delete_indices.iter().cloned(),
)
.expect("Failed to override range");
for (i, &new_leaf) in new_leaves.iter().enumerate() {
assert_eq!(tree_opt.get(i).expect("Failed to get leaf"), new_leaf);
}
}
#[test]
fn test_override_range_parallel_triggered() {
let depth = 13;
let nof_leaves = 8192;
// number of leaves larger than MIN_PARALLEL_NODES to trigger parallel hashing
assert!(MIN_PARALLEL_NODES < nof_leaves);
let leaves: Vec<TestFr> = (0..nof_leaves as u32).map(TestFr::from).collect();
let indices: Vec<usize> = (0..nof_leaves).collect();
let mut tree_full = default_full_merkle_tree(depth);
tree_full
.override_range(0, leaves.iter().cloned(), indices.iter().cloned())
.expect("Failed to override range");
for (i, &leaf) in leaves.iter().enumerate() {
assert_eq!(tree_full.get(i).expect("Failed to get leaf"), leaf);
}
let mut tree_opt = default_optimal_merkle_tree(depth);
tree_opt
.override_range(0, leaves.iter().cloned(), indices.iter().cloned())
.expect("Failed to override range");
for (i, &leaf) in leaves.iter().enumerate() {
assert_eq!(tree_opt.get(i).expect("Failed to get leaf"), leaf);
}
}
}

View File

@@ -3530,7 +3530,8 @@ mod test {
{
// We check if the round constants and matrices correspond to the ones generated when instantiating Poseidon with ROUND_PARAMS
let (loaded_c, loaded_m) = load_constants();
let poseidon_parameters = Poseidon::<Fr>::from(&ROUND_PARAMS).get_parameters();
let poseidon_hasher = Poseidon::<Fr>::from(&ROUND_PARAMS);
let poseidon_parameters = poseidon_hasher.get_parameters();
for i in 0..poseidon_parameters.len() {
assert_eq!(loaded_c[i], poseidon_parameters[i].c);
assert_eq!(loaded_m[i], poseidon_parameters[i].m);

View File

@@ -0,0 +1,131 @@
#[cfg(test)]
mod test {
use ark_bn254::Fr;
use ark_ff::{AdditiveGroup, Field};
use std::collections::HashMap;
use std::str::FromStr;
use zerokit_utils::poseidon_hash::Poseidon;
const ROUND_PARAMS: [(usize, usize, usize, usize); 8] = [
(2, 8, 56, 0),
(3, 8, 57, 0),
(4, 8, 56, 0),
(5, 8, 60, 0),
(6, 8, 60, 0),
(7, 8, 63, 0),
(8, 8, 64, 0),
(9, 8, 63, 0),
];
#[test]
fn test_poseidon_hash_basic() {
let map = HashMap::from([
(
Fr::ZERO,
Fr::from_str(
"19014214495641488759237505126948346942972912379615652741039992445865937985820",
)
.unwrap(),
),
(
Fr::ONE,
Fr::from_str(
"18586133768512220936620570745912940619677854269274689475585506675881198879027",
)
.unwrap(),
),
(
Fr::from(255),
Fr::from_str(
"20026131459732984724454933360292530547665726761019872861025481903072111625788",
)
.unwrap(),
),
(
Fr::from(u16::MAX),
Fr::from_str(
"12358868638722666642632413418981275677998688723398440898957566982787708451243",
)
.unwrap(),
),
(
Fr::from(u64::MAX),
Fr::from_str(
"17449307747295017006142981453320720946812828330895590310359634430146721583189",
)
.unwrap(),
),
]);
// map (key: what to hash, value: expected value)
for (k, v) in map.into_iter() {
let hasher = Poseidon::<Fr>::from(&ROUND_PARAMS);
let h = hasher.hash(&[k]);
assert_eq!(h.unwrap(), v);
}
}
#[test]
fn test_poseidon_hash_multi() {
// All hashes done in a merkle tree (with leaves: [0, 1, 2, 3, 4, 5, 6, 7])
// ~ leaves
let fr_0 = Fr::ZERO;
let fr_1 = Fr::ONE;
let fr_2 = Fr::from(2);
let fr_3 = Fr::from(3);
let fr_4 = Fr::from(4);
let fr_5 = Fr::from(5);
let fr_6 = Fr::from(6);
let fr_7 = Fr::from(7);
let fr_0_1 = Fr::from_str(
"12583541437132735734108669866114103169564651237895298778035846191048104863326",
)
.unwrap();
let fr_2_3 = Fr::from_str(
"17197790661637433027297685226742709599380837544520340689137581733613433332983",
)
.unwrap();
let fr_4_5 = Fr::from_str(
"756592041685769348226045093946546956867261766023639881791475046640232555043",
)
.unwrap();
let fr_6_7 = Fr::from_str(
"5558359459771725727593826278265342308584225092343962757289948761260561575479",
)
.unwrap();
let fr_0_3 = Fr::from_str(
"3720616653028013822312861221679392249031832781774563366107458835261883914924",
)
.unwrap();
let fr_4_7 = Fr::from_str(
"7960741062684589801276390367952372418815534638314682948141519164356522829957",
)
.unwrap();
// ~ root
let fr_0_7 = Fr::from_str(
"11780650233517635876913804110234352847867393797952240856403268682492028497284",
)
.unwrap();
// map (key: what to hash, value: expected value)
let map = HashMap::from([
((fr_0, fr_1), fr_0_1),
((fr_2, fr_3), fr_2_3),
((fr_4, fr_5), fr_4_5),
((fr_6, fr_7), fr_6_7),
((fr_0_1, fr_2_3), fr_0_3),
((fr_4_5, fr_6_7), fr_4_7),
((fr_0_3, fr_4_7), fr_0_7),
]);
for (k, v) in map.into_iter() {
let hasher = Poseidon::<Fr>::from(&ROUND_PARAMS);
let h = hasher.hash(&[k.0, k.1]);
assert_eq!(h.unwrap(), v);
}
}
}