Compare commits

...

17 Commits

Author SHA1 Message Date
vinhtc27
9cb4ff7571 refactor: update tree depth to 10 and adjust resource paths accordingly 2025-09-19 19:49:01 +07:00
vinhtc27
b5afb847f1 refactor: update tree depth to 30 and adjust resource paths accordingly 2025-09-19 19:39:17 +07:00
vinhtc27
a7d58926e4 chore: update Cargo.lock 2025-09-17 20:08:21 +07:00
vinhtc27
e160ac2524 Merge remote-tracking branch 'origin/master' into fix-cross-build-and-update-deps 2025-09-17 20:02:00 +07:00
0xc1c4da
eb8eedfdb4 Allow flake to be consumed, and nix build .#rln (#340)
I had been trying to consume zerokit (specifically rln on x86_64) to
build libwaku (nwaku) and was having issues; this PR at least allows a
build to occur.

```bash
$ nix flake show github:vacp2p/zerokit
error: syntax error, unexpected '=', expecting ';'
       at «github:vacp2p/zerokit/0b00c639a059a2cfde74bcf68fdf75db3b6898a4»/flake.nix:36:25:
           35|
           36|         rln-linux-arm64 = buildRln {
             |                         ^
           37|           target-platform = "aarch64-multiplatform";
```
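The parse error comes from `rln = buildRln` missing its argument set and
terminating semicolon; the flake.nix diff below changes it to
`rln = buildRln { };`. After that fix, the flake can be re-evaluated locally
(a quick check, assuming a flakes-enabled Nix installation):

```bash
# Re-evaluate the repaired flake; it should list the rln packages
# instead of failing with a syntax error
nix flake show .
```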

`Cargo.lock` is required in the repo for this to be possible; otherwise:
```bash
$ nix build .#rln --show-trace
warning: Git tree '/home/j/experiments/zerokit' is dirty
error:
       … while calling the 'derivationStrict' builtin
         at <nix/derivation-internal.nix>:37:12:
           36|
           37|   strict = derivationStrict drvAttrs;
             |            ^
           38|

       … while evaluating derivation 'zerokit-nightly'
         whose name attribute is located at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/stdenv/generic/make-derivation.nix:336:7

       … while evaluating attribute 'cargoDeps' of derivation 'zerokit-nightly'
         at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/build-support/rust/build-rust-package/default.nix:157:5:
          156|   // {
          157|     cargoDeps = cargoDeps';
             |     ^
          158|     inherit buildAndTestSubdir;

       … while calling the 'getAttr' builtin
         at <nix/derivation-internal.nix>:50:17:
           49|     value = commonAttrs // {
           50|       outPath = builtins.getAttr outputName strict;
             |                 ^
           51|       drvPath = strict.drvPath;

       … while calling the 'derivationStrict' builtin
         at <nix/derivation-internal.nix>:37:12:
           36|
           37|   strict = derivationStrict drvAttrs;
             |            ^
           38|

       … while evaluating derivation 'cargo-vendor-dir'
         whose name attribute is located at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/stdenv/generic/make-derivation.nix:336:7

       … while evaluating attribute 'buildCommand' of derivation 'cargo-vendor-dir'
         at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/build-support/trivial-builders/default.nix:59:17:
           58|         enableParallelBuilding = true;
           59|         inherit buildCommand name;
             |                 ^
           60|         passAsFile = [ "buildCommand" ]

       … while calling the 'toString' builtin
         at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/build-support/rust/import-cargo-lock.nix:264:20:
          263|
          264|     for crate in ${toString depCrates}; do
             |                    ^
          265|       # Link the crate directory, removing the output path hash from the destination.

       … while calling the 'deepSeq' builtin
         at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/build-support/rust/import-cargo-lock.nix:68:15:
           67|   # being evaluated otherwise, since there could be no git dependencies.
           68|   depCrates = builtins.deepSeq gitShaOutputHash (builtins.map mkCrate depPackages);
             |               ^
           69|

       … while calling the 'map' builtin
         at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/build-support/rust/import-cargo-lock.nix:68:50:
           67|   # being evaluated otherwise, since there could be no git dependencies.
           68|   depCrates = builtins.deepSeq gitShaOutputHash (builtins.map mkCrate depPackages);
             |                                                  ^
           69|

       … while calling the 'filter' builtin
         at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/build-support/rust/import-cargo-lock.nix:61:17:
           60|   # safely skip it.
           61|   depPackages = builtins.filter (p: p ? "source") packages;
             |                 ^
           62|

       … while calling the 'fromTOML' builtin
         at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/build-support/rust/import-cargo-lock.nix:50:20:
           49|
           50|   parsedLockFile = builtins.fromTOML lockFileContents;
             |                    ^
           51|

       … while evaluating the argument passed to builtins.fromTOML

       … while calling the 'readFile' builtin
         at /nix/store/fy7zcm8ya6p215wvrlqrl8022da6asn0-source/pkgs/build-support/rust/import-cargo-lock.nix:47:10:
           46|     if lockFile != null
           47|     then builtins.readFile lockFile
             |          ^
           48|     else args.lockFileContents;

       error: opening file '/nix/store/qh8gf0sl8znhnjwc1ksif7pwik26dsyd-source/Cargo.lock': No such file or directory
```
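Since `import-cargo-lock.nix` reads the lockfile from the fetched source tree,
the fix is to commit `Cargo.lock` to the repository. A minimal sketch, assuming
the file was previously ignored by `.gitignore`:

```bash
# Force-add the lockfile past the old .gitignore entry, then commit it
git add -f Cargo.lock
git commit -m "chore: update Cargo.lock"
```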

The PR allows for a successful build:
```bash
$ ls -R result
result:
target

result/target:
release

result/target/release:
librln.a  librln.d  librln.rlib  librln.so
```

---------

Co-authored-by: Jarrad Hope <jarrad@logos.co>
Co-authored-by: Vinh Trịnh <108657096+vinhtc27@users.noreply.github.com>
2025-09-17 14:57:06 +02:00
vinhtc27
c78f1f1534 chore: remove duplicate feature flag 2025-09-16 15:08:38 +07:00
Vinh Trịnh
4d62a4d60d Merge branch 'master' into fix-cross-build-and-update-deps 2025-09-16 14:57:46 +07:00
Vinh Trịnh
57b694db5d chore(rln-wasm): remove wasm-bindgen-cli installation (#341)
Currently, the new wasm-bindgen-cli version [causes CI to
fail](https://github.com/vacp2p/zerokit/actions/runs/17699917161/job/50313998747),
and it is no longer needed for the parallel feature, so it is better to
remove it from the codebase.
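For reference, this is the installation step dropped from the Makefile in this compare:
```bash
cargo install wasm-bindgen-cli --version=0.2.100
```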
2025-09-16 14:55:18 +07:00
vinhtc27
fd568c17b3 chore(rln-wasm-utils): update wasm-bindgen-test version 2025-09-14 16:31:56 +07:00
vinhtc27
cf845c6a74 docs: update contributing guidelines to include rln-wasm-utils and improve clippy command 2025-09-14 16:24:01 +07:00
vinhtc27
fe566b3314 fix: exclude rln-cli, adjust pmtree-ft feature flags to avoid feature config when building --no-default-features --features optimalmerkletree | fullmerkletree 2025-09-14 16:16:59 +07:00
Vinh Trịnh
0b00c639a0 feat(rln): improve the PmTreeConfig initialization process with builder pattern (#334) 2025-09-03 18:54:08 +07:00
Vinh Trịnh
7c801a804e chore: remove cmake due to CI error and skip tests and benchmarks on draft pull requests (#339) 2025-09-03 15:56:09 +07:00
Joe Wanga
9da80dd807 docs: add comprehensive CONTRIBUTING.md with contributing guidelines (#331)
## Description
Adds a comprehensive CONTRIBUTING.md document that addresses all
requirements from issue #309.

---------

Co-authored-by: Ekaterina Broslavskaya <seemenkina@gmail.com>
2025-08-19 11:56:05 +03:00
Vinh Trịnh
bcbd6a97af chore: consistent naming and update docs for merkle trees (#333) 2025-08-18 21:37:28 +07:00
Ekaterina Broslavskaya
6965cf2852 feat(rln-wasm-utils): extracting identity generation and hash functions into a separate module (#332)
- Separated all identity generation functions into standalone functions
rather than RLN methods
- Added big-endian (BE) support, so far only for these functions
- Covered the functions with tests, including conversion to big endian
- Prepared for publication, but actually awaiting the initial
publication of the RLN module

@vinhtc27, please check that everything is correct from the wasm point
of view. This module does not require parallel computing, so if there
are any unnecessary dependencies, builds, etc., please let me know.
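To try the extracted functions locally, the module's Node-based test task can
be run as described in the rln-wasm-utils README added in this compare:
```bash
cd rln-wasm-utils
cargo make test
```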

---------

Co-authored-by: vinhtc27 <vinhtc27@gmail.com>
2025-07-31 16:05:46 +03:00
Vinh Trịnh
578e0507b3 feat: add wasm parallel testcase and simplify the witness_calculator.js (#328)
- Tested the parallel feature for rln-wasm on this branch (local commands
sketched below): https://github.com/vacp2p/zerokit/tree/benchmark-v0.9.0
- Simplified the test case by using the default generated
witness_calculator.js file for both Node and browser tests
- Added a WASM parallel test case using the latest wasm-bindgen-rayon
version 1.3.0
- [Successful CI
run](https://github.com/vacp2p/zerokit/actions/runs/16570298449) when
Cargo.lock is included; the run fails if the file is ignored in the codebase.
- Requires publishing a new pmtree version [on this
PR](https://github.com/vacp2p/pmtree/pull/4) before merging this branch.
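A minimal local run of the parallel build and tests, assuming the cargo-make
tasks in rln-wasm/Makefile.toml and a nightly toolchain with the
`wasm32-unknown-unknown` target and `rust-src` component (as in the CI job):
```bash
cd rln-wasm
cargo make build_parallel
cargo make test_parallel --release
```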
2025-07-30 19:18:30 +07:00
63 changed files with 4845 additions and 1307 deletions


@@ -9,7 +9,9 @@ on:
- "!rln/src/**"
- "!rln/resources/**"
- "!utils/src/**"
- "!rln-wasm-utils/**"
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
paths-ignore:
- "**.md"
- "!.github/workflows/*.yml"
@@ -17,11 +19,14 @@ on:
- "!rln/src/**"
- "!rln/resources/**"
- "!utils/src/**"
- "!rln-wasm-utils/**"
name: Tests
name: CI
jobs:
utils-test:
# skip tests on draft PRs
if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
strategy:
matrix:
platform: [ubuntu-latest, macos-latest]
@@ -44,6 +49,8 @@ jobs:
working-directory: ${{ matrix.crate }}
rln-test:
# skip tests on draft PRs
if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
strategy:
matrix:
platform: [ubuntu-latest, macos-latest]
@@ -71,6 +78,8 @@ jobs:
working-directory: ${{ matrix.crate }}
rln-wasm-test:
# skip tests on draft PRs
if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
strategy:
matrix:
platform: [ubuntu-latest, macos-latest]
@@ -88,77 +97,78 @@ jobs:
- name: Install dependencies
run: make installdeps
- name: Build rln-wasm
run: |
if [ ${{ matrix.feature }} == default ]; then
cargo make build
else
cargo make build_${{ matrix.feature }}
fi
run: cargo make build
working-directory: ${{ matrix.crate }}
- name: Test rln-wasm
run: |
if [ ${{ matrix.feature }} == default ]; then
cargo make test --release
else
cargo make test_${{ matrix.feature }} --release
fi
- name: Test rln-wasm on node
run: cargo make test --release
working-directory: ${{ matrix.crate }}
- name: Test rln-wasm on browser
run: |
if [ ${{ matrix.feature }} == default ]; then
cargo make test_browser --release
else
cargo make test_browser_${{ matrix.feature }} --release
fi
run: cargo make test_browser --release
working-directory: ${{ matrix.crate }}
# rln-wasm-parallel-test:
# strategy:
# matrix:
# platform: [ubuntu-latest, macos-latest]
# crate: [rln-wasm]
# feature: ["parallel"]
# runs-on: ${{ matrix.platform }}
# timeout-minutes: 60
rln-wasm-parallel-test:
# skip tests on draft PRs
if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
strategy:
matrix:
platform: [ubuntu-latest, macos-latest]
crate: [rln-wasm]
feature: ["parallel"]
runs-on: ${{ matrix.platform }}
timeout-minutes: 60
# name: Test - ${{ matrix.crate }} - ${{ matrix.platform }} - ${{ matrix.feature }}
# steps:
# - uses: actions/checkout@v4
# - name: Install nightly toolchain
# uses: dtolnay/rust-toolchain@nightly
# with:
# components: rust-src
# targets: wasm32-unknown-unknown
# - uses: Swatinem/rust-cache@v2
# - name: Install dependencies
# run: make installdeps
# - name: Build rln-wasm in parallel mode
# run: |
# if [ ${{ matrix.feature }} == default ]; then
# cargo make build
# else
# cargo make build_${{ matrix.feature }}
# fi
# working-directory: ${{ matrix.crate }}
# - name: Test rln-wasm in parallel mode
# run: |
# if [ ${{ matrix.feature }} == default ]; then
# cargo make test --release
# else
# cargo make test_${{ matrix.feature }} --release
# fi
# working-directory: ${{ matrix.crate }}
name: Test - ${{ matrix.crate }} - ${{ matrix.platform }} - ${{ matrix.feature }}
steps:
- uses: actions/checkout@v4
- name: Install nightly toolchain
uses: dtolnay/rust-toolchain@nightly
with:
components: rust-src
targets: wasm32-unknown-unknown
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
run: make installdeps
- name: Build rln-wasm in parallel mode
run: cargo make build_parallel
working-directory: ${{ matrix.crate }}
- name: Test rln-wasm in parallel mode on browser
run: cargo make test_parallel --release
working-directory: ${{ matrix.crate }}
rln-wasm-utils-test:
# skip tests on draft PRs
if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
strategy:
matrix:
platform: [ubuntu-latest, macos-latest]
crate: [rln-wasm-utils]
runs-on: ${{ matrix.platform }}
timeout-minutes: 60
name: Test - ${{ matrix.crate }} - ${{ matrix.platform }}
steps:
- uses: actions/checkout@v4
- name: Install stable toolchain
uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
run: make installdeps
- name: Test rln-wasm-utils
run: cargo make test --release
working-directory: ${{ matrix.crate }}
lint:
# run on both ready and draft PRs
if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
strategy:
matrix:
# we run lint tests only on ubuntu
platform: [ubuntu-latest]
crate: [rln, rln-wasm, utils]
crate: [rln, rln-wasm, rln-wasm-utils, utils]
runs-on: ${{ matrix.platform }}
timeout-minutes: 60
name: lint - ${{ matrix.crate }} - ${{ matrix.platform }}
name: Lint - ${{ matrix.crate }} - ${{ matrix.platform }}
steps:
- name: Checkout sources
uses: actions/checkout@v4
@@ -180,8 +190,8 @@ jobs:
working-directory: ${{ matrix.crate }}
benchmark-utils:
# run only in pull requests
if: github.event_name == 'pull_request'
# run only on ready pull requests
if: github.event_name == 'pull_request' && !github.event.pull_request.draft
strategy:
matrix:
# we run benchmark tests only on ubuntu
@@ -190,7 +200,7 @@ jobs:
runs-on: ${{ matrix.platform }}
timeout-minutes: 60
name: benchmark - ${{ matrix.crate }} - ${{ matrix.platform }}
name: Benchmark - ${{ matrix.crate }} - ${{ matrix.platform }}
steps:
- name: Checkout sources
uses: actions/checkout@v4
@@ -201,8 +211,8 @@ jobs:
cwd: ${{ matrix.crate }}
benchmark-rln:
# run only in pull requests
if: github.event_name == 'pull_request'
# run only on ready pull requests
if: github.event_name == 'pull_request' && !github.event.pull_request.draft
strategy:
matrix:
# we run benchmark tests only on ubuntu
@@ -212,7 +222,7 @@ jobs:
runs-on: ${{ matrix.platform }}
timeout-minutes: 60
name: benchmark - ${{ matrix.crate }} - ${{ matrix.platform }} - ${{ matrix.feature }}
name: Benchmark - ${{ matrix.crate }} - ${{ matrix.platform }} - ${{ matrix.feature }}
steps:
- name: Checkout sources
uses: actions/checkout@v4


@@ -34,7 +34,7 @@ jobs:
run: make installdeps
- name: Cross build
run: |
cross build --release --target ${{ matrix.target }} --no-default-features --features ${{ matrix.feature }} --workspace --exclude rln-cli
cross build --release --target ${{ matrix.target }} --no-default-features --features ${{ matrix.feature }} --workspace
mkdir release
cp target/${{ matrix.target }}/release/librln* release/
tar -czvf ${{ matrix.target }}-${{ matrix.feature }}-rln.tar.gz release/
@@ -74,7 +74,7 @@ jobs:
run: make installdeps
- name: Cross build
run: |
cross build --release --target ${{ matrix.target }} --no-default-features --features ${{ matrix.feature }} --workspace --exclude rln-cli
cross build --release --target ${{ matrix.target }} --no-default-features --features ${{ matrix.feature }} --workspace
mkdir release
cp target/${{ matrix.target }}/release/librln* release/
tar -czvf ${{ matrix.target }}-${{ matrix.feature }}-rln.tar.gz release/
@@ -85,8 +85,8 @@ jobs:
path: ${{ matrix.target }}-${{ matrix.feature }}-rln.tar.gz
retention-days: 2
browser-rln-wasm:
name: Browser build
rln-wasm:
name: Build rln-wasm
runs-on: ubuntu-latest
strategy:
matrix:
@@ -107,7 +107,7 @@ jobs:
components: rust-src
- uses: Swatinem/rust-cache@v2
with:
key: wasm-${{ matrix.feature }}
key: rln-wasm-${{ matrix.feature }}
- name: Install dependencies
run: make installdeps
- name: Install wasm-pack
@@ -116,25 +116,14 @@ jobs:
run: |
sudo apt-get update
sudo apt-get install -y binaryen
- name: Build wasm package
- name: Build rln-wasm package
run: |
if [[ ${{ matrix.feature }} == *parallel* ]]; then
env RUSTFLAGS="-C target-feature=+atomics,+bulk-memory,+mutable-globals" \
rustup run nightly wasm-pack build --release --target web --scope waku \
--features ${{ matrix.feature }} -Z build-std=panic_abort,std
wasm-bindgen --target web --split-linked-modules --out-dir ./pkg \
./target/wasm32-unknown-unknown/release/rln_wasm.wasm
find ./pkg/snippets -name "workerHelpers.worker.js" \
-exec sed -i.bak 's|from '\''\.\.\/\.\.\/\.\.\/'\'';|from "../../../rln_wasm.js";|g' {} \; \
-exec rm -f {}.bak \;
find ./pkg/snippets -name "workerHelpers.worker.js" \
-exec sed -i.bak 's|await initWbg(module, memory);|await initWbg({ module, memory });|g' {} \; \
-exec rm -f {}.bak \;
else
wasm-pack build --release --target web --scope waku --features ${{ matrix.feature }}
wasm-pack build --release --target web --scope waku
fi
sed -i.bak 's/rln-wasm/zerokit-rln-wasm/g' pkg/package.json && rm pkg/package.json.bak
@@ -144,18 +133,64 @@ jobs:
mkdir release
cp -r pkg/* release/
tar -czvf browser-rln-wasm-${{ matrix.feature }}.tar.gz release/
tar -czvf rln-wasm-${{ matrix.feature }}.tar.gz release/
working-directory: rln-wasm
- name: Upload archive artifact
uses: actions/upload-artifact@v4
with:
name: Browser-${{ matrix.feature }}-rln-wasm-archive
path: rln-wasm/browser-${{ matrix.feature }}-rln-wasm.tar.gz
name: rln-wasm-${{ matrix.feature }}-archive
path: rln-wasm/rln-wasm-${{ matrix.feature }}.tar.gz
retention-days: 2
rln-wasm-utils:
name: Build rln-wasm-utils
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v4
- name: Install stable toolchain
uses: dtolnay/rust-toolchain@stable
with:
targets: wasm32-unknown-unknown
- name: Install nightly toolchain
uses: dtolnay/rust-toolchain@nightly
with:
targets: wasm32-unknown-unknown
components: rust-src
- uses: Swatinem/rust-cache@v2
with:
key: rln-wasm-utils
- name: Install dependencies
run: make installdeps
- name: Install wasm-pack
run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
- name: Install binaryen
run: |
sudo apt-get update
sudo apt-get install -y binaryen
- name: Build rln-wasm-utils package
run: |
wasm-pack build --release --target web --scope waku
sed -i.bak 's/rln-wasm-utils/zerokit-rln-wasm-utils/g' pkg/package.json && rm pkg/package.json.bak
wasm-opt pkg/rln_wasm_utils_bg.wasm -Oz --strip-debug --strip-dwarf \
--remove-unused-module-elements --vacuum -o pkg/rln_wasm_utils_bg.wasm
mkdir release
cp -r pkg/* release/
tar -czvf rln-wasm-utils.tar.gz release/
working-directory: rln-wasm-utils
- name: Upload archive artifact
uses: actions/upload-artifact@v4
with:
name: rln-wasm-utils-archive
path: rln-wasm-utils/rln-wasm-utils.tar.gz
retention-days: 2
prepare-prerelease:
name: Prepare pre-release
needs: [linux, macos, browser-rln-wasm]
needs: [linux, macos, rln-wasm, rln-wasm-utils]
runs-on: ubuntu-latest
steps:
- name: Checkout code

.gitignore

@@ -1,14 +1,14 @@
# Common files to ignore in Rust projects
.DS_Store
.idea
*.log
tmp/
rln/pmtree_db
rln-cli/database
# Generated by Cargo
# will have compiled files and executables
# Generated by Cargo will have compiled files and executables
/target
/Cargo.lock
# Generated by rln-cli
rln-cli/database
# Generated by Nix
result

CONTRIBUTING.md

@@ -0,0 +1,205 @@
# Contributing to Zerokit
Thank you for your interest in contributing to Zerokit!
This guide will discuss how the Zerokit team handles [Commits](#commits),
[Pull Requests](#pull-requests) and [Merging](#merging).
**Note:** We won't force external contributors to follow this verbatim,
but following these guidelines helps us accept your contributions.
## Getting Started
1. Fork the repository
2. Create a feature branch: `git checkout -b fix/your-bug-fix` or `git checkout -b feat/your-feature-name`
3. Make your changes following our guidelines
4. Ensure relevant tests pass (see [testing guidelines](#building-and-testing))
5. Commit your changes (signed commits are highly encouraged - see [commit guidelines](#commits))
6. Push and create a Pull Request (a minimal command sketch follows)
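A minimal sketch of that flow, assuming your fork is the `origin` remote and `feat/your-feature-name` is a placeholder branch name:
```bash
git checkout -b feat/your-feature-name
# ...make your changes, then verify locally...
make test
git commit -S -m "feat(scope): describe your change"
git push origin feat/your-feature-name
```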
## Development Setup
### Prerequisites
Install the required dependencies:
```bash
make installdeps
```
Or use Nix:
```bash
nix develop
```
### Building and Testing
```bash
# Build all crates
make build
# Run standard tests
make test
# Module-specific testing
cd rln && cargo make test_stateless # Test stateless features
cd rln-wasm && cargo make test_browser # Test in browser headless mode
cd rln-wasm && cargo make test_parallel # Test parallel features
```
Choose the appropriate test commands based on your changes:
- Core RLN changes: `make test`
- Stateless features: `cargo make test_stateless`
- WASM/browser features: `cargo make test_browser`
- Parallel computation: `cargo make test_parallel`
### Tools
We recommend using the [markdownlint extension](https://marketplace.visualstudio.com/items?itemName=DavidAnson.vscode-markdownlint)
for VS Code to maintain consistent documentation formatting.
## Commits
We want to keep our commits small and focused.
This allows for easily reviewing individual commits and/or
splitting up pull requests when they grow too big.
Additionally, this allows us to merge smaller changes quicker and release more often.
**All commits must be GPG signed.**
This ensures the authenticity and integrity of contributions.
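A one-time setup sketch, assuming a GPG key already exists and `<YOUR_KEY_ID>` is a placeholder for its ID:
```bash
# Tell git which key to use and sign every commit by default
git config --global user.signingkey <YOUR_KEY_ID>
git config --global commit.gpgsign true
```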
### Conventional Commits
When making the commit, write the commit message
following the [Conventional Commits (v1.0.0)](https://www.conventionalcommits.org/en/v1.0.0/) specification.
Following this convention allows us to provide an automated release process
that also generates a detailed Changelog.
As described by the specification, our commit messages should be written as:
```markdown
<type>[optional scope]: <description>
[optional body]
[optional footer(s)]
```
Some examples of this pattern include:
```markdown
feat(rln): add parallel witness calculation support
```
```markdown
fix(rln-wasm): resolve memory leak in browser threading
```
```markdown
docs: update RLN protocol flow documentation
```
#### Scopes
Use scopes to improve the Changelog:
- `rln` - Core RLN implementation
- `rln-cli` - Command-line interface
- `rln-wasm` - WebAssembly bindings
- `rln-wasm-utils` - WebAssembly utilities
- `utils` - Cryptographic utilities (Merkle trees, Poseidon hash)
- `ci` - Continuous integration
#### Breaking Changes
Mark breaking changes by adding `!` after the type:
```markdown
feat(rln)!: change proof generation API
```
## Pull Requests
Before creating a pull request, search for related issues.
If none exist, create an issue describing the problem you're solving.
### CI Flow
Our continuous integration automatically runs when you create a Pull Request:
- **Build verification**: All crates compile successfully
- **Test execution**: Comprehensive testing across all modules and feature combinations
- **Code formatting**: `cargo fmt` compliance
- **Linting**: `cargo clippy` checks
- **Cross-platform builds**: Testing on multiple platforms
Ensure the following commands pass before submitting:
```bash
# Format code
cargo fmt --all
# Check for common mistakes
cargo clippy --all-targets
# Run all tests
make test
```
### Adding Tests
Include tests for new functionality:
- **Unit tests** for specific functions
- **Integration tests** for broader functionality
- **WASM tests** for browser compatibility
### Typos and Small Changes
For minor fixes like typos, please report them as issues instead of opening PRs.
This helps us manage resources effectively and ensures meaningful contributions.
## Merging
We use "squash merging" for all pull requests.
This combines all commits into one commit, so keep pull requests small and focused.
### Requirements
- CI checks must pass
- At least one maintainer review and approval
- All review feedback addressed
### Squash Guidelines
When squashing, update the commit title to be a proper Conventional Commit and
include any other relevant commits in the body:
```markdown
feat(rln): implement parallel witness calculation (#123)
fix(tests): resolve memory leak in test suite
chore(ci): update rust toolchain version
```
## Roadmap Alignment
Please refer to our [project roadmap](https://roadmap.vac.dev/) for current development priorities.
Consider how your changes align with these strategic goals when contributing.
## Getting Help
- **Issues**: Create a GitHub issue for bugs or feature requests
- **Discussions**: Use GitHub Discussions for questions
- **Documentation**: Check existing docs and unit tests for examples
## License
By contributing to Zerokit, you agree that your contributions will be licensed under both MIT and
Apache 2.0 licenses, consistent with the project's dual licensing.
## Additional Resources
- [Conventional Commits Guide](https://www.conventionalcommits.org/en/v1.0.0/)
- [Project GitHub Repository](https://github.com/vacp2p/zerokit)

Cargo.lock (generated)

File diff suppressed because it is too large


@@ -1,6 +1,6 @@
[workspace]
members = ["rln", "rln-cli", "utils"]
exclude = ["rln-wasm"]
members = ["rln", "utils"]
exclude = ["rln-cli", "rln-wasm", "rln-wasm-utils"]
resolver = "2"
# Compilation profile for any non-workspace member.


@@ -13,7 +13,7 @@ endif
installdeps: .pre-build
ifeq ($(shell uname),Darwin)
@brew install cmake ninja binaryen
@brew install ninja binaryen
else ifeq ($(shell uname),Linux)
@if [ -f /etc/os-release ] && grep -q "ID=nixos" /etc/os-release; then \
echo "Detected NixOS, skipping apt-get installation."; \
@@ -22,7 +22,6 @@ else ifeq ($(shell uname),Linux)
fi
endif
@which wasm-pack > /dev/null && wasm-pack --version | grep -q "0.13.1" || cargo install wasm-pack --version=0.13.1
@which wasm-bindgen > /dev/null && wasm-bindgen --version | grep -q "0.2.100" || cargo install wasm-bindgen-cli --version=0.2.100
@test -s "$$HOME/.nvm/nvm.sh" || curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.2/install.sh | bash
@bash -c '. "$$HOME/.nvm/nvm.sh"; [ "$$(node -v 2>/dev/null)" = "v22.14.0" ] || nvm install 22.14.0; nvm use 22.14.0; nvm alias default 22.14.0'


@@ -12,7 +12,8 @@ A collection of Zero Knowledge modules written in Rust and designed to be used i
Zerokit provides zero-knowledge cryptographic primitives with a focus on performance, security, and usability.
The current focus is on Rate-Limiting Nullifier [RLN](https://github.com/Rate-Limiting-Nullifier) implementation.
Current implementation is based on the following [specification](https://github.com/vacp2p/rfc-index/blob/main/vac/raw/rln-v2.md)
Current implementation is based on the following
[specification](https://github.com/vacp2p/rfc-index/blob/main/vac/raw/rln-v2.md)
and focused on RLNv2 which allows to set a rate limit for the number of messages that can be sent by a user.
## Features
@@ -24,7 +25,8 @@ and focused on RLNv2 which allows to set a rate limit for the number of messages
## Architecture
Zerokit currently focuses on RLN (Rate-Limiting Nullifier) implementation using [Circom](https://iden3.io/circom) circuits through ark-circom, providing an alternative to existing native Rust implementations.
Zerokit currently focuses on RLN (Rate-Limiting Nullifier) implementation using [Circom](https://iden3.io/circom)
circuits through ark-circom, providing an alternative to existing native Rust implementations.
## Build and Test


@@ -31,7 +31,7 @@
buildPackage = pkgs.callPackage ./nix/default.nix;
buildRln = (buildPackage { src = self; project = "rln"; }).override;
in rec {
rln = buildRln
rln = buildRln { };
rln-linux-arm64 = buildRln {
target-platform = "aarch64-multiplatform";


@@ -6,6 +6,7 @@
release ? true,
target-platform ? null,
rust-target ? null,
features ? null,
}:
let
@@ -29,10 +30,12 @@ in rustPlatform.buildRustPackage {
src = builtins.path { path = src; name = "zerokit"; };
cargoLock = {
lockFile = ../Cargo.lock;
lockFile = src + "/Cargo.lock";
allowBuiltinFetchGit = true;
};
nativeBuildInputs = [ pkgs.rust-cbindgen ];
doCheck = false;
CARGO_HOME = "/tmp";
@@ -46,11 +49,11 @@ in rustPlatform.buildRustPackage {
'';
installPhase = ''
mkdir -p $out/
for file in $(find target -name 'librln.*' | grep -v deps/); do
mkdir -p $out/$(dirname $file)
cp -r $file $out/$file
done
set -eu
mkdir -p $out/lib
find target -type f -name 'librln.*' -not -path '*/deps/*' -exec cp -v '{}' "$out/lib/" \;
mkdir -p $out/include
cbindgen ${src}/rln -l c > "$out/include/rln.h"
'';


@@ -37,9 +37,9 @@ You can run the example using the following command:
cargo run --example relay
```
You can also change **MESSAGE_LIMIT** and **TREEE_HEIGHT** in the [relay.rs](src/examples/relay.rs) file to see how the RLN instance behaves with different parameters.
You can also change **MESSAGE_LIMIT** and **TREE_DEPTH** in the [relay.rs](src/examples/relay.rs) file to see how the RLN instance behaves with different parameters.
The customize **TREEE_HEIGHT** constant differs from the default value of `20` should follow [Custom Circuit Compilation](../rln/README.md#advanced-custom-circuit-compilation) instructions.
The customize **TREE_DEPTH** constant differs from the default value of `20` should follow [Custom Circuit Compilation](../rln/README.md#advanced-custom-circuit-compilation) instructions.
## Stateless Example
@@ -60,19 +60,19 @@ cargo run --example stateless --no-default-features --features stateless
To initialize a new RLN instance:
```bash
cargo run new --tree-height <HEIGHT>
cargo run new --tree-depth <DEPTH>
```
To initialize an RLN instance with custom parameters:
```bash
cargo run new-with-params --resources-path <PATH> --tree-height <HEIGHT>
cargo run new-with-params --resources-path <PATH> --tree-depth <DEPTH>
```
To update the Merkle tree height:
To update the Merkle tree depth:
```bash
cargo run set-tree --tree-height <HEIGHT>
cargo run set-tree --tree-depth <DEPTH>
```
### Leaf Operations


@@ -1,11 +1,9 @@
{
"tree_config": {
"path": "database",
"temporary": false,
"cache_capacity": 150000,
"flush_every_ms": 12000,
"mode": "HighThroughput",
"use_compression": false
},
"tree_height": 20
"path": "database",
"temporary": false,
"cache_capacity": 1073741824,
"flush_every_ms": 500,
"mode": "HighThroughput",
"use_compression": false,
"tree_depth": 20
}


@@ -1,23 +1,23 @@
use std::path::PathBuf;
use clap::Subcommand;
use rln::circuit::TEST_TREE_HEIGHT;
use rln::circuit::TEST_TREE_DEPTH;
#[derive(Subcommand)]
pub(crate) enum Commands {
New {
#[arg(short, long, default_value_t = TEST_TREE_HEIGHT)]
tree_height: usize,
#[arg(short, long, default_value_t = TEST_TREE_DEPTH)]
tree_depth: usize,
},
NewWithParams {
#[arg(short, long, default_value_t = TEST_TREE_HEIGHT)]
tree_height: usize,
#[arg(short, long, default_value = "../rln/resources/tree_height_20")]
#[arg(short, long, default_value_t = TEST_TREE_DEPTH)]
tree_depth: usize,
#[arg(short, long, default_value = "../rln/resources/tree_depth_10")]
resources_path: PathBuf,
},
SetTree {
#[arg(short, long, default_value_t = TEST_TREE_HEIGHT)]
tree_height: usize,
#[arg(short, long, default_value_t = TEST_TREE_DEPTH)]
tree_depth: usize,
},
SetLeaf {
#[arg(short, long)]


@@ -6,15 +6,9 @@ use serde_json::Value;
pub const RLN_CONFIG_PATH: &str = "RLN_CONFIG_PATH";
#[derive(Default, Serialize, Deserialize)]
#[derive(Serialize, Deserialize)]
pub(crate) struct Config {
pub inner: Option<InnerConfig>,
}
#[derive(Default, Serialize, Deserialize)]
pub(crate) struct InnerConfig {
pub tree_height: usize,
pub tree_config: Value,
pub tree_config: Option<String>,
}
impl Config {
@@ -25,14 +19,13 @@ impl Config {
let mut file = File::open(path)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let inner: InnerConfig = serde_json::from_str(&contents)?;
Ok(Config { inner: Some(inner) })
let tree_config: Value = serde_json::from_str(&contents)?;
println!("Initializing RLN with custom config");
Ok(Config {
tree_config: Some(tree_config.to_string()),
})
}
Err(_) => Ok(Config::default()),
Err(_) => Ok(Config { tree_config: None }),
}
}
pub(crate) fn as_bytes(&self) -> Vec<u8> {
serde_json::to_string(&self.inner).unwrap().into_bytes()
}
}


@@ -9,7 +9,7 @@ use clap::{Parser, Subcommand};
use color_eyre::{eyre::eyre, Report, Result};
use rln::{
circuit::Fr,
hashers::{hash_to_field, poseidon_hash},
hashers::{hash_to_field_le, poseidon_hash},
protocol::{keygen, prepare_prove_input, prepare_verify_input},
public::RLN,
utils::{fr_to_bytes_le, generate_input_buffer, IdSecret},
@@ -17,7 +17,7 @@ use rln::{
const MESSAGE_LIMIT: u32 = 1;
const TREEE_HEIGHT: usize = 20;
const TREE_DEPTH: usize = 10;
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
@@ -67,7 +67,7 @@ struct RLNSystem {
impl RLNSystem {
fn new() -> Result<Self> {
let mut resources: Vec<Vec<u8>> = Vec::new();
let resources_path: PathBuf = format!("../rln/resources/tree_height_{TREEE_HEIGHT}").into();
let resources_path: PathBuf = format!("../rln/resources/tree_depth_{TREE_DEPTH}").into();
let filenames = ["rln_final.arkzkey", "graph.bin"];
for filename in filenames {
let fullpath = resources_path.join(Path::new(filename));
@@ -78,7 +78,7 @@ impl RLNSystem {
resources.push(output_buffer);
}
let rln = RLN::new_with_params(
TREEE_HEIGHT,
TREE_DEPTH,
resources[0].clone(),
resources[1].clone(),
generate_input_buffer(),
@@ -120,7 +120,7 @@ impl RLNSystem {
self.local_identities.insert(index, identity);
}
Err(_) => {
println!("Maximum user limit reached: 2^{TREEE_HEIGHT}");
println!("Maximum user limit reached: 2^{TREE_DEPTH}");
}
};
@@ -244,8 +244,8 @@ fn main() -> Result<()> {
println!("Initializing RLN instance...");
print!("\x1B[2J\x1B[1;1H");
let mut rln_system = RLNSystem::new()?;
let rln_epoch = hash_to_field(b"epoch");
let rln_identifier = hash_to_field(b"rln-identifier");
let rln_epoch = hash_to_field_le(b"epoch");
let rln_identifier = hash_to_field_le(b"rln-identifier");
let external_nullifier = poseidon_hash(&[rln_epoch, rln_identifier]);
println!("RLN Relay Example:");
println!("Message Limit: {MESSAGE_LIMIT}");


@@ -8,8 +8,8 @@ use std::{
use clap::{Parser, Subcommand};
use color_eyre::{eyre::eyre, Result};
use rln::{
circuit::{Fr, TEST_TREE_HEIGHT},
hashers::{hash_to_field, poseidon_hash, PoseidonHash},
circuit::{Fr, TEST_TREE_DEPTH},
hashers::{hash_to_field_le, poseidon_hash, PoseidonHash},
protocol::{keygen, prepare_verify_input, rln_witness_from_values, serialize_witness},
public::RLN,
utils::{fr_to_bytes_le, IdSecret},
@@ -71,7 +71,7 @@ impl RLNSystem {
let rln = RLN::new()?;
let default_leaf = Fr::from(0);
let tree: OptimalMerkleTree<PoseidonHash> = OptimalMerkleTree::new(
TEST_TREE_HEIGHT,
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<OptimalMerkleTree<PoseidonHash>>::default(),
)
@@ -128,7 +128,7 @@ impl RLNSystem {
};
let merkle_proof = self.tree.proof(user_index)?;
let x = hash_to_field(signal.as_bytes());
let x = hash_to_field_le(signal.as_bytes());
let rln_witness = rln_witness_from_values(
identity.identity_secret_hash.clone(),
@@ -244,8 +244,8 @@ fn main() -> Result<()> {
println!("Initializing RLN instance...");
print!("\x1B[2J\x1B[1;1H");
let mut rln_system = RLNSystem::new()?;
let rln_epoch = hash_to_field(b"epoch");
let rln_identifier = hash_to_field(b"rln-identifier");
let rln_epoch = hash_to_field_le(b"epoch");
let rln_identifier = hash_to_field_le(b"rln-identifier");
let external_nullifier = poseidon_hash(&[rln_epoch, rln_identifier]);
println!("RLN Stateless Relay Example:");
println!("Message Limit: {MESSAGE_LIMIT}");


@@ -7,7 +7,7 @@ use std::{
use clap::Parser;
use color_eyre::{eyre::Report, Result};
use commands::Commands;
use config::{Config, InnerConfig};
use config::Config;
use rln::{
public::RLN,
utils::{bytes_le_to_fr, bytes_le_to_vec_fr},
@@ -35,19 +35,19 @@ fn main() -> Result<()> {
};
match cli.command {
Some(Commands::New { tree_height }) => {
Some(Commands::New { tree_depth }) => {
let config = Config::load_config()?;
state.rln = if let Some(InnerConfig { tree_height, .. }) = config.inner {
state.rln = if let Some(tree_config) = config.tree_config {
println!("Initializing RLN with custom config");
Some(RLN::new(tree_height, Cursor::new(config.as_bytes()))?)
Some(RLN::new(tree_depth, Cursor::new(tree_config.as_bytes()))?)
} else {
println!("Initializing RLN with default config");
Some(RLN::new(tree_height, Cursor::new(json!({}).to_string()))?)
Some(RLN::new(tree_depth, Cursor::new(json!({}).to_string()))?)
};
Ok(())
}
Some(Commands::NewWithParams {
tree_height,
tree_depth,
resources_path,
}) => {
let mut resources: Vec<Vec<u8>> = Vec::new();
@@ -56,19 +56,15 @@ fn main() -> Result<()> {
let fullpath = resources_path.join(Path::new(filename));
let mut file = File::open(&fullpath)?;
let metadata = std::fs::metadata(&fullpath)?;
let mut output_buffer = vec![0; metadata.len() as usize];
file.read_exact(&mut output_buffer)?;
resources.push(output_buffer);
let mut buffer = vec![0; metadata.len() as usize];
file.read_exact(&mut buffer)?;
resources.push(buffer);
}
let config = Config::load_config()?;
if let Some(InnerConfig {
tree_height,
tree_config,
}) = config.inner
{
if let Some(tree_config) = config.tree_config {
println!("Initializing RLN with custom config");
state.rln = Some(RLN::new_with_params(
tree_height,
tree_depth,
resources[0].clone(),
resources[1].clone(),
Cursor::new(tree_config.to_string().as_bytes()),
@@ -76,7 +72,7 @@ fn main() -> Result<()> {
} else {
println!("Initializing RLN with default config");
state.rln = Some(RLN::new_with_params(
tree_height,
tree_depth,
resources[0].clone(),
resources[1].clone(),
Cursor::new(json!({}).to_string()),
@@ -84,11 +80,11 @@ fn main() -> Result<()> {
};
Ok(())
}
Some(Commands::SetTree { tree_height }) => {
Some(Commands::SetTree { tree_depth }) => {
state
.rln
.ok_or(Report::msg("no RLN instance initialized"))?
.set_tree(tree_height)?;
.set_tree(tree_depth)?;
Ok(())
}
Some(Commands::SetLeaf { index, input }) => {


@@ -1,9 +1,10 @@
use std::io::Cursor;
use color_eyre::Result;
use rln::public::RLN;
use rln::{circuit::TEST_TREE_DEPTH, public::RLN};
use serde_json::Value;
use crate::config::{Config, InnerConfig};
use crate::config::Config;
#[derive(Default)]
pub(crate) struct State {
@@ -13,8 +14,15 @@ pub(crate) struct State {
impl State {
pub(crate) fn load_state() -> Result<State> {
let config = Config::load_config()?;
let rln = if let Some(InnerConfig { tree_height, .. }) = config.inner {
Some(RLN::new(tree_height, Cursor::new(config.as_bytes()))?)
let rln = if let Some(tree_config) = config.tree_config {
let config_json: Value = serde_json::from_str(&tree_config)?;
let tree_depth = config_json["tree_depth"]
.as_u64()
.unwrap_or(TEST_TREE_DEPTH as u64);
Some(RLN::new(
tree_depth as usize,
Cursor::new(tree_config.as_bytes()),
)?)
} else {
None
};

rln-wasm-utils/.gitignore

@@ -0,0 +1,21 @@
# Common files to ignore in Rust projects
.DS_Store
.idea
*.log
tmp/
# Generated by Cargo will have compiled files and executables
/target
Cargo.lock
# Generated by rln-wasm
pkg/
# Generated by Nix
result
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb

rln-wasm-utils/Cargo.toml

@@ -0,0 +1,35 @@
[package]
name = "rln-wasm-utils"
version = "0.1.0"
edition = "2024"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
# TODO: remove this once we have a proper release
rln = { path = "../rln", default-features = false, features = ["stateless"] }
js-sys = "0.3.77"
wasm-bindgen = "0.2.100"
rand = "0.8.5"
# The `console_error_panic_hook` crate provides better debugging of panics by
# logging them with `console.error`. This is great for development, but requires
# all the `std::fmt` and `std::panicking` infrastructure, so isn't great for
# code size when deploying.
console_error_panic_hook = { version = "0.1.7", optional = true }
[target.'cfg(target_arch = "wasm32")'.dependencies]
getrandom = { version = "0.2.16", features = ["js"] }
[dev-dependencies]
wasm-bindgen-test = "0.3.50"
web-sys = { version = "0.3.77", features = ["console"] }
ark-std = { version = "0.5.0", default-features = false }
[features]
default = ["console_error_panic_hook"]
[package.metadata.docs.rs]
all-features = true


@@ -0,0 +1,36 @@
[tasks.build]
clear = true
dependencies = ["pack_build", "pack_rename", "pack_resize"]
[tasks.pack_build]
command = "wasm-pack"
args = ["build", "--release", "--target", "web", "--scope", "waku"]
[tasks.pack_rename]
script = "sed -i.bak 's/rln-wasm-utils/zerokit-rln-wasm-utils/g' pkg/package.json && rm pkg/package.json.bak"
[tasks.pack_resize]
command = "wasm-opt"
args = [
"pkg/rln_wasm_utils_bg.wasm",
"-Oz",
"--strip-debug",
"--strip-dwarf",
"--remove-unused-module-elements",
"--vacuum",
"-o",
"pkg/rln_wasm_utils_bg.wasm",
]
[tasks.test]
command = "wasm-pack"
args = [
"test",
"--release",
"--node",
"--target",
"wasm32-unknown-unknown",
"--",
"--nocapture",
]
dependencies = ["build"]

rln-wasm-utils/README.md

@@ -0,0 +1,206 @@
# RLN WASM Utils
[![npm version](https://badge.fury.io/js/@waku%2Fzerokit-rln-wasm-utils.svg)](https://badge.fury.io/js/@waku%2Fzerokit-rln-wasm-utils)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![License: Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
The Zerokit RLN WASM Utils Module provides WebAssembly bindings for Rate-Limiting Nullifier [RLN](https://rfc.vac.dev/spec/32/) cryptographic primitives.
This module offers comprehensive functionality for identity generation and hashing needed for RLN applications.
## Features
### Identity Generation
- **Random Identity Generation**: Generate cryptographically secure random identities
- **Seeded Identity Generation**: Generate deterministic identities from seeds
- **Extended Identity Generation**: Generate extended identities with additional parameters
- **Seeded Extended Identity Generation**: Generate deterministic extended identities from seeds
- **Endianness Support**: Both little-endian and big-endian serialization support
### Hashing
- **Standard Hashing**: Hash arbitrary data to field elements
- **Poseidon Hashing**: Advanced cryptographic hashing using Poseidon hash function
- **Endianness Support**: Both little-endian and big-endian serialization support
## API Reference
### Identity Generation Functions
#### `generateMembershipKey(isLittleEndian: boolean): Uint8Array`
Generates a random membership key pair (identity secret and commitment).
**Inputs:**
- `isLittleEndian`: Boolean indicating endianness for serialization
**Outputs:** Serialized identity pair as `Uint8Array` in corresponding endianness
#### `generateExtendedMembershipKey(isLittleEndian: boolean): Uint8Array`
Generates an extended membership key with additional parameters.
**Inputs:**
- `isLittleEndian`: Boolean indicating endianness for serialization
**Outputs:** Serialized extended identity tuple as `Uint8Array` in corresponding endianness
#### `generateSeededMembershipKey(seed: Uint8Array, isLittleEndian: boolean): Uint8Array`
Generates a deterministic membership key from a seed.
**Inputs:**
- `seed`: Seed data as `Uint8Array`
- `isLittleEndian`: Boolean indicating endianness for serialization
**Outputs:** Serialized identity pair as `Uint8Array` in corresponding endianness
#### `generateSeededExtendedMembershipKey(seed: Uint8Array, isLittleEndian: boolean): Uint8Array`
Generates a deterministic extended membership key from a seed.
**Inputs:**
- `seed`: Seed data as `Uint8Array`
- `isLittleEndian`: Boolean indicating endianness for serialization
**Outputs:** Serialized extended identity tuple as `Uint8Array` in corresponding endianness
### Hashing Functions
#### `hash(input: Uint8Array, isLittleEndian: boolean): Uint8Array`
Hashes input data to a field element.
**Inputs:**
- `input`: Input data as `Uint8Array`
- `isLittleEndian`: Boolean indicating endianness for serialization
**Outputs:** Serialized hash result as `Uint8Array` in corresponding endianness
#### `poseidonHash(input: Uint8Array, isLittleEndian: boolean): Uint8Array`
Computes Poseidon hash of input field elements.
**Inputs:**
- `input`: Serialized field elements as `Uint8Array` (format: length + field elements)
- `isLittleEndian`: Boolean indicating endianness for serialization
**Outputs:** Serialized hash result as `Uint8Array` in corresponding endianness
## Usage Examples
### JavaScript/TypeScript
```javascript
import init, {
generateMembershipKey,
generateSeededMembershipKey,
hash,
poseidonHash
} from '@waku/zerokit-rln-wasm-utils';
// Initialize the WASM module
await init();
// Generate a random membership key
const membershipKey = generateMembershipKey(true); // little-endian
console.log('Membership key:', membershipKey);
// Generate a deterministic membership key from seed
const seed = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
const seededKey = generateSeededMembershipKey(seed, true);
console.log('Seeded key:', seededKey);
// Hash some data
const input = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
const hashResult = hash(input, true);
console.log('Hash result:', hashResult);
// Poseidon hash with field elements
const fieldElements = new Uint8Array([
// Length (8 bytes) + field elements (32 bytes each)
1, 0, 0, 0, 0, 0, 0, 0, // length = 1
// field element data...
]);
const poseidonResult = poseidonHash(fieldElements, true);
console.log('Poseidon hash:', poseidonResult);
```
## Install Dependencies
> [!NOTE]
> This project requires the following tools:
>
> - `wasm-pack` - for compiling Rust to WebAssembly
> - `cargo-make` - for running build commands
> - `nvm` - to install and manage Node.js
>
> Ensure all dependencies are installed before proceeding.
### Manually
#### Install `wasm-pack`
```bash
cargo install wasm-pack --version=0.13.1
```
#### Install `cargo-make`
```bash
cargo install cargo-make
```
#### Install `Node.js`
If you don't have `nvm` (Node Version Manager), install it by following
the [installation instructions](https://github.com/nvm-sh/nvm?tab=readme-ov-file#install--update-script).
After installing `nvm`, install and use Node.js `v22.14.0`:
```bash
nvm install 22.14.0
nvm use 22.14.0
nvm alias default 22.14.0
```
If you already have Node.js installed,
check your version with the `node -v` command; the version must be strictly greater than 22.
### Or install everything
You can run the following command from the root of the repository to install all required dependencies for `zerokit`:
```bash
make installdeps
```
## Building the library
First, navigate to the rln-wasm-utils directory:
```bash
cd rln-wasm-utils
```
Compile rln-wasm-utils for `wasm32-unknown-unknown`:
```bash
cargo make build
```
## Running tests
```bash
cargo make test
```
## License
This project is licensed under both MIT and Apache 2.0 licenses. See the LICENSE files for details.

rln-wasm-utils/src/lib.rs

@@ -0,0 +1,112 @@
#![cfg(target_arch = "wasm32")]
use js_sys::Uint8Array;
use rln::public::{
extended_key_gen, hash, key_gen, poseidon_hash, seeded_extended_key_gen, seeded_key_gen,
};
use std::vec::Vec;
use wasm_bindgen::prelude::*;
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateMembershipKey)]
pub fn wasm_key_gen(is_little_endian: bool) -> Result<Uint8Array, String> {
let mut output_data: Vec<u8> = Vec::new();
if let Err(err) = key_gen(&mut output_data, is_little_endian) {
std::mem::forget(output_data);
Err(format!(
"Msg: could not generate membership keys, Error: {:#?}",
err
))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateExtendedMembershipKey)]
pub fn wasm_extended_key_gen(is_little_endian: bool) -> Result<Uint8Array, String> {
let mut output_data: Vec<u8> = Vec::new();
if let Err(err) = extended_key_gen(&mut output_data, is_little_endian) {
std::mem::forget(output_data);
Err(format!(
"Msg: could not generate membership keys, Error: {:#?}",
err
))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateSeededMembershipKey)]
pub fn wasm_seeded_key_gen(seed: Uint8Array, is_little_endian: bool) -> Result<Uint8Array, String> {
let mut output_data: Vec<u8> = Vec::new();
let input_data = &seed.to_vec()[..];
if let Err(err) = seeded_key_gen(input_data, &mut output_data, is_little_endian) {
std::mem::forget(output_data);
Err(format!(
"Msg: could not generate membership key, Error: {:#?}",
err
))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateSeededExtendedMembershipKey)]
pub fn wasm_seeded_extended_key_gen(
seed: Uint8Array,
is_little_endian: bool,
) -> Result<Uint8Array, String> {
let mut output_data: Vec<u8> = Vec::new();
let input_data = &seed.to_vec()[..];
if let Err(err) = seeded_extended_key_gen(input_data, &mut output_data, is_little_endian) {
std::mem::forget(output_data);
Err(format!(
"Msg: could not generate membership key, Error: {:#?}",
err
))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
#[wasm_bindgen(js_name = hash)]
pub fn wasm_hash(input: Uint8Array, is_little_endian: bool) -> Result<Uint8Array, String> {
let mut output_data: Vec<u8> = Vec::new();
let input_data = &input.to_vec()[..];
if let Err(err) = hash(input_data, &mut output_data, is_little_endian) {
std::mem::forget(output_data);
Err(format!("Msg: could not generate hash, Error: {:#?}", err))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
#[wasm_bindgen(js_name = poseidonHash)]
pub fn wasm_poseidon_hash(input: Uint8Array, is_little_endian: bool) -> Result<Uint8Array, String> {
let mut output_data: Vec<u8> = Vec::new();
let input_data = &input.to_vec()[..];
if let Err(err) = poseidon_hash(input_data, &mut output_data, is_little_endian) {
std::mem::forget(output_data);
Err(format!(
"Msg: could not generate poseidon hash, Error: {:#?}",
err
))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}


@@ -0,0 +1,114 @@
#![cfg(target_arch = "wasm32")]
#[cfg(test)]
mod test {
use ark_std::{UniformRand, rand::thread_rng};
use rand::Rng;
use rln::circuit::Fr;
use rln::hashers::{ROUND_PARAMS, hash_to_field_le, poseidon_hash};
use rln::protocol::{
deserialize_identity_pair_be, deserialize_identity_pair_le, deserialize_identity_tuple_be,
deserialize_identity_tuple_le,
};
use rln::utils::{bytes_le_to_fr, vec_fr_to_bytes_le};
use rln_wasm_utils::{
wasm_extended_key_gen, wasm_hash, wasm_key_gen, wasm_poseidon_hash,
wasm_seeded_extended_key_gen, wasm_seeded_key_gen,
};
use wasm_bindgen_test::*;
#[wasm_bindgen_test]
fn test_wasm_key_gen() {
let result_le = wasm_key_gen(true);
assert!(result_le.is_ok());
deserialize_identity_pair_le(result_le.unwrap().to_vec());
let result_be = wasm_key_gen(false);
assert!(result_be.is_ok());
deserialize_identity_pair_be(result_be.unwrap().to_vec());
}
#[wasm_bindgen_test]
fn test_wasm_extended_key_gen() {
let result_le = wasm_extended_key_gen(true);
assert!(result_le.is_ok());
deserialize_identity_tuple_le(result_le.unwrap().to_vec());
let result_be = wasm_extended_key_gen(false);
assert!(result_be.is_ok());
deserialize_identity_tuple_be(result_be.unwrap().to_vec());
}
#[wasm_bindgen_test]
fn test_wasm_seeded_key_gen() {
// Create a test seed
let seed_data = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let seed = js_sys::Uint8Array::from(&seed_data[..]);
let result_le = wasm_seeded_key_gen(seed.clone(), true);
assert!(result_le.is_ok());
let fr_le = deserialize_identity_pair_le(result_le.unwrap().to_vec());
let result_be = wasm_seeded_key_gen(seed, false);
assert!(result_be.is_ok());
let fr_be = deserialize_identity_pair_be(result_be.unwrap().to_vec());
assert_eq!(fr_le, fr_be);
}
#[wasm_bindgen_test]
fn test_wasm_seeded_extended_key_gen() {
// Create a test seed
let seed_data = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let seed = js_sys::Uint8Array::from(&seed_data[..]);
let result_le = wasm_seeded_extended_key_gen(seed.clone(), true);
assert!(result_le.is_ok());
let fr_le = deserialize_identity_tuple_le(result_le.unwrap().to_vec());
let result_be = wasm_seeded_extended_key_gen(seed, false);
assert!(result_be.is_ok());
let fr_be = deserialize_identity_tuple_be(result_be.unwrap().to_vec());
assert_eq!(fr_le, fr_be);
}
#[wasm_bindgen_test]
fn test_wasm_hash() {
// Create test input data
let signal: [u8; 32] = [0; 32];
let input = js_sys::Uint8Array::from(&signal[..]);
let result_le = wasm_hash(input.clone(), true);
assert!(result_le.is_ok());
let serialized_hash = result_le.unwrap().to_vec();
let (hash1, _) = bytes_le_to_fr(&serialized_hash);
let hash2 = hash_to_field_le(&signal);
assert_eq!(hash1, hash2);
}
#[wasm_bindgen_test]
fn test_wasm_poseidon_hash() {
let mut rng = thread_rng();
let number_of_inputs = rng.gen_range(1..ROUND_PARAMS.len());
let mut inputs = Vec::with_capacity(number_of_inputs);
for _ in 0..number_of_inputs {
inputs.push(Fr::rand(&mut rng));
}
let inputs_ser = vec_fr_to_bytes_le(&inputs);
let input = js_sys::Uint8Array::from(&inputs_ser[..]);
let expected_hash = poseidon_hash(inputs.as_ref());
let result_le = wasm_poseidon_hash(input.clone(), true);
assert!(result_le.is_ok());
let serialized_hash = result_le.unwrap().to_vec();
let (received_hash, _) = bytes_le_to_fr(&serialized_hash);
assert_eq!(received_hash, expected_hash);
}
}

rln-wasm/.gitignore

@@ -1,6 +1,21 @@
# Common files to ignore in Rust projects
.DS_Store
.idea
*.log
tmp/
# Generated by Cargo will have compiled files and executables
/target
**/*.rs.bk
Cargo.lock
bin/
# Generated by rln-wasm
pkg/
wasm-pack.log
# Generated by Nix
result
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb


@@ -11,12 +11,15 @@ crate-type = ["cdylib", "rlib"]
rln = { path = "../rln", version = "0.8.0", default-features = false, features = [
"stateless",
] }
rln-wasm-utils = { path = "../rln-wasm-utils", version = "0.1.0", default-features = false }
zerokit_utils = { path = "../utils", version = "0.6.0", default-features = false }
num-bigint = { version = "0.4.6", default-features = false }
js-sys = "0.3.77"
wasm-bindgen = "0.2.100"
serde-wasm-bindgen = "0.6.5"
wasm-bindgen-rayon = { version = "1.2.0", optional = true }
wasm-bindgen-rayon = { version = "1.3.0", features = [
"no-bundler",
], optional = true }
# The `console_error_panic_hook` crate provides better debugging of panics by
# logging them with `console.error`. This is great for development, but requires


@@ -4,12 +4,7 @@ dependencies = ["pack_build", "pack_rename", "pack_resize"]
[tasks.build_parallel]
clear = true
dependencies = [
"pack_build_parallel",
"post_build_parallel",
"pack_rename",
"pack_resize",
]
dependencies = ["pack_build_parallel", "pack_rename", "pack_resize"]
[tasks.pack_build]
command = "wasm-pack"
@@ -34,14 +29,6 @@ args = [
"-Z",
"build-std=panic_abort,std",
]
[tasks.post_build_parallel]
script = '''
wasm-bindgen --target web --split-linked-modules --out-dir ./pkg ./target/wasm32-unknown-unknown/release/rln_wasm.wasm && \
find ./pkg/snippets -name "workerHelpers.worker.js" -exec sed -i.bak 's|from '\''\.\.\/\.\.\/\.\.\/'\'';|from "../../../rln_wasm.js";|g' {} \; -exec rm -f {}.bak \; && \
find ./pkg/snippets -name "workerHelpers.worker.js" -exec sed -i.bak 's|await initWbg(module, memory);|await initWbg({ module, memory });|g' {} \; -exec rm -f {}.bak \;
'''
[tasks.pack_rename]
script = "sed -i.bak 's/rln-wasm/zerokit-rln-wasm/g' pkg/package.json && rm pkg/package.json.bak"
@@ -77,8 +64,6 @@ args = [
"test",
"--release",
"--chrome",
# "--firefox",
# "--safari",
"--headless",
"--target",
"wasm32-unknown-unknown",
@@ -98,8 +83,6 @@ args = [
"test",
"--release",
"--chrome",
# "--firefox",
# "--safari",
"--headless",
"--target",
"wasm32-unknown-unknown",
@@ -114,11 +97,3 @@ dependencies = ["build_parallel"]
[tasks.bench]
disabled = true
[tasks.login]
command = "wasm-pack"
args = ["login"]
[tasks.publish]
command = "wasm-pack"
args = ["publish", "--access", "public", "--target", "web"]

View File

@@ -91,7 +91,7 @@ enabling multi-threaded execution in the browser.
> [!NOTE]
> Parallel support is not enabled by default due to WebAssembly and browser limitations. \
> Compiling this feature requires `nightly` Rust and the `wasm-bindgen-cli` tool.
> Compiling this feature requires `nightly` Rust.
### Build Setup
@@ -101,12 +101,6 @@ enabling multi-threaded execution in the browser.
rustup install nightly
```
#### Install `wasm-bindgen-cli`
```bash
cargo install wasm-bindgen-cli --version=0.2.100
```
### Build Commands
To enable parallel computation for WebAssembly threads, you can use the following command:

View File

@@ -1,5 +1,8 @@
// Node.js module compatible witness calculator
module.exports = async function builder(code, options) {
// File generated with https://github.com/iden3/circom
// following the instructions from:
// https://github.com/vacp2p/zerokit/tree/master/rln#advanced-custom-circuit-compilation
export async function builder(code, options) {
options = options || {};
let wasmModule;
@@ -102,7 +105,7 @@ module.exports = async function builder(code, options) {
// Then append the value to the message we are creating
msgStr += fromArray32(arr).toString();
}
};
}
class WitnessCalculator {
constructor(instance, sanityCheck) {

View File

@@ -1,335 +0,0 @@
// Browser compatible witness calculator
(function (global) {
async function builder(code, options) {
options = options || {};
let wasmModule;
try {
wasmModule = await WebAssembly.compile(code);
} catch (err) {
console.log(err);
console.log(
"\nTry to run circom --c in order to generate c++ code instead\n"
);
throw new Error(err);
}
let wc;
let errStr = "";
let msgStr = "";
const instance = await WebAssembly.instantiate(wasmModule, {
runtime: {
exceptionHandler: function (code) {
let err;
if (code == 1) {
err = "Signal not found.\n";
} else if (code == 2) {
err = "Too many signals set.\n";
} else if (code == 3) {
err = "Signal already set.\n";
} else if (code == 4) {
err = "Assert Failed.\n";
} else if (code == 5) {
err = "Not enough memory.\n";
} else if (code == 6) {
err = "Input signal array access exceeds the size.\n";
} else {
err = "Unknown error.\n";
}
throw new Error(err + errStr);
},
printErrorMessage: function () {
errStr += getMessage() + "\n";
// console.error(getMessage());
},
writeBufferMessage: function () {
const msg = getMessage();
// Any calls to `log()` will always end with a `\n`, so that's when we print and reset
if (msg === "\n") {
console.log(msgStr);
msgStr = "";
} else {
// If we've buffered other content, put a space in between the items
if (msgStr !== "") {
msgStr += " ";
}
// Then append the message to the message we are creating
msgStr += msg;
}
},
showSharedRWMemory: function () {
printSharedRWMemory();
},
},
});
const sanityCheck = options;
// options &&
// (
// options.sanityCheck ||
// options.logGetSignal ||
// options.logSetSignal ||
// options.logStartComponent ||
// options.logFinishComponent
// );
wc = new WitnessCalculator(instance, sanityCheck);
return wc;
function getMessage() {
var message = "";
var c = instance.exports.getMessageChar();
while (c != 0) {
message += String.fromCharCode(c);
c = instance.exports.getMessageChar();
}
return message;
}
function printSharedRWMemory() {
const shared_rw_memory_size = instance.exports.getFieldNumLen32();
const arr = new Uint32Array(shared_rw_memory_size);
for (let j = 0; j < shared_rw_memory_size; j++) {
arr[shared_rw_memory_size - 1 - j] =
instance.exports.readSharedRWMemory(j);
}
// If we've buffered other content, put a space in between the items
if (msgStr !== "") {
msgStr += " ";
}
// Then append the value to the message we are creating
msgStr += fromArray32(arr).toString();
}
}
class WitnessCalculator {
constructor(instance, sanityCheck) {
this.instance = instance;
this.version = this.instance.exports.getVersion();
this.n32 = this.instance.exports.getFieldNumLen32();
this.instance.exports.getRawPrime();
const arr = new Uint32Array(this.n32);
for (let i = 0; i < this.n32; i++) {
arr[this.n32 - 1 - i] = this.instance.exports.readSharedRWMemory(i);
}
this.prime = fromArray32(arr);
this.witnessSize = this.instance.exports.getWitnessSize();
this.sanityCheck = sanityCheck;
}
circom_version() {
return this.instance.exports.getVersion();
}
async _doCalculateWitness(input, sanityCheck) {
//input is assumed to be a map from signals to arrays of bigints
this.instance.exports.init(this.sanityCheck || sanityCheck ? 1 : 0);
const keys = Object.keys(input);
var input_counter = 0;
keys.forEach((k) => {
const h = fnvHash(k);
const hMSB = parseInt(h.slice(0, 8), 16);
const hLSB = parseInt(h.slice(8, 16), 16);
const fArr = flatArray(input[k]);
let signalSize = this.instance.exports.getInputSignalSize(hMSB, hLSB);
if (signalSize < 0) {
throw new Error(`Signal ${k} not found\n`);
}
if (fArr.length < signalSize) {
throw new Error(`Not enough values for input signal ${k}\n`);
}
if (fArr.length > signalSize) {
throw new Error(`Too many values for input signal ${k}\n`);
}
for (let i = 0; i < fArr.length; i++) {
const arrFr = toArray32(BigInt(fArr[i]) % this.prime, this.n32);
for (let j = 0; j < this.n32; j++) {
this.instance.exports.writeSharedRWMemory(
j,
arrFr[this.n32 - 1 - j]
);
}
try {
this.instance.exports.setInputSignal(hMSB, hLSB, i);
input_counter++;
} catch (err) {
// console.log(`After adding signal ${i} of ${k}`)
throw new Error(err);
}
}
});
if (input_counter < this.instance.exports.getInputSize()) {
throw new Error(
`Not all inputs have been set. Only ${input_counter} out of ${this.instance.exports.getInputSize()}`
);
}
}
async calculateWitness(input, sanityCheck) {
const w = [];
await this._doCalculateWitness(input, sanityCheck);
for (let i = 0; i < this.witnessSize; i++) {
this.instance.exports.getWitness(i);
const arr = new Uint32Array(this.n32);
for (let j = 0; j < this.n32; j++) {
arr[this.n32 - 1 - j] = this.instance.exports.readSharedRWMemory(j);
}
w.push(fromArray32(arr));
}
return w;
}
async calculateBinWitness(input, sanityCheck) {
const buff32 = new Uint32Array(this.witnessSize * this.n32);
const buff = new Uint8Array(buff32.buffer);
await this._doCalculateWitness(input, sanityCheck);
for (let i = 0; i < this.witnessSize; i++) {
this.instance.exports.getWitness(i);
const pos = i * this.n32;
for (let j = 0; j < this.n32; j++) {
buff32[pos + j] = this.instance.exports.readSharedRWMemory(j);
}
}
return buff;
}
async calculateWTNSBin(input, sanityCheck) {
const buff32 = new Uint32Array(
this.witnessSize * this.n32 + this.n32 + 11
);
const buff = new Uint8Array(buff32.buffer);
await this._doCalculateWitness(input, sanityCheck);
//"wtns"
buff[0] = "w".charCodeAt(0);
buff[1] = "t".charCodeAt(0);
buff[2] = "n".charCodeAt(0);
buff[3] = "s".charCodeAt(0);
//version 2
buff32[1] = 2;
//number of sections: 2
buff32[2] = 2;
//id section 1
buff32[3] = 1;
const n8 = this.n32 * 4;
//id section 1 length in 64bytes
const idSection1length = 8 + n8;
const idSection1lengthHex = idSection1length.toString(16);
buff32[4] = parseInt(idSection1lengthHex.slice(0, 8), 16);
buff32[5] = parseInt(idSection1lengthHex.slice(8, 16), 16);
//this.n32
buff32[6] = n8;
//prime number
this.instance.exports.getRawPrime();
var pos = 7;
for (let j = 0; j < this.n32; j++) {
buff32[pos + j] = this.instance.exports.readSharedRWMemory(j);
}
pos += this.n32;
// witness size
buff32[pos] = this.witnessSize;
pos++;
//id section 2
buff32[pos] = 2;
pos++;
// section 2 length
const idSection2length = n8 * this.witnessSize;
const idSection2lengthHex = idSection2length.toString(16);
buff32[pos] = parseInt(idSection2lengthHex.slice(0, 8), 16);
buff32[pos + 1] = parseInt(idSection2lengthHex.slice(8, 16), 16);
pos += 2;
for (let i = 0; i < this.witnessSize; i++) {
this.instance.exports.getWitness(i);
for (let j = 0; j < this.n32; j++) {
buff32[pos + j] = this.instance.exports.readSharedRWMemory(j);
}
pos += this.n32;
}
return buff;
}
}
function toArray32(rem, size) {
const res = []; //new Uint32Array(size); //has no unshift
const radix = BigInt(0x100000000);
while (rem) {
res.unshift(Number(rem % radix));
rem = rem / radix;
}
if (size) {
var i = size - res.length;
while (i > 0) {
res.unshift(0);
i--;
}
}
return res;
}
function fromArray32(arr) {
//returns a BigInt
var res = BigInt(0);
const radix = BigInt(0x100000000);
for (let i = 0; i < arr.length; i++) {
res = res * radix + BigInt(arr[i]);
}
return res;
}
function flatArray(a) {
var res = [];
fillArray(res, a);
return res;
function fillArray(res, a) {
if (Array.isArray(a)) {
for (let i = 0; i < a.length; i++) {
fillArray(res, a[i]);
}
} else {
res.push(a);
}
}
}
function fnvHash(str) {
const uint64_max = BigInt(2) ** BigInt(64);
let hash = BigInt("0xCBF29CE484222325");
for (var i = 0; i < str.length; i++) {
hash ^= BigInt(str[i].charCodeAt());
hash *= BigInt(0x100000001b3);
hash %= uint64_max;
}
let shash = hash.toString(16);
let n = 16 - shash.length;
shash = "0".repeat(n).concat(shash);
return shash;
}
// Make it globally available
global.witnessCalculatorBuilder = builder;
})(typeof self !== "undefined" ? self : window);

View File

@@ -2,7 +2,7 @@
use js_sys::{BigInt as JsBigInt, Object, Uint8Array};
use num_bigint::BigInt;
use rln::public::{hash, poseidon_hash, RLN};
use rln::public::RLN;
use std::vec::Vec;
use wasm_bindgen::prelude::*;
@@ -78,40 +78,6 @@ macro_rules! call_bool_method_with_error_msg {
}
}
// Macro to execute a function with arbitrary amount of arguments,
// First argument is the function to execute
// Rest are all other arguments to the method
macro_rules! fn_call_with_output_and_error_msg {
// this variant is needed for the case when
// there are zero other arguments
($func:ident, $error_msg:expr) => {
{
let mut output_data: Vec<u8> = Vec::new();
if let Err(err) = $func(&mut output_data) {
std::mem::forget(output_data);
Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
};
($func:ident, $error_msg:expr, $( $arg:expr ),* ) => {
{
let mut output_data: Vec<u8> = Vec::new();
if let Err(err) = $func($($arg.process()),*, &mut output_data) {
std::mem::forget(output_data);
Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
};
}
trait ProcessArg {
type ReturnType;
fn process(self) -> Self::ReturnType;
@@ -213,43 +179,6 @@ pub fn wasm_generate_rln_proof_with_witness(
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateMembershipKey)]
pub fn wasm_key_gen(ctx: *const RLNWrapper) -> Result<Uint8Array, String> {
call_with_output_and_error_msg!(ctx, key_gen, "could not generate membership keys")
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateExtendedMembershipKey)]
pub fn wasm_extended_key_gen(ctx: *const RLNWrapper) -> Result<Uint8Array, String> {
call_with_output_and_error_msg!(ctx, extended_key_gen, "could not generate membership keys")
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateSeededMembershipKey)]
pub fn wasm_seeded_key_gen(ctx: *const RLNWrapper, seed: Uint8Array) -> Result<Uint8Array, String> {
call_with_output_and_error_msg!(
ctx,
seeded_key_gen,
"could not generate membership key",
&seed.to_vec()[..]
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateSeededExtendedMembershipKey)]
pub fn wasm_seeded_extended_key_gen(
ctx: *const RLNWrapper,
seed: Uint8Array,
) -> Result<Uint8Array, String> {
call_with_output_and_error_msg!(
ctx,
seeded_extended_key_gen,
"could not generate membership key",
&seed.to_vec()[..]
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = recovedIDSecret)]
pub fn wasm_recover_id_secret(
@@ -281,17 +210,3 @@ pub fn wasm_verify_with_roots(
&roots.to_vec()[..]
)
}
#[wasm_bindgen(js_name = hash)]
pub fn wasm_hash(input: Uint8Array) -> Result<Uint8Array, String> {
fn_call_with_output_and_error_msg!(hash, "could not generate hash", &input.to_vec()[..])
}
#[wasm_bindgen(js_name = poseidonHash)]
pub fn wasm_poseidon_hash(input: Uint8Array) -> Result<Uint8Array, String> {
fn_call_with_output_and_error_msg!(
poseidon_hash,
"could not generate poseidon hash",
&input.to_vec()[..]
)
}

View File

@@ -3,14 +3,15 @@
#[cfg(test)]
mod tests {
use js_sys::{BigInt as JsBigInt, Date, Object, Uint8Array};
use rln::circuit::{Fr, TEST_TREE_HEIGHT};
use rln::hashers::{hash_to_field, poseidon_hash, PoseidonHash};
use rln::circuit::{Fr, TEST_TREE_DEPTH};
use rln::hashers::{hash_to_field_le, poseidon_hash, PoseidonHash};
use rln::protocol::{prepare_verify_input, rln_witness_from_values, serialize_witness};
use rln::utils::{bytes_le_to_fr, fr_to_bytes_le, IdSecret};
use rln_wasm::{
wasm_generate_rln_proof_with_witness, wasm_key_gen, wasm_new, wasm_rln_witness_to_json,
wasm_generate_rln_proof_with_witness, wasm_new, wasm_rln_witness_to_json,
wasm_verify_with_roots,
};
use rln_wasm_utils::wasm_key_gen;
use wasm_bindgen::{prelude::wasm_bindgen, JsValue};
use wasm_bindgen_test::{console_log, wasm_bindgen_test, wasm_bindgen_test_configure};
use zerokit_utils::{
@@ -29,7 +30,15 @@ mod tests {
}
export function initWitnessCalculator(jsCode) {
eval(jsCode);
const processedCode = jsCode
.replace(/export\s+async\s+function\s+builder/, 'async function builder')
.replace(/export\s*\{\s*builder\s*\};?/g, '');
const moduleFunc = new Function(processedCode + '\nreturn { builder };');
const witnessCalculatorModule = moduleFunc();
window.witnessCalculatorBuilder = witnessCalculatorModule.builder;
if (typeof window.witnessCalculatorBuilder !== 'function') {
return false;
}
@@ -63,12 +72,12 @@ mod tests {
async fn calculateWitness(circom_data: &[u8], inputs: Object) -> Result<JsValue, JsValue>;
}
const WITNESS_CALCULATOR_JS: &str = include_str!("../resources/witness_calculator_browser.js");
const WITNESS_CALCULATOR_JS: &str = include_str!("../resources/witness_calculator.js");
const ARKZKEY_BYTES: &[u8] =
include_bytes!("../../rln/resources/tree_height_20/rln_final.arkzkey");
include_bytes!("../../rln/resources/tree_depth_10/rln_final.arkzkey");
const CIRCOM_BYTES: &[u8] = include_bytes!("../../rln/resources/tree_height_20/rln.wasm");
const CIRCOM_BYTES: &[u8] = include_bytes!("../../rln/resources/tree_depth_10/rln.wasm");
wasm_bindgen_test_configure!(run_in_browser);
@@ -89,7 +98,7 @@ mod tests {
.expect("Failed to initialize thread pool");
}
// Initialize the witness calculator
// Initialize witness calculator
initWitnessCalculator(WITNESS_CALCULATOR_JS)
.expect("Failed to initialize witness calculator");
@@ -108,23 +117,23 @@ mod tests {
// Create RLN instance for other benchmarks
let rln_instance = wasm_new(zkey).expect("Failed to create RLN instance");
let mut tree: OptimalMerkleTree<PoseidonHash> =
OptimalMerkleTree::default(TEST_TREE_HEIGHT).expect("Failed to create tree");
OptimalMerkleTree::default(TEST_TREE_DEPTH).expect("Failed to create tree");
// Benchmark wasm_key_gen
let start_wasm_key_gen = Date::now();
for _ in 0..iterations {
let _ = wasm_key_gen(rln_instance).expect("Failed to generate keys");
let _ = wasm_key_gen(true).expect("Failed to generate keys");
}
let wasm_key_gen_result = Date::now() - start_wasm_key_gen;
// Generate identity pair for other benchmarks
let mem_keys = wasm_key_gen(rln_instance).expect("Failed to generate keys");
let mem_keys = wasm_key_gen(true).expect("Failed to generate keys");
let id_key = mem_keys.subarray(0, 32);
let (identity_secret_hash, _) = IdSecret::from_bytes_le(&id_key.to_vec());
let (id_commitment, _) = bytes_le_to_fr(&mem_keys.subarray(32, 64).to_vec());
let epoch = hash_to_field(b"test-epoch");
let rln_identifier = hash_to_field(b"test-rln-identifier");
let epoch = hash_to_field_le(b"test-epoch");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);
let identity_index = tree.leaves_set();
@@ -137,7 +146,7 @@ mod tests {
let message_id = Fr::from(0);
let signal: [u8; 32] = [0; 32];
let x = hash_to_field(&signal);
let x = hash_to_field_le(&signal);
let merkle_proof: OptimalMerkleProof<PoseidonHash> = tree
.proof(identity_index)

View File

@@ -4,36 +4,54 @@
#[cfg(test)]
mod tests {
use js_sys::{BigInt as JsBigInt, Date, Object, Uint8Array};
use rln::circuit::{Fr, TEST_TREE_HEIGHT};
use rln::hashers::{hash_to_field, poseidon_hash, PoseidonHash};
use rln::circuit::{Fr, TEST_TREE_DEPTH};
use rln::hashers::{hash_to_field_le, poseidon_hash, PoseidonHash};
use rln::protocol::{prepare_verify_input, rln_witness_from_values, serialize_witness};
use rln::utils::{bytes_le_to_fr, fr_to_bytes_le, IdSecret};
use rln_wasm::{
wasm_generate_rln_proof_with_witness, wasm_key_gen, wasm_new, wasm_rln_witness_to_json,
wasm_generate_rln_proof_with_witness, wasm_new, wasm_rln_witness_to_json,
wasm_verify_with_roots,
};
use rln_wasm_utils::wasm_key_gen;
use wasm_bindgen::{prelude::wasm_bindgen, JsValue};
use wasm_bindgen_test::{console_log, wasm_bindgen_test};
use zerokit_utils::{
OptimalMerkleProof, OptimalMerkleTree, ZerokitMerkleProof, ZerokitMerkleTree,
};
const WITNESS_CALCULATOR_JS: &str = include_str!("../resources/witness_calculator.js");
#[wasm_bindgen(inline_js = r#"
const fs = require("fs");
let witnessCalculatorModule = null;
module.exports = {
initWitnessCalculator: function(code) {
const processedCode = code
.replace(/export\s+async\s+function\s+builder/, 'async function builder')
.replace(/export\s*\{\s*builder\s*\};?/g, '');
const moduleFunc = new Function(processedCode + '\nreturn { builder };');
witnessCalculatorModule = moduleFunc();
if (typeof witnessCalculatorModule.builder !== 'function') {
return false;
}
return true;
},
readFile: function (path) {
return fs.readFileSync(path);
},
calculateWitness: async function (circom_path, inputs) {
const wc = require("resources/witness_calculator_node.js");
const wasmFile = fs.readFileSync(circom_path);
const wasmFileBuffer = wasmFile.slice(
wasmFile.byteOffset,
wasmFile.byteOffset + wasmFile.byteLength
);
const witnessCalculator = await wc(wasmFileBuffer);
const witnessCalculator = await witnessCalculatorModule.builder(wasmFileBuffer);
const calculatedWitness = await witnessCalculator.calculateWitness(
inputs,
false
@@ -45,6 +63,9 @@ mod tests {
};
"#)]
extern "C" {
#[wasm_bindgen(catch)]
fn initWitnessCalculator(code: &str) -> Result<bool, JsValue>;
#[wasm_bindgen(catch)]
fn readFile(path: &str) -> Result<Uint8Array, JsValue>;
@@ -52,12 +73,16 @@ mod tests {
async fn calculateWitness(circom_path: &str, input: Object) -> Result<JsValue, JsValue>;
}
const ARKZKEY_PATH: &str = "../rln/resources/tree_height_20/rln_final.arkzkey";
const ARKZKEY_PATH: &str = "../rln/resources/tree_depth_10/rln_final.arkzkey";
const CIRCOM_PATH: &str = "../rln/resources/tree_height_20/rln.wasm";
const CIRCOM_PATH: &str = "../rln/resources/tree_depth_10/rln.wasm";
#[wasm_bindgen_test]
pub async fn rln_wasm_benchmark() {
// Initialize witness calculator
initWitnessCalculator(WITNESS_CALCULATOR_JS)
.expect("Failed to initialize witness calculator");
let mut results = String::from("\nbenchmarks:\n");
let iterations = 10;
@@ -73,23 +98,23 @@ mod tests {
// Create RLN instance for other benchmarks
let rln_instance = wasm_new(zkey).expect("Failed to create RLN instance");
let mut tree: OptimalMerkleTree<PoseidonHash> =
OptimalMerkleTree::default(TEST_TREE_HEIGHT).expect("Failed to create tree");
OptimalMerkleTree::default(TEST_TREE_DEPTH).expect("Failed to create tree");
// Benchmark wasm_key_gen
let start_wasm_key_gen = Date::now();
for _ in 0..iterations {
let _ = wasm_key_gen(rln_instance).expect("Failed to generate keys");
let _ = wasm_key_gen(true).expect("Failed to generate keys");
}
let wasm_key_gen_result = Date::now() - start_wasm_key_gen;
// Generate identity pair for other benchmarks
let mem_keys = wasm_key_gen(rln_instance).expect("Failed to generate keys");
let mem_keys = wasm_key_gen(true).expect("Failed to generate keys");
let id_key = mem_keys.subarray(0, 32);
let (identity_secret_hash, _) = IdSecret::from_bytes_le(&id_key.to_vec());
let (id_commitment, _) = bytes_le_to_fr(&mem_keys.subarray(32, 64).to_vec());
let epoch = hash_to_field(b"test-epoch");
let rln_identifier = hash_to_field(b"test-rln-identifier");
let epoch = hash_to_field_le(b"test-epoch");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);
let identity_index = tree.leaves_set();
@@ -102,7 +127,7 @@ mod tests {
let message_id = Fr::from(0);
let signal: [u8; 32] = [0; 32];
let x = hash_to_field(&signal);
let x = hash_to_field_le(&signal);
let merkle_proof: OptimalMerkleProof<PoseidonHash> = tree
.proof(identity_index)

View File

@@ -30,7 +30,7 @@ ark-serialize = { version = "0.5.0", default-features = false }
thiserror = "2.0.12"
# utilities
rayon = { version = "1.7.0" }
rayon = { version = "1.10.0", optional = true }
byteorder = "1.5.0"
cfg-if = "1.0"
num-bigint = { version = "0.4.6", default-features = false, features = ["std"] }
@@ -42,6 +42,7 @@ rand_chacha = "0.3.1"
ruint = { version = "1.15.0", features = ["rand", "serde", "ark-ff-04"] }
tiny-keccak = { version = "2.0.2", features = ["keccak"] }
zeroize = "1.8"
tempfile = "3.21.0"
utils = { package = "zerokit_utils", version = "0.6.0", path = "../utils", default-features = false }
# serialization
@@ -58,6 +59,7 @@ criterion = { version = "0.7.0", features = ["html_reports"] }
default = ["parallel", "pmtree-ft"]
stateless = []
parallel = [
"rayon",
"utils/parallel",
"ark-ff/parallel",
"ark-ec/parallel",
@@ -66,10 +68,9 @@ parallel = [
"ark-groth16/parallel",
"ark-serialize/parallel",
]
fullmerkletree = []
optimalmerkletree = []
# Note: pmtree feature is still experimental
pmtree-ft = ["utils/pmtree-ft"]
fullmerkletree = [] # Pre-allocated tree, fastest access
optimalmerkletree = [] # Sparse storage, memory efficient
pmtree-ft = ["utils/pmtree-ft"] # Persistent storage, disk-based
[[bench]]
name = "pmtree_benchmark"

View File

@@ -53,11 +53,11 @@ use serde_json::json;
fn main() {
// 1. Initialize RLN with parameters:
// - the tree height;
// - the tree depth;
// - the tree config, if it is not defined, the default value will be set
let tree_height = 20;
let tree_depth = 10;
let input = Cursor::new(json!({}).to_string());
let mut rln = RLN::new(tree_height, input).unwrap();
let mut rln = RLN::new(tree_depth, input).unwrap();
// 2. Generate an identity keypair
let (identity_secret_hash, id_commitment) = keygen();
@@ -141,6 +141,10 @@ for one application to be re-used in another one.
- Browser and Node.js compatibility
- Optional parallel feature support using [wasm-bindgen-rayon](https://github.com/RReverser/wasm-bindgen-rayon)
- Headless browser testing capabilities
- **Merkle Tree Implementations**: Multiple tree variants optimized for different use cases:
- **Full Merkle Tree**: Fastest access with complete pre-allocated tree in memory. Best for frequent random access (enable with `fullmerkletree` feature).
- **Optimal Merkle Tree**: Memory-efficient sparse storage using HashMap. Ideal for partially populated trees (enable with `optimalmerkletree` feature).
- **Persistent Merkle Tree**: Disk-based storage with [sled](https://github.com/spacejam/sled) for persistence across application restarts and large datasets (enable with `pmtree-ft` feature).
## Building and Testing

View File

@@ -1,6 +1,6 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use rln::{
circuit::{Fr, TEST_TREE_HEIGHT},
circuit::{Fr, TEST_TREE_DEPTH},
hashers::PoseidonHash,
};
use utils::{FullMerkleTree, OptimalMerkleTree, ZerokitMerkleTree};
@@ -10,9 +10,9 @@ pub fn get_leaves(n: u32) -> Vec<Fr> {
}
pub fn optimal_merkle_tree_poseidon_benchmark(c: &mut Criterion) {
c.bench_function("OptimalMerkleTree::<Poseidon>::full_height_gen", |b| {
c.bench_function("OptimalMerkleTree::<Poseidon>::full_depth_gen", |b| {
b.iter(|| {
OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
})
});
@@ -20,7 +20,7 @@ pub fn optimal_merkle_tree_poseidon_benchmark(c: &mut Criterion) {
for &n in [1u32, 10, 100].iter() {
let leaves = get_leaves(n);
let mut tree = OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
let mut tree = OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
group.bench_function(
BenchmarkId::new("OptimalMerkleTree::<Poseidon>::set", n),
|b| {
@@ -41,9 +41,9 @@ pub fn optimal_merkle_tree_poseidon_benchmark(c: &mut Criterion) {
}
pub fn full_merkle_tree_poseidon_benchmark(c: &mut Criterion) {
c.bench_function("FullMerkleTree::<Poseidon>::full_height_gen", |b| {
c.bench_function("FullMerkleTree::<Poseidon>::full_depth_gen", |b| {
b.iter(|| {
FullMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
FullMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
})
});
@@ -51,7 +51,7 @@ pub fn full_merkle_tree_poseidon_benchmark(c: &mut Criterion) {
for &n in [1u32, 10, 100].iter() {
let leaves = get_leaves(n);
let mut tree = FullMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
let mut tree = FullMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
group.bench_function(
BenchmarkId::new("FullMerkleTree::<Poseidon>::set", n),
|b| {

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -19,17 +19,17 @@ use {ark_ff::Field, ark_serialize::CanonicalDeserialize, ark_serialize::Canonica
use crate::utils::FrOrSecret;
pub const ARKZKEY_BYTES: &[u8] = include_bytes!("../../resources/tree_height_20/rln_final.arkzkey");
pub const ARKZKEY_BYTES: &[u8] = include_bytes!("../../resources/tree_depth_10/rln_final.arkzkey");
#[cfg(not(target_arch = "wasm32"))]
const GRAPH_BYTES: &[u8] = include_bytes!("../../resources/tree_height_20/graph.bin");
const GRAPH_BYTES: &[u8] = include_bytes!("../../resources/tree_depth_10/graph.bin");
lazy_static! {
static ref ARKZKEY: (ProvingKey<Curve>, ConstraintMatrices<Fr>) =
read_arkzkey_from_bytes_uncompressed(ARKZKEY_BYTES).expect("Failed to read arkzkey");
}
pub const TEST_TREE_HEIGHT: usize = 20;
pub const TEST_TREE_DEPTH: usize = 10;
// The following types define the pairing friendly elliptic curve, the underlying finite fields and groups default to this module
// Note that proofs are serialized assuming Fr to be 4x8 = 32 bytes in size. Hence, changing to a curve with different encoding will make proof verification to fail

View File

@@ -19,6 +19,8 @@ pub enum ConversionError {
ToUsize(#[from] TryFromIntError),
#[error("{0}")]
FromSlice(#[from] TryFromSliceError),
#[error("Input data too short: expected at least {expected} bytes, got {actual} bytes")]
InsufficientData { expected: usize, actual: usize },
}
#[derive(Error, Debug)]

View File

@@ -2,7 +2,12 @@
use std::slice;
use crate::public::{hash as public_hash, poseidon_hash as public_poseidon_hash, RLN};
use crate::public::{
extended_key_gen as public_extended_key_gen, hash as public_hash, key_gen as public_key_gen,
poseidon_hash as public_poseidon_hash,
seeded_extended_key_gen as public_seeded_extended_key_gen,
seeded_key_gen as public_seeded_key_gen, RLN,
};
// Macro to call methods with arbitrary amount of arguments,
// First argument to the macro is context,
@@ -80,23 +85,48 @@ macro_rules! call_with_output_arg {
// Second argument is the output buffer argument
// The remaining arguments are all other inputs to the method
macro_rules! no_ctx_call_with_output_arg {
($method:ident, $output_arg:expr, $( $arg:expr ),* ) => {
{
let mut output_data: Vec<u8> = Vec::new();
match $method($($arg.process()),*, &mut output_data) {
Ok(()) => {
unsafe { *$output_arg = Buffer::from(&output_data[..]) };
std::mem::forget(output_data);
true
}
Err(err) => {
std::mem::forget(output_data);
eprintln!("execution error: {err}");
false
}
($method:ident, $output_arg:expr, $input_arg:expr, $endianness_arg:expr) => {{
let mut output_data: Vec<u8> = Vec::new();
match $method(
$input_arg.process(),
&mut output_data,
$endianness_arg.process(),
) {
Ok(()) => {
unsafe { *$output_arg = Buffer::from(&output_data[..]) };
std::mem::forget(output_data);
true
}
Err(err) => {
std::mem::forget(output_data);
eprintln!("execution error: {err}");
false
}
}
}
}};
}
// Macro to call methods with arbitrary amount of arguments,
// which are not implemented in a ctx RLN object
// First argument is the method to call
// Second argument is the output buffer argument
// The remaining arguments are all other inputs to the method
macro_rules! no_ctx_call_with_output_arg_and_endianness {
($method:ident, $output_arg:expr, $endianness_arg:expr) => {{
let mut output_data: Vec<u8> = Vec::new();
match $method(&mut output_data, $endianness_arg.process()) {
Ok(()) => {
unsafe { *$output_arg = Buffer::from(&output_data[..]) };
std::mem::forget(output_data);
true
}
Err(err) => {
std::mem::forget(output_data);
eprintln!("execution error: {err}");
false
}
}
}};
}
// Macro to call methods with arbitrary amount of arguments,
@@ -158,6 +188,13 @@ impl ProcessArg for *mut RLN {
}
}
impl ProcessArg for bool {
type ReturnType = bool;
fn process(self) -> Self::ReturnType {
self
}
}
///// Buffer struct is taken from
///// <https://github.com/celo-org/celo-threshold-bls-rs/blob/master/crates/threshold-bls-ffi/src/ffi.rs>
/////
@@ -195,8 +232,8 @@ impl<'a> From<&Buffer> for &'a [u8] {
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[cfg(not(feature = "stateless"))]
#[no_mangle]
pub extern "C" fn new(tree_height: usize, input_buffer: *const Buffer, ctx: *mut *mut RLN) -> bool {
match RLN::new(tree_height, input_buffer.process()) {
pub extern "C" fn new(tree_depth: usize, input_buffer: *const Buffer, ctx: *mut *mut RLN) -> bool {
match RLN::new(tree_depth, input_buffer.process()) {
Ok(rln) => {
unsafe { *ctx = Box::into_raw(Box::new(rln)) };
true
@@ -228,14 +265,14 @@ pub extern "C" fn new(ctx: *mut *mut RLN) -> bool {
#[cfg(not(feature = "stateless"))]
#[no_mangle]
pub extern "C" fn new_with_params(
tree_height: usize,
tree_depth: usize,
zkey_buffer: *const Buffer,
graph_data: *const Buffer,
tree_config: *const Buffer,
ctx: *mut *mut RLN,
) -> bool {
match RLN::new_with_params(
tree_height,
tree_depth,
zkey_buffer.process().to_vec(),
graph_data.process().to_vec(),
tree_config.process(),
@@ -280,8 +317,8 @@ pub extern "C" fn new_with_params(
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
#[cfg(not(feature = "stateless"))]
pub extern "C" fn set_tree(ctx: *mut RLN, tree_height: usize) -> bool {
call!(ctx, set_tree, tree_height)
pub extern "C" fn set_tree(ctx: *mut RLN, tree_depth: usize) -> bool {
call!(ctx, set_tree, tree_depth)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
@@ -460,38 +497,6 @@ pub extern "C" fn verify_with_roots(
////////////////////////////////////////////////////////
// Utils
////////////////////////////////////////////////////////
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn key_gen(ctx: *const RLN, output_buffer: *mut Buffer) -> bool {
call_with_output_arg!(ctx, key_gen, output_buffer)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn seeded_key_gen(
ctx: *const RLN,
input_buffer: *const Buffer,
output_buffer: *mut Buffer,
) -> bool {
call_with_output_arg!(ctx, seeded_key_gen, output_buffer, input_buffer)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn extended_key_gen(ctx: *const RLN, output_buffer: *mut Buffer) -> bool {
call_with_output_arg!(ctx, extended_key_gen, output_buffer)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn seeded_extended_key_gen(
ctx: *const RLN,
input_buffer: *const Buffer,
output_buffer: *mut Buffer,
) -> bool {
call_with_output_arg!(ctx, seeded_extended_key_gen, output_buffer, input_buffer)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn recover_id_secret(
@@ -534,14 +539,77 @@ pub extern "C" fn flush(ctx: *mut RLN) -> bool {
call!(ctx, flush)
}
////////////////////////////////////////////////////////
// Utils APIs
////////////////////////////////////////////////////////
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn hash(input_buffer: *const Buffer, output_buffer: *mut Buffer) -> bool {
no_ctx_call_with_output_arg!(public_hash, output_buffer, input_buffer)
pub extern "C" fn hash(
input_buffer: *const Buffer,
output_buffer: *mut Buffer,
is_little_endian: bool,
) -> bool {
no_ctx_call_with_output_arg!(public_hash, output_buffer, input_buffer, is_little_endian)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn poseidon_hash(input_buffer: *const Buffer, output_buffer: *mut Buffer) -> bool {
no_ctx_call_with_output_arg!(public_poseidon_hash, output_buffer, input_buffer)
pub extern "C" fn poseidon_hash(
input_buffer: *const Buffer,
output_buffer: *mut Buffer,
is_little_endian: bool,
) -> bool {
no_ctx_call_with_output_arg!(
public_poseidon_hash,
output_buffer,
input_buffer,
is_little_endian
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn key_gen(output_buffer: *mut Buffer, is_little_endian: bool) -> bool {
no_ctx_call_with_output_arg_and_endianness!(public_key_gen, output_buffer, is_little_endian)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn seeded_key_gen(
input_buffer: *const Buffer,
output_buffer: *mut Buffer,
is_little_endian: bool,
) -> bool {
no_ctx_call_with_output_arg!(
public_seeded_key_gen,
output_buffer,
input_buffer,
is_little_endian
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn extended_key_gen(output_buffer: *mut Buffer, is_little_endian: bool) -> bool {
no_ctx_call_with_output_arg_and_endianness!(
public_extended_key_gen,
output_buffer,
is_little_endian
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn seeded_extended_key_gen(
input_buffer: *const Buffer,
output_buffer: *mut Buffer,
is_little_endian: bool,
) -> bool {
no_ctx_call_with_output_arg!(
public_seeded_extended_key_gen,
output_buffer,
input_buffer,
is_little_endian
)
}

View File

@@ -1,5 +1,8 @@
/// This crate instantiates the Poseidon hash algorithm.
use crate::{circuit::Fr, utils::bytes_le_to_fr};
use crate::{
circuit::Fr,
utils::{bytes_be_to_fr, bytes_le_to_fr},
};
use once_cell::sync::Lazy;
use tiny_keccak::{Hasher, Keccak};
use utils::poseidon::Poseidon;
@@ -45,7 +48,7 @@ impl utils::merkle_tree::Hasher for PoseidonHash {
}
/// Hashes arbitrary signal to the underlying prime field.
pub fn hash_to_field(signal: &[u8]) -> Fr {
pub fn hash_to_field_le(signal: &[u8]) -> Fr {
// We hash the input signal using Keccak256
let mut hash = [0; 32];
let mut hasher = Keccak::v256();
@@ -56,3 +59,19 @@ pub fn hash_to_field(signal: &[u8]) -> Fr {
let (el, _) = bytes_le_to_fr(hash.as_ref());
el
}
/// Hashes arbitrary signal to the underlying prime field.
pub fn hash_to_field_be(signal: &[u8]) -> Fr {
// We hash the input signal using Keccak256
let mut hash = [0; 32];
let mut hasher = Keccak::v256();
hasher.update(signal);
hasher.finalize(&mut hash);
// Reverse the bytes to get big endian representation
hash.reverse();
// We export the hash as a field element
let (el, _) = bytes_be_to_fr(hash.as_ref());
el
}

View File

@@ -2,6 +2,7 @@ use serde_json::Value;
use std::fmt::Debug;
use std::path::PathBuf;
use std::str::FromStr;
use tempfile::Builder;
use crate::circuit::Fr;
use crate::hashers::{poseidon_hash, PoseidonHash};
@@ -50,16 +51,100 @@ impl Hasher for PoseidonHash {
}
}
fn get_tmp_path() -> PathBuf {
std::env::temp_dir().join(format!("pmtree-{}", rand::random::<u64>()))
fn default_tmp_path() -> PathBuf {
Builder::new()
.prefix("pmtree-")
.tempfile()
.expect("Failed to create temp file")
.into_temp_path()
.to_path_buf()
}
fn get_tmp() -> bool {
true
const DEFAULT_TEMPORARY: bool = true;
const DEFAULT_CACHE_CAPACITY: u64 = 1073741824; // 1 Gigabyte
const DEFAULT_FLUSH_EVERY_MS: u64 = 500; // 500 Milliseconds
const DEFAULT_MODE: Mode = Mode::HighThroughput;
const DEFAULT_USE_COMPRESSION: bool = false;
pub struct PmtreeConfigBuilder {
path: Option<PathBuf>,
temporary: bool,
cache_capacity: u64,
flush_every_ms: u64,
mode: Mode,
use_compression: bool,
}
impl PmtreeConfigBuilder {
fn new() -> Self {
PmtreeConfigBuilder {
path: None,
temporary: DEFAULT_TEMPORARY,
cache_capacity: DEFAULT_CACHE_CAPACITY,
flush_every_ms: DEFAULT_FLUSH_EVERY_MS,
mode: DEFAULT_MODE,
use_compression: DEFAULT_USE_COMPRESSION,
}
}
pub fn path<P: Into<PathBuf>>(mut self, path: P) -> Self {
self.path = Some(path.into());
self
}
pub fn temporary(mut self, temporary: bool) -> Self {
self.temporary = temporary;
self
}
pub fn cache_capacity(mut self, capacity: u64) -> Self {
self.cache_capacity = capacity;
self
}
pub fn flush_every_ms(mut self, ms: u64) -> Self {
self.flush_every_ms = ms;
self
}
pub fn mode(mut self, mode: Mode) -> Self {
self.mode = mode;
self
}
pub fn use_compression(mut self, compression: bool) -> Self {
self.use_compression = compression;
self
}
pub fn build(self) -> Result<PmtreeConfig, FromConfigError> {
let path = match (self.temporary, self.path) {
(true, None) => default_tmp_path(),
(false, None) => return Err(FromConfigError::MissingPath),
(true, Some(path)) if path.exists() => return Err(FromConfigError::PathExists),
(_, Some(path)) => path,
};
let config = Config::new()
.temporary(self.temporary)
.path(path)
.cache_capacity(self.cache_capacity)
.flush_every_ms(Some(self.flush_every_ms))
.mode(self.mode)
.use_compression(self.use_compression);
Ok(PmtreeConfig(config))
}
}
pub struct PmtreeConfig(Config);
impl PmtreeConfig {
pub fn builder() -> PmtreeConfigBuilder {
PmtreeConfigBuilder::new()
}
}
impl FromStr for PmtreeConfig {
type Err = FromConfigError;
@@ -78,18 +163,16 @@ impl FromStr for PmtreeConfig {
};
let use_compression = config["use_compression"].as_bool();
if temporary.is_some()
&& path.is_some()
&& temporary.unwrap()
&& path.as_ref().unwrap().exists()
{
return Err(FromConfigError::PathExists);
if let (Some(true), Some(path)) = (temporary, path.as_ref()) {
if path.exists() {
return Err(FromConfigError::PathExists);
}
}
let config = Config::new()
.temporary(temporary.unwrap_or(get_tmp()))
.path(path.unwrap_or(get_tmp_path()))
.cache_capacity(cache_capacity.unwrap_or(1024 * 1024 * 1024))
.temporary(temporary.unwrap_or(DEFAULT_TEMPORARY))
.path(path.unwrap_or(default_tmp_path()))
.cache_capacity(cache_capacity.unwrap_or(DEFAULT_CACHE_CAPACITY))
.flush_every_ms(flush_every_ms)
.mode(mode)
.use_compression(use_compression.unwrap_or(false));
@@ -99,16 +182,9 @@ impl FromStr for PmtreeConfig {
impl Default for PmtreeConfig {
fn default() -> Self {
let tmp_path = get_tmp_path();
PmtreeConfig(
Config::new()
.temporary(true)
.path(tmp_path)
.cache_capacity(150_000)
.mode(Mode::HighThroughput)
.use_compression(false)
.flush_every_ms(Some(12_000)),
)
Self::builder()
.build()
.expect("Default configuration should never fail")
}
}
impl Debug for PmtreeConfig {
@@ -398,3 +474,32 @@ impl ZerokitMerkleProof for PmTreeProof {
self.proof.compute_root_from(leaf)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_pmtree_json_config() {
let json = r#"
{
"path": "pmtree-123456",
"temporary": false,
"cache_capacity": 1073741824,
"flush_every_ms": 500,
"mode": "HighThroughput",
"use_compression": false
}"#;
let _: PmtreeConfig = json.parse().expect("Failed to parse JSON config");
let _ = PmtreeConfig::builder()
.path(default_tmp_path())
.temporary(DEFAULT_TEMPORARY)
.cache_capacity(DEFAULT_CACHE_CAPACITY)
.mode(DEFAULT_MODE)
.use_compression(DEFAULT_USE_COMPRESSION)
.build()
.expect("Failed to build config");
}
}

View File

@@ -9,11 +9,12 @@ use {
use crate::circuit::{calculate_rln_witness, qap::CircomReduction, Curve};
use crate::error::{ComputeIdSecretError, ProofError, ProtocolError};
use crate::hashers::{hash_to_field, poseidon_hash};
use crate::hashers::{hash_to_field_le, poseidon_hash};
use crate::public::RLN_IDENTIFIER;
use crate::utils::{
bytes_le_to_fr, bytes_le_to_vec_fr, bytes_le_to_vec_u8, fr_byte_size, fr_to_bytes_le,
normalize_usize, to_bigint, vec_fr_to_bytes_le, vec_u8_to_bytes_le, FrOrSecret, IdSecret,
bytes_be_to_fr, bytes_le_to_fr, bytes_le_to_vec_fr, bytes_le_to_vec_u8, fr_byte_size,
fr_to_bytes_le, normalize_usize_le, to_bigint, vec_fr_to_bytes_le, vec_u8_to_bytes_le,
FrOrSecret, IdSecret,
};
use ark_bn254::{Fr, FrConfig};
use ark_ff::{AdditiveGroup, Fp, MontBackend};
@@ -71,14 +72,21 @@ pub fn deserialize_field_element(serialized: Vec<u8>) -> Fr {
element
}
pub fn deserialize_identity_pair(serialized: Vec<u8>) -> (Fr, Fr) {
pub fn deserialize_identity_pair_le(serialized: Vec<u8>) -> (Fr, Fr) {
let (identity_secret_hash, read) = bytes_le_to_fr(&serialized);
let (id_commitment, _) = bytes_le_to_fr(&serialized[read..]);
(identity_secret_hash, id_commitment)
}
pub fn deserialize_identity_tuple(serialized: Vec<u8>) -> (Fr, Fr, Fr, Fr) {
pub fn deserialize_identity_pair_be(serialized: Vec<u8>) -> (Fr, Fr) {
let (identity_secret_hash, read) = bytes_be_to_fr(&serialized);
let (id_commitment, _) = bytes_be_to_fr(&serialized[read..]);
(identity_secret_hash, id_commitment)
}
pub fn deserialize_identity_tuple_le(serialized: Vec<u8>) -> (Fr, Fr, Fr, Fr) {
let mut all_read = 0;
let (identity_trapdoor, read) = bytes_le_to_fr(&serialized[all_read..]);
@@ -100,6 +108,28 @@ pub fn deserialize_identity_tuple(serialized: Vec<u8>) -> (Fr, Fr, Fr, Fr) {
)
}
pub fn deserialize_identity_tuple_be(serialized: Vec<u8>) -> (Fr, Fr, Fr, Fr) {
let mut all_read = 0;
let (identity_trapdoor, read) = bytes_be_to_fr(&serialized[all_read..]);
all_read += read;
let (identity_nullifier, read) = bytes_be_to_fr(&serialized[all_read..]);
all_read += read;
let (identity_secret_hash, read) = bytes_be_to_fr(&serialized[all_read..]);
all_read += read;
let (identity_commitment, _) = bytes_be_to_fr(&serialized[all_read..]);
(
identity_trapdoor,
identity_nullifier,
identity_secret_hash,
identity_commitment,
)
}
/// Serializes witness
///
/// # Errors
@@ -223,7 +253,7 @@ pub fn proof_inputs_to_rln_witness(
let path_elements = merkle_proof.get_path_elements();
let identity_path_index = merkle_proof.get_path_index();
let x = hash_to_field(&signal);
let x = hash_to_field_le(&signal);
Ok((
RLNWitnessInput {
@@ -266,19 +296,19 @@ pub fn rln_witness_from_values(
})
}
pub fn random_rln_witness(tree_height: usize) -> RLNWitnessInput {
pub fn random_rln_witness(tree_depth: usize) -> RLNWitnessInput {
let mut rng = thread_rng();
let identity_secret = IdSecret::rand(&mut rng);
let x = hash_to_field(&rng.gen::<[u8; 32]>());
let epoch = hash_to_field(&rng.gen::<[u8; 32]>());
let rln_identifier = hash_to_field(RLN_IDENTIFIER); //hash_to_field(&rng.gen::<[u8; 32]>());
let x = hash_to_field_le(&rng.gen::<[u8; 32]>());
let epoch = hash_to_field_le(&rng.gen::<[u8; 32]>());
let rln_identifier = hash_to_field_le(RLN_IDENTIFIER);
let mut path_elements: Vec<Fr> = Vec::new();
let mut identity_path_index: Vec<u8> = Vec::new();
for _ in 0..tree_height {
path_elements.push(hash_to_field(&rng.gen::<[u8; 32]>()));
for _ in 0..tree_depth {
path_elements.push(hash_to_field_le(&rng.gen::<[u8; 32]>()));
identity_path_index.push(rng.gen_range(0..2) as u8);
}
@@ -395,11 +425,11 @@ pub fn prepare_prove_input(
let mut serialized = Vec::with_capacity(fr_byte_size() * 4 + 16 + signal.len()); // length of 4 fr elements + 16 bytes (id_index + len) + signal length
serialized.extend_from_slice(&identity_secret.to_bytes_le());
serialized.extend_from_slice(&normalize_usize(id_index));
serialized.extend_from_slice(&normalize_usize_le(id_index));
serialized.extend_from_slice(&fr_to_bytes_le(&user_message_limit));
serialized.extend_from_slice(&fr_to_bytes_le(&message_id));
serialized.extend_from_slice(&fr_to_bytes_le(&external_nullifier));
serialized.extend_from_slice(&normalize_usize(signal.len()));
serialized.extend_from_slice(&normalize_usize_le(signal.len()));
serialized.extend_from_slice(signal);
serialized
@@ -414,7 +444,7 @@ pub fn prepare_verify_input(proof_data: Vec<u8>, signal: &[u8]) -> Vec<u8> {
let mut serialized = Vec::with_capacity(proof_data.len() + 8 + signal.len());
serialized.extend(proof_data);
serialized.extend_from_slice(&normalize_usize(signal.len()));
serialized.extend_from_slice(&normalize_usize_le(signal.len()));
serialized.extend_from_slice(signal);
serialized

View File

@@ -1,11 +1,14 @@
use crate::circuit::{zkey_from_raw, Curve, Fr};
use crate::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash};
use crate::hashers::{hash_to_field_le, poseidon_hash as utils_poseidon_hash};
use crate::protocol::{
compute_id_secret, deserialize_proof_values, deserialize_witness, extended_keygen,
extended_seeded_keygen, keygen, proof_values_from_witness, rln_witness_to_bigint_json,
rln_witness_to_json, seeded_keygen, serialize_proof_values, verify_proof,
};
use crate::utils::{bytes_le_to_fr, bytes_le_to_vec_fr, fr_byte_size, fr_to_bytes_le};
use crate::utils::{
bytes_be_to_vec_fr, bytes_le_to_fr, bytes_le_to_vec_fr, fr_byte_size, fr_to_bytes_be,
fr_to_bytes_le,
};
#[cfg(not(target_arch = "wasm32"))]
use {
crate::{
@@ -24,8 +27,8 @@ use crate::protocol::generate_proof_with_witness;
use {
crate::protocol::{proof_inputs_to_rln_witness, serialize_witness},
crate::utils::{bytes_le_to_vec_u8, vec_fr_to_bytes_le, vec_u8_to_bytes_le},
crate::{circuit::TEST_TREE_HEIGHT, poseidon_tree::PoseidonTree},
serde_json::{json, Value},
crate::{circuit::TEST_TREE_DEPTH, poseidon_tree::PoseidonTree},
serde_json::json,
std::str::FromStr,
utils::error::ZerokitMerkleTreeError,
utils::{Hasher, ZerokitMerkleProof, ZerokitMerkleTree},
@@ -44,6 +47,39 @@ use std::io::Cursor;
/// Prevents a RLN ZK proof generated for one application to be re-used in another one.
pub const RLN_IDENTIFIER: &[u8] = b"zerokit/rln/010203040506070809";
/// Trait for inputs that can be converted to a tree configuration.
/// This allows accepting both JSON/string and direct config structs.
#[cfg(not(feature = "stateless"))]
pub trait TreeConfigInput {
/// Convert the input to a tree configuration struct.
fn into_tree_config(self) -> Result<<PoseidonTree as ZerokitMerkleTree>::Config, RLNError>;
}
#[cfg(not(feature = "stateless"))]
impl<R: Read> TreeConfigInput for R {
/// Convert the input reader into a tree configuration.
fn into_tree_config(mut self) -> Result<<PoseidonTree as ZerokitMerkleTree>::Config, RLNError> {
let mut input_buffer: Vec<u8> = Vec::new();
self.read_to_end(&mut input_buffer)?;
let config_string = String::from_utf8(input_buffer)?;
if config_string.is_empty() {
Ok(<PoseidonTree as ZerokitMerkleTree>::Config::default())
} else {
Ok(<PoseidonTree as ZerokitMerkleTree>::Config::from_str(
&config_string,
)?)
}
}
}
#[cfg(feature = "pmtree-ft")]
impl TreeConfigInput for <PoseidonTree as ZerokitMerkleTree>::Config {
fn into_tree_config(self) -> Result<<PoseidonTree as ZerokitMerkleTree>::Config, RLNError> {
Ok(self)
}
}
/// The RLN object.
///
/// It implements the methods required to update the internal Merkle Tree, generate and verify RLN ZK proofs.
@@ -62,41 +98,29 @@ impl RLN {
/// Creates a new RLN object by loading circuit resources from a folder.
///
/// Input parameters are
/// - `tree_height`: the height of the internal Merkle tree
/// - `input_data`: include `tree_config` a reader for a string containing a json with the merkle tree configuration
/// - `tree_depth`: the depth of the internal Merkle tree
/// - `input_buffer`: a reader containing JSON configuration or a direct tree configuration struct
///
/// Example:
/// ```
/// use std::io::Cursor;
///
/// let tree_height = 20;
/// let input = Cursor::new(json!({}).to_string());
/// let tree_depth = 10;
/// let input_buffer = Cursor::new(json!({}).to_string());
///
/// // We create a new RLN instance
/// let mut rln = RLN::new(tree_height, input);
/// let mut rln = RLN::new(tree_depth, input_buffer);
/// ```
#[cfg(all(not(target_arch = "wasm32"), not(feature = "stateless")))]
pub fn new<R: Read>(tree_height: usize, mut input_data: R) -> Result<RLN, RLNError> {
// We read input
let mut input: Vec<u8> = Vec::new();
input_data.read_to_end(&mut input)?;
let rln_config: Value = serde_json::from_str(&String::from_utf8(input)?)?;
let tree_config = rln_config["tree_config"].to_string();
pub fn new<T: TreeConfigInput>(tree_depth: usize, input_buffer: T) -> Result<RLN, RLNError> {
let proving_key = zkey_from_folder().to_owned();
let verification_key = proving_key.0.vk.to_owned();
let graph_data = graph_from_folder().to_owned();
let tree_config: <PoseidonTree as ZerokitMerkleTree>::Config = if tree_config.is_empty() {
<PoseidonTree as ZerokitMerkleTree>::Config::default()
} else {
<PoseidonTree as ZerokitMerkleTree>::Config::from_str(&tree_config)?
};
let tree_config = input_buffer.into_tree_config()?;
// We compute a default empty tree
let tree = PoseidonTree::new(
tree_height,
tree_depth,
<PoseidonTree as ZerokitMerkleTree>::Hasher::default_leaf(),
tree_config,
)?;
@@ -113,7 +137,6 @@ impl RLN {
/// Creates a new stateless RLN object by loading circuit resources from a folder.
///
/// Example:
///
/// ```
/// // We create a new RLN instance
/// let mut rln = RLN::new();
@@ -134,18 +157,18 @@ impl RLN {
/// Creates a new RLN object by passing circuit resources as byte vectors.
///
/// Input parameters are
/// - `tree_height`: the height of the internal Merkle tree
/// - `tree_depth`: the depth of the internal Merkle tree
/// - `zkey_vec`: a byte vector containing to the proving key (`rln_final.arkzkey`) as binary file
/// - `graph_data`: a byte vector containing the graph data (`graph.bin`) as binary file
/// - `tree_config_input`: a reader for a string containing a json with the merkle tree configuration
/// - `input_buffer`: a reader containing JSON configuration or a direct tree configuration struct
///
/// Example:
/// ```
/// use std::fs::File;
/// use std::io::Read;
///
/// let tree_height = 20;
/// let resources_folder = "./resources/tree_height_20/";
/// let tree_depth = 10;
/// let resources_folder = "./resources/tree_depth_10/";
///
/// let mut resources: Vec<Vec<u8>> = Vec::new();
/// for filename in ["rln_final.arkzkey", "graph.bin"] {
@@ -158,38 +181,29 @@ impl RLN {
/// }
///
/// let tree_config = "".to_string();
/// let tree_config_buffer = &Buffer::from(tree_config.as_bytes());
/// let input_buffer = &Buffer::from(tree_config.as_bytes());
///
/// let mut rln = RLN::new_with_params(
/// tree_height,
/// tree_depth,
/// resources[0].clone(),
/// resources[1].clone(),
/// tree_config_buffer,
/// input_buffer,
/// );
/// ```
#[cfg(all(not(target_arch = "wasm32"), not(feature = "stateless")))]
pub fn new_with_params<R: Read>(
tree_height: usize,
pub fn new_with_params<T: TreeConfigInput>(
tree_depth: usize,
zkey_vec: Vec<u8>,
graph_data: Vec<u8>,
mut tree_config_input: R,
input_buffer: T,
) -> Result<RLN, RLNError> {
let proving_key = zkey_from_raw(&zkey_vec)?;
let verification_key = proving_key.0.vk.to_owned();
let mut tree_config_vec: Vec<u8> = Vec::new();
tree_config_input.read_to_end(&mut tree_config_vec)?;
let tree_config_str = String::from_utf8(tree_config_vec)?;
let tree_config: <PoseidonTree as ZerokitMerkleTree>::Config = if tree_config_str.is_empty()
{
<PoseidonTree as ZerokitMerkleTree>::Config::default()
} else {
<PoseidonTree as ZerokitMerkleTree>::Config::from_str(&tree_config_str)?
};
let tree_config = input_buffer.into_tree_config()?;
// We compute a default empty tree
let tree = PoseidonTree::new(
tree_height,
tree_depth,
<PoseidonTree as ZerokitMerkleTree>::Hasher::default_leaf(),
tree_config,
)?;
@@ -214,7 +228,7 @@ impl RLN {
/// use std::fs::File;
/// use std::io::Read;
///
/// let resources_folder = "./resources/tree_height_20/";
/// let resources_folder = "./resources/tree_depth_10/";
///
/// let mut resources: Vec<Vec<u8>> = Vec::new();
/// for filename in ["rln_final.arkzkey", "graph.bin"] {
@@ -253,7 +267,7 @@ impl RLN {
/// use std::fs::File;
/// use std::io::Read;
///
/// let zkey_path = "./resources/tree_height_20/rln_final.arkzkey";
/// let zkey_path = "./resources/tree_depth_10/rln_final.arkzkey";
///
/// let mut file = File::open(zkey_path).expect("Failed to open file");
/// let metadata = std::fs::metadata(zkey_path).expect("Failed to read metadata");
@@ -281,11 +295,11 @@ impl RLN {
/// Leaves are set to the default value implemented in PoseidonTree implementation.
///
/// Input values are:
/// - `tree_height`: the height of the Merkle tree.
/// - `tree_depth`: the depth of the Merkle tree.
#[cfg(not(feature = "stateless"))]
pub fn set_tree(&mut self, tree_height: usize) -> Result<(), RLNError> {
// We compute a default empty tree of desired height
self.tree = PoseidonTree::default(tree_height)?;
pub fn set_tree(&mut self, tree_depth: usize) -> Result<(), RLNError> {
// We compute a default empty tree of desired depth
self.tree = PoseidonTree::default(tree_depth)?;
Ok(())
}
@@ -413,8 +427,8 @@ impl RLN {
#[cfg(not(feature = "stateless"))]
pub fn init_tree_with_leaves<R: Read>(&mut self, input_data: R) -> Result<(), RLNError> {
// reset the tree
// NOTE: this requires the tree to be initialized with the correct height initially
// TODO: accept tree_height as a parameter and initialize the tree with that height
// NOTE: this requires the tree to be initialized with the correct depth initially
// TODO: accept tree_depth as a parameter and initialize the tree with that depth
self.set_tree(self.tree.depth())?;
self.set_leaves_from(0, input_data)
}
@@ -503,12 +517,12 @@ impl RLN {
/// use rln::circuit::Fr;
/// use rln::utils::*;
///
/// let tree_height = 20;
/// let tree_depth = 10;
/// let start_index = 10;
/// let no_of_leaves = 256;
///
/// // We reset the tree
/// rln.set_tree(tree_height).unwrap();
/// rln.set_tree(tree_depth).unwrap();
///
/// // Internal Merkle tree next_index value is now 0
///
@@ -729,10 +743,12 @@ impl RLN {
////////////////////////////////////////////////////////
// zkSNARK APIs
////////////////////////////////////////////////////////
/// Computes a zkSNARK RLN proof using a [`RLNWitnessInput`].
/// Computes a zkSNARK RLN proof using a [`RLNWitnessInput`](crate::protocol::RLNWitnessInput) object.
///
/// Input values are:
/// - `input_data`: a reader for the serialization of a [`RLNWitnessInput`] object, containing the public and private inputs to the ZK circuits (serialization done using [`rln::protocol::serialize_witness`](crate::protocol::serialize_witness))
/// - `input_data`: a reader for the serialization of a [`RLNWitnessInput`](crate::protocol::RLNWitnessInput)
/// object, containing the public and private inputs to the ZK circuits (serialization done using
/// [`serialize_witness`])
///
/// Output values are:
/// - `output_data`: a writer receiving the serialization of the zkSNARK proof
@@ -741,7 +757,7 @@ impl RLN {
/// ```
/// use rln::protocol::*;
///
/// let rln_witness = random_rln_witness(tree_height);
/// let rln_witness = random_rln_witness(tree_depth);
/// let proof_values = proof_values_from_witness(&rln_witness);
///
/// // We compute a Groth16 proof
@@ -781,7 +797,7 @@ impl RLN {
/// ```
/// use rln::protocol::*;
///
/// let rln_witness = random_rln_witness(tree_height);
/// let rln_witness = random_rln_witness(tree_depth);
///
/// // We compute a Groth16 proof
/// let mut input_buffer = Cursor::new(serialize_witness(&rln_witness));
@@ -987,7 +1003,7 @@ impl RLN {
let signal: Vec<u8> = serialized[all_read..all_read + signal_len].to_vec();
let verified = verify_proof(&self.verification_key, &proof, &proof_values)?;
let x = hash_to_field(&signal);
let x = hash_to_field_le(&signal);
// Consistency checks to counter proof tampering
Ok(verified && (self.tree.root() == proof_values.root) && (x == proof_values.x))
@@ -1071,7 +1087,7 @@ impl RLN {
let verified = verify_proof(&self.verification_key, &proof, &proof_values)?;
// First consistency checks to counter proof tampering
let x = hash_to_field(&signal);
let x = hash_to_field_le(&signal);
let partial_result = verified && (x == proof_values.x);
// We skip root validation if proof is already invalid
@@ -1113,151 +1129,6 @@ impl RLN {
////////////////////////////////////////////////////////
// Utils
////////////////////////////////////////////////////////
/// Returns an identity secret and identity commitment pair.
///
/// The identity commitment is the Poseidon hash of the identity secret.
///
/// Output values are:
/// - `output_data`: a writer receiving the serialization of the identity secret and identity commitment (serialization done with `rln::utils::fr_to_bytes_le`)
///
/// Example
/// ```
/// use rln::protocol::*;
///
/// // We generate an identity pair
/// let mut buffer = Cursor::new(Vec::<u8>::new());
/// rln.key_gen(&mut buffer).unwrap();
///
/// // We deserialize the keygen output

/// let (identity_secret_hash, id_commitment) = deserialize_identity_pair(buffer.into_inner());
/// ```
pub fn key_gen<W: Write>(&self, mut output_data: W) -> Result<(), RLNError> {
let (identity_secret_hash, id_commitment) = keygen();
output_data.write_all(&identity_secret_hash.to_bytes_le())?;
output_data.write_all(&fr_to_bytes_le(&id_commitment))?;
Ok(())
}
/// Returns an identity trapdoor, nullifier, secret and commitment tuple.
///
/// The identity secret is the Poseidon hash of the identity trapdoor and identity nullifier.
///
/// The identity commitment is the Poseidon hash of the identity secret.
///
/// Generated credentials are compatible with [Semaphore](https://semaphore.appliedzkp.org/docs/guides/identities)'s credentials.
///
/// Output values are:
/// - `output_data`: a writer receiving the serialization of the identity trapdoor, identity nullifier, identity secret and identity commitment (serialization done with `rln::utils::fr_to_bytes_le`)
///
/// Example
/// ```
/// use rln::protocol::*;
///
/// // We generate an identity tuple
/// let mut buffer = Cursor::new(Vec::<u8>::new());
/// rln.extended_key_gen(&mut buffer).unwrap();
///
/// // We deserialize the keygen output
/// let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) = deserialize_identity_tuple(buffer.into_inner());
/// ```
pub fn extended_key_gen<W: Write>(&self, mut output_data: W) -> Result<(), RLNError> {
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
extended_keygen();
output_data.write_all(&fr_to_bytes_le(&identity_trapdoor))?;
output_data.write_all(&fr_to_bytes_le(&identity_nullifier))?;
output_data.write_all(&fr_to_bytes_le(&identity_secret_hash))?;
output_data.write_all(&fr_to_bytes_le(&id_commitment))?;
Ok(())
}
/// Returns an identity secret and identity commitment pair generated using a seed.
///
/// The identity commitment is the Poseidon hash of the identity secret.
///
/// Input values are:
/// - `input_data`: a reader for the byte vector containing the seed
///
/// Output values are:
/// - `output_data`: a writer receiving the serialization of the identity secret and identity commitment (serialization done with [`rln::utils::fr_to_bytes_le`](crate::utils::fr_to_bytes_le))
///
/// Example
/// ```
/// use rln::protocol::*;
///
/// let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
///
/// let mut input_buffer = Cursor::new(&seed_bytes);
/// let mut output_buffer = Cursor::new(Vec::<u8>::new());
/// rln.seeded_key_gen(&mut input_buffer, &mut output_buffer)
/// .unwrap();
///
/// // We deserialize the keygen output
/// let (identity_secret_hash, id_commitment) = deserialize_identity_pair(output_buffer.into_inner());
/// ```
pub fn seeded_key_gen<R: Read, W: Write>(
&self,
mut input_data: R,
mut output_data: W,
) -> Result<(), RLNError> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
let (identity_secret_hash, id_commitment) = seeded_keygen(&serialized);
output_data.write_all(&fr_to_bytes_le(&identity_secret_hash))?;
output_data.write_all(&fr_to_bytes_le(&id_commitment))?;
Ok(())
}
/// Returns an identity trapdoor, nullifier, secret and commitment tuple generated using a seed.
///
/// The identity secret is the Poseidon hash of the identity trapdoor and identity nullifier.
///
/// The identity commitment is the Poseidon hash of the identity secret.
///
/// Generated credentials are compatible with [Semaphore](https://semaphore.appliedzkp.org/docs/guides/identities)'s credentials.
///
/// Input values are:
/// - `input_data`: a reader for the byte vector containing the seed
///
/// Output values are:
/// - `output_data`: a writer receiving the serialization of the identity trapdoor, identity nullifier, identity secret and identity commitment (serialization done with `rln::utils::fr_to_bytes_le`)
///
/// Example
/// ```
/// use rln::protocol::*;
///
/// let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
///
/// let mut input_buffer = Cursor::new(&seed_bytes);
/// let mut output_buffer = Cursor::new(Vec::<u8>::new());
/// rln.seeded_key_gen(&mut input_buffer, &mut output_buffer)
/// .unwrap();
///
/// // We deserialize the keygen output
/// let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) = deserialize_identity_tuple(output_buffer.into_inner());
/// ```
pub fn seeded_extended_key_gen<R: Read, W: Write>(
&self,
mut input_data: R,
mut output_data: W,
) -> Result<(), RLNError> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
extended_seeded_keygen(&serialized);
output_data.write_all(&fr_to_bytes_le(&identity_trapdoor))?;
output_data.write_all(&fr_to_bytes_le(&identity_nullifier))?;
output_data.write_all(&fr_to_bytes_le(&identity_secret_hash))?;
output_data.write_all(&fr_to_bytes_le(&id_commitment))?;
Ok(())
}
/// Recovers the identity secret from two sets of proof values computed for the same secret in the same epoch with the same RLN identifier.
///
/// Input values are:
@@ -1331,12 +1202,12 @@ impl RLN {
Ok(())
}
/// Returns the serialization of a [`RLNWitnessInput`] populated from the identity secret, the Merkle tree index, the user message limit, the message id, the external nullifier (which includes the epoch and RLN identifier) and the signal.
/// Returns the serialization of a [`RLNWitnessInput`](crate::protocol::RLNWitnessInput) populated from the identity secret, the Merkle tree index, the user message limit, the message id, the external nullifier (which includes the epoch and RLN identifier) and the signal.
///
/// Input values are:
/// - `input_data`: a reader for the serialization of `[ identity_secret<32> | id_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal<var> ]`
///
/// The function returns the corresponding [`RLNWitnessInput`] object serialized using [`rln::protocol::serialize_witness`](crate::protocol::serialize_witness).
/// The function returns the corresponding [`RLNWitnessInput`](crate::protocol::RLNWitnessInput) object serialized using [`serialize_witness`].
#[cfg(not(feature = "stateless"))]
pub fn get_serialized_rln_witness<R: Read>(
&mut self,
@@ -1350,12 +1221,13 @@ impl RLN {
serialize_witness(&rln_witness).map_err(RLNError::Protocol)
}
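As an illustration of the documented layout, a hedged sketch of how a caller might assemble the input buffer for `get_serialized_rln_witness`; all values are hypothetical, the identity secret is shown as a bare `Fr` for simplicity (in the library it is an `IdSecret`), and `rln` is assumed to be a stateful instance:

```rust
use std::io::Cursor;

// Hypothetical inputs for illustration only.
let identity_secret = Fr::from(1u64);
let id_index: usize = 0;
let user_message_limit = Fr::from(100u64);
let message_id = Fr::from(0u64);
let external_nullifier = Fr::from(2u64);
let signal: Vec<u8> = b"example-signal".to_vec();

// Layout: [ identity_secret<32> | id_index<8> | user_message_limit<32> |
//           message_id<32> | external_nullifier<32> | signal_len<8> | signal<var> ]
let mut serialized: Vec<u8> = Vec::new();
serialized.extend_from_slice(&fr_to_bytes_le(&identity_secret));
serialized.extend_from_slice(&normalize_usize_le(id_index));
serialized.extend_from_slice(&fr_to_bytes_le(&user_message_limit));
serialized.extend_from_slice(&fr_to_bytes_le(&message_id));
serialized.extend_from_slice(&fr_to_bytes_le(&external_nullifier));
serialized.extend_from_slice(&normalize_usize_le(signal.len()));
serialized.extend_from_slice(&signal);

let serialized_witness = rln
    .get_serialized_rln_witness(Cursor::new(serialized))
    .unwrap();
```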
/// Converts a byte serialization of a [`RLNWitnessInput`] object to the corresponding JSON serialization.
/// Converts a byte serialization of a [`RLNWitnessInput`](crate::protocol::RLNWitnessInput) object to the corresponding JSON serialization.
///
/// Input values are:
/// - `serialized_witness`: the byte serialization of a [`RLNWitnessInput`] object (serialization done with [`rln::protocol::serialize_witness`](crate::protocol::serialize_witness)).
/// - `serialized_witness`: the byte serialization of a [`RLNWitnessInput`](crate::protocol::RLNWitnessInput)
/// object (serialization done with [`rln::protocol::serialize_witness`](crate::protocol::serialize_witness)).
///
/// The function returns the corresponding JSON encoding of the input [`RLNWitnessInput`] object.
/// The function returns the corresponding JSON encoding of the input.
pub fn get_rln_witness_json(
&mut self,
serialized_witness: &[u8],
@@ -1364,13 +1236,15 @@ impl RLN {
rln_witness_to_json(&rln_witness)
}
/// Converts a byte serialization of a [`RLNWitnessInput`] object to the corresponding JSON serialization.
/// Converts a byte serialization of a [`RLNWitnessInput`](crate::protocol::RLNWitnessInput) object
/// to the corresponding JSON serialization.
/// Before serialization, the data is converted into big integers for further calculation in the witness calculator.
///
/// Input values are:
/// - `serialized_witness`: the byte serialization of a [`RLNWitnessInput`] object (serialization done with [`rln::protocol::serialize_witness`](crate::protocol::serialize_witness)).
/// - `serialized_witness`: the byte serialization of a [`RLNWitnessInput`](crate::protocol::RLNWitnessInput)
/// object (serialization done with [`rln::protocol::serialize_witness`](crate::protocol::serialize_witness)).
///
/// The function returns the corresponding JSON encoding of the input [`RLNWitnessInput`] object.
/// The function returns the corresponding JSON encoding of the input [`RLNWitnessInput`](crate::protocol::RLNWitnessInput) object.
pub fn get_rln_witness_bigint_json(
&mut self,
serialized_witness: &[u8],
@@ -1394,9 +1268,9 @@ impl Default for RLN {
fn default() -> Self {
#[cfg(not(feature = "stateless"))]
{
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let buffer = Cursor::new(json!({}).to_string());
Self::new(tree_height, buffer).unwrap()
Self::new(tree_depth, buffer).unwrap()
}
#[cfg(feature = "stateless")]
Self::new().unwrap()
@@ -1428,12 +1302,18 @@ impl Default for RLN {
pub fn hash<R: Read, W: Write>(
mut input_data: R,
mut output_data: W,
is_little_endian: bool,
) -> Result<(), std::io::Error> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
let hash = hash_to_field(&serialized);
output_data.write_all(&fr_to_bytes_le(&hash))?;
if is_little_endian {
let hash = hash_to_field_le(&serialized);
output_data.write_all(&fr_to_bytes_le(&hash))?;
} else {
// Big-endian path: use the BE variant (the original branch repeated
// `hash_to_field_le`, which defeats the endianness flag; `hash_to_field_be`
// is assumed to exist alongside the other `_be` helpers in this diff)
let hash = hash_to_field_be(&serialized);
output_data.write_all(&fr_to_bytes_be(&hash))?;
}
Ok(())
}
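A short sketch of the new endianness flag in use; `true` selects the little-endian path, and `bytes_le_to_fr` is the deserializer shown later in this diff:

```rust
use std::io::Cursor;

let signal = b"example-signal";
let mut input = Cursor::new(signal.as_slice());
let mut output = Cursor::new(Vec::<u8>::new());
// Little-endian: hash_to_field_le + fr_to_bytes_le.
hash(&mut input, &mut output, true).unwrap();
let (field_element, _) = bytes_le_to_fr(&output.into_inner());
```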
@@ -1464,13 +1344,208 @@ pub fn hash<R: Read, W: Write>(
pub fn poseidon_hash<R: Read, W: Write>(
mut input_data: R,
mut output_data: W,
is_little_endian: bool,
) -> Result<(), RLNError> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
let (inputs, _) = bytes_le_to_vec_fr(&serialized)?;
let hash = utils_poseidon_hash(inputs.as_ref());
output_data.write_all(&fr_to_bytes_le(&hash))?;
if is_little_endian {
let (inputs, _) = bytes_le_to_vec_fr(&serialized)?;
let hash = utils_poseidon_hash(inputs.as_ref());
output_data.write_all(&fr_to_bytes_le(&hash))?;
} else {
let (inputs, _) = bytes_be_to_vec_fr(&serialized)?;
let hash = utils_poseidon_hash(inputs.as_ref());
output_data.write_all(&fr_to_bytes_be(&hash))?;
}
Ok(())
}
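The same flag applies to `poseidon_hash`, whose reader must contain a length-prefixed `Vec<Fr>`, so the input is built with `vec_fr_to_bytes_le` (or the `_be` variant when `is_little_endian` is `false`). A minimal sketch:

```rust
use std::io::Cursor;

let inputs = vec![Fr::from(1u64), Fr::from(2u64)];
let mut input = Cursor::new(vec_fr_to_bytes_le(&inputs));
let mut output = Cursor::new(Vec::<u8>::new());
poseidon_hash(&mut input, &mut output, true).unwrap();
let (hash_out, _) = bytes_le_to_fr(&output.into_inner());
```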
/// Generate an identity which is composed of an identity secret and identity commitment.
/// The identity secret is a random field element.
/// The identity commitment is the Poseidon hash of the identity secret.
///
/// # Inputs
///
/// - `output_data`: a writer receiving the serialization of
/// the identity secret and identity commitment in the requested endianness.
/// - `is_little_endian`: a boolean indicating whether the identity secret and identity commitment
/// should be serialized in little-endian or big-endian byte order.
///
/// # Example
/// ```
/// use rln::protocol::*;
///
/// // We generate an identity pair
/// let mut buffer = Cursor::new(Vec::<u8>::new());
/// let is_little_endian = true;
/// key_gen(&mut buffer, is_little_endian).unwrap();
///
/// // We deserialize the keygen output
/// let (identity_secret_hash, id_commitment) = deserialize_identity_pair_le(buffer.into_inner());
/// ```
pub fn key_gen<W: Write>(mut output_data: W, is_little_endian: bool) -> Result<(), RLNError> {
let (identity_secret_hash, id_commitment) = keygen();
if is_little_endian {
output_data.write_all(&identity_secret_hash.to_bytes_le())?;
output_data.write_all(&fr_to_bytes_le(&id_commitment))?;
} else {
output_data.write_all(&identity_secret_hash.to_bytes_be())?;
output_data.write_all(&fr_to_bytes_be(&id_commitment))?;
}
Ok(())
}
/// Generate an identity which is composed of an identity trapdoor, nullifier, secret and commitment.
/// The identity secret is the Poseidon hash of the identity trapdoor and identity nullifier.
/// The identity commitment is the Poseidon hash of the identity secret.
///
/// # Inputs
///
/// - `output_data`: a writer receiving the serialization of
/// the identity trapdoor, nullifier, secret and commitment in the requested endianness.
/// - `is_little_endian`: a boolean indicating whether the identity trapdoor, nullifier, secret and commitment
/// should be serialized in little-endian or big-endian byte order.
///
/// Generated credentials are compatible with
/// [Semaphore](https://semaphore.appliedzkp.org/docs/guides/identities)'s credentials.
///
/// # Example
/// ```
/// use rln::protocol::*;
///
/// // We generate an identity tuple
/// let mut buffer = Cursor::new(Vec::<u8>::new());
/// let is_little_endian = true;
/// extended_key_gen(&mut buffer, is_little_endian).unwrap();
///
/// // We deserialize the keygen output
/// let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment)
/// = deserialize_identity_tuple_le(buffer.into_inner());
/// ```
pub fn extended_key_gen<W: Write>(
mut output_data: W,
is_little_endian: bool,
) -> Result<(), RLNError> {
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
extended_keygen();
if is_little_endian {
output_data.write_all(&fr_to_bytes_le(&identity_trapdoor))?;
output_data.write_all(&fr_to_bytes_le(&identity_nullifier))?;
output_data.write_all(&fr_to_bytes_le(&identity_secret_hash))?;
output_data.write_all(&fr_to_bytes_le(&id_commitment))?;
} else {
output_data.write_all(&fr_to_bytes_be(&identity_trapdoor))?;
output_data.write_all(&fr_to_bytes_be(&identity_nullifier))?;
output_data.write_all(&fr_to_bytes_be(&identity_secret_hash))?;
output_data.write_all(&fr_to_bytes_be(&id_commitment))?;
}
Ok(())
}
/// Generate an identity which is composed of an identity secret and identity commitment using a seed.
/// The identity secret is a random field element,
/// where the RNG is instantiated using 20 rounds of ChaCha seeded with the hash of the input.
/// The identity commitment is the Poseidon hash of the identity secret.
///
/// # Inputs
///
/// - `input_data`: a reader for the byte vector containing the seed
/// - `output_data`: a writer receiving the serialization of
/// the identity secret and identity commitment in the requested endianness.
/// - `is_little_endian`: a boolean indicating whether the identity secret and identity commitment
/// should be serialized in little-endian or big-endian byte order.
///
/// # Example
/// ```
/// use rln::protocol::*;
///
/// let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
///
/// let mut input_buffer = Cursor::new(&seed_bytes);
/// let mut output_buffer = Cursor::new(Vec::<u8>::new());
/// let is_little_endian = true;
/// seeded_key_gen(&mut input_buffer, &mut output_buffer, is_little_endian).unwrap();
///
/// // We deserialize the keygen output
/// let (identity_secret_hash, id_commitment) = deserialize_identity_pair_le(output_buffer.into_inner());
/// ```
pub fn seeded_key_gen<R: Read, W: Write>(
mut input_data: R,
mut output_data: W,
is_little_endian: bool,
) -> Result<(), RLNError> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
let (identity_secret_hash, id_commitment) = seeded_keygen(&serialized);
if is_little_endian {
output_data.write_all(&fr_to_bytes_le(&identity_secret_hash))?;
output_data.write_all(&fr_to_bytes_le(&id_commitment))?;
} else {
output_data.write_all(&fr_to_bytes_be(&identity_secret_hash))?;
output_data.write_all(&fr_to_bytes_be(&id_commitment))?;
}
Ok(())
}
/// Generate an identity which is composed of an identity trapdoor, nullifier, secret and commitment using a seed.
/// The identity trapdoor and nullifier are random field elements,
/// where the RNG is instantiated using 20 rounds of ChaCha seeded with the hash of the input.
/// The identity secret is the Poseidon hash of the identity trapdoor and identity nullifier.
/// The identity commitment is the Poseidon hash of the identity secret.
///
/// # Inputs
///
/// - `input_data`: a reader for the byte vector containing the seed
/// - `output_data`: a writer receiving the serialization of
/// the identity trapdoor, nullifier, secret and commitment in the requested endianness.
/// - `is_little_endian`: a boolean indicating whether the identity trapdoor, nullifier, secret and commitment
/// should be serialized in little-endian or big-endian byte order.
///
/// Generated credentials are compatible with
/// [Semaphore](https://semaphore.appliedzkp.org/docs/guides/identities)'s credentials.
///
/// # Example
/// ```
/// use rln::protocol::*;
///
/// let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
///
/// let mut input_buffer = Cursor::new(&seed_bytes);
/// let mut output_buffer = Cursor::new(Vec::<u8>::new());
/// let is_little_endian = true;
/// seeded_extended_key_gen(&mut input_buffer, &mut output_buffer, is_little_endian).unwrap();
///
/// // We deserialize the keygen output
/// let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) = deserialize_identity_tuple_le(output_buffer.into_inner());
/// ```
pub fn seeded_extended_key_gen<R: Read, W: Write>(
mut input_data: R,
mut output_data: W,
is_little_endian: bool,
) -> Result<(), RLNError> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
extended_seeded_keygen(&serialized);
if is_little_endian {
output_data.write_all(&fr_to_bytes_le(&identity_trapdoor))?;
output_data.write_all(&fr_to_bytes_le(&identity_nullifier))?;
output_data.write_all(&fr_to_bytes_le(&identity_secret_hash))?;
output_data.write_all(&fr_to_bytes_le(&id_commitment))?;
} else {
output_data.write_all(&fr_to_bytes_be(&identity_trapdoor))?;
output_data.write_all(&fr_to_bytes_be(&identity_nullifier))?;
output_data.write_all(&fr_to_bytes_be(&identity_secret_hash))?;
output_data.write_all(&fr_to_bytes_be(&id_commitment))?;
}
Ok(())
}

View File

@@ -1,4 +1,4 @@
use crate::circuit::TEST_TREE_HEIGHT;
use crate::circuit::TEST_TREE_DEPTH;
use crate::protocol::{
proof_values_from_witness, random_rln_witness, serialize_proof_values, serialize_witness,
verify_proof, RLNProofValues,
@@ -53,7 +53,7 @@ fn value_to_string_vec(value: &Value) -> Vec<String> {
#[test]
fn test_groth16_proof_hardcoded() {
#[cfg(not(feature = "stateless"))]
let rln = RLN::new(TEST_TREE_HEIGHT, generate_input_buffer()).unwrap();
let rln = RLN::new(TEST_TREE_DEPTH, generate_input_buffer()).unwrap();
#[cfg(feature = "stateless")]
let rln = RLN::new().unwrap();
@@ -133,15 +133,15 @@ fn test_groth16_proof_hardcoded() {
#[test]
// This test is similar to the one in lib, but uses only public API
fn test_groth16_proof() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
#[cfg(not(feature = "stateless"))]
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
#[cfg(feature = "stateless")]
let mut rln = RLN::new().unwrap();
// Note: we only test Groth16 proof generation, so we ignore setting the tree in the RLN object
let rln_witness = random_rln_witness(tree_height);
let rln_witness = random_rln_witness(tree_depth);
let proof_values = proof_values_from_witness(&rln_witness).unwrap();
// We compute a Groth16 proof
@@ -171,12 +171,14 @@ fn test_groth16_proof() {
#[cfg(not(feature = "stateless"))]
mod tree_test {
use crate::circuit::{Fr, TEST_TREE_HEIGHT};
use crate::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash};
use crate::circuit::{Fr, TEST_TREE_DEPTH};
use crate::hashers::{hash_to_field_le, poseidon_hash as utils_poseidon_hash};
use crate::pm_tree_adapter::PmtreeConfig;
use crate::protocol::*;
use crate::public::RLN;
use crate::public::{TreeConfigInput, RLN};
use crate::utils::*;
use ark_serialize::Read;
use serde_json::json;
use std::io::Cursor;
use utils::ZerokitMerkleTree;
@@ -186,7 +188,7 @@ mod tree_test {
#[test]
// We test batch Merkle tree additions
fn test_merkle_operations() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -197,7 +199,7 @@ mod tree_test {
}
// We create a new tree
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We first add leaves one by one specifying the index
for (i, leaf) in leaves.iter().enumerate() {
@@ -214,7 +216,7 @@ mod tree_test {
let (root_single, _) = bytes_le_to_fr(&buffer.into_inner());
// We reset the tree to default
rln.set_tree(tree_height).unwrap();
rln.set_tree(tree_depth).unwrap();
// We add leaves one by one using the internal index (new leaves go in the next available position)
for leaf in &leaves {
@@ -233,7 +235,7 @@ mod tree_test {
assert_eq!(root_single, root_next);
// We reset the tree to default
rln.set_tree(tree_height).unwrap();
rln.set_tree(tree_depth).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
@@ -263,7 +265,7 @@ mod tree_test {
let (root_delete, _) = bytes_le_to_fr(&buffer.into_inner());
// We reset the tree to default
rln.set_tree(tree_height).unwrap();
rln.set_tree(tree_depth).unwrap();
let mut buffer = Cursor::new(Vec::<u8>::new());
rln.get_root(&mut buffer).unwrap();
@@ -276,7 +278,7 @@ mod tree_test {
// We test leaf setting with a custom index, to enable batch updates to the root
// Uses `set_leaves_from` to set leaves in a batch, from index `start_index`
fn test_leaf_setting_with_index() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -291,7 +293,7 @@ mod tree_test {
let set_index = rng.gen_range(0..no_of_leaves) as usize;
// We create a new tree
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
@@ -305,7 +307,7 @@ mod tree_test {
rln.get_root(&mut buffer).unwrap();
let (root_batch_with_init, _) = bytes_le_to_fr(&buffer.into_inner());
// `init_tree_with_leaves` resets the tree to the height it was initialized with, using `set_tree`
// `init_tree_with_leaves` resets the tree to the depth it was initialized with, using `set_tree`
// We add leaves in a batch starting from index 0..set_index
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[0..set_index]));
@@ -326,7 +328,7 @@ mod tree_test {
assert_eq!(root_batch_with_init, root_batch_with_custom_index);
// We reset the tree to default
rln.set_tree(tree_height).unwrap();
rln.set_tree(tree_depth).unwrap();
// We add leaves one by one using the internal index (new leaves go in the next available position)
for leaf in &leaves {
@@ -350,7 +352,7 @@ mod tree_test {
#[test]
// Tests the atomic_operation fn, which set_leaves_from uses internally
fn test_atomic_operation() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -361,7 +363,7 @@ mod tree_test {
}
// We create a new tree
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
@@ -399,7 +401,7 @@ mod tree_test {
#[test]
fn test_atomic_operation_zero_indexed() {
// Test duplicated from https://github.com/waku-org/go-zerokit-rln/pull/12/files
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -410,7 +412,7 @@ mod tree_test {
}
// We create a new tree
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
@@ -443,7 +445,7 @@ mod tree_test {
#[test]
fn test_atomic_operation_consistency() {
// Test duplicated from https://github.com/waku-org/go-zerokit-rln/pull/12/files
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -454,7 +456,7 @@ mod tree_test {
}
// We create a new tree
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
@@ -494,7 +496,7 @@ mod tree_test {
#[test]
// This test checks if `set_leaves_from` throws an error when the index is out of bounds
fn test_set_leaves_bad_index() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -503,10 +505,10 @@ mod tree_test {
for _ in 0..no_of_leaves {
leaves.push(Fr::rand(&mut rng));
}
let bad_index = (1 << tree_height) - rng.gen_range(0..no_of_leaves) as usize;
let bad_index = (1 << tree_depth) - rng.gen_range(0..no_of_leaves) as usize;
// We create a new tree
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// Get root of empty tree
let mut buffer = Cursor::new(Vec::<u8>::new());
@@ -534,9 +536,9 @@ mod tree_test {
#[test]
fn test_get_leaf() {
// We generate a random tree
let tree_height = 10;
let tree_depth = 10;
let mut rng = thread_rng();
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We generate a random leaf
let leaf = Fr::rand(&mut rng);
@@ -559,9 +561,9 @@ mod tree_test {
#[test]
fn test_valid_metadata() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
let arbitrary_metadata: &[u8] = b"block_number:200000";
rln.set_metadata(arbitrary_metadata).unwrap();
@@ -575,9 +577,9 @@ mod tree_test {
#[test]
fn test_empty_metadata() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
let mut buffer = Cursor::new(Vec::<u8>::new());
rln.get_metadata(&mut buffer).unwrap();
@@ -588,7 +590,7 @@ mod tree_test {
#[test]
fn test_rln_proof() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -601,7 +603,7 @@ mod tree_test {
}
// We create a new RLN instance
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
@@ -622,9 +624,9 @@ mod tree_test {
let signal: [u8; 32] = rng.gen();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate a random rln_identifier
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
// We generate an external nullifier
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We choose a message_id satisfying 0 <= message_id < MESSAGE_LIMIT
@@ -662,7 +664,7 @@ mod tree_test {
#[test]
fn test_rln_with_witness() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -673,7 +675,7 @@ mod tree_test {
}
// We create a new RLN instance
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
@@ -694,9 +696,9 @@ mod tree_test {
let signal: [u8; 32] = rng.gen();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate a random rln_identifier
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
// We generate an external nullifier
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We choose a message_id satisfying 0 <= message_id < MESSAGE_LIMIT
@@ -745,7 +747,7 @@ mod tree_test {
#[test]
fn proof_verification_with_roots() {
// The first part is similar to test_rln_with_witness
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
let no_of_leaves = 256;
// We generate a vector of random leaves
@@ -756,7 +758,7 @@ mod tree_test {
}
// We create a new RLN instance
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
@@ -777,9 +779,9 @@ mod tree_test {
let signal: [u8; 32] = rng.gen();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate a random rln_identifier
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
// We generate an external nullifier
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We choose a message_id satisfying 0 <= message_id < MESSAGE_LIMIT
@@ -847,10 +849,10 @@ mod tree_test {
#[test]
fn test_recover_id_secret() {
let tree_height = TEST_TREE_HEIGHT;
let tree_depth = TEST_TREE_DEPTH;
// We create a new RLN instance
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
// Generate identity pair
let (identity_secret_hash, id_commitment) = keygen();
@@ -869,9 +871,9 @@ mod tree_test {
let signal2: [u8; 32] = rng.gen();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate a random rln_identifier
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
// We generate an external nullifier
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We choose a message_id satisfying 0 <= message_id < MESSAGE_LIMIT
@@ -989,12 +991,49 @@ mod tree_test {
*identity_secret_hash_new
);
}
#[test]
fn test_tree_config_input_trait() {
let empty_json_input = generate_input_buffer();
let rln_with_empty_json_config = RLN::new(TEST_TREE_DEPTH, empty_json_input);
assert!(rln_with_empty_json_config.is_ok());
let json_config = json!({
"tree_config": {
"path": "pmtree-123456",
"temporary": false,
"cache_capacity": 1073741824,
"flush_every_ms": 500,
"mode": "HighThroughput",
"use_compression": false
}
});
let json_input = Cursor::new(json_config.to_string());
let rln_with_json_config = RLN::new(TEST_TREE_DEPTH, json_input.clone());
assert!(rln_with_json_config.is_ok());
let json_to_tree_config = json_input.into_tree_config();
assert!(json_to_tree_config.is_ok());
let rln_with_json_to_tree_config = RLN::new(TEST_TREE_DEPTH, json_to_tree_config.unwrap());
assert!(rln_with_json_to_tree_config.is_ok());
let default_pmtree_config = PmtreeConfig::default();
let rln_with_default_tree_config = RLN::new(TEST_TREE_DEPTH, default_pmtree_config);
assert!(rln_with_default_tree_config.is_ok());
let custom_pmtree_config = PmtreeConfig::builder()
.temporary(true)
.use_compression(false)
.build();
let rln_with_custom_tree_config = RLN::new(TEST_TREE_DEPTH, custom_pmtree_config.unwrap());
assert!(rln_with_custom_tree_config.is_ok());
}
}
#[cfg(feature = "stateless")]
mod stateless_test {
use crate::circuit::{Fr, TEST_TREE_HEIGHT};
use crate::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash, PoseidonHash};
use crate::circuit::{Fr, TEST_TREE_DEPTH};
use crate::hashers::{hash_to_field_le, poseidon_hash as utils_poseidon_hash, PoseidonHash};
use crate::protocol::*;
use crate::public::RLN;
use crate::utils::*;
@@ -1013,7 +1052,7 @@ mod stateless_test {
let default_leaf = Fr::from(0);
let mut tree: OptimalMerkleTree<PoseidonHash> = OptimalMerkleTree::new(
TEST_TREE_HEIGHT,
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<OptimalMerkleTree<PoseidonHash>>::default(),
)
@@ -1033,15 +1072,15 @@ mod stateless_test {
let signal: [u8; 32] = rng.gen();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate a random rln_identifier
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We prepare input for generate_rln_proof API
// input_data is [ identity_secret<32> | id_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal<var> ]
let x = hash_to_field(&signal);
let x = hash_to_field_le(&signal);
let merkle_proof = tree.proof(identity_index).expect("proof should exist");
let rln_witness = rln_witness_from_values(
@@ -1111,7 +1150,7 @@ mod stateless_test {
let default_leaf = Fr::from(0);
let mut tree: OptimalMerkleTree<PoseidonHash> = OptimalMerkleTree::new(
TEST_TREE_HEIGHT,
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<OptimalMerkleTree<PoseidonHash>>::default(),
)
@@ -1124,18 +1163,18 @@ mod stateless_test {
tree.update_next(rate_commitment).unwrap();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate a random rln_identifier
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We generate a random signal
let mut rng = thread_rng();
let signal1: [u8; 32] = rng.gen();
let x1 = hash_to_field(&signal1);
let x1 = hash_to_field_le(&signal1);
let signal2: [u8; 32] = rng.gen();
let x2 = hash_to_field(&signal2);
let x2 = hash_to_field_le(&signal2);
let identity_index = tree.leaves_set();
let merkle_proof = tree.proof(identity_index).expect("proof should exist");
@@ -1203,7 +1242,7 @@ mod stateless_test {
tree.update_next(rate_commitment_new).unwrap();
let signal3: [u8; 32] = rng.gen();
let x3 = hash_to_field(&signal3);
let x3 = hash_to_field_le(&signal3);
let identity_index_new = tree.leaves_set();
let merkle_proof_new = tree.proof(identity_index_new).expect("proof should exist");

View File

@@ -53,6 +53,15 @@ pub fn bytes_le_to_fr(input: &[u8]) -> (Fr, usize) {
)
}
#[inline(always)]
pub fn bytes_be_to_fr(input: &[u8]) -> (Fr, usize) {
let el_size = fr_byte_size();
(
Fr::from(BigUint::from_bytes_be(&input[0..el_size])),
el_size,
)
}
#[inline(always)]
pub fn fr_to_bytes_le(input: &Fr) -> Vec<u8> {
let input_biguint: BigUint = (*input).into();
@@ -62,6 +71,19 @@ pub fn fr_to_bytes_le(input: &Fr) -> Vec<u8> {
res
}
#[inline(always)]
pub fn fr_to_bytes_be(input: &Fr) -> Vec<u8> {
let input_biguint: BigUint = (*input).into();
let mut res = input_biguint.to_bytes_be();
// For BE, insert 0 at the start of the Vec (see also fr_to_bytes_le comments)
let to_insert_count = fr_byte_size().saturating_sub(res.len());
if to_insert_count > 0 {
// Prepend the missing zero bytes at index 0
res.splice(0..0, std::iter::repeat_n(0, to_insert_count));
}
res
}
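A hedged round-trip sketch for the new big-endian conversions, checking the zero-padding behavior described in the comment above:

```rust
let x = Fr::from(42u64);
let be = fr_to_bytes_be(&x);
// A small value is left-padded with zeros up to the field element size.
assert_eq!(be.len(), fr_byte_size());
let (y, read) = bytes_be_to_fr(&be);
assert_eq!(x, y);
assert_eq!(read, fr_byte_size());
```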
#[inline(always)]
pub fn vec_fr_to_bytes_le(input: &[Fr]) -> Vec<u8> {
// Calculate capacity for Vec:
@@ -70,7 +92,7 @@ pub fn vec_fr_to_bytes_le(input: &[Fr]) -> Vec<u8> {
let mut bytes = Vec::with_capacity(8 + input.len() * fr_byte_size());
// We store the vector length
bytes.extend_from_slice(&normalize_usize(input.len()));
bytes.extend_from_slice(&normalize_usize_le(input.len()));
// We store each element
for el in input {
@@ -80,6 +102,24 @@ pub fn vec_fr_to_bytes_le(input: &[Fr]) -> Vec<u8> {
bytes
}
#[inline(always)]
pub fn vec_fr_to_bytes_be(input: &[Fr]) -> Vec<u8> {
// Calculate capacity for Vec:
// - 8 bytes for normalized vector length (usize)
// - each Fr element requires fr_byte_size() bytes (typically 32 bytes)
let mut bytes = Vec::with_capacity(8 + input.len() * fr_byte_size());
// We store the vector length
bytes.extend_from_slice(&normalize_usize_be(input.len()));
// We store each element
for el in input {
bytes.extend_from_slice(&fr_to_bytes_be(el));
}
bytes
}
#[inline(always)]
pub fn vec_u8_to_bytes_le(input: &[u8]) -> Vec<u8> {
// Calculate capacity for Vec:
@@ -88,7 +128,23 @@ pub fn vec_u8_to_bytes_le(input: &[u8]) -> Vec<u8> {
let mut bytes = Vec::with_capacity(8 + input.len());
// We store the vector length
bytes.extend_from_slice(&normalize_usize(input.len()));
bytes.extend_from_slice(&normalize_usize_le(input.len()));
// We store the input
bytes.extend_from_slice(input);
bytes
}
#[inline(always)]
pub fn vec_u8_to_bytes_be(input: &[u8]) -> Vec<u8> {
// Calculate capacity for Vec:
// - 8 bytes for normalized vector length (usize)
// - variable length input data
let mut bytes = Vec::with_capacity(8 + input.len());
// We store the vector length
bytes.extend_from_slice(&normalize_usize_be(input.len()));
// We store the input
bytes.extend_from_slice(input);
@@ -99,58 +155,177 @@ pub fn vec_u8_to_bytes_le(input: &[u8]) -> Vec<u8> {
#[inline(always)]
pub fn bytes_le_to_vec_u8(input: &[u8]) -> Result<(Vec<u8>, usize), ConversionError> {
let mut read: usize = 0;
if input.len() < 8 {
return Err(ConversionError::InsufficientData {
expected: 8,
actual: input.len(),
});
}
let len = usize::try_from(u64::from_le_bytes(input[0..8].try_into()?))?;
read += 8;
if input.len() < 8 + len {
return Err(ConversionError::InsufficientData {
expected: 8 + len,
actual: input.len(),
});
}
let res = input[8..8 + len].to_vec();
read += res.len();
Ok((res, read))
}
#[inline(always)]
pub fn bytes_be_to_vec_u8(input: &[u8]) -> Result<(Vec<u8>, usize), ConversionError> {
let mut read: usize = 0;
if input.len() < 8 {
return Err(ConversionError::InsufficientData {
expected: 8,
actual: input.len(),
});
}
let len = usize::try_from(u64::from_be_bytes(input[0..8].try_into()?))?;
read += 8;
if input.len() < 8 + len {
return Err(ConversionError::InsufficientData {
expected: 8 + len,
actual: input.len(),
});
}
let res = input[8..8 + len].to_vec();
read += res.len();
Ok((res, read))
}
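Both `vec_u8` helpers share the `[ len<8> | data<len> ]` layout and differ only in the endianness of the length prefix; a small round-trip sketch:

```rust
let data = vec![1u8, 2, 3];
let encoded = vec_u8_to_bytes_be(&data);
// The first 8 bytes are the big-endian normalized length.
assert_eq!(encoded[..8], normalize_usize_be(data.len()));
let (decoded, read) = bytes_be_to_vec_u8(&encoded).unwrap();
assert_eq!(decoded, data);
assert_eq!(read, encoded.len());
```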
#[inline(always)]
pub fn bytes_le_to_vec_fr(input: &[u8]) -> Result<(Vec<Fr>, usize), ConversionError> {
let mut read: usize = 0;
if input.len() < 8 {
return Err(ConversionError::InsufficientData {
expected: 8,
actual: input.len(),
});
}
let len = usize::try_from(u64::from_le_bytes(input[0..8].try_into()?))?;
read += 8;
let mut res: Vec<Fr> = Vec::with_capacity(len);
let el_size = fr_byte_size();
if input.len() < 8 + len * el_size {
return Err(ConversionError::InsufficientData {
expected: 8 + len * el_size,
actual: input.len(),
});
}
let mut res: Vec<Fr> = Vec::with_capacity(len);
for i in 0..len {
let (curr_el, _) = bytes_le_to_fr(&input[8 + el_size * i..8 + el_size * (i + 1)]);
res.push(curr_el);
read += el_size;
}
Ok((res, read))
}
#[inline(always)]
pub fn bytes_be_to_vec_fr(input: &[u8]) -> Result<(Vec<Fr>, usize), ConversionError> {
let mut read: usize = 0;
if input.len() < 8 {
return Err(ConversionError::InsufficientData {
expected: 8,
actual: input.len(),
});
}
let len = usize::try_from(u64::from_be_bytes(input[0..8].try_into()?))?;
read += 8;
let el_size = fr_byte_size();
if input.len() < 8 + len * el_size {
return Err(ConversionError::InsufficientData {
expected: 8 + len * el_size,
actual: input.len(),
});
}
let mut res: Vec<Fr> = Vec::with_capacity(len);
for i in 0..len {
let (curr_el, _) = bytes_be_to_fr(&input[8 + el_size * i..8 + el_size * (i + 1)]);
res.push(curr_el);
read += el_size;
}
Ok((res, read))
}
#[inline(always)]
pub fn bytes_le_to_vec_usize(input: &[u8]) -> Result<Vec<usize>, ConversionError> {
if input.len() < 8 {
return Err(ConversionError::InsufficientData {
expected: 8,
actual: input.len(),
});
}
let nof_elem = usize::try_from(u64::from_le_bytes(input[0..8].try_into()?))?;
if nof_elem == 0 {
Ok(vec![])
} else {
if input.len() < 8 + nof_elem * 8 {
return Err(ConversionError::InsufficientData {
expected: 8 + nof_elem * 8,
actual: input.len(),
});
}
let elements: Vec<usize> = input[8..]
.chunks(8)
.take(nof_elem)
.map(|ch| usize::from_le_bytes(ch[0..8].try_into().unwrap()))
.collect();
Ok(elements)
}
}
#[inline(always)]
pub fn bytes_be_to_vec_usize(input: &[u8]) -> Result<Vec<usize>, ConversionError> {
if input.len() < 8 {
return Err(ConversionError::InsufficientData {
expected: 8,
actual: input.len(),
});
}
let nof_elem = usize::try_from(u64::from_be_bytes(input[0..8].try_into()?))?;
if nof_elem == 0 {
Ok(vec![])
} else {
if input.len() < 8 + nof_elem * 8 {
return Err(ConversionError::InsufficientData {
expected: 8 + nof_elem * 8,
actual: input.len(),
});
}
let elements: Vec<usize> = input[8..]
.chunks(8)
.take(nof_elem)
.map(|ch| usize::from_be_bytes(ch[0..8].try_into().unwrap()))
.collect();
Ok(elements)
}
}
/// Normalizes a `usize` into an 8-byte array, ensuring consistency across architectures.
/// On 32-bit systems, the result is zero-padded to 8 bytes.
/// On 64-bit systems, it directly represents the `usize` value.
#[inline(always)]
pub fn normalize_usize(input: usize) -> [u8; 8] {
pub fn normalize_usize_le(input: usize) -> [u8; 8] {
let mut bytes = [0u8; 8];
let input_bytes = input.to_le_bytes();
bytes[..input_bytes.len()].copy_from_slice(&input_bytes);
bytes
}
/// Normalizes a `usize` into an 8-byte array, ensuring consistency across architectures.
/// On 32-bit systems, the result is zero-padded to 8 bytes.
/// On 64-bit systems, it directly represents the `usize` value.
#[inline(always)]
pub fn normalize_usize_be(input: usize) -> [u8; 8] {
let mut bytes = [0u8; 8];
let input_bytes = input.to_be_bytes();
bytes[..input_bytes.len()].copy_from_slice(&input_bytes);
bytes
}
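The two normalizers differ only in byte order, and both always emit 8 bytes regardless of the platform's `usize` width; for example:

```rust
assert_eq!(normalize_usize_le(1), [1, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_be(1), [0, 0, 0, 0, 0, 0, 0, 1]);
```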
#[inline(always)] // used in tests
pub fn generate_input_buffer() -> Cursor<String> {
Cursor::new(json!({}).to_string())
@@ -186,6 +361,17 @@ impl IdSecret {
Zeroizing::new(res)
}
pub(crate) fn to_bytes_be(&self) -> Zeroizing<Vec<u8>> {
let input_biguint: BigUint = self.0.into();
let mut res = input_biguint.to_bytes_be();
let to_insert_count = fr_byte_size().saturating_sub(res.len());
if to_insert_count > 0 {
// Prepend the missing zero bytes at index 0
res.splice(0..0, std::iter::repeat_n(0, to_insert_count));
}
Zeroizing::new(res)
}
/// Warning: this can leak the secret value
/// Warning: the leaked value is of type `U256`, which implements Copy (copies will not be zeroized)
pub(crate) fn to_u256(&self) -> U256 {

View File

@@ -3,10 +3,10 @@
mod test {
use ark_std::{rand::thread_rng, UniformRand};
use rand::Rng;
use rln::circuit::{Fr, TEST_TREE_HEIGHT};
use rln::ffi::{hash as ffi_hash, poseidon_hash as ffi_poseidon_hash, *};
use rln::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS};
use rln::protocol::*;
use rln::circuit::{Fr, TEST_TREE_DEPTH};
use rln::ffi::*;
use rln::hashers::{hash_to_field_le, poseidon_hash as utils_poseidon_hash};
use rln::protocol::{deserialize_identity_tuple_le, *};
use rln::public::RLN;
use rln::utils::*;
use serde_json::json;
@@ -22,7 +22,7 @@ mod test {
let mut rln_pointer = MaybeUninit::<*mut RLN>::uninit();
let input_config = json!({}).to_string();
let input_buffer = &Buffer::from(input_config.as_bytes());
let success = new(TEST_TREE_HEIGHT, input_buffer, rln_pointer.as_mut_ptr());
let success = new(TEST_TREE_DEPTH, input_buffer, rln_pointer.as_mut_ptr());
assert!(success, "RLN object creation failed");
unsafe { &mut *rln_pointer.assume_init() }
}
@@ -50,9 +50,9 @@ mod test {
root
}
fn identity_pair_gen(rln_pointer: &mut RLN) -> (IdSecret, Fr) {
fn identity_pair_gen() -> (IdSecret, Fr) {
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = key_gen(rln_pointer, output_buffer.as_mut_ptr());
let success = key_gen(output_buffer.as_mut_ptr(), true);
assert!(success, "key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
@@ -91,7 +91,7 @@ mod test {
let root_single = get_tree_root(rln_pointer);
// We reset the tree to default
let success = set_tree(rln_pointer, TEST_TREE_HEIGHT);
let success = set_tree(rln_pointer, TEST_TREE_DEPTH);
assert!(success, "set tree call failed");
// We add leaves one by one using the internal index (new leaves go in the next available position)
@@ -109,7 +109,7 @@ mod test {
assert_eq!(root_single, root_next);
// We reset the tree to default
let success = set_tree(rln_pointer, TEST_TREE_HEIGHT);
let success = set_tree(rln_pointer, TEST_TREE_DEPTH);
assert!(success, "set tree call failed");
// We add leaves in a batch into the tree
@@ -132,7 +132,7 @@ mod test {
let root_delete = get_tree_root(rln_pointer);
// We reset the tree to default
let success = set_tree(rln_pointer, TEST_TREE_HEIGHT);
let success = set_tree(rln_pointer, TEST_TREE_DEPTH);
assert!(success, "set tree call failed");
// We get the root of the empty tree
@@ -165,7 +165,7 @@ mod test {
// We get the root of the tree obtained adding leaves in batch
let root_batch_with_init = get_tree_root(rln_pointer);
// `init_tree_with_leaves` resets the tree to the height it was initialized with, using `set_tree`
// `init_tree_with_leaves` resets the tree to the depth it was initialized with, using `set_tree`
// We add leaves in a batch starting from index 0..set_index
set_leaves_init(rln_pointer, &leaves[0..set_index]);
@@ -184,7 +184,7 @@ mod test {
);
// We reset the tree to default
let success = set_tree(rln_pointer, TEST_TREE_HEIGHT);
let success = set_tree(rln_pointer, TEST_TREE_DEPTH);
assert!(success, "set tree call failed");
// We add leaves one by one using the internal index (new leaves go in the next available position)
@@ -243,7 +243,7 @@ mod test {
let rln_pointer = create_rln_instance();
let mut rng = thread_rng();
let bad_index = (1 << TEST_TREE_HEIGHT) - rng.gen_range(0..NO_OF_LEAVES) as usize;
let bad_index = (1 << TEST_TREE_DEPTH) - rng.gen_range(0..NO_OF_LEAVES) as usize;
// Get root of empty tree
let root_empty = get_tree_root(rln_pointer);
@@ -267,7 +267,7 @@ mod test {
let rln_pointer = create_rln_instance();
// generate identity
let mut identity_secret_hash_ = hash_to_field(b"test-merkle-proof");
let mut identity_secret_hash_ = hash_to_field_le(b"test-merkle-proof");
let identity_secret_hash = IdSecret::from(&mut identity_secret_hash_);
let mut to_hash = [*identity_secret_hash.clone()];
let id_commitment = utils_poseidon_hash(&to_hash);
@@ -364,7 +364,7 @@ mod test {
for _ in 0..sample_size {
// We generate random witness instances and the corresponding proof values
let rln_witness = random_rln_witness(TEST_TREE_HEIGHT);
let rln_witness = random_rln_witness(TEST_TREE_DEPTH);
let proof_values = proof_values_from_witness(&rln_witness).unwrap();
// We prepare id_commitment and we set the leaf at provided index
@@ -414,7 +414,7 @@ mod test {
// We obtain the root from the RLN instance
let root_rln_folder = get_tree_root(rln_pointer);
let zkey_path = "./resources/tree_height_20/rln_final.arkzkey";
let zkey_path = "./resources/tree_depth_10/rln_final.arkzkey";
let mut zkey_file = File::open(zkey_path).expect("no file found");
let metadata = std::fs::metadata(zkey_path).expect("unable to read metadata");
let mut zkey_buffer = vec![0; metadata.len() as usize];
@@ -424,7 +424,7 @@ mod test {
let zkey_data = &Buffer::from(&zkey_buffer[..]);
let graph_data = "./resources/tree_height_20/graph.bin";
let graph_data = "./resources/tree_depth_10/graph.bin";
let mut graph_file = File::open(graph_data).expect("no file found");
let metadata = std::fs::metadata(graph_data).expect("unable to read metadata");
let mut graph_buffer = vec![0; metadata.len() as usize];
@@ -439,7 +439,7 @@ mod test {
let tree_config = "".to_string();
let tree_config_buffer = &Buffer::from(tree_config.as_bytes());
let success = new_with_params(
TEST_TREE_HEIGHT,
TEST_TREE_DEPTH,
zkey_data,
graph_data,
tree_config_buffer,
@@ -472,7 +472,7 @@ mod test {
set_leaves_init(rln_pointer, &leaves);
// We generate a new identity pair
let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);
let (identity_secret_hash, id_commitment) = identity_pair_gen();
let identity_index: usize = NO_OF_LEAVES;
// We generate a random signal
@@ -480,9 +480,9 @@ mod test {
let signal: [u8; 32] = rng.gen();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate a random rln_identifier
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
// We generate an external nullifier
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We choose a message_id satisfying 0 <= message_id < MESSAGE_LIMIT
@@ -539,7 +539,7 @@ mod test {
set_leaves_init(rln_pointer, &leaves);
// We generate a new identity pair
let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);
let (identity_secret_hash, id_commitment) = identity_pair_gen();
let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);
let identity_index: usize = NO_OF_LEAVES;
@@ -548,9 +548,9 @@ mod test {
let signal: [u8; 32] = rng.gen();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate a random rln_identifier
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
// We generate an external nullifier
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We choose a message_id satisfying 0 <= message_id < MESSAGE_LIMIT
@@ -636,7 +636,7 @@ mod test {
let rln_pointer = create_rln_instance();
// We generate a new identity pair
let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);
let (identity_secret_hash, id_commitment) = identity_pair_gen();
let user_message_limit = Fr::from(100);
let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);
@@ -659,9 +659,9 @@ mod test {
let signal2: [u8; 32] = rng.gen();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let epoch = hash_to_field_le(b"test-epoch");
// We generate a random rln_identifier
let rln_identifier = hash_to_field(b"test-rln-identifier");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
// We generate an external nullifier
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We choose a message_id satisfying 0 <= message_id < MESSAGE_LIMIT
@@ -719,7 +719,7 @@ mod test {
// We now test that computing identity_secret_hash is unsuccessful if shares computed from two different identity secret hashes within the same epoch are passed
// We generate a new identity pair
let (identity_secret_hash_new, id_commitment_new) = identity_pair_gen(rln_pointer);
let (identity_secret_hash_new, id_commitment_new) = identity_pair_gen();
let rate_commitment_new = utils_poseidon_hash(&[id_commitment_new, user_message_limit]);
// We set id_commitment_new as the next leaf; its index is 1 since id_commitment is at index 0
@@ -774,143 +774,10 @@ mod test {
);
}
#[test]
// Tests seeded keygen using FFI APIs
fn test_seeded_keygen_ffi() {
// We create a RLN instance
let rln_pointer = create_rln_instance();
// We generate a new identity pair from an input seed
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = seeded_key_gen(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_secret_hash, read) = bytes_le_to_fr(&result_data);
let (id_commitment, _) = bytes_le_to_fr(&result_data[read..]);
// We check against expected values
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
);
let expected_id_commitment_seed_bytes = str_to_fr(
"0xbf16d2b5c0d6f9d9d561e05bfca16a81b4b873bb063508fae360d8c74cef51f",
16,
);
assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes.unwrap()
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}
#[test]
// Tests seeded extended keygen using FFI APIs
fn test_seeded_extended_keygen_ffi() {
// We create a RLN instance
let rln_pointer = create_rln_instance();
// We generate a new identity tuple from an input seed
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success =
seeded_extended_key_gen(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
deserialize_identity_tuple(result_data);
// We check against expected values
let expected_identity_trapdoor_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
);
let expected_identity_nullifier_seed_bytes = str_to_fr(
"0x1f18714c7bc83b5bca9e89d404cf6f2f585bc4c0f7ed8b53742b7e2b298f50b4",
16,
);
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x2aca62aaa7abaf3686fff2caf00f55ab9462dc12db5b5d4bcf3994e671f8e521",
16,
);
let expected_id_commitment_seed_bytes = str_to_fr(
"0x68b66aa0a8320d2e56842581553285393188714c48f9b17acd198b4f1734c5c",
16,
);
assert_eq!(
identity_trapdoor,
expected_identity_trapdoor_seed_bytes.unwrap()
);
assert_eq!(
identity_nullifier,
expected_identity_nullifier_seed_bytes.unwrap()
);
assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes.unwrap()
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}
#[test]
// Tests hash to field using FFI APIs
fn test_hash_to_field_ffi() {
let mut rng = rand::thread_rng();
let signal: [u8; 32] = rng.gen();
// We hash the random signal using the FFI hash API
let input_buffer = &Buffer::from(signal.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_hash(input_buffer, output_buffer.as_mut_ptr());
assert!(success, "hash call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
// We read the returned hash
let serialized_hash = <&[u8]>::from(&output_buffer).to_vec();
let (hash1, _) = bytes_le_to_fr(&serialized_hash);
let hash2 = hash_to_field(&signal);
assert_eq!(hash1, hash2);
}
#[test]
// Test Poseidon hash FFI
fn test_poseidon_hash_ffi() {
// generate a random number in the range 1..ROUND_PARAMS.len()
let mut rng = thread_rng();
let number_of_inputs = rng.gen_range(1..ROUND_PARAMS.len());
let mut inputs = Vec::with_capacity(number_of_inputs);
for _ in 0..number_of_inputs {
inputs.push(Fr::rand(&mut rng));
}
let inputs_ser = vec_fr_to_bytes_le(&inputs);
let input_buffer = &Buffer::from(inputs_ser.as_ref());
let expected_hash = utils_poseidon_hash(inputs.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_poseidon_hash(input_buffer, output_buffer.as_mut_ptr());
assert!(success, "poseidon hash call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (received_hash, _) = bytes_le_to_fr(&result_data);
assert_eq!(received_hash, expected_hash);
}
#[test]
fn test_get_leaf_ffi() {
// We create a RLN instance
let no_of_leaves = 1 << TEST_TREE_HEIGHT;
let no_of_leaves = 1 << TEST_TREE_DEPTH;
// We create a RLN instance
let rln_pointer = create_rln_instance();
@@ -919,13 +786,12 @@ mod test {
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success =
seeded_extended_key_gen(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
let success = seeded_extended_key_gen(input_buffer, output_buffer.as_mut_ptr(), true);
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (_, _, _, id_commitment) = deserialize_identity_tuple(result_data);
let (_, _, _, id_commitment) = deserialize_identity_tuple_le(result_data);
// We insert the id_commitment into the tree at a random index
let mut rng = thread_rng();
@@ -989,10 +855,8 @@ mod stateless_test {
use rand::Rng;
use rln::circuit::*;
use rln::ffi::generate_rln_proof_with_witness;
use rln::ffi::{hash as ffi_hash, poseidon_hash as ffi_poseidon_hash, *};
use rln::hashers::{
hash_to_field, poseidon_hash as utils_poseidon_hash, PoseidonHash, ROUND_PARAMS,
};
use rln::ffi::*;
use rln::hashers::{hash_to_field_le, poseidon_hash as utils_poseidon_hash, PoseidonHash};
use rln::protocol::*;
use rln::public::RLN;
use rln::utils::*;
@@ -1009,9 +873,9 @@ mod stateless_test {
unsafe { &mut *rln_pointer.assume_init() }
}
fn identity_pair_gen(rln_pointer: &mut RLN) -> (IdSecret, Fr) {
fn identity_pair_gen() -> (IdSecret, Fr) {
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = key_gen(rln_pointer, output_buffer.as_mut_ptr());
let success = key_gen(output_buffer.as_mut_ptr(), true);
assert!(success, "key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
@@ -1034,7 +898,7 @@ mod stateless_test {
fn test_recover_id_secret_stateless_ffi() {
let default_leaf = Fr::from(0);
let mut tree: OptimalMerkleTree<PoseidonHash> = OptimalMerkleTree::new(
TEST_TREE_HEIGHT,
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<OptimalMerkleTree<PoseidonHash>>::default(),
)
@@ -1043,25 +907,25 @@ mod stateless_test {
let rln_pointer = create_rln_instance();
// We generate a new identity pair
let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);
let (identity_secret_hash, id_commitment) = identity_pair_gen();
let user_message_limit = Fr::from(100);
let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);
tree.update_next(rate_commitment).unwrap();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let rln_identifier = hash_to_field(b"test-rln-identifier");
let epoch = hash_to_field_le(b"test-epoch");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We generate two proofs using the same epoch but different signals.
// We generate a random signal
let mut rng = thread_rng();
let signal1: [u8; 32] = rng.gen();
let x1 = hash_to_field(&signal1);
let x1 = hash_to_field_le(&signal1);
let signal2: [u8; 32] = rng.gen();
let x2 = hash_to_field(&signal2);
let x2 = hash_to_field_le(&signal2);
let identity_index = tree.leaves_set();
let merkle_proof = tree.proof(identity_index).expect("proof should exist");
@@ -1124,13 +988,13 @@ mod stateless_test {
// We now test that recovering identity_secret_hash fails when the supplied shares were computed from two different identity secret hashes within the same epoch
// We generate a new identity pair
let (identity_secret_hash_new, id_commitment_new) = identity_pair_gen(rln_pointer);
let (identity_secret_hash_new, id_commitment_new) = identity_pair_gen();
let rate_commitment_new = utils_poseidon_hash(&[id_commitment_new, user_message_limit]);
tree.update_next(rate_commitment_new).unwrap();
// We generate a random signal
let signal3: [u8; 32] = rng.gen();
let x3 = hash_to_field(&signal3);
let x3 = hash_to_field_le(&signal3);
let identity_index_new = tree.leaves_set();
let merkle_proof_new = tree.proof(identity_index_new).expect("proof should exist");
@@ -1180,7 +1044,7 @@ mod stateless_test {
fn test_verify_with_roots_stateless_ffi() {
let default_leaf = Fr::from(0);
let mut tree: OptimalMerkleTree<PoseidonHash> = OptimalMerkleTree::new(
TEST_TREE_HEIGHT,
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<OptimalMerkleTree<PoseidonHash>>::default(),
)
@@ -1189,7 +1053,7 @@ mod stateless_test {
let rln_pointer = create_rln_instance();
// We generate a new identity pair
let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);
let (identity_secret_hash, id_commitment) = identity_pair_gen();
let identity_index = tree.leaves_set();
let user_message_limit = Fr::from(100);
@@ -1197,15 +1061,15 @@ mod stateless_test {
tree.update_next(rate_commitment).unwrap();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let rln_identifier = hash_to_field(b"test-rln-identifier");
let epoch = hash_to_field_le(b"test-epoch");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
// We generate two proofs using the same epoch but different signals.
// We generate a random signal
let mut rng = thread_rng();
let signal: [u8; 32] = rng.gen();
let x = hash_to_field(&signal);
let x = hash_to_field_le(&signal);
let merkle_proof = tree.proof(identity_index).expect("proof should exist");
@@ -1283,7 +1147,7 @@ mod stateless_test {
for _ in 0..sample_size {
// We generate random witness instances and the corresponding proof values
let rln_witness = random_rln_witness(TEST_TREE_HEIGHT);
let rln_witness = random_rln_witness(TEST_TREE_DEPTH);
let proof_values = proof_values_from_witness(&rln_witness).unwrap();
// We prepare id_commitment and we set the leaf at provided index
@@ -1323,18 +1187,29 @@ mod stateless_test {
Duration::from_nanos((verify_time / sample_size).try_into().unwrap())
);
}
}
#[cfg(test)]
mod general_tests {
use ark_std::{rand::thread_rng, UniformRand};
use rand::Rng;
use rln::circuit::*;
use rln::ffi::{hash as ffi_hash, poseidon_hash as ffi_poseidon_hash, *};
use rln::hashers::{
hash_to_field_be, hash_to_field_le, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS,
};
use rln::protocol::*;
use rln::utils::*;
use std::mem::MaybeUninit;
#[test]
// Tests seeded keygen using FFI APIs
fn test_seeded_keygen_stateless_ffi() {
// We create a RLN instance
let rln_pointer = create_rln_instance();
// We generate a new identity pair from an input seed
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = seeded_key_gen(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
let success = seeded_key_gen(input_buffer, output_buffer.as_mut_ptr(), true);
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
@@ -1358,23 +1233,47 @@ mod stateless_test {
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}
#[test]
fn test_seeded_keygen_big_endian_ffi() {
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = seeded_key_gen(input_buffer, output_buffer.as_mut_ptr(), false);
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_secret_hash, read) = bytes_be_to_fr(&result_data);
let (id_commitment, _) = bytes_be_to_fr(&result_data[read..]);
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
);
let expected_id_commitment_seed_bytes = str_to_fr(
"0xbf16d2b5c0d6f9d9d561e05bfca16a81b4b873bb063508fae360d8c74cef51f",
16,
);
assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes.unwrap()
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}
#[test]
// Tests seeded extended keygen using FFI APIs
fn test_seeded_extended_keygen_stateless_ffi() {
// We create a RLN instance
let rln_pointer = create_rln_instance();
// We generate a new identity tuple from an input seed
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success =
seeded_extended_key_gen(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
let success = seeded_extended_key_gen(input_buffer, output_buffer.as_mut_ptr(), true);
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
deserialize_identity_tuple(result_data);
deserialize_identity_tuple_le(result_data);
// We check against expected values
let expected_identity_trapdoor_seed_bytes = str_to_fr(
@@ -1409,6 +1308,50 @@ mod stateless_test {
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}
#[test]
fn test_seeded_extended_keygen_big_endian_ffi() {
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = seeded_extended_key_gen(input_buffer, output_buffer.as_mut_ptr(), false);
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
deserialize_identity_tuple_be(result_data);
let expected_identity_trapdoor_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
);
let expected_identity_nullifier_seed_bytes = str_to_fr(
"0x1f18714c7bc83b5bca9e89d404cf6f2f585bc4c0f7ed8b53742b7e2b298f50b4",
16,
);
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x2aca62aaa7abaf3686fff2caf00f55ab9462dc12db5b5d4bcf3994e671f8e521",
16,
);
let expected_id_commitment_seed_bytes = str_to_fr(
"0x68b66aa0a8320d2e56842581553285393188714c48f9b17acd198b4f1734c5c",
16,
);
assert_eq!(
identity_trapdoor,
expected_identity_trapdoor_seed_bytes.unwrap()
);
assert_eq!(
identity_nullifier,
expected_identity_nullifier_seed_bytes.unwrap()
);
assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes.unwrap()
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}
#[test]
// Tests hash to field using FFI APIs
fn test_hash_to_field_stateless_ffi() {
@@ -1418,7 +1361,7 @@ mod stateless_test {
// We prepare the input buffer with the signal bytes
let input_buffer = &Buffer::from(signal.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_hash(input_buffer, output_buffer.as_mut_ptr());
let success = ffi_hash(input_buffer, output_buffer.as_mut_ptr(), true);
assert!(success, "hash call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
@@ -1426,7 +1369,25 @@ mod stateless_test {
let serialized_hash = <&[u8]>::from(&output_buffer).to_vec();
let (hash1, _) = bytes_le_to_fr(&serialized_hash);
let hash2 = hash_to_field(&signal);
let hash2 = hash_to_field_le(&signal);
assert_eq!(hash1, hash2);
}
#[test]
fn test_hash_to_field_big_endian_ffi() {
let mut rng = rand::thread_rng();
let signal: [u8; 32] = rng.gen();
let input_buffer = &Buffer::from(signal.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_hash(input_buffer, output_buffer.as_mut_ptr(), false);
assert!(success, "hash call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let serialized_hash = <&[u8]>::from(&output_buffer).to_vec();
let (hash1, _) = bytes_be_to_fr(&serialized_hash);
let hash2 = hash_to_field_be(&signal);
assert_eq!(hash1, hash2);
}
@@ -1447,7 +1408,7 @@ mod stateless_test {
let expected_hash = utils_poseidon_hash(inputs.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_poseidon_hash(input_buffer, output_buffer.as_mut_ptr());
let success = ffi_poseidon_hash(input_buffer, output_buffer.as_mut_ptr(), true);
assert!(success, "poseidon hash call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
@@ -1456,4 +1417,28 @@ mod stateless_test {
assert_eq!(received_hash, expected_hash);
}
#[test]
fn test_poseidon_hash_big_endian_ffi() {
let mut rng = thread_rng();
let number_of_inputs = rng.gen_range(1..ROUND_PARAMS.len());
let mut inputs = Vec::with_capacity(number_of_inputs);
for _ in 0..number_of_inputs {
inputs.push(Fr::rand(&mut rng));
}
let inputs_ser = vec_fr_to_bytes_be(&inputs);
let input_buffer = &Buffer::from(inputs_ser.as_ref());
let expected_hash = utils_poseidon_hash(inputs.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_poseidon_hash(input_buffer, output_buffer.as_mut_ptr(), false);
assert!(success, "poseidon hash call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (received_hash, _) = bytes_be_to_fr(&result_data);
assert_eq!(received_hash, expected_hash);
}
}
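Taken together, the hunks above replace the `rln_pointer` argument of every hashing and keygen FFI entry point with a trailing endianness flag. A condensed sketch of the new call pattern, assuming `Buffer`, `hash`, and `bytes_le_to_fr` exactly as these tests import them:

```rust
use std::mem::MaybeUninit;

use rln::circuit::Fr;
use rln::ffi::{hash as ffi_hash, Buffer};
use rln::utils::bytes_le_to_fr;

// `true` selects little-endian encoding of the returned field element;
// `false` would select big-endian (paired with bytes_be_to_fr).
fn hash_signal_le(signal: &[u8]) -> Fr {
    let input_buffer = Buffer::from(signal);
    let mut output_buffer = MaybeUninit::<Buffer>::uninit();
    let success = ffi_hash(&input_buffer, output_buffer.as_mut_ptr(), true);
    assert!(success, "hash call failed");
    // The call reported success, so the buffer was initialized by the FFI side.
    let output_buffer = unsafe { output_buffer.assume_init() };
    let (hash, _) = bytes_le_to_fr(<&[u8]>::from(&output_buffer));
    hash
}
```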

View File

@@ -8,7 +8,7 @@
mod test {
use rln::hashers::{poseidon_hash, PoseidonHash};
use rln::{
circuit::{Fr, TEST_TREE_HEIGHT},
circuit::{Fr, TEST_TREE_DEPTH},
poseidon_tree::PoseidonTree,
};
use utils::{FullMerkleTree, OptimalMerkleTree, ZerokitMerkleProof, ZerokitMerkleTree};
@@ -19,8 +19,8 @@ mod test {
let sample_size = 100;
let leaves: Vec<Fr> = (0..sample_size).map(Fr::from).collect();
let mut tree_full = FullMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
let mut tree_opt = OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
let mut tree_full = FullMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
let mut tree_opt = OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
for (i, leave) in leaves
.into_iter()

View File

@@ -4,8 +4,8 @@
mod test {
use ark_ff::BigInt;
use rln::circuit::{graph_from_folder, zkey_from_folder};
use rln::circuit::{Fr, TEST_TREE_HEIGHT};
use rln::hashers::{hash_to_field, poseidon_hash};
use rln::circuit::{Fr, TEST_TREE_DEPTH};
use rln::hashers::{hash_to_field_le, poseidon_hash};
use rln::poseidon_tree::PoseidonTree;
use rln::protocol::{
deserialize_proof_values, deserialize_witness, generate_proof, keygen,
@@ -24,14 +24,14 @@ mod test {
let leaf_index = 3;
// generate identity
let identity_secret_hash = hash_to_field(b"test-merkle-proof");
let identity_secret_hash = hash_to_field_le(b"test-merkle-proof");
let id_commitment = poseidon_hash(&[identity_secret_hash]);
let rate_commitment = poseidon_hash(&[id_commitment, 100.into()]);
// generate merkle tree
let default_leaf = Fr::from(0);
let mut tree = PoseidonTree::new(
TEST_TREE_HEIGHT,
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<PoseidonTree>::default(),
)
@@ -102,7 +102,7 @@ mod test {
//// generate merkle tree
let default_leaf = Fr::from(0);
let mut tree = PoseidonTree::new(
TEST_TREE_HEIGHT,
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<PoseidonTree>::default(),
)
@@ -112,11 +112,11 @@ mod test {
let merkle_proof = tree.proof(leaf_index).expect("proof should exist");
let signal = b"hey hey";
let x = hash_to_field(signal);
let x = hash_to_field_le(signal);
// We set the remaining values to random ones
let epoch = hash_to_field(b"test-epoch");
let rln_identifier = hash_to_field(b"test-rln-identifier");
let epoch = hash_to_field_le(b"test-epoch");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);
rln_witness_from_values(

View File

@@ -4,8 +4,9 @@ mod test {
use {
ark_ff::BigInt,
rln::{
circuit::TEST_TREE_HEIGHT,
circuit::TEST_TREE_DEPTH,
protocol::compute_tree_root,
public::RLN,
utils::{
bytes_le_to_vec_fr, bytes_le_to_vec_u8, bytes_le_to_vec_usize, fr_to_bytes_le,
generate_input_buffer, IdSecret,
@@ -17,10 +18,20 @@ mod test {
use ark_std::{rand::thread_rng, UniformRand};
use rand::Rng;
use rln::circuit::Fr;
use rln::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS};
use rln::protocol::deserialize_identity_tuple;
use rln::public::{hash as public_hash, poseidon_hash as public_poseidon_hash, RLN};
use rln::utils::{bytes_le_to_fr, str_to_fr, vec_fr_to_bytes_le};
use rln::hashers::{
hash_to_field_be, hash_to_field_le, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS,
};
use rln::protocol::{
deserialize_identity_pair_be, deserialize_identity_pair_le, deserialize_identity_tuple_be,
deserialize_identity_tuple_le,
};
use rln::public::{
hash as public_hash, poseidon_hash as public_poseidon_hash, seeded_extended_key_gen,
seeded_key_gen,
};
use rln::utils::{
bytes_be_to_fr, bytes_le_to_fr, str_to_fr, vec_fr_to_bytes_be, vec_fr_to_bytes_le,
};
use std::io::Cursor;
#[test]
@@ -30,10 +41,10 @@ mod test {
let leaf_index = 3;
let user_message_limit = 1;
let mut rln = RLN::new(TEST_TREE_HEIGHT, generate_input_buffer()).unwrap();
let mut rln = RLN::new(TEST_TREE_DEPTH, generate_input_buffer()).unwrap();
// generate identity
let mut identity_secret_hash_ = hash_to_field(b"test-merkle-proof");
let mut identity_secret_hash_ = hash_to_field_le(b"test-merkle-proof");
let identity_secret_hash = IdSecret::from(&mut identity_secret_hash_);
let mut to_hash = [*identity_secret_hash.clone()];
@@ -115,9 +126,9 @@ mod test {
// check subtree root computation for leaf 0 at every level up to the root
let l_idx = 0;
for n in (1..=TEST_TREE_HEIGHT).rev() {
let idx_l = l_idx * (1 << (TEST_TREE_HEIGHT - n));
let idx_r = (l_idx + 1) * (1 << (TEST_TREE_HEIGHT - n));
for n in (1..=TEST_TREE_DEPTH).rev() {
let idx_l = l_idx * (1 << (TEST_TREE_DEPTH - n));
let idx_r = (l_idx + 1) * (1 << (TEST_TREE_DEPTH - n));
let idx_sr = idx_l;
let mut buffer = Cursor::new(Vec::<u8>::new());
@@ -149,19 +160,46 @@ mod test {
#[test]
fn test_seeded_keygen() {
let rln = RLN::default();
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let mut input_buffer = Cursor::new(&seed_bytes);
let mut output_buffer = Cursor::new(Vec::<u8>::new());
rln.seeded_key_gen(&mut input_buffer, &mut output_buffer)
.unwrap();
seeded_key_gen(&mut input_buffer, &mut output_buffer, true).unwrap();
let serialized_output = output_buffer.into_inner();
let (identity_secret_hash, read) = bytes_le_to_fr(&serialized_output);
let (id_commitment, _) = bytes_le_to_fr(&serialized_output[read..]);
let (identity_secret_hash, id_commitment) = deserialize_identity_pair_le(serialized_output);
// We check against expected values
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
)
.unwrap();
let expected_id_commitment_seed_bytes = str_to_fr(
"0xbf16d2b5c0d6f9d9d561e05bfca16a81b4b873bb063508fae360d8c74cef51f",
16,
)
.unwrap();
assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes);
}
#[test]
fn test_seeded_keygen_big_endian() {
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let mut input_buffer = Cursor::new(&seed_bytes);
let mut output_buffer = Cursor::new(Vec::<u8>::new());
seeded_key_gen(&mut input_buffer, &mut output_buffer, false).unwrap();
let serialized_output = output_buffer.into_inner();
let (identity_secret_hash, id_commitment) = deserialize_identity_pair_be(serialized_output);
// We check against expected values
let expected_identity_secret_hash_seed_bytes = str_to_fr(
@@ -184,19 +222,60 @@ mod test {
#[test]
fn test_seeded_extended_keygen() {
let rln = RLN::default();
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let mut input_buffer = Cursor::new(&seed_bytes);
let mut output_buffer = Cursor::new(Vec::<u8>::new());
rln.seeded_extended_key_gen(&mut input_buffer, &mut output_buffer)
.unwrap();
seeded_extended_key_gen(&mut input_buffer, &mut output_buffer, true).unwrap();
let serialized_output = output_buffer.into_inner();
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
deserialize_identity_tuple(serialized_output);
deserialize_identity_tuple_le(serialized_output);
// We check against expected values
let expected_identity_trapdoor_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
)
.unwrap();
let expected_identity_nullifier_seed_bytes = str_to_fr(
"0x1f18714c7bc83b5bca9e89d404cf6f2f585bc4c0f7ed8b53742b7e2b298f50b4",
16,
)
.unwrap();
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x2aca62aaa7abaf3686fff2caf00f55ab9462dc12db5b5d4bcf3994e671f8e521",
16,
)
.unwrap();
let expected_id_commitment_seed_bytes = str_to_fr(
"0x68b66aa0a8320d2e56842581553285393188714c48f9b17acd198b4f1734c5c",
16,
)
.unwrap();
assert_eq!(identity_trapdoor, expected_identity_trapdoor_seed_bytes);
assert_eq!(identity_nullifier, expected_identity_nullifier_seed_bytes);
assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes);
}
#[test]
fn test_seeded_extended_keygen_big_endian() {
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let mut input_buffer = Cursor::new(&seed_bytes);
let mut output_buffer = Cursor::new(Vec::<u8>::new());
seeded_extended_key_gen(&mut input_buffer, &mut output_buffer, false).unwrap();
let serialized_output = output_buffer.into_inner();
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
deserialize_identity_tuple_be(serialized_output);
// We check against expected values
let expected_identity_trapdoor_seed_bytes = str_to_fr(
@@ -237,11 +316,28 @@ mod test {
let mut input_buffer = Cursor::new(&signal);
let mut output_buffer = Cursor::new(Vec::<u8>::new());
public_hash(&mut input_buffer, &mut output_buffer).unwrap();
public_hash(&mut input_buffer, &mut output_buffer, true).unwrap();
let serialized_hash = output_buffer.into_inner();
let (hash1, _) = bytes_le_to_fr(&serialized_hash);
let hash2 = hash_to_field(&signal);
let hash2 = hash_to_field_le(&signal);
assert_eq!(hash1, hash2);
}
#[test]
fn test_hash_to_field_big_endian() {
let mut rng = thread_rng();
let signal: [u8; 32] = rng.gen();
let mut input_buffer = Cursor::new(&signal);
let mut output_buffer = Cursor::new(Vec::<u8>::new());
public_hash(&mut input_buffer, &mut output_buffer, false).unwrap();
let serialized_hash = output_buffer.into_inner();
let (hash1, _) = bytes_be_to_fr(&serialized_hash);
let hash2 = hash_to_field_be(&signal);
assert_eq!(hash1, hash2);
}
@@ -259,10 +355,30 @@ mod test {
let mut input_buffer = Cursor::new(vec_fr_to_bytes_le(&inputs));
let mut output_buffer = Cursor::new(Vec::<u8>::new());
public_poseidon_hash(&mut input_buffer, &mut output_buffer).unwrap();
public_poseidon_hash(&mut input_buffer, &mut output_buffer, true).unwrap();
let serialized_hash = output_buffer.into_inner();
let (hash, _) = bytes_le_to_fr(&serialized_hash);
assert_eq!(hash, expected_hash);
}
#[test]
fn test_poseidon_hash_big_endian() {
let mut rng = thread_rng();
let number_of_inputs = rng.gen_range(1..ROUND_PARAMS.len());
let mut inputs = Vec::with_capacity(number_of_inputs);
for _ in 0..number_of_inputs {
inputs.push(Fr::rand(&mut rng));
}
let expected_hash = utils_poseidon_hash(&inputs);
let mut input_buffer = Cursor::new(vec_fr_to_bytes_be(&inputs));
let mut output_buffer = Cursor::new(Vec::<u8>::new());
public_poseidon_hash(&mut input_buffer, &mut output_buffer, false).unwrap();
let serialized_hash = output_buffer.into_inner();
let (hash, _) = bytes_be_to_fr(&serialized_hash);
assert_eq!(hash, expected_hash);
}
}
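The public (non-FFI) API mirrors the same change: the former `RLN` methods become free functions over `Read`/`Write` streams plus the endianness flag. A minimal sketch, assuming the imports these tests use:

```rust
use std::io::Cursor;

use rln::protocol::deserialize_identity_pair_le;
use rln::public::seeded_key_gen;

fn keygen_from_seed_le(seed: &[u8]) {
    let mut input_buffer = Cursor::new(seed);
    let mut output_buffer = Cursor::new(Vec::<u8>::new());
    // `true` selects little-endian serialization of the resulting pair.
    seeded_key_gen(&mut input_buffer, &mut output_buffer, true).unwrap();
    // Output layout: identity_secret_hash || id_commitment.
    let (_identity_secret_hash, _id_commitment) =
        deserialize_identity_pair_le(output_buffer.into_inner());
}
```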

rln/tests/utils.rs (new file, 411 lines added)
View File

@@ -0,0 +1,411 @@
#[cfg(test)]
mod test {
use rln::utils::{
bytes_be_to_fr, bytes_be_to_vec_fr, bytes_be_to_vec_u8, bytes_be_to_vec_usize,
bytes_le_to_fr, bytes_le_to_vec_fr, bytes_le_to_vec_u8, bytes_le_to_vec_usize,
fr_to_bytes_be, fr_to_bytes_le, normalize_usize_be, normalize_usize_le, str_to_fr,
vec_fr_to_bytes_be, vec_fr_to_bytes_le, vec_u8_to_bytes_be, vec_u8_to_bytes_le,
};
use ark_std::{rand::thread_rng, UniformRand};
use rln::circuit::Fr;
#[test]
fn test_normalize_usize_le() {
// Test basic cases
assert_eq!(normalize_usize_le(0), [0, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_le(1), [1, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_le(255), [255, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_le(256), [0, 1, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_le(65535), [255, 255, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_le(65536), [0, 0, 1, 0, 0, 0, 0, 0]);
// Test 32-bit boundary
assert_eq!(
normalize_usize_le(4294967295),
[255, 255, 255, 255, 0, 0, 0, 0]
);
assert_eq!(normalize_usize_le(4294967296), [0, 0, 0, 0, 1, 0, 0, 0]);
// Test maximum value
assert_eq!(
normalize_usize_le(usize::MAX),
[255, 255, 255, 255, 255, 255, 255, 255]
);
// Test that result is always 8 bytes
assert_eq!(normalize_usize_le(0).len(), 8);
assert_eq!(normalize_usize_le(usize::MAX).len(), 8);
}
#[test]
fn test_normalize_usize_be() {
// Test basic cases
assert_eq!(normalize_usize_be(0), [0, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_be(1), [0, 0, 0, 0, 0, 0, 0, 1]);
assert_eq!(normalize_usize_be(255), [0, 0, 0, 0, 0, 0, 0, 255]);
assert_eq!(normalize_usize_be(256), [0, 0, 0, 0, 0, 0, 1, 0]);
assert_eq!(normalize_usize_be(65535), [0, 0, 0, 0, 0, 0, 255, 255]);
assert_eq!(normalize_usize_be(65536), [0, 0, 0, 0, 0, 1, 0, 0]);
// Test 32-bit boundary
assert_eq!(
normalize_usize_be(4294967295),
[0, 0, 0, 0, 255, 255, 255, 255]
);
assert_eq!(normalize_usize_be(4294967296), [0, 0, 0, 1, 0, 0, 0, 0]);
// Test maximum value
assert_eq!(
normalize_usize_be(usize::MAX),
[255, 255, 255, 255, 255, 255, 255, 255]
);
// Test that result is always 8 bytes
assert_eq!(normalize_usize_be(0).len(), 8);
assert_eq!(normalize_usize_be(usize::MAX).len(), 8);
}
#[test]
fn test_normalize_usize_endianness() {
// Test that little-endian and big-endian produce different results for non-zero values
let test_values = vec![1, 255, 256, 65535, 65536, 4294967295, 4294967296];
for &value in &test_values {
let le_result = normalize_usize_le(value);
let be_result = normalize_usize_be(value);
// For non-zero values, LE and BE should be different
assert_ne!(
le_result, be_result,
"LE and BE should differ for value {value}"
);
// Both should be 8 bytes
assert_eq!(le_result.len(), 8);
assert_eq!(be_result.len(), 8);
}
// Zero should be the same in both endianness
assert_eq!(normalize_usize_le(0), normalize_usize_be(0));
}
#[test]
fn test_normalize_usize_roundtrip() {
// Test that we can reconstruct the original value from the normalized bytes
let test_values = vec![
0,
1,
255,
256,
65535,
65536,
4294967295,
4294967296,
usize::MAX,
];
for &value in &test_values {
let le_bytes = normalize_usize_le(value);
let be_bytes = normalize_usize_be(value);
// Reconstruct from little-endian bytes
let reconstructed_le = usize::from_le_bytes(le_bytes);
assert_eq!(
reconstructed_le, value,
"LE roundtrip failed for value {value}"
);
// Reconstruct from big-endian bytes
let reconstructed_be = usize::from_be_bytes(be_bytes);
assert_eq!(
reconstructed_be, value,
"BE roundtrip failed for value {value}"
);
}
}
#[test]
fn test_normalize_usize_edge_cases() {
// Test edge cases and boundary values
let edge_cases = vec![
0,
1,
255,
256,
65535,
65536,
16777215, // 2^24 - 1
16777216, // 2^24
4294967295, // 2^32 - 1
4294967296, // 2^32
1099511627775, // 2^40 - 1
1099511627776, // 2^40
281474976710655, // 2^48 - 1
281474976710656, // 2^48
72057594037927935, // 2^56 - 1
72057594037927936, // 2^56
usize::MAX,
];
for &value in &edge_cases {
let le_result = normalize_usize_le(value);
let be_result = normalize_usize_be(value);
// Both should be 8 bytes
assert_eq!(le_result.len(), 8);
assert_eq!(be_result.len(), 8);
// Roundtrip should work
assert_eq!(usize::from_le_bytes(le_result), value);
assert_eq!(usize::from_be_bytes(be_result), value);
}
}
#[test]
fn test_normalize_usize_architecture_independence() {
// Test that the functions work consistently regardless of the underlying architecture
// This test ensures that the functions provide consistent 8-byte output
// even on 32-bit systems where usize might be 4 bytes
let test_values = vec![0, 1, 255, 256, 65535, 65536, 4294967295, 4294967296];
for &value in &test_values {
let le_result = normalize_usize_le(value);
let be_result = normalize_usize_be(value);
// Always 8 bytes regardless of architecture
assert_eq!(le_result.len(), 8);
assert_eq!(be_result.len(), 8);
// The result should be consistent with the original value
assert_eq!(usize::from_le_bytes(le_result), value);
assert_eq!(usize::from_be_bytes(be_result), value);
}
}
#[test]
fn test_fr_serialization_roundtrip() {
let mut rng = thread_rng();
// Test multiple random Fr values
for _ in 0..10 {
let fr = Fr::rand(&mut rng);
// Test little-endian roundtrip
let le_bytes = fr_to_bytes_le(&fr);
let (reconstructed_le, _) = bytes_le_to_fr(&le_bytes);
assert_eq!(fr, reconstructed_le);
// Test big-endian roundtrip
let be_bytes = fr_to_bytes_be(&fr);
let (reconstructed_be, _) = bytes_be_to_fr(&be_bytes);
assert_eq!(fr, reconstructed_be);
}
}
#[test]
fn test_vec_fr_serialization_roundtrip() {
let mut rng = thread_rng();
// Test with different vector sizes
for size in [0, 1, 5, 10] {
let fr_vec: Vec<Fr> = (0..size).map(|_| Fr::rand(&mut rng)).collect();
// Test little-endian roundtrip
let le_bytes = vec_fr_to_bytes_le(&fr_vec);
let (reconstructed_le, _) = bytes_le_to_vec_fr(&le_bytes).unwrap();
assert_eq!(fr_vec, reconstructed_le);
// Test big-endian roundtrip
let be_bytes = vec_fr_to_bytes_be(&fr_vec);
let (reconstructed_be, _) = bytes_be_to_vec_fr(&be_bytes).unwrap();
assert_eq!(fr_vec, reconstructed_be);
}
}
#[test]
fn test_vec_u8_serialization_roundtrip() {
// Test with different vector sizes and content
let test_cases = vec![
vec![],
vec![0],
vec![255],
vec![1, 2, 3, 4, 5],
vec![0, 255, 128, 64, 32, 16, 8, 4, 2, 1],
(0..100).collect::<Vec<u8>>(),
];
for test_case in test_cases {
// Test little-endian roundtrip
let le_bytes = vec_u8_to_bytes_le(&test_case);
let (reconstructed_le, _) = bytes_le_to_vec_u8(&le_bytes).unwrap();
assert_eq!(test_case, reconstructed_le);
// Test big-endian roundtrip
let be_bytes = vec_u8_to_bytes_be(&test_case);
let (reconstructed_be, _) = bytes_be_to_vec_u8(&be_bytes).unwrap();
assert_eq!(test_case, reconstructed_be);
}
}
#[test]
fn test_vec_usize_serialization_roundtrip() {
// Test with different vector sizes and content
let test_cases = vec![
vec![],
vec![0],
vec![usize::MAX],
vec![1, 2, 3, 4, 5],
vec![0, 255, 65535, 4294967295, usize::MAX],
(0..10).collect::<Vec<usize>>(),
];
for test_case in test_cases {
// Test little-endian roundtrip
let le_bytes = {
let mut bytes = Vec::new();
bytes.extend_from_slice(&normalize_usize_le(test_case.len()));
for &value in &test_case {
bytes.extend_from_slice(&normalize_usize_le(value));
}
bytes
};
let reconstructed_le = bytes_le_to_vec_usize(&le_bytes).unwrap();
assert_eq!(test_case, reconstructed_le);
// Test big-endian roundtrip
let be_bytes = {
let mut bytes = Vec::new();
bytes.extend_from_slice(&normalize_usize_be(test_case.len()));
for &value in &test_case {
bytes.extend_from_slice(&normalize_usize_be(value));
}
bytes
};
let reconstructed_be = bytes_be_to_vec_usize(&be_bytes).unwrap();
assert_eq!(test_case, reconstructed_be);
}
}
#[test]
fn test_str_to_fr() {
// Test valid hex strings
let test_cases = vec![
("0x0", 16, Fr::from(0u64)),
("0x1", 16, Fr::from(1u64)),
("0xff", 16, Fr::from(255u64)),
("0x100", 16, Fr::from(256u64)),
];
for (input, radix, expected) in test_cases {
let result = str_to_fr(input, radix).unwrap();
assert_eq!(result, expected);
}
// Test invalid inputs
assert!(str_to_fr("invalid", 16).is_err());
assert!(str_to_fr("0x", 16).is_err());
}
#[test]
fn test_endianness_differences() {
let mut rng = thread_rng();
let fr = Fr::rand(&mut rng);
// Test that LE and BE produce different byte representations
let le_bytes = fr_to_bytes_le(&fr);
let be_bytes = fr_to_bytes_be(&fr);
// They should differ unless the byte representation is palindromic
if le_bytes != be_bytes {
// Verify they can both be reconstructed correctly
let (reconstructed_le, _) = bytes_le_to_fr(&le_bytes);
let (reconstructed_be, _) = bytes_be_to_fr(&be_bytes);
assert_eq!(fr, reconstructed_le);
assert_eq!(fr, reconstructed_be);
}
}
#[test]
fn test_error_handling() {
// Test with a zero length prefix and no element data (a valid encoding of an empty vector)
let valid_length_invalid_data = vec![0u8; 8]; // Length prefix 0; no element data required
assert!(bytes_le_to_vec_u8(&valid_length_invalid_data).is_ok());
assert!(bytes_be_to_vec_u8(&valid_length_invalid_data).is_ok());
assert!(bytes_le_to_vec_fr(&valid_length_invalid_data).is_ok());
assert!(bytes_be_to_vec_fr(&valid_length_invalid_data).is_ok());
assert!(bytes_le_to_vec_usize(&valid_length_invalid_data).is_ok());
assert!(bytes_be_to_vec_usize(&valid_length_invalid_data).is_ok());
// Test with reasonable length but insufficient data for vector deserialization
let reasonable_length = {
let mut bytes = vec![0u8; 8];
bytes[0] = 1; // Length 1
bytes
};
// This should fail because we don't have enough data for the vector elements
assert!(bytes_le_to_vec_u8(&reasonable_length).is_err());
assert!(bytes_be_to_vec_u8(&reasonable_length).is_err());
assert!(bytes_le_to_vec_fr(&reasonable_length).is_err());
assert!(bytes_be_to_vec_fr(&reasonable_length).is_err());
assert!(bytes_le_to_vec_usize(&reasonable_length).is_err());
assert!(bytes_be_to_vec_usize(&reasonable_length).is_err());
// Test with valid data for u8 vector
let valid_u8_data_le = {
let mut bytes = vec![0u8; 9];
bytes[..8].copy_from_slice(&(1u64.to_le_bytes())); // Length 1, little-endian
bytes[8] = 42; // One byte of data
bytes
};
let valid_u8_data_be = {
let mut bytes = vec![0u8; 9];
bytes[..8].copy_from_slice(&(1u64.to_be_bytes())); // Length 1, big-endian
bytes[8] = 42; // One byte of data
bytes
};
assert!(bytes_le_to_vec_u8(&valid_u8_data_le).is_ok());
assert!(bytes_be_to_vec_u8(&valid_u8_data_be).is_ok());
}
#[test]
fn test_empty_vectors() {
// Test empty vector serialization/deserialization
let empty_fr: Vec<Fr> = vec![];
let empty_u8: Vec<u8> = vec![];
let empty_usize: Vec<usize> = vec![];
// Test Fr vectors
let le_fr_bytes = vec_fr_to_bytes_le(&empty_fr);
let be_fr_bytes = vec_fr_to_bytes_be(&empty_fr);
let (reconstructed_le_fr, _) = bytes_le_to_vec_fr(&le_fr_bytes).unwrap();
let (reconstructed_be_fr, _) = bytes_be_to_vec_fr(&be_fr_bytes).unwrap();
assert_eq!(empty_fr, reconstructed_le_fr);
assert_eq!(empty_fr, reconstructed_be_fr);
// Test u8 vectors
let le_u8_bytes = vec_u8_to_bytes_le(&empty_u8);
let be_u8_bytes = vec_u8_to_bytes_be(&empty_u8);
let (reconstructed_le_u8, _) = bytes_le_to_vec_u8(&le_u8_bytes).unwrap();
let (reconstructed_be_u8, _) = bytes_be_to_vec_u8(&be_u8_bytes).unwrap();
assert_eq!(empty_u8, reconstructed_le_u8);
assert_eq!(empty_u8, reconstructed_be_u8);
// Test usize vectors
let le_usize_bytes = {
let mut bytes = Vec::new();
bytes.extend_from_slice(&normalize_usize_le(0));
bytes
};
let be_usize_bytes = {
let mut bytes = Vec::new();
bytes.extend_from_slice(&normalize_usize_be(0));
bytes
};
let reconstructed_le_usize = bytes_le_to_vec_usize(&le_usize_bytes).unwrap();
let reconstructed_be_usize = bytes_be_to_vec_usize(&be_usize_bytes).unwrap();
assert_eq!(empty_usize, reconstructed_le_usize);
assert_eq!(empty_usize, reconstructed_be_usize);
}
}
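This suite pins down the helpers' contract: `normalize_usize_le`/`normalize_usize_be` always emit eight bytes regardless of the platform's `usize` width, and the vector serializers prepend that 8-byte count to the element data. A minimal implementation consistent with these tests (a sketch, not necessarily the crate's actual code):

```rust
// Widening through u64 keeps the output at 8 bytes even on 32-bit targets,
// matching test_normalize_usize_architecture_independence above.
fn normalize_usize_le(value: usize) -> [u8; 8] {
    (value as u64).to_le_bytes()
}

fn normalize_usize_be(value: usize) -> [u8; 8] {
    (value as u64).to_be_bytes()
}
```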

View File

@@ -14,12 +14,12 @@ bench = false
[dependencies]
ark-ff = { version = "0.5.0", default-features = false }
num-bigint = { version = "0.4.6", default-features = false }
pmtree = { package = "vacp2p_pmtree", version = "2.0.2", optional = true }
pmtree = { package = "vacp2p_pmtree", version = "2.0.3", optional = true }
sled = "0.34.7"
serde_json = "1.0.141"
lazy_static = "1.5.0"
hex = "0.4.3"
rayon = "1.7.0"
rayon = "1.10.0"
thiserror = "2.0"
[dev-dependencies]

View File

@@ -26,6 +26,8 @@ pub enum ZerokitMerkleTreeError {
pub enum FromConfigError {
#[error("Error while reading pmtree config: {0}")]
JsonError(#[from] serde_json::Error),
#[error("Error while creating pmtree config: missing path")]
MissingPath,
#[error("Error while creating pmtree config: path already exists")]
PathExists,
}
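The two new variants give pmtree config construction distinct failure modes instead of a single JSON error. A sketch of how a caller might dispatch on them (the import path for `FromConfigError` is assumed, not taken from the diff):

```rust
use utils::FromConfigError; // hypothetical import path

fn describe(err: &FromConfigError) -> &'static str {
    match err {
        FromConfigError::JsonError(_) => "malformed pmtree config JSON",
        FromConfigError::MissingPath => "pmtree config is missing a path",
        FromConfigError::PathExists => "pmtree config path already exists",
    }
}
```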

View File

@@ -78,7 +78,7 @@ where
}
/// Creates a new `MerkleTree`
/// depth - the height of the tree made only of hash nodes. 2^depth is the maximum number of leaves hash nodes
/// depth - the depth of the tree made only of hash nodes; 2^depth is the maximum number of leaf nodes
fn new(
depth: usize,
default_leaf: FrOf<Self::Hasher>,
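Rename aside, the parameter keeps its meaning: a tree of depth `d` holds at most 2^d leaves. A quick illustration, assuming the `OptimalMerkleTree`/`PoseidonHash` combination used by the tests earlier in this diff:

```rust
use rln::circuit::Fr;
use rln::hashers::PoseidonHash;
use utils::{OptimalMerkleTree, ZerokitMerkleTree};

fn demo() {
    // depth = 10 gives room for 2^10 = 1024 leaves.
    let mut tree = OptimalMerkleTree::<PoseidonHash>::default(10).unwrap();
    tree.update_next(Fr::from(1)).unwrap();
    assert_eq!(tree.leaves_set(), 1);
}
```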

View File

@@ -70,7 +70,7 @@ where
}
/// Creates a new `MerkleTree`
/// depth - the height of the tree made only of hash nodes. 2^depth is the maximum number of leaves hash nodes
/// depth - the depth of the tree made only of hash nodes; 2^depth is the maximum number of leaf nodes
fn new(
depth: usize,
default_leaf: H::Fr,
@@ -296,7 +296,7 @@ where
*self
.nodes
.get(&(depth, index))
.unwrap_or_else(|| &self.cached_nodes[depth])
.unwrap_or(&self.cached_nodes[depth])
}
/// Computes the hash of a node's two children at the given depth.
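Finally, the `unwrap_or_else` → `unwrap_or` swap is the standard clippy `unnecessary_lazy_evaluations` cleanup: the fallback is a cheap slice index, so eager evaluation costs nothing. A self-contained illustration of the same pattern (names are illustrative, not from the crate):

```rust
use std::collections::HashMap;

// Prefer the stored node, otherwise fall back to the cached default for
// that depth. The fallback is a cheap borrow, so eager `unwrap_or` is fine;
// `unwrap_or_else` is only worth keeping when the fallback is expensive to build.
fn node_at(nodes: &HashMap<usize, u64>, cached: &[u64], depth: usize) -> u64 {
    *nodes.get(&depth).unwrap_or(&cached[depth])
}
```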