mirror of
https://github.com/vacp2p/zerokit.git
synced 2026-01-09 13:47:58 -05:00
Compare commits
147 Commits
release/v0
...
zerokit_ut
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
31331f8a93 | ||
|
|
820240d8c0 | ||
|
|
fe2b224981 | ||
|
|
d3d85c3e3c | ||
|
|
0005b1d61f | ||
|
|
4931b25237 | ||
|
|
652cc3647e | ||
|
|
51939be4a8 | ||
|
|
cd60af5b52 | ||
|
|
8581ac0b78 | ||
|
|
5937a67ee6 | ||
|
|
d96eb59e92 | ||
|
|
a372053047 | ||
|
|
b450bfdb37 | ||
|
|
0521c7349e | ||
|
|
d91a5b3568 | ||
|
|
cf9dbb419d | ||
|
|
aaa12db70d | ||
|
|
30d5f94181 | ||
|
|
ccd2ead847 | ||
|
|
7669d72f9b | ||
|
|
b5760697bc | ||
|
|
5c4e3fc13c | ||
|
|
a92d6428d6 | ||
|
|
e6db05f27c | ||
|
|
25f822e779 | ||
|
|
0997d15d33 | ||
|
|
8614b2a33a | ||
|
|
b903d8d740 | ||
|
|
f73c83b571 | ||
|
|
a86b859b75 | ||
|
|
f8fc455d08 | ||
|
|
b51896c3a7 | ||
|
|
0c5ef6abcf | ||
|
|
a1c292cb2e | ||
|
|
c6c1bfde91 | ||
|
|
bf3d1d3309 | ||
|
|
7110e00674 | ||
|
|
99966d1a6e | ||
|
|
7d63912ace | ||
|
|
ef1da42d94 | ||
|
|
ecb4d9307f | ||
|
|
d1414a44c5 | ||
|
|
6d58320077 | ||
|
|
be2dccfdd0 | ||
|
|
9d4ed68450 | ||
|
|
5cf2b2e05e | ||
|
|
36158e8d08 | ||
|
|
c8cf033f32 | ||
|
|
23d2331b78 | ||
|
|
c6b7a8c0a4 | ||
|
|
4ec93c5e1f | ||
|
|
c83c9902d7 | ||
|
|
131cacab35 | ||
|
|
8a365f0c9e | ||
|
|
c561741339 | ||
|
|
90fdfb9d78 | ||
|
|
56b9285fef | ||
|
|
be88a432d7 | ||
|
|
8cfd83de54 | ||
|
|
2793fe0e24 | ||
|
|
0d35571215 | ||
|
|
9cc86e526e | ||
|
|
ecd056884c | ||
|
|
96497db7c5 | ||
|
|
ba8f011cc1 | ||
|
|
9dc92ec1ce | ||
|
|
75d760c179 | ||
|
|
72a3ce1770 | ||
|
|
b841e725a0 | ||
|
|
3177e3ae74 | ||
|
|
2c4de0484a | ||
|
|
fcd4854037 | ||
|
|
d68dc1ad8e | ||
|
|
8c3d60ed01 | ||
|
|
c2d386cb74 | ||
|
|
8f2c9e3586 | ||
|
|
584c2cf4c0 | ||
|
|
2c4b399126 | ||
|
|
c4b699ddff | ||
|
|
33d3732922 | ||
|
|
654c77dcf6 | ||
|
|
783f875d3b | ||
|
|
fd7d7d9318 | ||
|
|
4f98fd8028 | ||
|
|
9931e901e5 | ||
|
|
0fb7e0bbcb | ||
|
|
672287b77b | ||
|
|
2e868d6cbf | ||
|
|
39bea35a6d | ||
|
|
6ff4eeb237 | ||
|
|
1f983bb232 | ||
|
|
13a2c61355 | ||
|
|
2bbb710e83 | ||
|
|
8cd4baba8a | ||
|
|
9045e31006 | ||
|
|
9e44bb64dc | ||
|
|
bb7dfb80ee | ||
|
|
c319f32a1e | ||
|
|
bf2aa16a71 | ||
|
|
c423bdea61 | ||
|
|
5eb98d4b33 | ||
|
|
b698153e28 | ||
|
|
a6c8090c93 | ||
|
|
7ee7675d52 | ||
|
|
062055dc5e | ||
|
|
55b00fd653 | ||
|
|
62018b4eba | ||
|
|
48fa1b9b3d | ||
|
|
a6145ab201 | ||
|
|
e21e9954ac | ||
|
|
de5eb2066a | ||
|
|
7aba62ff51 | ||
|
|
cbf8c541c2 | ||
|
|
5bcbc6c22f | ||
|
|
01fdba6d88 | ||
|
|
1502315605 | ||
|
|
92c431c98f | ||
|
|
005393d696 | ||
|
|
89ea87a98a | ||
|
|
32f3202e9d | ||
|
|
e69f6a67d8 | ||
|
|
9e1355d36a | ||
|
|
3551435d60 | ||
|
|
60e3369621 | ||
|
|
284e51483c | ||
|
|
3427729f7e | ||
|
|
e1c16c9c3f | ||
|
|
bc69e25359 | ||
|
|
6a7808d911 | ||
|
|
25bcb7991b | ||
|
|
3d943bccb6 | ||
|
|
fba905f45d | ||
|
|
490206aa44 | ||
|
|
afa4a09bba | ||
|
|
b95b151a1c | ||
|
|
b77facc5e9 | ||
|
|
5d429ca031 | ||
|
|
1df6c53ca0 | ||
|
|
878c3c5c5f | ||
|
|
a5aa4e8d4f | ||
|
|
bbacc9dcce | ||
|
|
c42fcfe644 | ||
|
|
99a7eb003f | ||
|
|
14f41d5340 | ||
|
|
c401c0b21d | ||
|
|
4f08818d7a |
111
.github/workflows/ci.yml
vendored
111
.github/workflows/ci.yml
vendored
@@ -2,32 +2,90 @@ on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- "!.github/workflows/*.yml"
|
||||
- "!rln-wasm/**"
|
||||
- "!rln/src/**"
|
||||
- "!rln/resources/**"
|
||||
- "!utils/src/**"
|
||||
pull_request:
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- "!.github/workflows/*.yml"
|
||||
- "!rln-wasm/**"
|
||||
- "!rln/src/**"
|
||||
- "!rln/resources/**"
|
||||
- "!utils/src/**"
|
||||
|
||||
name: Tests
|
||||
|
||||
jobs:
|
||||
tests:
|
||||
runs-on: ubuntu-latest
|
||||
test:
|
||||
strategy:
|
||||
matrix:
|
||||
platform: [ubuntu-latest, macos-latest]
|
||||
crate: [rln, utils]
|
||||
runs-on: ${{ matrix.platform }}
|
||||
timeout-minutes: 60
|
||||
|
||||
name: test - ${{ matrix.crate }} - ${{ matrix.platform }}
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
- name: Install stable toolchain
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
- name: Update git submodules
|
||||
run: git submodule update --init --recursive
|
||||
- name: cargo test
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: Install dependencies
|
||||
run: make installdeps
|
||||
- name: cargo-make test
|
||||
run: |
|
||||
cargo test
|
||||
cargo make test --release
|
||||
working-directory: ${{ matrix.crate }}
|
||||
|
||||
rln-wasm:
|
||||
strategy:
|
||||
matrix:
|
||||
platform: [ubuntu-latest, macos-latest]
|
||||
runs-on: ${{ matrix.platform }}
|
||||
timeout-minutes: 60
|
||||
|
||||
name: test - rln-wasm - ${{ matrix.platform }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Install stable toolchain
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: Install Dependencies
|
||||
run: make installdeps
|
||||
- name: Install wasm-pack
|
||||
uses: jetli/wasm-pack-action@v0.3.0
|
||||
- run: cargo make build
|
||||
working-directory: rln-wasm
|
||||
- run: cargo make test --release
|
||||
working-directory: rln-wasm
|
||||
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
# we run lint tests only on ubuntu
|
||||
platform: [ubuntu-latest]
|
||||
crate: [rln, utils]
|
||||
runs-on: ${{ matrix.platform }}
|
||||
timeout-minutes: 60
|
||||
|
||||
name: lint - ${{ matrix.crate }} - ${{ matrix.platform }}
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
- name: Install stable toolchain
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
@@ -35,11 +93,38 @@ jobs:
|
||||
toolchain: stable
|
||||
override: true
|
||||
components: rustfmt, clippy
|
||||
- name: Update git submodules
|
||||
run: git submodule update --init --recursive
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: Install Dependencies
|
||||
run: make installdeps
|
||||
- name: cargo fmt
|
||||
run: cargo fmt --all -- --check
|
||||
if: success() || failure()
|
||||
run: cargo fmt -- --check
|
||||
working-directory: ${{ matrix.crate }}
|
||||
- name: cargo clippy
|
||||
run: cargo clippy
|
||||
if: success() || failure()
|
||||
run: |
|
||||
cargo clippy --release -- -D warnings
|
||||
working-directory: ${{ matrix.crate }}
|
||||
# We skip clippy on rln-wasm, since wasm target is managed by cargo make
|
||||
# Currently not treating warnings as error, too noisy
|
||||
# -- -D warnings
|
||||
benchmark:
|
||||
# run only in pull requests
|
||||
if: github.event_name == 'pull_request'
|
||||
strategy:
|
||||
matrix:
|
||||
# we run benchmark tests only on ubuntu
|
||||
platform: [ubuntu-latest]
|
||||
crate: [rln, utils]
|
||||
runs-on: ${{ matrix.platform }}
|
||||
timeout-minutes: 60
|
||||
|
||||
name: benchmark - ${{ matrix.platform }} - ${{ matrix.crate }}
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- uses: boa-dev/criterion-compare-action@v3
|
||||
with:
|
||||
branchName: ${{ github.base_ref }}
|
||||
cwd: ${{ matrix.crate }}
|
||||
|
||||
150
.github/workflows/nightly-release.yml
vendored
Normal file
150
.github/workflows/nightly-release.yml
vendored
Normal file
@@ -0,0 +1,150 @@
|
||||
name: Nightly build
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 0 * * *"
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
linux:
|
||||
strategy:
|
||||
matrix:
|
||||
feature: ["default", "arkzkey"]
|
||||
target:
|
||||
- x86_64-unknown-linux-gnu
|
||||
- aarch64-unknown-linux-gnu
|
||||
- i686-unknown-linux-gnu
|
||||
name: Linux build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
- name: Install stable toolchain
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
target: ${{ matrix.target }}
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: Install dependencies
|
||||
run: make installdeps
|
||||
- name: cross build
|
||||
run: |
|
||||
cross build --release --target ${{ matrix.target }} --features ${{ matrix.feature }} --workspace --exclude rln-wasm
|
||||
mkdir release
|
||||
cp target/${{ matrix.target }}/release/librln* release/
|
||||
tar -czvf ${{ matrix.target }}-${{ matrix.feature }}-rln.tar.gz release/
|
||||
|
||||
- name: Upload archive artifact
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ matrix.target }}-${{ matrix.feature }}-archive
|
||||
path: ${{ matrix.target }}-${{ matrix.feature }}-rln.tar.gz
|
||||
retention-days: 2
|
||||
|
||||
macos:
|
||||
name: MacOS build
|
||||
runs-on: macos-latest
|
||||
strategy:
|
||||
matrix:
|
||||
feature: ["default", "arkzkey"]
|
||||
target:
|
||||
- x86_64-apple-darwin
|
||||
- aarch64-apple-darwin
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
- name: Install stable toolchain
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
target: ${{ matrix.target }}
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: Install dependencies
|
||||
run: make installdeps
|
||||
- name: cross build
|
||||
run: |
|
||||
cross build --release --target ${{ matrix.target }} --features ${{ matrix.feature }} --workspace --exclude rln-wasm
|
||||
mkdir release
|
||||
cp target/${{ matrix.target }}/release/librln* release/
|
||||
tar -czvf ${{ matrix.target }}-${{ matrix.feature }}-rln.tar.gz release/
|
||||
|
||||
- name: Upload archive artifact
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ matrix.target }}-${{ matrix.feature }}-archive
|
||||
path: ${{ matrix.target }}-${{ matrix.feature }}-rln.tar.gz
|
||||
retention-days: 2
|
||||
|
||||
browser-rln-wasm:
|
||||
name: Browser build (RLN WASM)
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
- name: Install stable toolchain
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: Install dependencies
|
||||
run: make installdeps
|
||||
- name: Install wasm-pack
|
||||
uses: jetli/wasm-pack-action@v0.3.0
|
||||
- name: cross make build
|
||||
run: |
|
||||
cross make build
|
||||
mkdir release
|
||||
cp pkg/** release/
|
||||
tar -czvf browser-rln-wasm.tar.gz release/
|
||||
working-directory: rln-wasm
|
||||
|
||||
- name: Upload archive artifact
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: browser-rln-wasm-archive
|
||||
path: rln-wasm/browser-rln-wasm.tar.gz
|
||||
retention-days: 2
|
||||
|
||||
prepare-prerelease:
|
||||
name: Prepare pre-release
|
||||
needs: [linux, macos, browser-rln-wasm]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
ref: master
|
||||
- name: Download artifacts
|
||||
uses: actions/download-artifact@v2
|
||||
|
||||
- name: Delete tag
|
||||
uses: dev-drprasad/delete-tag-and-release@v0.2.1
|
||||
with:
|
||||
delete_release: true
|
||||
tag_name: nightly
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Create prerelease
|
||||
run: |
|
||||
start_tag=$(gh release list -L 2 --exclude-drafts | grep -v nightly | cut -d$'\t' -f3 | sed -n '1p')
|
||||
gh release create nightly --prerelease --target master \
|
||||
--title 'Nightly build ("master" branch)' \
|
||||
--generate-notes \
|
||||
--draft=false \
|
||||
--notes-start-tag $start_tag \
|
||||
*-archive/*.tar.gz \
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Delete artifacts
|
||||
uses: geekyeggo/delete-artifact@v1
|
||||
with:
|
||||
failOnError: false
|
||||
name: |
|
||||
*-archive
|
||||
2
.github/workflows/sync-labels.yml
vendored
2
.github/workflows/sync-labels.yml
vendored
@@ -9,7 +9,7 @@ jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
- uses: micnncim/action-label-syncer@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -2,15 +2,13 @@
|
||||
.idea
|
||||
*.log
|
||||
tmp/
|
||||
rln/pmtree_db
|
||||
|
||||
# Generated by Cargo
|
||||
# will have compiled files and executables
|
||||
debug/
|
||||
target/
|
||||
|
||||
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
|
||||
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
|
||||
Cargo.lock
|
||||
wabt/
|
||||
|
||||
# These are backup files generated by rustfmt
|
||||
**/*.rs.bk
|
||||
|
||||
8
.gitmodules
vendored
8
.gitmodules
vendored
@@ -1,8 +0,0 @@
|
||||
[submodule "rln/vendor/rln"]
|
||||
path = rln/vendor/rln
|
||||
ignore = dirty
|
||||
url = https://github.com/Rate-Limiting-Nullifier/rln_circuits
|
||||
[submodule "semaphore/vendor/semaphore"]
|
||||
path = semaphore/vendor/semaphore
|
||||
ignore = dirty
|
||||
url = https://github.com/appliedzkp/semaphore.git
|
||||
28
CHANGELOG.md
Normal file
28
CHANGELOG.md
Normal file
@@ -0,0 +1,28 @@
|
||||
## 2023-02-28 v0.2
|
||||
|
||||
This release contains:
|
||||
|
||||
- Improved code quality
|
||||
- Allows consumers of zerokit RLN to set leaves to the Merkle Tree from an arbitrary index. Useful for batching updates to the Merkle Tree.
|
||||
- Improved performance for proof generation and verification
|
||||
- rln_wasm which allows for the consumption of RLN through a WebAssembly interface
|
||||
- Refactored to generate Semaphore-compatible credentials
|
||||
- Dual License under Apache 2.0 and MIT
|
||||
- RLN compiles as a static library, which can be consumed through a C FFI
|
||||
|
||||
|
||||
## 2022-09-19 v0.1
|
||||
|
||||
Initial beta release.
|
||||
|
||||
This release contains:
|
||||
|
||||
- RLN Module with API to manage, compute and verify [RLN](https://rfc.vac.dev/spec/32/) zkSNARK proofs and RLN primitives.
|
||||
- This can be consumed either as a Rust API or as a C FFI. The latter means it can be easily consumed through other environments, such as [Go](https://github.com/status-im/go-zerokit-rln/blob/master/rln/librln.h) or [Nim](https://github.com/status-im/nwaku/blob/4745c7872c69b5fd5c6ddab36df9c5c3d55f57c3/waku/v2/protocol/waku_rln_relay/waku_rln_relay_types.nim).
|
||||
|
||||
It also contains the following examples and experiments:
|
||||
|
||||
- Basic [example wrapper](https://github.com/vacp2p/zerokit/tree/master/multiplier) around a simple Circom circuit to show Circom integration through ark-circom and FFI.
|
||||
- Experimental [Semaphore wrapper](https://github.com/vacp2p/zerokit/tree/master/semaphore).
|
||||
|
||||
Feedback welcome! You can either [open an issue](https://github.com/vacp2p/zerokit/issues) or come talk to us in our [Vac Discord](https://discord.gg/PQFdubGt6d) #zerokit channel.
|
||||
3582
Cargo.lock
generated
Normal file
3582
Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
18
Cargo.toml
18
Cargo.toml
@@ -1,6 +1,14 @@
|
||||
[workspace]
|
||||
members = [
|
||||
"multiplier",
|
||||
"semaphore",
|
||||
"rln",
|
||||
]
|
||||
members = ["rln", "rln-cli", "rln-wasm", "utils"]
|
||||
default-members = ["rln", "rln-cli", "utils"]
|
||||
resolver = "2"
|
||||
|
||||
# Compilation profile for any non-workspace member.
|
||||
# Dependencies are optimized, even in a dev build. This improves dev performance
|
||||
# while having neglible impact on incremental build times.
|
||||
[profile.dev.package."*"]
|
||||
opt-level = 3
|
||||
|
||||
[profile.release.package."rln-wasm"]
|
||||
# Tell `rustc` to optimize for small code size.
|
||||
opt-level = "s"
|
||||
|
||||
35
Cross.toml
Normal file
35
Cross.toml
Normal file
@@ -0,0 +1,35 @@
|
||||
[target.x86_64-pc-windows-gnu]
|
||||
image = "ghcr.io/cross-rs/x86_64-pc-windows-gnu:latest"
|
||||
|
||||
[target.aarch64-unknown-linux-gnu]
|
||||
image = "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:latest"
|
||||
|
||||
[target.x86_64-unknown-linux-gnu]
|
||||
image = "ghcr.io/cross-rs/x86_64-unknown-linux-gnu:latest"
|
||||
|
||||
[target.arm-unknown-linux-gnueabi]
|
||||
image = "ghcr.io/cross-rs/arm-unknown-linux-gnueabi:latest"
|
||||
|
||||
[target.i686-pc-windows-gnu]
|
||||
image = "ghcr.io/cross-rs/i686-pc-windows-gnu:latest"
|
||||
|
||||
[target.i686-unknown-linux-gnu]
|
||||
image = "ghcr.io/cross-rs/i686-unknown-linux-gnu:latest"
|
||||
|
||||
[target.arm-unknown-linux-gnueabihf]
|
||||
image = "ghcr.io/cross-rs/arm-unknown-linux-gnueabihf:latest"
|
||||
|
||||
[target.mips-unknown-linux-gnu]
|
||||
image = "ghcr.io/cross-rs/mips-unknown-linux-gnu:latest"
|
||||
|
||||
[target.mips64-unknown-linux-gnuabi64]
|
||||
image = "ghcr.io/cross-rs/mips64-unknown-linux-gnuabi64:latest"
|
||||
|
||||
[target.mips64el-unknown-linux-gnuabi64]
|
||||
image = "ghcr.io/cross-rs/mips64el-unknown-linux-gnuabi64:latest"
|
||||
|
||||
[target.mipsel-unknown-linux-gnu]
|
||||
image = "ghcr.io/cross-rs/mipsel-unknown-linux-gnu:latest"
|
||||
|
||||
[target.aarch64-linux-android]
|
||||
image = "ghcr.io/cross-rs/aarch64-linux-android:edge"
|
||||
203
LICENSE-APACHE
Normal file
203
LICENSE-APACHE
Normal file
@@ -0,0 +1,203 @@
|
||||
Copyright (c) 2022 Vac Research
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
25
LICENSE-MIT
Normal file
25
LICENSE-MIT
Normal file
@@ -0,0 +1,25 @@
|
||||
Copyright (c) 2022 Vac Research
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without
|
||||
limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software
|
||||
is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
IN CONNECTION WITH THE SOFTWARE O THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
||||
37
Makefile
Normal file
37
Makefile
Normal file
@@ -0,0 +1,37 @@
|
||||
.PHONY: all installdeps build test clean
|
||||
|
||||
all: .pre-build build
|
||||
|
||||
.fetch-submodules:
|
||||
@git submodule update --init --recursive
|
||||
|
||||
.pre-build: .fetch-submodules
|
||||
@cargo install cargo-make
|
||||
ifdef CI
|
||||
@cargo install cross --git https://github.com/cross-rs/cross.git --rev 1511a28
|
||||
endif
|
||||
|
||||
installdeps: .pre-build
|
||||
ifeq ($(shell uname),Darwin)
|
||||
# commented due to https://github.com/orgs/Homebrew/discussions/4612
|
||||
# @brew update
|
||||
@brew install cmake ninja
|
||||
else ifeq ($(shell uname),Linux)
|
||||
@sudo apt-get update
|
||||
@sudo apt-get install -y cmake ninja-build
|
||||
endif
|
||||
@git -C "wabt" pull || git clone --recursive https://github.com/WebAssembly/wabt.git "wabt"
|
||||
@cd wabt && mkdir -p build && cd build && cmake .. -GNinja && ninja && sudo ninja install
|
||||
@which wasm-pack || cargo install wasm-pack
|
||||
# nvm already checks if it's installed, and no-ops if it is
|
||||
@curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash
|
||||
@. ${HOME}/.nvm/nvm.sh && nvm install 18.20.2 && nvm use 18.20.2;
|
||||
|
||||
build: .pre-build
|
||||
@cargo make build
|
||||
|
||||
test: .pre-build
|
||||
@cargo make test
|
||||
|
||||
clean:
|
||||
@cargo clean
|
||||
2
Makefile.toml
Normal file
2
Makefile.toml
Normal file
@@ -0,0 +1,2 @@
|
||||
[env]
|
||||
CARGO_MAKE_EXTEND_WORKSPACE_MAKEFILE = true
|
||||
26
README.md
26
README.md
@@ -17,3 +17,29 @@ in Rust.
|
||||
- [RLN library](https://github.com/kilic/rln) written in Rust based on Bellman.
|
||||
|
||||
- [semaphore-rs](https://github.com/worldcoin/semaphore-rs) written in Rust based on ark-circom.
|
||||
|
||||
## Users
|
||||
|
||||
Zerokit is used by -
|
||||
|
||||
- [nwaku](https://github.com/waku-org/nwaku)
|
||||
- [js-rln](https://github.com/waku-org/js-rln)
|
||||
|
||||
## Build and Test
|
||||
|
||||
To install missing dependencies, run the following commands from the root folder
|
||||
|
||||
```bash
|
||||
make installdeps
|
||||
```
|
||||
|
||||
To build and test all crates, run the following commands from the root folder
|
||||
|
||||
```bash
|
||||
make build
|
||||
make test
|
||||
```
|
||||
|
||||
## Release assets
|
||||
|
||||
We use [`cross-rs`](https://github.com/cross-rs/cross) to cross-compile and generate release assets for rln.
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
[package]
|
||||
name = "multiplier"
|
||||
version = "0.1.0"
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
|
||||
# WASM operations
|
||||
# wasmer = { version = "2.0" }
|
||||
# fnv = { version = "1.0.3", default-features = false }
|
||||
# num = { version = "0.4.0" }
|
||||
# num-traits = { version = "0.2.0", default-features = false }
|
||||
num-bigint = { version = "0.4", default-features = false, features = ["rand"] }
|
||||
|
||||
# ZKP Generation
|
||||
ark-ec = { version = "0.3.0", default-features = false, features = ["parallel"] }
|
||||
# ark-ff = { version = "0.3.0", default-features = false, features = ["parallel", "asm"] }
|
||||
ark-std = { version = "0.3.0", default-features = false, features = ["parallel"] }
|
||||
ark-bn254 = { version = "0.3.0" }
|
||||
ark-groth16 = { git = "https://github.com/arkworks-rs/groth16", rev = "765817f", features = ["parallel"] }
|
||||
# ark-poly = { version = "^0.3.0", default-features = false, features = ["parallel"] }
|
||||
ark-relations = { version = "0.3.0", default-features = false }
|
||||
ark-serialize = { version = "0.3.0", default-features = false }
|
||||
|
||||
ark-circom = { git = "https://github.com/gakonst/ark-circom", features = ["circom-2"] }
|
||||
|
||||
# error handling
|
||||
# thiserror = "1.0.26"
|
||||
color-eyre = "0.5"
|
||||
|
||||
# decoding of data
|
||||
# hex = "0.4.3"
|
||||
# byteorder = "1.4.3"
|
||||
@@ -1,13 +0,0 @@
|
||||
# Multiplier example
|
||||
|
||||
Example wrapper around a basic Circom circuit to test Circom 2 integration
|
||||
through ark-circom and FFI.
|
||||
|
||||
# FFI
|
||||
|
||||
To generate C or Nim bindings from Rust FFI, use `cbindgen` or `nbindgen`:
|
||||
|
||||
```
|
||||
cbindgen . -o target/multiplier.h
|
||||
nbindgen . -o target/multiplier.nim
|
||||
```
|
||||
Binary file not shown.
Binary file not shown.
@@ -1,77 +0,0 @@
|
||||
use crate::public::Multiplier;
|
||||
use std::slice;
|
||||
|
||||
/// Buffer struct is taken from
|
||||
/// https://github.com/celo-org/celo-threshold-bls-rs/blob/master/crates/threshold-bls-ffi/src/ffi.rs
|
||||
///
|
||||
/// Also heavily inspired by https://github.com/kilic/rln/blob/master/src/ffi.rs
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct Buffer {
|
||||
pub ptr: *const u8,
|
||||
pub len: usize,
|
||||
}
|
||||
|
||||
impl From<&[u8]> for Buffer {
|
||||
fn from(src: &[u8]) -> Self {
|
||||
Self {
|
||||
ptr: &src[0] as *const u8,
|
||||
len: src.len(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> From<&Buffer> for &'a [u8] {
|
||||
fn from(src: &Buffer) -> &'a [u8] {
|
||||
unsafe { slice::from_raw_parts(src.ptr, src.len) }
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::not_unsafe_ptr_arg_deref)]
|
||||
#[no_mangle]
|
||||
pub extern "C" fn new_circuit(ctx: *mut *mut Multiplier) -> bool {
|
||||
println!("multiplier ffi: new");
|
||||
let mul = Multiplier::new();
|
||||
|
||||
unsafe { *ctx = Box::into_raw(Box::new(mul)) };
|
||||
|
||||
true
|
||||
}
|
||||
|
||||
#[allow(clippy::not_unsafe_ptr_arg_deref)]
|
||||
#[no_mangle]
|
||||
pub extern "C" fn prove(ctx: *const Multiplier, output_buffer: *mut Buffer) -> bool {
|
||||
println!("multiplier ffi: prove");
|
||||
let mul = unsafe { &*ctx };
|
||||
let mut output_data: Vec<u8> = Vec::new();
|
||||
|
||||
match mul.prove(&mut output_data) {
|
||||
Ok(proof_data) => proof_data,
|
||||
Err(_) => return false,
|
||||
};
|
||||
unsafe { *output_buffer = Buffer::from(&output_data[..]) };
|
||||
std::mem::forget(output_data);
|
||||
true
|
||||
}
|
||||
|
||||
#[allow(clippy::not_unsafe_ptr_arg_deref)]
|
||||
#[no_mangle]
|
||||
pub extern "C" fn verify(
|
||||
ctx: *const Multiplier,
|
||||
proof_buffer: *const Buffer,
|
||||
result_ptr: *mut u32,
|
||||
) -> bool {
|
||||
println!("multiplier ffi: verify");
|
||||
let mul = unsafe { &*ctx };
|
||||
let proof_data = <&[u8]>::from(unsafe { &*proof_buffer });
|
||||
if match mul.verify(proof_data) {
|
||||
Ok(verified) => verified,
|
||||
Err(_) => return false,
|
||||
} {
|
||||
unsafe { *result_ptr = 0 };
|
||||
} else {
|
||||
unsafe { *result_ptr = 1 };
|
||||
};
|
||||
true
|
||||
}
|
||||
@@ -1,2 +0,0 @@
|
||||
pub mod ffi;
|
||||
pub mod public;
|
||||
@@ -1,48 +0,0 @@
|
||||
use ark_circom::{CircomBuilder, CircomConfig};
|
||||
use ark_std::rand::thread_rng;
|
||||
use color_eyre::Result;
|
||||
|
||||
use ark_bn254::Bn254;
|
||||
use ark_groth16::{
|
||||
create_random_proof as prove, generate_random_parameters, prepare_verifying_key, verify_proof,
|
||||
};
|
||||
|
||||
fn groth16_proof_example() -> Result<()> {
|
||||
let cfg = CircomConfig::<Bn254>::new(
|
||||
"./resources/circom2_multiplier2.wasm",
|
||||
"./resources/circom2_multiplier2.r1cs",
|
||||
)?;
|
||||
|
||||
let mut builder = CircomBuilder::new(cfg);
|
||||
builder.push_input("a", 3);
|
||||
builder.push_input("b", 11);
|
||||
|
||||
// create an empty instance for setting it up
|
||||
let circom = builder.setup();
|
||||
|
||||
let mut rng = thread_rng();
|
||||
let params = generate_random_parameters::<Bn254, _, _>(circom, &mut rng)?;
|
||||
|
||||
let circom = builder.build()?;
|
||||
|
||||
let inputs = circom.get_public_inputs().unwrap();
|
||||
|
||||
let proof = prove(circom, ¶ms, &mut rng)?;
|
||||
|
||||
let pvk = prepare_verifying_key(¶ms.vk);
|
||||
|
||||
let verified = verify_proof(&pvk, &proof, &inputs)?;
|
||||
|
||||
assert!(verified);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn main() {
|
||||
println!("Hello, world!");
|
||||
|
||||
match groth16_proof_example() {
|
||||
Ok(_) => println!("Success"),
|
||||
Err(_) => println!("Error"),
|
||||
}
|
||||
}
|
||||
@@ -1,98 +0,0 @@
|
||||
use ark_circom::{CircomBuilder, CircomCircuit, CircomConfig};
|
||||
use ark_std::rand::thread_rng;
|
||||
|
||||
use ark_bn254::Bn254;
|
||||
use ark_groth16::{
|
||||
create_random_proof as prove, generate_random_parameters, prepare_verifying_key, verify_proof,
|
||||
Proof, ProvingKey,
|
||||
};
|
||||
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
|
||||
// , SerializationError};
|
||||
|
||||
use std::io::{self, Read, Write};
|
||||
|
||||
pub struct Multiplier {
|
||||
circom: CircomCircuit<Bn254>,
|
||||
params: ProvingKey<Bn254>,
|
||||
}
|
||||
|
||||
impl Multiplier {
|
||||
// TODO Break this apart here
|
||||
pub fn new() -> Multiplier {
|
||||
let cfg = CircomConfig::<Bn254>::new(
|
||||
"./resources/circom2_multiplier2.wasm",
|
||||
"./resources/circom2_multiplier2.r1cs",
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let mut builder = CircomBuilder::new(cfg);
|
||||
builder.push_input("a", 3);
|
||||
builder.push_input("b", 11);
|
||||
|
||||
// create an empty instance for setting it up
|
||||
let circom = builder.setup();
|
||||
|
||||
let mut rng = thread_rng();
|
||||
|
||||
let params = generate_random_parameters::<Bn254, _, _>(circom, &mut rng).unwrap();
|
||||
|
||||
let circom = builder.build().unwrap();
|
||||
|
||||
//let inputs = circom.get_public_inputs().unwrap();
|
||||
|
||||
Multiplier { circom, params }
|
||||
}
|
||||
|
||||
// TODO Input Read
|
||||
pub fn prove<W: Write>(&self, result_data: W) -> io::Result<()> {
|
||||
let mut rng = thread_rng();
|
||||
|
||||
// XXX: There's probably a better way to do this
|
||||
let circom = self.circom.clone();
|
||||
let params = self.params.clone();
|
||||
|
||||
let proof = prove(circom, ¶ms, &mut rng).unwrap();
|
||||
|
||||
// XXX: Unclear if this is different from other serialization(s)
|
||||
let _ = proof.serialize(result_data).unwrap();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn verify<R: Read>(&self, input_data: R) -> io::Result<bool> {
|
||||
let proof = Proof::deserialize(input_data).unwrap();
|
||||
|
||||
let pvk = prepare_verifying_key(&self.params.vk);
|
||||
|
||||
// XXX Part of input data?
|
||||
let inputs = self.circom.get_public_inputs().unwrap();
|
||||
|
||||
let verified = verify_proof(&pvk, &proof, &inputs).unwrap();
|
||||
|
||||
Ok(verified)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Multiplier {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn multiplier_proof() {
|
||||
let mul = Multiplier::new();
|
||||
//let inputs = mul.circom.get_public_inputs().unwrap();
|
||||
|
||||
let mut output_data: Vec<u8> = Vec::new();
|
||||
let _ = mul.prove(&mut output_data);
|
||||
|
||||
let proof_data = &output_data[..];
|
||||
|
||||
// XXX Pass as arg?
|
||||
//let pvk = prepare_verifying_key(&mul.params.vk);
|
||||
|
||||
let verified = mul.verify(proof_data).unwrap();
|
||||
|
||||
assert!(verified);
|
||||
}
|
||||
13
rln-cli/Cargo.toml
Normal file
13
rln-cli/Cargo.toml
Normal file
@@ -0,0 +1,13 @@
|
||||
[package]
|
||||
name = "rln-cli"
|
||||
version = "0.3.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
rln = { path = "../rln" }
|
||||
clap = { version = "4.2.7", features = ["cargo", "derive", "env"]}
|
||||
clap_derive = { version = "=4.2.0" }
|
||||
color-eyre = "=0.6.2"
|
||||
# serialization
|
||||
serde_json = "1.0.48"
|
||||
serde = { version = "1.0.130", features = ["derive"] }
|
||||
67
rln-cli/src/commands.rs
Normal file
67
rln-cli/src/commands.rs
Normal file
@@ -0,0 +1,67 @@
|
||||
use std::path::PathBuf;
|
||||
|
||||
use clap::Subcommand;
|
||||
|
||||
#[derive(Subcommand)]
|
||||
pub(crate) enum Commands {
|
||||
New {
|
||||
tree_height: usize,
|
||||
/// Sets a custom config file
|
||||
#[arg(short, long)]
|
||||
config: PathBuf,
|
||||
},
|
||||
NewWithParams {
|
||||
tree_height: usize,
|
||||
/// Sets a custom config file
|
||||
#[arg(short, long)]
|
||||
config: PathBuf,
|
||||
#[arg(short, long)]
|
||||
tree_config_input: PathBuf,
|
||||
},
|
||||
SetTree {
|
||||
tree_height: usize,
|
||||
},
|
||||
SetLeaf {
|
||||
index: usize,
|
||||
#[arg(short, long)]
|
||||
file: PathBuf,
|
||||
},
|
||||
SetMultipleLeaves {
|
||||
index: usize,
|
||||
#[arg(short, long)]
|
||||
file: PathBuf,
|
||||
},
|
||||
ResetMultipleLeaves {
|
||||
#[arg(short, long)]
|
||||
file: PathBuf,
|
||||
},
|
||||
SetNextLeaf {
|
||||
#[arg(short, long)]
|
||||
file: PathBuf,
|
||||
},
|
||||
DeleteLeaf {
|
||||
index: usize,
|
||||
},
|
||||
GetRoot,
|
||||
GetProof {
|
||||
index: usize,
|
||||
},
|
||||
Prove {
|
||||
#[arg(short, long)]
|
||||
input: PathBuf,
|
||||
},
|
||||
Verify {
|
||||
#[arg(short, long)]
|
||||
file: PathBuf,
|
||||
},
|
||||
GenerateProof {
|
||||
#[arg(short, long)]
|
||||
input: PathBuf,
|
||||
},
|
||||
VerifyWithRoots {
|
||||
#[arg(short, long)]
|
||||
input: PathBuf,
|
||||
#[arg(short, long)]
|
||||
roots: PathBuf,
|
||||
},
|
||||
}
|
||||
29
rln-cli/src/config.rs
Normal file
29
rln-cli/src/config.rs
Normal file
@@ -0,0 +1,29 @@
|
||||
use color_eyre::Result;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{fs::File, io::Read, path::PathBuf};
|
||||
|
||||
pub const RLN_STATE_PATH: &str = "RLN_STATE_PATH";
|
||||
|
||||
#[derive(Default, Serialize, Deserialize)]
|
||||
pub(crate) struct Config {
|
||||
pub inner: Option<InnerConfig>,
|
||||
}
|
||||
|
||||
#[derive(Default, Serialize, Deserialize)]
|
||||
pub(crate) struct InnerConfig {
|
||||
pub file: PathBuf,
|
||||
pub tree_height: usize,
|
||||
}
|
||||
|
||||
impl Config {
|
||||
pub(crate) fn load_config() -> Result<Config> {
|
||||
let path = PathBuf::from(std::env::var(RLN_STATE_PATH)?);
|
||||
|
||||
let mut file = File::open(path)?;
|
||||
|
||||
let mut contents = String::new();
|
||||
file.read_to_string(&mut contents)?;
|
||||
let state: Config = serde_json::from_str(&contents)?;
|
||||
Ok(state)
|
||||
}
|
||||
}
|
||||
161
rln-cli/src/main.rs
Normal file
161
rln-cli/src/main.rs
Normal file
@@ -0,0 +1,161 @@
|
||||
use std::{fs::File, io::Read, path::Path};
|
||||
|
||||
use clap::Parser;
|
||||
use color_eyre::{Report, Result};
|
||||
use commands::Commands;
|
||||
use rln::public::RLN;
|
||||
use state::State;
|
||||
|
||||
mod commands;
|
||||
mod config;
|
||||
mod state;
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(author, version, about, long_about = None)]
|
||||
struct Cli {
|
||||
#[command(subcommand)]
|
||||
command: Option<Commands>,
|
||||
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
let cli = Cli::parse();
|
||||
|
||||
let mut state = State::load_state()?;
|
||||
|
||||
match &cli.command {
|
||||
Some(Commands::New {
|
||||
tree_height,
|
||||
config,
|
||||
}) => {
|
||||
let resources = File::open(&config)?;
|
||||
state.rln = Some(RLN::new(*tree_height, resources)?);
|
||||
Ok(())
|
||||
}
|
||||
Some(Commands::NewWithParams {
|
||||
tree_height,
|
||||
config,
|
||||
tree_config_input,
|
||||
}) => {
|
||||
let mut resources: Vec<Vec<u8>> = Vec::new();
|
||||
#[cfg(feature = "arkzkey")]
|
||||
let filenames = ["rln.wasm", "rln_final.arkzkey", "verification_key.json"];
|
||||
#[cfg(not(feature = "arkzkey"))]
|
||||
let filenames = ["rln.wasm", "rln_final.zkey", "verification_key.json"];
|
||||
for filename in filenames {
|
||||
let fullpath = config.join(Path::new(filename));
|
||||
let mut file = File::open(&fullpath)?;
|
||||
let metadata = std::fs::metadata(&fullpath)?;
|
||||
let mut buffer = vec![0; metadata.len() as usize];
|
||||
file.read_exact(&mut buffer)?;
|
||||
resources.push(buffer);
|
||||
}
|
||||
let tree_config_input_file = File::open(&tree_config_input)?;
|
||||
state.rln = Some(RLN::new_with_params(
|
||||
*tree_height,
|
||||
resources[0].clone(),
|
||||
resources[1].clone(),
|
||||
resources[2].clone(),
|
||||
tree_config_input_file,
|
||||
)?);
|
||||
Ok(())
|
||||
}
|
||||
Some(Commands::SetTree { tree_height }) => {
|
||||
state
|
||||
.rln
|
||||
.ok_or(Report::msg("no RLN instance initialized"))?
|
||||
.set_tree(*tree_height)?;
|
||||
Ok(())
|
||||
}
|
||||
Some(Commands::SetLeaf { index, file }) => {
|
||||
let input_data = File::open(&file)?;
|
||||
state
|
||||
.rln
|
||||
.ok_or(Report::msg("no RLN instance initialized"))?
|
||||
.set_leaf(*index, input_data)?;
|
||||
Ok(())
|
||||
}
|
||||
Some(Commands::SetMultipleLeaves { index, file }) => {
|
||||
let input_data = File::open(&file)?;
|
||||
state
|
||||
.rln
|
||||
.ok_or(Report::msg("no RLN instance initialized"))?
|
||||
.set_leaves_from(*index, input_data)?;
|
||||
Ok(())
|
||||
}
|
||||
Some(Commands::ResetMultipleLeaves { file }) => {
|
||||
let input_data = File::open(&file)?;
|
||||
state
|
||||
.rln
|
||||
.ok_or(Report::msg("no RLN instance initialized"))?
|
||||
.init_tree_with_leaves(input_data)?;
|
||||
Ok(())
|
||||
}
|
||||
Some(Commands::SetNextLeaf { file }) => {
|
||||
let input_data = File::open(&file)?;
|
||||
state
|
||||
.rln
|
||||
.ok_or(Report::msg("no RLN instance initialized"))?
|
||||
.set_next_leaf(input_data)?;
|
||||
Ok(())
|
||||
}
|
||||
Some(Commands::DeleteLeaf { index }) => {
|
||||
state
|
||||
.rln
|
||||
.ok_or(Report::msg("no RLN instance initialized"))?
|
||||
.delete_leaf(*index)?;
|
||||
Ok(())
|
||||
}
|
||||
Some(Commands::GetRoot) => {
|
||||
let writer = std::io::stdout();
|
||||
state
|
||||
.rln
|
||||
.ok_or(Report::msg("no RLN instance initialized"))?
|
||||
.get_root(writer)?;
|
||||
Ok(())
|
||||
}
|
||||
Some(Commands::GetProof { index }) => {
|
||||
let writer = std::io::stdout();
|
||||
state
|
||||
.rln
|
||||
.ok_or(Report::msg("no RLN instance initialized"))?
|
||||
.get_proof(*index, writer)?;
|
||||
Ok(())
|
||||
}
|
||||
Some(Commands::Prove { input }) => {
|
||||
let input_data = File::open(&input)?;
|
||||
let writer = std::io::stdout();
|
||||
state
|
||||
.rln
|
||||
.ok_or(Report::msg("no RLN instance initialized"))?
|
||||
.prove(input_data, writer)?;
|
||||
Ok(())
|
||||
}
|
||||
Some(Commands::Verify { file }) => {
|
||||
let input_data = File::open(&file)?;
|
||||
state
|
||||
.rln
|
||||
.ok_or(Report::msg("no RLN instance initialized"))?
|
||||
.verify(input_data)?;
|
||||
Ok(())
|
||||
}
|
||||
Some(Commands::GenerateProof { input }) => {
|
||||
let input_data = File::open(&input)?;
|
||||
let writer = std::io::stdout();
|
||||
state
|
||||
.rln
|
||||
.ok_or(Report::msg("no RLN instance initialized"))?
|
||||
.generate_rln_proof(input_data, writer)?;
|
||||
Ok(())
|
||||
}
|
||||
Some(Commands::VerifyWithRoots { input, roots }) => {
|
||||
let input_data = File::open(&input)?;
|
||||
let roots_data = File::open(&roots)?;
|
||||
state
|
||||
.rln
|
||||
.ok_or(Report::msg("no RLN instance initialized"))?
|
||||
.verify_with_roots(input_data, roots_data)?;
|
||||
Ok(())
|
||||
}
|
||||
None => Ok(()),
|
||||
}
|
||||
}
|
||||
23
rln-cli/src/state.rs
Normal file
23
rln-cli/src/state.rs
Normal file
@@ -0,0 +1,23 @@
|
||||
use color_eyre::Result;
|
||||
use rln::public::RLN;
|
||||
use std::fs::File;
|
||||
|
||||
use crate::config::{Config, InnerConfig};
|
||||
|
||||
#[derive(Default)]
|
||||
pub(crate) struct State<'a> {
|
||||
pub rln: Option<RLN<'a>>,
|
||||
}
|
||||
|
||||
impl<'a> State<'a> {
|
||||
pub(crate) fn load_state() -> Result<State<'a>> {
|
||||
let config = Config::load_config()?;
|
||||
let rln = if let Some(InnerConfig { file, tree_height }) = config.inner {
|
||||
let resources = File::open(&file)?;
|
||||
Some(RLN::new(tree_height, resources)?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
Ok(State { rln })
|
||||
}
|
||||
}
|
||||
6
rln-wasm/.gitignore
vendored
Normal file
6
rln-wasm/.gitignore
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
/target
|
||||
**/*.rs.bk
|
||||
Cargo.lock
|
||||
bin/
|
||||
pkg/
|
||||
wasm-pack.log
|
||||
35
rln-wasm/Cargo.toml
Normal file
35
rln-wasm/Cargo.toml
Normal file
@@ -0,0 +1,35 @@
|
||||
[package]
|
||||
name = "rln-wasm"
|
||||
version = "0.0.13"
|
||||
edition = "2021"
|
||||
license = "MIT or Apache2"
|
||||
autobenches = false
|
||||
autotests = false
|
||||
autobins = false
|
||||
|
||||
[lib]
|
||||
crate-type = ["cdylib", "rlib"]
|
||||
|
||||
[features]
|
||||
default = ["console_error_panic_hook"]
|
||||
|
||||
[dependencies]
|
||||
rln = { path = "../rln", default-features = false, features = ["wasm"] }
|
||||
num-bigint = { version = "0.4", default-features = false, features = ["rand", "serde"] }
|
||||
wasmer = { version = "2.3", default-features = false, features = ["js", "std"] }
|
||||
web-sys = {version = "0.3", features=["console"]}
|
||||
getrandom = { version = "0.2.7", default-features = false, features = ["js"] }
|
||||
wasm-bindgen = "0.2.63"
|
||||
serde-wasm-bindgen = "0.4"
|
||||
js-sys = "0.3.59"
|
||||
serde_json = "1.0.85"
|
||||
|
||||
# The `console_error_panic_hook` crate provides better debugging of panics by
|
||||
# logging them with `console.error`. This is great for development, but requires
|
||||
# all the `std::fmt` and `std::panicking` infrastructure, so isn't great for
|
||||
# code size when deploying.
|
||||
console_error_panic_hook = { version = "0.1.7", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
wasm-bindgen-test = "0.3.13"
|
||||
wasm-bindgen-futures = "0.4.33"
|
||||
35
rln-wasm/Makefile.toml
Normal file
35
rln-wasm/Makefile.toml
Normal file
@@ -0,0 +1,35 @@
|
||||
[tasks.pack-build]
|
||||
command = "wasm-pack"
|
||||
args = ["build", "--release", "--target", "web", "--scope", "waku"]
|
||||
|
||||
[tasks.pack-rename]
|
||||
script = "sed -i.bak 's/rln-wasm/zerokit-rln-wasm/g' pkg/package.json && rm pkg/package.json.bak"
|
||||
|
||||
[tasks.build]
|
||||
clear = true
|
||||
dependencies = [
|
||||
"pack-build",
|
||||
"pack-rename",
|
||||
"post-build"
|
||||
]
|
||||
|
||||
[tasks.post-build]
|
||||
command = "wasm-strip"
|
||||
args = ["./pkg/rln_wasm_bg.wasm"]
|
||||
|
||||
[tasks.test]
|
||||
command = "wasm-pack"
|
||||
args = ["test", "--release", "--node"]
|
||||
dependencies = ["build"]
|
||||
|
||||
[tasks.login]
|
||||
command = "wasm-pack"
|
||||
args = ["login"]
|
||||
|
||||
[tasks.publish]
|
||||
command = "wasm-pack"
|
||||
args = ["publish", "--access", "public", "--target", "web"]
|
||||
|
||||
[tasks.bench]
|
||||
command = "echo"
|
||||
args = ["'No benchmarks available for this project'"]
|
||||
41
rln-wasm/README.md
Normal file
41
rln-wasm/README.md
Normal file
@@ -0,0 +1,41 @@
|
||||
# RLN for WASM
|
||||
This library is used in [waku-org/js-rln](https://github.com/waku-org/js-rln/)
|
||||
|
||||
## Building the library
|
||||
1. Install `wasm-pack`
|
||||
```
|
||||
curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
|
||||
```
|
||||
2. Install `cargo-make`
|
||||
```
|
||||
cargo install cargo-make
|
||||
```
|
||||
|
||||
OR
|
||||
|
||||
```
|
||||
make installdeps
|
||||
```
|
||||
3. Compile zerokit for `wasm32-unknown-unknown`:
|
||||
```
|
||||
cd rln-wasm
|
||||
cargo make build
|
||||
```
|
||||
4. Compile a slimmer version of zerokit for `wasm32-unknown-unknown`:
|
||||
```
|
||||
cd rln-wasm
|
||||
cargo make post-build
|
||||
```
|
||||
|
||||
## Running tests
|
||||
```
|
||||
cd rln-wasm
|
||||
cargo make test
|
||||
```
|
||||
|
||||
## Publishing a npm package
|
||||
```
|
||||
cd rln-wasm
|
||||
cargo make login
|
||||
cargo make publish
|
||||
```
|
||||
331
rln-wasm/resources/witness_calculator.js
Normal file
331
rln-wasm/resources/witness_calculator.js
Normal file
@@ -0,0 +1,331 @@
|
||||
module.exports = async function builder(code, options) {
|
||||
|
||||
options = options || {};
|
||||
|
||||
let wasmModule;
|
||||
try {
|
||||
wasmModule = await WebAssembly.compile(code);
|
||||
} catch (err) {
|
||||
console.log(err);
|
||||
console.log("\nTry to run circom --c in order to generate c++ code instead\n");
|
||||
throw new Error(err);
|
||||
}
|
||||
|
||||
let wc;
|
||||
|
||||
let errStr = "";
|
||||
let msgStr = "";
|
||||
|
||||
const instance = await WebAssembly.instantiate(wasmModule, {
|
||||
runtime: {
|
||||
exceptionHandler : function(code) {
|
||||
let err;
|
||||
if (code == 1) {
|
||||
err = "Signal not found.\n";
|
||||
} else if (code == 2) {
|
||||
err = "Too many signals set.\n";
|
||||
} else if (code == 3) {
|
||||
err = "Signal already set.\n";
|
||||
} else if (code == 4) {
|
||||
err = "Assert Failed.\n";
|
||||
} else if (code == 5) {
|
||||
err = "Not enough memory.\n";
|
||||
} else if (code == 6) {
|
||||
err = "Input signal array access exceeds the size.\n";
|
||||
} else {
|
||||
err = "Unknown error.\n";
|
||||
}
|
||||
throw new Error(err + errStr);
|
||||
},
|
||||
printErrorMessage : function() {
|
||||
errStr += getMessage() + "\n";
|
||||
// console.error(getMessage());
|
||||
},
|
||||
writeBufferMessage : function() {
|
||||
const msg = getMessage();
|
||||
// Any calls to `log()` will always end with a `\n`, so that's when we print and reset
|
||||
if (msg === "\n") {
|
||||
console.log(msgStr);
|
||||
msgStr = "";
|
||||
} else {
|
||||
// If we've buffered other content, put a space in between the items
|
||||
if (msgStr !== "") {
|
||||
msgStr += " "
|
||||
}
|
||||
// Then append the message to the message we are creating
|
||||
msgStr += msg;
|
||||
}
|
||||
},
|
||||
showSharedRWMemory : function() {
|
||||
printSharedRWMemory ();
|
||||
}
|
||||
|
||||
}
|
||||
});
|
||||
|
||||
const sanityCheck =
|
||||
options
|
||||
// options &&
|
||||
// (
|
||||
// options.sanityCheck ||
|
||||
// options.logGetSignal ||
|
||||
// options.logSetSignal ||
|
||||
// options.logStartComponent ||
|
||||
// options.logFinishComponent
|
||||
// );
|
||||
|
||||
|
||||
wc = new WitnessCalculator(instance, sanityCheck);
|
||||
return wc;
|
||||
|
||||
function getMessage() {
|
||||
var message = "";
|
||||
var c = instance.exports.getMessageChar();
|
||||
while ( c != 0 ) {
|
||||
message += String.fromCharCode(c);
|
||||
c = instance.exports.getMessageChar();
|
||||
}
|
||||
return message;
|
||||
}
|
||||
|
||||
function printSharedRWMemory () {
|
||||
const shared_rw_memory_size = instance.exports.getFieldNumLen32();
|
||||
const arr = new Uint32Array(shared_rw_memory_size);
|
||||
for (let j=0; j<shared_rw_memory_size; j++) {
|
||||
arr[shared_rw_memory_size-1-j] = instance.exports.readSharedRWMemory(j);
|
||||
}
|
||||
|
||||
// If we've buffered other content, put a space in between the items
|
||||
if (msgStr !== "") {
|
||||
msgStr += " "
|
||||
}
|
||||
// Then append the value to the message we are creating
|
||||
msgStr += (fromArray32(arr).toString());
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
class WitnessCalculator {
|
||||
constructor(instance, sanityCheck) {
|
||||
this.instance = instance;
|
||||
|
||||
this.version = this.instance.exports.getVersion();
|
||||
this.n32 = this.instance.exports.getFieldNumLen32();
|
||||
|
||||
this.instance.exports.getRawPrime();
|
||||
const arr = new Uint32Array(this.n32);
|
||||
for (let i=0; i<this.n32; i++) {
|
||||
arr[this.n32-1-i] = this.instance.exports.readSharedRWMemory(i);
|
||||
}
|
||||
this.prime = fromArray32(arr);
|
||||
|
||||
this.witnessSize = this.instance.exports.getWitnessSize();
|
||||
|
||||
this.sanityCheck = sanityCheck;
|
||||
}
|
||||
|
||||
circom_version() {
|
||||
return this.instance.exports.getVersion();
|
||||
}
|
||||
|
||||
async _doCalculateWitness(input, sanityCheck) {
|
||||
//input is assumed to be a map from signals to arrays of bigints
|
||||
this.instance.exports.init((this.sanityCheck || sanityCheck) ? 1 : 0);
|
||||
const keys = Object.keys(input);
|
||||
var input_counter = 0;
|
||||
keys.forEach( (k) => {
|
||||
const h = fnvHash(k);
|
||||
const hMSB = parseInt(h.slice(0,8), 16);
|
||||
const hLSB = parseInt(h.slice(8,16), 16);
|
||||
const fArr = flatArray(input[k]);
|
||||
let signalSize = this.instance.exports.getInputSignalSize(hMSB, hLSB);
|
||||
if (signalSize < 0){
|
||||
throw new Error(`Signal ${k} not found\n`);
|
||||
}
|
||||
if (fArr.length < signalSize) {
|
||||
throw new Error(`Not enough values for input signal ${k}\n`);
|
||||
}
|
||||
if (fArr.length > signalSize) {
|
||||
throw new Error(`Too many values for input signal ${k}\n`);
|
||||
}
|
||||
for (let i=0; i<fArr.length; i++) {
|
||||
const arrFr = toArray32(BigInt(fArr[i])%this.prime,this.n32)
|
||||
for (let j=0; j<this.n32; j++) {
|
||||
this.instance.exports.writeSharedRWMemory(j,arrFr[this.n32-1-j]);
|
||||
}
|
||||
try {
|
||||
this.instance.exports.setInputSignal(hMSB, hLSB,i);
|
||||
input_counter++;
|
||||
} catch (err) {
|
||||
// console.log(`After adding signal ${i} of ${k}`)
|
||||
throw new Error(err);
|
||||
}
|
||||
}
|
||||
|
||||
});
|
||||
if (input_counter < this.instance.exports.getInputSize()) {
|
||||
throw new Error(`Not all inputs have been set. Only ${input_counter} out of ${this.instance.exports.getInputSize()}`);
|
||||
}
|
||||
}
|
||||
|
||||
async calculateWitness(input, sanityCheck) {
|
||||
|
||||
const w = [];
|
||||
|
||||
await this._doCalculateWitness(input, sanityCheck);
|
||||
|
||||
for (let i=0; i<this.witnessSize; i++) {
|
||||
this.instance.exports.getWitness(i);
|
||||
const arr = new Uint32Array(this.n32);
|
||||
for (let j=0; j<this.n32; j++) {
|
||||
arr[this.n32-1-j] = this.instance.exports.readSharedRWMemory(j);
|
||||
}
|
||||
w.push(fromArray32(arr));
|
||||
}
|
||||
|
||||
return w;
|
||||
}
|
||||
|
||||
|
||||
async calculateBinWitness(input, sanityCheck) {
|
||||
|
||||
const buff32 = new Uint32Array(this.witnessSize*this.n32);
|
||||
const buff = new Uint8Array( buff32.buffer);
|
||||
await this._doCalculateWitness(input, sanityCheck);
|
||||
|
||||
for (let i=0; i<this.witnessSize; i++) {
|
||||
this.instance.exports.getWitness(i);
|
||||
const pos = i*this.n32;
|
||||
for (let j=0; j<this.n32; j++) {
|
||||
buff32[pos+j] = this.instance.exports.readSharedRWMemory(j);
|
||||
}
|
||||
}
|
||||
|
||||
return buff;
|
||||
}
|
||||
|
||||
|
||||
async calculateWTNSBin(input, sanityCheck) {
|
||||
|
||||
const buff32 = new Uint32Array(this.witnessSize*this.n32+this.n32+11);
|
||||
const buff = new Uint8Array( buff32.buffer);
|
||||
await this._doCalculateWitness(input, sanityCheck);
|
||||
|
||||
//"wtns"
|
||||
buff[0] = "w".charCodeAt(0)
|
||||
buff[1] = "t".charCodeAt(0)
|
||||
buff[2] = "n".charCodeAt(0)
|
||||
buff[3] = "s".charCodeAt(0)
|
||||
|
||||
//version 2
|
||||
buff32[1] = 2;
|
||||
|
||||
//number of sections: 2
|
||||
buff32[2] = 2;
|
||||
|
||||
//id section 1
|
||||
buff32[3] = 1;
|
||||
|
||||
const n8 = this.n32*4;
|
||||
//id section 1 length in 64bytes
|
||||
const idSection1length = 8 + n8;
|
||||
const idSection1lengthHex = idSection1length.toString(16);
|
||||
buff32[4] = parseInt(idSection1lengthHex.slice(0,8), 16);
|
||||
buff32[5] = parseInt(idSection1lengthHex.slice(8,16), 16);
|
||||
|
||||
//this.n32
|
||||
buff32[6] = n8;
|
||||
|
||||
//prime number
|
||||
this.instance.exports.getRawPrime();
|
||||
|
||||
var pos = 7;
|
||||
for (let j=0; j<this.n32; j++) {
|
||||
buff32[pos+j] = this.instance.exports.readSharedRWMemory(j);
|
||||
}
|
||||
pos += this.n32;
|
||||
|
||||
// witness size
|
||||
buff32[pos] = this.witnessSize;
|
||||
pos++;
|
||||
|
||||
//id section 2
|
||||
buff32[pos] = 2;
|
||||
pos++;
|
||||
|
||||
// section 2 length
|
||||
const idSection2length = n8*this.witnessSize;
|
||||
const idSection2lengthHex = idSection2length.toString(16);
|
||||
buff32[pos] = parseInt(idSection2lengthHex.slice(0,8), 16);
|
||||
buff32[pos+1] = parseInt(idSection2lengthHex.slice(8,16), 16);
|
||||
|
||||
pos += 2;
|
||||
for (let i=0; i<this.witnessSize; i++) {
|
||||
this.instance.exports.getWitness(i);
|
||||
for (let j=0; j<this.n32; j++) {
|
||||
buff32[pos+j] = this.instance.exports.readSharedRWMemory(j);
|
||||
}
|
||||
pos += this.n32;
|
||||
}
|
||||
|
||||
return buff;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
function toArray32(rem,size) {
|
||||
const res = []; //new Uint32Array(size); //has no unshift
|
||||
const radix = BigInt(0x100000000);
|
||||
while (rem) {
|
||||
res.unshift( Number(rem % radix));
|
||||
rem = rem / radix;
|
||||
}
|
||||
if (size) {
|
||||
var i = size - res.length;
|
||||
while (i>0) {
|
||||
res.unshift(0);
|
||||
i--;
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
function fromArray32(arr) { //returns a BigInt
|
||||
var res = BigInt(0);
|
||||
const radix = BigInt(0x100000000);
|
||||
for (let i = 0; i<arr.length; i++) {
|
||||
res = res*radix + BigInt(arr[i]);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
function flatArray(a) {
|
||||
var res = [];
|
||||
fillArray(res, a);
|
||||
return res;
|
||||
|
||||
function fillArray(res, a) {
|
||||
if (Array.isArray(a)) {
|
||||
for (let i=0; i<a.length; i++) {
|
||||
fillArray(res, a[i]);
|
||||
}
|
||||
} else {
|
||||
res.push(a);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function fnvHash(str) {
|
||||
const uint64_max = BigInt(2) ** BigInt(64);
|
||||
let hash = BigInt("0xCBF29CE484222325");
|
||||
for (var i = 0; i < str.length; i++) {
|
||||
hash ^= BigInt(str[i].charCodeAt());
|
||||
hash *= BigInt(0x100000001B3);
|
||||
hash %= uint64_max;
|
||||
}
|
||||
let shash = hash.toString(16);
|
||||
let n = 16 - shash.length;
|
||||
shash = '0'.repeat(n).concat(shash);
|
||||
return shash;
|
||||
}
|
||||
406
rln-wasm/src/lib.rs
Normal file
406
rln-wasm/src/lib.rs
Normal file
@@ -0,0 +1,406 @@
|
||||
#![cfg(target_arch = "wasm32")]
|
||||
|
||||
extern crate wasm_bindgen;
|
||||
extern crate web_sys;
|
||||
|
||||
use std::vec::Vec;
|
||||
|
||||
use js_sys::{BigInt as JsBigInt, Object, Uint8Array};
|
||||
use num_bigint::BigInt;
|
||||
use rln::public::{hash, poseidon_hash, RLN};
|
||||
use wasm_bindgen::prelude::*;
|
||||
|
||||
/// Installs `console_error_panic_hook` so that Rust panics are reported to
/// the JS console instead of aborting with an opaque wasm trap message.
/// Idempotent: `set_once` only installs the hook on the first call.
#[wasm_bindgen]
pub fn init_panic_hook() {
    console_error_panic_hook::set_once();
}
|
||||
|
||||
/// Opaque handle exported to JS as `RLN`.
///
/// The purpose of this wrapper is to hold a `RLN` instance with the `'static`
/// lifetime, because wasm_bindgen does not allow returning elements with
/// lifetimes. Instances are created by `wasm_new` and passed back and forth
/// across the FFI boundary as raw pointers.
#[wasm_bindgen(js_name = RLN)]
pub struct RLNWrapper {
    instance: RLN<'static>,
}
|
||||
|
||||
// Macro to call methods with an arbitrary number of arguments,
// where the method's last argument is an output buffer (`&mut Vec<u8>`).
// First argument to the macro is the context pointer,
// second is the method on `RLN`,
// third is the error message to prepend on failure,
// rest are all other arguments forwarded to the method.
//
// On success the output buffer is copied into a JS `Uint8Array`.
// Note: `Uint8Array::from(&[u8])` copies the bytes into a newly allocated
// JS typed array, so `output_data` must be dropped normally afterwards.
// The previous implementation called `std::mem::forget(output_data)` in
// both branches, leaking the Vec on every call (including the error path,
// where no JS view of the buffer ever exists).
macro_rules! call_with_output_and_error_msg {
    // this variant is needed for the case when
    // there are zero other arguments
    ($instance:expr, $method:ident, $error_msg:expr) => {{
        let mut output_data: Vec<u8> = Vec::new();
        let new_instance = $instance.process();
        if let Err(err) = new_instance.instance.$method(&mut output_data) {
            Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
        } else {
            // Copies into JS memory; output_data is freed when it goes out of scope.
            Ok(Uint8Array::from(&output_data[..]))
        }
    }};
    ($instance:expr, $method:ident, $error_msg:expr, $( $arg:expr ),* ) => {{
        let mut output_data: Vec<u8> = Vec::new();
        let new_instance = $instance.process();
        if let Err(err) = new_instance.instance.$method($($arg.process()),*, &mut output_data) {
            Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
        } else {
            // Copies into JS memory; output_data is freed when it goes out of scope.
            Ok(Uint8Array::from(&output_data[..]))
        }
    }};
}
|
||||
|
||||
// Macro to call methods that produce no output buffer, with an arbitrary
// number of arguments.
// First argument to the macro is the context pointer,
// second is the method on `RLN`,
// third is the error message to prepend on failure,
// rest are all other arguments forwarded to the method.
// Returns `Ok(())` on success, discarding the method's success value.
macro_rules! call_with_error_msg {
    ($instance:expr, $method:ident, $error_msg:expr $(, $arg:expr)*) => {{
        let wrapper: &mut RLNWrapper = $instance.process();
        wrapper
            .instance
            .$method($($arg.process()),*)
            .map(|_| ())
            .map_err(|err| format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
    }}
}
|
||||
|
||||
// Macro to call a method on the wrapped `RLN` instance and return its
// result untouched (no error-message wrapping).
// First argument is the context pointer (converted via `ProcessArg`),
// second is the method on `RLN`, rest are forwarded arguments.
macro_rules! call {
    ($instance:expr, $method:ident $(, $arg:expr)*) => {
        {
            let new_instance: &mut RLNWrapper = $instance.process();
            new_instance.instance.$method($($arg.process()),*)
        }
    }
}
|
||||
|
||||
// Macro for methods called through a shared (immutable) reference whose
// success value is passed through unchanged (used for the `verify_*`
// methods returning `bool`); only the error is wrapped with a message.
macro_rules! call_bool_method_with_error_msg {
    ($instance:expr, $method:ident, $error_msg:expr $(, $arg:expr)*) => {
        {
            let new_instance: &RLNWrapper = $instance.process();
            new_instance.instance.$method($($arg.process()),*).map_err(|err| format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
        }
    }
}
|
||||
|
||||
// Macro to execute a free function (not a method on `RLN`) whose last
// parameter is an output buffer (`&mut Vec<u8>`).
// First argument is the function to execute,
// second is the error message to prepend on failure,
// rest are all other arguments forwarded to the function.
//
// As with `call_with_output_and_error_msg`, `Uint8Array::from(&[u8])`
// copies the bytes into a new JS typed array, so the buffer is dropped
// normally instead of being leaked via `std::mem::forget`.
macro_rules! fn_call_with_output_and_error_msg {
    // this variant is needed for the case when
    // there are zero other arguments
    ($func:ident, $error_msg:expr) => {{
        let mut output_data: Vec<u8> = Vec::new();
        if let Err(err) = $func(&mut output_data) {
            Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
        } else {
            // Copies into JS memory; output_data is freed when it goes out of scope.
            Ok(Uint8Array::from(&output_data[..]))
        }
    }};
    ($func:ident, $error_msg:expr, $( $arg:expr ),* ) => {{
        let mut output_data: Vec<u8> = Vec::new();
        if let Err(err) = $func($($arg.process()),*, &mut output_data) {
            Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
        } else {
            // Copies into JS memory; output_data is freed when it goes out of scope.
            Ok(Uint8Array::from(&output_data[..]))
        }
    }};
}
|
||||
|
||||
/// Normalizes arguments before the `call*` macros forward them to the
/// wrapped `RLN` methods: raw pointers received from JS become references,
/// while value types pass through unchanged.
trait ProcessArg {
    type ReturnType;
    fn process(self) -> Self::ReturnType;
}

// Plain values are forwarded as-is.
impl ProcessArg for usize {
    type ReturnType = usize;
    fn process(self) -> Self::ReturnType {
        self
    }
}

impl<T> ProcessArg for Vec<T> {
    type ReturnType = Vec<T>;
    fn process(self) -> Self::ReturnType {
        self
    }
}

// SAFETY(review): the impls below dereference raw pointers handed back from
// JS. Soundness relies on the pointer originating from `wasm_new`
// (`Box::into_raw`) and still being live — TODO confirm nothing frees the
// handle while JS can still pass it in.
impl<'a> ProcessArg for *const RLN<'a> {
    type ReturnType = &'a RLN<'a>;
    fn process(self) -> Self::ReturnType {
        unsafe { &*self }
    }
}

impl ProcessArg for *const RLNWrapper {
    type ReturnType = &'static RLNWrapper;
    fn process(self) -> Self::ReturnType {
        unsafe { &*self }
    }
}

// NOTE(review): fabricating a `'static` mutable reference from a raw pointer
// assumes no other reference to the wrapper exists for the duration of the
// call (single-threaded wasm usage).
impl ProcessArg for *mut RLNWrapper {
    type ReturnType = &'static mut RLNWrapper;
    fn process(self) -> Self::ReturnType {
        unsafe { &mut *self }
    }
}

impl<'a> ProcessArg for &'a [u8] {
    type ReturnType = &'a [u8];

    fn process(self) -> Self::ReturnType {
        self
    }
}
|
||||
|
||||
/// Creates a new `RLN` instance (exported to JS as `newRLN`) from the
/// serialized proving key (`zkey`) and verification key (`vk`) bytes.
///
/// Returns a raw pointer produced by `Box::into_raw`; ownership is handed
/// to the JS side, which passes the pointer back into the other exported
/// functions. NOTE(review): no corresponding free/destructor export is
/// visible in this file, so the instance lives for the lifetime of the
/// wasm module — confirm this is intentional.
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = newRLN)]
pub fn wasm_new(
    tree_height: usize,
    zkey: Uint8Array,
    vk: Uint8Array,
) -> Result<*mut RLNWrapper, String> {
    let instance = RLN::new_with_params(tree_height, zkey.to_vec(), vk.to_vec())
        .map_err(|err| format!("{:#?}", err))?;
    let wrapper = RLNWrapper { instance };
    Ok(Box::into_raw(Box::new(wrapper)))
}
|
||||
|
||||
/// Computes the serialized RLN witness for a serialized message
/// (exported to JS as `getSerializedRLNWitness`).
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = getSerializedRLNWitness)]
pub fn wasm_get_serialized_rln_witness(
    ctx: *mut RLNWrapper,
    input: Uint8Array,
) -> Result<Uint8Array, String> {
    let rln_witness = call!(ctx, get_serialized_rln_witness, &input.to_vec()[..])
        .map_err(|err| format!("{:#?}", err))?;
    Ok(Uint8Array::from(&rln_witness[..]))
}

/// Appends a serialized leaf at the next free index of the Merkle tree
/// (exported to JS as `insertMember`).
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = insertMember)]
pub fn wasm_set_next_leaf(ctx: *mut RLNWrapper, input: Uint8Array) -> Result<(), String> {
    call_with_error_msg!(
        ctx,
        set_next_leaf,
        "could not insert member into merkle tree".to_string(),
        &input.to_vec()[..]
    )
}
|
||||
|
||||
/// Sets a batch of serialized leaves starting at `index`
/// (exported to JS as `setLeavesFrom`).
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = setLeavesFrom)]
pub fn wasm_set_leaves_from(
    ctx: *mut RLNWrapper,
    index: usize,
    input: Uint8Array,
) -> Result<(), String> {
    call_with_error_msg!(
        ctx,
        set_leaves_from,
        "could not set multiple leaves".to_string(),
        index,
        &*input.to_vec()
    )
}

/// Deletes the leaf at `index` (exported to JS as `deleteLeaf`).
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = deleteLeaf)]
pub fn wasm_delete_leaf(ctx: *mut RLNWrapper, index: usize) -> Result<(), String> {
    call_with_error_msg!(ctx, delete_leaf, "could not delete leaf".to_string(), index)
}
|
||||
|
||||
/// Stores arbitrary metadata bytes on the instance
/// (exported to JS as `setMetadata`).
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = setMetadata)]
pub fn wasm_set_metadata(ctx: *mut RLNWrapper, input: Uint8Array) -> Result<(), String> {
    call_with_error_msg!(
        ctx,
        set_metadata,
        "could not set metadata".to_string(),
        &*input.to_vec()
    )
}

/// Retrieves the metadata bytes previously stored with `setMetadata`
/// (exported to JS as `getMetadata`).
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = getMetadata)]
pub fn wasm_get_metadata(ctx: *mut RLNWrapper) -> Result<Uint8Array, String> {
    call_with_output_and_error_msg!(ctx, get_metadata, "could not get metadata".to_string())
}

/// Re-initializes the Merkle tree with the given serialized leaves
/// (exported to JS as `initTreeWithLeaves`).
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = initTreeWithLeaves)]
pub fn wasm_init_tree_with_leaves(ctx: *mut RLNWrapper, input: Uint8Array) -> Result<(), String> {
    call_with_error_msg!(
        ctx,
        init_tree_with_leaves,
        "could not init merkle tree".to_string(),
        &*input.to_vec()
    )
}
|
||||
|
||||
/// Converts a serialized RLN witness into the JSON input object expected by
/// the circom witness calculator (exported to JS as `RLNWitnessToJson`).
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = RLNWitnessToJson)]
pub fn rln_witness_to_json(
    ctx: *mut RLNWrapper,
    serialized_witness: Uint8Array,
) -> Result<Object, String> {
    let inputs = call!(ctx, get_rln_witness_json, &serialized_witness.to_vec()[..])
        .map_err(|err| err.to_string())?;
    // Serialize to a JsValue, then materialize it as a plain JS object.
    let js_value = serde_wasm_bindgen::to_value(&inputs).map_err(|err| err.to_string())?;
    Object::from_entries(&js_value).map_err(|err| format!("{:#?}", err))
}
|
||||
|
||||
/// Generates a serialized RLN proof from a witness that was already
/// calculated by the JS circom witness calculator, plus the serialized
/// witness produced by `getSerializedRLNWitness`.
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen]
pub fn generate_rln_proof_with_witness(
    ctx: *mut RLNWrapper,
    calculated_witness: Vec<JsBigInt>,
    serialized_witness: Uint8Array,
) -> Result<Uint8Array, String> {
    let mut witness_vec: Vec<BigInt> = vec![];

    // Convert each JS BigInt to a num-bigint `BigInt` by round-tripping
    // through its base-10 string representation (no direct conversion
    // exists between the two types).
    for v in calculated_witness {
        witness_vec.push(
            v.to_string(10)
                .map_err(|err| format!("{:#?}", err))?
                .as_string()
                .ok_or("not a string error")?
                .parse::<BigInt>()
                .map_err(|err| format!("{:#?}", err))?,
        );
    }

    call_with_output_and_error_msg!(
        ctx,
        generate_rln_proof_with_witness,
        "could not generate proof",
        witness_vec,
        serialized_witness.to_vec()
    )
}
|
||||
|
||||
/// Generates a membership keypair (exported to JS as `generateMembershipKey`).
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateMembershipKey)]
pub fn wasm_key_gen(ctx: *const RLNWrapper) -> Result<Uint8Array, String> {
    call_with_output_and_error_msg!(ctx, key_gen, "could not generate membership keys")
}

/// Generates an extended membership keypair
/// (exported to JS as `generateExtendedMembershipKey`).
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateExtendedMembershipKey)]
pub fn wasm_extended_key_gen(ctx: *const RLNWrapper) -> Result<Uint8Array, String> {
    call_with_output_and_error_msg!(ctx, extended_key_gen, "could not generate membership keys")
}

/// Deterministically generates a membership keypair from `seed`
/// (exported to JS as `generateSeededMembershipKey`).
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateSeededMembershipKey)]
pub fn wasm_seeded_key_gen(ctx: *const RLNWrapper, seed: Uint8Array) -> Result<Uint8Array, String> {
    call_with_output_and_error_msg!(
        ctx,
        seeded_key_gen,
        "could not generate membership key",
        &seed.to_vec()[..]
    )
}

/// Deterministically generates an extended membership keypair from `seed`
/// (exported to JS as `generateSeededExtendedMembershipKey`).
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateSeededExtendedMembershipKey)]
pub fn wasm_seeded_extended_key_gen(
    ctx: *const RLNWrapper,
    seed: Uint8Array,
) -> Result<Uint8Array, String> {
    call_with_output_and_error_msg!(
        ctx,
        seeded_extended_key_gen,
        "could not generate membership key",
        &seed.to_vec()[..]
    )
}
|
||||
|
||||
/// Recovers the identity secret from two proofs that share a nullifier
/// (i.e. from a rate-limit violation).
///
/// NOTE(review): the exported JS name `recovedIDSecret` looks like a typo
/// for `recoverIDSecret`; renaming it would break existing JS callers, so
/// it is left as-is and only flagged here.
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = recovedIDSecret)]
pub fn wasm_recover_id_secret(
    ctx: *const RLNWrapper,
    input_proof_data_1: Uint8Array,
    input_proof_data_2: Uint8Array,
) -> Result<Uint8Array, String> {
    call_with_output_and_error_msg!(
        ctx,
        recover_id_secret,
        "could not recover id secret",
        &input_proof_data_1.to_vec()[..],
        &input_proof_data_2.to_vec()[..]
    )
}
|
||||
|
||||
/// Verifies a serialized proof-plus-signal buffer against the instance's
/// own tree root (exported to JS as `verifyRLNProof`).
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = verifyRLNProof)]
pub fn wasm_verify_rln_proof(ctx: *const RLNWrapper, proof: Uint8Array) -> Result<bool, String> {
    call_bool_method_with_error_msg!(
        ctx,
        verify_rln_proof,
        "error while verifying rln proof".to_string(),
        &proof.to_vec()[..]
    )
}

/// Verifies a serialized proof-plus-signal buffer against an explicit set
/// of serialized accepted roots (exported to JS as `verifyWithRoots`).
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = verifyWithRoots)]
pub fn wasm_verify_with_roots(
    ctx: *const RLNWrapper,
    proof: Uint8Array,
    roots: Uint8Array,
) -> Result<bool, String> {
    call_bool_method_with_error_msg!(
        ctx,
        verify_with_roots,
        "error while verifying proof with roots".to_string(),
        &proof.to_vec()[..],
        &roots.to_vec()[..]
    )
}
|
||||
|
||||
/// Returns the serialized current Merkle tree root
/// (exported to JS as `getRoot`).
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = getRoot)]
pub fn wasm_get_root(ctx: *const RLNWrapper) -> Result<Uint8Array, String> {
    call_with_output_and_error_msg!(ctx, get_root, "could not obtain root")
}

/// Hashes arbitrary input bytes via `rln::public::hash`
/// (exported to JS as `hash`).
#[wasm_bindgen(js_name = hash)]
pub fn wasm_hash(input: Uint8Array) -> Result<Uint8Array, String> {
    fn_call_with_output_and_error_msg!(hash, "could not generate hash", &input.to_vec()[..])
}

/// Hashes serialized field elements via `rln::public::poseidon_hash`
/// (exported to JS as `poseidonHash`).
#[wasm_bindgen(js_name = poseidonHash)]
pub fn wasm_poseidon_hash(input: Uint8Array) -> Result<Uint8Array, String> {
    fn_call_with_output_and_error_msg!(
        poseidon_hash,
        "could not generate poseidon hash",
        &input.to_vec()[..]
    )
}
|
||||
18
rln-wasm/src/utils.js
Normal file
18
rln-wasm/src/utils.js
Normal file
@@ -0,0 +1,18 @@
|
||||
const fs = require("fs");
|
||||
|
||||
// Utils functions for loading the circom witness calculator and reading
// files from tests (imported from Rust via #[wasm_bindgen(module = ...)]).

module.exports = {
    // Synchronously reads a file and returns its raw contents.
    read_file: function (path) {
        return fs.readFileSync(path);
    },

    // Loads the circom wasm at `circom_path`, runs the witness calculator on
    // `inputs`, and returns the witness as a JSON string (BigInts are
    // stringified, since JSON cannot represent them natively).
    calculateWitness: async function(circom_path, inputs){
        const wc = require("resources/witness_calculator.js");
        const wasmFile = fs.readFileSync(circom_path);
        // Trim the Buffer to the exact byte range backing this file's data.
        const wasmFileBuffer = wasmFile.slice(wasmFile.byteOffset, wasmFile.byteOffset + wasmFile.byteLength);
        const witnessCalculator = await wc(wasmFileBuffer);
        const calculatedWitness = await witnessCalculator.calculateWitness(inputs, false);
        return JSON.stringify(calculatedWitness, (key, value) => typeof value === "bigint" ? value.toString() : value);
    }
}
|
||||
148
rln-wasm/tests/rln-wasm.rs
Normal file
148
rln-wasm/tests/rln-wasm.rs
Normal file
@@ -0,0 +1,148 @@
|
||||
#![cfg(target_arch = "wasm32")]
|
||||
|
||||
#[cfg(test)]
mod tests {
    use js_sys::{BigInt as JsBigInt, Object, Uint8Array};
    use rln::circuit::{Fr, TEST_TREE_HEIGHT};
    use rln::hashers::{hash_to_field, poseidon_hash};
    use rln::utils::{bytes_le_to_fr, fr_to_bytes_le, normalize_usize};
    use rln_wasm::*;
    use wasm_bindgen::{prelude::*, JsValue};
    use wasm_bindgen_test::wasm_bindgen_test;

    // JS helpers (file I/O and the circom witness calculator) imported from
    // src/utils.js, since neither is available natively on wasm32.
    #[wasm_bindgen(module = "src/utils.js")]
    extern "C" {
        #[wasm_bindgen(catch)]
        fn read_file(path: &str) -> Result<Uint8Array, JsValue>;

        #[wasm_bindgen(catch)]
        async fn calculateWitness(circom_path: &str, input: Object) -> Result<JsValue, JsValue>;
    }

    // End-to-end flow: create an RLN instance, register a member, build a
    // witness, calculate it in JS, generate a proof, and verify it (both
    // against the instance root and against an explicit roots list).
    #[wasm_bindgen_test]
    pub async fn test_basic_flow() {
        let tree_height = TEST_TREE_HEIGHT;
        let circom_path = format!("../rln/resources/tree_height_{TEST_TREE_HEIGHT}/rln.wasm");
        let zkey_path = format!("../rln/resources/tree_height_{TEST_TREE_HEIGHT}/rln_final.zkey");
        let vk_path =
            format!("../rln/resources/tree_height_{TEST_TREE_HEIGHT}/verification_key.json");
        let zkey = read_file(&zkey_path).unwrap();
        let vk = read_file(&vk_path).unwrap();

        // Creating an instance of RLN
        let rln_instance = wasm_new(tree_height, zkey, vk).unwrap();

        // Creating membership key; output layout is id_key | id_commitment,
        // 32 bytes each.
        let mem_keys = wasm_key_gen(rln_instance).unwrap();
        let id_key = mem_keys.subarray(0, 32);
        let id_commitment = mem_keys.subarray(32, 64);

        // Prepare the message
        let signal = b"Hello World";

        let identity_index: usize = 0;
        // Setting up the epoch and rln_identifier
        let epoch = hash_to_field(b"test-epoch");
        let rln_identifier = hash_to_field(b"test-rln-identifier");

        let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);
        let external_nullifier = fr_to_bytes_le(&external_nullifier);

        let user_message_limit = Fr::from(100);
        let message_id = fr_to_bytes_le(&Fr::from(0));

        // rate_commitment = poseidon(id_commitment, user_message_limit)
        let (id_commitment_fr, _) = bytes_le_to_fr(&id_commitment.to_vec()[..]);
        let rate_commitment = poseidon_hash(&[id_commitment_fr, user_message_limit]);

        // Insert PK
        wasm_set_next_leaf(
            rln_instance,
            Uint8Array::from(fr_to_bytes_le(&rate_commitment).as_slice()),
        )
        .unwrap();

        // Serializing the message:
        // id_key | identity_index | user_message_limit | message_id |
        // external_nullifier | signal_len | signal
        let mut serialized_vec: Vec<u8> = Vec::new();
        serialized_vec.append(&mut id_key.to_vec());
        serialized_vec.append(&mut normalize_usize(identity_index));
        serialized_vec.append(&mut fr_to_bytes_le(&user_message_limit).to_vec());
        serialized_vec.append(&mut message_id.to_vec());
        serialized_vec.append(&mut external_nullifier.to_vec());
        serialized_vec.append(&mut normalize_usize(signal.len()));
        serialized_vec.append(&mut signal.to_vec());
        let serialized_message = Uint8Array::from(&serialized_vec[..]);

        let serialized_rln_witness =
            wasm_get_serialized_rln_witness(rln_instance, serialized_message).unwrap();

        // Obtaining inputs that should be sent to circom witness calculator
        let json_inputs =
            rln_witness_to_json(rln_instance, serialized_rln_witness.clone()).unwrap();

        // Calculating witness with JS
        // (Using a JSON since wasm_bindgen does not like Result<Vec<JsBigInt>,JsValue>)
        let calculated_witness_json = calculateWitness(&circom_path, json_inputs)
            .await
            .unwrap()
            .as_string()
            .unwrap();
        let calculated_witness_vec_str: Vec<String> =
            serde_json::from_str(&calculated_witness_json).unwrap();
        let calculated_witness: Vec<JsBigInt> = calculated_witness_vec_str
            .iter()
            .map(|x| JsBigInt::new(&x.into()).unwrap())
            .collect();

        // Generating proof
        let proof = generate_rln_proof_with_witness(
            rln_instance,
            calculated_witness.into(),
            serialized_rln_witness,
        )
        .unwrap();

        // Add signal_len | signal
        let mut proof_bytes = proof.to_vec();
        proof_bytes.append(&mut normalize_usize(signal.len()));
        proof_bytes.append(&mut signal.to_vec());
        let proof_with_signal = Uint8Array::from(&proof_bytes[..]);

        // Validate Proof
        let is_proof_valid = wasm_verify_rln_proof(rln_instance, proof_with_signal);

        assert!(
            is_proof_valid.unwrap(),
            "validating proof generated with wasm failed"
        );

        // Validating Proof with Roots
        let root = wasm_get_root(rln_instance).unwrap();
        let roots = Uint8Array::from(&root.to_vec()[..]);
        let proof_with_signal = Uint8Array::from(&proof_bytes[..]);

        let is_proof_valid = wasm_verify_with_roots(rln_instance, proof_with_signal, roots);
        assert!(is_proof_valid.unwrap(), "verifying proof with roots failed");
    }

    // Round-trips arbitrary metadata bytes through set/get.
    #[wasm_bindgen_test]
    fn test_metadata() {
        let tree_height = TEST_TREE_HEIGHT;
        let zkey_path = format!("../rln/resources/tree_height_{TEST_TREE_HEIGHT}/rln_final.zkey");
        let vk_path =
            format!("../rln/resources/tree_height_{TEST_TREE_HEIGHT}/verification_key.json");
        let zkey = read_file(&zkey_path).unwrap();
        let vk = read_file(&vk_path).unwrap();

        // Creating an instance of RLN
        let rln_instance = wasm_new(tree_height, zkey, vk).unwrap();

        let test_metadata = Uint8Array::new(&JsValue::from_str("test"));
        // Inserting random metadata
        wasm_set_metadata(rln_instance, test_metadata.clone()).unwrap();

        // Getting metadata
        let metadata = wasm_get_metadata(rln_instance).unwrap();

        assert_eq!(metadata.to_vec(), test_metadata.to_vec());
    }
}
|
||||
@@ -1,42 +1,93 @@
|
||||
[package]
|
||||
name = "rln"
|
||||
version = "0.1.0"
|
||||
version = "0.5.0"
|
||||
edition = "2021"
|
||||
license = "MIT OR Apache-2.0"
|
||||
description = "APIs to manage, compute and verify zkSNARK proofs and RLN primitives"
|
||||
documentation = "https://github.com/vacp2p/zerokit"
|
||||
homepage = "https://vac.dev"
|
||||
repository = "https://github.com/vacp2p/zerokit"
|
||||
|
||||
[lib]
|
||||
crate-type = ["cdylib", "rlib", "staticlib"]
|
||||
crate-type = ["rlib", "staticlib"]
|
||||
bench = false
|
||||
|
||||
# This flag disable cargo doctests, i.e. testing example code-snippets in documentation
|
||||
doctest = false
|
||||
|
||||
|
||||
[dependencies]
|
||||
|
||||
# ZKP Generation
|
||||
ark-ff = { version = "0.3.0", default-features = false, features = ["parallel", "asm"] }
|
||||
ark-std = { version = "0.3.0", default-features = false, features = ["parallel"] }
|
||||
ark-bn254 = { version = "0.3.0" }
|
||||
ark-groth16 = { git = "https://github.com/arkworks-rs/groth16", rev = "765817f", features = ["parallel"] }
|
||||
ark-relations = { version = "0.3.0", default-features = false, features = [ "std" ] }
|
||||
ark-serialize = { version = "0.3.0", default-features = false }
|
||||
ark-circom = { git = "https://github.com/gakonst/ark-circom", rev = "06eb075", features = ["circom-2"] }
|
||||
#ark-circom = { git = "https://github.com/vacp2p/ark-circom", branch = "no-ethers-core", features = ["circom-2"] }
|
||||
wasmer = "2.3.0"
|
||||
ark-ec = { version = "=0.4.1", default-features = false }
|
||||
ark-ff = { version = "=0.4.1", default-features = false, features = ["asm"] }
|
||||
ark-std = { version = "=0.4.0", default-features = false }
|
||||
ark-bn254 = { version = "=0.4.0" }
|
||||
ark-groth16 = { version = "=0.4.0", features = [
|
||||
"parallel",
|
||||
], default-features = false }
|
||||
ark-relations = { version = "=0.4.0", default-features = false, features = [
|
||||
"std",
|
||||
] }
|
||||
ark-serialize = { version = "=0.4.1", default-features = false }
|
||||
ark-circom = { version = "=0.1.0", default-features = false, features = [
|
||||
"circom-2",
|
||||
] }
|
||||
ark-zkey = { version = "0.1.0", optional = true, default-features = false }
|
||||
|
||||
# WASM
|
||||
wasmer = { version = "=2.3.0", default-features = false }
|
||||
|
||||
# error handling
|
||||
color-eyre = "0.5.11"
|
||||
thiserror = "1.0.0"
|
||||
color-eyre = "=0.6.2"
|
||||
thiserror = "=1.0.39"
|
||||
|
||||
# utilities
|
||||
cfg-if = "1.0"
|
||||
num-bigint = { version = "0.4.3", default-features = false, features = ["rand"] }
|
||||
num-traits = "0.2.11"
|
||||
once_cell = "1.14.0"
|
||||
rand = "0.8"
|
||||
tiny-keccak = { version = "2.0.2", features = ["keccak"] }
|
||||
cfg-if = "=1.0"
|
||||
num-bigint = { version = "=0.4.3", default-features = false, features = [
|
||||
"rand",
|
||||
] }
|
||||
num-traits = "=0.2.15"
|
||||
once_cell = "=1.17.1"
|
||||
rand = "=0.8.5"
|
||||
rand_chacha = "=0.3.1"
|
||||
tiny-keccak = { version = "=2.0.2", features = ["keccak"] }
|
||||
utils = { package = "zerokit_utils", version = "=0.5.0", path = "../utils/", default-features = false }
|
||||
|
||||
|
||||
# serialization
|
||||
serde_json = "1.0.48"
|
||||
serde_json = "=1.0.96"
|
||||
serde = { version = "=1.0.163", features = ["derive"] }
|
||||
|
||||
include_dir = "=0.7.3"
|
||||
|
||||
[dev-dependencies]
|
||||
|
||||
hex-literal = "0.3.4"
|
||||
sled = "=0.34.7"
|
||||
criterion = { version = "=0.4.0", features = ["html_reports"] }
|
||||
|
||||
[features]
|
||||
fullmerkletree = []
|
||||
default = ["parallel", "wasmer/sys-default", "pmtree-ft"]
|
||||
parallel = [
|
||||
"ark-ec/parallel",
|
||||
"ark-ff/parallel",
|
||||
"ark-std/parallel",
|
||||
"ark-groth16/parallel",
|
||||
"utils/parallel",
|
||||
]
|
||||
wasm = ["wasmer/js", "wasmer/std"]
|
||||
fullmerkletree = ["default"]
|
||||
arkzkey = ["ark-zkey"]
|
||||
|
||||
# Note: pmtree feature is still experimental
|
||||
pmtree-ft = ["utils/pmtree-ft"]
|
||||
|
||||
[[bench]]
|
||||
name = "pmtree_benchmark"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "circuit_loading_benchmark"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "poseidon_tree_benchmark"
|
||||
harness = false
|
||||
|
||||
11
rln/Makefile.toml
Normal file
11
rln/Makefile.toml
Normal file
@@ -0,0 +1,11 @@
|
||||
# cargo-make task definitions for the rln crate.

# Build the crate in release mode.
[tasks.build]
command = "cargo"
args = ["build", "--release"]

# Run the test suite in release mode.
[tasks.test]
command = "cargo"
args = ["test", "--release"]

# Run the criterion benchmarks.
[tasks.bench]
command = "cargo"
args = ["bench"]
|
||||
189
rln/README.md
189
rln/README.md
@@ -2,17 +2,32 @@
|
||||
|
||||
This module provides APIs to manage, compute and verify [RLN](https://rfc.vac.dev/spec/32/) zkSNARK proofs and RLN primitives.
|
||||
|
||||
Currently, this module comes with three [pre-compiled](https://github.com/vacp2p/zerokit/tree/master/rln/resources) RLN circuits having Merkle tree of height `15`, `19` and `20`, respectively.
|
||||
## Pre-requisites
|
||||
|
||||
Implemented tests can be executed by running within the module folder
|
||||
### Install dependencies and clone repo
|
||||
|
||||
`cargo test --release`
|
||||
```sh
|
||||
make installdeps
|
||||
git clone https://github.com/vacp2p/zerokit.git
|
||||
cd zerokit/rln
|
||||
```
|
||||
|
||||
## Compiling circuits
|
||||
### Build and Test
|
||||
|
||||
`rln` (https://github.com/privacy-scaling-explorations/rln) repo with Circuits is contained as a submodule.
|
||||
To build and test, run the following commands within the module folder
|
||||
|
||||
``` sh
|
||||
```bash
|
||||
cargo make build
|
||||
cargo make test
|
||||
```
|
||||
|
||||
### Compile ZK circuits
|
||||
|
||||
The `rln` (https://github.com/rate-limiting-nullifier/circom-rln) repository, which contains the RLN circuit implementation is a submodule of zerokit RLN.
|
||||
|
||||
To compile the RLN circuit
|
||||
|
||||
```sh
|
||||
# Update submodules
|
||||
git submodule update --init --recursive
|
||||
|
||||
@@ -27,9 +42,9 @@ cp build/zkeyFiles/rln-final.zkey ../../resources/tree_height_15
|
||||
cp build/zkeyFiles/rln.wasm ../../resources/tree_height_15
|
||||
```
|
||||
|
||||
Note that the above code snippet will compile a RLN circuit with a Merkle tree of height equal `15` based on the default value set in `rln/circuit/rln.circom`.
|
||||
Note that the above code snippet will compile a RLN circuit with a Merkle tree of height equal `15` based on the default value set in `vendor/rln/circuit/rln.circom`.
|
||||
|
||||
To compile a RLN circuit with Merkle tree height `N`, it suffices to change `rln/circuit/rln.circom` to
|
||||
In order to compile a RLN circuit with Merkle tree height `N`, it suffices to change `vendor/rln/circuit/rln.circom` to
|
||||
|
||||
```
|
||||
pragma circom 2.0.0;
|
||||
@@ -39,5 +54,159 @@ include "./rln-base.circom";
|
||||
component main {public [x, epoch, rln_identifier ]} = RLN(N);
|
||||
```
|
||||
|
||||
However, if `N` is too big, this might require a bigger Powers of Tau ceremony than the one hardcoded in `./scripts/build-circuits.sh`, which is `2^14`.
|
||||
In such case we refer to the official [Circom documentation](https://docs.circom.io/getting-started/proving-circuits/#powers-of-tau) for instructions on how to run an appropriate Powers of Tau ceremony and Phase 2 in order to compile the desired circuit.
|
||||
However, if `N` is too big, this might require a bigger Powers of Tau ceremony than the one hardcoded in `./scripts/build-circuits.sh`, which is `2^14`.
|
||||
In such case we refer to the official [Circom documentation](https://docs.circom.io/getting-started/proving-circuits/#powers-of-tau) for instructions on how to run an appropriate Powers of Tau ceremony and Phase 2 in order to compile the desired circuit.
|
||||
|
||||
Currently, the `rln` module comes with 2 [pre-compiled](https://github.com/vacp2p/zerokit/tree/master/rln/resources) RLN circuits having Merkle tree of height `20` and `32`, respectively.
|
||||
|
||||
## Getting started
|
||||
|
||||
### Add RLN as dependency
|
||||
|
||||
We start by adding zerokit RLN to our `Cargo.toml`
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
rln = { git = "https://github.com/vacp2p/zerokit" }
|
||||
```
|
||||
|
||||
### Create a RLN object
|
||||
|
||||
First, we need to create a RLN object for a chosen input Merkle tree size.
|
||||
|
||||
Note that we need to pass to RLN object constructor the path where the circuit (`rln.wasm`, built for the input tree size), the corresponding proving key (`rln_final.zkey`) or (`rln_final.arkzkey`) and verification key (`verification_key.json`, optional) are found.
|
||||
|
||||
In the following we will use [cursors](https://doc.rust-lang.org/std/io/struct.Cursor.html) as readers/writers for interfacing with RLN public APIs.
|
||||
|
||||
```rust
|
||||
use rln::protocol::*;
|
||||
use rln::public::*;
|
||||
use std::io::Cursor;
|
||||
|
||||
// We set the RLN parameters:
|
||||
// - the tree height;
|
||||
// - the tree config, if it is not defined, the default value will be set
|
||||
let tree_height = 20;
|
||||
let input = Cursor::new(json!({}).to_string());
|
||||
|
||||
// We create a new RLN instance
|
||||
let mut rln = RLN::new(tree_height, input);
|
||||
```
|
||||
|
||||
### Generate an identity keypair
|
||||
|
||||
We generate an identity keypair
|
||||
|
||||
```rust
|
||||
// We generate an identity pair
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.key_gen(&mut buffer).unwrap();
|
||||
|
||||
// We deserialize the keygen output to obtain
|
||||
// the identity_secret and id_commitment
|
||||
let (identity_secret_hash, id_commitment) = deserialize_identity_pair(buffer.into_inner());
|
||||
```
|
||||
|
||||
### Add Rate commitment to the RLN Merkle tree
|
||||
|
||||
```rust
|
||||
// We define the tree index where id_commitment will be added
|
||||
let id_index = 10;
|
||||
let user_message_limit = 10;
|
||||
|
||||
// We serialize id_commitment and pass it to set_leaf
|
||||
let rate_commitment = poseidon_hash(&[id_commitment, user_message_limit]);
|
||||
let mut buffer = Cursor::new(serialize_field_element(rate_commitment));
|
||||
rln.set_leaf(id_index, &mut buffer).unwrap();
|
||||
```
|
||||
|
||||
Note that when tree leaves are not explicitly set by the user (in this example, all those with index less than or greater than `10`), their value is set to a hardcoded default (all-`0` bytes in the current implementation).
|
||||
|
||||
### Set external nullifier
|
||||
|
||||
The `external nullifier` includes two parameters.
|
||||
|
||||
The first one is `epoch` and it's used to identify messages received in a certain time frame. It usually corresponds to the current UNIX time but can also be set to a random value or generated by a seed, provided that it corresponds to a field element.
|
||||
|
||||
The second one is `rln_identifier` and it's used to prevent a RLN ZK proof generated for one application to be re-used in another one.
|
||||
|
||||
```rust
|
||||
// We generate epoch from a date seed and we ensure is
|
||||
// mapped to a field element by hashing-to-field its content
|
||||
let epoch = hash_to_field(b"Today at noon, this year");
|
||||
// We generate rln_identifier from a date seed and we ensure is
|
||||
// mapped to a field element by hashing-to-field its content
|
||||
let rln_identifier = hash_to_field(b"test-rln-identifier");
|
||||
|
||||
let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);
|
||||
```
|
||||
|
||||
### Set signal
|
||||
|
||||
The signal is the message for which we are computing a RLN proof.
|
||||
|
||||
```rust
|
||||
// We set our signal
|
||||
let signal = b"RLN is awesome";
|
||||
```
|
||||
|
||||
### Generate a RLN proof
|
||||
|
||||
We prepare the input to the proof generation routine.
|
||||
|
||||
Input buffer is serialized as `[ identity_key | id_index | external_nullifier | user_message_limit | message_id | signal_len | signal ]`.
|
||||
|
||||
```rust
|
||||
// We prepare input to the proof generation routine
|
||||
let proof_input = prepare_prove_input(identity_secret_hash, id_index, external_nullifier, signal);
|
||||
```
|
||||
|
||||
We are now ready to generate a RLN ZK proof along with the _public outputs_ of the ZK circuit evaluation.
|
||||
|
||||
```rust
|
||||
|
||||
// We generate a RLN proof for proof_input
|
||||
let mut in_buffer = Cursor::new(proof_input);
|
||||
let mut out_buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.generate_rln_proof(&mut in_buffer, &mut out_buffer)
|
||||
.unwrap();
|
||||
|
||||
// We get the public outputs returned by the circuit evaluation
|
||||
let proof_data = out_buffer.into_inner();
|
||||
```
|
||||
|
||||
The byte vector `proof_data` is serialized as `[ zk-proof | tree_root | external_nullifier | share_x | share_y | nullifier ]`.
|
||||
|
||||
### Verify a RLN proof
|
||||
|
||||
We prepare the input to the proof verification routine.
|
||||
|
||||
Input buffer is serialized as `[proof_data | signal_len | signal ]`, where `proof_data` is (computed as) the output obtained by `generate_rln_proof`.
|
||||
|
||||
```rust
|
||||
// We prepare input to the proof verification routine
|
||||
let verify_data = prepare_verify_input(proof_data, signal);
|
||||
|
||||
// We verify the zk-proof against the provided proof values
|
||||
let mut in_buffer = Cursor::new(verify_data);
|
||||
let verified = rln.verify(&mut in_buffer).unwrap();
|
||||
```
|
||||
|
||||
We check if the proof verification was successful:
|
||||
|
||||
```rust
|
||||
// We ensure the proof is valid
|
||||
assert!(verified);
|
||||
```
|
||||
|
||||
## Get involved!
|
||||
|
||||
Zerokit RLN public and FFI APIs allow interaction with many more features than what briefly showcased above.
|
||||
|
||||
We invite you to check our API documentation by running
|
||||
|
||||
```rust
|
||||
cargo doc --no-deps
|
||||
```
|
||||
|
||||
and look at unit tests to have an hint on how to interface and use them.
|
||||
|
||||
14
rln/benches/circuit_loading_benchmark.rs
Normal file
14
rln/benches/circuit_loading_benchmark.rs
Normal file
@@ -0,0 +1,14 @@
|
||||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
|
||||
// Depending on the key type (enabled by the `--features arkzkey` flag)
|
||||
// the upload speed from the `rln_final.zkey` or `rln_final.arkzkey` file is calculated
|
||||
pub fn key_load_benchmark(c: &mut Criterion) {
|
||||
c.bench_function("zkey::upload_from_folder", |b| {
|
||||
b.iter(|| {
|
||||
let _ = rln::circuit::zkey_from_folder();
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(benches, key_load_benchmark);
|
||||
criterion_main!(benches);
|
||||
56
rln/benches/pmtree_benchmark.rs
Normal file
56
rln/benches/pmtree_benchmark.rs
Normal file
@@ -0,0 +1,56 @@
|
||||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
use rln::{circuit::Fr, pm_tree_adapter::PmTree};
|
||||
use utils::ZerokitMerkleTree;
|
||||
|
||||
pub fn pmtree_benchmark(c: &mut Criterion) {
|
||||
let mut tree = PmTree::default(2).unwrap();
|
||||
|
||||
let leaves: Vec<Fr> = (0..4).map(|s| Fr::from(s)).collect();
|
||||
|
||||
c.bench_function("Pmtree::set", |b| {
|
||||
b.iter(|| {
|
||||
tree.set(0, leaves[0]).unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("Pmtree:delete", |b| {
|
||||
b.iter(|| {
|
||||
tree.delete(0).unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("Pmtree::override_range", |b| {
|
||||
b.iter(|| {
|
||||
tree.override_range(0, leaves.clone(), [0, 1, 2, 3])
|
||||
.unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("Pmtree::compute_root", |b| {
|
||||
b.iter(|| {
|
||||
tree.compute_root().unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("Pmtree::get", |b| {
|
||||
b.iter(|| {
|
||||
tree.get(0).unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
// check intermediate node getter which required additional computation of sub root index
|
||||
c.bench_function("Pmtree::get_subtree_root", |b| {
|
||||
b.iter(|| {
|
||||
tree.get_subtree_root(1, 0).unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("Pmtree::get_empty_leaves_indices", |b| {
|
||||
b.iter(|| {
|
||||
tree.get_empty_leaves_indices();
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(benches, pmtree_benchmark);
|
||||
criterion_main!(benches);
|
||||
79
rln/benches/poseidon_tree_benchmark.rs
Normal file
79
rln/benches/poseidon_tree_benchmark.rs
Normal file
@@ -0,0 +1,79 @@
|
||||
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
use rln::{
|
||||
circuit::{Fr, TEST_TREE_HEIGHT},
|
||||
hashers::PoseidonHash,
|
||||
};
|
||||
use utils::{FullMerkleTree, OptimalMerkleTree, ZerokitMerkleTree};
|
||||
|
||||
pub fn get_leaves(n: u32) -> Vec<Fr> {
|
||||
(0..n).map(|s| Fr::from(s)).collect()
|
||||
}
|
||||
|
||||
pub fn optimal_merkle_tree_poseidon_benchmark(c: &mut Criterion) {
|
||||
c.bench_function("OptimalMerkleTree::<Poseidon>::full_height_gen", |b| {
|
||||
b.iter(|| {
|
||||
OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
let mut group = c.benchmark_group("Set");
|
||||
for &n in [1u32, 10, 100].iter() {
|
||||
let leaves = get_leaves(n);
|
||||
|
||||
let mut tree = OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
|
||||
group.bench_function(
|
||||
BenchmarkId::new("OptimalMerkleTree::<Poseidon>::set", n),
|
||||
|b| {
|
||||
b.iter(|| {
|
||||
for (i, l) in leaves.iter().enumerate() {
|
||||
let _ = tree.set(i, *l);
|
||||
}
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
group.bench_function(
|
||||
BenchmarkId::new("OptimalMerkleTree::<Poseidon>::set_range", n),
|
||||
|b| b.iter(|| tree.set_range(0, leaves.iter().cloned())),
|
||||
);
|
||||
}
|
||||
group.finish();
|
||||
}
|
||||
|
||||
pub fn full_merkle_tree_poseidon_benchmark(c: &mut Criterion) {
|
||||
c.bench_function("FullMerkleTree::<Poseidon>::full_height_gen", |b| {
|
||||
b.iter(|| {
|
||||
FullMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
let mut group = c.benchmark_group("Set");
|
||||
for &n in [1u32, 10, 100].iter() {
|
||||
let leaves = get_leaves(n);
|
||||
|
||||
let mut tree = FullMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
|
||||
group.bench_function(
|
||||
BenchmarkId::new("FullMerkleTree::<Poseidon>::set", n),
|
||||
|b| {
|
||||
b.iter(|| {
|
||||
for (i, l) in leaves.iter().enumerate() {
|
||||
let _ = tree.set(i, *l);
|
||||
}
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
group.bench_function(
|
||||
BenchmarkId::new("FullMerkleTree::<Poseidon>::set_range", n),
|
||||
|b| b.iter(|| tree.set_range(0, leaves.iter().cloned())),
|
||||
);
|
||||
}
|
||||
group.finish();
|
||||
}
|
||||
|
||||
criterion_group!(
|
||||
benches,
|
||||
optimal_merkle_tree_poseidon_benchmark,
|
||||
full_merkle_tree_poseidon_benchmark
|
||||
);
|
||||
criterion_main!(benches);
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,119 +0,0 @@
|
||||
{
|
||||
"protocol": "groth16",
|
||||
"curve": "bn128",
|
||||
"nPublic": 6,
|
||||
"vk_alpha_1": [
|
||||
"1805378556360488226980822394597799963030511477964155500103132920745199284516",
|
||||
"11990395240534218699464972016456017378439762088320057798320175886595281336136",
|
||||
"1"
|
||||
],
|
||||
"vk_beta_2": [
|
||||
[
|
||||
"11031529986141021025408838211017932346992429731488270384177563837022796743627",
|
||||
"16042159910707312759082561183373181639420894978640710177581040523252926273854"
|
||||
],
|
||||
[
|
||||
"20112698439519222240302944148895052359035104222313380895334495118294612255131",
|
||||
"19441583024670359810872018179190533814486480928824742448673677460151702019379"
|
||||
],
|
||||
[
|
||||
"1",
|
||||
"0"
|
||||
]
|
||||
],
|
||||
"vk_gamma_2": [
|
||||
[
|
||||
"10857046999023057135944570762232829481370756359578518086990519993285655852781",
|
||||
"11559732032986387107991004021392285783925812861821192530917403151452391805634"
|
||||
],
|
||||
[
|
||||
"8495653923123431417604973247489272438418190587263600148770280649306958101930",
|
||||
"4082367875863433681332203403145435568316851327593401208105741076214120093531"
|
||||
],
|
||||
[
|
||||
"1",
|
||||
"0"
|
||||
]
|
||||
],
|
||||
"vk_delta_2": [
|
||||
[
|
||||
"1342791402398183550129987853701397066695422166542200371137242980909975744720",
|
||||
"19885954793721639146517398722913034453263197732511169431324269951156805454588"
|
||||
],
|
||||
[
|
||||
"16612518449808520746616592899100682320852224744311197908486719118388461103870",
|
||||
"13039435290897389787786546960964558630619663289413586834851804020863949546009"
|
||||
],
|
||||
[
|
||||
"1",
|
||||
"0"
|
||||
]
|
||||
],
|
||||
"vk_alphabeta_12": [
|
||||
[
|
||||
[
|
||||
"5151991366823434428398919091000210787450832786814248297320989361921939794156",
|
||||
"15735191313289001022885148627913534790382722933676436876510746491415970766821"
|
||||
],
|
||||
[
|
||||
"3387907257437913904447588318761906430938415556102110876587455322225272831272",
|
||||
"1998779853452712881084781956683721603875246565720647583735935725110674288056"
|
||||
],
|
||||
[
|
||||
"14280074182991498185075387990446437410077692353432005297922275464876153151820",
|
||||
"17092408446352310039633488224969232803092763095456307462247653153107223117633"
|
||||
]
|
||||
],
|
||||
[
|
||||
[
|
||||
"4359046709531668109201634396816565829237358165496082832279660960675584351266",
|
||||
"4511888308846208349307186938266411423935335853916317436093178288331845821336"
|
||||
],
|
||||
[
|
||||
"11429499807090785857812316277335883295048773373068683863667725283965356423273",
|
||||
"16232274853200678548795010078253506586114563833318973594428907292096178657392"
|
||||
],
|
||||
[
|
||||
"18068999605870933925311275504102553573815570223888590384919752303726860800970",
|
||||
"17309569111965782732372130116757295842160193489132771344011460471298173784984"
|
||||
]
|
||||
]
|
||||
],
|
||||
"IC": [
|
||||
[
|
||||
"15907620619058468322652190166474219459106695372760190199814463422116003944385",
|
||||
"15752765921940703867480319151728055971288798043197983667046402260506178676501",
|
||||
"1"
|
||||
],
|
||||
[
|
||||
"12004081423498474638814710157503496372594892372197913146719480190853290407272",
|
||||
"17759993271504587923309435837545182941635937261719294500288793819648071033469",
|
||||
"1"
|
||||
],
|
||||
[
|
||||
"878120019311612655450010384994897394984265086410869146105626241891073100410",
|
||||
"17631186298933191134732246976686754514124819009836710500647157641262968661294",
|
||||
"1"
|
||||
],
|
||||
[
|
||||
"14710016919630225372037989028011020715054625029990218653012745498368446893907",
|
||||
"2581293501049347486538806758240731445964309309490885835380825245889909387041",
|
||||
"1"
|
||||
],
|
||||
[
|
||||
"766327921864693063481261933507417084013182964450768912480746815296334678928",
|
||||
"18104222034822903557262264275808261481286672296559910954337205847153944954509",
|
||||
"1"
|
||||
],
|
||||
[
|
||||
"8877686447180479408315100041907552504213694351585462004774320248566787828012",
|
||||
"15836202093850379814510995758762098170932781831518064786308541653541698178373",
|
||||
"1"
|
||||
],
|
||||
[
|
||||
"19567388833538990982537236781224917793757180861915757860561618079730704818311",
|
||||
"3535132838196675082818592669173684593624477421910576112671761297886253127546",
|
||||
"1"
|
||||
]
|
||||
]
|
||||
}
|
||||
Binary file not shown.
BIN
rln/resources/tree_height_20/rln_final.arkzkey
Normal file
BIN
rln/resources/tree_height_20/rln_final.arkzkey
Normal file
Binary file not shown.
Binary file not shown.
@@ -1,20 +1,20 @@
|
||||
{
|
||||
"protocol": "groth16",
|
||||
"curve": "bn128",
|
||||
"nPublic": 6,
|
||||
"nPublic": 5,
|
||||
"vk_alpha_1": [
|
||||
"1805378556360488226980822394597799963030511477964155500103132920745199284516",
|
||||
"11990395240534218699464972016456017378439762088320057798320175886595281336136",
|
||||
"20491192805390485299153009773594534940189261866228447918068658471970481763042",
|
||||
"9383485363053290200918347156157836566562967994039712273449902621266178545958",
|
||||
"1"
|
||||
],
|
||||
"vk_beta_2": [
|
||||
[
|
||||
"11031529986141021025408838211017932346992429731488270384177563837022796743627",
|
||||
"16042159910707312759082561183373181639420894978640710177581040523252926273854"
|
||||
"6375614351688725206403948262868962793625744043794305715222011528459656738731",
|
||||
"4252822878758300859123897981450591353533073413197771768651442665752259397132"
|
||||
],
|
||||
[
|
||||
"20112698439519222240302944148895052359035104222313380895334495118294612255131",
|
||||
"19441583024670359810872018179190533814486480928824742448673677460151702019379"
|
||||
"10505242626370262277552901082094356697409835680220590971873171140371331206856",
|
||||
"21847035105528745403288232691147584728191162732299865338377159692350059136679"
|
||||
],
|
||||
[
|
||||
"1",
|
||||
@@ -37,12 +37,12 @@
|
||||
],
|
||||
"vk_delta_2": [
|
||||
[
|
||||
"1948496782571164085469528023647105317580208688174386157591917599801657832035",
|
||||
"20445814069256658101339037520922621162739470138213615104905368409238414511981"
|
||||
"17077735495685170943380938230836408503627170115414840315502244846025577589191",
|
||||
"14030085636943255545683322474441991939484590437387381169642530788494152024614"
|
||||
],
|
||||
[
|
||||
"10024680869920840984813249386422727863826862577760330492647062850849851925340",
|
||||
"10512156247842686783409460795717734694774542185222602679117887145206209285142"
|
||||
"11568745146423307387256571230823432454624378106569286849514884592874522611163",
|
||||
"1838524899938769516485895655063198583192139511330418290063560641219523306282"
|
||||
],
|
||||
[
|
||||
"1",
|
||||
@@ -52,67 +52,62 @@
|
||||
"vk_alphabeta_12": [
|
||||
[
|
||||
[
|
||||
"5151991366823434428398919091000210787450832786814248297320989361921939794156",
|
||||
"15735191313289001022885148627913534790382722933676436876510746491415970766821"
|
||||
"2029413683389138792403550203267699914886160938906632433982220835551125967885",
|
||||
"21072700047562757817161031222997517981543347628379360635925549008442030252106"
|
||||
],
|
||||
[
|
||||
"3387907257437913904447588318761906430938415556102110876587455322225272831272",
|
||||
"1998779853452712881084781956683721603875246565720647583735935725110674288056"
|
||||
"5940354580057074848093997050200682056184807770593307860589430076672439820312",
|
||||
"12156638873931618554171829126792193045421052652279363021382169897324752428276"
|
||||
],
|
||||
[
|
||||
"14280074182991498185075387990446437410077692353432005297922275464876153151820",
|
||||
"17092408446352310039633488224969232803092763095456307462247653153107223117633"
|
||||
"7898200236362823042373859371574133993780991612861777490112507062703164551277",
|
||||
"7074218545237549455313236346927434013100842096812539264420499035217050630853"
|
||||
]
|
||||
],
|
||||
[
|
||||
[
|
||||
"4359046709531668109201634396816565829237358165496082832279660960675584351266",
|
||||
"4511888308846208349307186938266411423935335853916317436093178288331845821336"
|
||||
"7077479683546002997211712695946002074877511277312570035766170199895071832130",
|
||||
"10093483419865920389913245021038182291233451549023025229112148274109565435465"
|
||||
],
|
||||
[
|
||||
"11429499807090785857812316277335883295048773373068683863667725283965356423273",
|
||||
"16232274853200678548795010078253506586114563833318973594428907292096178657392"
|
||||
"4595479056700221319381530156280926371456704509942304414423590385166031118820",
|
||||
"19831328484489333784475432780421641293929726139240675179672856274388269393268"
|
||||
],
|
||||
[
|
||||
"18068999605870933925311275504102553573815570223888590384919752303726860800970",
|
||||
"17309569111965782732372130116757295842160193489132771344011460471298173784984"
|
||||
"11934129596455521040620786944827826205713621633706285934057045369193958244500",
|
||||
"8037395052364110730298837004334506829870972346962140206007064471173334027475"
|
||||
]
|
||||
]
|
||||
],
|
||||
"IC": [
|
||||
[
|
||||
"18693301901828818437917730940595978397160482710354161265484535387752523310572",
|
||||
"17985273354976640088538673802000794244421192643855111089693820179790551470769",
|
||||
"4920513730204767532050733107749276406754520419375654722016092399980613788208",
|
||||
"10950491564509418434657706642388934308456795265036074733953533582377584967294",
|
||||
"1"
|
||||
],
|
||||
[
|
||||
"21164641723988537620541455173278629777250883365474191521194244273980931825942",
|
||||
"998385854410718613441067082771678946155853656328717326195057262123686425518",
|
||||
"6815064660695497986531118446154820702646540722664044216580897159556261271171",
|
||||
"17838140936832571103329556013529166877877534025488014783346458943575275015438",
|
||||
"1"
|
||||
],
|
||||
[
|
||||
"21666968581672145768705229094968410656430989593283335488162701230986314747515",
|
||||
"17996457608540683483506630273632100555125353447506062045735279661096094677264",
|
||||
"16364982450206976302246609763791333525052810246590359380676749324389440643932",
|
||||
"17092624338100676284548565502349491320314889021833923882585524649862570629227",
|
||||
"1"
|
||||
],
|
||||
[
|
||||
"20137761979695192602424300886442379728165712610493092740175904438282083668117",
|
||||
"19184814924890679891263780109959113289320127263583260218200636509492157834679",
|
||||
"3679639231485547795420532910726924727560917141402837495597760107842698404034",
|
||||
"16213191511474848247596810551723578773353083440353745908057321946068926848382",
|
||||
"1"
|
||||
],
|
||||
[
|
||||
"10943171273393803842589314082509655332154393332394322726077270895078286354146",
|
||||
"10872472035685319847811233167729172672344935625121511932198535224727331126439",
|
||||
"9215428431027260354679105025212521481930206886203677270216204485256690813172",
|
||||
"934602510541226149881779979217731465262250233587980565969044391353665291792",
|
||||
"1"
|
||||
],
|
||||
[
|
||||
"13049169779481227658517545034348883391527506091990880778783387628208561946597",
|
||||
"10083689369261379027228809473568899816311684698866922944902456565434209079955",
|
||||
"1"
|
||||
],
|
||||
[
|
||||
"19633516378466409167014413361365552102431118630694133723053441455184566611083",
|
||||
"8059525100726933978719058611146131904598011633549012007359165766216730722269",
|
||||
"8935861545794299876685457331391349387048184820319250771243971382360441890897",
|
||||
"4993459033694759724715904486381952906869986989682015547152342336961693234616",
|
||||
"1"
|
||||
]
|
||||
]
|
||||
|
||||
@@ -4,33 +4,44 @@ use ark_bn254::{
|
||||
Bn254, Fq as ArkFq, Fq2 as ArkFq2, Fr as ArkFr, G1Affine as ArkG1Affine,
|
||||
G1Projective as ArkG1Projective, G2Affine as ArkG2Affine, G2Projective as ArkG2Projective,
|
||||
};
|
||||
use ark_circom::{read_zkey, WitnessCalculator};
|
||||
use ark_groth16::{ProvingKey, VerifyingKey};
|
||||
use ark_relations::r1cs::ConstraintMatrices;
|
||||
use cfg_if::cfg_if;
|
||||
use color_eyre::{Report, Result};
|
||||
use num_bigint::BigUint;
|
||||
use once_cell::sync::OnceCell;
|
||||
use serde_json::Value;
|
||||
use std::fs::File;
|
||||
use std::io::{Cursor, Error, ErrorKind, Result};
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Mutex;
|
||||
use wasmer::{Module, Store};
|
||||
|
||||
const ZKEY_FILENAME: &str = "rln_final.zkey";
|
||||
const VK_FILENAME: &str = "verifying_key.json";
|
||||
const WASM_FILENAME: &str = "rln.wasm";
|
||||
cfg_if! {
|
||||
if #[cfg(not(target_arch = "wasm32"))] {
|
||||
use ark_circom::{WitnessCalculator};
|
||||
use once_cell::sync::OnceCell;
|
||||
use std::sync::Mutex;
|
||||
use wasmer::{Module, Store};
|
||||
use include_dir::{include_dir, Dir};
|
||||
use std::path::Path;
|
||||
}
|
||||
}
|
||||
|
||||
// These parameters are used for tests
|
||||
// Note that the circuit and keys in TEST_RESOURCES_FOLDER are compiled for Merkle trees of height 15, 19 and 20
|
||||
// Changing these parameters to other values than these defaults will cause zkSNARK proof verification to fail
|
||||
pub const TEST_PARAMETERS_INDEX: usize = 2;
|
||||
pub const TEST_TREE_HEIGHT: usize = [15, 19, 20][TEST_PARAMETERS_INDEX];
|
||||
pub const TEST_RESOURCES_FOLDER: &str = [
|
||||
"./resources/tree_height_15/",
|
||||
"./resources/tree_height_19/",
|
||||
"./resources/tree_height_20/",
|
||||
][TEST_PARAMETERS_INDEX];
|
||||
cfg_if! {
|
||||
if #[cfg(feature = "arkzkey")] {
|
||||
use ark_zkey::read_arkzkey_from_bytes;
|
||||
const ARKZKEY_FILENAME: &str = "tree_height_20/rln_final.arkzkey";
|
||||
|
||||
} else {
|
||||
use std::io::Cursor;
|
||||
use ark_circom::read_zkey;
|
||||
}
|
||||
}
|
||||
|
||||
const ZKEY_FILENAME: &str = "tree_height_20/rln_final.zkey";
|
||||
const VK_FILENAME: &str = "tree_height_20/verification_key.json";
|
||||
const WASM_FILENAME: &str = "tree_height_20/rln.wasm";
|
||||
|
||||
pub const TEST_TREE_HEIGHT: usize = 20;
|
||||
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
static RESOURCES_DIR: Dir<'_> = include_dir!("$CARGO_MANIFEST_DIR/resources");
|
||||
|
||||
// The following types define the pairing friendly elliptic curve, the underlying finite fields and groups default to this module
|
||||
// Note that proofs are serialized assuming Fr to be 4x8 = 32 bytes in size. Hence, changing to a curve with different encoding will make proof verification to fail
|
||||
@@ -46,194 +57,228 @@ pub type G2Projective = ArkG2Projective;
|
||||
// Loads the proving key using a bytes vector
|
||||
pub fn zkey_from_raw(zkey_data: &Vec<u8>) -> Result<(ProvingKey<Curve>, ConstraintMatrices<Fr>)> {
|
||||
if !zkey_data.is_empty() {
|
||||
let mut c = Cursor::new(zkey_data);
|
||||
let proving_key_and_matrices = read_zkey(&mut c)?;
|
||||
let proving_key_and_matrices = match () {
|
||||
#[cfg(feature = "arkzkey")]
|
||||
() => read_arkzkey_from_bytes(zkey_data.as_slice())?,
|
||||
#[cfg(not(feature = "arkzkey"))]
|
||||
() => {
|
||||
let mut c = Cursor::new(zkey_data);
|
||||
read_zkey(&mut c)?
|
||||
}
|
||||
};
|
||||
Ok(proving_key_and_matrices)
|
||||
} else {
|
||||
Err(Error::new(ErrorKind::NotFound, "No proving key found!"))
|
||||
Err(Report::msg("No proving key found!"))
|
||||
}
|
||||
}
|
||||
|
||||
// Loads the proving key
|
||||
pub fn zkey_from_folder(
|
||||
resources_folder: &str,
|
||||
) -> Result<(ProvingKey<Curve>, ConstraintMatrices<Fr>)> {
|
||||
let zkey_path = format!("{resources_folder}{ZKEY_FILENAME}");
|
||||
if Path::new(&zkey_path).exists() {
|
||||
let mut file = File::open(&zkey_path)?;
|
||||
let proving_key_and_matrices = read_zkey(&mut file)?;
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
pub fn zkey_from_folder() -> Result<(ProvingKey<Curve>, ConstraintMatrices<Fr>)> {
|
||||
#[cfg(feature = "arkzkey")]
|
||||
let zkey = RESOURCES_DIR.get_file(Path::new(ARKZKEY_FILENAME));
|
||||
#[cfg(not(feature = "arkzkey"))]
|
||||
let zkey = RESOURCES_DIR.get_file(Path::new(ZKEY_FILENAME));
|
||||
|
||||
if let Some(zkey) = zkey {
|
||||
let proving_key_and_matrices = match () {
|
||||
#[cfg(feature = "arkzkey")]
|
||||
() => read_arkzkey_from_bytes(zkey.contents())?,
|
||||
#[cfg(not(feature = "arkzkey"))]
|
||||
() => {
|
||||
let mut c = Cursor::new(zkey.contents());
|
||||
read_zkey(&mut c)?
|
||||
}
|
||||
};
|
||||
Ok(proving_key_and_matrices)
|
||||
} else {
|
||||
Err(Error::new(ErrorKind::NotFound, "No proving key found!"))
|
||||
Err(Report::msg("No proving key found!"))
|
||||
}
|
||||
}
|
||||
|
||||
// Loads the verification key from a bytes vector
|
||||
pub fn vk_from_raw(vk_data: &Vec<u8>, zkey_data: &Vec<u8>) -> Result<VerifyingKey<Curve>> {
|
||||
pub fn vk_from_raw(vk_data: &[u8], zkey_data: &Vec<u8>) -> Result<VerifyingKey<Curve>> {
|
||||
let verifying_key: VerifyingKey<Curve>;
|
||||
|
||||
if !vk_data.is_empty() {
|
||||
verifying_key = vk_from_vector(vk_data);
|
||||
verifying_key = vk_from_vector(vk_data)?;
|
||||
Ok(verifying_key)
|
||||
} else if !zkey_data.is_empty() {
|
||||
let (proving_key, _matrices) = zkey_from_raw(zkey_data)?;
|
||||
verifying_key = proving_key.vk;
|
||||
Ok(verifying_key)
|
||||
} else {
|
||||
Err(Error::new(
|
||||
ErrorKind::NotFound,
|
||||
"No proving/verification key found!",
|
||||
))
|
||||
Err(Report::msg("No proving/verification key found!"))
|
||||
}
|
||||
}
|
||||
|
||||
// Loads the verification key
|
||||
pub fn vk_from_folder(resources_folder: &str) -> Result<VerifyingKey<Curve>> {
|
||||
let vk_path = format!("{resources_folder}{VK_FILENAME}");
|
||||
let zkey_path = format!("{resources_folder}{ZKEY_FILENAME}");
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
pub fn vk_from_folder() -> Result<VerifyingKey<Curve>> {
|
||||
let vk = RESOURCES_DIR.get_file(Path::new(VK_FILENAME));
|
||||
let zkey = RESOURCES_DIR.get_file(Path::new(ZKEY_FILENAME));
|
||||
|
||||
let verifying_key: VerifyingKey<Curve>;
|
||||
|
||||
if Path::new(&vk_path).exists() {
|
||||
verifying_key = vk_from_json(&vk_path);
|
||||
if let Some(vk) = vk {
|
||||
verifying_key = vk_from_json(vk.contents_utf8().ok_or(Report::msg(
|
||||
"Could not read verification key from JSON file!",
|
||||
))?)?;
|
||||
Ok(verifying_key)
|
||||
} else if Path::new(&zkey_path).exists() {
|
||||
let (proving_key, _matrices) = zkey_from_folder(resources_folder)?;
|
||||
} else if let Some(_zkey) = zkey {
|
||||
let (proving_key, _matrices) = zkey_from_folder()?;
|
||||
verifying_key = proving_key.vk;
|
||||
Ok(verifying_key)
|
||||
} else {
|
||||
Err(Error::new(
|
||||
ErrorKind::NotFound,
|
||||
"No proving/verification key found!",
|
||||
))
|
||||
Err(Report::msg("No proving/verification key found!"))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
static WITNESS_CALCULATOR: OnceCell<Mutex<WitnessCalculator>> = OnceCell::new();
|
||||
|
||||
// Initializes the witness calculator using a bytes vector
|
||||
pub fn circom_from_raw(wasm_buffer: Vec<u8>) -> &'static Mutex<WitnessCalculator> {
|
||||
WITNESS_CALCULATOR.get_or_init(|| {
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
pub fn circom_from_raw(wasm_buffer: Vec<u8>) -> Result<&'static Mutex<WitnessCalculator>> {
|
||||
WITNESS_CALCULATOR.get_or_try_init(|| {
|
||||
let store = Store::default();
|
||||
let module = Module::new(&store, wasm_buffer).unwrap();
|
||||
let result =
|
||||
WitnessCalculator::from_module(module).expect("Failed to create witness calculator");
|
||||
Mutex::new(result)
|
||||
let module = Module::new(&store, wasm_buffer)?;
|
||||
let result = WitnessCalculator::from_module(module)?;
|
||||
Ok::<Mutex<WitnessCalculator>, Report>(Mutex::new(result))
|
||||
})
|
||||
}
|
||||
|
||||
// Initializes the witness calculator
|
||||
pub fn circom_from_folder(resources_folder: &str) -> &'static Mutex<WitnessCalculator> {
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
pub fn circom_from_folder() -> Result<&'static Mutex<WitnessCalculator>> {
|
||||
// We read the wasm file
|
||||
let wasm_path = format!("{resources_folder}{WASM_FILENAME}");
|
||||
let wasm_buffer = std::fs::read(&wasm_path).unwrap();
|
||||
circom_from_raw(wasm_buffer)
|
||||
let wasm = RESOURCES_DIR.get_file(Path::new(WASM_FILENAME));
|
||||
|
||||
if let Some(wasm) = wasm {
|
||||
let wasm_buffer = wasm.contents();
|
||||
circom_from_raw(wasm_buffer.to_vec())
|
||||
} else {
|
||||
Err(Report::msg("No wasm file found!"))
|
||||
}
|
||||
}
|
||||
|
||||
// The following function implementations are taken/adapted from https://github.com/gakonst/ark-circom/blob/1732e15d6313fe176b0b1abb858ac9e095d0dbd7/src/zkey.rs
|
||||
|
||||
// Utilities to convert a json verification key in a groth16::VerificationKey
|
||||
fn fq_from_str(s: &str) -> Fq {
|
||||
Fq::try_from(BigUint::from_str(s).unwrap()).unwrap()
|
||||
fn fq_from_str(s: &str) -> Result<Fq> {
|
||||
Ok(Fq::from(BigUint::from_str(s)?))
|
||||
}
|
||||
|
||||
// Extracts the element in G1 corresponding to its JSON serialization
|
||||
fn json_to_g1(json: &Value, key: &str) -> G1Affine {
|
||||
fn json_to_g1(json: &Value, key: &str) -> Result<G1Affine> {
|
||||
let els: Vec<String> = json
|
||||
.get(key)
|
||||
.unwrap()
|
||||
.ok_or(Report::msg("no json value"))?
|
||||
.as_array()
|
||||
.unwrap()
|
||||
.ok_or(Report::msg("value not an array"))?
|
||||
.iter()
|
||||
.map(|i| i.as_str().unwrap().to_string())
|
||||
.collect();
|
||||
G1Affine::from(G1Projective::new(
|
||||
fq_from_str(&els[0]),
|
||||
fq_from_str(&els[1]),
|
||||
fq_from_str(&els[2]),
|
||||
))
|
||||
.map(|i| i.as_str().ok_or(Report::msg("element is not a string")))
|
||||
.map(|x| x.map(|v| v.to_owned()))
|
||||
.collect::<Result<Vec<String>>>()?;
|
||||
|
||||
Ok(G1Affine::from(G1Projective::new(
|
||||
fq_from_str(&els[0])?,
|
||||
fq_from_str(&els[1])?,
|
||||
fq_from_str(&els[2])?,
|
||||
)))
|
||||
}
|
||||
|
||||
// Extracts the vector of G1 elements corresponding to its JSON serialization
|
||||
fn json_to_g1_vec(json: &Value, key: &str) -> Vec<G1Affine> {
|
||||
fn json_to_g1_vec(json: &Value, key: &str) -> Result<Vec<G1Affine>> {
|
||||
let els: Vec<Vec<String>> = json
|
||||
.get(key)
|
||||
.unwrap()
|
||||
.ok_or(Report::msg("no json value"))?
|
||||
.as_array()
|
||||
.unwrap()
|
||||
.ok_or(Report::msg("value not an array"))?
|
||||
.iter()
|
||||
.map(|i| {
|
||||
i.as_array()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|x| x.as_str().unwrap().to_string())
|
||||
.collect::<Vec<String>>()
|
||||
.ok_or(Report::msg("element is not an array"))
|
||||
.and_then(|array| {
|
||||
array
|
||||
.iter()
|
||||
.map(|x| x.as_str().ok_or(Report::msg("element is not a string")))
|
||||
.map(|x| x.map(|v| v.to_owned()))
|
||||
.collect::<Result<Vec<String>>>()
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
.collect::<Result<Vec<Vec<String>>>>()?;
|
||||
|
||||
els.iter()
|
||||
.map(|coords| {
|
||||
G1Affine::from(G1Projective::new(
|
||||
fq_from_str(&coords[0]),
|
||||
fq_from_str(&coords[1]),
|
||||
fq_from_str(&coords[2]),
|
||||
))
|
||||
})
|
||||
.collect()
|
||||
let mut res = vec![];
|
||||
for coords in els {
|
||||
res.push(G1Affine::from(G1Projective::new(
|
||||
fq_from_str(&coords[0])?,
|
||||
fq_from_str(&coords[1])?,
|
||||
fq_from_str(&coords[2])?,
|
||||
)))
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
// Extracts the element in G2 corresponding to its JSON serialization
|
||||
fn json_to_g2(json: &Value, key: &str) -> G2Affine {
|
||||
fn json_to_g2(json: &Value, key: &str) -> Result<G2Affine> {
|
||||
let els: Vec<Vec<String>> = json
|
||||
.get(key)
|
||||
.unwrap()
|
||||
.ok_or(Report::msg("no json value"))?
|
||||
.as_array()
|
||||
.unwrap()
|
||||
.ok_or(Report::msg("value not an array"))?
|
||||
.iter()
|
||||
.map(|i| {
|
||||
i.as_array()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|x| x.as_str().unwrap().to_string())
|
||||
.collect::<Vec<String>>()
|
||||
.ok_or(Report::msg("element is not an array"))
|
||||
.and_then(|array| {
|
||||
array
|
||||
.iter()
|
||||
.map(|x| x.as_str().ok_or(Report::msg("element is not a string")))
|
||||
.map(|x| x.map(|v| v.to_owned()))
|
||||
.collect::<Result<Vec<String>>>()
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
.collect::<Result<Vec<Vec<String>>>>()?;
|
||||
|
||||
let x = Fq2::new(fq_from_str(&els[0][0]), fq_from_str(&els[0][1]));
|
||||
let y = Fq2::new(fq_from_str(&els[1][0]), fq_from_str(&els[1][1]));
|
||||
let z = Fq2::new(fq_from_str(&els[2][0]), fq_from_str(&els[2][1]));
|
||||
G2Affine::from(G2Projective::new(x, y, z))
|
||||
let x = Fq2::new(fq_from_str(&els[0][0])?, fq_from_str(&els[0][1])?);
|
||||
let y = Fq2::new(fq_from_str(&els[1][0])?, fq_from_str(&els[1][1])?);
|
||||
let z = Fq2::new(fq_from_str(&els[2][0])?, fq_from_str(&els[2][1])?);
|
||||
Ok(G2Affine::from(G2Projective::new(x, y, z)))
|
||||
}
|
||||
|
||||
// Converts JSON to a VerifyingKey
|
||||
fn to_verifying_key(json: serde_json::Value) -> VerifyingKey<Curve> {
|
||||
VerifyingKey {
|
||||
alpha_g1: json_to_g1(&json, "vk_alpha_1"),
|
||||
beta_g2: json_to_g2(&json, "vk_beta_2"),
|
||||
gamma_g2: json_to_g2(&json, "vk_gamma_2"),
|
||||
delta_g2: json_to_g2(&json, "vk_delta_2"),
|
||||
gamma_abc_g1: json_to_g1_vec(&json, "IC"),
|
||||
}
|
||||
fn to_verifying_key(json: serde_json::Value) -> Result<VerifyingKey<Curve>> {
|
||||
Ok(VerifyingKey {
|
||||
alpha_g1: json_to_g1(&json, "vk_alpha_1")?,
|
||||
beta_g2: json_to_g2(&json, "vk_beta_2")?,
|
||||
gamma_g2: json_to_g2(&json, "vk_gamma_2")?,
|
||||
delta_g2: json_to_g2(&json, "vk_delta_2")?,
|
||||
gamma_abc_g1: json_to_g1_vec(&json, "IC")?,
|
||||
})
|
||||
}
|
||||
|
||||
// Computes the verification key from its JSON serialization
|
||||
fn vk_from_json(vk_path: &str) -> VerifyingKey<Curve> {
|
||||
let json = std::fs::read_to_string(vk_path).unwrap();
|
||||
let json: Value = serde_json::from_str(&json).unwrap();
|
||||
|
||||
fn vk_from_json(vk: &str) -> Result<VerifyingKey<Curve>> {
|
||||
let json: Value = serde_json::from_str(vk)?;
|
||||
to_verifying_key(json)
|
||||
}
|
||||
|
||||
// Computes the verification key from a bytes vector containing its JSON serialization
|
||||
fn vk_from_vector(vk: &[u8]) -> VerifyingKey<Curve> {
|
||||
let json = String::from_utf8(vk.to_vec()).expect("Found invalid UTF-8");
|
||||
let json: Value = serde_json::from_str(&json).unwrap();
|
||||
fn vk_from_vector(vk: &[u8]) -> Result<VerifyingKey<Curve>> {
|
||||
let json = String::from_utf8(vk.to_vec())?;
|
||||
let json: Value = serde_json::from_str(&json)?;
|
||||
|
||||
to_verifying_key(json)
|
||||
}
|
||||
|
||||
// Checks verification key to be correct with respect to proving key
|
||||
pub fn check_vk_from_zkey(resources_folder: &str, verifying_key: VerifyingKey<Curve>) {
|
||||
let (proving_key, _matrices) = zkey_from_folder(resources_folder).unwrap();
|
||||
assert_eq!(proving_key.vk, verifying_key);
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
pub fn check_vk_from_zkey(verifying_key: VerifyingKey<Curve>) -> Result<()> {
|
||||
let (proving_key, _matrices) = zkey_from_folder()?;
|
||||
if proving_key.vk == verifying_key {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(Report::msg("verifying_keys are not equal"))
|
||||
}
|
||||
}
|
||||
|
||||
973
rln/src/ffi.rs
973
rln/src/ffi.rs
File diff suppressed because it is too large
Load Diff
58
rln/src/hashers.rs
Normal file
58
rln/src/hashers.rs
Normal file
@@ -0,0 +1,58 @@
|
||||
/// This crate instantiates the Poseidon hash algorithm.
|
||||
use crate::{circuit::Fr, utils::bytes_le_to_fr};
|
||||
use once_cell::sync::Lazy;
|
||||
use tiny_keccak::{Hasher, Keccak};
|
||||
use utils::poseidon::Poseidon;
|
||||
|
||||
/// These indexed constants hardcode the supported round parameters tuples (t, RF, RN, SKIP_MATRICES) for the Bn254 scalar field.
|
||||
/// SKIP_MATRICES is the index of the randomly generated secure MDS matrix.
|
||||
/// TODO: generate these parameters
|
||||
pub const ROUND_PARAMS: [(usize, usize, usize, usize); 8] = [
|
||||
(2, 8, 56, 0),
|
||||
(3, 8, 57, 0),
|
||||
(4, 8, 56, 0),
|
||||
(5, 8, 60, 0),
|
||||
(6, 8, 60, 0),
|
||||
(7, 8, 63, 0),
|
||||
(8, 8, 64, 0),
|
||||
(9, 8, 63, 0),
|
||||
];
|
||||
|
||||
/// Poseidon Hash wrapper over above implementation.
|
||||
static POSEIDON: Lazy<Poseidon<Fr>> = Lazy::new(|| Poseidon::<Fr>::from(&ROUND_PARAMS));
|
||||
|
||||
pub fn poseidon_hash(input: &[Fr]) -> Fr {
|
||||
POSEIDON
|
||||
.hash(input.to_vec())
|
||||
.expect("hash with fixed input size can't fail")
|
||||
}
|
||||
|
||||
/// The zerokit RLN Merkle tree Hasher.
|
||||
#[derive(Clone, Copy, PartialEq, Eq)]
|
||||
pub struct PoseidonHash;
|
||||
|
||||
/// The default Hasher trait used by Merkle tree implementation in utils.
|
||||
impl utils::merkle_tree::Hasher for PoseidonHash {
|
||||
type Fr = Fr;
|
||||
|
||||
fn default_leaf() -> Self::Fr {
|
||||
Self::Fr::from(0)
|
||||
}
|
||||
|
||||
fn hash(inputs: &[Self::Fr]) -> Self::Fr {
|
||||
poseidon_hash(inputs)
|
||||
}
|
||||
}
|
||||
|
||||
/// Hashes arbitrary signal to the underlying prime field.
|
||||
pub fn hash_to_field(signal: &[u8]) -> Fr {
|
||||
// We hash the input signal using Keccak256
|
||||
let mut hash = [0; 32];
|
||||
let mut hasher = Keccak::v256();
|
||||
hasher.update(signal);
|
||||
hasher.finalize(&mut hash);
|
||||
|
||||
// We export the hash as a field element
|
||||
let (el, _) = bytes_le_to_fr(hash.as_ref());
|
||||
el
|
||||
}
|
||||
431
rln/src/lib.rs
431
rln/src/lib.rs
@@ -1,432 +1,15 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
pub mod circuit;
|
||||
pub mod ffi;
|
||||
pub mod merkle_tree;
|
||||
pub mod poseidon_constants;
|
||||
pub mod poseidon_hash;
|
||||
pub mod hashers;
|
||||
#[cfg(feature = "pmtree-ft")]
|
||||
pub mod pm_tree_adapter;
|
||||
pub mod poseidon_tree;
|
||||
pub mod protocol;
|
||||
pub mod public;
|
||||
#[cfg(test)]
|
||||
pub mod public_api_tests;
|
||||
pub mod utils;
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
|
||||
use crate::circuit::{
|
||||
circom_from_folder, vk_from_folder, zkey_from_folder, Fr, TEST_RESOURCES_FOLDER,
|
||||
TEST_TREE_HEIGHT,
|
||||
};
|
||||
use crate::poseidon_hash::poseidon_hash;
|
||||
use crate::poseidon_tree::PoseidonTree;
|
||||
use crate::protocol::*;
|
||||
use crate::utils::str_to_fr;
|
||||
|
||||
// Input generated with https://github.com/oskarth/zk-kit/commit/b6a872f7160c7c14e10a0ea40acab99cbb23c9a8
|
||||
const WITNESS_JSON_15: &str = r#"
|
||||
{
|
||||
"identity_secret": "12825549237505733615964533204745049909430608936689388901883576945030025938736",
|
||||
"path_elements": [
|
||||
"18622655742232062119094611065896226799484910997537830749762961454045300666333",
|
||||
"20590447254980891299813706518821659736846425329007960381537122689749540452732",
|
||||
"7423237065226347324353380772367382631490014989348495481811164164159255474657",
|
||||
"11286972368698509976183087595462810875513684078608517520839298933882497716792",
|
||||
"3607627140608796879659380071776844901612302623152076817094415224584923813162",
|
||||
"19712377064642672829441595136074946683621277828620209496774504837737984048981",
|
||||
"20775607673010627194014556968476266066927294572720319469184847051418138353016",
|
||||
"3396914609616007258851405644437304192397291162432396347162513310381425243293",
|
||||
"21551820661461729022865262380882070649935529853313286572328683688269863701601",
|
||||
"6573136701248752079028194407151022595060682063033565181951145966236778420039",
|
||||
"12413880268183407374852357075976609371175688755676981206018884971008854919922",
|
||||
"14271763308400718165336499097156975241954733520325982997864342600795471836726",
|
||||
"20066985985293572387227381049700832219069292839614107140851619262827735677018",
|
||||
"9394776414966240069580838672673694685292165040808226440647796406499139370960",
|
||||
"11331146992410411304059858900317123658895005918277453009197229807340014528524"
|
||||
],
|
||||
"identity_path_index": [
|
||||
1,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0
|
||||
],
|
||||
"x": "8143228284048792769012135629627737459844825626241842423967352803501040982",
|
||||
"epoch": "0x0000005b612540fc986b42322f8cb91c2273afad58ed006fdba0c97b4b16b12f",
|
||||
"rln_identifier": "11412926387081627876309792396682864042420635853496105400039841573530884328439"
|
||||
}
|
||||
"#;
|
||||
|
||||
// Input generated with protocol::random_rln_witness
|
||||
const WITNESS_JSON_19: &str = r#"
|
||||
{
|
||||
"identity_secret": "922538810348594125658702672067738675294669207539999802857585668079702330450",
|
||||
"path_elements": [
|
||||
"16059714054680148404543504061485737353203416489071538960876865983954285286166",
|
||||
"3041470753871943901334053763207316028823782848445723460227667780327106380356",
|
||||
"2557297527793326315072058421057853700096944625924483912548759909801348042183",
|
||||
"6677578602456189582427063963562590713054668181987223110955234085327917303436",
|
||||
"2250827150965576973906150764756422151438812678308727218463995574869267980301",
|
||||
"1895457427602709606993445561553433669787657053834360973759981803464906070980",
|
||||
"11033689991077061346803816826729204895841441316315304395980565540264104346466",
|
||||
"18588752216879570844240300406954267039026327526134910835334500497981810174976",
|
||||
"19346480964028499661277403659363466542857230928032088490855656809181891953123",
|
||||
"21460193770370072688835316363068413651465631481105148051902686770759127189327",
|
||||
"20906347653364838502964722817589315918082261023317339146393355650507243340078",
|
||||
"13466599592974387800162739317046838825289754472645703919149409009404541432954",
|
||||
"9617165663598957201253074168824246164494443748556931540348223968573884172285",
|
||||
"6936463137584425684797785981770877165377386163416057257854261010817156666898",
|
||||
"369902028235468424790098825415813437044876310542601948037281422841675126849",
|
||||
"13510969869821080499683463562609720931680005714401083864659516045615497273644",
|
||||
"2567921390740781421487331055530491683313154421589525170472201828596388395736",
|
||||
"14360870889466292805403568662660511177232987619663547772298178013674025998478",
|
||||
"4735344599616284973799984501493858013178071155960162022656706545116168334293"
|
||||
],
|
||||
"identity_path_index": [
|
||||
1,
|
||||
0,
|
||||
1,
|
||||
0,
|
||||
1,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
0,
|
||||
1,
|
||||
1,
|
||||
0
|
||||
],
|
||||
"x": "6427050788896290028100534859169645070970780055911091444144195464808120686416",
|
||||
"epoch": "0x2bd155d9f85c741044da6909d144f9cc5ce8e0d545a9ed4921b156e8b8569bab",
|
||||
"rln_identifier": "2193983000213424579594329476781986065965849144986973472766961413131458022566"
|
||||
}
|
||||
"#;
|
||||
|
||||
const WITNESS_JSON_20: &str = r#"
|
||||
{
|
||||
"identity_secret": "13732353453861280511150022598793312186188599006979552959297495195757997428306",
|
||||
"path_elements": [
|
||||
"20463525608687844300981085488128968694844212760055234622292326942405619575964",
|
||||
"8040856403709217901175408904825741112286158901303127670929462145501210871313",
|
||||
"3776499751255585163563840252112871568402966629435152937692711318702338789837",
|
||||
"19415813252626942110541463414404411443562242499365750694284604341271149125679",
|
||||
"19414720788761208006634240390286942738242262010168559813148115573784354129237",
|
||||
"17680594732844291740094158892269696200077963275550625226493856898849422516043",
|
||||
"16009199741350632715210088346611798597033333293348807000623441780059543674510",
|
||||
"18743496911007535170857676824393811326863602477260615792503039058813338644738",
|
||||
"1029572792321380246989475723806770724699749375691788486434716005338938722216",
|
||||
"21713138150151063186050010182615713685603650963220209951496401043119768920892",
|
||||
"6713732504049401389983008178456811894856018247924860823028704114266363984580",
|
||||
"2746686888799473963221285145390361693256731812094259845879519459924507786594",
|
||||
"18620748467731297359505500266677881218553438497271819903304075323783392031715",
|
||||
"2446201221122671119406471414204229600430018713181038717206670749886932158104",
|
||||
"12037171942017611311954851302868199608036334625783560875426350283156617524597",
|
||||
"21798743392351780927808323348278035105395367759688979232116905142049921734349",
|
||||
"17450230289417496971557215666910229260621413088991137405744457922069827319039",
|
||||
"20936854099128086256353520300046664152516566958630447858438908748907198510485",
|
||||
"13513344965831154386658059617477268600255664386844920822248038939666265737046",
|
||||
"15546319496880899251450021422131511560001766832580480193115646510655765306630"
|
||||
|
||||
],
|
||||
"identity_path_index": [
|
||||
0,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
0,
|
||||
1,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0
|
||||
],
|
||||
"x": "18073935665561339809445069958310044423750771681863480888589546877024349720547",
|
||||
"epoch": "0x147e4c23a43a1ddca78d94bcd28147f62ca74b3dc7e56bb0a314a954b9f0e567",
|
||||
"rln_identifier": "2193983000213424579594329476781986065965849144986973472766961413131458022566"
|
||||
}
|
||||
"#;
|
||||
|
||||
#[test]
|
||||
// We test Merkle tree generation, proofs and verification
|
||||
fn test_merkle_proof() {
|
||||
let tree_height = TEST_TREE_HEIGHT;
|
||||
let leaf_index = 3;
|
||||
|
||||
// generate identity
|
||||
let identity_secret = hash_to_field(b"test-merkle-proof");
|
||||
let id_commitment = poseidon_hash(&vec![identity_secret]);
|
||||
|
||||
// generate merkle tree
|
||||
let default_leaf = Fr::from(0);
|
||||
let mut tree = PoseidonTree::new(tree_height, default_leaf);
|
||||
tree.set(leaf_index, id_commitment.into()).unwrap();
|
||||
|
||||
// We check correct computation of the root
|
||||
let root = tree.root();
|
||||
|
||||
if TEST_TREE_HEIGHT == 15 {
|
||||
assert_eq!(
|
||||
root,
|
||||
str_to_fr(
|
||||
"0x1984f2e01184aef5cb974640898a5f5c25556554e2b06d99d4841badb8b198cd",
|
||||
16
|
||||
)
|
||||
);
|
||||
} else if TEST_TREE_HEIGHT == 19 {
|
||||
assert_eq!(
|
||||
root,
|
||||
str_to_fr(
|
||||
"0x219ceb53f2b1b7a6cf74e80d50d44d68ecb4a53c6cc65b25593c8d56343fb1fe",
|
||||
16
|
||||
)
|
||||
);
|
||||
} else if TEST_TREE_HEIGHT == 20 {
|
||||
assert_eq!(
|
||||
root,
|
||||
str_to_fr(
|
||||
"0x21947ffd0bce0c385f876e7c97d6a42eec5b1fe935aab2f01c1f8a8cbcc356d2",
|
||||
16
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
let merkle_proof = tree.proof(leaf_index).expect("proof should exist");
|
||||
let path_elements = merkle_proof.get_path_elements();
|
||||
let identity_path_index = merkle_proof.get_path_index();
|
||||
|
||||
// We check correct computation of the path and indexes
|
||||
// These values refers to TEST_TREE_HEIGHT == 16
|
||||
let mut expected_path_elements = vec![
|
||||
str_to_fr(
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
16,
|
||||
),
|
||||
str_to_fr(
|
||||
"0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864",
|
||||
16,
|
||||
),
|
||||
str_to_fr(
|
||||
"0x1069673dcdb12263df301a6ff584a7ec261a44cb9dc68df067a4774460b1f1e1",
|
||||
16,
|
||||
),
|
||||
str_to_fr(
|
||||
"0x18f43331537ee2af2e3d758d50f72106467c6eea50371dd528d57eb2b856d238",
|
||||
16,
|
||||
),
|
||||
str_to_fr(
|
||||
"0x07f9d837cb17b0d36320ffe93ba52345f1b728571a568265caac97559dbc952a",
|
||||
16,
|
||||
),
|
||||
str_to_fr(
|
||||
"0x2b94cf5e8746b3f5c9631f4c5df32907a699c58c94b2ad4d7b5cec1639183f55",
|
||||
16,
|
||||
),
|
||||
str_to_fr(
|
||||
"0x2dee93c5a666459646ea7d22cca9e1bcfed71e6951b953611d11dda32ea09d78",
|
||||
16,
|
||||
),
|
||||
str_to_fr(
|
||||
"0x078295e5a22b84e982cf601eb639597b8b0515a88cb5ac7fa8a4aabe3c87349d",
|
||||
16,
|
||||
),
|
||||
str_to_fr(
|
||||
"0x2fa5e5f18f6027a6501bec864564472a616b2e274a41211a444cbe3a99f3cc61",
|
||||
16,
|
||||
),
|
||||
str_to_fr(
|
||||
"0x0e884376d0d8fd21ecb780389e941f66e45e7acce3e228ab3e2156a614fcd747",
|
||||
16,
|
||||
),
|
||||
str_to_fr(
|
||||
"0x1b7201da72494f1e28717ad1a52eb469f95892f957713533de6175e5da190af2",
|
||||
16,
|
||||
),
|
||||
str_to_fr(
|
||||
"0x1f8d8822725e36385200c0b201249819a6e6e1e4650808b5bebc6bface7d7636",
|
||||
16,
|
||||
),
|
||||
str_to_fr(
|
||||
"0x2c5d82f66c914bafb9701589ba8cfcfb6162b0a12acf88a8d0879a0471b5f85a",
|
||||
16,
|
||||
),
|
||||
str_to_fr(
|
||||
"0x14c54148a0940bb820957f5adf3fa1134ef5c4aaa113f4646458f270e0bfbfd0",
|
||||
16,
|
||||
),
|
||||
str_to_fr(
|
||||
"0x190d33b12f986f961e10c0ee44d8b9af11be25588cad89d416118e4bf4ebe80c",
|
||||
16,
|
||||
),
|
||||
];
|
||||
|
||||
let mut expected_identity_path_index: Vec<u8> =
|
||||
vec![1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
|
||||
|
||||
// We add the remaining elements for the case TEST_TREE_HEIGHT = 20
|
||||
if TEST_TREE_HEIGHT == 19 || TEST_TREE_HEIGHT == 20 {
|
||||
expected_path_elements.append(&mut vec![
|
||||
str_to_fr(
|
||||
"0x22f98aa9ce704152ac17354914ad73ed1167ae6596af510aa5b3649325e06c92",
|
||||
16,
|
||||
),
|
||||
str_to_fr(
|
||||
"0x2a7c7c9b6ce5880b9f6f228d72bf6a575a526f29c66ecceef8b753d38bba7323",
|
||||
16,
|
||||
),
|
||||
str_to_fr(
|
||||
"0x2e8186e558698ec1c67af9c14d463ffc470043c9c2988b954d75dd643f36b992",
|
||||
16,
|
||||
),
|
||||
str_to_fr(
|
||||
"0x0f57c5571e9a4eab49e2c8cf050dae948aef6ead647392273546249d1c1ff10f",
|
||||
16,
|
||||
),
|
||||
]);
|
||||
expected_identity_path_index.append(&mut vec![0, 0, 0, 0]);
|
||||
}
|
||||
|
||||
if TEST_TREE_HEIGHT == 20 {
|
||||
expected_path_elements.append(&mut vec![str_to_fr(
|
||||
"0x1830ee67b5fb554ad5f63d4388800e1cfe78e310697d46e43c9ce36134f72cca",
|
||||
16,
|
||||
)]);
|
||||
expected_identity_path_index.append(&mut vec![0]);
|
||||
}
|
||||
|
||||
assert_eq!(path_elements, expected_path_elements);
|
||||
assert_eq!(identity_path_index, expected_identity_path_index);
|
||||
|
||||
// We check correct verification of the proof
|
||||
assert!(tree.verify(&id_commitment, &merkle_proof).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
// We test a RLN proof generation and verification
|
||||
fn test_witness_from_json() {
|
||||
// We generate all relevant keys
|
||||
let proving_key = zkey_from_folder(TEST_RESOURCES_FOLDER).unwrap();
|
||||
let verification_key = vk_from_folder(TEST_RESOURCES_FOLDER).unwrap();
|
||||
let builder = circom_from_folder(TEST_RESOURCES_FOLDER);
|
||||
|
||||
// We compute witness from the json input example
|
||||
let mut witness_json: &str = "";
|
||||
|
||||
if TEST_TREE_HEIGHT == 15 {
|
||||
witness_json = WITNESS_JSON_15;
|
||||
} else if TEST_TREE_HEIGHT == 19 {
|
||||
witness_json = WITNESS_JSON_19;
|
||||
} else if TEST_TREE_HEIGHT == 20 {
|
||||
witness_json = WITNESS_JSON_20;
|
||||
}
|
||||
|
||||
let rln_witness = rln_witness_from_json(witness_json);
|
||||
|
||||
// Let's generate a zkSNARK proof
|
||||
let proof = generate_proof(builder, &proving_key, &rln_witness).unwrap();
|
||||
|
||||
let proof_values = proof_values_from_witness(&rln_witness);
|
||||
|
||||
// Let's verify the proof
|
||||
let verified = verify_proof(&verification_key, &proof, &proof_values);
|
||||
|
||||
assert!(verified.unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
// We test a RLN proof generation and verification
|
||||
fn test_end_to_end() {
|
||||
let tree_height = TEST_TREE_HEIGHT;
|
||||
let leaf_index = 3;
|
||||
|
||||
// Generate identity pair
|
||||
let (identity_secret, id_commitment) = keygen();
|
||||
|
||||
//// generate merkle tree
|
||||
let default_leaf = Fr::from(0);
|
||||
let mut tree = PoseidonTree::new(tree_height, default_leaf);
|
||||
tree.set(leaf_index, id_commitment.into()).unwrap();
|
||||
|
||||
let merkle_proof = tree.proof(leaf_index).expect("proof should exist");
|
||||
|
||||
let signal = b"hey hey";
|
||||
let x = hash_to_field(signal);
|
||||
|
||||
// We set the remaining values to random ones
|
||||
let epoch = hash_to_field(b"test-epoch");
|
||||
//let rln_identifier = hash_to_field(b"test-rln-identifier");
|
||||
|
||||
let rln_witness: RLNWitnessInput = rln_witness_from_values(
|
||||
identity_secret,
|
||||
&merkle_proof,
|
||||
x,
|
||||
epoch, /*, rln_identifier*/
|
||||
);
|
||||
|
||||
// We generate all relevant keys
|
||||
let proving_key = zkey_from_folder(TEST_RESOURCES_FOLDER).unwrap();
|
||||
let verification_key = vk_from_folder(TEST_RESOURCES_FOLDER).unwrap();
|
||||
let builder = circom_from_folder(TEST_RESOURCES_FOLDER);
|
||||
|
||||
// Let's generate a zkSNARK proof
|
||||
let proof = generate_proof(builder, &proving_key, &rln_witness).unwrap();
|
||||
|
||||
let proof_values = proof_values_from_witness(&rln_witness);
|
||||
|
||||
// Let's verify the proof
|
||||
let success = verify_proof(&verification_key, &proof, &proof_values).unwrap();
|
||||
|
||||
assert!(success);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_witness_serialization() {
|
||||
// We test witness serialization
|
||||
let mut witness_json: &str = "";
|
||||
|
||||
if TEST_TREE_HEIGHT == 15 {
|
||||
witness_json = WITNESS_JSON_15;
|
||||
} else if TEST_TREE_HEIGHT == 19 {
|
||||
witness_json = WITNESS_JSON_19;
|
||||
} else if TEST_TREE_HEIGHT == 20 {
|
||||
witness_json = WITNESS_JSON_20;
|
||||
}
|
||||
|
||||
let rln_witness = rln_witness_from_json(witness_json);
|
||||
|
||||
let ser = serialize_witness(&rln_witness);
|
||||
let (deser, _) = deserialize_witness(&ser);
|
||||
assert_eq!(rln_witness, deser);
|
||||
|
||||
// We test Proof values serialization
|
||||
let proof_values = proof_values_from_witness(&rln_witness);
|
||||
let ser = serialize_proof_values(&proof_values);
|
||||
let (deser, _) = deserialize_proof_values(&ser);
|
||||
assert_eq!(proof_values, deser);
|
||||
}
|
||||
}
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
pub mod ffi;
|
||||
|
||||
@@ -1,666 +0,0 @@
|
||||
// This crate provides different implementation of Merkle tree
|
||||
// Currently two interchangeable implementations are supported:
|
||||
// - FullMerkleTree: each tree node is stored
|
||||
// - OptimalMerkleTree: only nodes used to prove accumulation of set leaves are stored
|
||||
// Library defaults are set in the poseidon_tree crate
|
||||
//
|
||||
// Merkle tree implementations are adapted from https://github.com/kilic/rln/blob/master/src/merkle.rs
|
||||
// and https://github.com/worldcoin/semaphore-rs/blob/d462a4372f1fd9c27610f2acfe4841fab1d396aa/src/merkle_tree.rs
|
||||
|
||||
//!
|
||||
//! # To do
|
||||
//!
|
||||
//! * Disk based storage backend (using mmaped files should be easy)
|
||||
//! * Implement serialization for tree and Merkle proof
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::io;
|
||||
use std::{
|
||||
cmp::max,
|
||||
fmt::Debug,
|
||||
iter::{once, repeat, successors},
|
||||
};
|
||||
|
||||
/// In the Hasher trait we define the node type, the default leaf
|
||||
/// and the hash function used to initialize a Merkle Tree implementation
|
||||
pub trait Hasher {
|
||||
/// Type of the leaf and tree node
|
||||
type Fr: Copy + Clone + Eq;
|
||||
|
||||
/// Returns the default tree leaf
|
||||
fn default_leaf() -> Self::Fr;
|
||||
|
||||
/// Utility to compute the hash of an intermediate node
|
||||
fn hash(input: &[Self::Fr]) -> Self::Fr;
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
/// Optimal Merkle Tree Implementation
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
/// The Merkle tree structure
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub struct OptimalMerkleTree<H>
|
||||
where
|
||||
H: Hasher,
|
||||
{
|
||||
/// The depth of the tree, i.e. the number of levels from leaf to root
|
||||
depth: usize,
|
||||
|
||||
/// The nodes cached from the empty part of the tree (where leaves are set to default).
|
||||
/// Since the rightmost part of the tree is usually changed much later than its creation,
|
||||
/// we can prove accumulation of elements in the leftmost part, with no need to initialize the full tree
|
||||
/// and by caching few intermediate nodes to the root computed from default leaves
|
||||
cached_nodes: Vec<H::Fr>,
|
||||
|
||||
/// The tree nodes
|
||||
nodes: HashMap<(usize, usize), H::Fr>,
|
||||
|
||||
// The next available (i.e., never used) tree index. Equivalently, the number of leaves added to the tree
|
||||
// (deletions leave next_index unchanged)
|
||||
next_index: usize,
|
||||
}
|
||||
|
||||
/// The Merkle proof
|
||||
/// Contains a vector of (node, branch_index) that defines the proof path elements and branch direction (1 or 0)
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
pub struct OptimalMerkleProof<H: Hasher>(pub Vec<(H::Fr, u8)>);
|
||||
|
||||
/// Implementations
|
||||
|
||||
impl<H: Hasher> OptimalMerkleTree<H> {
|
||||
pub fn default(depth: usize) -> Self {
|
||||
OptimalMerkleTree::<H>::new(depth, H::default_leaf())
|
||||
}
|
||||
|
||||
/// Creates a new `MerkleTree`
|
||||
/// depth - the height of the tree made only of hash nodes. 2^depth is the maximum number of leaves hash nodes
|
||||
pub fn new(depth: usize, default_leaf: H::Fr) -> Self {
|
||||
let mut cached_nodes: Vec<H::Fr> = Vec::with_capacity(depth + 1);
|
||||
cached_nodes.push(default_leaf);
|
||||
for i in 0..depth {
|
||||
cached_nodes.push(H::hash(&[cached_nodes[i]; 2]));
|
||||
}
|
||||
cached_nodes.reverse();
|
||||
OptimalMerkleTree {
|
||||
cached_nodes: cached_nodes.clone(),
|
||||
depth: depth,
|
||||
nodes: HashMap::new(),
|
||||
next_index: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the depth of the tree
|
||||
pub fn depth(&self) -> usize {
|
||||
self.depth
|
||||
}
|
||||
|
||||
// Returns the capacity of the tree, i.e. the maximum number of accumulatable leaves
|
||||
pub fn capacity(&self) -> usize {
|
||||
1 << self.depth
|
||||
}
|
||||
|
||||
// Returns the total number of leaves set
|
||||
pub fn leaves_set(&mut self) -> usize {
|
||||
self.next_index
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
// Returns the root of the tree
|
||||
pub fn root(&self) -> H::Fr {
|
||||
self.get_node(0, 0)
|
||||
}
|
||||
|
||||
// Sets a leaf at the specified tree index
|
||||
pub fn set(&mut self, index: usize, leaf: H::Fr) -> io::Result<()> {
|
||||
if index >= self.capacity() {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"index exceeds set size",
|
||||
));
|
||||
}
|
||||
self.nodes.insert((self.depth, index), leaf);
|
||||
self.recalculate_from(index);
|
||||
self.next_index = max(self.next_index, index + 1);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Sets a leaf at the next available index
|
||||
pub fn update_next(&mut self, leaf: H::Fr) -> io::Result<()> {
|
||||
self.set(self.next_index, leaf)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
|
||||
pub fn delete(&mut self, index: usize) -> io::Result<()> {
|
||||
// We reset the leaf only if we previously set a leaf at that index
|
||||
if index < self.next_index {
|
||||
self.set(index, H::default_leaf())?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Computes a merkle proof the the leaf at the specified index
|
||||
pub fn proof(&self, index: usize) -> io::Result<OptimalMerkleProof<H>> {
|
||||
if index >= self.capacity() {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"index exceeds set size",
|
||||
));
|
||||
}
|
||||
let mut witness = Vec::<(H::Fr, u8)>::with_capacity(self.depth);
|
||||
let mut i = index;
|
||||
let mut depth = self.depth;
|
||||
loop {
|
||||
i ^= 1;
|
||||
witness.push((self.get_node(depth, i), (1 - (i & 1)).try_into().unwrap()));
|
||||
i >>= 1;
|
||||
depth -= 1;
|
||||
if depth == 0 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
assert_eq!(i, 0);
|
||||
Ok(OptimalMerkleProof(witness))
|
||||
}
|
||||
|
||||
// Verifies a Merkle proof with respect to the input leaf and the tree root
|
||||
pub fn verify(&self, leaf: &H::Fr, witness: &OptimalMerkleProof<H>) -> io::Result<bool> {
|
||||
if witness.length() != self.depth {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"witness length doesn't match tree depth",
|
||||
));
|
||||
}
|
||||
let expected_root = witness.compute_root_from(leaf);
|
||||
Ok(expected_root.eq(&self.root()))
|
||||
}
|
||||
|
||||
// Utilities for updating the tree nodes
|
||||
|
||||
fn get_node(&self, depth: usize, index: usize) -> H::Fr {
|
||||
let node = *self
|
||||
.nodes
|
||||
.get(&(depth, index))
|
||||
.unwrap_or_else(|| &self.cached_nodes[depth]);
|
||||
node
|
||||
}
|
||||
|
||||
fn get_leaf(&self, index: usize) -> H::Fr {
|
||||
self.get_node(self.depth, index)
|
||||
}
|
||||
|
||||
fn hash_couple(&mut self, depth: usize, index: usize) -> H::Fr {
|
||||
let b = index & !1;
|
||||
H::hash(&[self.get_node(depth, b), self.get_node(depth, b + 1)])
|
||||
}
|
||||
|
||||
fn recalculate_from(&mut self, index: usize) {
|
||||
let mut i = index;
|
||||
let mut depth = self.depth;
|
||||
loop {
|
||||
let h = self.hash_couple(depth, i);
|
||||
i >>= 1;
|
||||
depth -= 1;
|
||||
self.nodes.insert((depth, i), h);
|
||||
if depth == 0 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
assert_eq!(depth, 0);
|
||||
assert_eq!(i, 0);
|
||||
}
|
||||
}
|
||||
|
||||
impl<H: Hasher> OptimalMerkleProof<H> {
|
||||
#[must_use]
|
||||
// Returns the length of a Merkle proof
|
||||
pub fn length(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
|
||||
/// Computes the leaf index corresponding to a Merkle proof
|
||||
#[must_use]
|
||||
pub fn leaf_index(&self) -> usize {
|
||||
// In current implementation the path indexes in a proof correspond to the binary representation of the leaf index
|
||||
let mut binary_repr = self.get_path_index();
|
||||
binary_repr.reverse();
|
||||
binary_repr
|
||||
.into_iter()
|
||||
.fold(0, |acc, digit| (acc << 1) + usize::from(digit))
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
/// Returns the path elements forming a Merkle proof
|
||||
pub fn get_path_elements(&self) -> Vec<H::Fr> {
|
||||
self.0.iter().map(|x| x.0).collect()
|
||||
}
|
||||
|
||||
/// Returns the path indexes forming a Merkle proof
|
||||
#[must_use]
|
||||
pub fn get_path_index(&self) -> Vec<u8> {
|
||||
self.0.iter().map(|x| x.1).collect()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
/// Computes the Merkle root corresponding by iteratively hashing a Merkle proof with a given input leaf
|
||||
pub fn compute_root_from(&self, leaf: &H::Fr) -> H::Fr {
|
||||
let mut acc: H::Fr = *leaf;
|
||||
for w in self.0.iter() {
|
||||
if w.1 == 0 {
|
||||
acc = H::hash(&[acc, w.0]);
|
||||
} else {
|
||||
acc = H::hash(&[w.0, acc]);
|
||||
}
|
||||
}
|
||||
acc
|
||||
}
|
||||
}
|
||||
|
||||
// Debug formatting for printing a (Optimal) Merkle Proof
|
||||
impl<H> Debug for OptimalMerkleProof<H>
|
||||
where
|
||||
H: Hasher,
|
||||
H::Fr: Debug,
|
||||
{
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_tuple("Proof").field(&self.0).finish()
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////
/// Full Merkle Tree Implementation
////////////////////////////////////////////////////////////

/// Merkle tree with all leaf and intermediate hashes stored
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct FullMerkleTree<H: Hasher> {
    /// The depth of the tree, i.e. the number of levels from leaf to root
    depth: usize,

    /// The nodes cached from the empty part of the tree (where leaves are set to default).
    /// Since the rightmost part of the tree is usually changed much later than its creation,
    /// we can prove accumulation of elements in the leftmost part, with no need to initialize the full tree
    /// and by caching few intermediate nodes to the root computed from default leaves
    cached_nodes: Vec<H::Fr>,

    /// The tree nodes stored as one flat array, root first: index 0 is the root
    /// and the leaves occupy the last 2^depth slots.
    nodes: Vec<H::Fr>,

    // The next available (i.e., never used) tree index. Equivalently, the number of leaves added to the tree
    // (deletions leave next_index unchanged)
    next_index: usize,
}

/// Element of a Merkle proof
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum FullMerkleBranch<H: Hasher> {
    /// Left branch taken, value is the right sibling hash.
    Left(H::Fr),

    /// Right branch taken, value is the left sibling hash.
    Right(H::Fr),
}

/// Merkle proof path, bottom to top.
#[derive(Clone, PartialEq, Eq)]
pub struct FullMerkleProof<H: Hasher>(pub Vec<FullMerkleBranch<H>>);
|
||||
|
||||
/// Implementations
|
||||
|
||||
impl<H: Hasher> FullMerkleTree<H> {
|
||||
pub fn default(depth: usize) -> Self {
|
||||
FullMerkleTree::<H>::new(depth, H::default_leaf())
|
||||
}
|
||||
|
||||
/// Creates a new `MerkleTree`
|
||||
/// depth - the height of the tree made only of hash nodes. 2^depth is the maximum number of leaves hash nodes
|
||||
pub fn new(depth: usize, initial_leaf: H::Fr) -> Self {
|
||||
// Compute cache node values, leaf to root
|
||||
let cached_nodes = successors(Some(initial_leaf), |prev| Some(H::hash(&[*prev, *prev])))
|
||||
.take(depth + 1)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// Compute node values
|
||||
let nodes = cached_nodes
|
||||
.iter()
|
||||
.rev()
|
||||
.enumerate()
|
||||
.flat_map(|(levels, hash)| repeat(hash).take(1 << levels))
|
||||
.cloned()
|
||||
.collect::<Vec<_>>();
|
||||
debug_assert!(nodes.len() == (1 << (depth + 1)) - 1);
|
||||
|
||||
let next_index = 0;
|
||||
|
||||
Self {
|
||||
depth,
|
||||
cached_nodes,
|
||||
nodes,
|
||||
next_index,
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the depth of the tree
|
||||
pub fn depth(&self) -> usize {
|
||||
self.depth
|
||||
}
|
||||
|
||||
// Returns the capacity of the tree, i.e. the maximum number of accumulatable leaves
|
||||
pub fn capacity(&self) -> usize {
|
||||
1 << self.depth
|
||||
}
|
||||
|
||||
// Returns the total number of leaves set
|
||||
pub fn leaves_set(&mut self) -> usize {
|
||||
self.next_index
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
// Returns the root of the tree
|
||||
pub fn root(&self) -> H::Fr {
|
||||
self.nodes[0]
|
||||
}
|
||||
|
||||
// Sets a leaf at the specified tree index
|
||||
pub fn set(&mut self, leaf: usize, hash: H::Fr) -> io::Result<()> {
|
||||
self.set_range(leaf, once(hash))?;
|
||||
self.next_index = max(self.next_index, leaf + 1);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Sets tree nodes, starting from start index
|
||||
// Function proper of FullMerkleTree implementation
|
||||
fn set_range<I: IntoIterator<Item = H::Fr>>(
|
||||
&mut self,
|
||||
start: usize,
|
||||
hashes: I,
|
||||
) -> io::Result<()> {
|
||||
let index = self.capacity() + start - 1;
|
||||
let mut count = 0;
|
||||
// TODO: Error/panic when hashes is longer than available leafs
|
||||
for (leaf, hash) in self.nodes[index..].iter_mut().zip(hashes) {
|
||||
*leaf = hash;
|
||||
count += 1;
|
||||
}
|
||||
if count != 0 {
|
||||
self.update_nodes(index, index + (count - 1));
|
||||
self.next_index = max(self.next_index, start + count);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Sets a leaf at the next available index
|
||||
pub fn update_next(&mut self, leaf: H::Fr) -> io::Result<()> {
|
||||
self.set(self.next_index, leaf)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
|
||||
pub fn delete(&mut self, index: usize) -> io::Result<()> {
|
||||
// We reset the leaf only if we previously set a leaf at that index
|
||||
if index < self.next_index {
|
||||
self.set(index, H::default_leaf())?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Computes a merkle proof the the leaf at the specified index
|
||||
pub fn proof(&self, leaf: usize) -> io::Result<FullMerkleProof<H>> {
|
||||
if leaf >= self.capacity() {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"index exceeds set size",
|
||||
));
|
||||
}
|
||||
let mut index = self.capacity() + leaf - 1;
|
||||
let mut path = Vec::with_capacity(self.depth + 1);
|
||||
while let Some(parent) = self.parent(index) {
|
||||
// Add proof for node at index to parent
|
||||
path.push(match index & 1 {
|
||||
1 => FullMerkleBranch::Left(self.nodes[index + 1]),
|
||||
0 => FullMerkleBranch::Right(self.nodes[index - 1]),
|
||||
_ => unreachable!(),
|
||||
});
|
||||
index = parent;
|
||||
}
|
||||
Ok(FullMerkleProof(path))
|
||||
}
|
||||
|
||||
// Verifies a Merkle proof with respect to the input leaf and the tree root
|
||||
pub fn verify(&self, hash: &H::Fr, proof: &FullMerkleProof<H>) -> io::Result<bool> {
|
||||
Ok(proof.compute_root_from(hash) == self.root())
|
||||
}
|
||||
|
||||
// Utilities for updating the tree nodes
|
||||
|
||||
/// For a given node index, return the parent node index
|
||||
/// Returns None if there is no parent (root node)
|
||||
fn parent(&self, index: usize) -> Option<usize> {
|
||||
if index == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(((index + 1) >> 1) - 1)
|
||||
}
|
||||
}
|
||||
|
||||
/// For a given node index, return index of the first (left) child.
|
||||
fn first_child(&self, index: usize) -> usize {
|
||||
(index << 1) + 1
|
||||
}
|
||||
|
||||
fn levels(&self, index: usize) -> usize {
|
||||
// `n.next_power_of_two()` will return `n` iff `n` is a power of two.
|
||||
// The extra offset corrects this.
|
||||
(index + 2).next_power_of_two().trailing_zeros() as usize - 1
|
||||
}
|
||||
|
||||
fn update_nodes(&mut self, start: usize, end: usize) {
|
||||
debug_assert_eq!(self.levels(start), self.levels(end));
|
||||
if let (Some(start), Some(end)) = (self.parent(start), self.parent(end)) {
|
||||
for parent in start..=end {
|
||||
let child = self.first_child(parent);
|
||||
self.nodes[parent] = H::hash(&[self.nodes[child], self.nodes[child + 1]]);
|
||||
}
|
||||
self.update_nodes(start, end);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<H: Hasher> FullMerkleProof<H> {
|
||||
#[must_use]
|
||||
// Returns the length of a Merkle proof
|
||||
pub fn length(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
|
||||
/// Computes the leaf index corresponding to a Merkle proof
|
||||
#[must_use]
|
||||
pub fn leaf_index(&self) -> usize {
|
||||
self.0.iter().rev().fold(0, |index, branch| match branch {
|
||||
FullMerkleBranch::Left(_) => index << 1,
|
||||
FullMerkleBranch::Right(_) => (index << 1) + 1,
|
||||
})
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
/// Returns the path elements forming a Merkle proof
|
||||
pub fn get_path_elements(&self) -> Vec<H::Fr> {
|
||||
self.0
|
||||
.iter()
|
||||
.map(|x| match x {
|
||||
FullMerkleBranch::Left(value) | FullMerkleBranch::Right(value) => *value,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Returns the path indexes forming a Merkle proof
|
||||
#[must_use]
|
||||
pub fn get_path_index(&self) -> Vec<u8> {
|
||||
self.0
|
||||
.iter()
|
||||
.map(|branch| match branch {
|
||||
FullMerkleBranch::Left(_) => 0,
|
||||
FullMerkleBranch::Right(_) => 1,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Computes the Merkle root corresponding by iteratively hashing a Merkle proof with a given input leaf
|
||||
#[must_use]
|
||||
pub fn compute_root_from(&self, hash: &H::Fr) -> H::Fr {
|
||||
self.0.iter().fold(*hash, |hash, branch| match branch {
|
||||
FullMerkleBranch::Left(sibling) => H::hash(&[hash, *sibling]),
|
||||
FullMerkleBranch::Right(sibling) => H::hash(&[*sibling, hash]),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Debug formatting for printing a (Full) Merkle Proof Branch
|
||||
impl<H> Debug for FullMerkleBranch<H>
|
||||
where
|
||||
H: Hasher,
|
||||
H::Fr: Debug,
|
||||
{
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Self::Left(arg0) => f.debug_tuple("Left").field(arg0).finish(),
|
||||
Self::Right(arg0) => f.debug_tuple("Right").field(arg0).finish(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Debug formatting for printing a (Full) Merkle Proof
|
||||
impl<H> Debug for FullMerkleProof<H>
|
||||
where
|
||||
H: Hasher,
|
||||
H::Fr: Debug,
|
||||
{
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_tuple("Proof").field(&self.0).finish()
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
/// Tests
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
// Tests adapted from https://github.com/worldcoin/semaphore-rs/blob/d462a4372f1fd9c27610f2acfe4841fab1d396aa/src/merkle_tree.rs
#[cfg(test)]
mod test {
    use super::*;
    use hex_literal::hex;
    use tiny_keccak::{Hasher as _, Keccak};

    // Minimal test hasher: Keccak-256 over the concatenation of the 32-byte inputs.
    struct Keccak256;

    impl Hasher for Keccak256 {
        type Fr = [u8; 32];

        fn default_leaf() -> Self::Fr {
            [0; 32]
        }

        fn hash(inputs: &[Self::Fr]) -> Self::Fr {
            let mut output = [0; 32];
            let mut hasher = Keccak::v256();
            for element in inputs {
                hasher.update(element);
            }
            hasher.finalize(&mut output);
            output
        }
    }

    #[test]
    fn test_root() {
        let leaves = [
            hex!("0000000000000000000000000000000000000000000000000000000000000001"),
            hex!("0000000000000000000000000000000000000000000000000000000000000002"),
            hex!("0000000000000000000000000000000000000000000000000000000000000003"),
            hex!("0000000000000000000000000000000000000000000000000000000000000004"),
        ];

        // Root of the empty depth-2 tree (all leaves zero).
        let default_tree_root =
            hex!("b4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30");

        // Expected roots after setting leaves[0..=i], one leaf at a time.
        let roots = [
            hex!("c1ba1812ff680ce84c1d5b4f1087eeb08147a4d510f3496b2849df3a73f5af95"),
            hex!("893760ec5b5bee236f29e85aef64f17139c3c1b7ff24ce64eb6315fca0f2485b"),
            hex!("222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c"),
            hex!("a9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36"),
        ];

        // Both implementations must produce identical roots.
        let mut tree = FullMerkleTree::<Keccak256>::new(2, [0; 32]);
        assert_eq!(tree.root(), default_tree_root);
        for i in 0..leaves.len() {
            tree.set(i, leaves[i]).unwrap();
            assert_eq!(tree.root(), roots[i]);
        }

        let mut tree = OptimalMerkleTree::<Keccak256>::new(2, [0; 32]);
        assert_eq!(tree.root(), default_tree_root);
        for i in 0..leaves.len() {
            tree.set(i, leaves[i]).unwrap();
            assert_eq!(tree.root(), roots[i]);
        }
    }

    #[test]
    fn test_proof() {
        let leaves = [
            hex!("0000000000000000000000000000000000000000000000000000000000000001"),
            hex!("0000000000000000000000000000000000000000000000000000000000000002"),
            hex!("0000000000000000000000000000000000000000000000000000000000000003"),
            hex!("0000000000000000000000000000000000000000000000000000000000000004"),
        ];

        // We test the FullMerkleTree implementation
        let mut tree = FullMerkleTree::<Keccak256>::new(2, [0; 32]);
        for i in 0..leaves.len() {
            // We set the leaves
            tree.set(i, leaves[i]).unwrap();

            // We compute a merkle proof
            let proof = tree.proof(i).expect("index should be set");

            // We verify if the merkle proof corresponds to the right leaf index
            assert_eq!(proof.leaf_index(), i);

            // We verify the proof
            assert!(tree.verify(&leaves[i], &proof).unwrap());

            // We ensure that the Merkle proof and the leaf generate the same root as the tree
            assert_eq!(proof.compute_root_from(&leaves[i]), tree.root());

            // We check that the proof is not valid for another leaf
            assert!(!tree
                .verify(&leaves[(i + 1) % leaves.len()], &proof)
                .unwrap());
        }

        // We test the OptimalMerkleTree implementation
        let mut tree = OptimalMerkleTree::<Keccak256>::new(2, [0; 32]);
        for i in 0..leaves.len() {
            // We set the leaves
            tree.set(i, leaves[i]).unwrap();

            // We compute a merkle proof
            let proof = tree.proof(i).expect("index should be set");

            // We verify if the merkle proof corresponds to the right leaf index
            assert_eq!(proof.leaf_index(), i);

            // We verify the proof
            assert!(tree.verify(&leaves[i], &proof).unwrap());

            // We ensure that the Merkle proof and the leaf generate the same root as the tree
            assert_eq!(proof.compute_root_from(&leaves[i]), tree.root());

            // We check that the proof is not valid for another leaf
            assert!(!tree
                .verify(&leaves[(i + 1) % leaves.len()], &proof)
                .unwrap());
        }
    }
}
|
||||
384
rln/src/pm_tree_adapter.rs
Normal file
384
rln/src/pm_tree_adapter.rs
Normal file
@@ -0,0 +1,384 @@
|
||||
use std::fmt::Debug;
|
||||
use std::path::PathBuf;
|
||||
use std::str::FromStr;
|
||||
|
||||
use color_eyre::{Report, Result};
|
||||
use serde_json::Value;
|
||||
|
||||
use utils::pmtree::tree::Key;
|
||||
use utils::pmtree::{Database, Hasher};
|
||||
use utils::*;
|
||||
|
||||
use crate::circuit::Fr;
|
||||
use crate::hashers::{poseidon_hash, PoseidonHash};
|
||||
use crate::utils::{bytes_le_to_fr, fr_to_bytes_le};
|
||||
|
||||
// Database key under which application metadata is persisted.
const METADATA_KEY: [u8; 8] = *b"metadata";

/// Persistent (sled-backed) Poseidon Merkle tree.
pub struct PmTree {
    tree: pmtree::MerkleTree<SledDB, PoseidonHash>,
    /// Occupancy flags for the leaves up to next_index:
    /// 0 if the leaf is empty, 1 if it has been set.
    cached_leaves_indices: Vec<u8>,
    // metadata that an application may use to store additional information
    metadata: Vec<u8>,
}

/// Merkle proof produced by a PmTree; thin wrapper over the pmtree proof type.
pub struct PmTreeProof {
    proof: pmtree::tree::MerkleProof<PoseidonHash>,
}

/// Field element type of a pmtree `Hasher`.
pub type FrOf<H> = <H as Hasher>::Fr;
|
||||
|
||||
// The pmtree Hasher trait used by pmtree Merkle tree
impl Hasher for PoseidonHash {
    type Fr = Fr;

    // Serializes a field element to little-endian bytes for database storage.
    fn serialize(value: Self::Fr) -> pmtree::Value {
        fr_to_bytes_le(&value)
    }

    // Deserializes a little-endian byte value back into a field element;
    // the companion value returned by bytes_le_to_fr is discarded.
    fn deserialize(value: pmtree::Value) -> Self::Fr {
        let (fr, _) = bytes_le_to_fr(&value);
        fr
    }

    // The default (empty) leaf value.
    fn default_leaf() -> Self::Fr {
        Fr::from(0)
    }

    fn hash(inputs: &[Self::Fr]) -> Self::Fr {
        poseidon_hash(inputs)
    }
}
|
||||
|
||||
fn get_tmp_path() -> PathBuf {
|
||||
std::env::temp_dir().join(format!("pmtree-{}", rand::random::<u64>()))
|
||||
}
|
||||
|
||||
// Default for the `temporary` configuration flag: databases are temporary
// unless the configuration says otherwise.
fn get_tmp() -> bool {
    true
}
|
||||
|
||||
/// Newtype wrapper around the sled `Config` used to open the tree database.
pub struct PmtreeConfig(Config);
|
||||
|
||||
impl FromStr for PmtreeConfig {
|
||||
type Err = Report;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self> {
|
||||
let config: Value = serde_json::from_str(s)?;
|
||||
|
||||
let path = config["path"].as_str();
|
||||
let path = path.map(PathBuf::from);
|
||||
let temporary = config["temporary"].as_bool();
|
||||
let cache_capacity = config["cache_capacity"].as_u64();
|
||||
let flush_every_ms = config["flush_every_ms"].as_u64();
|
||||
let mode = match config["mode"].as_str() {
|
||||
Some("HighThroughput") => Mode::HighThroughput,
|
||||
Some("LowSpace") => Mode::LowSpace,
|
||||
_ => Mode::HighThroughput,
|
||||
};
|
||||
let use_compression = config["use_compression"].as_bool();
|
||||
|
||||
if temporary.is_some()
|
||||
&& path.is_some()
|
||||
&& temporary.unwrap()
|
||||
&& path.as_ref().unwrap().exists()
|
||||
{
|
||||
return Err(Report::msg(format!(
|
||||
"Path {:?} already exists, cannot use temporary",
|
||||
path.unwrap()
|
||||
)));
|
||||
}
|
||||
|
||||
let config = Config::new()
|
||||
.temporary(temporary.unwrap_or(get_tmp()))
|
||||
.path(path.unwrap_or(get_tmp_path()))
|
||||
.cache_capacity(cache_capacity.unwrap_or(1024 * 1024 * 1024))
|
||||
.flush_every_ms(flush_every_ms)
|
||||
.mode(mode)
|
||||
.use_compression(use_compression.unwrap_or(false));
|
||||
Ok(PmtreeConfig(config))
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for PmtreeConfig {
|
||||
fn default() -> Self {
|
||||
let tmp_path = get_tmp_path();
|
||||
PmtreeConfig(
|
||||
Config::new()
|
||||
.temporary(true)
|
||||
.path(tmp_path)
|
||||
.cache_capacity(150_000)
|
||||
.mode(Mode::HighThroughput)
|
||||
.use_compression(false)
|
||||
.flush_every_ms(Some(12_000)),
|
||||
)
|
||||
}
|
||||
}
|
||||
impl Debug for PmtreeConfig {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
self.0.fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for PmtreeConfig {
|
||||
fn clone(&self) -> Self {
|
||||
PmtreeConfig(self.0.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl ZerokitMerkleTree for PmTree {
|
||||
type Proof = PmTreeProof;
|
||||
type Hasher = PoseidonHash;
|
||||
type Config = PmtreeConfig;
|
||||
|
||||
fn default(depth: usize) -> Result<Self> {
|
||||
let default_config = PmtreeConfig::default();
|
||||
PmTree::new(depth, Self::Hasher::default_leaf(), default_config)
|
||||
}
|
||||
|
||||
fn new(depth: usize, _default_leaf: FrOf<Self::Hasher>, config: Self::Config) -> Result<Self> {
|
||||
let tree_loaded = pmtree::MerkleTree::load(config.clone().0);
|
||||
let tree = match tree_loaded {
|
||||
Ok(tree) => tree,
|
||||
Err(_) => pmtree::MerkleTree::new(depth, config.0)?,
|
||||
};
|
||||
|
||||
Ok(PmTree {
|
||||
tree,
|
||||
cached_leaves_indices: vec![0; 1 << depth],
|
||||
metadata: Vec::new(),
|
||||
})
|
||||
}
|
||||
|
||||
fn depth(&self) -> usize {
|
||||
self.tree.depth()
|
||||
}
|
||||
|
||||
fn capacity(&self) -> usize {
|
||||
self.tree.capacity()
|
||||
}
|
||||
|
||||
fn leaves_set(&self) -> usize {
|
||||
self.tree.leaves_set()
|
||||
}
|
||||
|
||||
fn root(&self) -> FrOf<Self::Hasher> {
|
||||
self.tree.root()
|
||||
}
|
||||
|
||||
fn compute_root(&mut self) -> Result<FrOf<Self::Hasher>> {
|
||||
Ok(self.tree.root())
|
||||
}
|
||||
|
||||
fn set(&mut self, index: usize, leaf: FrOf<Self::Hasher>) -> Result<()> {
|
||||
self.tree
|
||||
.set(index, leaf)
|
||||
.map_err(|e| Report::msg(e.to_string()))?;
|
||||
self.cached_leaves_indices[index] = 1;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn set_range<I: IntoIterator<Item = FrOf<Self::Hasher>>>(
|
||||
&mut self,
|
||||
start: usize,
|
||||
values: I,
|
||||
) -> Result<()> {
|
||||
let v = values.into_iter().collect::<Vec<_>>();
|
||||
self.tree
|
||||
.set_range(start, v.clone().into_iter())
|
||||
.map_err(|e| Report::msg(e.to_string()))?;
|
||||
for i in start..v.len() {
|
||||
self.cached_leaves_indices[i] = 1
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get(&self, index: usize) -> Result<FrOf<Self::Hasher>> {
|
||||
self.tree.get(index).map_err(|e| Report::msg(e.to_string()))
|
||||
}
|
||||
|
||||
fn get_subtree_root(&self, n: usize, index: usize) -> Result<FrOf<Self::Hasher>> {
|
||||
if n > self.depth() {
|
||||
return Err(Report::msg("level exceeds depth size"));
|
||||
}
|
||||
if index >= self.capacity() {
|
||||
return Err(Report::msg("index exceeds set size"));
|
||||
}
|
||||
if n == 0 {
|
||||
Ok(self.root())
|
||||
} else if n == self.depth() {
|
||||
self.get(index)
|
||||
} else {
|
||||
let node = self
|
||||
.tree
|
||||
.get_elem(Key::new(n, index >> (self.depth() - n)))
|
||||
.unwrap();
|
||||
Ok(node)
|
||||
}
|
||||
}
|
||||
|
||||
fn get_empty_leaves_indices(&self) -> Vec<usize> {
|
||||
let next_idx = self.leaves_set();
|
||||
self.cached_leaves_indices
|
||||
.iter()
|
||||
.take(next_idx)
|
||||
.enumerate()
|
||||
.filter(|&(_, &v)| v == 0u8)
|
||||
.map(|(idx, _)| idx)
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn override_range<I: IntoIterator<Item = FrOf<Self::Hasher>>, J: IntoIterator<Item = usize>>(
|
||||
&mut self,
|
||||
start: usize,
|
||||
leaves: I,
|
||||
indices: J,
|
||||
) -> Result<()> {
|
||||
let leaves = leaves.into_iter().collect::<Vec<_>>();
|
||||
let mut indices = indices.into_iter().collect::<Vec<_>>();
|
||||
indices.sort();
|
||||
|
||||
match (leaves.len(), indices.len()) {
|
||||
(0, 0) => Err(Report::msg("no leaves or indices to be removed")),
|
||||
(1, 0) => self.set(start, leaves[0]),
|
||||
(0, 1) => self.delete(indices[0]),
|
||||
(_, 0) => self.set_range(start, leaves),
|
||||
(0, _) => self.remove_indices(&indices),
|
||||
(_, _) => self.remove_indices_and_set_leaves(start, leaves, &indices),
|
||||
}
|
||||
}
|
||||
|
||||
fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<()> {
|
||||
self.tree
|
||||
.update_next(leaf)
|
||||
.map_err(|e| Report::msg(e.to_string()))
|
||||
}
|
||||
|
||||
fn delete(&mut self, index: usize) -> Result<()> {
|
||||
self.tree
|
||||
.delete(index)
|
||||
.map_err(|e| Report::msg(e.to_string()))?;
|
||||
self.cached_leaves_indices[index] = 0;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn proof(&self, index: usize) -> Result<Self::Proof> {
|
||||
let proof = self.tree.proof(index)?;
|
||||
Ok(PmTreeProof { proof })
|
||||
}
|
||||
|
||||
fn verify(&self, leaf: &FrOf<Self::Hasher>, witness: &Self::Proof) -> Result<bool> {
|
||||
if self.tree.verify(leaf, &witness.proof) {
|
||||
Ok(true)
|
||||
} else {
|
||||
Err(Report::msg("verify failed"))
|
||||
}
|
||||
}
|
||||
|
||||
fn set_metadata(&mut self, metadata: &[u8]) -> Result<()> {
|
||||
self.tree.db.put(METADATA_KEY, metadata.to_vec())?;
|
||||
self.metadata = metadata.to_vec();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn metadata(&self) -> Result<Vec<u8>> {
|
||||
if !self.metadata.is_empty() {
|
||||
return Ok(self.metadata.clone());
|
||||
}
|
||||
// if empty, try searching the db
|
||||
let data = self.tree.db.get(METADATA_KEY)?;
|
||||
|
||||
if data.is_none() {
|
||||
// send empty Metadata
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
Ok(data.unwrap())
|
||||
}
|
||||
|
||||
fn close_db_connection(&mut self) -> Result<()> {
|
||||
self.tree.db.close().map_err(|e| Report::msg(e.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
// Convenience aliases for the hasher bound to PmTree and its field element type.
type PmTreeHasher = <PmTree as ZerokitMerkleTree>::Hasher;
type FrOfPmTreeHasher = FrOf<PmTreeHasher>;
|
||||
|
||||
impl PmTree {
|
||||
fn remove_indices(&mut self, indices: &[usize]) -> Result<()> {
|
||||
let start = indices[0];
|
||||
let end = indices.last().unwrap() + 1;
|
||||
|
||||
let new_leaves = (start..end).map(|_| PmTreeHasher::default_leaf());
|
||||
|
||||
self.tree
|
||||
.set_range(start, new_leaves)
|
||||
.map_err(|e| Report::msg(e.to_string()))?;
|
||||
|
||||
for i in start..end {
|
||||
self.cached_leaves_indices[i] = 0
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn remove_indices_and_set_leaves(
|
||||
&mut self,
|
||||
start: usize,
|
||||
leaves: Vec<FrOfPmTreeHasher>,
|
||||
indices: &[usize],
|
||||
) -> Result<()> {
|
||||
let min_index = *indices.first().unwrap();
|
||||
let max_index = start + leaves.len();
|
||||
|
||||
let mut set_values = vec![PmTreeHasher::default_leaf(); max_index - min_index];
|
||||
|
||||
for i in min_index..start {
|
||||
if !indices.contains(&i) {
|
||||
let value = self.tree.get(i)?;
|
||||
set_values[i - min_index] = value;
|
||||
}
|
||||
}
|
||||
|
||||
for (i, &leaf) in leaves.iter().enumerate() {
|
||||
set_values[start - min_index + i] = leaf;
|
||||
}
|
||||
|
||||
self.tree
|
||||
.set_range(start, set_values)
|
||||
.map_err(|e| Report::msg(e.to_string()))?;
|
||||
|
||||
for i in indices {
|
||||
self.cached_leaves_indices[*i] = 0;
|
||||
}
|
||||
|
||||
for i in start..(max_index - min_index) {
|
||||
self.cached_leaves_indices[i] = 1
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ZerokitMerkleProof for PmTreeProof {
    type Index = u8;
    type Hasher = PoseidonHash;

    /// Returns the length of the proof path.
    fn length(&self) -> usize {
        self.proof.length()
    }

    /// Returns the leaf index this proof opens.
    fn leaf_index(&self) -> usize {
        self.proof.leaf_index()
    }

    /// Returns the sibling hashes along the proof path.
    fn get_path_elements(&self) -> Vec<FrOf<Self::Hasher>> {
        self.proof.get_path_elements()
    }

    /// Returns the path direction indices of the proof.
    fn get_path_index(&self) -> Vec<Self::Index> {
        self.proof.get_path_index()
    }

    /// Recomputes the root implied by the proof and the given leaf.
    fn compute_root_from(&self, leaf: &FrOf<Self::Hasher>) -> FrOf<Self::Hasher> {
        self.proof.compute_root_from(leaf)
    }
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,129 +0,0 @@
|
||||
// This crate implements the Poseidon hash algorithm https://eprint.iacr.org/2019/458.pdf
|
||||
|
||||
// The implementation is taken from https://github.com/arnaucube/poseidon-rs/blob/233027d6075a637c29ad84a8a44f5653b81f0410/src/lib.rs
|
||||
// and slightly adapted to work over arkworks field data type
|
||||
|
||||
use crate::circuit::Fr;
|
||||
use crate::poseidon_constants::constants;
|
||||
use crate::utils::*;
|
||||
use ark_std::Zero;
|
||||
use once_cell::sync::Lazy;
|
||||
|
||||
/// Poseidon constants, with the `c` (round constant) and `m` (mixing matrix)
/// tables indexed by input width (t - 2), as consumed by `Poseidon::hash`.
#[derive(Debug)]
pub struct Constants {
    pub c: Vec<Vec<Fr>>,
    pub m: Vec<Vec<Vec<Fr>>>,
    pub n_rounds_f: usize,
    pub n_rounds_p: Vec<usize>,
}
|
||||
pub fn load_constants() -> Constants {
|
||||
let (c_str, m_str) = constants();
|
||||
let mut c: Vec<Vec<Fr>> = Vec::new();
|
||||
for i in 0..c_str.len() {
|
||||
let mut cci: Vec<Fr> = Vec::new();
|
||||
for j in 0..c_str[i].len() {
|
||||
let b: Fr = str_to_fr(c_str[i][j], 10);
|
||||
cci.push(b);
|
||||
}
|
||||
c.push(cci);
|
||||
}
|
||||
let mut m: Vec<Vec<Vec<Fr>>> = Vec::new();
|
||||
for i in 0..m_str.len() {
|
||||
let mut mi: Vec<Vec<Fr>> = Vec::new();
|
||||
for j in 0..m_str[i].len() {
|
||||
let mut mij: Vec<Fr> = Vec::new();
|
||||
for k in 0..m_str[i][j].len() {
|
||||
let b: Fr = str_to_fr(m_str[i][j][k], 10);
|
||||
mij.push(b);
|
||||
}
|
||||
mi.push(mij);
|
||||
}
|
||||
m.push(mi);
|
||||
}
|
||||
Constants {
|
||||
c: c,
|
||||
m: m,
|
||||
n_rounds_f: 8,
|
||||
n_rounds_p: vec![56, 57, 56, 60, 60, 63, 64, 63],
|
||||
}
|
||||
}
|
||||
|
||||
/// Poseidon permutation engine holding the loaded constants.
pub struct Poseidon {
    constants: Constants,
}
|
||||
impl Poseidon {
|
||||
pub fn new() -> Poseidon {
|
||||
Poseidon {
|
||||
constants: load_constants(),
|
||||
}
|
||||
}
|
||||
pub fn ark(&self, state: &mut [Fr], c: &[Fr], it: usize) {
|
||||
for i in 0..state.len() {
|
||||
state[i] += c[it + i];
|
||||
}
|
||||
}
|
||||
|
||||
pub fn sbox(&self, n_rounds_f: usize, n_rounds_p: usize, state: &mut [Fr], i: usize) {
|
||||
if (i < n_rounds_f / 2) || (i >= n_rounds_f / 2 + n_rounds_p) {
|
||||
for j in 0..state.len() {
|
||||
let aux = state[j];
|
||||
state[j] *= state[j];
|
||||
state[j] *= state[j];
|
||||
state[j] *= aux;
|
||||
}
|
||||
} else {
|
||||
let aux = state[0];
|
||||
state[0] *= state[0];
|
||||
state[0] *= state[0];
|
||||
state[0] *= aux;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn mix(&self, state: &[Fr], m: &[Vec<Fr>]) -> Vec<Fr> {
|
||||
let mut new_state: Vec<Fr> = Vec::new();
|
||||
for i in 0..state.len() {
|
||||
new_state.push(Fr::zero());
|
||||
for j in 0..state.len() {
|
||||
let mut mij = m[i][j];
|
||||
mij *= state[j];
|
||||
new_state[i] += mij;
|
||||
}
|
||||
}
|
||||
new_state.clone()
|
||||
}
|
||||
|
||||
pub fn hash(&self, inp: Vec<Fr>) -> Result<Fr, String> {
|
||||
let t = inp.len() + 1;
|
||||
if inp.is_empty() || (inp.len() >= self.constants.n_rounds_p.len() - 1) {
|
||||
return Err("Wrong inputs length".to_string());
|
||||
}
|
||||
let n_rounds_f = self.constants.n_rounds_f;
|
||||
let n_rounds_p = self.constants.n_rounds_p[t - 2];
|
||||
|
||||
let mut state = vec![Fr::zero(); t];
|
||||
state[1..].clone_from_slice(&inp);
|
||||
|
||||
for i in 0..(n_rounds_f + n_rounds_p) {
|
||||
self.ark(&mut state, &self.constants.c[t - 2], i * t);
|
||||
self.sbox(n_rounds_f, n_rounds_p, &mut state, i);
|
||||
state = self.mix(&state, &self.constants.m[t - 2]);
|
||||
}
|
||||
|
||||
Ok(state[0])
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Poseidon {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
// Poseidon Hash wrapper over above implementation. Adapted from semaphore-rs poseidon hash wrapper.
// Shared, lazily-initialized instance so the constants are loaded only once.
static POSEIDON: Lazy<Poseidon> = Lazy::new(Poseidon::new);

// Hashes the input slice with the shared Poseidon instance.
// Panics only when the input length is outside the supported widths.
pub fn poseidon_hash(input: &[Fr]) -> Fr {
    POSEIDON
        .hash(input.to_vec())
        .expect("hash with fixed input size can't fail")
}
|
||||
@@ -2,11 +2,17 @@
|
||||
|
||||
// Implementation inspired by https://github.com/worldcoin/semaphore-rs/blob/d462a4372f1fd9c27610f2acfe4841fab1d396aa/src/poseidon_tree.rs (no differences)
|
||||
|
||||
use crate::circuit::Fr;
|
||||
use crate::merkle_tree::*;
|
||||
use crate::poseidon_hash::poseidon_hash;
|
||||
use cfg_if::cfg_if;
|
||||
|
||||
// Select the tree-backend imports according to the enabled cargo feature.
cfg_if! {
    if #[cfg(feature = "pmtree-ft")] {
        use crate::pm_tree_adapter::*;
    } else {
        use crate::hashers::{PoseidonHash};
        use utils::merkle_tree::*;
    }
}
|
||||
|
||||
// The zerokit RLN default Merkle tree implementation is the OptimalMerkleTree.
|
||||
// To switch to FullMerkleTree implementation, it is enough to enable the fullmerkletree feature
|
||||
|
||||
@@ -14,96 +20,11 @@ cfg_if! {
|
||||
if #[cfg(feature = "fullmerkletree")] {
|
||||
pub type PoseidonTree = FullMerkleTree<PoseidonHash>;
|
||||
pub type MerkleProof = FullMerkleProof<PoseidonHash>;
|
||||
} else if #[cfg(feature = "pmtree-ft")] {
|
||||
pub type PoseidonTree = PmTree;
|
||||
pub type MerkleProof = PmTreeProof;
|
||||
} else {
|
||||
pub type PoseidonTree = OptimalMerkleTree<PoseidonHash>;
|
||||
pub type MerkleProof = OptimalMerkleProof<PoseidonHash>;
|
||||
}
|
||||
}
|
||||
|
||||
// The zerokit RLN default Hasher
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct PoseidonHash;

impl Hasher for PoseidonHash {
    type Fr = Fr;

    // The default (empty) leaf value.
    fn default_leaf() -> Self::Fr {
        Self::Fr::from(0)
    }

    fn hash(inputs: &[Self::Fr]) -> Self::Fr {
        poseidon_hash(inputs)
    }
}
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
/// Tests
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    /// A basic performance comparison between the two supported Merkle Tree implementations
    fn test_merkle_implementations_performances() {
        use std::time::{Duration, Instant};

        let tree_height = 20;
        let sample_size = 100;

        let leaves: Vec<Fr> = (0..sample_size).map(|s| Fr::from(s)).collect();

        // Accumulated timings, in nanoseconds.
        let mut gen_time_full: u128 = 0;
        let mut upd_time_full: u128 = 0;
        let mut gen_time_opt: u128 = 0;
        let mut upd_time_opt: u128 = 0;

        // Measure tree-construction time for both implementations.
        for _ in 0..sample_size.try_into().unwrap() {
            let now = Instant::now();
            FullMerkleTree::<PoseidonHash>::default(tree_height);
            gen_time_full += now.elapsed().as_nanos();

            let now = Instant::now();
            OptimalMerkleTree::<PoseidonHash>::default(tree_height);
            gen_time_opt += now.elapsed().as_nanos();
        }

        // Measure per-leaf update time, sanity-checking each generated proof.
        let mut tree_full = FullMerkleTree::<PoseidonHash>::default(tree_height);
        let mut tree_opt = OptimalMerkleTree::<PoseidonHash>::default(tree_height);
        for i in 0..sample_size.try_into().unwrap() {
            let now = Instant::now();
            tree_full.set(i, leaves[i]).unwrap();
            upd_time_full += now.elapsed().as_nanos();
            let proof = tree_full.proof(i).expect("index should be set");
            assert_eq!(proof.leaf_index(), i);

            let now = Instant::now();
            tree_opt.set(i, leaves[i]).unwrap();
            upd_time_opt += now.elapsed().as_nanos();
            let proof = tree_opt.proof(i).expect("index should be set");
            assert_eq!(proof.leaf_index(), i);
        }

        println!("Average tree generation time:");
        println!(
            " - Full Merkle Tree: {:?}",
            Duration::from_nanos((gen_time_full / sample_size).try_into().unwrap())
        );
        println!(
            " - Optimal Merkle Tree: {:?}",
            Duration::from_nanos((gen_time_opt / sample_size).try_into().unwrap())
        );

        println!("Average update_next execution time:");
        println!(
            " - Full Merkle Tree: {:?}",
            Duration::from_nanos((upd_time_full / sample_size).try_into().unwrap())
        );

        println!(
            " - Optimal Merkle Tree: {:?}",
            Duration::from_nanos((upd_time_opt / sample_size).try_into().unwrap())
        );
    }
}
|
||||
|
||||
@@ -1,16 +1,17 @@
|
||||
// This crate collects all the underlying primitives used to implement RLN
|
||||
|
||||
use ark_circom::{CircomReduction, WitnessCalculator};
|
||||
use ark_groth16::{
|
||||
create_proof_with_reduction_and_matrices, prepare_verifying_key,
|
||||
verify_proof as ark_verify_proof, Proof as ArkProof, ProvingKey, VerifyingKey,
|
||||
};
|
||||
use ark_groth16::{prepare_verifying_key, Groth16, Proof as ArkProof, ProvingKey, VerifyingKey};
|
||||
use ark_relations::r1cs::ConstraintMatrices;
|
||||
use ark_relations::r1cs::SynthesisError;
|
||||
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
|
||||
use ark_std::{rand::thread_rng, UniformRand};
|
||||
use color_eyre::Result;
|
||||
use color_eyre::{Report, Result};
|
||||
use num_bigint::BigInt;
|
||||
use rand::Rng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use rand_chacha::ChaCha20Rng;
|
||||
use serde::{Deserialize, Serialize};
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
use std::sync::Mutex;
|
||||
#[cfg(debug_assertions)]
|
||||
use std::time::Instant;
|
||||
@@ -18,23 +19,33 @@ use thiserror::Error;
|
||||
use tiny_keccak::{Hasher as _, Keccak};
|
||||
|
||||
use crate::circuit::{Curve, Fr};
|
||||
use crate::poseidon_hash::poseidon_hash;
|
||||
use crate::hashers::hash_to_field;
|
||||
use crate::hashers::poseidon_hash;
|
||||
use crate::poseidon_tree::*;
|
||||
use crate::public::RLN_IDENTIFIER;
|
||||
use crate::utils::*;
|
||||
use cfg_if::cfg_if;
|
||||
use utils::{ZerokitMerkleProof, ZerokitMerkleTree};
|
||||
|
||||
///////////////////////////////////////////////////////
|
||||
// RLN Witness data structure and utility functions
|
||||
///////////////////////////////////////////////////////
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
#[derive(Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct RLNWitnessInput {
|
||||
#[serde(serialize_with = "ark_se", deserialize_with = "ark_de")]
|
||||
identity_secret: Fr,
|
||||
#[serde(serialize_with = "ark_se", deserialize_with = "ark_de")]
|
||||
user_message_limit: Fr,
|
||||
#[serde(serialize_with = "ark_se", deserialize_with = "ark_de")]
|
||||
message_id: Fr,
|
||||
#[serde(serialize_with = "ark_se", deserialize_with = "ark_de")]
|
||||
path_elements: Vec<Fr>,
|
||||
identity_path_index: Vec<u8>,
|
||||
#[serde(serialize_with = "ark_se", deserialize_with = "ark_de")]
|
||||
x: Fr,
|
||||
epoch: Fr,
|
||||
rln_identifier: Fr,
|
||||
#[serde(serialize_with = "ark_se", deserialize_with = "ark_de")]
|
||||
external_nullifier: Fr,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
@@ -45,212 +56,199 @@ pub struct RLNProofValues {
|
||||
pub root: Fr,
|
||||
// Public Inputs:
|
||||
pub x: Fr,
|
||||
pub epoch: Fr,
|
||||
pub rln_identifier: Fr,
|
||||
pub external_nullifier: Fr,
|
||||
}
|
||||
|
||||
pub fn serialize_witness(rln_witness: &RLNWitnessInput) -> Vec<u8> {
|
||||
pub fn serialize_field_element(element: Fr) -> Vec<u8> {
|
||||
fr_to_bytes_le(&element)
|
||||
}
|
||||
|
||||
pub fn deserialize_field_element(serialized: Vec<u8>) -> Fr {
|
||||
let (element, _) = bytes_le_to_fr(&serialized);
|
||||
|
||||
element
|
||||
}
|
||||
|
||||
pub fn deserialize_identity_pair(serialized: Vec<u8>) -> (Fr, Fr) {
|
||||
let (identity_secret_hash, read) = bytes_le_to_fr(&serialized);
|
||||
let (id_commitment, _) = bytes_le_to_fr(&serialized[read..]);
|
||||
|
||||
(identity_secret_hash, id_commitment)
|
||||
}
|
||||
|
||||
pub fn deserialize_identity_tuple(serialized: Vec<u8>) -> (Fr, Fr, Fr, Fr) {
|
||||
let mut all_read = 0;
|
||||
|
||||
let (identity_trapdoor, read) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
all_read += read;
|
||||
|
||||
let (identity_nullifier, read) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
all_read += read;
|
||||
|
||||
let (identity_secret_hash, read) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
all_read += read;
|
||||
|
||||
let (identity_commitment, _) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
|
||||
(
|
||||
identity_trapdoor,
|
||||
identity_nullifier,
|
||||
identity_secret_hash,
|
||||
identity_commitment,
|
||||
)
|
||||
}
|
||||
|
||||
/// Serializes witness
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an error if `rln_witness.message_id` is not within `rln_witness.user_message_limit`.
|
||||
pub fn serialize_witness(rln_witness: &RLNWitnessInput) -> Result<Vec<u8>> {
|
||||
message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;
|
||||
|
||||
let mut serialized: Vec<u8> = Vec::new();
|
||||
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_witness.identity_secret));
|
||||
serialized.append(&mut vec_fr_to_bytes_le(&rln_witness.path_elements));
|
||||
serialized.append(&mut vec_u8_to_bytes_le(&rln_witness.identity_path_index));
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_witness.user_message_limit));
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_witness.message_id));
|
||||
serialized.append(&mut vec_fr_to_bytes_le(&rln_witness.path_elements)?);
|
||||
serialized.append(&mut vec_u8_to_bytes_le(&rln_witness.identity_path_index)?);
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_witness.x));
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_witness.epoch));
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_witness.rln_identifier));
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_witness.external_nullifier));
|
||||
|
||||
serialized
|
||||
Ok(serialized)
|
||||
}
|
||||
|
||||
pub fn deserialize_witness(serialized: &[u8]) -> (RLNWitnessInput, usize) {
|
||||
/// Deserializes witness
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an error if `message_id` is not within `user_message_limit`.
|
||||
pub fn deserialize_witness(serialized: &[u8]) -> Result<(RLNWitnessInput, usize)> {
|
||||
let mut all_read: usize = 0;
|
||||
|
||||
let (identity_secret, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
|
||||
let (identity_secret, read) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
all_read += read;
|
||||
|
||||
let (path_elements, read) = bytes_le_to_vec_fr(&serialized[all_read..].to_vec());
|
||||
let (user_message_limit, read) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
all_read += read;
|
||||
|
||||
let (identity_path_index, read) = bytes_le_to_vec_u8(&serialized[all_read..].to_vec());
|
||||
let (message_id, read) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
all_read += read;
|
||||
|
||||
let (x, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
|
||||
message_id_range_check(&message_id, &user_message_limit)?;
|
||||
|
||||
let (path_elements, read) = bytes_le_to_vec_fr(&serialized[all_read..])?;
|
||||
all_read += read;
|
||||
|
||||
let (epoch, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
|
||||
let (identity_path_index, read) = bytes_le_to_vec_u8(&serialized[all_read..])?;
|
||||
all_read += read;
|
||||
|
||||
let (rln_identifier, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
|
||||
let (x, read) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
all_read += read;
|
||||
|
||||
// TODO: check rln_identifier against public::RLN_IDENTIFIER
|
||||
assert_eq!(serialized.len(), all_read);
|
||||
let (external_nullifier, read) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
all_read += read;
|
||||
|
||||
(
|
||||
if serialized.len() != all_read {
|
||||
return Err(Report::msg("serialized length is not equal to all_read"));
|
||||
}
|
||||
|
||||
Ok((
|
||||
RLNWitnessInput {
|
||||
identity_secret,
|
||||
path_elements,
|
||||
identity_path_index,
|
||||
x,
|
||||
epoch,
|
||||
rln_identifier,
|
||||
external_nullifier,
|
||||
user_message_limit,
|
||||
message_id,
|
||||
},
|
||||
all_read,
|
||||
)
|
||||
))
|
||||
}
|
||||
|
||||
// This function deserializes input for kilic's rln generate_proof public API
|
||||
// https://github.com/kilic/rln/blob/7ac74183f8b69b399e3bc96c1ae8ab61c026dc43/src/public.rs#L148
|
||||
// input_data is [ id_key<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
|
||||
// input_data is [ identity_secret<32> | id_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal<var> ]
|
||||
// return value is a rln witness populated according to this information
|
||||
pub fn proof_inputs_to_rln_witness(
|
||||
tree: &mut PoseidonTree,
|
||||
serialized: &[u8],
|
||||
) -> (RLNWitnessInput, usize) {
|
||||
) -> Result<(RLNWitnessInput, usize)> {
|
||||
let mut all_read: usize = 0;
|
||||
|
||||
let (identity_secret, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
|
||||
let (identity_secret, read) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
all_read += read;
|
||||
|
||||
let id_index = u64::from_le_bytes(serialized[all_read..all_read + 8].try_into().unwrap());
|
||||
let id_index = usize::try_from(u64::from_le_bytes(
|
||||
serialized[all_read..all_read + 8].try_into()?,
|
||||
))?;
|
||||
all_read += 8;
|
||||
|
||||
let (epoch, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
|
||||
let (user_message_limit, read) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
all_read += read;
|
||||
|
||||
let signal_len = u64::from_le_bytes(serialized[all_read..all_read + 8].try_into().unwrap());
|
||||
let (message_id, read) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
all_read += read;
|
||||
|
||||
let (external_nullifier, read) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
all_read += read;
|
||||
|
||||
let signal_len = usize::try_from(u64::from_le_bytes(
|
||||
serialized[all_read..all_read + 8].try_into()?,
|
||||
))?;
|
||||
all_read += 8;
|
||||
|
||||
let signal: Vec<u8> =
|
||||
serialized[all_read..all_read + usize::try_from(signal_len).unwrap()].to_vec();
|
||||
let signal: Vec<u8> = serialized[all_read..all_read + signal_len].to_vec();
|
||||
|
||||
let merkle_proof = tree
|
||||
.proof(usize::try_from(id_index).unwrap())
|
||||
.expect("proof should exist");
|
||||
let merkle_proof = tree.proof(id_index).expect("proof should exist");
|
||||
let path_elements = merkle_proof.get_path_elements();
|
||||
let identity_path_index = merkle_proof.get_path_index();
|
||||
|
||||
let x = hash_to_field(&signal);
|
||||
|
||||
let rln_identifier = hash_to_field(RLN_IDENTIFIER);
|
||||
|
||||
(
|
||||
Ok((
|
||||
RLNWitnessInput {
|
||||
identity_secret,
|
||||
path_elements,
|
||||
identity_path_index,
|
||||
user_message_limit,
|
||||
message_id,
|
||||
x,
|
||||
epoch,
|
||||
rln_identifier,
|
||||
external_nullifier,
|
||||
},
|
||||
all_read,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn serialize_proof_values(rln_proof_values: &RLNProofValues) -> Vec<u8> {
|
||||
let mut serialized: Vec<u8> = Vec::new();
|
||||
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_proof_values.root));
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_proof_values.epoch));
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_proof_values.x));
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_proof_values.y));
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_proof_values.nullifier));
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_proof_values.rln_identifier));
|
||||
|
||||
serialized
|
||||
}
|
||||
|
||||
pub fn deserialize_proof_values(serialized: &[u8]) -> (RLNProofValues, usize) {
|
||||
let mut all_read: usize = 0;
|
||||
|
||||
let (root, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
|
||||
all_read += read;
|
||||
|
||||
let (epoch, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
|
||||
all_read += read;
|
||||
|
||||
let (x, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
|
||||
all_read += read;
|
||||
|
||||
let (y, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
|
||||
all_read += read;
|
||||
|
||||
let (nullifier, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
|
||||
all_read += read;
|
||||
|
||||
let (rln_identifier, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
|
||||
all_read += read;
|
||||
|
||||
(
|
||||
RLNProofValues {
|
||||
y,
|
||||
nullifier,
|
||||
root,
|
||||
x,
|
||||
epoch,
|
||||
rln_identifier,
|
||||
},
|
||||
all_read,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn rln_witness_from_json(input_json_str: &str) -> RLNWitnessInput {
|
||||
let input_json: serde_json::Value =
|
||||
serde_json::from_str(input_json_str).expect("JSON was not well-formatted");
|
||||
|
||||
let identity_secret = str_to_fr(&input_json["identity_secret"].to_string(), 10);
|
||||
|
||||
let path_elements = input_json["path_elements"]
|
||||
.as_array()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|v| str_to_fr(&v.to_string(), 10))
|
||||
.collect();
|
||||
|
||||
let identity_path_index = input_json["identity_path_index"]
|
||||
.as_array()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|v| v.as_u64().unwrap() as u8)
|
||||
.collect();
|
||||
|
||||
let x = str_to_fr(&input_json["x"].to_string(), 10);
|
||||
|
||||
let epoch = str_to_fr(&input_json["epoch"].to_string(), 16);
|
||||
|
||||
let rln_identifier = str_to_fr(&input_json["rln_identifier"].to_string(), 10);
|
||||
|
||||
// TODO: check rln_identifier against public::RLN_IDENTIFIER
|
||||
|
||||
RLNWitnessInput {
|
||||
identity_secret,
|
||||
path_elements,
|
||||
identity_path_index,
|
||||
x,
|
||||
epoch,
|
||||
rln_identifier,
|
||||
}
|
||||
))
|
||||
}
|
||||
|
||||
/// Creates `RLNWitnessInput` from it's fields.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an error if `message_id` is not within `user_message_limit`.
|
||||
pub fn rln_witness_from_values(
|
||||
identity_secret: Fr,
|
||||
merkle_proof: &MerkleProof,
|
||||
x: Fr,
|
||||
epoch: Fr,
|
||||
//rln_identifier: Fr,
|
||||
) -> RLNWitnessInput {
|
||||
external_nullifier: Fr,
|
||||
user_message_limit: Fr,
|
||||
message_id: Fr,
|
||||
) -> Result<RLNWitnessInput> {
|
||||
message_id_range_check(&message_id, &user_message_limit)?;
|
||||
|
||||
let path_elements = merkle_proof.get_path_elements();
|
||||
let identity_path_index = merkle_proof.get_path_index();
|
||||
let rln_identifier = hash_to_field(RLN_IDENTIFIER);
|
||||
|
||||
RLNWitnessInput {
|
||||
Ok(RLNWitnessInput {
|
||||
identity_secret,
|
||||
path_elements,
|
||||
identity_path_index,
|
||||
x,
|
||||
epoch,
|
||||
rln_identifier,
|
||||
}
|
||||
external_nullifier,
|
||||
user_message_limit,
|
||||
message_id,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn random_rln_witness(tree_height: usize) -> RLNWitnessInput {
|
||||
@@ -269,42 +267,118 @@ pub fn random_rln_witness(tree_height: usize) -> RLNWitnessInput {
|
||||
identity_path_index.push(rng.gen_range(0..2) as u8);
|
||||
}
|
||||
|
||||
let user_message_limit = Fr::from(100);
|
||||
let message_id = Fr::from(1);
|
||||
|
||||
RLNWitnessInput {
|
||||
identity_secret,
|
||||
path_elements,
|
||||
identity_path_index,
|
||||
x,
|
||||
epoch,
|
||||
rln_identifier,
|
||||
external_nullifier: poseidon_hash(&[epoch, rln_identifier]),
|
||||
user_message_limit,
|
||||
message_id,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn proof_values_from_witness(rln_witness: &RLNWitnessInput) -> RLNProofValues {
|
||||
pub fn proof_values_from_witness(rln_witness: &RLNWitnessInput) -> Result<RLNProofValues> {
|
||||
message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;
|
||||
|
||||
// y share
|
||||
let a_0 = rln_witness.identity_secret;
|
||||
let a_1 = poseidon_hash(&[a_0, rln_witness.epoch]);
|
||||
let y = rln_witness.x * a_1;
|
||||
let y = y + a_0;
|
||||
let a_1 = poseidon_hash(&[a_0, rln_witness.external_nullifier, rln_witness.message_id]);
|
||||
let y = a_0 + rln_witness.x * a_1;
|
||||
|
||||
// Nullifier
|
||||
let nullifier = poseidon_hash(&[a_1, rln_witness.rln_identifier]);
|
||||
let nullifier = poseidon_hash(&[a_1]);
|
||||
|
||||
// Merkle tree root computations
|
||||
let root = compute_tree_root(
|
||||
&rln_witness.identity_secret,
|
||||
&rln_witness.user_message_limit,
|
||||
&rln_witness.path_elements,
|
||||
&rln_witness.identity_path_index,
|
||||
true,
|
||||
);
|
||||
|
||||
RLNProofValues {
|
||||
Ok(RLNProofValues {
|
||||
y,
|
||||
nullifier,
|
||||
root,
|
||||
x: rln_witness.x,
|
||||
epoch: rln_witness.epoch,
|
||||
rln_identifier: rln_witness.rln_identifier,
|
||||
}
|
||||
external_nullifier: rln_witness.external_nullifier,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn serialize_proof_values(rln_proof_values: &RLNProofValues) -> Vec<u8> {
|
||||
let mut serialized: Vec<u8> = Vec::new();
|
||||
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_proof_values.root));
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_proof_values.external_nullifier));
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_proof_values.x));
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_proof_values.y));
|
||||
serialized.append(&mut fr_to_bytes_le(&rln_proof_values.nullifier));
|
||||
|
||||
serialized
|
||||
}
|
||||
|
||||
// Note: don't forget to skip the 128 bytes ZK proof, if serialized contains it.
|
||||
// This proc deserialzies only proof _values_, i.e. circuit outputs, not the zk proof.
|
||||
pub fn deserialize_proof_values(serialized: &[u8]) -> (RLNProofValues, usize) {
|
||||
let mut all_read: usize = 0;
|
||||
|
||||
let (root, read) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
all_read += read;
|
||||
|
||||
let (external_nullifier, read) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
all_read += read;
|
||||
|
||||
let (x, read) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
all_read += read;
|
||||
|
||||
let (y, read) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
all_read += read;
|
||||
|
||||
let (nullifier, read) = bytes_le_to_fr(&serialized[all_read..]);
|
||||
all_read += read;
|
||||
|
||||
(
|
||||
RLNProofValues {
|
||||
y,
|
||||
nullifier,
|
||||
root,
|
||||
x,
|
||||
external_nullifier,
|
||||
},
|
||||
all_read,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn prepare_prove_input(
|
||||
identity_secret: Fr,
|
||||
id_index: usize,
|
||||
external_nullifier: Fr,
|
||||
signal: &[u8],
|
||||
) -> Vec<u8> {
|
||||
let mut serialized: Vec<u8> = Vec::new();
|
||||
|
||||
serialized.append(&mut fr_to_bytes_le(&identity_secret));
|
||||
serialized.append(&mut normalize_usize(id_index));
|
||||
serialized.append(&mut fr_to_bytes_le(&external_nullifier));
|
||||
serialized.append(&mut normalize_usize(signal.len()));
|
||||
serialized.append(&mut signal.to_vec());
|
||||
|
||||
serialized
|
||||
}
|
||||
|
||||
#[allow(clippy::redundant_clone)]
|
||||
pub fn prepare_verify_input(proof_data: Vec<u8>, signal: &[u8]) -> Vec<u8> {
|
||||
let mut serialized: Vec<u8> = Vec::new();
|
||||
|
||||
serialized.append(&mut proof_data.clone());
|
||||
serialized.append(&mut normalize_usize(signal.len()));
|
||||
serialized.append(&mut signal.to_vec());
|
||||
|
||||
serialized
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////
|
||||
@@ -312,15 +386,13 @@ pub fn proof_values_from_witness(rln_witness: &RLNWitnessInput) -> RLNProofValue
|
||||
///////////////////////////////////////////////////////
|
||||
|
||||
pub fn compute_tree_root(
|
||||
leaf: &Fr,
|
||||
identity_secret: &Fr,
|
||||
user_message_limit: &Fr,
|
||||
path_elements: &[Fr],
|
||||
identity_path_index: &[u8],
|
||||
hash_leaf: bool,
|
||||
) -> Fr {
|
||||
let mut root = *leaf;
|
||||
if hash_leaf {
|
||||
root = poseidon_hash(&[root]);
|
||||
}
|
||||
let id_commitment = poseidon_hash(&[*identity_secret]);
|
||||
let mut root = poseidon_hash(&[id_commitment, *user_message_limit]);
|
||||
|
||||
for i in 0..identity_path_index.len() {
|
||||
if identity_path_index[i] == 0 {
|
||||
@@ -334,30 +406,98 @@ pub fn compute_tree_root(
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////
|
||||
// Signal/nullifier utility functions
|
||||
// Protocol utility functions
|
||||
///////////////////////////////////////////////////////
|
||||
|
||||
// Generates a tupe (identity_secret, id_commitment) where
|
||||
// identity_secret is random and id_commitment = PoseidonHash(identity_secret)
|
||||
// Generates a tuple (identity_secret_hash, id_commitment) where
|
||||
// identity_secret_hash is random and id_commitment = PoseidonHash(identity_secret_hash)
|
||||
// RNG is instantiated using thread_rng()
|
||||
pub fn keygen() -> (Fr, Fr) {
|
||||
let mut rng = thread_rng();
|
||||
let identity_secret = Fr::rand(&mut rng);
|
||||
let id_commitment = poseidon_hash(&[identity_secret]);
|
||||
(identity_secret, id_commitment)
|
||||
let identity_secret_hash = Fr::rand(&mut rng);
|
||||
let id_commitment = poseidon_hash(&[identity_secret_hash]);
|
||||
(identity_secret_hash, id_commitment)
|
||||
}
|
||||
|
||||
// Hashes arbitrary signal to the underlying prime field
|
||||
pub fn hash_to_field(signal: &[u8]) -> Fr {
|
||||
// We hash the input signal using Keccak256
|
||||
// (note that a bigger curve order might require a bigger hash blocksize)
|
||||
let mut hash = [0; 32];
|
||||
// Generates a tuple (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) where
|
||||
// identity_trapdoor and identity_nullifier are random,
|
||||
// identity_secret_hash = PoseidonHash(identity_trapdoor, identity_nullifier),
|
||||
// id_commitment = PoseidonHash(identity_secret_hash),
|
||||
// RNG is instantiated using thread_rng()
|
||||
// Generated credentials are compatible with Semaphore credentials
|
||||
pub fn extended_keygen() -> (Fr, Fr, Fr, Fr) {
|
||||
let mut rng = thread_rng();
|
||||
let identity_trapdoor = Fr::rand(&mut rng);
|
||||
let identity_nullifier = Fr::rand(&mut rng);
|
||||
let identity_secret_hash = poseidon_hash(&[identity_trapdoor, identity_nullifier]);
|
||||
let id_commitment = poseidon_hash(&[identity_secret_hash]);
|
||||
(
|
||||
identity_trapdoor,
|
||||
identity_nullifier,
|
||||
identity_secret_hash,
|
||||
id_commitment,
|
||||
)
|
||||
}
|
||||
|
||||
// Generates a tuple (identity_secret_hash, id_commitment) where
|
||||
// identity_secret_hash is random and id_commitment = PoseidonHash(identity_secret_hash)
|
||||
// RNG is instantiated using 20 rounds of ChaCha seeded with the hash of the input
|
||||
pub fn seeded_keygen(signal: &[u8]) -> (Fr, Fr) {
|
||||
// ChaCha20 requires a seed of exactly 32 bytes.
|
||||
// We first hash the input seed signal to a 32 bytes array and pass this as seed to ChaCha20
|
||||
let mut seed = [0; 32];
|
||||
let mut hasher = Keccak::v256();
|
||||
hasher.update(signal);
|
||||
hasher.finalize(&mut hash);
|
||||
hasher.finalize(&mut seed);
|
||||
|
||||
// We export the hash as a field element
|
||||
let (el, _) = bytes_le_to_fr(hash.as_ref());
|
||||
el
|
||||
let mut rng = ChaCha20Rng::from_seed(seed);
|
||||
let identity_secret_hash = Fr::rand(&mut rng);
|
||||
let id_commitment = poseidon_hash(&[identity_secret_hash]);
|
||||
(identity_secret_hash, id_commitment)
|
||||
}
|
||||
|
||||
// Generates a tuple (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) where
|
||||
// identity_trapdoor and identity_nullifier are random,
|
||||
// identity_secret_hash = PoseidonHash(identity_trapdoor, identity_nullifier),
|
||||
// id_commitment = PoseidonHash(identity_secret_hash),
|
||||
// RNG is instantiated using 20 rounds of ChaCha seeded with the hash of the input
|
||||
// Generated credentials are compatible with Semaphore credentials
|
||||
pub fn extended_seeded_keygen(signal: &[u8]) -> (Fr, Fr, Fr, Fr) {
|
||||
// ChaCha20 requires a seed of exactly 32 bytes.
|
||||
// We first hash the input seed signal to a 32 bytes array and pass this as seed to ChaCha20
|
||||
let mut seed = [0; 32];
|
||||
let mut hasher = Keccak::v256();
|
||||
hasher.update(signal);
|
||||
hasher.finalize(&mut seed);
|
||||
|
||||
let mut rng = ChaCha20Rng::from_seed(seed);
|
||||
let identity_trapdoor = Fr::rand(&mut rng);
|
||||
let identity_nullifier = Fr::rand(&mut rng);
|
||||
let identity_secret_hash = poseidon_hash(&[identity_trapdoor, identity_nullifier]);
|
||||
let id_commitment = poseidon_hash(&[identity_secret_hash]);
|
||||
(
|
||||
identity_trapdoor,
|
||||
identity_nullifier,
|
||||
identity_secret_hash,
|
||||
id_commitment,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn compute_id_secret(share1: (Fr, Fr), share2: (Fr, Fr)) -> Result<Fr, String> {
|
||||
// Assuming a0 is the identity secret and a1 = poseidonHash([a0, external_nullifier]),
|
||||
// a (x,y) share satisfies the following relation
|
||||
// y = a_0 + x * a_1
|
||||
let (x1, y1) = share1;
|
||||
let (x2, y2) = share2;
|
||||
|
||||
// If the two input shares were computed for the same external_nullifier and identity secret, we can recover the latter
|
||||
// y1 = a_0 + x1 * a_1
|
||||
// y2 = a_0 + x2 * a_1
|
||||
let a_1 = (y1 - y2) / (x1 - x2);
|
||||
let a_0 = y1 - x1 * a_1;
|
||||
|
||||
// If shares come from the same polynomial, a0 is correctly recovered and a1 = poseidonHash([a0, external_nullifier])
|
||||
Ok(a_0)
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////
|
||||
@@ -367,64 +507,48 @@ pub fn hash_to_field(signal: &[u8]) -> Fr {
|
||||
#[derive(Error, Debug)]
|
||||
pub enum ProofError {
|
||||
#[error("Error reading circuit key: {0}")]
|
||||
CircuitKeyError(#[from] std::io::Error),
|
||||
CircuitKeyError(#[from] Report),
|
||||
#[error("Error producing witness: {0}")]
|
||||
WitnessError(color_eyre::Report),
|
||||
WitnessError(Report),
|
||||
#[error("Error producing proof: {0}")]
|
||||
SynthesisError(#[from] SynthesisError),
|
||||
}
|
||||
|
||||
/// Generates a RLN proof
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns a [`ProofError`] if proving fails.
|
||||
pub fn generate_proof(
|
||||
witness_calculator: &Mutex<WitnessCalculator>,
|
||||
fn calculate_witness_element<E: ark_ec::pairing::Pairing>(
|
||||
witness: Vec<BigInt>,
|
||||
) -> Result<Vec<E::ScalarField>> {
|
||||
use ark_ff::PrimeField;
|
||||
let modulus = <E::ScalarField as PrimeField>::MODULUS;
|
||||
|
||||
// convert it to field elements
|
||||
use num_traits::Signed;
|
||||
let mut witness_vec = vec![];
|
||||
for w in witness.into_iter() {
|
||||
let w = if w.sign() == num_bigint::Sign::Minus {
|
||||
// Need to negate the witness element if negative
|
||||
modulus.into()
|
||||
- w.abs()
|
||||
.to_biguint()
|
||||
.ok_or(Report::msg("not a biguint value"))?
|
||||
} else {
|
||||
w.to_biguint().ok_or(Report::msg("not a biguint value"))?
|
||||
};
|
||||
witness_vec.push(E::ScalarField::from(w))
|
||||
}
|
||||
|
||||
Ok(witness_vec)
|
||||
}
|
||||
|
||||
pub fn generate_proof_with_witness(
|
||||
witness: Vec<BigInt>,
|
||||
proving_key: &(ProvingKey<Curve>, ConstraintMatrices<Fr>),
|
||||
rln_witness: &RLNWitnessInput,
|
||||
) -> Result<ArkProof<Curve>, ProofError> {
|
||||
// We confert the path indexes to field elements
|
||||
// TODO: check if necessary
|
||||
let mut path_elements = Vec::new();
|
||||
rln_witness
|
||||
.path_elements
|
||||
.iter()
|
||||
.for_each(|v| path_elements.push(to_bigint(v)));
|
||||
|
||||
let mut identity_path_index = Vec::new();
|
||||
rln_witness
|
||||
.identity_path_index
|
||||
.iter()
|
||||
.for_each(|v| identity_path_index.push(BigInt::from(*v)));
|
||||
|
||||
let inputs = [
|
||||
(
|
||||
"identity_secret",
|
||||
vec![to_bigint(&rln_witness.identity_secret)],
|
||||
),
|
||||
("path_elements", path_elements),
|
||||
("identity_path_index", identity_path_index),
|
||||
("x", vec![to_bigint(&rln_witness.x)]),
|
||||
("epoch", vec![to_bigint(&rln_witness.epoch)]),
|
||||
(
|
||||
"rln_identifier",
|
||||
vec![to_bigint(&rln_witness.rln_identifier)],
|
||||
),
|
||||
];
|
||||
let inputs = inputs
|
||||
.into_iter()
|
||||
.map(|(name, values)| (name.to_string(), values));
|
||||
|
||||
// If in debug mode, we measure and later print time take to compute witness
|
||||
#[cfg(debug_assertions)]
|
||||
let now = Instant::now();
|
||||
|
||||
let full_assignment = witness_calculator
|
||||
.lock()
|
||||
.expect("witness_calculator mutex should not get poisoned")
|
||||
.calculate_witness_element::<Curve, _>(inputs, false)
|
||||
.map_err(ProofError::WitnessError)?;
|
||||
let full_assignment =
|
||||
calculate_witness_element::<Curve>(witness).map_err(ProofError::WitnessError)?;
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
println!("witness generation took: {:.2?}", now.elapsed());
|
||||
@@ -438,7 +562,111 @@ pub fn generate_proof(
|
||||
#[cfg(debug_assertions)]
|
||||
let now = Instant::now();
|
||||
|
||||
let proof = create_proof_with_reduction_and_matrices::<_, CircomReduction>(
|
||||
let proof = Groth16::<_, CircomReduction>::create_proof_with_reduction_and_matrices(
|
||||
&proving_key.0,
|
||||
r,
|
||||
s,
|
||||
&proving_key.1,
|
||||
proving_key.1.num_instance_variables,
|
||||
proving_key.1.num_constraints,
|
||||
full_assignment.as_slice(),
|
||||
)?;
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
println!("proof generation took: {:.2?}", now.elapsed());
|
||||
|
||||
Ok(proof)
|
||||
}
|
||||
|
||||
/// Formats inputs for witness calculation
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an error if `rln_witness.message_id` is not within `rln_witness.user_message_limit`.
|
||||
pub fn inputs_for_witness_calculation(
|
||||
rln_witness: &RLNWitnessInput,
|
||||
) -> Result<[(&str, Vec<BigInt>); 7]> {
|
||||
message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;
|
||||
|
||||
// We convert the path indexes to field elements
|
||||
// TODO: check if necessary
|
||||
let mut path_elements = Vec::new();
|
||||
|
||||
for v in rln_witness.path_elements.iter() {
|
||||
path_elements.push(to_bigint(v)?);
|
||||
}
|
||||
|
||||
let mut identity_path_index = Vec::new();
|
||||
rln_witness
|
||||
.identity_path_index
|
||||
.iter()
|
||||
.for_each(|v| identity_path_index.push(BigInt::from(*v)));
|
||||
|
||||
Ok([
|
||||
(
|
||||
"identitySecret",
|
||||
vec![to_bigint(&rln_witness.identity_secret)?],
|
||||
),
|
||||
(
|
||||
"userMessageLimit",
|
||||
vec![to_bigint(&rln_witness.user_message_limit)?],
|
||||
),
|
||||
("messageId", vec![to_bigint(&rln_witness.message_id)?]),
|
||||
("pathElements", path_elements),
|
||||
("identityPathIndex", identity_path_index),
|
||||
("x", vec![to_bigint(&rln_witness.x)?]),
|
||||
(
|
||||
"externalNullifier",
|
||||
vec![to_bigint(&rln_witness.external_nullifier)?],
|
||||
),
|
||||
])
|
||||
}
|
||||
|
||||
/// Generates a RLN proof
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns a [`ProofError`] if proving fails.
|
||||
pub fn generate_proof(
|
||||
#[cfg(not(target_arch = "wasm32"))] witness_calculator: &Mutex<WitnessCalculator>,
|
||||
#[cfg(target_arch = "wasm32")] witness_calculator: &mut WitnessCalculator,
|
||||
proving_key: &(ProvingKey<Curve>, ConstraintMatrices<Fr>),
|
||||
rln_witness: &RLNWitnessInput,
|
||||
) -> Result<ArkProof<Curve>, ProofError> {
|
||||
let inputs = inputs_for_witness_calculation(rln_witness)?
|
||||
.into_iter()
|
||||
.map(|(name, values)| (name.to_string(), values));
|
||||
|
||||
// If in debug mode, we measure and later print time take to compute witness
|
||||
#[cfg(debug_assertions)]
|
||||
let now = Instant::now();
|
||||
|
||||
cfg_if! {
|
||||
if #[cfg(target_arch = "wasm32")] {
|
||||
let full_assignment = witness_calculator
|
||||
.calculate_witness_element::<Curve, _>(inputs, false)
|
||||
.map_err(ProofError::WitnessError)?;
|
||||
} else {
|
||||
let full_assignment = witness_calculator
|
||||
.lock()
|
||||
.expect("witness_calculator mutex should not get poisoned")
|
||||
.calculate_witness_element::<Curve, _>(inputs, false)
|
||||
.map_err(ProofError::WitnessError)?;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
println!("witness generation took: {:.2?}", now.elapsed());
|
||||
|
||||
// Random Values
|
||||
let mut rng = thread_rng();
|
||||
let r = Fr::rand(&mut rng);
|
||||
let s = Fr::rand(&mut rng);
|
||||
|
||||
// If in debug mode, we measure and later print time take to compute proof
|
||||
#[cfg(debug_assertions)]
|
||||
let now = Instant::now();
|
||||
let proof = Groth16::<_, CircomReduction>::create_proof_with_reduction_and_matrices(
|
||||
&proving_key.0,
|
||||
r,
|
||||
s,
|
||||
@@ -471,8 +699,7 @@ pub fn verify_proof(
|
||||
proof_values.root,
|
||||
proof_values.nullifier,
|
||||
proof_values.x,
|
||||
proof_values.epoch,
|
||||
proof_values.rln_identifier,
|
||||
proof_values.external_nullifier,
|
||||
];
|
||||
|
||||
// Check that the proof is valid
|
||||
@@ -483,10 +710,64 @@ pub fn verify_proof(
|
||||
#[cfg(debug_assertions)]
|
||||
let now = Instant::now();
|
||||
|
||||
let verified = ark_verify_proof(&pvk, proof, &inputs)?;
|
||||
let verified = Groth16::<_, CircomReduction>::verify_proof(&pvk, proof, &inputs)?;
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
println!("verify took: {:.2?}", now.elapsed());
|
||||
|
||||
Ok(verified)
|
||||
}
|
||||
|
||||
// auxiliary function for serialisation Fr to json using ark serilize
|
||||
fn ark_se<S, A: CanonicalSerialize>(a: &A, s: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
let mut bytes = vec![];
|
||||
a.serialize_compressed(&mut bytes)
|
||||
.map_err(serde::ser::Error::custom)?;
|
||||
s.serialize_bytes(&bytes)
|
||||
}
|
||||
|
||||
// auxiliary function for deserialisation Fr to json using ark serilize
|
||||
fn ark_de<'de, D, A: CanonicalDeserialize>(data: D) -> Result<A, D::Error>
|
||||
where
|
||||
D: serde::de::Deserializer<'de>,
|
||||
{
|
||||
let s: Vec<u8> = serde::de::Deserialize::deserialize(data)?;
|
||||
let a = A::deserialize_compressed_unchecked(s.as_slice());
|
||||
a.map_err(serde::de::Error::custom)
|
||||
}
|
||||
|
||||
/// Converts a [`RLNWitnessInput`](crate::protocol::RLNWitnessInput) object to the corresponding JSON serialization.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an error if `message_id` is not within `user_message_limit`.
|
||||
pub fn rln_witness_from_json(input_json: serde_json::Value) -> Result<RLNWitnessInput> {
|
||||
let rln_witness: RLNWitnessInput = serde_json::from_value(input_json).unwrap();
|
||||
message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;
|
||||
|
||||
Ok(rln_witness)
|
||||
}
|
||||
|
||||
/// Converts a JSON value into [`RLNWitnessInput`](crate::protocol::RLNWitnessInput) object.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an error if `rln_witness.message_id` is not within `rln_witness.user_message_limit`.
|
||||
pub fn rln_witness_to_json(rln_witness: &RLNWitnessInput) -> Result<serde_json::Value> {
|
||||
message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;
|
||||
|
||||
let rln_witness_json = serde_json::to_value(rln_witness)?;
|
||||
Ok(rln_witness_json)
|
||||
}
|
||||
|
||||
pub fn message_id_range_check(message_id: &Fr, user_message_limit: &Fr) -> Result<()> {
|
||||
if message_id > user_message_limit {
|
||||
return Err(color_eyre::Report::msg(
|
||||
"message_id is not within user_message_limit",
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
1680
rln/src/public.rs
1680
rln/src/public.rs
File diff suppressed because it is too large
Load Diff
980
rln/src/public_api_tests.rs
Normal file
980
rln/src/public_api_tests.rs
Normal file
@@ -0,0 +1,980 @@
|
||||
use crate::circuit::{Curve, Fr, TEST_TREE_HEIGHT};
|
||||
use crate::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash};
|
||||
use crate::protocol::*;
|
||||
use crate::public::RLN;
|
||||
use crate::utils::*;
|
||||
use ark_groth16::Proof as ArkProof;
|
||||
use ark_serialize::{CanonicalDeserialize, Read};
|
||||
use num_bigint::BigInt;
|
||||
use std::io::Cursor;
|
||||
use std::str::FromStr;
|
||||
use utils::ZerokitMerkleTree;
|
||||
|
||||
use ark_std::{rand::thread_rng, UniformRand};
|
||||
use rand::Rng;
|
||||
use serde_json::{json, Value};
|
||||
|
||||
#[test]
|
||||
// We test merkle batch Merkle tree additions
|
||||
fn test_merkle_operations() {
|
||||
let tree_height = TEST_TREE_HEIGHT;
|
||||
let no_of_leaves = 256;
|
||||
|
||||
// We generate a vector of random leaves
|
||||
let mut leaves: Vec<Fr> = Vec::new();
|
||||
let mut rng = thread_rng();
|
||||
for _ in 0..no_of_leaves {
|
||||
leaves.push(Fr::rand(&mut rng));
|
||||
}
|
||||
|
||||
// We create a new tree
|
||||
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
|
||||
|
||||
// We first add leaves one by one specifying the index
|
||||
for (i, leaf) in leaves.iter().enumerate() {
|
||||
// We check if the number of leaves set is consistent
|
||||
assert_eq!(rln.tree.leaves_set(), i);
|
||||
|
||||
let mut buffer = Cursor::new(fr_to_bytes_le(&leaf));
|
||||
rln.set_leaf(i, &mut buffer).unwrap();
|
||||
}
|
||||
|
||||
// We get the root of the tree obtained adding one leaf per time
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root_single, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
// We reset the tree to default
|
||||
rln.set_tree(tree_height).unwrap();
|
||||
|
||||
// We add leaves one by one using the internal index (new leaves goes in next available position)
|
||||
for leaf in &leaves {
|
||||
let mut buffer = Cursor::new(fr_to_bytes_le(&leaf));
|
||||
rln.set_next_leaf(&mut buffer).unwrap();
|
||||
}
|
||||
|
||||
// We check if numbers of leaves set is consistent
|
||||
assert_eq!(rln.tree.leaves_set(), no_of_leaves);
|
||||
|
||||
// We get the root of the tree obtained adding leaves using the internal index
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root_next, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
assert_eq!(root_single, root_next);
|
||||
|
||||
// We reset the tree to default
|
||||
rln.set_tree(tree_height).unwrap();
|
||||
|
||||
// We add leaves in a batch into the tree
|
||||
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
|
||||
rln.init_tree_with_leaves(&mut buffer).unwrap();
|
||||
|
||||
// We check if number of leaves set is consistent
|
||||
assert_eq!(rln.tree.leaves_set(), no_of_leaves);
|
||||
|
||||
// We get the root of the tree obtained adding leaves in batch
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root_batch, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
assert_eq!(root_single, root_batch);
|
||||
|
||||
// We now delete all leaves set and check if the root corresponds to the empty tree root
|
||||
// delete calls over indexes higher than no_of_leaves are ignored and will not increase self.tree.next_index
|
||||
for i in 0..no_of_leaves {
|
||||
rln.delete_leaf(i).unwrap();
|
||||
}
|
||||
|
||||
// We check if number of leaves set is consistent
|
||||
assert_eq!(rln.tree.leaves_set(), no_of_leaves);
|
||||
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root_delete, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
// We reset the tree to default
|
||||
rln.set_tree(tree_height).unwrap();
|
||||
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root_empty, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
assert_eq!(root_delete, root_empty);
|
||||
}
|
||||
|
||||
#[test]
|
||||
// We test leaf setting with a custom index, to enable batch updates to the root
|
||||
// Uses `set_leaves_from` to set leaves in a batch, from index `start_index`
|
||||
fn test_leaf_setting_with_index() {
|
||||
let tree_height = TEST_TREE_HEIGHT;
|
||||
let no_of_leaves = 256;
|
||||
|
||||
// We generate a vector of random leaves
|
||||
let mut leaves: Vec<Fr> = Vec::new();
|
||||
let mut rng = thread_rng();
|
||||
for _ in 0..no_of_leaves {
|
||||
leaves.push(Fr::rand(&mut rng));
|
||||
}
|
||||
|
||||
// set_index is the index from which we start setting leaves
|
||||
// random number between 0..no_of_leaves
|
||||
let set_index = rng.gen_range(0..no_of_leaves) as usize;
|
||||
|
||||
// We create a new tree
|
||||
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
|
||||
|
||||
// We add leaves in a batch into the tree
|
||||
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
|
||||
rln.init_tree_with_leaves(&mut buffer).unwrap();
|
||||
|
||||
// We check if number of leaves set is consistent
|
||||
assert_eq!(rln.tree.leaves_set(), no_of_leaves);
|
||||
|
||||
// We get the root of the tree obtained adding leaves in batch
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root_batch_with_init, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
// `init_tree_with_leaves` resets the tree to the height it was initialized with, using `set_tree`
|
||||
|
||||
// We add leaves in a batch starting from index 0..set_index
|
||||
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[0..set_index]).unwrap());
|
||||
rln.init_tree_with_leaves(&mut buffer).unwrap();
|
||||
|
||||
// We add the remaining n leaves in a batch starting from index m
|
||||
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[set_index..]).unwrap());
|
||||
rln.set_leaves_from(set_index, &mut buffer).unwrap();
|
||||
|
||||
// We check if number of leaves set is consistent
|
||||
assert_eq!(rln.tree.leaves_set(), no_of_leaves);
|
||||
|
||||
// We get the root of the tree obtained adding leaves in batch
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root_batch_with_custom_index, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
assert_eq!(root_batch_with_init, root_batch_with_custom_index);
|
||||
|
||||
// We reset the tree to default
|
||||
rln.set_tree(tree_height).unwrap();
|
||||
|
||||
// We add leaves one by one using the internal index (new leaves goes in next available position)
|
||||
for leaf in &leaves {
|
||||
let mut buffer = Cursor::new(fr_to_bytes_le(&leaf));
|
||||
rln.set_next_leaf(&mut buffer).unwrap();
|
||||
}
|
||||
|
||||
// We check if numbers of leaves set is consistent
|
||||
assert_eq!(rln.tree.leaves_set(), no_of_leaves);
|
||||
|
||||
// We get the root of the tree obtained adding leaves using the internal index
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root_single_additions, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
assert_eq!(root_batch_with_init, root_single_additions);
|
||||
|
||||
rln.flush().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
// Tests the atomic_operation fn, which set_leaves_from uses internally
|
||||
fn test_atomic_operation() {
|
||||
let tree_height = TEST_TREE_HEIGHT;
|
||||
let no_of_leaves = 256;
|
||||
|
||||
// We generate a vector of random leaves
|
||||
let mut leaves: Vec<Fr> = Vec::new();
|
||||
let mut rng = thread_rng();
|
||||
for _ in 0..no_of_leaves {
|
||||
leaves.push(Fr::rand(&mut rng));
|
||||
}
|
||||
|
||||
// We create a new tree
|
||||
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
|
||||
|
||||
// We add leaves in a batch into the tree
|
||||
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
|
||||
rln.init_tree_with_leaves(&mut buffer).unwrap();
|
||||
|
||||
// We check if number of leaves set is consistent
|
||||
assert_eq!(rln.tree.leaves_set(), no_of_leaves);
|
||||
|
||||
// We get the root of the tree obtained adding leaves in batch
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root_after_insertion, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
// We check if number of leaves set is consistent
|
||||
assert_eq!(rln.tree.leaves_set(), no_of_leaves);
|
||||
|
||||
let last_leaf = leaves.last().unwrap();
|
||||
let last_leaf_index = no_of_leaves - 1;
|
||||
let indices = vec![last_leaf_index as u8];
|
||||
let last_leaf = vec![*last_leaf];
|
||||
let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices).unwrap());
|
||||
let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&last_leaf).unwrap());
|
||||
|
||||
rln.atomic_operation(last_leaf_index, leaves_buffer, indices_buffer)
|
||||
.unwrap();
|
||||
|
||||
// We get the root of the tree obtained after a no-op
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root_after_noop, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
assert_eq!(root_after_insertion, root_after_noop);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_atomic_operation_zero_indexed() {
|
||||
// Test duplicated from https://github.com/waku-org/go-zerokit-rln/pull/12/files
|
||||
let tree_height = TEST_TREE_HEIGHT;
|
||||
let no_of_leaves = 256;
|
||||
|
||||
// We generate a vector of random leaves
|
||||
let mut leaves: Vec<Fr> = Vec::new();
|
||||
let mut rng = thread_rng();
|
||||
for _ in 0..no_of_leaves {
|
||||
leaves.push(Fr::rand(&mut rng));
|
||||
}
|
||||
|
||||
// We create a new tree
|
||||
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
|
||||
|
||||
// We add leaves in a batch into the tree
|
||||
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
|
||||
rln.init_tree_with_leaves(&mut buffer).unwrap();
|
||||
|
||||
// We check if number of leaves set is consistent
|
||||
assert_eq!(rln.tree.leaves_set(), no_of_leaves);
|
||||
|
||||
// We get the root of the tree obtained adding leaves in batch
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root_after_insertion, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
let zero_index = 0;
|
||||
let indices = vec![zero_index as u8];
|
||||
let zero_leaf: Vec<Fr> = vec![];
|
||||
let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices).unwrap());
|
||||
let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&zero_leaf).unwrap());
|
||||
rln.atomic_operation(0, leaves_buffer, indices_buffer)
|
||||
.unwrap();
|
||||
|
||||
// We get the root of the tree obtained after a deletion
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root_after_deletion, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
assert_ne!(root_after_insertion, root_after_deletion);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_atomic_operation_consistency() {
|
||||
// Test duplicated from https://github.com/waku-org/go-zerokit-rln/pull/12/files
|
||||
let tree_height = TEST_TREE_HEIGHT;
|
||||
let no_of_leaves = 256;
|
||||
|
||||
// We generate a vector of random leaves
|
||||
let mut leaves: Vec<Fr> = Vec::new();
|
||||
let mut rng = thread_rng();
|
||||
for _ in 0..no_of_leaves {
|
||||
leaves.push(Fr::rand(&mut rng));
|
||||
}
|
||||
|
||||
// We create a new tree
|
||||
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
|
||||
|
||||
// We add leaves in a batch into the tree
|
||||
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
|
||||
rln.init_tree_with_leaves(&mut buffer).unwrap();
|
||||
|
||||
// We check if number of leaves set is consistent
|
||||
assert_eq!(rln.tree.leaves_set(), no_of_leaves);
|
||||
|
||||
// We get the root of the tree obtained adding leaves in batch
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root_after_insertion, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
let set_index = rng.gen_range(0..no_of_leaves) as usize;
|
||||
let indices = vec![set_index as u8];
|
||||
let zero_leaf: Vec<Fr> = vec![];
|
||||
let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices).unwrap());
|
||||
let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&zero_leaf).unwrap());
|
||||
rln.atomic_operation(0, leaves_buffer, indices_buffer)
|
||||
.unwrap();
|
||||
|
||||
// We get the root of the tree obtained after a deletion
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root_after_deletion, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
assert_ne!(root_after_insertion, root_after_deletion);
|
||||
|
||||
// We get the leaf
|
||||
let mut output_buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_leaf(set_index, &mut output_buffer).unwrap();
|
||||
let (received_leaf, _) = bytes_le_to_fr(output_buffer.into_inner().as_ref());
|
||||
|
||||
assert_eq!(received_leaf, Fr::from(0));
|
||||
}
|
||||
|
||||
#[allow(unused_must_use)]
|
||||
#[test]
|
||||
// This test checks if `set_leaves_from` throws an error when the index is out of bounds
|
||||
fn test_set_leaves_bad_index() {
|
||||
let tree_height = TEST_TREE_HEIGHT;
|
||||
let no_of_leaves = 256;
|
||||
|
||||
// We generate a vector of random leaves
|
||||
let mut leaves: Vec<Fr> = Vec::new();
|
||||
let mut rng = thread_rng();
|
||||
for _ in 0..no_of_leaves {
|
||||
leaves.push(Fr::rand(&mut rng));
|
||||
}
|
||||
let bad_index = (1 << tree_height) - rng.gen_range(0..no_of_leaves) as usize;
|
||||
|
||||
// We create a new tree
|
||||
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
|
||||
|
||||
// Get root of empty tree
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root_empty, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
// We add leaves in a batch into the tree
|
||||
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
|
||||
rln.set_leaves_from(bad_index, &mut buffer)
|
||||
.expect_err("Should throw an error");
|
||||
|
||||
// We check if number of leaves set is consistent
|
||||
assert_eq!(rln.tree.leaves_set(), 0);
|
||||
|
||||
// Get the root of the tree
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root_after_bad_set, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
assert_eq!(root_empty, root_after_bad_set);
|
||||
}
|
||||
|
||||
fn fq_from_str(s: String) -> ark_bn254::Fq {
|
||||
ark_bn254::Fq::from_str(&s).unwrap()
|
||||
}
|
||||
|
||||
fn g1_from_str(g1: &[String]) -> ark_bn254::G1Affine {
|
||||
let x = fq_from_str(g1[0].clone());
|
||||
let y = fq_from_str(g1[1].clone());
|
||||
let z = fq_from_str(g1[2].clone());
|
||||
ark_bn254::G1Affine::from(ark_bn254::G1Projective::new(x, y, z))
|
||||
}
|
||||
|
||||
fn g2_from_str(g2: &[Vec<String>]) -> ark_bn254::G2Affine {
|
||||
let c0 = fq_from_str(g2[0][0].clone());
|
||||
let c1 = fq_from_str(g2[0][1].clone());
|
||||
let x = ark_bn254::Fq2::new(c0, c1);
|
||||
|
||||
let c0 = fq_from_str(g2[1][0].clone());
|
||||
let c1 = fq_from_str(g2[1][1].clone());
|
||||
let y = ark_bn254::Fq2::new(c0, c1);
|
||||
|
||||
let c0 = fq_from_str(g2[2][0].clone());
|
||||
let c1 = fq_from_str(g2[2][1].clone());
|
||||
let z = ark_bn254::Fq2::new(c0, c1);
|
||||
|
||||
ark_bn254::G2Affine::from(ark_bn254::G2Projective::new(x, y, z))
|
||||
}
|
||||
|
||||
fn value_to_string_vec(value: &Value) -> Vec<String> {
|
||||
value
|
||||
.as_array()
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.map(|val| val.as_str().unwrap().to_string())
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_groth16_proof_hardcoded() {
|
||||
let tree_height = TEST_TREE_HEIGHT;
|
||||
|
||||
let rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
|
||||
|
||||
let valid_snarkjs_proof = json!({
|
||||
"pi_a": [
|
||||
"606446415626469993821291758185575230335423926365686267140465300918089871829",
|
||||
"14881534001609371078663128199084130129622943308489025453376548677995646280161",
|
||||
"1"
|
||||
],
|
||||
"pi_b": [
|
||||
[
|
||||
"18053812507994813734583839134426913715767914942522332114506614735770984570178",
|
||||
"11219916332635123001710279198522635266707985651975761715977705052386984005181"
|
||||
],
|
||||
[
|
||||
"17371289494006920912949790045699521359436706797224428511776122168520286372970",
|
||||
"14038575727257298083893642903204723310279435927688342924358714639926373603890"
|
||||
],
|
||||
[
|
||||
"1",
|
||||
"0"
|
||||
]
|
||||
],
|
||||
"pi_c": [
|
||||
"17701377127561410274754535747274973758826089226897242202671882899370780845888",
|
||||
"12608543716397255084418384146504333522628400182843246910626782513289789807030",
|
||||
"1"
|
||||
],
|
||||
"protocol": "groth16",
|
||||
"curve": "bn128"
|
||||
});
|
||||
let valid_ark_proof = ArkProof {
|
||||
a: g1_from_str(&value_to_string_vec(&valid_snarkjs_proof["pi_a"])),
|
||||
b: g2_from_str(
|
||||
&valid_snarkjs_proof["pi_b"]
|
||||
.as_array()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|item| value_to_string_vec(item))
|
||||
.collect::<Vec<Vec<String>>>(),
|
||||
),
|
||||
c: g1_from_str(&value_to_string_vec(&valid_snarkjs_proof["pi_c"])),
|
||||
};
|
||||
|
||||
let valid_proof_values = RLNProofValues {
|
||||
x: str_to_fr(
|
||||
"20645213238265527935869146898028115621427162613172918400241870500502509785943",
|
||||
10,
|
||||
)
|
||||
.unwrap(),
|
||||
external_nullifier: str_to_fr(
|
||||
"21074405743803627666274838159589343934394162804826017440941339048886754734203",
|
||||
10,
|
||||
)
|
||||
.unwrap(),
|
||||
y: str_to_fr(
|
||||
"16401008481486069296141645075505218976370369489687327284155463920202585288271",
|
||||
10,
|
||||
)
|
||||
.unwrap(),
|
||||
root: str_to_fr(
|
||||
"8502402278351299594663821509741133196466235670407051417832304486953898514733",
|
||||
10,
|
||||
)
|
||||
.unwrap(),
|
||||
nullifier: str_to_fr(
|
||||
"9102791780887227194595604713537772536258726662792598131262022534710887343694",
|
||||
10,
|
||||
)
|
||||
.unwrap(),
|
||||
};
|
||||
|
||||
let verified = verify_proof(&rln.verification_key, &valid_ark_proof, &valid_proof_values);
|
||||
assert!(verified.unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
// This test is similar to the one in lib, but uses only public API
|
||||
fn test_groth16_proof() {
|
||||
let tree_height = TEST_TREE_HEIGHT;
|
||||
|
||||
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
|
||||
|
||||
// Note: we only test Groth16 proof generation, so we ignore setting the tree in the RLN object
|
||||
let rln_witness = random_rln_witness(tree_height);
|
||||
let proof_values = proof_values_from_witness(&rln_witness).unwrap();
|
||||
|
||||
// We compute a Groth16 proof
|
||||
let mut input_buffer = Cursor::new(serialize_witness(&rln_witness).unwrap());
|
||||
let mut output_buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.prove(&mut input_buffer, &mut output_buffer).unwrap();
|
||||
let serialized_proof = output_buffer.into_inner();
|
||||
|
||||
// Before checking public verify API, we check that the (deserialized) proof generated by prove is actually valid
|
||||
let proof = ArkProof::deserialize_compressed(&mut Cursor::new(&serialized_proof)).unwrap();
|
||||
let verified = verify_proof(&rln.verification_key, &proof, &proof_values);
|
||||
// dbg!(verified.unwrap());
|
||||
assert!(verified.unwrap());
|
||||
|
||||
// We prepare the input to prove API, consisting of serialized_proof (compressed, 4*32 bytes) || serialized_proof_values (6*32 bytes)
|
||||
let serialized_proof_values = serialize_proof_values(&proof_values);
|
||||
let mut verify_data = Vec::<u8>::new();
|
||||
verify_data.extend(&serialized_proof);
|
||||
verify_data.extend(&serialized_proof_values);
|
||||
let mut input_buffer = Cursor::new(verify_data);
|
||||
|
||||
// We verify the Groth16 proof against the provided proof values
|
||||
let verified = rln.verify(&mut input_buffer).unwrap();
|
||||
|
||||
assert!(verified);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rln_proof() {
|
||||
let tree_height = TEST_TREE_HEIGHT;
|
||||
let no_of_leaves = 256;
|
||||
|
||||
// We generate a vector of random leaves
|
||||
let mut leaves: Vec<Fr> = Vec::new();
|
||||
let mut rng = thread_rng();
|
||||
for _ in 0..no_of_leaves {
|
||||
let id_commitment = Fr::rand(&mut rng);
|
||||
let rate_commitment = utils_poseidon_hash(&[id_commitment, Fr::from(100)]);
|
||||
leaves.push(rate_commitment);
|
||||
}
|
||||
|
||||
// We create a new RLN instance
|
||||
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
|
||||
|
||||
// We add leaves in a batch into the tree
|
||||
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
|
||||
rln.init_tree_with_leaves(&mut buffer).unwrap();
|
||||
|
||||
// Generate identity pair
|
||||
let (identity_secret_hash, id_commitment) = keygen();
|
||||
|
||||
// We set as leaf rate_commitment after storing its index
|
||||
let identity_index = rln.tree.leaves_set();
|
||||
let user_message_limit = Fr::from(65535);
|
||||
let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);
|
||||
let mut buffer = Cursor::new(fr_to_bytes_le(&rate_commitment));
|
||||
rln.set_next_leaf(&mut buffer).unwrap();
|
||||
|
||||
// We generate a random signal
|
||||
let mut rng = rand::thread_rng();
|
||||
let signal: [u8; 32] = rng.gen();
|
||||
|
||||
// We generate a random epoch
|
||||
let epoch = hash_to_field(b"test-epoch");
|
||||
// We generate a random rln_identifier
|
||||
let rln_identifier = hash_to_field(b"test-rln-identifier");
|
||||
|
||||
// We prepare input for generate_rln_proof API
|
||||
let mut serialized: Vec<u8> = Vec::new();
|
||||
serialized.append(&mut fr_to_bytes_le(&identity_secret_hash));
|
||||
serialized.append(&mut normalize_usize(identity_index));
|
||||
serialized.append(&mut fr_to_bytes_le(&user_message_limit));
|
||||
serialized.append(&mut fr_to_bytes_le(&Fr::from(1)));
|
||||
serialized.append(&mut fr_to_bytes_le(&utils_poseidon_hash(&[
|
||||
epoch,
|
||||
rln_identifier,
|
||||
])));
|
||||
serialized.append(&mut normalize_usize(signal.len()));
|
||||
serialized.append(&mut signal.to_vec());
|
||||
|
||||
let mut input_buffer = Cursor::new(serialized);
|
||||
let mut output_buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.generate_rln_proof(&mut input_buffer, &mut output_buffer)
|
||||
.unwrap();
|
||||
|
||||
// output_data is [ proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32> ]
|
||||
let mut proof_data = output_buffer.into_inner();
|
||||
|
||||
// We prepare input for verify_rln_proof API
|
||||
// input_data is [ proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32> | signal_len<8> | signal<var> ]
|
||||
// that is [ proof_data || signal_len<8> | signal<var> ]
|
||||
proof_data.append(&mut normalize_usize(signal.len()));
|
||||
proof_data.append(&mut signal.to_vec());
|
||||
|
||||
let mut input_buffer = Cursor::new(proof_data);
|
||||
let verified = rln.verify_rln_proof(&mut input_buffer).unwrap();
|
||||
|
||||
assert!(verified);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rln_with_witness() {
|
||||
let tree_height = TEST_TREE_HEIGHT;
|
||||
let no_of_leaves = 256;
|
||||
|
||||
// We generate a vector of random leaves
|
||||
let mut leaves: Vec<Fr> = Vec::new();
|
||||
let mut rng = thread_rng();
|
||||
for _ in 0..no_of_leaves {
|
||||
leaves.push(Fr::rand(&mut rng));
|
||||
}
|
||||
|
||||
// We create a new RLN instance
|
||||
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
|
||||
|
||||
// We add leaves in a batch into the tree
|
||||
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
|
||||
rln.init_tree_with_leaves(&mut buffer).unwrap();
|
||||
|
||||
// Generate identity pair
|
||||
let (identity_secret_hash, id_commitment) = keygen();
|
||||
|
||||
// We set as leaf rate_commitment after storing its index
|
||||
let identity_index = rln.tree.leaves_set();
|
||||
let user_message_limit = Fr::from(100);
|
||||
let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);
|
||||
let mut buffer = Cursor::new(fr_to_bytes_le(&rate_commitment));
|
||||
rln.set_next_leaf(&mut buffer).unwrap();
|
||||
|
||||
// We generate a random signal
|
||||
let mut rng = rand::thread_rng();
|
||||
let signal: [u8; 32] = rng.gen();
|
||||
|
||||
// We generate a random epoch
|
||||
let epoch = hash_to_field(b"test-epoch");
|
||||
// We generate a random rln_identifier
|
||||
let rln_identifier = hash_to_field(b"test-rln-identifier");
|
||||
|
||||
// We prepare input for generate_rln_proof API
|
||||
// input_data is [ identity_secret<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
|
||||
let mut serialized: Vec<u8> = Vec::new();
|
||||
serialized.append(&mut fr_to_bytes_le(&identity_secret_hash));
|
||||
serialized.append(&mut normalize_usize(identity_index));
|
||||
serialized.append(&mut fr_to_bytes_le(&user_message_limit));
|
||||
serialized.append(&mut fr_to_bytes_le(&Fr::from(1)));
|
||||
serialized.append(&mut fr_to_bytes_le(&utils_poseidon_hash(&[
|
||||
epoch,
|
||||
rln_identifier,
|
||||
])));
|
||||
serialized.append(&mut normalize_usize(signal.len()));
|
||||
serialized.append(&mut signal.to_vec());
|
||||
|
||||
let mut input_buffer = Cursor::new(serialized);
|
||||
|
||||
// We read input RLN witness and we serialize_compressed it
|
||||
let mut witness_byte: Vec<u8> = Vec::new();
|
||||
input_buffer.read_to_end(&mut witness_byte).unwrap();
|
||||
let (rln_witness, _) = proof_inputs_to_rln_witness(&mut rln.tree, &witness_byte).unwrap();
|
||||
|
||||
let serialized_witness = serialize_witness(&rln_witness).unwrap();
|
||||
|
||||
// Calculate witness outside zerokit (simulating what JS is doing)
|
||||
let inputs = inputs_for_witness_calculation(&rln_witness)
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.map(|(name, values)| (name.to_string(), values));
|
||||
let calculated_witness = rln
|
||||
.witness_calculator
|
||||
.lock()
|
||||
.expect("witness_calculator mutex should not get poisoned")
|
||||
.calculate_witness_element::<Curve, _>(inputs, false)
|
||||
.map_err(ProofError::WitnessError)
|
||||
.unwrap();
|
||||
|
||||
let calculated_witness_vec: Vec<BigInt> = calculated_witness
|
||||
.into_iter()
|
||||
.map(|v| to_bigint(&v).unwrap())
|
||||
.collect();
|
||||
|
||||
// Generating the proof
|
||||
let mut output_buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.generate_rln_proof_with_witness(
|
||||
calculated_witness_vec,
|
||||
serialized_witness,
|
||||
&mut output_buffer,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// output_data is [ proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32> ]
|
||||
let mut proof_data = output_buffer.into_inner();
|
||||
|
||||
// We prepare input for verify_rln_proof API
|
||||
// input_data is [ proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32> | signal_len<8> | signal<var> ]
|
||||
// that is [ proof_data || signal_len<8> | signal<var> ]
|
||||
proof_data.append(&mut normalize_usize(signal.len()));
|
||||
proof_data.append(&mut signal.to_vec());
|
||||
|
||||
let mut input_buffer = Cursor::new(proof_data);
|
||||
let verified = rln.verify_rln_proof(&mut input_buffer).unwrap();
|
||||
|
||||
assert!(verified);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn proof_verification_with_roots() {
|
||||
// The first part is similar to test_rln_with_witness
|
||||
let tree_height = TEST_TREE_HEIGHT;
|
||||
let no_of_leaves = 256;
|
||||
|
||||
// We generate a vector of random leaves
|
||||
let mut leaves: Vec<Fr> = Vec::new();
|
||||
let mut rng = thread_rng();
|
||||
for _ in 0..no_of_leaves {
|
||||
leaves.push(Fr::rand(&mut rng));
|
||||
}
|
||||
|
||||
// We create a new RLN instance
|
||||
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
|
||||
|
||||
// We add leaves in a batch into the tree
|
||||
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
|
||||
rln.init_tree_with_leaves(&mut buffer).unwrap();
|
||||
|
||||
// Generate identity pair
|
||||
let (identity_secret_hash, id_commitment) = keygen();
|
||||
|
||||
// We set as leaf id_commitment after storing its index
|
||||
let identity_index = rln.tree.leaves_set();
|
||||
let user_message_limit = Fr::from(100);
|
||||
let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);
|
||||
let mut buffer = Cursor::new(fr_to_bytes_le(&rate_commitment));
|
||||
rln.set_next_leaf(&mut buffer).unwrap();
|
||||
|
||||
// We generate a random signal
|
||||
let mut rng = thread_rng();
|
||||
let signal: [u8; 32] = rng.gen();
|
||||
|
||||
// We generate a random epoch
|
||||
let epoch = hash_to_field(b"test-epoch");
|
||||
// We generate a random rln_identifier
|
||||
let rln_identifier = hash_to_field(b"test-rln-identifier");
|
||||
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
|
||||
|
||||
// We prepare input for generate_rln_proof API
|
||||
// input_data is [ identity_secret<32> | id_index<8> | external_nullifier<32> | user_message_limit<32> | message_id<32> | signal_len<8> | signal<var> ]
|
||||
let mut serialized: Vec<u8> = Vec::new();
|
||||
serialized.append(&mut fr_to_bytes_le(&identity_secret_hash));
|
||||
serialized.append(&mut normalize_usize(identity_index));
|
||||
serialized.append(&mut fr_to_bytes_le(&user_message_limit));
|
||||
serialized.append(&mut fr_to_bytes_le(&Fr::from(1)));
|
||||
serialized.append(&mut fr_to_bytes_le(&external_nullifier));
|
||||
serialized.append(&mut normalize_usize(signal.len()));
|
||||
serialized.append(&mut signal.to_vec());
|
||||
|
||||
let mut input_buffer = Cursor::new(serialized);
|
||||
let mut output_buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.generate_rln_proof(&mut input_buffer, &mut output_buffer)
|
||||
.unwrap();
|
||||
|
||||
// output_data is [ proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32> ]
|
||||
let mut proof_data = output_buffer.into_inner();
|
||||
|
||||
// We prepare input for verify_rln_proof API
|
||||
// input_data is [ proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32> | signal_len<8> | signal<var> ]
|
||||
// that is [ proof_data || signal_len<8> | signal<var> ]
|
||||
proof_data.append(&mut normalize_usize(signal.len()));
|
||||
proof_data.append(&mut signal.to_vec());
|
||||
let input_buffer = Cursor::new(proof_data);
|
||||
|
||||
// If no roots is provided, proof validation is skipped and if the remaining proof values are valid, the proof will be correctly verified
|
||||
let mut roots_serialized: Vec<u8> = Vec::new();
|
||||
let mut roots_buffer = Cursor::new(roots_serialized.clone());
|
||||
let verified = rln
|
||||
.verify_with_roots(&mut input_buffer.clone(), &mut roots_buffer)
|
||||
.unwrap();
|
||||
|
||||
assert!(verified);
|
||||
|
||||
// We serialize in the roots buffer some random values and we check that the proof is not verified since doesn't contain the correct root the proof refers to
|
||||
for _ in 0..5 {
|
||||
roots_serialized.append(&mut fr_to_bytes_le(&Fr::rand(&mut rng)));
|
||||
}
|
||||
roots_buffer = Cursor::new(roots_serialized.clone());
|
||||
let verified = rln
|
||||
.verify_with_roots(&mut input_buffer.clone(), &mut roots_buffer)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(verified, false);
|
||||
|
||||
// We get the root of the tree obtained adding one leaf per time
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
// We add the real root and we check if now the proof is verified
|
||||
roots_serialized.append(&mut fr_to_bytes_le(&root));
|
||||
roots_buffer = Cursor::new(roots_serialized.clone());
|
||||
let verified = rln
|
||||
.verify_with_roots(&mut input_buffer.clone(), &mut roots_buffer)
|
||||
.unwrap();
|
||||
|
||||
assert!(verified);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_recover_id_secret() {
|
||||
let tree_height = TEST_TREE_HEIGHT;
|
||||
|
||||
// We create a new RLN instance
|
||||
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
|
||||
|
||||
// Generate identity pair
|
||||
let (identity_secret_hash, id_commitment) = keygen();
|
||||
let user_message_limit = Fr::from(100);
|
||||
let message_id = Fr::from(0);
|
||||
let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);
|
||||
|
||||
// We set as leaf id_commitment after storing its index
|
||||
let identity_index = rln.tree.leaves_set();
|
||||
let mut buffer = Cursor::new(fr_to_bytes_le(&rate_commitment));
|
||||
rln.set_next_leaf(&mut buffer).unwrap();
|
||||
|
||||
// We generate two random signals
|
||||
let mut rng = rand::thread_rng();
|
||||
let signal1: [u8; 32] = rng.gen();
|
||||
|
||||
let signal2: [u8; 32] = rng.gen();
|
||||
|
||||
// We generate a random epoch
|
||||
let epoch = hash_to_field(b"test-epoch");
|
||||
// We generate a random rln_identifier
|
||||
let rln_identifier = hash_to_field(b"test-rln-identifier");
|
||||
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
|
||||
|
||||
// We generate two proofs using same epoch but different signals.
|
||||
|
||||
// We prepare input for generate_rln_proof API
|
||||
let mut serialized1: Vec<u8> = Vec::new();
|
||||
serialized1.append(&mut fr_to_bytes_le(&identity_secret_hash));
|
||||
serialized1.append(&mut normalize_usize(identity_index));
|
||||
serialized1.append(&mut fr_to_bytes_le(&user_message_limit));
|
||||
serialized1.append(&mut fr_to_bytes_le(&message_id));
|
||||
serialized1.append(&mut fr_to_bytes_le(&external_nullifier));
|
||||
|
||||
// The first part is the same for both proof input, so we clone
|
||||
let mut serialized2 = serialized1.clone();
|
||||
|
||||
// We attach the first signal to the first proof input
|
||||
serialized1.append(&mut normalize_usize(signal1.len()));
|
||||
serialized1.append(&mut signal1.to_vec());
|
||||
|
||||
// We attach the second signal to the first proof input
|
||||
serialized2.append(&mut normalize_usize(signal2.len()));
|
||||
serialized2.append(&mut signal2.to_vec());
|
||||
|
||||
// We generate the first proof
|
||||
let mut input_buffer = Cursor::new(serialized1);
|
||||
let mut output_buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.generate_rln_proof(&mut input_buffer, &mut output_buffer)
|
||||
.unwrap();
|
||||
let proof_data_1 = output_buffer.into_inner();
|
||||
|
||||
// We generate the second proof
|
||||
let mut input_buffer = Cursor::new(serialized2);
|
||||
let mut output_buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.generate_rln_proof(&mut input_buffer, &mut output_buffer)
|
||||
.unwrap();
|
||||
let proof_data_2 = output_buffer.into_inner();
|
||||
|
||||
let mut input_proof_data_1 = Cursor::new(proof_data_1.clone());
|
||||
let mut input_proof_data_2 = Cursor::new(proof_data_2);
|
||||
let mut output_buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.recover_id_secret(
|
||||
&mut input_proof_data_1,
|
||||
&mut input_proof_data_2,
|
||||
&mut output_buffer,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let serialized_identity_secret_hash = output_buffer.into_inner();
|
||||
|
||||
// We ensure that a non-empty value is written to output_buffer
|
||||
assert!(!serialized_identity_secret_hash.is_empty());
|
||||
|
||||
// We check if the recovered identity secret hash corresponds to the original one
|
||||
let (recovered_identity_secret_hash, _) = bytes_le_to_fr(&serialized_identity_secret_hash);
|
||||
assert_eq!(recovered_identity_secret_hash, identity_secret_hash);
|
||||
|
||||
// We now test that computing identity_secret_hash is unsuccessful if shares computed from two different identity secret hashes but within same epoch are passed
|
||||
|
||||
// We generate a new identity pair
|
||||
let (identity_secret_hash_new, id_commitment_new) = keygen();
|
||||
let rate_commitment_new = utils_poseidon_hash(&[id_commitment_new, user_message_limit]);
|
||||
|
||||
// We add it to the tree
|
||||
let identity_index_new = rln.tree.leaves_set();
|
||||
let mut buffer = Cursor::new(fr_to_bytes_le(&rate_commitment_new));
|
||||
rln.set_next_leaf(&mut buffer).unwrap();
|
||||
|
||||
// We generate a random signals
|
||||
let signal3: [u8; 32] = rng.gen();
|
||||
|
||||
// We prepare proof input. Note that epoch is the same as before
|
||||
let mut serialized3: Vec<u8> = Vec::new();
|
||||
serialized3.append(&mut fr_to_bytes_le(&identity_secret_hash_new));
|
||||
serialized3.append(&mut normalize_usize(identity_index_new));
|
||||
serialized3.append(&mut fr_to_bytes_le(&user_message_limit));
|
||||
serialized3.append(&mut fr_to_bytes_le(&message_id));
|
||||
serialized3.append(&mut fr_to_bytes_le(&external_nullifier));
|
||||
serialized3.append(&mut normalize_usize(signal3.len()));
|
||||
serialized3.append(&mut signal3.to_vec());
|
||||
|
||||
// We generate the proof
|
||||
let mut input_buffer = Cursor::new(serialized3);
|
||||
let mut output_buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.generate_rln_proof(&mut input_buffer, &mut output_buffer)
|
||||
.unwrap();
|
||||
let proof_data_3 = output_buffer.into_inner();
|
||||
|
||||
// We attempt to recover the secret using share1 (coming from identity_secret_hash) and share3 (coming from identity_secret_hash_new)
|
||||
|
||||
let mut input_proof_data_1 = Cursor::new(proof_data_1.clone());
|
||||
let mut input_proof_data_3 = Cursor::new(proof_data_3);
|
||||
let mut output_buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.recover_id_secret(
|
||||
&mut input_proof_data_1,
|
||||
&mut input_proof_data_3,
|
||||
&mut output_buffer,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let serialized_identity_secret_hash = output_buffer.into_inner();
|
||||
let (recovered_identity_secret_hash_new, _) = bytes_le_to_fr(&serialized_identity_secret_hash);
|
||||
|
||||
// ensure that the recovered secret does not match with either of the
|
||||
// used secrets in proof generation
|
||||
assert_ne!(recovered_identity_secret_hash_new, identity_secret_hash_new);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_leaf() {
|
||||
// We generate a random tree
|
||||
let tree_height = 10;
|
||||
let mut rng = thread_rng();
|
||||
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
|
||||
|
||||
// We generate a random leaf
|
||||
let leaf = Fr::rand(&mut rng);
|
||||
|
||||
// We generate a random index
|
||||
let index = rng.gen_range(0..rln.tree.capacity());
|
||||
|
||||
// We add the leaf to the tree
|
||||
let mut buffer = Cursor::new(fr_to_bytes_le(&leaf));
|
||||
rln.set_leaf(index, &mut buffer).unwrap();
|
||||
|
||||
// We get the leaf
|
||||
let mut output_buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_leaf(index, &mut output_buffer).unwrap();
|
||||
|
||||
// We ensure that the leaf is the same as the one we added
|
||||
let (received_leaf, _) = bytes_le_to_fr(output_buffer.into_inner().as_ref());
|
||||
assert_eq!(received_leaf, leaf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_valid_metadata() {
|
||||
let tree_height = TEST_TREE_HEIGHT;
|
||||
|
||||
let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
|
||||
|
||||
let arbitrary_metadata: &[u8] = b"block_number:200000";
|
||||
rln.set_metadata(arbitrary_metadata).unwrap();
|
||||
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_metadata(&mut buffer).unwrap();
|
||||
let received_metadata = buffer.into_inner();
|
||||
|
||||
assert_eq!(arbitrary_metadata, received_metadata);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_metadata() {
|
||||
let tree_height = TEST_TREE_HEIGHT;
|
||||
|
||||
let rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
|
||||
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_metadata(&mut buffer).unwrap();
|
||||
let received_metadata = buffer.into_inner();
|
||||
|
||||
assert_eq!(received_metadata.len(), 0);
|
||||
}
|
||||
120
rln/src/utils.rs
120
rln/src/utils.rs
@@ -1,30 +1,28 @@
|
||||
// This crate provides cross-module useful utilities (mainly type conversions) not necessarily specific to RLN
|
||||
|
||||
use crate::circuit::Fr;
|
||||
use ark_ff::{BigInteger, FpParameters, PrimeField};
|
||||
use ark_ff::PrimeField;
|
||||
use color_eyre::{Report, Result};
|
||||
use num_bigint::{BigInt, BigUint};
|
||||
use num_traits::Num;
|
||||
use serde_json::json;
|
||||
use std::io::Cursor;
|
||||
use std::iter::Extend;
|
||||
|
||||
pub fn modulus_bit_size() -> usize {
|
||||
<Fr as PrimeField>::Params::MODULUS
|
||||
.num_bits()
|
||||
.try_into()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub fn to_bigint(el: &Fr) -> BigInt {
|
||||
let res: BigUint = (*el).try_into().unwrap();
|
||||
res.try_into().unwrap()
|
||||
pub fn to_bigint(el: &Fr) -> Result<BigInt> {
|
||||
let res: BigUint = (*el).into();
|
||||
Ok(res.into())
|
||||
}
|
||||
|
||||
pub fn fr_byte_size() -> usize {
|
||||
let mbs = modulus_bit_size();
|
||||
(mbs + 64 - (mbs % 64)) / 8
|
||||
let mbs = <Fr as PrimeField>::MODULUS_BIT_SIZE;
|
||||
((mbs + 64 - (mbs % 64)) / 8) as usize
|
||||
}
|
||||
|
||||
pub fn str_to_fr(input: &str, radix: u32) -> Fr {
|
||||
assert!((radix == 10) || (radix == 16));
|
||||
pub fn str_to_fr(input: &str, radix: u32) -> Result<Fr> {
|
||||
if !(radix == 10 || radix == 16) {
|
||||
return Err(Report::msg("wrong radix"));
|
||||
}
|
||||
|
||||
// We remove any quote present and we trim
|
||||
let single_quote: char = '\"';
|
||||
@@ -32,16 +30,10 @@ pub fn str_to_fr(input: &str, radix: u32) -> Fr {
|
||||
input_clean = input_clean.trim().to_string();
|
||||
|
||||
if radix == 10 {
|
||||
BigUint::from_str_radix(&input_clean, radix)
|
||||
.unwrap()
|
||||
.try_into()
|
||||
.unwrap()
|
||||
Ok(BigUint::from_str_radix(&input_clean, radix)?.into())
|
||||
} else {
|
||||
input_clean = input_clean.replace("0x", "");
|
||||
BigUint::from_str_radix(&input_clean, radix)
|
||||
.unwrap()
|
||||
.try_into()
|
||||
.unwrap()
|
||||
Ok(BigUint::from_str_radix(&input_clean, radix)?.into())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -82,99 +74,129 @@ pub fn fr_to_bytes_be(input: &Fr) -> Vec<u8> {
|
||||
res
|
||||
}
|
||||
|
||||
pub fn vec_fr_to_bytes_le(input: &[Fr]) -> Vec<u8> {
|
||||
pub fn vec_fr_to_bytes_le(input: &[Fr]) -> Result<Vec<u8>> {
|
||||
let mut bytes: Vec<u8> = Vec::new();
|
||||
//We store the vector length
|
||||
bytes.extend(input.len().to_le_bytes().to_vec());
|
||||
bytes.extend(u64::try_from(input.len())?.to_le_bytes().to_vec());
|
||||
|
||||
// We store each element
|
||||
input.iter().for_each(|el| bytes.extend(fr_to_bytes_le(el)));
|
||||
|
||||
bytes
|
||||
Ok(bytes)
|
||||
}
|
||||
|
||||
pub fn vec_fr_to_bytes_be(input: &[Fr]) -> Vec<u8> {
|
||||
pub fn vec_fr_to_bytes_be(input: &[Fr]) -> Result<Vec<u8>> {
|
||||
let mut bytes: Vec<u8> = Vec::new();
|
||||
//We store the vector length
|
||||
bytes.extend(input.len().to_be_bytes().to_vec());
|
||||
bytes.extend(u64::try_from(input.len())?.to_be_bytes().to_vec());
|
||||
|
||||
// We store each element
|
||||
input.iter().for_each(|el| bytes.extend(fr_to_bytes_be(el)));
|
||||
|
||||
bytes
|
||||
Ok(bytes)
|
||||
}
|
||||
|
||||
pub fn vec_u8_to_bytes_le(input: &[u8]) -> Vec<u8> {
|
||||
pub fn vec_u8_to_bytes_le(input: &[u8]) -> Result<Vec<u8>> {
|
||||
let mut bytes: Vec<u8> = Vec::new();
|
||||
//We store the vector length
|
||||
bytes.extend(u64::try_from(input.len()).unwrap().to_le_bytes().to_vec());
|
||||
bytes.extend(u64::try_from(input.len())?.to_le_bytes().to_vec());
|
||||
|
||||
bytes.extend(input);
|
||||
bytes
|
||||
|
||||
Ok(bytes)
|
||||
}
|
||||
|
||||
pub fn vec_u8_to_bytes_be(input: Vec<u8>) -> Vec<u8> {
|
||||
pub fn vec_u8_to_bytes_be(input: Vec<u8>) -> Result<Vec<u8>> {
|
||||
let mut bytes: Vec<u8> = Vec::new();
|
||||
//We store the vector length
|
||||
bytes.extend(u64::try_from(input.len()).unwrap().to_be_bytes().to_vec());
|
||||
bytes.extend(u64::try_from(input.len())?.to_be_bytes().to_vec());
|
||||
|
||||
bytes.extend(input);
|
||||
bytes
|
||||
|
||||
Ok(bytes)
|
||||
}
|
||||
|
||||
pub fn bytes_le_to_vec_u8(input: &[u8]) -> (Vec<u8>, usize) {
|
||||
pub fn bytes_le_to_vec_u8(input: &[u8]) -> Result<(Vec<u8>, usize)> {
|
||||
let mut read: usize = 0;
|
||||
|
||||
let len = usize::try_from(u64::from_le_bytes(input[0..8].try_into().unwrap())).unwrap();
|
||||
let len = usize::try_from(u64::from_le_bytes(input[0..8].try_into()?))?;
|
||||
read += 8;
|
||||
|
||||
let res = input[8..8 + len].to_vec();
|
||||
read += res.len();
|
||||
|
||||
(res, read)
|
||||
Ok((res, read))
|
||||
}
|
||||
|
||||
pub fn bytes_be_to_vec_u8(input: &[u8]) -> (Vec<u8>, usize) {
|
||||
pub fn bytes_be_to_vec_u8(input: &[u8]) -> Result<(Vec<u8>, usize)> {
|
||||
let mut read: usize = 0;
|
||||
|
||||
let len = usize::try_from(u64::from_be_bytes(input[0..8].try_into().unwrap())).unwrap();
|
||||
let len = usize::try_from(u64::from_be_bytes(input[0..8].try_into()?))?;
|
||||
read += 8;
|
||||
|
||||
let res = input[8..8 + len].to_vec();
|
||||
|
||||
read += res.len();
|
||||
|
||||
(res, read)
|
||||
Ok((res, read))
|
||||
}
|
||||
|
||||
pub fn bytes_le_to_vec_fr(input: &[u8]) -> (Vec<Fr>, usize) {
|
||||
pub fn bytes_le_to_vec_fr(input: &[u8]) -> Result<(Vec<Fr>, usize)> {
|
||||
let mut read: usize = 0;
|
||||
let mut res: Vec<Fr> = Vec::new();
|
||||
|
||||
let len = usize::try_from(u64::from_le_bytes(input[0..8].try_into().unwrap())).unwrap();
|
||||
let len = usize::try_from(u64::from_le_bytes(input[0..8].try_into()?))?;
|
||||
read += 8;
|
||||
|
||||
let el_size = fr_byte_size();
|
||||
for i in 0..len {
|
||||
let (curr_el, _) = bytes_le_to_fr(&input[8 + el_size * i..8 + el_size * (i + 1)].to_vec());
|
||||
let (curr_el, _) = bytes_le_to_fr(&input[8 + el_size * i..8 + el_size * (i + 1)]);
|
||||
res.push(curr_el);
|
||||
read += el_size;
|
||||
}
|
||||
|
||||
(res, read)
|
||||
Ok((res, read))
|
||||
}
|
||||
|
||||
pub fn bytes_be_to_vec_fr(input: &[u8]) -> (Vec<Fr>, usize) {
|
||||
pub fn bytes_be_to_vec_fr(input: &[u8]) -> Result<(Vec<Fr>, usize)> {
|
||||
let mut read: usize = 0;
|
||||
let mut res: Vec<Fr> = Vec::new();
|
||||
|
||||
let len = usize::try_from(u64::from_be_bytes(input[0..8].try_into().unwrap())).unwrap();
|
||||
let len = usize::try_from(u64::from_be_bytes(input[0..8].try_into()?))?;
|
||||
read += 8;
|
||||
|
||||
let el_size = fr_byte_size();
|
||||
for i in 0..len {
|
||||
let (curr_el, _) = bytes_be_to_fr(&input[8 + el_size * i..8 + el_size * (i + 1)].to_vec());
|
||||
let (curr_el, _) = bytes_be_to_fr(&input[8 + el_size * i..8 + el_size * (i + 1)]);
|
||||
res.push(curr_el);
|
||||
read += el_size;
|
||||
}
|
||||
|
||||
(res, read)
|
||||
Ok((res, read))
|
||||
}
|
||||
|
||||
pub fn normalize_usize(input: usize) -> Vec<u8> {
|
||||
let mut normalized_usize = input.to_le_bytes().to_vec();
|
||||
normalized_usize.resize(8, 0);
|
||||
normalized_usize
|
||||
}
|
||||
|
||||
pub fn bytes_le_to_vec_usize(input: &[u8]) -> Result<Vec<usize>> {
|
||||
let nof_elem = usize::try_from(u64::from_le_bytes(input[0..8].try_into()?))?;
|
||||
if nof_elem == 0 {
|
||||
Ok(vec![])
|
||||
} else {
|
||||
let elements: Vec<usize> = input[8..]
|
||||
.chunks(8)
|
||||
.map(|ch| usize::from_le_bytes(ch[0..8].try_into().unwrap()))
|
||||
.collect();
|
||||
Ok(elements)
|
||||
}
|
||||
}
|
||||
|
||||
// using for test
|
||||
pub fn generate_input_buffer() -> Cursor<String> {
|
||||
Cursor::new(json!({}).to_string())
|
||||
}
|
||||
|
||||
/* Old conversion utilities between different libraries data types
|
||||
|
||||
980
rln/tests/ffi.rs
Normal file
980
rln/tests/ffi.rs
Normal file
@@ -0,0 +1,980 @@
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use ark_std::{rand::thread_rng, UniformRand};
|
||||
use rand::Rng;
|
||||
use rln::circuit::*;
|
||||
use rln::ffi::{hash as ffi_hash, poseidon_hash as ffi_poseidon_hash, *};
|
||||
use rln::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS};
|
||||
use rln::protocol::*;
|
||||
use rln::public::RLN;
|
||||
use rln::utils::*;
|
||||
use serde_json::json;
|
||||
use std::fs::File;
|
||||
use std::io::Read;
|
||||
use std::mem::MaybeUninit;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
const NO_OF_LEAVES: usize = 256;
|
||||
|
||||
fn create_rln_instance() -> &'static mut RLN<'static> {
|
||||
let mut rln_pointer = MaybeUninit::<*mut RLN>::uninit();
|
||||
let input_config = json!({}).to_string();
|
||||
let input_buffer = &Buffer::from(input_config.as_bytes());
|
||||
let success = new(TEST_TREE_HEIGHT, input_buffer, rln_pointer.as_mut_ptr());
|
||||
assert!(success, "RLN object creation failed");
|
||||
unsafe { &mut *rln_pointer.assume_init() }
|
||||
}
|
||||
|
||||
fn set_leaves_init(rln_pointer: &mut RLN, leaves: &[Fr]) {
|
||||
let leaves_ser = vec_fr_to_bytes_le(&leaves).unwrap();
|
||||
let input_buffer = &Buffer::from(leaves_ser.as_ref());
|
||||
let success = init_tree_with_leaves(rln_pointer, input_buffer);
|
||||
assert!(success, "init tree with leaves call failed");
|
||||
assert_eq!(rln_pointer.leaves_set(), leaves.len());
|
||||
}
|
||||
|
||||
fn get_random_leaves() -> Vec<Fr> {
|
||||
let mut rng = thread_rng();
|
||||
(0..NO_OF_LEAVES).map(|_| Fr::rand(&mut rng)).collect()
|
||||
}
|
||||
|
||||
fn get_tree_root(rln_pointer: &mut RLN) -> Fr {
|
||||
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
|
||||
let success = get_root(rln_pointer, output_buffer.as_mut_ptr());
|
||||
assert!(success, "get root call failed");
|
||||
let output_buffer = unsafe { output_buffer.assume_init() };
|
||||
let result_data = <&[u8]>::from(&output_buffer).to_vec();
|
||||
let (root, _) = bytes_le_to_fr(&result_data);
|
||||
root
|
||||
}
|
||||
|
||||
fn identity_pair_gen(rln_pointer: &mut RLN) -> (Fr, Fr) {
|
||||
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
|
||||
let success = key_gen(rln_pointer, output_buffer.as_mut_ptr());
|
||||
assert!(success, "key gen call failed");
|
||||
let output_buffer = unsafe { output_buffer.assume_init() };
|
||||
let result_data = <&[u8]>::from(&output_buffer).to_vec();
|
||||
let (identity_secret_hash, read) = bytes_le_to_fr(&result_data);
|
||||
let (id_commitment, _) = bytes_le_to_fr(&result_data[read..].to_vec());
|
||||
(identity_secret_hash, id_commitment)
|
||||
}
|
||||
|
||||
fn rln_proof_gen(rln_pointer: &mut RLN, serialized: &[u8]) -> Vec<u8> {
|
||||
let input_buffer = &Buffer::from(serialized);
|
||||
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
|
||||
let success = generate_rln_proof(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
|
||||
assert!(success, "generate rln proof call failed");
|
||||
let output_buffer = unsafe { output_buffer.assume_init() };
|
||||
<&[u8]>::from(&output_buffer).to_vec()
|
||||
}
|
||||
|
||||
#[test]
|
||||
// We test merkle batch Merkle tree additions
|
||||
fn test_merkle_operations_ffi() {
|
||||
// We generate a vector of random leaves
|
||||
let leaves = get_random_leaves();
|
||||
// We create a RLN instance
|
||||
let rln_pointer = create_rln_instance();
|
||||
|
||||
// We first add leaves one by one specifying the index
|
||||
for (i, leaf) in leaves.iter().enumerate() {
|
||||
// We prepare the rate_commitment and we set the leaf at provided index
|
||||
let leaf_ser = fr_to_bytes_le(&leaf);
|
||||
let input_buffer = &Buffer::from(leaf_ser.as_ref());
|
||||
let success = set_leaf(rln_pointer, i, input_buffer);
|
||||
assert!(success, "set leaf call failed");
|
||||
}
|
||||
|
||||
// We get the root of the tree obtained adding one leaf per time
|
||||
let root_single = get_tree_root(rln_pointer);
|
||||
|
||||
// We reset the tree to default
|
||||
let success = set_tree(rln_pointer, TEST_TREE_HEIGHT);
|
||||
assert!(success, "set tree call failed");
|
||||
|
||||
// We add leaves one by one using the internal index (new leaves goes in next available position)
|
||||
for leaf in &leaves {
|
||||
let leaf_ser = fr_to_bytes_le(&leaf);
|
||||
let input_buffer = &Buffer::from(leaf_ser.as_ref());
|
||||
let success = set_next_leaf(rln_pointer, input_buffer);
|
||||
assert!(success, "set next leaf call failed");
|
||||
}
|
||||
|
||||
// We get the root of the tree obtained adding leaves using the internal index
|
||||
let root_next = get_tree_root(rln_pointer);
|
||||
|
||||
// We check if roots are the same
|
||||
assert_eq!(root_single, root_next);
|
||||
|
||||
// We reset the tree to default
|
||||
let success = set_tree(rln_pointer, TEST_TREE_HEIGHT);
|
||||
assert!(success, "set tree call failed");
|
||||
|
||||
// We add leaves in a batch into the tree
|
||||
set_leaves_init(rln_pointer, &leaves);
|
||||
|
||||
// We get the root of the tree obtained adding leaves in batch
|
||||
let root_batch = get_tree_root(rln_pointer);
|
||||
|
||||
// We check if roots are the same
|
||||
assert_eq!(root_single, root_batch);
|
||||
|
||||
// We now delete all leaves set and check if the root corresponds to the empty tree root
|
||||
// delete calls over indexes higher than no_of_leaves are ignored and will not increase self.tree.next_index
|
||||
for i in 0..NO_OF_LEAVES {
|
||||
let success = delete_leaf(rln_pointer, i);
|
||||
assert!(success, "delete leaf call failed");
|
||||
}
|
||||
|
||||
// We get the root of the tree obtained deleting all leaves
|
||||
let root_delete = get_tree_root(rln_pointer);
|
||||
|
||||
// We reset the tree to default
|
||||
let success = set_tree(rln_pointer, TEST_TREE_HEIGHT);
|
||||
assert!(success, "set tree call failed");
|
||||
|
||||
// We get the root of the empty tree
|
||||
let root_empty = get_tree_root(rln_pointer);
|
||||
|
||||
// We check if roots are the same
|
||||
assert_eq!(root_delete, root_empty);
|
||||
}
|
||||
|
||||
#[test]
|
||||
// This test is similar to the one in public.rs but it uses the RLN object as a pointer
|
||||
// Uses `set_leaves_from` to set leaves in a batch
|
||||
fn test_leaf_setting_with_index_ffi() {
|
||||
// We create a RLN instance
|
||||
let rln_pointer = create_rln_instance();
|
||||
assert_eq!(rln_pointer.leaves_set(), 0);
|
||||
|
||||
// We generate a vector of random leaves
|
||||
let leaves = get_random_leaves();
|
||||
|
||||
// set_index is the index from which we start setting leaves
|
||||
// random number between 0..no_of_leaves
|
||||
let mut rng = thread_rng();
|
||||
let set_index = rng.gen_range(0..NO_OF_LEAVES) as usize;
|
||||
|
||||
// We add leaves in a batch into the tree
|
||||
set_leaves_init(rln_pointer, &leaves);
|
||||
|
||||
// We get the root of the tree obtained adding leaves in batch
|
||||
let root_batch_with_init = get_tree_root(rln_pointer);
|
||||
|
||||
// `init_tree_with_leaves` resets the tree to the height it was initialized with, using `set_tree`
|
||||
|
||||
// We add leaves in a batch starting from index 0..set_index
|
||||
set_leaves_init(rln_pointer, &leaves[0..set_index]);
|
||||
|
||||
// We add the remaining n leaves in a batch starting from index set_index
|
||||
let leaves_n = vec_fr_to_bytes_le(&leaves[set_index..]).unwrap();
|
||||
let buffer = &Buffer::from(leaves_n.as_ref());
|
||||
let success = set_leaves_from(rln_pointer, set_index, buffer);
|
||||
assert!(success, "set leaves from call failed");
|
||||
|
||||
// We get the root of the tree obtained adding leaves in batch
|
||||
let root_batch_with_custom_index = get_tree_root(rln_pointer);
|
||||
assert_eq!(root_batch_with_init, root_batch_with_custom_index);
|
||||
|
||||
// We reset the tree to default
|
||||
let success = set_tree(rln_pointer, TEST_TREE_HEIGHT);
|
||||
assert!(success, "set tree call failed");
|
||||
|
||||
// We add leaves one by one using the internal index (new leaves goes in next available position)
|
||||
for leaf in &leaves {
|
||||
let leaf_ser = fr_to_bytes_le(&leaf);
|
||||
let input_buffer = &Buffer::from(leaf_ser.as_ref());
|
||||
let success = set_next_leaf(rln_pointer, input_buffer);
|
||||
assert!(success, "set next leaf call failed");
|
||||
}
|
||||
|
||||
// We get the root of the tree obtained adding leaves using the internal index
|
||||
let root_single_additions = get_tree_root(rln_pointer);
|
||||
assert_eq!(root_batch_with_init, root_single_additions);
|
||||
}
|
||||
|
||||
#[test]
|
||||
// This test is similar to the one in public.rs but it uses the RLN object as a pointer
|
||||
fn test_atomic_operation_ffi() {
|
||||
// We generate a vector of random leaves
|
||||
let leaves = get_random_leaves();
|
||||
// We create a RLN instance
|
||||
let rln_pointer = create_rln_instance();
|
||||
|
||||
// We add leaves in a batch into the tree
|
||||
set_leaves_init(rln_pointer, &leaves);
|
||||
|
||||
// We get the root of the tree obtained adding leaves in batch
|
||||
let root_after_insertion = get_tree_root(rln_pointer);
|
||||
|
||||
let last_leaf = leaves.last().unwrap();
|
||||
let last_leaf_index = NO_OF_LEAVES - 1;
|
||||
let indices = vec![last_leaf_index as u8];
|
||||
let last_leaf = vec![*last_leaf];
|
||||
let indices = vec_u8_to_bytes_le(&indices).unwrap();
|
||||
let indices_buffer = &Buffer::from(indices.as_ref());
|
||||
let leaves = vec_fr_to_bytes_le(&last_leaf).unwrap();
|
||||
let leaves_buffer = &Buffer::from(leaves.as_ref());
|
||||
|
||||
let success = atomic_operation(
|
||||
rln_pointer,
|
||||
last_leaf_index as usize,
|
||||
leaves_buffer,
|
||||
indices_buffer,
|
||||
);
|
||||
assert!(success, "atomic operation call failed");
|
||||
|
||||
// We get the root of the tree obtained after a no-op
|
||||
let root_after_noop = get_tree_root(rln_pointer);
|
||||
assert_eq!(root_after_insertion, root_after_noop);
|
||||
}
|
||||
|
||||
#[test]
|
||||
// This test is similar to the one in public.rs but it uses the RLN object as a pointer
|
||||
fn test_set_leaves_bad_index_ffi() {
|
||||
// We generate a vector of random leaves
|
||||
let leaves = get_random_leaves();
|
||||
// We create a RLN instance
|
||||
let rln_pointer = create_rln_instance();
|
||||
|
||||
let mut rng = thread_rng();
|
||||
let bad_index = (1 << TEST_TREE_HEIGHT) - rng.gen_range(0..NO_OF_LEAVES) as usize;
|
||||
|
||||
// Get root of empty tree
|
||||
let root_empty = get_tree_root(rln_pointer);
|
||||
|
||||
// We add leaves in a batch into the tree
|
||||
let leaves = vec_fr_to_bytes_le(&leaves).unwrap();
|
||||
let buffer = &Buffer::from(leaves.as_ref());
|
||||
let success = set_leaves_from(rln_pointer, bad_index, buffer);
|
||||
assert!(!success, "set leaves from call succeeded");
|
||||
|
||||
// Get root of tree after attempted set
|
||||
let root_after_bad_set = get_tree_root(rln_pointer);
|
||||
assert_eq!(root_empty, root_after_bad_set);
|
||||
}
|
||||
|
||||
#[test]
// This test is similar to the one in lib, but uses only the public C API.
// It sets a single leaf, checks the tree root against a known-good value,
// fetches the Merkle proof for that leaf, and verifies both the proof
// contents and the root recomputed from the proof.
fn test_merkle_proof_ffi() {
    let leaf_index = 3;

    // We create a RLN instance
    let rln_pointer = create_rln_instance();

    // generate identity
    let identity_secret_hash = hash_to_field(b"test-merkle-proof");
    let id_commitment = utils_poseidon_hash(&[identity_secret_hash]);
    let user_message_limit = Fr::from(100);
    let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);

    // We prepare rate_commitment and we set the leaf at provided index
    let leaf_ser = fr_to_bytes_le(&rate_commitment);
    let input_buffer = &Buffer::from(leaf_ser.as_ref());
    let success = set_leaf(rln_pointer, leaf_index, input_buffer);
    assert!(success, "set leaf call failed");

    // We obtain the Merkle tree root and compare it with a hard-coded
    // expected value for this tree height / leaf configuration
    let root = get_tree_root(rln_pointer);

    use ark_ff::BigInt;
    assert_eq!(
        root,
        BigInt([
            4939322235247991215,
            5110804094006647505,
            4427606543677101242,
            910933464535675827
        ])
        .into()
    );

    // We obtain the Merkle proof for the leaf at leaf_index
    let mut output_buffer = MaybeUninit::<Buffer>::uninit();
    let success = get_proof(rln_pointer, leaf_index, output_buffer.as_mut_ptr());
    assert!(success, "get merkle proof call failed");
    let output_buffer = unsafe { output_buffer.assume_init() };
    let result_data = <&[u8]>::from(&output_buffer).to_vec();

    // Proof serialization is [ path_elements<var> | identity_path_index<var> ];
    // `read` is the number of bytes consumed by the path elements
    let (path_elements, read) = bytes_le_to_vec_fr(&result_data).unwrap();
    let (identity_path_index, _) = bytes_le_to_vec_u8(&result_data[read..].to_vec()).unwrap();

    // We check correct computation of the path and indexes
    let expected_path_elements: Vec<Fr> = [
        "0x0000000000000000000000000000000000000000000000000000000000000000",
        "0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864",
        "0x1069673dcdb12263df301a6ff584a7ec261a44cb9dc68df067a4774460b1f1e1",
        "0x18f43331537ee2af2e3d758d50f72106467c6eea50371dd528d57eb2b856d238",
        "0x07f9d837cb17b0d36320ffe93ba52345f1b728571a568265caac97559dbc952a",
        "0x2b94cf5e8746b3f5c9631f4c5df32907a699c58c94b2ad4d7b5cec1639183f55",
        "0x2dee93c5a666459646ea7d22cca9e1bcfed71e6951b953611d11dda32ea09d78",
        "0x078295e5a22b84e982cf601eb639597b8b0515a88cb5ac7fa8a4aabe3c87349d",
        "0x2fa5e5f18f6027a6501bec864564472a616b2e274a41211a444cbe3a99f3cc61",
        "0x0e884376d0d8fd21ecb780389e941f66e45e7acce3e228ab3e2156a614fcd747",
        "0x1b7201da72494f1e28717ad1a52eb469f95892f957713533de6175e5da190af2",
        "0x1f8d8822725e36385200c0b201249819a6e6e1e4650808b5bebc6bface7d7636",
        "0x2c5d82f66c914bafb9701589ba8cfcfb6162b0a12acf88a8d0879a0471b5f85a",
        "0x14c54148a0940bb820957f5adf3fa1134ef5c4aaa113f4646458f270e0bfbfd0",
        "0x190d33b12f986f961e10c0ee44d8b9af11be25588cad89d416118e4bf4ebe80c",
        "0x22f98aa9ce704152ac17354914ad73ed1167ae6596af510aa5b3649325e06c92",
        "0x2a7c7c9b6ce5880b9f6f228d72bf6a575a526f29c66ecceef8b753d38bba7323",
        "0x2e8186e558698ec1c67af9c14d463ffc470043c9c2988b954d75dd643f36b992",
        "0x0f57c5571e9a4eab49e2c8cf050dae948aef6ead647392273546249d1c1ff10f",
        "0x1830ee67b5fb554ad5f63d4388800e1cfe78e310697d46e43c9ce36134f72cca",
    ]
    .map(|e| str_to_fr(e, 16).unwrap())
    .to_vec();

    let expected_identity_path_index: Vec<u8> =
        vec![1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];

    assert_eq!(path_elements, expected_path_elements);
    assert_eq!(identity_path_index, expected_identity_path_index);

    // We double check that the proof computed from public API is correct
    let root_from_proof = compute_tree_root(
        &identity_secret_hash,
        &user_message_limit,
        &path_elements,
        &identity_path_index,
    );

    assert_eq!(root, root_from_proof);
}
|
||||
|
||||
#[test]
// Benchmarks proof generation and verification through the public C API.
// For each sample it builds a random witness, times `prove`, then times
// `verify` on the produced proof, and prints the average of each.
fn test_groth16_proofs_performance_ffi() {
    // We create a RLN instance
    let rln_pointer = create_rln_instance();

    // We compute some benchmarks regarding proof and verify API calls.
    // Note that circuit loading requires some initial overhead.
    // Once the circuit is loaded (i.e., when the RLN object is created),
    // proof generation and verification times should be similar at each call.
    let sample_size = 100;
    let mut prove_time: u128 = 0;
    let mut verify_time: u128 = 0;

    for _ in 0..sample_size {
        // We generate random witness instances and relative proof values
        let rln_witness = random_rln_witness(TEST_TREE_HEIGHT);
        let proof_values = proof_values_from_witness(&rln_witness).unwrap();

        // We serialize the witness and time the prove call
        let rln_witness_ser = serialize_witness(&rln_witness).unwrap();
        let input_buffer = &Buffer::from(rln_witness_ser.as_ref());
        let mut output_buffer = MaybeUninit::<Buffer>::uninit();
        let now = Instant::now();
        let success = prove(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
        prove_time += now.elapsed().as_nanos();
        assert!(success, "prove call failed");
        let output_buffer = unsafe { output_buffer.assume_init() };

        // We read the returned proof and we append proof values for verify
        let serialized_proof = <&[u8]>::from(&output_buffer).to_vec();
        let serialized_proof_values = serialize_proof_values(&proof_values);
        let mut verify_data = Vec::<u8>::new();
        verify_data.extend(&serialized_proof);
        verify_data.extend(&serialized_proof_values);

        // We prepare input proof values and time the verify call
        let input_buffer = &Buffer::from(verify_data.as_ref());
        let mut proof_is_valid: bool = false;
        let proof_is_valid_ptr = &mut proof_is_valid as *mut bool;
        let now = Instant::now();
        let success = verify(rln_pointer, input_buffer, proof_is_valid_ptr);
        verify_time += now.elapsed().as_nanos();
        assert!(success, "verify call failed");
        // assert! instead of assert_eq!(x, true): clippy::bool_assert_comparison
        assert!(proof_is_valid);
    }

    println!(
        "Average prove API call time: {:?}",
        Duration::from_nanos((prove_time / sample_size).try_into().unwrap())
    );
    println!(
        "Average verify API call time: {:?}",
        Duration::from_nanos((verify_time / sample_size).try_into().unwrap())
    );
}
|
||||
|
||||
#[test]
// Creating a RLN with raw data should generate same results as using a path to resources
fn test_rln_raw_ffi() {
    // We create a RLN instance from the default resource folder
    let rln_pointer = create_rln_instance();

    // We obtain the root from the RLN instance
    let root_rln_folder = get_tree_root(rln_pointer);

    // Read the raw resource files needed to instantiate an RLN instance
    // from in-memory data. `std::fs::read` sizes the buffer from file
    // metadata, replacing the manual open/metadata/read_exact sequence
    // (whose `expect("buffer overflow")` message was also misleading:
    // a short read is an I/O failure, not an overflow).
    let circom_path = "./resources/tree_height_20/rln.wasm";
    let circom_buffer = std::fs::read(circom_path).expect("failed to read circom wasm");

    #[cfg(feature = "arkzkey")]
    let zkey_path = "./resources/tree_height_20/rln_final.arkzkey";
    #[cfg(not(feature = "arkzkey"))]
    let zkey_path = "./resources/tree_height_20/rln_final.zkey";
    let zkey_buffer = std::fs::read(zkey_path).expect("failed to read zkey");

    let vk_path = "./resources/tree_height_20/verification_key.json";
    let vk_buffer = std::fs::read(vk_path).expect("failed to read verification key");

    let circom_data = &Buffer::from(&circom_buffer[..]);
    let zkey_data = &Buffer::from(&zkey_buffer[..]);
    let vk_data = &Buffer::from(&vk_buffer[..]);

    // Creating a RLN instance passing the raw data
    let mut rln_pointer_raw_bytes = MaybeUninit::<*mut RLN>::uninit();
    let tree_config = "".to_string();
    let tree_config_buffer = &Buffer::from(tree_config.as_bytes());
    let success = new_with_params(
        TEST_TREE_HEIGHT,
        circom_data,
        zkey_data,
        vk_data,
        tree_config_buffer,
        rln_pointer_raw_bytes.as_mut_ptr(),
    );
    assert!(success, "RLN object creation failed");
    let rln_pointer2 = unsafe { &mut *rln_pointer_raw_bytes.assume_init() };

    // We obtain the root from the RLN instance containing raw data
    // and check that the same root was generated
    let root_rln_raw = get_tree_root(rln_pointer2);
    assert_eq!(root_rln_folder, root_rln_raw);
}
|
||||
|
||||
#[test]
// Computes and verifies an RLN ZK proof using FFI APIs:
// fills the tree with random leaves, registers a fresh identity as the
// next leaf, generates a proof over a random signal, and verifies it.
fn test_rln_proof_ffi() {
    let user_message_limit = Fr::from(100);

    // We generate a vector of random leaves
    let mut rng = thread_rng();
    let leaves: Vec<Fr> = (0..NO_OF_LEAVES)
        .map(|_| utils_poseidon_hash(&[Fr::rand(&mut rng), Fr::from(100)]))
        .collect();

    // We create a RLN instance
    let rln_pointer = create_rln_instance();

    // We add leaves in a batch into the tree
    set_leaves_init(rln_pointer, &leaves);

    // We generate a new identity pair
    let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);
    let identity_index: usize = NO_OF_LEAVES;

    // We generate a random signal
    let mut rng = rand::thread_rng();
    let signal: [u8; 32] = rng.gen();

    // We generate a random epoch
    let epoch = hash_to_field(b"test-epoch");
    let rln_identifier = hash_to_field(b"test-rln-identifier");
    let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);

    let message_id = Fr::from(0);
    let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);

    // We set as leaf rate_commitment, its index would be equal to NO_OF_LEAVES
    let leaf_ser = fr_to_bytes_le(&rate_commitment);
    let input_buffer = &Buffer::from(leaf_ser.as_ref());
    let success = set_next_leaf(rln_pointer, input_buffer);
    assert!(success, "set next leaf call failed");

    // We prepare input for generate_rln_proof API
    // input_data is [ identity_secret<32> | id_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal<var> ]
    let mut serialized: Vec<u8> = Vec::new();
    serialized.append(&mut fr_to_bytes_le(&identity_secret_hash));
    serialized.append(&mut normalize_usize(identity_index));
    serialized.append(&mut fr_to_bytes_le(&user_message_limit));
    serialized.append(&mut fr_to_bytes_le(&message_id));
    serialized.append(&mut fr_to_bytes_le(&external_nullifier));
    serialized.append(&mut normalize_usize(signal.len()));
    serialized.append(&mut signal.to_vec());

    // We call generate_rln_proof
    // result_data is [ proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32> ]
    let mut proof_data = rln_proof_gen(rln_pointer, serialized.as_ref());

    // We prepare input for verify_rln_proof API
    // input_data is [ proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32> | signal_len<8> | signal<var> ]
    // that is [ proof_data | signal_len<8> | signal<var> ]
    proof_data.append(&mut normalize_usize(signal.len()));
    proof_data.append(&mut signal.to_vec());

    // We call verify_rln_proof
    let input_buffer = &Buffer::from(proof_data.as_ref());
    let mut proof_is_valid: bool = false;
    let proof_is_valid_ptr = &mut proof_is_valid as *mut bool;
    let success = verify_rln_proof(rln_pointer, input_buffer, proof_is_valid_ptr);
    assert!(success, "verify call failed");
    // assert! instead of assert_eq!(x, true): clippy::bool_assert_comparison
    assert!(proof_is_valid);
}
|
||||
|
||||
#[test]
// Computes and verifies an RLN ZK proof by checking the proof's root
// against an input roots buffer: empty buffer (root check skipped),
// wrong roots only (verification fails), then with the correct root
// included (verification succeeds).
fn test_verify_with_roots() {
    // First part similar to test_rln_proof_ffi.
    // Note: user_message_limit is declared once here; the original code
    // re-declared the identical value a second time further down.
    let user_message_limit = Fr::from(100);
    let message_id = Fr::from(0);

    // We generate a vector of random leaves
    let leaves = get_random_leaves();

    // We create a RLN instance
    let rln_pointer = create_rln_instance();

    // We add leaves in a batch into the tree
    set_leaves_init(rln_pointer, &leaves);

    // We generate a new identity pair
    let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);
    let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);
    let identity_index: usize = NO_OF_LEAVES;

    // We generate a random signal
    let mut rng = rand::thread_rng();
    let signal: [u8; 32] = rng.gen();

    // We generate a random epoch
    let epoch = hash_to_field(b"test-epoch");
    let rln_identifier = hash_to_field(b"test-rln-identifier");
    let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);

    // We set as leaf rate_commitment, its index would be equal to NO_OF_LEAVES
    let leaf_ser = fr_to_bytes_le(&rate_commitment);
    let input_buffer = &Buffer::from(leaf_ser.as_ref());
    let success = set_next_leaf(rln_pointer, input_buffer);
    assert!(success, "set next leaf call failed");

    // We prepare input for generate_rln_proof API
    // input_data is [ identity_secret<32> | id_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal<var> ]
    let mut serialized: Vec<u8> = Vec::new();
    serialized.append(&mut fr_to_bytes_le(&identity_secret_hash));
    serialized.append(&mut normalize_usize(identity_index));
    serialized.append(&mut fr_to_bytes_le(&user_message_limit));
    serialized.append(&mut fr_to_bytes_le(&message_id));
    serialized.append(&mut fr_to_bytes_le(&external_nullifier));
    serialized.append(&mut normalize_usize(signal.len()));
    serialized.append(&mut signal.to_vec());

    // We call generate_rln_proof
    // result_data is [ proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32> ]
    let mut proof_data = rln_proof_gen(rln_pointer, serialized.as_ref());

    // We prepare input for verify_rln_proof API
    // input_data is [ proof_data | signal_len<8> | signal<var> ]
    proof_data.append(&mut normalize_usize(signal.len()));
    proof_data.append(&mut signal.to_vec());

    // We test verify_with_roots

    // We first try to verify against an empty buffer of roots.
    // In this case, since no root is provided, proof's root check is skipped and proof is verified if other proof values are valid
    let mut roots_data: Vec<u8> = Vec::new();

    let input_buffer = &Buffer::from(proof_data.as_ref());
    let roots_buffer = &Buffer::from(roots_data.as_ref());
    let mut proof_is_valid: bool = false;
    let proof_is_valid_ptr = &mut proof_is_valid as *mut bool;
    let success =
        verify_with_roots(rln_pointer, input_buffer, roots_buffer, proof_is_valid_ptr);
    assert!(success, "verify call failed");
    // Proof should be valid
    assert!(proof_is_valid);

    // We then try to verify against some random values not containing the correct one.
    for _ in 0..5 {
        roots_data.append(&mut fr_to_bytes_le(&Fr::rand(&mut rng)));
    }
    let input_buffer = &Buffer::from(proof_data.as_ref());
    let roots_buffer = &Buffer::from(roots_data.as_ref());
    let mut proof_is_valid: bool = false;
    let proof_is_valid_ptr = &mut proof_is_valid as *mut bool;
    let success =
        verify_with_roots(rln_pointer, input_buffer, roots_buffer, proof_is_valid_ptr);
    assert!(success, "verify call failed");
    // Proof should be invalid: its root is not among the provided roots
    assert!(!proof_is_valid);

    // We finally include the correct root:
    // the root of the tree obtained adding one leaf per time
    let root = get_tree_root(rln_pointer);

    // We include the root and verify the proof
    roots_data.append(&mut fr_to_bytes_le(&root));
    let input_buffer = &Buffer::from(proof_data.as_ref());
    let roots_buffer = &Buffer::from(roots_data.as_ref());
    let mut proof_is_valid: bool = false;
    let proof_is_valid_ptr = &mut proof_is_valid as *mut bool;
    let success =
        verify_with_roots(rln_pointer, input_buffer, roots_buffer, proof_is_valid_ptr);
    assert!(success, "verify call failed");
    // Proof should be valid again
    assert!(proof_is_valid);
}
|
||||
|
||||
#[test]
// Tests identity-secret recovery via the FFI API: two proofs produced with
// the same secret and epoch must reveal the secret (Shamir-style slashing),
// while proofs from two different secrets must NOT recover either of them.
fn test_recover_id_secret_ffi() {
    // We create a RLN instance
    let rln_pointer = create_rln_instance();

    // We generate a new identity pair
    let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);

    let user_message_limit = Fr::from(100);
    let message_id = Fr::from(0);
    let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);

    // We set as leaf rate_commitment, its index would be equal to 0 since tree is empty
    let leaf_ser = fr_to_bytes_le(&rate_commitment);
    let input_buffer = &Buffer::from(leaf_ser.as_ref());
    let success = set_next_leaf(rln_pointer, input_buffer);
    assert!(success, "set next leaf call failed");

    let identity_index: usize = 0;

    // We generate two proofs using same epoch but different signals.

    // We generate two random signals
    let mut rng = rand::thread_rng();
    let signal1: [u8; 32] = rng.gen();

    let signal2: [u8; 32] = rng.gen();

    // We generate a random epoch
    let epoch = hash_to_field(b"test-epoch");
    let rln_identifier = hash_to_field(b"test-rln-identifier");
    let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);

    // We prepare input for generate_rln_proof API
    // input_data is [ identity_secret<32> | id_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal<var> ]
    let mut serialized1: Vec<u8> = Vec::new();
    serialized1.append(&mut fr_to_bytes_le(&identity_secret_hash));
    serialized1.append(&mut normalize_usize(identity_index));
    serialized1.append(&mut fr_to_bytes_le(&user_message_limit));
    serialized1.append(&mut fr_to_bytes_le(&message_id));
    serialized1.append(&mut fr_to_bytes_le(&external_nullifier));

    // The first part is the same for both proof input, so we clone
    let mut serialized2 = serialized1.clone();

    // We attach the first signal to the first proof input
    serialized1.append(&mut normalize_usize(signal1.len()));
    serialized1.append(&mut signal1.to_vec());

    // We attach the second signal to the second proof input
    serialized2.append(&mut normalize_usize(signal2.len()));
    serialized2.append(&mut signal2.to_vec());

    // We call generate_rln_proof for first proof values
    // result_data is [ proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32> ]
    let proof_data_1 = rln_proof_gen(rln_pointer, serialized1.as_ref());

    // We call generate_rln_proof for the second proof values
    // result_data is [ proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32> ]
    let proof_data_2 = rln_proof_gen(rln_pointer, serialized2.as_ref());

    let input_proof_buffer_1 = &Buffer::from(proof_data_1.as_ref());
    let input_proof_buffer_2 = &Buffer::from(proof_data_2.as_ref());
    let mut output_buffer = MaybeUninit::<Buffer>::uninit();
    let success = recover_id_secret(
        rln_pointer,
        input_proof_buffer_1,
        input_proof_buffer_2,
        output_buffer.as_mut_ptr(),
    );
    assert!(success, "recover id secret call failed");
    let output_buffer = unsafe { output_buffer.assume_init() };
    let serialized_identity_secret_hash = <&[u8]>::from(&output_buffer).to_vec();

    // We passed two shares for the same secret, so recovery should be successful.
    // To check it, we ensure that the recovered identity secret hash is NOT empty
    // (the original comment said "is empty", contradicting the assert below)
    assert!(!serialized_identity_secret_hash.is_empty());

    // We check if the recovered identity secret hash corresponds to the original one
    let (recovered_identity_secret_hash, _) = bytes_le_to_fr(&serialized_identity_secret_hash);
    assert_eq!(recovered_identity_secret_hash, identity_secret_hash);

    // We now test that computing identity_secret_hash is unsuccessful if shares computed from two different identity secret hashes but within same epoch are passed

    // We generate a new identity pair
    let (identity_secret_hash_new, id_commitment_new) = identity_pair_gen(rln_pointer);
    let rate_commitment_new = utils_poseidon_hash(&[id_commitment_new, user_message_limit]);

    // We set as leaf rate_commitment_new, its index would be equal to 1 since at 0 there is the first rate_commitment
    let leaf_ser = fr_to_bytes_le(&rate_commitment_new);
    let input_buffer = &Buffer::from(leaf_ser.as_ref());
    let success = set_next_leaf(rln_pointer, input_buffer);
    assert!(success, "set next leaf call failed");

    let identity_index_new: usize = 1;

    // We generate a random signal
    let signal3: [u8; 32] = rng.gen();

    // We prepare input for generate_rln_proof API
    // Note that the external nullifier (epoch) is the same as before
    let mut serialized: Vec<u8> = Vec::new();
    serialized.append(&mut fr_to_bytes_le(&identity_secret_hash_new));
    serialized.append(&mut normalize_usize(identity_index_new));
    serialized.append(&mut fr_to_bytes_le(&user_message_limit));
    serialized.append(&mut fr_to_bytes_le(&message_id));
    serialized.append(&mut fr_to_bytes_le(&external_nullifier));
    serialized.append(&mut normalize_usize(signal3.len()));
    serialized.append(&mut signal3.to_vec());

    // We call generate_rln_proof
    // result_data is [ proof<128> | root<32> | external_nullifier<32> | x<32> | y<32> | nullifier<32> ]
    let proof_data_3 = rln_proof_gen(rln_pointer, serialized.as_ref());

    // We attempt to recover the secret using share1 (coming from identity_secret_hash) and share3 (coming from identity_secret_hash_new)
    let input_proof_buffer_1 = &Buffer::from(proof_data_1.as_ref());
    let input_proof_buffer_3 = &Buffer::from(proof_data_3.as_ref());
    let mut output_buffer = MaybeUninit::<Buffer>::uninit();
    let success = recover_id_secret(
        rln_pointer,
        input_proof_buffer_1,
        input_proof_buffer_3,
        output_buffer.as_mut_ptr(),
    );
    assert!(success, "recover id secret call failed");
    let output_buffer = unsafe { output_buffer.assume_init() };
    let serialized_identity_secret_hash = <&[u8]>::from(&output_buffer).to_vec();
    let (recovered_identity_secret_hash_new, _) =
        bytes_le_to_fr(&serialized_identity_secret_hash);

    // ensure that the recovered secret does not match with either of the
    // used secrets in proof generation
    assert_ne!(recovered_identity_secret_hash_new, identity_secret_hash_new);
}
|
||||
|
||||
#[test]
// Tests seeded (deterministic) identity-pair generation using FFI APIs
// against known expected values for a fixed seed.
fn test_seeded_keygen_ffi() {
    // Create an RLN instance
    let rln_pointer = create_rln_instance();

    // Derive an identity pair from a fixed input seed
    let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
    let seed_buffer = &Buffer::from(seed_bytes);
    let mut key_buffer = MaybeUninit::<Buffer>::uninit();
    let success = seeded_key_gen(rln_pointer, seed_buffer, key_buffer.as_mut_ptr());
    assert!(success, "seeded key gen call failed");

    // Deserialize [ identity_secret_hash<32> | id_commitment<32> ]
    let key_buffer = unsafe { key_buffer.assume_init() };
    let serialized_keys = <&[u8]>::from(&key_buffer).to_vec();
    let (identity_secret_hash, offset) = bytes_le_to_fr(&serialized_keys);
    let (id_commitment, _) = bytes_le_to_fr(&serialized_keys[offset..].to_vec());

    // Compare against the expected deterministic values for this seed
    let expected_identity_secret_hash_seed_bytes = str_to_fr(
        "0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
        16,
    );
    let expected_id_commitment_seed_bytes = str_to_fr(
        "0xbf16d2b5c0d6f9d9d561e05bfca16a81b4b873bb063508fae360d8c74cef51f",
        16,
    );

    assert_eq!(
        identity_secret_hash,
        expected_identity_secret_hash_seed_bytes.unwrap()
    );
    assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}
|
||||
|
||||
#[test]
// Tests seeded (deterministic) extended key generation using FFI APIs:
// the full identity tuple derived from a fixed seed must match known values.
fn test_seeded_extended_keygen_ffi() {
    // Create an RLN instance
    let rln_pointer = create_rln_instance();

    // Derive an identity tuple from a fixed input seed
    let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
    let seed_buffer = &Buffer::from(seed_bytes);
    let mut tuple_buffer = MaybeUninit::<Buffer>::uninit();
    let success =
        seeded_extended_key_gen(rln_pointer, seed_buffer, tuple_buffer.as_mut_ptr());
    assert!(success, "seeded key gen call failed");

    // Deserialize the full identity tuple
    let tuple_buffer = unsafe { tuple_buffer.assume_init() };
    let serialized_tuple = <&[u8]>::from(&tuple_buffer).to_vec();
    let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
        deserialize_identity_tuple(serialized_tuple);

    // Expected deterministic values for this seed
    let expected_identity_trapdoor_seed_bytes = str_to_fr(
        "0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
        16,
    );
    let expected_identity_nullifier_seed_bytes = str_to_fr(
        "0x1f18714c7bc83b5bca9e89d404cf6f2f585bc4c0f7ed8b53742b7e2b298f50b4",
        16,
    );
    let expected_identity_secret_hash_seed_bytes = str_to_fr(
        "0x2aca62aaa7abaf3686fff2caf00f55ab9462dc12db5b5d4bcf3994e671f8e521",
        16,
    );
    let expected_id_commitment_seed_bytes = str_to_fr(
        "0x68b66aa0a8320d2e56842581553285393188714c48f9b17acd198b4f1734c5c",
        16,
    );

    assert_eq!(
        identity_trapdoor,
        expected_identity_trapdoor_seed_bytes.unwrap()
    );
    assert_eq!(
        identity_nullifier,
        expected_identity_nullifier_seed_bytes.unwrap()
    );
    assert_eq!(
        identity_secret_hash,
        expected_identity_secret_hash_seed_bytes.unwrap()
    );
    assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}
|
||||
|
||||
#[test]
// Tests hash-to-field using FFI APIs: the FFI hash of a random signal
// must equal the result of the in-process `hash_to_field`.
fn test_hash_to_field_ffi() {
    let mut rng = rand::thread_rng();
    let signal: [u8; 32] = rng.gen();

    // Hash the signal through the FFI entry point
    let signal_buffer = &Buffer::from(signal.as_ref());
    let mut hash_buffer = MaybeUninit::<Buffer>::uninit();
    let success = ffi_hash(signal_buffer, hash_buffer.as_mut_ptr());
    assert!(success, "hash call failed");

    // Deserialize the FFI result
    let hash_buffer = unsafe { hash_buffer.assume_init() };
    let serialized_hash = <&[u8]>::from(&hash_buffer).to_vec();
    let (hash1, _) = bytes_le_to_fr(&serialized_hash);

    // Hash the same signal in-process and compare
    let hash2 = hash_to_field(&signal);
    assert_eq!(hash1, hash2);
}
|
||||
|
||||
#[test]
// Tests the Poseidon hash FFI entry point against the in-process
// implementation for a random-length, random-valued input vector.
fn test_poseidon_hash_ffi() {
    // Pick a random arity in 1..ROUND_PARAMS.len() and random field inputs
    let mut rng = thread_rng();
    let number_of_inputs = rng.gen_range(1..ROUND_PARAMS.len());
    let inputs: Vec<Fr> = (0..number_of_inputs)
        .map(|_| Fr::rand(&mut rng))
        .collect();
    let inputs_ser = vec_fr_to_bytes_le(&inputs).unwrap();
    let inputs_buffer = &Buffer::from(inputs_ser.as_ref());

    // Reference value computed in-process
    let expected_hash = utils_poseidon_hash(inputs.as_ref());

    // Hash through the FFI entry point
    let mut hash_buffer = MaybeUninit::<Buffer>::uninit();
    let success = ffi_poseidon_hash(inputs_buffer, hash_buffer.as_mut_ptr());
    assert!(success, "poseidon hash call failed");

    // Deserialize and compare
    let hash_buffer = unsafe { hash_buffer.assume_init() };
    let serialized_hash = <&[u8]>::from(&hash_buffer).to_vec();
    let (received_hash, _) = bytes_le_to_fr(&serialized_hash);
    assert_eq!(received_hash, expected_hash);
}
|
||||
|
||||
#[test]
// Round-trips a leaf through the FFI API: set an id_commitment at a
// random index, read it back with get_leaf, and compare.
fn test_get_leaf() {
    // Total number of addressable leaves for the test tree
    let no_of_leaves = 1 << TEST_TREE_HEIGHT;

    // Create an RLN instance
    let rln_pointer = create_rln_instance();

    // Derive an identity tuple from a fixed seed; we only need id_commitment
    let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
    let seed_buffer = &Buffer::from(seed_bytes);
    let mut keygen_buffer = MaybeUninit::<Buffer>::uninit();
    let success =
        seeded_extended_key_gen(rln_pointer, seed_buffer, keygen_buffer.as_mut_ptr());
    assert!(success, "seeded key gen call failed");

    let keygen_buffer = unsafe { keygen_buffer.assume_init() };
    let serialized_tuple = <&[u8]>::from(&keygen_buffer).to_vec();
    let (_, _, _, id_commitment) = deserialize_identity_tuple(serialized_tuple);

    // Insert the id_commitment into the tree at a random index
    let mut rng = thread_rng();
    let index = rng.gen_range(0..no_of_leaves) as usize;
    let leaf_ser = fr_to_bytes_le(&id_commitment);
    let leaf_buffer = &Buffer::from(leaf_ser.as_ref());
    let success = set_leaf(rln_pointer, index, leaf_buffer);
    assert!(success, "set leaf call failed");

    // Read the leaf back from the same index
    let mut leaf_out_buffer = MaybeUninit::<Buffer>::uninit();
    let success = get_leaf(rln_pointer, index, leaf_out_buffer.as_mut_ptr());
    assert!(success, "get leaf call failed");
    let leaf_out_buffer = unsafe { leaf_out_buffer.assume_init() };
    let leaf_out = <&[u8]>::from(&leaf_out_buffer).to_vec();
    let (received_id_commitment, _) = bytes_le_to_fr(&leaf_out);

    // The leaf we read must be the one we inserted
    assert_eq!(received_id_commitment, id_commitment);
}
|
||||
|
||||
#[test]
// Round-trips arbitrary metadata bytes through set_metadata/get_metadata.
fn test_valid_metadata() {
    // Create an RLN instance
    let rln_pointer = create_rln_instance();

    // Store some metadata bytes
    let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
    let metadata_in = &Buffer::from(seed_bytes);
    let success = set_metadata(rln_pointer, metadata_in);
    assert!(success, "set_metadata call failed");

    // Read the metadata back
    let mut metadata_out = MaybeUninit::<Buffer>::uninit();
    let success = get_metadata(rln_pointer, metadata_out.as_mut_ptr());
    assert!(success, "get_metadata call failed");

    // What we read must equal what we stored
    let metadata_out = unsafe { metadata_out.assume_init() };
    let read_back = <&[u8]>::from(&metadata_out).to_vec();
    assert_eq!(read_back, seed_bytes.to_vec());
}
|
||||
|
||||
#[test]
// A fresh RLN instance must report zero-length metadata.
fn test_empty_metadata() {
    // Create an RLN instance without setting any metadata
    let rln_pointer = create_rln_instance();

    // Reading metadata must succeed and yield an empty buffer
    let mut metadata_out = MaybeUninit::<Buffer>::uninit();
    let success = get_metadata(rln_pointer, metadata_out.as_mut_ptr());
    assert!(success, "get_metadata call failed");

    let metadata_out = unsafe { metadata_out.assume_init() };
    assert_eq!(metadata_out.len, 0);
}
|
||||
}
|
||||
139
rln/tests/poseidon_tree.rs
Normal file
139
rln/tests/poseidon_tree.rs
Normal file
@@ -0,0 +1,139 @@
|
||||
////////////////////////////////////////////////////////////
|
||||
/// Tests
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use rln::hashers::{poseidon_hash, PoseidonHash};
|
||||
use rln::{circuit::*, poseidon_tree::PoseidonTree};
|
||||
use utils::{FullMerkleTree, OptimalMerkleTree, ZerokitMerkleProof, ZerokitMerkleTree};
|
||||
|
||||
#[test]
|
||||
// The test is checked correctness for `FullMerkleTree` and `OptimalMerkleTree` with Poseidon hash
|
||||
fn test_zerokit_merkle_implementations() {
|
||||
let sample_size = 100;
|
||||
let leaves: Vec<Fr> = (0..sample_size).map(|s| Fr::from(s)).collect();
|
||||
|
||||
let mut tree_full = FullMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
|
||||
let mut tree_opt = OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
|
||||
|
||||
for i in 0..sample_size.try_into().unwrap() {
|
||||
tree_full.set(i, leaves[i]).unwrap();
|
||||
let proof = tree_full.proof(i).expect("index should be set");
|
||||
assert_eq!(proof.leaf_index(), i);
|
||||
|
||||
tree_opt.set(i, leaves[i]).unwrap();
|
||||
let proof = tree_opt.proof(i).expect("index should be set");
|
||||
assert_eq!(proof.leaf_index(), i);
|
||||
}
|
||||
|
||||
// We check all roots are the same
|
||||
let tree_full_root = tree_full.root();
|
||||
let tree_opt_root = tree_opt.root();
|
||||
|
||||
assert_eq!(tree_full_root, tree_opt_root);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_subtree_root() {
|
||||
const DEPTH: usize = 3;
|
||||
const LEAVES_LEN: usize = 6;
|
||||
|
||||
let mut tree = PoseidonTree::default(DEPTH).unwrap();
|
||||
let leaves: Vec<Fr> = (0..LEAVES_LEN).map(|s| Fr::from(s as i32)).collect();
|
||||
let _ = tree.set_range(0, leaves);
|
||||
|
||||
for i in 0..LEAVES_LEN {
|
||||
// check leaves
|
||||
assert_eq!(
|
||||
tree.get(i).unwrap(),
|
||||
tree.get_subtree_root(DEPTH, i).unwrap()
|
||||
);
|
||||
// check root
|
||||
assert_eq!(tree.root(), tree.get_subtree_root(0, i).unwrap());
|
||||
}
|
||||
|
||||
// check intermediate nodes
|
||||
for n in (1..=DEPTH).rev() {
|
||||
for i in (0..(1 << n)).step_by(2) {
|
||||
let idx_l = i * (1 << (DEPTH - n));
|
||||
let idx_r = (i + 1) * (1 << (DEPTH - n));
|
||||
let idx_sr = idx_l;
|
||||
|
||||
let prev_l = tree.get_subtree_root(n, idx_l).unwrap();
|
||||
let prev_r = tree.get_subtree_root(n, idx_r).unwrap();
|
||||
let subroot = tree.get_subtree_root(n - 1, idx_sr).unwrap();
|
||||
|
||||
assert_eq!(poseidon_hash(&[prev_l, prev_r]), subroot);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_empty_leaves_indices() {
|
||||
let depth = 4;
|
||||
let nof_leaves: usize = 1 << (depth - 1);
|
||||
|
||||
let mut tree = PoseidonTree::default(depth).unwrap();
|
||||
let leaves: Vec<Fr> = (0..nof_leaves).map(|s| Fr::from(s as i32)).collect();
|
||||
|
||||
// check set_range
|
||||
let _ = tree.set_range(0, leaves.clone());
|
||||
assert!(tree.get_empty_leaves_indices().is_empty());
|
||||
|
||||
let mut vec_idxs = Vec::new();
|
||||
// check delete function
|
||||
for i in 0..nof_leaves {
|
||||
vec_idxs.push(i);
|
||||
let _ = tree.delete(i);
|
||||
assert_eq!(tree.get_empty_leaves_indices(), vec_idxs);
|
||||
}
|
||||
// check set function
|
||||
for i in (0..nof_leaves).rev() {
|
||||
vec_idxs.pop();
|
||||
let _ = tree.set(i, leaves[i]);
|
||||
assert_eq!(tree.get_empty_leaves_indices(), vec_idxs);
|
||||
}
|
||||
|
||||
// check remove_indices_and_set_leaves inside override_range function
|
||||
assert!(tree.get_empty_leaves_indices().is_empty());
|
||||
let leaves_2: Vec<Fr> = (0..2).map(|s| Fr::from(s as i32)).collect();
|
||||
tree.override_range(0, leaves_2.clone(), [0, 1, 2, 3])
|
||||
.unwrap();
|
||||
assert_eq!(tree.get_empty_leaves_indices(), vec![2, 3]);
|
||||
|
||||
// check remove_indices inside override_range function
|
||||
tree.override_range(0, [], [0, 1]).unwrap();
|
||||
assert_eq!(tree.get_empty_leaves_indices(), vec![0, 1, 2, 3]);
|
||||
|
||||
// check set_range inside override_range function
|
||||
tree.override_range(0, leaves_2.clone(), []).unwrap();
|
||||
assert_eq!(tree.get_empty_leaves_indices(), vec![2, 3]);
|
||||
|
||||
let leaves_4: Vec<Fr> = (0..4).map(|s| Fr::from(s as i32)).collect();
|
||||
// check if the indexes for write and delete are the same
|
||||
tree.override_range(0, leaves_4.clone(), [0, 1, 2, 3])
|
||||
.unwrap();
|
||||
assert!(tree.get_empty_leaves_indices().is_empty());
|
||||
|
||||
// check if indexes for deletion are before indexes for overwriting
|
||||
tree.override_range(4, leaves_4.clone(), [0, 1, 2, 3])
|
||||
.unwrap();
|
||||
// The result will be like this, because in the set_range function in pmtree
|
||||
// the next_index value is increased not by the number of elements to insert,
|
||||
// but by the union of indices for deleting and inserting.
|
||||
assert_eq!(
|
||||
tree.get_empty_leaves_indices(),
|
||||
vec![0, 1, 2, 3, 8, 9, 10, 11]
|
||||
);
|
||||
|
||||
// check if the indices for write and delete do not overlap completely
|
||||
tree.override_range(2, leaves_4.clone(), [0, 1, 2, 3])
|
||||
.unwrap();
|
||||
// The result will be like this, because in the set_range function in pmtree
|
||||
// the next_index value is increased not by the number of elements to insert,
|
||||
// but by the union of indices for deleting and inserting.
|
||||
// + we've already set to 6 and 7 in previous test
|
||||
assert_eq!(tree.get_empty_leaves_indices(), vec![0, 1, 8, 9, 10, 11]);
|
||||
}
|
||||
}
|
||||
251
rln/tests/protocol.rs
Normal file
251
rln/tests/protocol.rs
Normal file
@@ -0,0 +1,251 @@
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use ark_ff::BigInt;
|
||||
use rln::circuit::zkey_from_folder;
|
||||
use rln::circuit::{circom_from_folder, vk_from_folder, Fr, TEST_TREE_HEIGHT};
|
||||
use rln::hashers::{hash_to_field, poseidon_hash};
|
||||
use rln::poseidon_tree::PoseidonTree;
|
||||
use rln::protocol::*;
|
||||
use rln::utils::str_to_fr;
|
||||
use utils::{ZerokitMerkleProof, ZerokitMerkleTree};
|
||||
|
||||
type ConfigOf<T> = <T as ZerokitMerkleTree>::Config;
|
||||
|
||||
#[test]
|
||||
// We test Merkle tree generation, proofs and verification
|
||||
fn test_merkle_proof() {
|
||||
let leaf_index = 3;
|
||||
|
||||
// generate identity
|
||||
let identity_secret_hash = hash_to_field(b"test-merkle-proof");
|
||||
let id_commitment = poseidon_hash(&[identity_secret_hash]);
|
||||
let rate_commitment = poseidon_hash(&[id_commitment, 100.into()]);
|
||||
|
||||
// generate merkle tree
|
||||
let default_leaf = Fr::from(0);
|
||||
let mut tree = PoseidonTree::new(
|
||||
TEST_TREE_HEIGHT,
|
||||
default_leaf,
|
||||
ConfigOf::<PoseidonTree>::default(),
|
||||
)
|
||||
.unwrap();
|
||||
tree.set(leaf_index, rate_commitment.into()).unwrap();
|
||||
|
||||
// We check correct computation of the root
|
||||
let root = tree.root();
|
||||
|
||||
assert_eq!(
|
||||
root,
|
||||
BigInt([
|
||||
4939322235247991215,
|
||||
5110804094006647505,
|
||||
4427606543677101242,
|
||||
910933464535675827
|
||||
])
|
||||
.into()
|
||||
);
|
||||
|
||||
let merkle_proof = tree.proof(leaf_index).expect("proof should exist");
|
||||
let path_elements = merkle_proof.get_path_elements();
|
||||
let identity_path_index = merkle_proof.get_path_index();
|
||||
|
||||
// We check correct computation of the path and indexes
|
||||
let expected_path_elements: Vec<Fr> = [
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864",
|
||||
"0x1069673dcdb12263df301a6ff584a7ec261a44cb9dc68df067a4774460b1f1e1",
|
||||
"0x18f43331537ee2af2e3d758d50f72106467c6eea50371dd528d57eb2b856d238",
|
||||
"0x07f9d837cb17b0d36320ffe93ba52345f1b728571a568265caac97559dbc952a",
|
||||
"0x2b94cf5e8746b3f5c9631f4c5df32907a699c58c94b2ad4d7b5cec1639183f55",
|
||||
"0x2dee93c5a666459646ea7d22cca9e1bcfed71e6951b953611d11dda32ea09d78",
|
||||
"0x078295e5a22b84e982cf601eb639597b8b0515a88cb5ac7fa8a4aabe3c87349d",
|
||||
"0x2fa5e5f18f6027a6501bec864564472a616b2e274a41211a444cbe3a99f3cc61",
|
||||
"0x0e884376d0d8fd21ecb780389e941f66e45e7acce3e228ab3e2156a614fcd747",
|
||||
"0x1b7201da72494f1e28717ad1a52eb469f95892f957713533de6175e5da190af2",
|
||||
"0x1f8d8822725e36385200c0b201249819a6e6e1e4650808b5bebc6bface7d7636",
|
||||
"0x2c5d82f66c914bafb9701589ba8cfcfb6162b0a12acf88a8d0879a0471b5f85a",
|
||||
"0x14c54148a0940bb820957f5adf3fa1134ef5c4aaa113f4646458f270e0bfbfd0",
|
||||
"0x190d33b12f986f961e10c0ee44d8b9af11be25588cad89d416118e4bf4ebe80c",
|
||||
"0x22f98aa9ce704152ac17354914ad73ed1167ae6596af510aa5b3649325e06c92",
|
||||
"0x2a7c7c9b6ce5880b9f6f228d72bf6a575a526f29c66ecceef8b753d38bba7323",
|
||||
"0x2e8186e558698ec1c67af9c14d463ffc470043c9c2988b954d75dd643f36b992",
|
||||
"0x0f57c5571e9a4eab49e2c8cf050dae948aef6ead647392273546249d1c1ff10f",
|
||||
"0x1830ee67b5fb554ad5f63d4388800e1cfe78e310697d46e43c9ce36134f72cca",
|
||||
]
|
||||
.map(|e| str_to_fr(e, 16).unwrap())
|
||||
.to_vec();
|
||||
|
||||
let expected_identity_path_index: Vec<u8> =
|
||||
vec![1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
|
||||
|
||||
assert_eq!(path_elements, expected_path_elements);
|
||||
assert_eq!(identity_path_index, expected_identity_path_index);
|
||||
|
||||
// We check correct verification of the proof
|
||||
assert!(tree.verify(&rate_commitment, &merkle_proof).unwrap());
|
||||
}
|
||||
|
||||
fn get_test_witness() -> RLNWitnessInput {
|
||||
let leaf_index = 3;
|
||||
// Generate identity pair
|
||||
let (identity_secret_hash, id_commitment) = keygen();
|
||||
let user_message_limit = Fr::from(100);
|
||||
let rate_commitment = poseidon_hash(&[id_commitment, user_message_limit]);
|
||||
|
||||
//// generate merkle tree
|
||||
let default_leaf = Fr::from(0);
|
||||
let mut tree = PoseidonTree::new(
|
||||
TEST_TREE_HEIGHT,
|
||||
default_leaf,
|
||||
ConfigOf::<PoseidonTree>::default(),
|
||||
)
|
||||
.unwrap();
|
||||
tree.set(leaf_index, rate_commitment.into()).unwrap();
|
||||
|
||||
let merkle_proof = tree.proof(leaf_index).expect("proof should exist");
|
||||
|
||||
let signal = b"hey hey";
|
||||
let x = hash_to_field(signal);
|
||||
|
||||
// We set the remaining values to random ones
|
||||
let epoch = hash_to_field(b"test-epoch");
|
||||
let rln_identifier = hash_to_field(b"test-rln-identifier");
|
||||
let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);
|
||||
|
||||
rln_witness_from_values(
|
||||
identity_secret_hash,
|
||||
&merkle_proof,
|
||||
x,
|
||||
external_nullifier,
|
||||
user_message_limit,
|
||||
Fr::from(1),
|
||||
)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
// We test a RLN proof generation and verification
|
||||
fn test_witness_from_json() {
|
||||
// We generate all relevant keys
|
||||
let proving_key = zkey_from_folder().unwrap();
|
||||
let verification_key = vk_from_folder().unwrap();
|
||||
let builder = circom_from_folder().unwrap();
|
||||
|
||||
// We compute witness from the json input
|
||||
let rln_witness = get_test_witness();
|
||||
let rln_witness_json = rln_witness_to_json(&rln_witness).unwrap();
|
||||
let rln_witness_deser = rln_witness_from_json(rln_witness_json).unwrap();
|
||||
assert_eq!(rln_witness_deser, rln_witness);
|
||||
|
||||
// Let's generate a zkSNARK proof
|
||||
let proof = generate_proof(builder, &proving_key, &rln_witness_deser).unwrap();
|
||||
let proof_values = proof_values_from_witness(&rln_witness_deser).unwrap();
|
||||
|
||||
// Let's verify the proof
|
||||
let verified = verify_proof(&verification_key, &proof, &proof_values);
|
||||
|
||||
assert!(verified.unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
// We test a RLN proof generation and verification
|
||||
fn test_end_to_end() {
|
||||
let rln_witness = get_test_witness();
|
||||
let rln_witness_json = rln_witness_to_json(&rln_witness).unwrap();
|
||||
let rln_witness_deser = rln_witness_from_json(rln_witness_json).unwrap();
|
||||
assert_eq!(rln_witness_deser, rln_witness);
|
||||
|
||||
// We generate all relevant keys
|
||||
let proving_key = zkey_from_folder().unwrap();
|
||||
let verification_key = vk_from_folder().unwrap();
|
||||
let builder = circom_from_folder().unwrap();
|
||||
|
||||
// Let's generate a zkSNARK proof
|
||||
let proof = generate_proof(builder, &proving_key, &rln_witness_deser).unwrap();
|
||||
|
||||
let proof_values = proof_values_from_witness(&rln_witness_deser).unwrap();
|
||||
|
||||
// Let's verify the proof
|
||||
let success = verify_proof(&verification_key, &proof, &proof_values).unwrap();
|
||||
|
||||
assert!(success);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_witness_serialization() {
|
||||
// We test witness JSON serialization
|
||||
let rln_witness = get_test_witness();
|
||||
let rln_witness_json = rln_witness_to_json(&rln_witness).unwrap();
|
||||
let rln_witness_deser = rln_witness_from_json(rln_witness_json).unwrap();
|
||||
assert_eq!(rln_witness_deser, rln_witness);
|
||||
|
||||
// We test witness serialization
|
||||
let ser = serialize_witness(&rln_witness).unwrap();
|
||||
let (deser, _) = deserialize_witness(&ser).unwrap();
|
||||
assert_eq!(rln_witness, deser);
|
||||
|
||||
// We test Proof values serialization
|
||||
let proof_values = proof_values_from_witness(&rln_witness).unwrap();
|
||||
let ser = serialize_proof_values(&proof_values);
|
||||
let (deser, _) = deserialize_proof_values(&ser);
|
||||
assert_eq!(proof_values, deser);
|
||||
}
|
||||
|
||||
#[test]
|
||||
// Tests seeded keygen
|
||||
// Note that hardcoded values are only valid for Bn254
|
||||
fn test_seeded_keygen() {
|
||||
// Generate identity pair using a seed phrase
|
||||
let seed_phrase: &str = "A seed phrase example";
|
||||
let (identity_secret_hash, id_commitment) = seeded_keygen(seed_phrase.as_bytes());
|
||||
|
||||
// We check against expected values
|
||||
let expected_identity_secret_hash_seed_phrase = str_to_fr(
|
||||
"0x20df38f3f00496f19fe7c6535492543b21798ed7cb91aebe4af8012db884eda3",
|
||||
16,
|
||||
)
|
||||
.unwrap();
|
||||
let expected_id_commitment_seed_phrase = str_to_fr(
|
||||
"0x1223a78a5d66043a7f9863e14507dc80720a5602b2a894923e5b5147d5a9c325",
|
||||
16,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
identity_secret_hash,
|
||||
expected_identity_secret_hash_seed_phrase
|
||||
);
|
||||
assert_eq!(id_commitment, expected_id_commitment_seed_phrase);
|
||||
|
||||
// Generate identity pair using an byte array
|
||||
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
|
||||
let (identity_secret_hash, id_commitment) = seeded_keygen(seed_bytes);
|
||||
|
||||
// We check against expected values
|
||||
let expected_identity_secret_hash_seed_bytes = str_to_fr(
|
||||
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
|
||||
16,
|
||||
)
|
||||
.unwrap();
|
||||
let expected_id_commitment_seed_bytes = str_to_fr(
|
||||
"0xbf16d2b5c0d6f9d9d561e05bfca16a81b4b873bb063508fae360d8c74cef51f",
|
||||
16,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
identity_secret_hash,
|
||||
expected_identity_secret_hash_seed_bytes
|
||||
);
|
||||
assert_eq!(id_commitment, expected_id_commitment_seed_bytes);
|
||||
|
||||
// We check again if the identity pair generated with the same seed phrase corresponds to the previously generated one
|
||||
let (identity_secret_hash, id_commitment) = seeded_keygen(seed_phrase.as_bytes());
|
||||
|
||||
assert_eq!(
|
||||
identity_secret_hash,
|
||||
expected_identity_secret_hash_seed_phrase
|
||||
);
|
||||
assert_eq!(id_commitment, expected_id_commitment_seed_phrase);
|
||||
}
|
||||
}
|
||||
249
rln/tests/public.rs
Normal file
249
rln/tests/public.rs
Normal file
@@ -0,0 +1,249 @@
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use ark_ff::BigInt;
|
||||
use ark_std::{rand::thread_rng, UniformRand};
|
||||
use rand::Rng;
|
||||
use rln::circuit::{Fr, TEST_TREE_HEIGHT};
|
||||
use rln::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS};
|
||||
use rln::protocol::{compute_tree_root, deserialize_identity_tuple};
|
||||
use rln::public::{hash as public_hash, poseidon_hash as public_poseidon_hash, RLN};
|
||||
use rln::utils::*;
|
||||
use std::io::Cursor;
|
||||
|
||||
#[test]
|
||||
// This test is similar to the one in lib, but uses only public API
|
||||
fn test_merkle_proof() {
|
||||
let leaf_index = 3;
|
||||
let user_message_limit = 1;
|
||||
|
||||
let mut rln = RLN::new(TEST_TREE_HEIGHT, generate_input_buffer()).unwrap();
|
||||
|
||||
// generate identity
|
||||
let identity_secret_hash = hash_to_field(b"test-merkle-proof");
|
||||
let id_commitment = utils_poseidon_hash(&vec![identity_secret_hash]);
|
||||
let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit.into()]);
|
||||
|
||||
// check that leaves indices is empty
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_empty_leaves_indices(&mut buffer).unwrap();
|
||||
let idxs = bytes_le_to_vec_usize(&buffer.into_inner()).unwrap();
|
||||
assert!(idxs.is_empty());
|
||||
|
||||
// We pass rate_commitment as Read buffer to RLN's set_leaf
|
||||
let mut buffer = Cursor::new(fr_to_bytes_le(&rate_commitment));
|
||||
rln.set_leaf(leaf_index, &mut buffer).unwrap();
|
||||
|
||||
// check that leaves before leaf_index is set to zero
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_empty_leaves_indices(&mut buffer).unwrap();
|
||||
let idxs = bytes_le_to_vec_usize(&buffer.into_inner()).unwrap();
|
||||
assert_eq!(idxs, [0, 1, 2]);
|
||||
|
||||
// We check correct computation of the root
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_root(&mut buffer).unwrap();
|
||||
let (root, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
assert_eq!(
|
||||
root,
|
||||
Fr::from(BigInt([
|
||||
17110646155607829651,
|
||||
5040045984242729823,
|
||||
6965416728592533086,
|
||||
2328960363755461975
|
||||
]))
|
||||
);
|
||||
|
||||
// We check correct computation of merkle proof
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_proof(leaf_index, &mut buffer).unwrap();
|
||||
|
||||
let buffer_inner = buffer.into_inner();
|
||||
let (path_elements, read) = bytes_le_to_vec_fr(&buffer_inner).unwrap();
|
||||
let (identity_path_index, _) = bytes_le_to_vec_u8(&buffer_inner[read..].to_vec()).unwrap();
|
||||
|
||||
// We check correct computation of the path and indexes
|
||||
let expected_path_elements: Vec<Fr> = [
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864",
|
||||
"0x1069673dcdb12263df301a6ff584a7ec261a44cb9dc68df067a4774460b1f1e1",
|
||||
"0x18f43331537ee2af2e3d758d50f72106467c6eea50371dd528d57eb2b856d238",
|
||||
"0x07f9d837cb17b0d36320ffe93ba52345f1b728571a568265caac97559dbc952a",
|
||||
"0x2b94cf5e8746b3f5c9631f4c5df32907a699c58c94b2ad4d7b5cec1639183f55",
|
||||
"0x2dee93c5a666459646ea7d22cca9e1bcfed71e6951b953611d11dda32ea09d78",
|
||||
"0x078295e5a22b84e982cf601eb639597b8b0515a88cb5ac7fa8a4aabe3c87349d",
|
||||
"0x2fa5e5f18f6027a6501bec864564472a616b2e274a41211a444cbe3a99f3cc61",
|
||||
"0x0e884376d0d8fd21ecb780389e941f66e45e7acce3e228ab3e2156a614fcd747",
|
||||
"0x1b7201da72494f1e28717ad1a52eb469f95892f957713533de6175e5da190af2",
|
||||
"0x1f8d8822725e36385200c0b201249819a6e6e1e4650808b5bebc6bface7d7636",
|
||||
"0x2c5d82f66c914bafb9701589ba8cfcfb6162b0a12acf88a8d0879a0471b5f85a",
|
||||
"0x14c54148a0940bb820957f5adf3fa1134ef5c4aaa113f4646458f270e0bfbfd0",
|
||||
"0x190d33b12f986f961e10c0ee44d8b9af11be25588cad89d416118e4bf4ebe80c",
|
||||
"0x22f98aa9ce704152ac17354914ad73ed1167ae6596af510aa5b3649325e06c92",
|
||||
"0x2a7c7c9b6ce5880b9f6f228d72bf6a575a526f29c66ecceef8b753d38bba7323",
|
||||
"0x2e8186e558698ec1c67af9c14d463ffc470043c9c2988b954d75dd643f36b992",
|
||||
"0x0f57c5571e9a4eab49e2c8cf050dae948aef6ead647392273546249d1c1ff10f",
|
||||
"0x1830ee67b5fb554ad5f63d4388800e1cfe78e310697d46e43c9ce36134f72cca",
|
||||
]
|
||||
.map(|e| str_to_fr(e, 16).unwrap())
|
||||
.to_vec();
|
||||
|
||||
let expected_identity_path_index: Vec<u8> =
|
||||
vec![1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
|
||||
|
||||
assert_eq!(path_elements, expected_path_elements);
|
||||
assert_eq!(identity_path_index, expected_identity_path_index);
|
||||
|
||||
// check subtree root computation for leaf 0 for all corresponding node until the root
|
||||
let l_idx = 0;
|
||||
for n in (1..=TEST_TREE_HEIGHT).rev() {
|
||||
let idx_l = l_idx * (1 << (TEST_TREE_HEIGHT - n));
|
||||
let idx_r = (l_idx + 1) * (1 << (TEST_TREE_HEIGHT - n));
|
||||
let idx_sr = idx_l;
|
||||
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_subtree_root(n, idx_l, &mut buffer).unwrap();
|
||||
let (prev_l, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_subtree_root(n, idx_r, &mut buffer).unwrap();
|
||||
let (prev_r, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.get_subtree_root(n - 1, idx_sr, &mut buffer).unwrap();
|
||||
let (subroot, _) = bytes_le_to_fr(&buffer.into_inner());
|
||||
|
||||
let res = utils_poseidon_hash(&[prev_l, prev_r]);
|
||||
assert_eq!(res, subroot);
|
||||
}
|
||||
|
||||
// We double check that the proof computed from public API is correct
|
||||
let root_from_proof = compute_tree_root(
|
||||
&identity_secret_hash,
|
||||
&user_message_limit.into(),
|
||||
&path_elements,
|
||||
&identity_path_index,
|
||||
);
|
||||
|
||||
assert_eq!(root, root_from_proof);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_seeded_keygen() {
|
||||
let rln = RLN::default();
|
||||
|
||||
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
|
||||
|
||||
let mut input_buffer = Cursor::new(&seed_bytes);
|
||||
let mut output_buffer = Cursor::new(Vec::<u8>::new());
|
||||
|
||||
rln.seeded_key_gen(&mut input_buffer, &mut output_buffer)
|
||||
.unwrap();
|
||||
let serialized_output = output_buffer.into_inner();
|
||||
|
||||
let (identity_secret_hash, read) = bytes_le_to_fr(&serialized_output);
|
||||
let (id_commitment, _) = bytes_le_to_fr(&serialized_output[read..].to_vec());
|
||||
|
||||
// We check against expected values
|
||||
let expected_identity_secret_hash_seed_bytes = str_to_fr(
|
||||
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
|
||||
16,
|
||||
)
|
||||
.unwrap();
|
||||
let expected_id_commitment_seed_bytes = str_to_fr(
|
||||
"0xbf16d2b5c0d6f9d9d561e05bfca16a81b4b873bb063508fae360d8c74cef51f",
|
||||
16,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
identity_secret_hash,
|
||||
expected_identity_secret_hash_seed_bytes
|
||||
);
|
||||
assert_eq!(id_commitment, expected_id_commitment_seed_bytes);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_seeded_extended_keygen() {
|
||||
let rln = RLN::default();
|
||||
|
||||
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
|
||||
|
||||
let mut input_buffer = Cursor::new(&seed_bytes);
|
||||
let mut output_buffer = Cursor::new(Vec::<u8>::new());
|
||||
|
||||
rln.seeded_extended_key_gen(&mut input_buffer, &mut output_buffer)
|
||||
.unwrap();
|
||||
let serialized_output = output_buffer.into_inner();
|
||||
|
||||
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
|
||||
deserialize_identity_tuple(serialized_output);
|
||||
|
||||
// We check against expected values
|
||||
let expected_identity_trapdoor_seed_bytes = str_to_fr(
|
||||
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
|
||||
16,
|
||||
)
|
||||
.unwrap();
|
||||
let expected_identity_nullifier_seed_bytes = str_to_fr(
|
||||
"0x1f18714c7bc83b5bca9e89d404cf6f2f585bc4c0f7ed8b53742b7e2b298f50b4",
|
||||
16,
|
||||
)
|
||||
.unwrap();
|
||||
let expected_identity_secret_hash_seed_bytes = str_to_fr(
|
||||
"0x2aca62aaa7abaf3686fff2caf00f55ab9462dc12db5b5d4bcf3994e671f8e521",
|
||||
16,
|
||||
)
|
||||
.unwrap();
|
||||
let expected_id_commitment_seed_bytes = str_to_fr(
|
||||
"0x68b66aa0a8320d2e56842581553285393188714c48f9b17acd198b4f1734c5c",
|
||||
16,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(identity_trapdoor, expected_identity_trapdoor_seed_bytes);
|
||||
assert_eq!(identity_nullifier, expected_identity_nullifier_seed_bytes);
|
||||
assert_eq!(
|
||||
identity_secret_hash,
|
||||
expected_identity_secret_hash_seed_bytes
|
||||
);
|
||||
assert_eq!(id_commitment, expected_id_commitment_seed_bytes);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hash_to_field() {
|
||||
let mut rng = thread_rng();
|
||||
let signal: [u8; 32] = rng.gen();
|
||||
|
||||
let mut input_buffer = Cursor::new(&signal);
|
||||
let mut output_buffer = Cursor::new(Vec::<u8>::new());
|
||||
|
||||
public_hash(&mut input_buffer, &mut output_buffer).unwrap();
|
||||
let serialized_hash = output_buffer.into_inner();
|
||||
let (hash1, _) = bytes_le_to_fr(&serialized_hash);
|
||||
|
||||
let hash2 = hash_to_field(&signal);
|
||||
|
||||
assert_eq!(hash1, hash2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_poseidon_hash() {
|
||||
let mut rng = thread_rng();
|
||||
let number_of_inputs = rng.gen_range(1..ROUND_PARAMS.len());
|
||||
let mut inputs = Vec::with_capacity(number_of_inputs);
|
||||
for _ in 0..number_of_inputs {
|
||||
inputs.push(Fr::rand(&mut rng));
|
||||
}
|
||||
let expected_hash = utils_poseidon_hash(&inputs);
|
||||
|
||||
let mut input_buffer = Cursor::new(vec_fr_to_bytes_le(&inputs).unwrap());
|
||||
let mut output_buffer = Cursor::new(Vec::<u8>::new());
|
||||
|
||||
public_poseidon_hash(&mut input_buffer, &mut output_buffer).unwrap();
|
||||
let serialized_hash = output_buffer.into_inner();
|
||||
let (hash, _) = bytes_le_to_fr(&serialized_hash);
|
||||
|
||||
assert_eq!(hash, expected_hash);
|
||||
}
|
||||
}
|
||||
1
rln/vendor/rln
vendored
1
rln/vendor/rln
vendored
Submodule rln/vendor/rln deleted from 616ee9b0b0
@@ -1,49 +0,0 @@
|
||||
[package]
|
||||
name = "semaphore-wrapper"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[features]
|
||||
default = []
|
||||
dylib = [ "wasmer/dylib", "wasmer-engine-dylib", "wasmer-compiler-cranelift" ]
|
||||
|
||||
[dependencies]
|
||||
ark-bn254 = { version = "0.3.0" }
|
||||
ark-circom = { git = "https://github.com/gakonst/ark-circom", features=["circom-2"] }
|
||||
ark-ec = { version = "0.3.0", default-features = false, features = ["parallel"] }
|
||||
ark-groth16 = { git = "https://github.com/arkworks-rs/groth16", rev = "765817f", features = ["parallel"] }
|
||||
ark-relations = { version = "0.3.0", default-features = false }
|
||||
ark-std = { version = "0.3.0", default-features = false, features = ["parallel"] }
|
||||
color-eyre = "0.5"
|
||||
num-bigint = { version = "0.4", default-features = false, features = ["rand"] }
|
||||
once_cell = "1.8"
|
||||
primitive-types = "0.11.1"
|
||||
rand = "0.8.4"
|
||||
semaphore = { git = "https://github.com/worldcoin/semaphore-rs", rev = "d462a43"}
|
||||
serde = "1.0"
|
||||
thiserror = "1.0.0"
|
||||
wasmer = { version = "2.0" }
|
||||
|
||||
[dev-dependencies]
|
||||
rand_chacha = "0.3.1"
|
||||
serde_json = "1.0.79"
|
||||
|
||||
[build-dependencies]
|
||||
color-eyre = "0.5"
|
||||
wasmer = { version = "2.0" }
|
||||
wasmer-engine-dylib = { version = "2.2.1", optional = true }
|
||||
wasmer-compiler-cranelift = { version = "2.2.1", optional = true }
|
||||
|
||||
[profile.release]
|
||||
codegen-units = 1
|
||||
lto = true
|
||||
panic = "abort"
|
||||
opt-level = 3
|
||||
|
||||
# Compilation profile for any non-workspace member.
|
||||
# Dependencies are optimized, even in a dev build. This improves dev performance
|
||||
# while having neglible impact on incremental build times.
|
||||
[profile.dev.package."*"]
|
||||
opt-level = 3
|
||||
@@ -1,10 +0,0 @@
|
||||
# Semaphore example package
|
||||
|
||||
This is basically a wrapper around/copy of
|
||||
https://github.com/worldcoin/semaphore-rs to illustrate how e.g. RLN package
|
||||
can be structured like.
|
||||
|
||||
Goal is also to provide a basic FFI around protocol.rs, which is currently not
|
||||
in scope for that project.
|
||||
|
||||
See that project for more information.
|
||||
@@ -1,111 +0,0 @@
|
||||
// Adapted from semaphore-rs/build.rs
|
||||
use color_eyre::eyre::{eyre, Result};
|
||||
use std::{
|
||||
path::{Component, Path, PathBuf},
|
||||
process::Command,
|
||||
};
|
||||
|
||||
const ZKEY_FILE: &str = "./vendor/semaphore/build/snark/semaphore_final.zkey";
|
||||
const WASM_FILE: &str = "./vendor/semaphore/build/snark/semaphore.wasm";
|
||||
|
||||
// See <https://internals.rust-lang.org/t/path-to-lexical-absolute/14940>
|
||||
fn absolute(path: &str) -> Result<PathBuf> {
|
||||
let path = Path::new(path);
|
||||
let mut absolute = if path.is_absolute() {
|
||||
PathBuf::new()
|
||||
} else {
|
||||
std::env::current_dir()?
|
||||
};
|
||||
for component in path.components() {
|
||||
match component {
|
||||
Component::CurDir => {}
|
||||
Component::ParentDir => {
|
||||
absolute.pop();
|
||||
}
|
||||
component => absolute.push(component.as_os_str()),
|
||||
}
|
||||
}
|
||||
Ok(absolute)
|
||||
}
|
||||
|
||||
fn build_circuit() -> Result<()> {
|
||||
println!("cargo:rerun-if-changed=./vendor/semaphore");
|
||||
let run = |cmd: &[&str]| -> Result<()> {
|
||||
// TODO: Use ExitCode::exit_ok() when stable.
|
||||
Command::new(cmd[0])
|
||||
.args(cmd[1..].iter())
|
||||
.current_dir("./vendor/semaphore")
|
||||
.status()?
|
||||
.success()
|
||||
.then(|| ())
|
||||
.ok_or(eyre!("procees returned failure"))?;
|
||||
Ok(())
|
||||
};
|
||||
|
||||
// Compute absolute paths
|
||||
let zkey_file = absolute(ZKEY_FILE)?;
|
||||
let wasm_file = absolute(WASM_FILE)?;
|
||||
|
||||
// Build circuits if not exists
|
||||
// TODO: This does not rebuild if the semaphore submodule is changed.
|
||||
// NOTE: This requires npm / nodejs to be installed.
|
||||
if !(zkey_file.exists() && wasm_file.exists()) {
|
||||
run(&["npm", "install"])?;
|
||||
run(&["npm", "exec", "ts-node", "./scripts/compile-circuits.ts"])?;
|
||||
}
|
||||
assert!(zkey_file.exists());
|
||||
assert!(wasm_file.exists());
|
||||
|
||||
// Export generated paths
|
||||
println!("cargo:rustc-env=BUILD_RS_ZKEY_FILE={}", zkey_file.display());
|
||||
println!("cargo:rustc-env=BUILD_RS_WASM_FILE={}", wasm_file.display());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(feature = "dylib")]
|
||||
fn build_dylib() -> Result<()> {
|
||||
use enumset::enum_set;
|
||||
use std::{env, str::FromStr};
|
||||
use wasmer::{Module, Store, Target, Triple};
|
||||
use wasmer_compiler_cranelift::Cranelift;
|
||||
use wasmer_engine_dylib::Dylib;
|
||||
|
||||
let wasm_file = absolute(WASM_FILE)?;
|
||||
assert!(wasm_file.exists());
|
||||
|
||||
let out_dir = env::var("OUT_DIR")?;
|
||||
let out_dir = Path::new(&out_dir).to_path_buf();
|
||||
let dylib_file = out_dir.join("semaphore.dylib");
|
||||
println!(
|
||||
"cargo:rustc-env=CIRCUIT_WASM_DYLIB={}",
|
||||
dylib_file.display()
|
||||
);
|
||||
|
||||
if dylib_file.exists() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Create a WASM engine for the target that can compile
|
||||
let triple = Triple::from_str(&env::var("TARGET")?).map_err(|e| eyre!(e))?;
|
||||
let cpu_features = enum_set!();
|
||||
let target = Target::new(triple, cpu_features);
|
||||
let compiler_config = Cranelift::default();
|
||||
let engine = Dylib::new(compiler_config).target(target).engine();
|
||||
|
||||
// Compile the WASM module
|
||||
let store = Store::new(&engine);
|
||||
let module = Module::from_file(&store, &wasm_file)?;
|
||||
module.serialize_to_file(&dylib_file)?;
|
||||
assert!(dylib_file.exists());
|
||||
println!("cargo:warning=Circuit dylib is in {}", dylib_file.display());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
build_circuit()?;
|
||||
#[cfg(feature = "dylib")]
|
||||
build_dylib()?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,79 +0,0 @@
|
||||
// Adapted from semaphore-rs/src/circuit.rs
|
||||
use ark_bn254::{Bn254, Fr};
|
||||
use ark_circom::{read_zkey, WitnessCalculator};
|
||||
use ark_groth16::ProvingKey;
|
||||
use ark_relations::r1cs::ConstraintMatrices;
|
||||
use core::include_bytes;
|
||||
use once_cell::sync::{Lazy, OnceCell};
|
||||
use std::{io::Cursor, sync::Mutex};
|
||||
use wasmer::{Module, Store};
|
||||
|
||||
#[cfg(feature = "dylib")]
|
||||
use std::{env, path::Path};
|
||||
#[cfg(feature = "dylib")]
|
||||
use wasmer::Dylib;
|
||||
|
||||
const ZKEY_BYTES: &[u8] = include_bytes!(env!("BUILD_RS_ZKEY_FILE"));
|
||||
|
||||
#[cfg(not(feature = "dylib"))]
|
||||
const WASM: &[u8] = include_bytes!(env!("BUILD_RS_WASM_FILE"));
|
||||
|
||||
static ZKEY: Lazy<(ProvingKey<Bn254>, ConstraintMatrices<Fr>)> = Lazy::new(|| {
|
||||
let mut reader = Cursor::new(ZKEY_BYTES);
|
||||
read_zkey(&mut reader).expect("zkey should be valid")
|
||||
});
|
||||
|
||||
static WITNESS_CALCULATOR: OnceCell<Mutex<WitnessCalculator>> = OnceCell::new();
|
||||
|
||||
/// Initialize the library.
|
||||
#[cfg(feature = "dylib")]
|
||||
pub fn initialize(dylib_path: &Path) {
|
||||
WITNESS_CALCULATOR
|
||||
.set(from_dylib(dylib_path))
|
||||
.expect("Failed to initialize witness calculator");
|
||||
|
||||
// Force init of ZKEY
|
||||
Lazy::force(&ZKEY);
|
||||
}
|
||||
|
||||
#[cfg(feature = "dylib")]
|
||||
fn from_dylib(path: &Path) -> Mutex<WitnessCalculator> {
|
||||
let store = Store::new(&Dylib::headless().engine());
|
||||
// The module must be exported using [`Module::serialize`].
|
||||
let module = unsafe {
|
||||
Module::deserialize_from_file(&store, path).expect("Failed to load wasm dylib module")
|
||||
};
|
||||
let result =
|
||||
WitnessCalculator::from_module(module).expect("Failed to create witness calculator");
|
||||
Mutex::new(result)
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn zkey() -> &'static (ProvingKey<Bn254>, ConstraintMatrices<Fr>) {
|
||||
&*ZKEY
|
||||
}
|
||||
|
||||
#[cfg(feature = "dylib")]
|
||||
#[must_use]
|
||||
pub fn witness_calculator() -> &'static Mutex<WitnessCalculator> {
|
||||
WITNESS_CALCULATOR.get_or_init(|| {
|
||||
let path = env::var("CIRCUIT_WASM_DYLIB").expect(
|
||||
"Semaphore-rs is not initialized. The library needs to be initialized before use when \
|
||||
build with the `cdylib` feature. You can initialize by calling `initialize` or \
|
||||
seting the `CIRCUIT_WASM_DYLIB` environment variable.",
|
||||
);
|
||||
from_dylib(Path::new(&path))
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "dylib"))]
|
||||
#[must_use]
|
||||
pub fn witness_calculator() -> &'static Mutex<WitnessCalculator> {
|
||||
WITNESS_CALCULATOR.get_or_init(|| {
|
||||
let store = Store::default();
|
||||
let module = Module::from_binary(&store, WASM).expect("wasm should be valid");
|
||||
let result =
|
||||
WitnessCalculator::from_module(module).expect("Failed to create witness calculator");
|
||||
Mutex::new(result)
|
||||
})
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
#![allow(clippy::multiple_crate_versions)]
|
||||
|
||||
pub mod circuit;
|
||||
pub mod protocol;
|
||||
|
||||
#[cfg(feature = "dylib")]
|
||||
pub use circuit::initialize;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::protocol::*;
|
||||
use semaphore::{hash_to_field, identity::Identity, poseidon_tree::PoseidonTree, Field};
|
||||
|
||||
#[test]
|
||||
fn test_semaphore() {
|
||||
// generate identity
|
||||
let id = Identity::from_seed(b"secret");
|
||||
|
||||
// generate merkle tree
|
||||
let leaf = Field::from(0);
|
||||
let mut tree = PoseidonTree::new(21, leaf);
|
||||
tree.set(0, id.commitment());
|
||||
|
||||
let merkle_proof = tree.proof(0).expect("proof should exist");
|
||||
let root = tree.root().into();
|
||||
|
||||
// change signal and external_nullifier here
|
||||
let signal_hash = hash_to_field(b"xxx");
|
||||
let external_nullifier_hash = hash_to_field(b"appId");
|
||||
|
||||
let nullifier_hash = generate_nullifier_hash(&id, external_nullifier_hash);
|
||||
|
||||
let proof =
|
||||
generate_proof(&id, &merkle_proof, external_nullifier_hash, signal_hash).unwrap();
|
||||
|
||||
let success = verify_proof(
|
||||
root,
|
||||
nullifier_hash,
|
||||
signal_hash,
|
||||
external_nullifier_hash,
|
||||
&proof,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert!(success);
|
||||
}
|
||||
}
|
||||
@@ -1,291 +0,0 @@
|
||||
// Adapted from semaphore-rs/src/protocol.rs
|
||||
// For illustration purposes only as an example protocol
|
||||
|
||||
// Private module
|
||||
use crate::circuit::{witness_calculator, zkey};
|
||||
|
||||
use ark_bn254::{Bn254, Parameters};
|
||||
use ark_circom::CircomReduction;
|
||||
use ark_ec::bn::Bn;
|
||||
use ark_groth16::{
|
||||
create_proof_with_reduction_and_matrices, prepare_verifying_key, Proof as ArkProof,
|
||||
};
|
||||
use ark_relations::r1cs::SynthesisError;
|
||||
use ark_std::UniformRand;
|
||||
use color_eyre::Result;
|
||||
use primitive_types::U256;
|
||||
use rand::{thread_rng, Rng};
|
||||
use semaphore::{
|
||||
identity::Identity,
|
||||
merkle_tree::{self, Branch},
|
||||
poseidon_hash,
|
||||
poseidon_tree::PoseidonHash,
|
||||
Field,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::time::Instant;
|
||||
use thiserror::Error;
|
||||
|
||||
// Matches the private G1Tup type in ark-circom.
|
||||
pub type G1 = (U256, U256);
|
||||
|
||||
// Matches the private G2Tup type in ark-circom.
|
||||
pub type G2 = ([U256; 2], [U256; 2]);
|
||||
|
||||
/// Wrap a proof object so we have serde support
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct Proof(G1, G2, G1);
|
||||
|
||||
impl From<ArkProof<Bn<Parameters>>> for Proof {
|
||||
fn from(proof: ArkProof<Bn<Parameters>>) -> Self {
|
||||
let proof = ark_circom::ethereum::Proof::from(proof);
|
||||
let (a, b, c) = proof.as_tuple();
|
||||
Self(a, b, c)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Proof> for ArkProof<Bn<Parameters>> {
|
||||
fn from(proof: Proof) -> Self {
|
||||
let eth_proof = ark_circom::ethereum::Proof {
|
||||
a: ark_circom::ethereum::G1 {
|
||||
x: proof.0 .0,
|
||||
y: proof.0 .1,
|
||||
},
|
||||
#[rustfmt::skip] // Rustfmt inserts some confusing spaces
|
||||
b: ark_circom::ethereum::G2 {
|
||||
// The order of coefficients is flipped.
|
||||
x: [proof.1.0[1], proof.1.0[0]],
|
||||
y: [proof.1.1[1], proof.1.1[0]],
|
||||
},
|
||||
c: ark_circom::ethereum::G1 {
|
||||
x: proof.2 .0,
|
||||
y: proof.2 .1,
|
||||
},
|
||||
};
|
||||
eth_proof.into()
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper to merkle proof into a bigint vector
|
||||
/// TODO: we should create a From trait for this
|
||||
fn merkle_proof_to_vec(proof: &merkle_tree::Proof<PoseidonHash>) -> Vec<Field> {
|
||||
proof
|
||||
.0
|
||||
.iter()
|
||||
.map(|x| match x {
|
||||
Branch::Left(value) | Branch::Right(value) => *value,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Generates the nullifier hash
|
||||
#[must_use]
|
||||
pub fn generate_nullifier_hash(identity: &Identity, external_nullifier: Field) -> Field {
|
||||
poseidon_hash(&[external_nullifier, identity.nullifier])
|
||||
}
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum ProofError {
|
||||
#[error("Error reading circuit key: {0}")]
|
||||
CircuitKeyError(#[from] std::io::Error),
|
||||
#[error("Error producing witness: {0}")]
|
||||
WitnessError(color_eyre::Report),
|
||||
#[error("Error producing proof: {0}")]
|
||||
SynthesisError(#[from] SynthesisError),
|
||||
}
|
||||
|
||||
/// Generates a semaphore proof
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns a [`ProofError`] if proving fails.
|
||||
pub fn generate_proof(
|
||||
identity: &Identity,
|
||||
merkle_proof: &merkle_tree::Proof<PoseidonHash>,
|
||||
external_nullifier_hash: Field,
|
||||
signal_hash: Field,
|
||||
) -> Result<Proof, ProofError> {
|
||||
generate_proof_rng(
|
||||
identity,
|
||||
merkle_proof,
|
||||
external_nullifier_hash,
|
||||
signal_hash,
|
||||
&mut thread_rng(),
|
||||
)
|
||||
}
|
||||
|
||||
/// Generates a semaphore proof from entropy
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns a [`ProofError`] if proving fails.
|
||||
pub fn generate_proof_rng(
|
||||
identity: &Identity,
|
||||
merkle_proof: &merkle_tree::Proof<PoseidonHash>,
|
||||
external_nullifier_hash: Field,
|
||||
signal_hash: Field,
|
||||
rng: &mut impl Rng,
|
||||
) -> Result<Proof, ProofError> {
|
||||
generate_proof_rs(
|
||||
identity,
|
||||
merkle_proof,
|
||||
external_nullifier_hash,
|
||||
signal_hash,
|
||||
ark_bn254::Fr::rand(rng),
|
||||
ark_bn254::Fr::rand(rng),
|
||||
)
|
||||
}
|
||||
|
||||
fn generate_proof_rs(
|
||||
identity: &Identity,
|
||||
merkle_proof: &merkle_tree::Proof<PoseidonHash>,
|
||||
external_nullifier_hash: Field,
|
||||
signal_hash: Field,
|
||||
r: ark_bn254::Fr,
|
||||
s: ark_bn254::Fr,
|
||||
) -> Result<Proof, ProofError> {
|
||||
let inputs = [
|
||||
("identityNullifier", vec![identity.nullifier]),
|
||||
("identityTrapdoor", vec![identity.trapdoor]),
|
||||
("treePathIndices", merkle_proof.path_index()),
|
||||
("treeSiblings", merkle_proof_to_vec(merkle_proof)),
|
||||
("externalNullifier", vec![external_nullifier_hash]),
|
||||
("signalHash", vec![signal_hash]),
|
||||
];
|
||||
let inputs = inputs.into_iter().map(|(name, values)| {
|
||||
(
|
||||
name.to_string(),
|
||||
values.iter().copied().map(Into::into).collect::<Vec<_>>(),
|
||||
)
|
||||
});
|
||||
|
||||
let now = Instant::now();
|
||||
|
||||
let full_assignment = witness_calculator()
|
||||
.lock()
|
||||
.expect("witness_calculator mutex should not get poisoned")
|
||||
.calculate_witness_element::<Bn254, _>(inputs, false)
|
||||
.map_err(ProofError::WitnessError)?;
|
||||
|
||||
println!("witness generation took: {:.2?}", now.elapsed());
|
||||
|
||||
let now = Instant::now();
|
||||
let zkey = zkey();
|
||||
let ark_proof = create_proof_with_reduction_and_matrices::<_, CircomReduction>(
|
||||
&zkey.0,
|
||||
r,
|
||||
s,
|
||||
&zkey.1,
|
||||
zkey.1.num_instance_variables,
|
||||
zkey.1.num_constraints,
|
||||
full_assignment.as_slice(),
|
||||
)?;
|
||||
let proof = ark_proof.into();
|
||||
println!("proof generation took: {:.2?}", now.elapsed());
|
||||
|
||||
Ok(proof)
|
||||
}
|
||||
|
||||
/// Verifies a given semaphore proof
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns a [`ProofError`] if verifying fails. Verification failure does not
|
||||
/// necessarily mean the proof is incorrect.
|
||||
pub fn verify_proof(
|
||||
root: Field,
|
||||
nullifier_hash: Field,
|
||||
signal_hash: Field,
|
||||
external_nullifier_hash: Field,
|
||||
proof: &Proof,
|
||||
) -> Result<bool, ProofError> {
|
||||
let zkey = zkey();
|
||||
let pvk = prepare_verifying_key(&zkey.0.vk);
|
||||
|
||||
let public_inputs = [
|
||||
root.into(),
|
||||
nullifier_hash.into(),
|
||||
signal_hash.into(),
|
||||
external_nullifier_hash.into(),
|
||||
];
|
||||
let ark_proof = (*proof).into();
|
||||
let result = ark_groth16::verify_proof(&pvk, &ark_proof, &public_inputs[..])?;
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use rand::SeedableRng as _;
|
||||
use rand_chacha::ChaChaRng;
|
||||
use semaphore::{hash_to_field, poseidon_tree::PoseidonTree};
|
||||
use serde_json::json;
|
||||
|
||||
fn arb_proof(seed: u64) -> Proof {
|
||||
// Deterministic randomness for testing
|
||||
let mut rng = ChaChaRng::seed_from_u64(seed);
|
||||
|
||||
// generate identity
|
||||
let seed: [u8; 16] = rng.gen();
|
||||
let id = Identity::from_seed(&seed);
|
||||
|
||||
// generate merkle tree
|
||||
let leaf = Field::from(0);
|
||||
let mut tree = PoseidonTree::new(21, leaf);
|
||||
tree.set(0, id.commitment());
|
||||
|
||||
let merkle_proof = tree.proof(0).expect("proof should exist");
|
||||
|
||||
let external_nullifier: [u8; 16] = rng.gen();
|
||||
let external_nullifier_hash = hash_to_field(&external_nullifier);
|
||||
|
||||
let signal: [u8; 16] = rng.gen();
|
||||
let signal_hash = hash_to_field(&signal);
|
||||
|
||||
generate_proof_rng(
|
||||
&id,
|
||||
&merkle_proof,
|
||||
external_nullifier_hash,
|
||||
signal_hash,
|
||||
&mut rng,
|
||||
)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_proof_cast_roundtrip() {
|
||||
let proof = arb_proof(123);
|
||||
let ark_proof: ArkProof<Bn<Parameters>> = proof.into();
|
||||
let result: Proof = ark_proof.into();
|
||||
assert_eq!(proof, result);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_proof_serialize() {
|
||||
let proof = arb_proof(456);
|
||||
let json = serde_json::to_value(&proof).unwrap();
|
||||
assert_eq!(
|
||||
json,
|
||||
json!([
|
||||
[
|
||||
"0x249ae469686987ee9368da60dd177a8c42891c02f5760e955e590c79d55cfab2",
|
||||
"0xf22e25870f49388459d388afb24dcf6ec11bb2d4def1e2ec26d6e42f373aad8"
|
||||
],
|
||||
[
|
||||
[
|
||||
"0x17bd25dbd7436c30ea5b8a3a47aadf11ed646c4b25cc14a84ff8cbe0252ff1f8",
|
||||
"0x1c140668c56688367416534d57b4a14e5a825efdd5e121a6a2099f6dc4cd277b"
|
||||
],
|
||||
[
|
||||
"0x26a8524759d969ea0682a092cf7a551697d81962d6c998f543f81e52d83e05e1",
|
||||
"0x273eb3f796fd1807b9df9c6d769d983e3dabdc61677b75d48bb7691303b2c8dd"
|
||||
]
|
||||
],
|
||||
[
|
||||
"0x62715c53a0eb4c46dbb5f73f1fd7449b9c63d37c1ece65debc39b472065a90f",
|
||||
"0x114f7becc66f1cd7a8b01c89db8233622372fc0b6fc037c4313bca41e2377fd9"
|
||||
]
|
||||
])
|
||||
);
|
||||
}
|
||||
}
|
||||
1
semaphore/vendor/semaphore
vendored
1
semaphore/vendor/semaphore
vendored
Submodule semaphore/vendor/semaphore deleted from 5186a940ff
38
utils/Cargo.toml
Normal file
38
utils/Cargo.toml
Normal file
@@ -0,0 +1,38 @@
|
||||
[package]
|
||||
name = "zerokit_utils"
|
||||
version = "0.5.0"
|
||||
edition = "2021"
|
||||
license = "MIT OR Apache-2.0"
|
||||
description = "Various utilities for Zerokit"
|
||||
documentation = "https://github.com/vacp2p/zerokit"
|
||||
homepage = "https://vac.dev"
|
||||
repository = "https://github.com/vacp2p/zerokit"
|
||||
|
||||
[lib]
|
||||
bench = false
|
||||
|
||||
[dependencies]
|
||||
ark-ff = { version = "=0.4.1", default-features = false, features = ["asm"] }
|
||||
num-bigint = { version = "=0.4.3", default-features = false, features = ["rand"] }
|
||||
color-eyre = "=0.6.2"
|
||||
pmtree = { package = "vacp2p_pmtree", version = "=2.0.2", optional = true}
|
||||
sled = "=0.34.7"
|
||||
serde = "=1.0.163"
|
||||
lazy_static = "1.4.0"
|
||||
hex = "0.4"
|
||||
|
||||
[dev-dependencies]
|
||||
ark-bn254 = "=0.4.0"
|
||||
num-traits = "=0.2.15"
|
||||
hex-literal = "=0.3.4"
|
||||
tiny-keccak = { version = "=2.0.2", features = ["keccak"] }
|
||||
criterion = { version = "=0.4.0", features = ["html_reports"] }
|
||||
|
||||
[features]
|
||||
default = ["parallel"]
|
||||
parallel = ["ark-ff/parallel"]
|
||||
pmtree-ft = ["pmtree"]
|
||||
|
||||
[[bench]]
|
||||
name = "merkle_tree_benchmark"
|
||||
harness = false
|
||||
11
utils/Makefile.toml
Normal file
11
utils/Makefile.toml
Normal file
@@ -0,0 +1,11 @@
|
||||
[tasks.build]
|
||||
command = "cargo"
|
||||
args = ["build", "--release"]
|
||||
|
||||
[tasks.test]
|
||||
command = "cargo"
|
||||
args = ["test", "--release"]
|
||||
|
||||
[tasks.bench]
|
||||
command = "cargo"
|
||||
args = ["bench"]
|
||||
15
utils/README.md
Normal file
15
utils/README.md
Normal file
@@ -0,0 +1,15 @@
|
||||
# Utils crate
|
||||
|
||||
## Building
|
||||
|
||||
1. `cargo build`
|
||||
|
||||
## Testing
|
||||
|
||||
1. `cargo test`
|
||||
|
||||
## Benchmarking
|
||||
|
||||
1. `cargo bench`
|
||||
|
||||
To view the results of the benchmark, open the `target/criterion/report/index.html` file generated after the bench
|
||||
161
utils/benches/merkle_tree_benchmark.rs
Normal file
161
utils/benches/merkle_tree_benchmark.rs
Normal file
@@ -0,0 +1,161 @@
|
||||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
use hex_literal::hex;
|
||||
use lazy_static::lazy_static;
|
||||
use std::{fmt::Display, str::FromStr};
|
||||
use tiny_keccak::{Hasher as _, Keccak};
|
||||
use zerokit_utils::{
|
||||
FullMerkleConfig, FullMerkleTree, Hasher, OptimalMerkleConfig, OptimalMerkleTree,
|
||||
ZerokitMerkleTree,
|
||||
};
|
||||
|
||||
#[derive(Clone, Copy, Eq, PartialEq)]
|
||||
struct Keccak256;
|
||||
|
||||
#[derive(Clone, Copy, Eq, PartialEq, Debug, Default)]
|
||||
struct TestFr([u8; 32]);
|
||||
|
||||
impl Hasher for Keccak256 {
|
||||
type Fr = TestFr;
|
||||
|
||||
fn default_leaf() -> Self::Fr {
|
||||
TestFr([0; 32])
|
||||
}
|
||||
|
||||
fn hash(inputs: &[Self::Fr]) -> Self::Fr {
|
||||
let mut output = [0; 32];
|
||||
let mut hasher = Keccak::v256();
|
||||
for element in inputs {
|
||||
hasher.update(element.0.as_slice());
|
||||
}
|
||||
hasher.finalize(&mut output);
|
||||
TestFr(output)
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for TestFr {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", String::from_utf8_lossy(self.0.as_slice()))
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for TestFr {
|
||||
type Err = std::string::FromUtf8Error;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
Ok(TestFr(s.as_bytes().try_into().unwrap()))
|
||||
}
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref LEAVES: [TestFr; 4] = [
|
||||
hex!("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
hex!("0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
hex!("0000000000000000000000000000000000000000000000000000000000000003"),
|
||||
hex!("0000000000000000000000000000000000000000000000000000000000000004"),
|
||||
]
|
||||
.map(TestFr);
|
||||
}
|
||||
|
||||
pub fn optimal_merkle_tree_benchmark(c: &mut Criterion) {
|
||||
let mut tree =
|
||||
OptimalMerkleTree::<Keccak256>::new(2, TestFr([0; 32]), OptimalMerkleConfig::default())
|
||||
.unwrap();
|
||||
|
||||
c.bench_function("OptimalMerkleTree::set", |b| {
|
||||
b.iter(|| {
|
||||
tree.set(0, LEAVES[0]).unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("OptimalMerkleTree::delete", |b| {
|
||||
b.iter(|| {
|
||||
tree.delete(0).unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("OptimalMerkleTree::override_range", |b| {
|
||||
b.iter(|| {
|
||||
tree.override_range(0, *LEAVES, [0, 1, 2, 3]).unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("OptimalMerkleTree::compute_root", |b| {
|
||||
b.iter(|| {
|
||||
tree.compute_root().unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("OptimalMerkleTree::get", |b| {
|
||||
b.iter(|| {
|
||||
tree.get(0).unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
// check intermediate node getter which required additional computation of sub root index
|
||||
c.bench_function("OptimalMerkleTree::get_subtree_root", |b| {
|
||||
b.iter(|| {
|
||||
tree.get_subtree_root(1, 0).unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("OptimalMerkleTree::get_empty_leaves_indices", |b| {
|
||||
b.iter(|| {
|
||||
tree.get_empty_leaves_indices();
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
pub fn full_merkle_tree_benchmark(c: &mut Criterion) {
|
||||
let mut tree =
|
||||
FullMerkleTree::<Keccak256>::new(2, TestFr([0; 32]), FullMerkleConfig::default()).unwrap();
|
||||
|
||||
c.bench_function("FullMerkleTree::set", |b| {
|
||||
b.iter(|| {
|
||||
tree.set(0, LEAVES[0]).unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("FullMerkleTree::delete", |b| {
|
||||
b.iter(|| {
|
||||
tree.delete(0).unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("FullMerkleTree::override_range", |b| {
|
||||
b.iter(|| {
|
||||
tree.override_range(0, *LEAVES, [0, 1, 2, 3]).unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("FullMerkleTree::compute_root", |b| {
|
||||
b.iter(|| {
|
||||
tree.compute_root().unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("FullMerkleTree::get", |b| {
|
||||
b.iter(|| {
|
||||
tree.get(0).unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
// check intermediate node getter which required additional computation of sub root index
|
||||
c.bench_function("FullMerkleTree::get_subtree_root", |b| {
|
||||
b.iter(|| {
|
||||
tree.get_subtree_root(1, 0).unwrap();
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("FullMerkleTree::get_empty_leaves_indices", |b| {
|
||||
b.iter(|| {
|
||||
tree.get_empty_leaves_indices();
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(
|
||||
benches,
|
||||
optimal_merkle_tree_benchmark,
|
||||
full_merkle_tree_benchmark
|
||||
);
|
||||
criterion_main!(benches);
|
||||
10
utils/src/lib.rs
Normal file
10
utils/src/lib.rs
Normal file
@@ -0,0 +1,10 @@
|
||||
pub mod poseidon;
|
||||
pub use self::poseidon::*;
|
||||
|
||||
pub mod merkle_tree;
|
||||
pub use self::merkle_tree::*;
|
||||
|
||||
#[cfg(feature = "pmtree-ft")]
|
||||
pub mod pm_tree;
|
||||
#[cfg(feature = "pmtree-ft")]
|
||||
pub use self::pm_tree::*;
|
||||
415
utils/src/merkle_tree/full_merkle_tree.rs
Normal file
415
utils/src/merkle_tree/full_merkle_tree.rs
Normal file
@@ -0,0 +1,415 @@
|
||||
use crate::merkle_tree::{FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree};
|
||||
use color_eyre::{Report, Result};
|
||||
use std::{
|
||||
cmp::max,
|
||||
fmt::Debug,
|
||||
iter::{once, repeat, successors},
|
||||
str::FromStr,
|
||||
};
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
/// Full Merkle Tree Implementation
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
/// Merkle tree with all leaf and intermediate hashes stored
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub struct FullMerkleTree<H: Hasher> {
|
||||
/// The depth of the tree, i.e. the number of levels from leaf to root
|
||||
depth: usize,
|
||||
|
||||
/// The nodes cached from the empty part of the tree (where leaves are set to default).
|
||||
/// Since the rightmost part of the tree is usually changed much later than its creation,
|
||||
/// we can prove accumulation of elements in the leftmost part, with no need to initialize the full tree
|
||||
/// and by caching few intermediate nodes to the root computed from default leaves
|
||||
cached_nodes: Vec<H::Fr>,
|
||||
|
||||
/// The tree nodes
|
||||
nodes: Vec<H::Fr>,
|
||||
|
||||
/// The indices of leaves which are set into zero upto next_index.
|
||||
/// Set to 0 if the leaf is empty and set to 1 in otherwise.
|
||||
cached_leaves_indices: Vec<u8>,
|
||||
|
||||
// The next available (i.e., never used) tree index. Equivalently, the number of leaves added to the tree
|
||||
// (deletions leave next_index unchanged)
|
||||
next_index: usize,
|
||||
|
||||
// metadata that an application may use to store additional information
|
||||
metadata: Vec<u8>,
|
||||
}
|
||||
|
||||
/// Element of a Merkle proof
|
||||
#[derive(Clone, Copy, PartialEq, Eq)]
|
||||
pub enum FullMerkleBranch<H: Hasher> {
|
||||
/// Left branch taken, value is the right sibling hash.
|
||||
Left(H::Fr),
|
||||
|
||||
/// Right branch taken, value is the left sibling hash.
|
||||
Right(H::Fr),
|
||||
}
|
||||
|
||||
/// Merkle proof path, bottom to top.
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
pub struct FullMerkleProof<H: Hasher>(pub Vec<FullMerkleBranch<H>>);
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct FullMerkleConfig(());
|
||||
|
||||
impl FromStr for FullMerkleConfig {
|
||||
type Err = Report;
|
||||
|
||||
fn from_str(_s: &str) -> Result<Self> {
|
||||
Ok(FullMerkleConfig::default())
|
||||
}
|
||||
}
|
||||
|
||||
/// Implementations
|
||||
impl<H: Hasher> ZerokitMerkleTree for FullMerkleTree<H>
|
||||
where
|
||||
H: Hasher,
|
||||
{
|
||||
type Proof = FullMerkleProof<H>;
|
||||
type Hasher = H;
|
||||
type Config = FullMerkleConfig;
|
||||
|
||||
fn default(depth: usize) -> Result<Self> {
|
||||
FullMerkleTree::<H>::new(depth, Self::Hasher::default_leaf(), Self::Config::default())
|
||||
}
|
||||
|
||||
/// Creates a new `MerkleTree`
|
||||
/// depth - the height of the tree made only of hash nodes. 2^depth is the maximum number of leaves hash nodes
|
||||
fn new(depth: usize, initial_leaf: FrOf<Self::Hasher>, _config: Self::Config) -> Result<Self> {
|
||||
// Compute cache node values, leaf to root
|
||||
let cached_nodes = successors(Some(initial_leaf), |prev| Some(H::hash(&[*prev, *prev])))
|
||||
.take(depth + 1)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// Compute node values
|
||||
let nodes = cached_nodes
|
||||
.iter()
|
||||
.rev()
|
||||
.enumerate()
|
||||
.flat_map(|(levels, hash)| repeat(hash).take(1 << levels))
|
||||
.cloned()
|
||||
.collect::<Vec<_>>();
|
||||
debug_assert!(nodes.len() == (1 << (depth + 1)) - 1);
|
||||
|
||||
let next_index = 0;
|
||||
|
||||
Ok(Self {
|
||||
depth,
|
||||
cached_nodes,
|
||||
nodes,
|
||||
cached_leaves_indices: vec![0; 1 << depth],
|
||||
next_index,
|
||||
metadata: Vec::new(),
|
||||
})
|
||||
}
|
||||
|
||||
fn close_db_connection(&mut self) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Returns the depth of the tree
|
||||
fn depth(&self) -> usize {
|
||||
self.depth
|
||||
}
|
||||
|
||||
// Returns the capacity of the tree, i.e. the maximum number of accumulatable leaves
|
||||
fn capacity(&self) -> usize {
|
||||
1 << self.depth
|
||||
}
|
||||
|
||||
// Returns the total number of leaves set
|
||||
fn leaves_set(&self) -> usize {
|
||||
self.next_index
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
// Returns the root of the tree
|
||||
fn root(&self) -> FrOf<Self::Hasher> {
|
||||
self.nodes[0]
|
||||
}
|
||||
|
||||
// Sets a leaf at the specified tree index
|
||||
fn set(&mut self, leaf: usize, hash: FrOf<Self::Hasher>) -> Result<()> {
|
||||
self.set_range(leaf, once(hash))?;
|
||||
self.next_index = max(self.next_index, leaf + 1);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Get a leaf from the specified tree index
|
||||
fn get(&self, leaf: usize) -> Result<FrOf<Self::Hasher>> {
|
||||
if leaf >= self.capacity() {
|
||||
return Err(Report::msg("leaf index out of bounds"));
|
||||
}
|
||||
Ok(self.nodes[self.capacity() + leaf - 1])
|
||||
}
|
||||
|
||||
fn get_subtree_root(&self, n: usize, index: usize) -> Result<H::Fr> {
|
||||
if n > self.depth() {
|
||||
return Err(Report::msg("level exceeds depth size"));
|
||||
}
|
||||
if index >= self.capacity() {
|
||||
return Err(Report::msg("index exceeds set size"));
|
||||
}
|
||||
if n == 0 {
|
||||
Ok(self.root())
|
||||
} else if n == self.depth {
|
||||
self.get(index)
|
||||
} else {
|
||||
let mut idx = self.capacity() + index - 1;
|
||||
let mut nd = self.depth;
|
||||
loop {
|
||||
let parent = self.parent(idx).unwrap();
|
||||
nd -= 1;
|
||||
if nd == n {
|
||||
return Ok(self.nodes[parent]);
|
||||
} else {
|
||||
idx = parent;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
fn get_empty_leaves_indices(&self) -> Vec<usize> {
|
||||
self.cached_leaves_indices
|
||||
.iter()
|
||||
.take(self.next_index)
|
||||
.enumerate()
|
||||
.filter(|&(_, &v)| v == 0u8)
|
||||
.map(|(idx, _)| idx)
|
||||
.collect()
|
||||
}
|
||||
|
||||
// Sets tree nodes, starting from start index
|
||||
// Function proper of FullMerkleTree implementation
|
||||
fn set_range<I: IntoIterator<Item = FrOf<Self::Hasher>>>(
|
||||
&mut self,
|
||||
start: usize,
|
||||
hashes: I,
|
||||
) -> Result<()> {
|
||||
let index = self.capacity() + start - 1;
|
||||
let mut count = 0;
|
||||
// first count number of hashes, and check that they fit in the tree
|
||||
// then insert into the tree
|
||||
let hashes = hashes.into_iter().collect::<Vec<_>>();
|
||||
if hashes.len() + start > self.capacity() {
|
||||
return Err(Report::msg("provided hashes do not fit in the tree"));
|
||||
}
|
||||
hashes.into_iter().for_each(|hash| {
|
||||
self.nodes[index + count] = hash;
|
||||
self.cached_leaves_indices[start + count] = 1;
|
||||
count += 1;
|
||||
});
|
||||
if count != 0 {
|
||||
self.update_nodes(index, index + (count - 1))?;
|
||||
self.next_index = max(self.next_index, start + count);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn override_range<I, J>(&mut self, start: usize, leaves: I, indices: J) -> Result<()>
|
||||
where
|
||||
I: IntoIterator<Item = FrOf<Self::Hasher>>,
|
||||
J: IntoIterator<Item = usize>,
|
||||
{
|
||||
let indices = indices.into_iter().collect::<Vec<_>>();
|
||||
let min_index = *indices.first().unwrap();
|
||||
let leaves_vec = leaves.into_iter().collect::<Vec<_>>();
|
||||
|
||||
let max_index = start + leaves_vec.len();
|
||||
|
||||
let mut set_values = vec![Self::Hasher::default_leaf(); max_index - min_index];
|
||||
|
||||
for i in min_index..start {
|
||||
if !indices.contains(&i) {
|
||||
let value = self.get(i)?;
|
||||
set_values[i - min_index] = value;
|
||||
}
|
||||
}
|
||||
|
||||
for i in 0..leaves_vec.len() {
|
||||
set_values[start - min_index + i] = leaves_vec[i];
|
||||
}
|
||||
|
||||
for i in indices {
|
||||
self.cached_leaves_indices[i] = 0;
|
||||
}
|
||||
|
||||
self.set_range(start, set_values)
|
||||
.map_err(|e| Report::msg(e.to_string()))
|
||||
}
|
||||
|
||||
// Sets a leaf at the next available index
|
||||
fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<()> {
|
||||
self.set(self.next_index, leaf)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
|
||||
fn delete(&mut self, index: usize) -> Result<()> {
|
||||
// We reset the leaf only if we previously set a leaf at that index
|
||||
if index < self.next_index {
|
||||
self.set(index, H::default_leaf())?;
|
||||
self.cached_leaves_indices[index] = 0;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Computes a merkle proof the the leaf at the specified index
|
||||
fn proof(&self, leaf: usize) -> Result<FullMerkleProof<H>> {
|
||||
if leaf >= self.capacity() {
|
||||
return Err(Report::msg("index exceeds set size"));
|
||||
}
|
||||
let mut index = self.capacity() + leaf - 1;
|
||||
let mut path = Vec::with_capacity(self.depth + 1);
|
||||
while let Some(parent) = self.parent(index) {
|
||||
// Add proof for node at index to parent
|
||||
path.push(match index & 1 {
|
||||
1 => FullMerkleBranch::Left(self.nodes[index + 1]),
|
||||
0 => FullMerkleBranch::Right(self.nodes[index - 1]),
|
||||
_ => unreachable!(),
|
||||
});
|
||||
index = parent;
|
||||
}
|
||||
Ok(FullMerkleProof(path))
|
||||
}
|
||||
|
||||
// Verifies a Merkle proof with respect to the input leaf and the tree root
|
||||
fn verify(&self, hash: &FrOf<Self::Hasher>, proof: &FullMerkleProof<H>) -> Result<bool> {
|
||||
Ok(proof.compute_root_from(hash) == self.root())
|
||||
}
|
||||
|
||||
fn compute_root(&mut self) -> Result<FrOf<Self::Hasher>> {
|
||||
Ok(self.root())
|
||||
}
|
||||
|
||||
fn set_metadata(&mut self, metadata: &[u8]) -> Result<()> {
|
||||
self.metadata = metadata.to_vec();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn metadata(&self) -> Result<Vec<u8>> {
|
||||
Ok(self.metadata.to_vec())
|
||||
}
|
||||
}
|
||||
|
||||
impl<H: Hasher> FullMerkleTree<H>
|
||||
where
|
||||
H: Hasher,
|
||||
{
|
||||
// Utilities for updating the tree nodes
|
||||
|
||||
/// For a given node index, return the parent node index
|
||||
/// Returns None if there is no parent (root node)
|
||||
fn parent(&self, index: usize) -> Option<usize> {
|
||||
if index == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(((index + 1) >> 1) - 1)
|
||||
}
|
||||
}
|
||||
|
||||
/// For a given node index, return index of the first (left) child.
|
||||
fn first_child(&self, index: usize) -> usize {
|
||||
(index << 1) + 1
|
||||
}
|
||||
|
||||
fn levels(&self, index: usize) -> usize {
|
||||
// `n.next_power_of_two()` will return `n` iff `n` is a power of two.
|
||||
// The extra offset corrects this.
|
||||
(index + 2).next_power_of_two().trailing_zeros() as usize - 1
|
||||
}
|
||||
|
||||
fn update_nodes(&mut self, start: usize, end: usize) -> Result<()> {
|
||||
if self.levels(start) != self.levels(end) {
|
||||
return Err(Report::msg("self.levels(start) != self.levels(end)"));
|
||||
}
|
||||
if let (Some(start), Some(end)) = (self.parent(start), self.parent(end)) {
|
||||
for parent in start..=end {
|
||||
let child = self.first_child(parent);
|
||||
self.nodes[parent] = H::hash(&[self.nodes[child], self.nodes[child + 1]]);
|
||||
}
|
||||
self.update_nodes(start, end)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<H: Hasher> ZerokitMerkleProof for FullMerkleProof<H> {
    type Index = u8;
    type Hasher = H;

    #[must_use]
    // Returns the length of a Merkle proof
    fn length(&self) -> usize {
        self.0.len()
    }

    /// Computes the leaf index corresponding to a Merkle proof
    #[must_use]
    fn leaf_index(&self) -> usize {
        // The path is stored leaf-to-root; folding it in reverse (root-to-leaf)
        // rebuilds the index bit by bit: a Left branch means the current node
        // is a left child (bit 0), Right means a right child (bit 1).
        self.0.iter().rev().fold(0, |index, branch| match branch {
            FullMerkleBranch::Left(_) => index << 1,
            FullMerkleBranch::Right(_) => (index << 1) + 1,
        })
    }

    #[must_use]
    /// Returns the path elements forming a Merkle proof
    fn get_path_elements(&self) -> Vec<FrOf<Self::Hasher>> {
        // Both variants carry the sibling hash; the direction is discarded here.
        self.0
            .iter()
            .map(|x| match x {
                FullMerkleBranch::Left(value) | FullMerkleBranch::Right(value) => *value,
            })
            .collect()
    }

    /// Returns the path indexes forming a Merkle proof
    /// (0 = Left branch, 1 = Right branch), in leaf-to-root order.
    #[must_use]
    fn get_path_index(&self) -> Vec<Self::Index> {
        self.0
            .iter()
            .map(|branch| match branch {
                FullMerkleBranch::Left(_) => 0,
                FullMerkleBranch::Right(_) => 1,
            })
            .collect()
    }

    /// Computes the Merkle root corresponding by iteratively hashing a Merkle proof with a given input leaf
    #[must_use]
    fn compute_root_from(&self, hash: &FrOf<Self::Hasher>) -> FrOf<Self::Hasher> {
        // A Left branch stores the right-hand sibling (and vice versa), so the
        // running hash is placed on the side the branch direction indicates.
        self.0.iter().fold(*hash, |hash, branch| match branch {
            FullMerkleBranch::Left(sibling) => H::hash(&[hash, *sibling]),
            FullMerkleBranch::Right(sibling) => H::hash(&[*sibling, hash]),
        })
    }
}
|
||||
|
||||
// Debug formatting for printing a (Full) Merkle Proof Branch
|
||||
impl<H> Debug for FullMerkleBranch<H>
|
||||
where
|
||||
H: Hasher,
|
||||
H::Fr: Debug,
|
||||
{
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Self::Left(arg0) => f.debug_tuple("Left").field(arg0).finish(),
|
||||
Self::Right(arg0) => f.debug_tuple("Right").field(arg0).finish(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Debug formatting for printing a (Full) Merkle Proof
impl<H> Debug for FullMerkleProof<H>
where
    H: Hasher,
    H::Fr: Debug,
{
    // Renders as `Proof([...])`, delegating element formatting to the branches.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_tuple("Proof").field(&self.0).finish()
    }
}
|
||||
82
utils/src/merkle_tree/merkle_tree.rs
Normal file
82
utils/src/merkle_tree/merkle_tree.rs
Normal file
@@ -0,0 +1,82 @@
|
||||
// This crate provides different implementation of Merkle tree
|
||||
// Currently two interchangeable implementations are supported:
|
||||
// - FullMerkleTree: each tree node is stored
|
||||
// - OptimalMerkleTree: only nodes used to prove accumulation of set leaves are stored
|
||||
// Library defaults are set in the poseidon_tree crate
|
||||
//
|
||||
// Merkle tree implementations are adapted from https://github.com/kilic/rln/blob/master/src/merkle.rs
|
||||
// and https://github.com/worldcoin/semaphore-rs/blob/d462a4372f1fd9c27610f2acfe4841fab1d396aa/src/merkle_tree.rs
|
||||
|
||||
//!
|
||||
//! # To do
|
||||
//!
|
||||
//! * Disk based storage backend (using mmaped files should be easy)
|
||||
//! * Implement serialization for tree and Merkle proof
|
||||
|
||||
use std::str::FromStr;
|
||||
|
||||
use color_eyre::Result;
|
||||
|
||||
/// In the Hasher trait we define the node type, the default leaf
/// and the hash function used to initialize a Merkle Tree implementation
pub trait Hasher {
    /// Type of the leaf and tree node
    type Fr: Clone + Copy + Eq + Default + std::fmt::Debug + std::fmt::Display + FromStr;

    /// Returns the default tree leaf
    fn default_leaf() -> Self::Fr;

    /// Utility to compute the hash of an intermediate node
    /// (callers in this crate pass the two-child slice `[left, right]`)
    fn hash(input: &[Self::Fr]) -> Self::Fr;
}
|
||||
|
||||
/// Shorthand for the field/node type of a given `Hasher`.
pub type FrOf<H> = <H as Hasher>::Fr;
|
||||
|
||||
/// In the ZerokitMerkleTree trait we define the methods that are required to be implemented by a Merkle tree
/// Including, OptimalMerkleTree, FullMerkleTree
pub trait ZerokitMerkleTree {
    /// Proof type produced by `proof` and consumed by `verify`.
    type Proof: ZerokitMerkleProof;
    /// Hash function fixing the node/leaf field type.
    type Hasher: Hasher;
    /// Backend-specific configuration, parseable from a string.
    type Config: Default + FromStr;

    /// Builds a tree of `depth` filled with the hasher's default leaf.
    fn default(depth: usize) -> Result<Self>
    where
        Self: Sized;
    /// Builds a tree of `depth` with the given default leaf and config.
    fn new(depth: usize, default_leaf: FrOf<Self::Hasher>, config: Self::Config) -> Result<Self>
    where
        Self: Sized;
    /// Tree height (number of levels between a leaf and the root).
    fn depth(&self) -> usize;
    /// Maximum number of leaves the tree can hold.
    fn capacity(&self) -> usize;
    /// Number of leaves appended so far.
    fn leaves_set(&self) -> usize;
    /// Current Merkle root.
    fn root(&self) -> FrOf<Self::Hasher>;
    /// Recomputes (if necessary) and returns the root.
    fn compute_root(&mut self) -> Result<FrOf<Self::Hasher>>;
    /// Root of the level-`n` subtree containing leaf `index`.
    fn get_subtree_root(&self, n: usize, index: usize) -> Result<FrOf<Self::Hasher>>;
    /// Writes `leaf` at position `index`.
    fn set(&mut self, index: usize, leaf: FrOf<Self::Hasher>) -> Result<()>;
    /// Writes consecutive `leaves` starting at `start`.
    fn set_range<I>(&mut self, start: usize, leaves: I) -> Result<()>
    where
        I: IntoIterator<Item = FrOf<Self::Hasher>>;
    /// Reads the leaf at `index`.
    fn get(&self, index: usize) -> Result<FrOf<Self::Hasher>>;
    /// Indices below `leaves_set()` whose leaves are currently empty.
    fn get_empty_leaves_indices(&self) -> Vec<usize>;
    /// Writes `leaves` from `start` while clearing the leaves at
    /// `to_remove_indices`.
    fn override_range<I, J>(&mut self, start: usize, leaves: I, to_remove_indices: J) -> Result<()>
    where
        I: IntoIterator<Item = FrOf<Self::Hasher>>,
        J: IntoIterator<Item = usize>;
    /// Appends `leaf` at the next free index.
    fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<()>;
    /// Resets the leaf at `index` to the default value.
    fn delete(&mut self, index: usize) -> Result<()>;
    /// Produces a Merkle proof for the leaf at `index`.
    fn proof(&self, index: usize) -> Result<Self::Proof>;
    /// Checks `witness` against `leaf` and the current root.
    fn verify(&self, leaf: &FrOf<Self::Hasher>, witness: &Self::Proof) -> Result<bool>;
    /// Stores opaque application metadata.
    fn set_metadata(&mut self, metadata: &[u8]) -> Result<()>;
    /// Returns the stored application metadata.
    fn metadata(&self) -> Result<Vec<u8>>;
    /// Releases any backing storage handle (a no-op for in-memory trees).
    fn close_db_connection(&mut self) -> Result<()>;
}
|
||||
|
||||
/// Common interface of Merkle proofs produced by the trees in this crate.
pub trait ZerokitMerkleProof {
    /// Type used for branch-direction indexes (`u8` in both implementations).
    type Index;
    /// Hash function fixing the node/leaf field type.
    type Hasher: Hasher;

    /// Number of path elements in the proof.
    fn length(&self) -> usize;
    /// Leaf index this proof corresponds to.
    fn leaf_index(&self) -> usize;
    /// Sibling hashes along the path, leaf-to-root.
    fn get_path_elements(&self) -> Vec<FrOf<Self::Hasher>>;
    /// Branch-direction bits along the path.
    fn get_path_index(&self) -> Vec<Self::Index>;
    /// Folds the proof over `leaf` to reconstruct the Merkle root.
    fn compute_root_from(&self, leaf: &FrOf<Self::Hasher>) -> FrOf<Self::Hasher>;
}
|
||||
7
utils/src/merkle_tree/mod.rs
Normal file
7
utils/src/merkle_tree/mod.rs
Normal file
@@ -0,0 +1,7 @@
|
||||
// Tree implementations live in sibling modules; everything is re-exported
// here so callers can use the crate-level paths directly.
pub mod full_merkle_tree;
#[allow(clippy::module_inception)]
pub mod merkle_tree;
pub mod optimal_merkle_tree;
pub use self::full_merkle_tree::*;
pub use self::merkle_tree::*;
pub use self::optimal_merkle_tree::*;
|
||||
380
utils/src/merkle_tree/optimal_merkle_tree.rs
Normal file
380
utils/src/merkle_tree/optimal_merkle_tree.rs
Normal file
@@ -0,0 +1,380 @@
|
||||
use crate::merkle_tree::{Hasher, ZerokitMerkleProof, ZerokitMerkleTree};
|
||||
use crate::FrOf;
|
||||
use color_eyre::{Report, Result};
|
||||
use std::collections::HashMap;
|
||||
use std::str::FromStr;
|
||||
use std::{cmp::max, fmt::Debug};
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
/// Optimal Merkle Tree Implementation
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
/// The Merkle tree structure
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OptimalMerkleTree<H>
where
    H: Hasher,
{
    /// The depth of the tree, i.e. the number of levels from leaf to root
    depth: usize,

    /// The nodes cached from the empty part of the tree (where leaves are set to default).
    /// Since the rightmost part of the tree is usually changed much later than its creation,
    /// we can prove accumulation of elements in the leftmost part, with no need to initialize the full tree
    /// and by caching few intermediate nodes to the root computed from default leaves
    /// Indexed by node level (0 = root level, `depth` = leaf level).
    cached_nodes: Vec<H::Fr>,

    /// The tree nodes, keyed by (level, index within level); absent entries
    /// fall back to the cached default-subtree node for that level.
    nodes: HashMap<(usize, usize), H::Fr>,

    /// The indices of leaves which are set into zero upto next_index.
    /// Set to 0 if the leaf is empty and set to 1 in otherwise.
    cached_leaves_indices: Vec<u8>,

    // The next available (i.e., never used) tree index. Equivalently, the number of leaves added to the tree
    // (deletions leave next_index unchanged)
    next_index: usize,

    // metadata that an application may use to store additional information
    metadata: Vec<u8>,
}
|
||||
|
||||
/// The Merkle proof
/// Contains a vector of (node, branch_index) that defines the proof path elements and branch direction (1 or 0)
#[derive(Clone, PartialEq, Eq)]
pub struct OptimalMerkleProof<H: Hasher>(pub Vec<(H::Fr, u8)>);

/// Unit configuration: the in-memory optimal tree needs no backend settings.
#[derive(Default)]
pub struct OptimalMerkleConfig(());

// Accepts any input string (ignored) so the tree's `Config: FromStr` bound
// is satisfied.
impl FromStr for OptimalMerkleConfig {
    type Err = Report;

    fn from_str(_s: &str) -> Result<Self> {
        Ok(OptimalMerkleConfig::default())
    }
}
|
||||
|
||||
/// Implementations

impl<H: Hasher> ZerokitMerkleTree for OptimalMerkleTree<H>
where
    H: Hasher,
{
    type Proof = OptimalMerkleProof<H>;
    type Hasher = H;
    type Config = OptimalMerkleConfig;

    /// Builds a tree of `depth` filled with the hasher's default leaf.
    fn default(depth: usize) -> Result<Self> {
        OptimalMerkleTree::<H>::new(depth, H::default_leaf(), Self::Config::default())
    }

    /// Creates a new `MerkleTree`
    /// depth - the height of the tree made only of hash nodes. 2^depth is the maximum number of leaves hash nodes
    fn new(depth: usize, default_leaf: H::Fr, _config: Self::Config) -> Result<Self> {
        // cached_nodes[d] is the hash of a fully-default subtree rooted at
        // level d; built leaf-up, then reversed so index 0 is the root level.
        let mut cached_nodes: Vec<H::Fr> = Vec::with_capacity(depth + 1);
        cached_nodes.push(default_leaf);
        for i in 0..depth {
            cached_nodes.push(H::hash(&[cached_nodes[i]; 2]));
        }
        cached_nodes.reverse();
        Ok(OptimalMerkleTree {
            // NOTE(review): `cached_nodes` is not used after this point, so the
            // `clone()` looks unnecessary — confirm and drop.
            cached_nodes: cached_nodes.clone(),
            depth,
            nodes: HashMap::new(),
            cached_leaves_indices: vec![0; 1 << depth],
            next_index: 0,
            metadata: Vec::new(),
        })
    }

    /// No-op: the in-memory tree holds no database connection.
    fn close_db_connection(&mut self) -> Result<()> {
        Ok(())
    }

    // Returns the depth of the tree
    fn depth(&self) -> usize {
        self.depth
    }

    // Returns the capacity of the tree, i.e. the maximum number of accumulatable leaves
    fn capacity(&self) -> usize {
        1 << self.depth
    }

    // Returns the total number of leaves set
    fn leaves_set(&self) -> usize {
        self.next_index
    }

    #[must_use]
    // Returns the root of the tree
    fn root(&self) -> H::Fr {
        self.get_node(0, 0)
    }

    /// Returns the root of the level-`n` subtree containing leaf `index`:
    /// `n == 0` yields the tree root, `n == depth` yields the leaf itself.
    fn get_subtree_root(&self, n: usize, index: usize) -> Result<H::Fr> {
        if n > self.depth() {
            return Err(Report::msg("level exceeds depth size"));
        }
        if index >= self.capacity() {
            return Err(Report::msg("index exceeds set size"));
        }
        if n == 0 {
            Ok(self.root())
        } else if n == self.depth {
            self.get(index)
        } else {
            // Drop the low bits to locate the subtree position at level n.
            Ok(self.get_node(n, index >> (self.depth - n)))
        }
    }

    // Sets a leaf at the specified tree index
    fn set(&mut self, index: usize, leaf: H::Fr) -> Result<()> {
        if index >= self.capacity() {
            return Err(Report::msg("index exceeds set size"));
        }
        self.nodes.insert((self.depth, index), leaf);
        self.recalculate_from(index)?;
        // next_index only grows; deletions never shrink it.
        self.next_index = max(self.next_index, index + 1);
        self.cached_leaves_indices[index] = 1;
        Ok(())
    }

    // Get a leaf from the specified tree index
    fn get(&self, index: usize) -> Result<H::Fr> {
        if index >= self.capacity() {
            return Err(Report::msg("index exceeds set size"));
        }
        Ok(self.get_node(self.depth, index))
    }

    /// Returns the indices in `0..next_index` whose leaves are currently empty.
    fn get_empty_leaves_indices(&self) -> Vec<usize> {
        self.cached_leaves_indices
            .iter()
            .take(self.next_index)
            .enumerate()
            .filter(|&(_, &v)| v == 0u8)
            .map(|(idx, _)| idx)
            .collect()
    }

    // Sets multiple leaves from the specified tree index
    fn set_range<I: IntoIterator<Item = H::Fr>>(&mut self, start: usize, leaves: I) -> Result<()> {
        let leaves = leaves.into_iter().collect::<Vec<_>>();
        // check if the range is valid
        if start + leaves.len() > self.capacity() {
            return Err(Report::msg("provided range exceeds set size"));
        }
        for (i, leaf) in leaves.iter().enumerate() {
            self.nodes.insert((self.depth, start + i), *leaf);
            self.cached_leaves_indices[start + i] = 1;
            // NOTE(review): recomputing the full path per leaf is
            // O(len * depth); a single bottom-up pass over the touched range
            // would be cheaper.
            self.recalculate_from(start + i)?;
        }
        self.next_index = max(self.next_index, start + leaves.len());
        Ok(())
    }

    /// Overwrites `leaves` starting at `start` while clearing the leaves at
    /// `indices`; untouched leaves in the window keep their previous values.
    fn override_range<I, J>(&mut self, start: usize, leaves: I, indices: J) -> Result<()>
    where
        I: IntoIterator<Item = FrOf<Self::Hasher>>,
        J: IntoIterator<Item = usize>,
    {
        let indices = indices.into_iter().collect::<Vec<_>>();
        // NOTE(review): `first().unwrap()` panics when `indices` is empty, and
        // the code assumes `indices` is sorted ascending — confirm callers
        // guarantee both, or handle the empty case explicitly.
        let min_index = *indices.first().unwrap();
        let leaves_vec = leaves.into_iter().collect::<Vec<_>>();

        let max_index = start + leaves_vec.len();

        // Stage the affected window [min_index, max_index): defaults for
        // removed leaves, preserved values for untouched ones, new leaves last.
        let mut set_values = vec![Self::Hasher::default_leaf(); max_index - min_index];

        for i in min_index..start {
            if !indices.contains(&i) {
                let value = self.get_leaf(i);
                set_values[i - min_index] = value;
            }
        }

        for i in 0..leaves_vec.len() {
            set_values[start - min_index + i] = leaves_vec[i];
        }

        for i in indices {
            self.cached_leaves_indices[i] = 0;
        }

        // NOTE(review): `set_values` is laid out starting at `min_index`, but
        // it is written back starting at `start` — this looks like it should
        // be `self.set_range(min_index, set_values)`; confirm.
        self.set_range(start, set_values)
            .map_err(|e| Report::msg(e.to_string()))
    }

    // Sets a leaf at the next available index
    fn update_next(&mut self, leaf: H::Fr) -> Result<()> {
        self.set(self.next_index, leaf)?;
        Ok(())
    }

    // Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
    fn delete(&mut self, index: usize) -> Result<()> {
        // We reset the leaf only if we previously set a leaf at that index
        if index < self.next_index {
            self.set(index, H::default_leaf())?;
            // `set` marks the slot occupied; flip it back to empty.
            self.cached_leaves_indices[index] = 0;
        }
        Ok(())
    }

    // Computes a merkle proof the the leaf at the specified index
    fn proof(&self, index: usize) -> Result<Self::Proof> {
        if index >= self.capacity() {
            return Err(Report::msg("index exceeds set size"));
        }
        let mut witness = Vec::<(H::Fr, u8)>::with_capacity(self.depth);
        let mut i = index;
        let mut depth = self.depth;
        // Climb leaf-to-root, recording the sibling hash and the branch bit
        // (0 when the current node is a left child, 1 when a right child).
        // NOTE(review): `depth -= 1` underflows for a depth-0 tree — confirm
        // depth >= 1 is guaranteed at construction.
        loop {
            i ^= 1; // flip the low bit: i now addresses the sibling
            witness.push((self.get_node(depth, i), (1 - (i & 1)).try_into().unwrap()));
            i >>= 1;
            depth -= 1;
            if depth == 0 {
                break;
            }
        }
        // A leftover nonzero `i` would mean the climb did not end at the root.
        if i != 0 {
            Err(Report::msg("i != 0"))
        } else {
            Ok(OptimalMerkleProof(witness))
        }
    }

    // Verifies a Merkle proof with respect to the input leaf and the tree root
    fn verify(&self, leaf: &H::Fr, witness: &Self::Proof) -> Result<bool> {
        if witness.length() != self.depth {
            return Err(Report::msg("witness length doesn't match tree depth"));
        }
        let expected_root = witness.compute_root_from(leaf);
        Ok(expected_root.eq(&self.root()))
    }

    /// Recomputes the path from leaf 0 and returns the root.
    /// NOTE(review): `recalculate_from(0)` marks leaf 0 as occupied in
    /// `cached_leaves_indices` even if it was never set — confirm intended.
    fn compute_root(&mut self) -> Result<FrOf<Self::Hasher>> {
        self.recalculate_from(0)?;
        Ok(self.root())
    }

    /// Stores opaque application metadata alongside the tree.
    fn set_metadata(&mut self, metadata: &[u8]) -> Result<()> {
        self.metadata = metadata.to_vec();
        Ok(())
    }

    /// Returns a copy of the stored application metadata.
    fn metadata(&self) -> Result<Vec<u8>> {
        Ok(self.metadata.to_vec())
    }
}
|
||||
|
||||
impl<H: Hasher> OptimalMerkleTree<H>
|
||||
where
|
||||
H: Hasher,
|
||||
{
|
||||
// Utilities for updating the tree nodes
|
||||
|
||||
fn get_node(&self, depth: usize, index: usize) -> H::Fr {
|
||||
let node = *self
|
||||
.nodes
|
||||
.get(&(depth, index))
|
||||
.unwrap_or_else(|| &self.cached_nodes[depth]);
|
||||
node
|
||||
}
|
||||
|
||||
pub fn get_leaf(&self, index: usize) -> H::Fr {
|
||||
self.get_node(self.depth, index)
|
||||
}
|
||||
|
||||
fn hash_couple(&mut self, depth: usize, index: usize) -> H::Fr {
|
||||
let b = index & !1;
|
||||
H::hash(&[self.get_node(depth, b), self.get_node(depth, b + 1)])
|
||||
}
|
||||
|
||||
fn recalculate_from(&mut self, index: usize) -> Result<()> {
|
||||
let mut i = index;
|
||||
let mut depth = self.depth;
|
||||
loop {
|
||||
let h = self.hash_couple(depth, i);
|
||||
i >>= 1;
|
||||
depth -= 1;
|
||||
self.nodes.insert((depth, i), h);
|
||||
self.cached_leaves_indices[index] = 1;
|
||||
if depth == 0 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if depth != 0 {
|
||||
return Err(Report::msg("did not reach the depth"));
|
||||
}
|
||||
if i != 0 {
|
||||
return Err(Report::msg("did not go through all indexes"));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<H: Hasher> ZerokitMerkleProof for OptimalMerkleProof<H>
|
||||
where
|
||||
H: Hasher,
|
||||
{
|
||||
type Index = u8;
|
||||
type Hasher = H;
|
||||
|
||||
#[must_use]
|
||||
// Returns the length of a Merkle proof
|
||||
fn length(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
|
||||
/// Computes the leaf index corresponding to a Merkle proof
|
||||
#[must_use]
|
||||
fn leaf_index(&self) -> usize {
|
||||
// In current implementation the path indexes in a proof correspond to the binary representation of the leaf index
|
||||
let mut binary_repr = self.get_path_index();
|
||||
binary_repr.reverse();
|
||||
binary_repr
|
||||
.into_iter()
|
||||
.fold(0, |acc, digit| (acc << 1) + usize::from(digit))
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
/// Returns the path elements forming a Merkle proof
|
||||
fn get_path_elements(&self) -> Vec<H::Fr> {
|
||||
self.0.iter().map(|x| x.0).collect()
|
||||
}
|
||||
|
||||
/// Returns the path indexes forming a Merkle proof
|
||||
#[must_use]
|
||||
fn get_path_index(&self) -> Vec<u8> {
|
||||
self.0.iter().map(|x| x.1).collect()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
/// Computes the Merkle root corresponding by iteratively hashing a Merkle proof with a given input leaf
|
||||
fn compute_root_from(&self, leaf: &H::Fr) -> H::Fr {
|
||||
let mut acc: H::Fr = *leaf;
|
||||
for w in self.0.iter() {
|
||||
if w.1 == 0 {
|
||||
acc = H::hash(&[acc, w.0]);
|
||||
} else {
|
||||
acc = H::hash(&[w.0, acc]);
|
||||
}
|
||||
}
|
||||
acc
|
||||
}
|
||||
}
|
||||
|
||||
// Debug formatting for printing a (Optimal) Merkle Proof
impl<H> Debug for OptimalMerkleProof<H>
where
    H: Hasher,
    H::Fr: Debug,
{
    // Renders as `Proof([...])`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_tuple("Proof").field(&self.0).finish()
    }
}
|
||||
4
utils/src/pm_tree/mod.rs
Normal file
4
utils/src/pm_tree/mod.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
// sled-backed adapter for pmtree; the underlying `pmtree` and `sled` crates
// are re-exported for downstream convenience.
pub mod sled_adapter;
pub use self::sled_adapter::*;
pub use pmtree;
pub use sled::*;
|
||||
105
utils/src/pm_tree/sled_adapter.rs
Normal file
105
utils/src/pm_tree/sled_adapter.rs
Normal file
@@ -0,0 +1,105 @@
|
||||
use pmtree::*;
|
||||
|
||||
use sled::Db as Sled;
|
||||
use std::collections::HashMap;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
/// A `pmtree` database backend backed by an embedded sled database.
pub struct SledDB(Sled);

impl SledDB {
    /// Opens the sled database, retrying while the file lock is held by
    /// another process. Backoff is 10^tries milliseconds per attempt.
    ///
    /// NOTE(review): the later sleeps are enormous (10^9 ms at tries = 9 is
    /// ~11.5 days) — confirm the exponential base/cap is intended.
    fn new_with_tries(config: <SledDB as Database>::Config, tries: u32) -> PmtreeResult<Self> {
        // If we've tried more than 10 times, we give up and return an error.
        if tries >= 10 {
            return Err(PmtreeErrorKind::DatabaseError(
                DatabaseErrorKind::CustomError(format!(
                    "Cannot create database: exceeded maximum retry attempts. {config:#?}"
                )),
            ));
        }
        match config.open() {
            Ok(db) => Ok(SledDB(db)),
            // NOTE(review): matching on the stringified error is brittle;
            // prefer matching the concrete sled error kind if available.
            Err(e) if e.to_string().contains("WouldBlock") => {
                // try till the fd is freed
                // sleep for 10^tries milliseconds, then recursively try again
                thread::sleep(Duration::from_millis(10u64.pow(tries)));
                Self::new_with_tries(config, tries + 1)
            }
            Err(e) => {
                // On any other error, we return immediately.
                Err(PmtreeErrorKind::DatabaseError(
                    DatabaseErrorKind::CustomError(format!(
                        "Cannot create database: {e} {config:#?}"
                    )),
                ))
            }
        }
    }
}
|
||||
|
||||
impl Database for SledDB {
    type Config = sled::Config;

    /// Creates (or opens) the database, retrying on lock contention.
    fn new(config: Self::Config) -> PmtreeResult<Self> {
        let db = Self::new_with_tries(config, 0)?;
        Ok(db)
    }

    /// Opens an existing database; fails when sled reports the database was
    /// not recovered from disk (i.e. a fresh one was created instead).
    fn load(config: Self::Config) -> PmtreeResult<Self> {
        let db = match config.open() {
            Ok(db) => db,
            Err(e) => {
                return Err(PmtreeErrorKind::DatabaseError(
                    DatabaseErrorKind::CustomError(format!("Cannot load database: {e}")),
                ))
            }
        };

        // `was_recovered()` is false when sled had to create a new database,
        // which for `load` means the expected on-disk state was missing.
        if !db.was_recovered() {
            return Err(PmtreeErrorKind::DatabaseError(
                DatabaseErrorKind::CustomError(format!(
                    "Database was not recovered: {}",
                    config.path.display()
                )),
            ));
        }

        Ok(SledDB(db))
    }

    /// Flushes outstanding writes to disk.
    fn close(&mut self) -> PmtreeResult<()> {
        let _ = self.0.flush().map_err(|_| {
            PmtreeErrorKind::DatabaseError(DatabaseErrorKind::CustomError(
                "Cannot flush database".to_string(),
            ))
        })?;
        Ok(())
    }

    /// Reads the value stored under `key`; `None` if absent.
    fn get(&self, key: DBKey) -> PmtreeResult<Option<Value>> {
        match self.0.get(key) {
            Ok(value) => Ok(value.map(|val| val.to_vec())),
            Err(_e) => Err(PmtreeErrorKind::TreeError(TreeErrorKind::InvalidKey)),
        }
    }

    /// Inserts or overwrites the value under `key`.
    fn put(&mut self, key: DBKey, value: Value) -> PmtreeResult<()> {
        match self.0.insert(key, value) {
            Ok(_) => Ok(()),
            Err(_e) => Err(PmtreeErrorKind::TreeError(TreeErrorKind::InvalidKey)),
        }
    }

    /// Applies all `subtree` entries atomically via a sled batch.
    fn put_batch(&mut self, subtree: HashMap<DBKey, Value>) -> PmtreeResult<()> {
        let mut batch = sled::Batch::default();

        for (key, value) in subtree {
            batch.insert(&key, value);
        }

        self.0
            .apply_batch(batch)
            .map_err(|_| PmtreeErrorKind::TreeError(TreeErrorKind::InvalidKey))?;
        Ok(())
    }
}
|
||||
5
utils/src/poseidon/mod.rs
Normal file
5
utils/src/poseidon/mod.rs
Normal file
@@ -0,0 +1,5 @@
|
||||
// Poseidon hash implementation and its Grain-LFSR-generated constants.
pub mod poseidon_hash;
pub use self::poseidon_hash::*;

pub mod poseidon_constants;
pub use self::poseidon_constants::*;
|
||||
264
utils/src/poseidon/poseidon_constants.rs
Normal file
264
utils/src/poseidon/poseidon_constants.rs
Normal file
@@ -0,0 +1,264 @@
|
||||
// This crate provides an implementation to compute the Poseidon hash round constants and MDS matrices.
|
||||
|
||||
// SECURITY NOTE: The MDS matrices are generated interatively using the Grain LFSR until certain criteria are met.
|
||||
// According to the paper, such matrices have to respect some conditions which are checked by 3 different algorithms in the reference implementation.
|
||||
// At the moment such algorithms are not implemented, however *for the hardcoded parameters* the first random matrix generated satisfy such conditions.
|
||||
// If different parameters are implemented, it should be checked against the reference implementation how many matrices are generated before outputting
|
||||
// the right one, and pass this number to the skip_matrices parameter of find_poseidon_ark_and_mds function in order to output the correct one.
|
||||
// Poseidon reference implementation: https://extgit.iaik.tugraz.at/krypto/hadeshash/-/blob/master/code/generate_parameters_grain.sage (algorithm_1, algorithm_2, algorithm_3)
|
||||
|
||||
// The following implementation was adapted from https://github.com/arkworks-rs/sponge/blob/7d9b3a474c9ddb62890014aeaefcb142ac2b3776/src/poseidon/grain_lfsr.rs
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use ark_ff::PrimeField;
|
||||
use num_bigint::BigUint;
|
||||
|
||||
/// Grain LFSR state used to derive Poseidon round constants and MDS matrices.
pub struct PoseidonGrainLFSR {
    // Bit size of the field modulus (n), checked against F::MODULUS_BIT_SIZE.
    pub prime_num_bits: u64,
    // The 80-bit shift-register state.
    pub state: [bool; 80],
    // Current head position within the circular `state` buffer.
    pub head: usize,
}
|
||||
|
||||
#[allow(unused_variables)]
|
||||
impl PoseidonGrainLFSR {
|
||||
pub fn new(
|
||||
is_field: u64,
|
||||
is_sbox_an_inverse: u64,
|
||||
prime_num_bits: u64,
|
||||
state_len: u64,
|
||||
num_full_rounds: u64,
|
||||
num_partial_rounds: u64,
|
||||
) -> Self {
|
||||
let mut state = [false; 80];
|
||||
|
||||
// Only fields are supported for now
|
||||
assert!(is_field == 1);
|
||||
|
||||
// b0, b1 describes the field
|
||||
state[1] = is_field == 1;
|
||||
|
||||
assert!(is_sbox_an_inverse == 0 || is_sbox_an_inverse == 1);
|
||||
|
||||
// b2, ..., b5 describes the S-BOX
|
||||
state[5] = is_sbox_an_inverse == 1;
|
||||
|
||||
// b6, ..., b17 are the binary representation of n (prime_num_bits)
|
||||
{
|
||||
let mut cur = prime_num_bits;
|
||||
for i in (6..=17).rev() {
|
||||
state[i] = cur & 1 == 1;
|
||||
cur >>= 1;
|
||||
}
|
||||
}
|
||||
|
||||
// b18, ..., b29 are the binary representation of t (state_len, rate + capacity)
|
||||
{
|
||||
let mut cur = state_len;
|
||||
for i in (18..=29).rev() {
|
||||
state[i] = cur & 1 == 1;
|
||||
cur >>= 1;
|
||||
}
|
||||
}
|
||||
|
||||
// b30, ..., b39 are the binary representation of R_F (the number of full rounds)
|
||||
{
|
||||
let mut cur = num_full_rounds;
|
||||
for i in (30..=39).rev() {
|
||||
state[i] = cur & 1 == 1;
|
||||
cur >>= 1;
|
||||
}
|
||||
}
|
||||
|
||||
// b40, ..., b49 are the binary representation of R_P (the number of partial rounds)
|
||||
{
|
||||
let mut cur = num_partial_rounds;
|
||||
for i in (40..=49).rev() {
|
||||
state[i] = cur & 1 == 1;
|
||||
cur >>= 1;
|
||||
}
|
||||
}
|
||||
|
||||
// b50, ..., b79 are set to 1
|
||||
for item in state.iter_mut().skip(50) {
|
||||
*item = true;
|
||||
}
|
||||
|
||||
let head = 0;
|
||||
|
||||
let mut res = Self {
|
||||
prime_num_bits,
|
||||
state,
|
||||
head,
|
||||
};
|
||||
res.init();
|
||||
res
|
||||
}
|
||||
|
||||
pub fn get_bits(&mut self, num_bits: usize) -> Vec<bool> {
|
||||
let mut res = Vec::new();
|
||||
|
||||
for _ in 0..num_bits {
|
||||
// Obtain the first bit
|
||||
let mut new_bit = self.update();
|
||||
|
||||
// Loop until the first bit is true
|
||||
while !new_bit {
|
||||
// Discard the second bit
|
||||
let _ = self.update();
|
||||
// Obtain another first bit
|
||||
new_bit = self.update();
|
||||
}
|
||||
|
||||
// Obtain the second bit
|
||||
res.push(self.update());
|
||||
}
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
pub fn get_field_elements_rejection_sampling<F: PrimeField>(
|
||||
&mut self,
|
||||
num_elems: usize,
|
||||
) -> Vec<F> {
|
||||
assert_eq!(F::MODULUS_BIT_SIZE as u64, self.prime_num_bits);
|
||||
let modulus: BigUint = F::MODULUS.into();
|
||||
|
||||
let mut res = Vec::new();
|
||||
for _ in 0..num_elems {
|
||||
// Perform rejection sampling
|
||||
loop {
|
||||
// Obtain n bits and make it most-significant-bit first
|
||||
let mut bits = self.get_bits(self.prime_num_bits as usize);
|
||||
bits.reverse();
|
||||
|
||||
let bytes = bits
|
||||
.chunks(8)
|
||||
.map(|chunk| {
|
||||
let mut result = 0u8;
|
||||
for (i, bit) in chunk.iter().enumerate() {
|
||||
result |= u8::from(*bit) << i
|
||||
}
|
||||
result
|
||||
})
|
||||
.collect::<Vec<u8>>();
|
||||
|
||||
let value = BigUint::from_bytes_le(&bytes);
|
||||
|
||||
if value < modulus {
|
||||
res.push(F::from(value.clone()));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
res
|
||||
}
|
||||
|
||||
pub fn get_field_elements_mod_p<F: PrimeField>(&mut self, num_elems: usize) -> Vec<F> {
|
||||
assert_eq!(F::MODULUS_BIT_SIZE as u64, self.prime_num_bits);
|
||||
|
||||
let mut res = Vec::new();
|
||||
for _ in 0..num_elems {
|
||||
// Obtain n bits and make it most-significant-bit first
|
||||
let mut bits = self.get_bits(self.prime_num_bits as usize);
|
||||
bits.reverse();
|
||||
|
||||
let bytes = bits
|
||||
.chunks(8)
|
||||
.map(|chunk| {
|
||||
let mut result = 0u8;
|
||||
for (i, bit) in chunk.iter().enumerate() {
|
||||
result |= u8::from(*bit) << i
|
||||
}
|
||||
result
|
||||
})
|
||||
.collect::<Vec<u8>>();
|
||||
|
||||
res.push(F::from_le_bytes_mod_order(&bytes));
|
||||
}
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn update(&mut self) -> bool {
|
||||
let new_bit = self.state[(self.head + 62) % 80]
|
||||
^ self.state[(self.head + 51) % 80]
|
||||
^ self.state[(self.head + 38) % 80]
|
||||
^ self.state[(self.head + 23) % 80]
|
||||
^ self.state[(self.head + 13) % 80]
|
||||
^ self.state[self.head];
|
||||
self.state[self.head] = new_bit;
|
||||
self.head += 1;
|
||||
self.head %= 80;
|
||||
|
||||
new_bit
|
||||
}
|
||||
|
||||
fn init(&mut self) {
|
||||
for _ in 0..160 {
|
||||
let new_bit = self.state[(self.head + 62) % 80]
|
||||
^ self.state[(self.head + 51) % 80]
|
||||
^ self.state[(self.head + 38) % 80]
|
||||
^ self.state[(self.head + 23) % 80]
|
||||
^ self.state[(self.head + 13) % 80]
|
||||
^ self.state[self.head];
|
||||
self.state[self.head] = new_bit;
|
||||
self.head += 1;
|
||||
self.head %= 80;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn find_poseidon_ark_and_mds<F: PrimeField>(
|
||||
is_field: u64,
|
||||
is_sbox_an_inverse: u64,
|
||||
prime_bits: u64,
|
||||
rate: usize,
|
||||
full_rounds: u64,
|
||||
partial_rounds: u64,
|
||||
skip_matrices: usize,
|
||||
) -> (Vec<F>, Vec<Vec<F>>) {
|
||||
let mut lfsr = PoseidonGrainLFSR::new(
|
||||
is_field,
|
||||
is_sbox_an_inverse,
|
||||
prime_bits,
|
||||
rate as u64,
|
||||
full_rounds,
|
||||
partial_rounds,
|
||||
);
|
||||
|
||||
let mut ark = Vec::<F>::with_capacity((full_rounds + partial_rounds) as usize);
|
||||
for _ in 0..(full_rounds + partial_rounds) {
|
||||
let values = lfsr.get_field_elements_rejection_sampling::<F>(rate);
|
||||
for el in values {
|
||||
ark.push(el);
|
||||
}
|
||||
}
|
||||
|
||||
let mut mds = Vec::<Vec<F>>::with_capacity(rate);
|
||||
mds.resize(rate, vec![F::zero(); rate]);
|
||||
|
||||
// Note that we build the MDS matrix generating 2*rate elements. If the matrix built is not secure (see checks with algorithm 1, 2, 3 in reference implementation)
|
||||
// it has to be skipped. Since here we do not implement such algorithm we allow to pass a parameter to skip generations of elements giving unsecure matrixes.
|
||||
// At the moment, the skip_matrices parameter has to be generated from the reference implementation and passed to this function
|
||||
for _ in 0..skip_matrices {
|
||||
let _ = lfsr.get_field_elements_mod_p::<F>(2 * (rate));
|
||||
}
|
||||
|
||||
// a qualifying matrix must satisfy the following requirements
|
||||
// - there is no duplication among the elements in x or y
|
||||
// - there is no i and j such that x[i] + y[j] = p
|
||||
// - the resultant MDS passes all the three tests
|
||||
|
||||
let xs = lfsr.get_field_elements_mod_p::<F>(rate);
|
||||
let ys = lfsr.get_field_elements_mod_p::<F>(rate);
|
||||
|
||||
for i in 0..(rate) {
|
||||
for (j, ys_item) in ys.iter().enumerate().take(rate) {
|
||||
mds[i][j] = (xs[i] + ys_item).inverse().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
(ark, mds)
|
||||
}
|
||||
141
utils/src/poseidon/poseidon_hash.rs
Normal file
141
utils/src/poseidon/poseidon_hash.rs
Normal file
@@ -0,0 +1,141 @@
|
||||
// This crate implements the Poseidon hash algorithm https://eprint.iacr.org/2019/458.pdf
|
||||
|
||||
// Implementation partially taken from https://github.com/arnaucube/poseidon-rs/blob/233027d6075a637c29ad84a8a44f5653b81f0410/src/lib.rs
|
||||
// and adapted to work over arkworks field traits and custom data structures
|
||||
|
||||
use crate::poseidon_constants::find_poseidon_ark_and_mds;
|
||||
use ark_ff::PrimeField;
|
||||
|
||||
// Round parameters and generated constants for one Poseidon instantiation
// (i.e. for one value of the rate `t`).
// NOTE(review): the name is misspelled ("Paramenters" instead of "Parameters")
// but is kept as-is because it is part of the public API.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RoundParamenters<F: PrimeField> {
    // Rate / state width: input length + 1
    pub t: usize,
    // Number of full rounds (RF)
    pub n_rounds_f: usize,
    // Number of partial rounds (RP)
    pub n_rounds_p: usize,
    // Number of candidate MDS matrices skipped during generation
    // (see find_poseidon_ark_and_mds)
    pub skip_matrices: usize,
    // Round constants (ark)
    pub c: Vec<F>,
    // MDS matrix, t x t
    pub m: Vec<Vec<F>>,
}
|
||||
|
||||
// Poseidon hasher holding one set of round parameters per supported rate `t`.
// Instantiate with `Poseidon::from`; `hash` selects the parameter set matching
// the input length.
pub struct Poseidon<F: PrimeField> {
    round_params: Vec<RoundParamenters<F>>,
}
|
||||
impl<F: PrimeField> Poseidon<F> {
|
||||
// Loads round parameters and generates round constants
|
||||
// poseidon_params is a vector containing tuples (t, RF, RP, skip_matrices)
|
||||
// where: t is the rate (input lenght + 1), RF is the number of full rounds, RP is the number of partial rounds
|
||||
// and skip_matrices is a (temporary) parameter used to generate secure MDS matrices (see comments in the description of find_poseidon_ark_and_mds)
|
||||
// TODO: implement automatic generation of round parameters
|
||||
pub fn from(poseidon_params: &[(usize, usize, usize, usize)]) -> Self {
|
||||
let mut read_params = Vec::<RoundParamenters<F>>::new();
|
||||
|
||||
for &(t, n_rounds_f, n_rounds_p, skip_matrices) in poseidon_params {
|
||||
let (ark, mds) = find_poseidon_ark_and_mds::<F>(
|
||||
1, // is_field = 1
|
||||
0, // is_sbox_inverse = 0
|
||||
F::MODULUS_BIT_SIZE as u64,
|
||||
t,
|
||||
n_rounds_f as u64,
|
||||
n_rounds_p as u64,
|
||||
skip_matrices,
|
||||
);
|
||||
let rp = RoundParamenters {
|
||||
t,
|
||||
n_rounds_p,
|
||||
n_rounds_f,
|
||||
skip_matrices,
|
||||
c: ark,
|
||||
m: mds,
|
||||
};
|
||||
read_params.push(rp);
|
||||
}
|
||||
|
||||
Poseidon {
|
||||
round_params: read_params,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_parameters(&self) -> Vec<RoundParamenters<F>> {
|
||||
self.round_params.clone()
|
||||
}
|
||||
|
||||
pub fn ark(&self, state: &mut [F], c: &[F], it: usize) {
|
||||
for i in 0..state.len() {
|
||||
state[i] += c[it + i];
|
||||
}
|
||||
}
|
||||
|
||||
pub fn sbox(&self, n_rounds_f: usize, n_rounds_p: usize, state: &mut [F], i: usize) {
|
||||
if (i < n_rounds_f / 2) || (i >= n_rounds_f / 2 + n_rounds_p) {
|
||||
for current_state in &mut state.iter_mut() {
|
||||
let aux = *current_state;
|
||||
*current_state *= *current_state;
|
||||
*current_state *= *current_state;
|
||||
*current_state *= aux;
|
||||
}
|
||||
} else {
|
||||
let aux = state[0];
|
||||
state[0] *= state[0];
|
||||
state[0] *= state[0];
|
||||
state[0] *= aux;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn mix(&self, state: &[F], m: &[Vec<F>]) -> Vec<F> {
|
||||
let mut new_state: Vec<F> = Vec::new();
|
||||
for i in 0..state.len() {
|
||||
new_state.push(F::zero());
|
||||
for (j, state_item) in state.iter().enumerate() {
|
||||
let mut mij = m[i][j];
|
||||
mij *= state_item;
|
||||
new_state[i] += mij;
|
||||
}
|
||||
}
|
||||
new_state.clone()
|
||||
}
|
||||
|
||||
pub fn hash(&self, inp: Vec<F>) -> Result<F, String> {
|
||||
// Note that the rate t becomes input lenght + 1, hence for lenght N we pick parameters with T = N + 1
|
||||
let t = inp.len() + 1;
|
||||
|
||||
// We seek the index (Poseidon's round_params is an ordered vector) for the parameters corresponding to t
|
||||
let param_index = self.round_params.iter().position(|el| el.t == t);
|
||||
|
||||
if inp.is_empty() || param_index.is_none() {
|
||||
return Err("No parameters found for inputs length".to_string());
|
||||
}
|
||||
|
||||
let param_index = param_index.unwrap();
|
||||
|
||||
let mut state = vec![F::zero(); t];
|
||||
state[1..].clone_from_slice(&inp);
|
||||
|
||||
for i in 0..(self.round_params[param_index].n_rounds_f
|
||||
+ self.round_params[param_index].n_rounds_p)
|
||||
{
|
||||
self.ark(
|
||||
&mut state,
|
||||
&self.round_params[param_index].c,
|
||||
i * self.round_params[param_index].t,
|
||||
);
|
||||
self.sbox(
|
||||
self.round_params[param_index].n_rounds_f,
|
||||
self.round_params[param_index].n_rounds_p,
|
||||
&mut state,
|
||||
i,
|
||||
);
|
||||
state = self.mix(&state, &self.round_params[param_index].m);
|
||||
}
|
||||
|
||||
Ok(state[0])
|
||||
}
|
||||
}
|
||||
|
||||
impl<F> Default for Poseidon<F>
where
    F: PrimeField,
{
    // Default instantiation has no round constants set. Will return an error
    // when hashing is attempted.
    fn default() -> Self {
        Self::from(&[])
    }
}
|
||||
338
utils/tests/merkle_tree.rs
Normal file
338
utils/tests/merkle_tree.rs
Normal file
@@ -0,0 +1,338 @@
|
||||
// Tests adapted from https://github.com/worldcoin/semaphore-rs/blob/d462a4372f1fd9c27610f2acfe4841fab1d396aa/src/merkle_tree.rs
#[cfg(test)]
pub mod test {
    use hex_literal::hex;
    use std::{fmt::Display, str::FromStr};
    use tiny_keccak::{Hasher as _, Keccak};
    use zerokit_utils::{
        FullMerkleConfig, FullMerkleTree, Hasher, OptimalMerkleConfig, OptimalMerkleTree,
        ZerokitMerkleProof, ZerokitMerkleTree,
    };

    // Keccak256-based hasher used to instantiate the trees under test.
    #[derive(Clone, Copy, Eq, PartialEq)]
    struct Keccak256;

    // 32-byte test field element used as the trees' leaf/node type.
    #[derive(Clone, Copy, Eq, PartialEq, Debug, Default)]
    struct TestFr([u8; 32]);

    impl Hasher for Keccak256 {
        type Fr = TestFr;

        fn default_leaf() -> Self::Fr {
            TestFr([0; 32])
        }

        // Keccak-256 over the concatenation of all inputs.
        fn hash(inputs: &[Self::Fr]) -> Self::Fr {
            let mut output = [0; 32];
            let mut hasher = Keccak::v256();
            for element in inputs {
                hasher.update(element.0.as_slice());
            }
            hasher.finalize(&mut output);
            TestFr(output)
        }
    }

    impl Display for TestFr {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "{}", hex::encode(self.0.as_slice()))
        }
    }

    impl FromStr for TestFr {
        type Err = std::string::FromUtf8Error;

        // NOTE(review): panics unless `s` is exactly 32 bytes long;
        // acceptable in test-only code.
        fn from_str(s: &str) -> Result<Self, Self::Err> {
            Ok(TestFr(s.as_bytes().try_into().unwrap()))
        }
    }

    impl From<u32> for TestFr {
        // Big-endian encodes the value into the last 4 bytes of a zeroed
        // 32-byte array.
        fn from(value: u32) -> Self {
            let mut bytes: Vec<u8> = vec![0; 28];
            bytes.extend_from_slice(&value.to_be_bytes());
            TestFr(bytes.as_slice().try_into().unwrap())
        }
    }

    const DEFAULT_DEPTH: usize = 2;

    // Fresh FullMerkleTree with all-zero default leaf.
    fn default_full_merkle_tree(depth: usize) -> FullMerkleTree<Keccak256> {
        FullMerkleTree::<Keccak256>::new(depth, TestFr([0; 32]), FullMerkleConfig::default())
            .unwrap()
    }

    // Fresh OptimalMerkleTree with all-zero default leaf.
    fn default_optimal_merkle_tree(depth: usize) -> OptimalMerkleTree<Keccak256> {
        OptimalMerkleTree::<Keccak256>::new(depth, TestFr([0; 32]), OptimalMerkleConfig::default())
            .unwrap()
    }

    // Checks that both tree implementations produce the same known-good roots
    // after each successive leaf insertion.
    #[test]
    fn test_root() {
        let default_tree_root = TestFr(hex!(
            "b4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30"
        ));

        // Expected root after setting leaf 0, 1, 2, 3 respectively.
        let roots = [
            hex!("c1ba1812ff680ce84c1d5b4f1087eeb08147a4d510f3496b2849df3a73f5af95"),
            hex!("893760ec5b5bee236f29e85aef64f17139c3c1b7ff24ce64eb6315fca0f2485b"),
            hex!("222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c"),
            hex!("a9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36"),
        ]
        .map(TestFr);

        let nof_leaves = 4;
        let leaves: Vec<TestFr> = (1..=nof_leaves as u32).map(TestFr::from).collect();

        let mut tree = default_full_merkle_tree(DEFAULT_DEPTH);
        assert_eq!(tree.root(), default_tree_root);
        for i in 0..nof_leaves {
            tree.set(i, leaves[i]).unwrap();
            assert_eq!(tree.root(), roots[i]);
        }

        let mut tree = default_optimal_merkle_tree(DEFAULT_DEPTH);
        assert_eq!(tree.root(), default_tree_root);
        for i in 0..nof_leaves {
            tree.set(i, leaves[i]).unwrap();
            assert_eq!(tree.root(), roots[i]);
        }
    }

    // Exercises get_empty_leaves_indices through set/delete/override_range
    // sequences on both tree implementations.
    #[test]
    fn test_get_empty_leaves_indices() {
        let depth = 4;
        let nof_leaves: usize = 1 << (depth - 1);
        let leaves: Vec<TestFr> = (0..nof_leaves as u32).map(TestFr::from).collect();
        let leaves_2: Vec<TestFr> = (0u32..2).map(TestFr::from).collect();
        let leaves_4: Vec<TestFr> = (0u32..4).map(TestFr::from).collect();

        let mut tree_full = default_full_merkle_tree(depth);
        let _ = tree_full.set_range(0, leaves.clone());
        assert!(tree_full.get_empty_leaves_indices().is_empty());

        // Deleting leaves one by one grows the empty-indices list in order.
        let mut vec_idxs = Vec::new();
        for i in 0..nof_leaves {
            vec_idxs.push(i);
            let _ = tree_full.delete(i);
            assert_eq!(tree_full.get_empty_leaves_indices(), vec_idxs);
        }

        // Re-setting leaves in reverse order shrinks it symmetrically.
        for i in (0..nof_leaves).rev() {
            vec_idxs.pop();
            let _ = tree_full.set(i, leaves[i]);
            assert_eq!(tree_full.get_empty_leaves_indices(), vec_idxs);
        }

        // Check situation when the number of items to insert is less than the number of items to delete
        tree_full
            .override_range(0, leaves_2.clone(), [0, 1, 2, 3])
            .unwrap();

        // check if the indexes for write and delete are the same
        tree_full
            .override_range(0, leaves_4.clone(), [0, 1, 2, 3])
            .unwrap();
        assert_eq!(tree_full.get_empty_leaves_indices(), vec![]);

        // check if indexes for deletion are before indexes for overwriting
        tree_full
            .override_range(4, leaves_4.clone(), [0, 1, 2, 3])
            .unwrap();
        assert_eq!(tree_full.get_empty_leaves_indices(), vec![0, 1, 2, 3]);

        // check if the indices for write and delete do not overlap completely
        tree_full
            .override_range(2, leaves_4.clone(), [0, 1, 2, 3])
            .unwrap();
        assert_eq!(tree_full.get_empty_leaves_indices(), vec![0, 1]);

        //// Optimal Merkle Tree Test (same sequence as above)

        let mut tree_opt = default_optimal_merkle_tree(depth);
        let _ = tree_opt.set_range(0, leaves.clone());
        assert!(tree_opt.get_empty_leaves_indices().is_empty());

        let mut vec_idxs = Vec::new();
        for i in 0..nof_leaves {
            vec_idxs.push(i);
            let _ = tree_opt.delete(i);
            assert_eq!(tree_opt.get_empty_leaves_indices(), vec_idxs);
        }
        for i in (0..nof_leaves).rev() {
            vec_idxs.pop();
            let _ = tree_opt.set(i, leaves[i]);
            assert_eq!(tree_opt.get_empty_leaves_indices(), vec_idxs);
        }

        // Check situation when the number of items to insert is less than the number of items to delete
        tree_opt
            .override_range(0, leaves_2.clone(), [0, 1, 2, 3])
            .unwrap();

        // check if the indexes for write and delete are the same
        tree_opt
            .override_range(0, leaves_4.clone(), [0, 1, 2, 3])
            .unwrap();
        assert_eq!(tree_opt.get_empty_leaves_indices(), vec![]);

        // check if indexes for deletion are before indexes for overwriting
        tree_opt
            .override_range(4, leaves_4.clone(), [0, 1, 2, 3])
            .unwrap();
        assert_eq!(tree_opt.get_empty_leaves_indices(), vec![0, 1, 2, 3]);

        // check if the indices for write and delete do not overlap completely
        tree_opt
            .override_range(2, leaves_4.clone(), [0, 1, 2, 3])
            .unwrap();
        assert_eq!(tree_opt.get_empty_leaves_indices(), vec![0, 1]);
    }

    // Verifies get_subtree_root at every level: leaves (level == depth),
    // the root (level 0), and all intermediate nodes.
    // NOTE(review): the variable names are swapped here — `tree_full` holds an
    // OptimalMerkleTree and `tree_opt` a FullMerkleTree. Both implementations
    // are still covered, only the names are misleading.
    #[test]
    fn test_subtree_root() {
        let depth = 3;
        let nof_leaves: usize = 6;
        let leaves: Vec<TestFr> = (0..nof_leaves as u32).map(TestFr::from).collect();

        let mut tree_full = default_optimal_merkle_tree(depth);
        let _ = tree_full.set_range(0, leaves.iter().cloned());

        for i in 0..nof_leaves {
            // check leaves
            assert_eq!(
                tree_full.get(i).unwrap(),
                tree_full.get_subtree_root(depth, i).unwrap()
            );

            // check root
            assert_eq!(tree_full.root(), tree_full.get_subtree_root(0, i).unwrap());
        }

        // check intermediate nodes
        for n in (1..=depth).rev() {
            for i in (0..(1 << n)).step_by(2) {
                let idx_l = i * (1 << (depth - n));
                let idx_r = (i + 1) * (1 << (depth - n));
                let idx_sr = idx_l;

                let prev_l = tree_full.get_subtree_root(n, idx_l).unwrap();
                let prev_r = tree_full.get_subtree_root(n, idx_r).unwrap();
                let subroot = tree_full.get_subtree_root(n - 1, idx_sr).unwrap();

                // each parent must equal the hash of its two children
                assert_eq!(Keccak256::hash(&[prev_l, prev_r]), subroot);
            }
        }

        let mut tree_opt = default_full_merkle_tree(depth);
        let _ = tree_opt.set_range(0, leaves.iter().cloned());

        for i in 0..nof_leaves {
            // check leaves
            assert_eq!(
                tree_opt.get(i).unwrap(),
                tree_opt.get_subtree_root(depth, i).unwrap()
            );
            // check root
            assert_eq!(tree_opt.root(), tree_opt.get_subtree_root(0, i).unwrap());
        }

        // check intermediate nodes
        for n in (1..=depth).rev() {
            for i in (0..(1 << n)).step_by(2) {
                let idx_l = i * (1 << (depth - n));
                let idx_r = (i + 1) * (1 << (depth - n));
                let idx_sr = idx_l;

                let prev_l = tree_opt.get_subtree_root(n, idx_l).unwrap();
                let prev_r = tree_opt.get_subtree_root(n, idx_r).unwrap();
                let subroot = tree_opt.get_subtree_root(n - 1, idx_sr).unwrap();

                // each parent must equal the hash of its two children
                assert_eq!(Keccak256::hash(&[prev_l, prev_r]), subroot);
            }
        }
    }

    // Generates and verifies a Merkle proof for each leaf on both
    // implementations, and checks that a proof does not verify for a
    // different leaf.
    #[test]
    fn test_proof() {
        let nof_leaves = 4;
        let leaves: Vec<TestFr> = (0..nof_leaves as u32).map(TestFr::from).collect();

        // We test the FullMerkleTree implementation
        let mut tree = default_full_merkle_tree(DEFAULT_DEPTH);
        for i in 0..nof_leaves {
            // We set the leaves
            tree.set(i, leaves[i]).unwrap();

            // We compute a merkle proof
            let proof = tree.proof(i).expect("index should be set");

            // We verify if the merkle proof corresponds to the right leaf index
            assert_eq!(proof.leaf_index(), i);

            // We verify the proof
            assert!(tree.verify(&leaves[i], &proof).unwrap());

            // We ensure that the Merkle proof and the leaf generate the same root as the tree
            assert_eq!(proof.compute_root_from(&leaves[i]), tree.root());

            // We check that the proof is not valid for another leaf
            assert!(!tree.verify(&leaves[(i + 1) % nof_leaves], &proof).unwrap());
        }

        // We test the OptimalMerkleTree implementation
        let mut tree = default_optimal_merkle_tree(DEFAULT_DEPTH);
        for i in 0..nof_leaves {
            // We set the leaves
            tree.set(i, leaves[i]).unwrap();

            // We compute a merkle proof
            let proof = tree.proof(i).expect("index should be set");

            // We verify if the merkle proof corresponds to the right leaf index
            assert_eq!(proof.leaf_index(), i);

            // We verify the proof
            assert!(tree.verify(&leaves[i], &proof).unwrap());

            // We ensure that the Merkle proof and the leaf generate the same root as the tree
            assert_eq!(proof.compute_root_from(&leaves[i]), tree.root());

            // We check that the proof is not valid for another leaf
            assert!(!tree.verify(&leaves[(i + 1) % nof_leaves], &proof).unwrap());
        }
    }

    // Overrides the first two leaves (deleting indices 0..=3) and checks the
    // new values are stored.
    #[test]
    fn test_override_range() {
        let nof_leaves = 4;
        let leaves: Vec<TestFr> = (0..nof_leaves as u32).map(TestFr::from).collect();

        let mut tree = default_optimal_merkle_tree(DEFAULT_DEPTH);

        // We set the leaves
        tree.set_range(0, leaves.iter().cloned()).unwrap();

        let new_leaves = [
            hex!("0000000000000000000000000000000000000000000000000000000000000005"),
            hex!("0000000000000000000000000000000000000000000000000000000000000006"),
        ]
        .map(TestFr);

        let to_delete_indices: [usize; 2] = [0, 1];

        // We override the leaves
        tree.override_range(
            0, // start at index 0, overwriting the initial leaves
            new_leaves.iter().cloned(),
            to_delete_indices.iter().cloned(),
        )
        .unwrap();

        // ensure that the leaves are set correctly
        for (i, &new_leaf) in new_leaves.iter().enumerate() {
            assert_eq!(tree.get_leaf(i), new_leaf);
        }
    }
}
|
||||
3542
utils/tests/poseidon_constants.rs
Normal file
3542
utils/tests/poseidon_constants.rs
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user