Compare commits

...

30 Commits

Author SHA1 Message Date
Richard Ramos
7bc85dfa19 feat: expose hash, poseidon_hash and delete_leaf 2023-04-28 07:23:20 -04:00
tyshko-rostyslav
4f98fd8028 chore(rln): bring hash functions under a single module (#146) 2023-04-20 16:24:29 +05:30
tyshko-rostyslav
9931e901e5 most changes (#145)
Co-authored-by: tyshkor <tyshko1@gmail.com>
2023-04-13 06:45:12 +05:30
Aaryamann Challani
0fb7e0bbcb feat: abstract shared behaviour into ZerokitMerkleTree (#142)
* feat: abstract shared behaviour into ZerokitMerkleTree

* fix: tests
2023-04-11 16:46:13 +05:30
tyshko-rostyslav
672287b77b call_bool_method_with_error_msg (#144)
Co-authored-by: tyshkor <tyshko1@gmail.com>
2023-04-10 19:45:16 +05:30
Aaryamann Challani
2e868d6cbf fix(ci): force draft=false for nightly releases (#143) 2023-03-31 18:15:37 +05:30
tyshko-rostyslav
39bea35a6d Macro to call functions with an error message with output (#141)
Another variation of our call macro, this time for calls that produce output
2023-03-31 14:44:04 +02:00
tyshko-rostyslav
6ff4eeb237 Macro to call functions with an error message (#140)
abstract out calls

---------

Co-authored-by: tyshkor <tyshko1@gmail.com>
2023-03-29 15:16:36 +02:00
Aaryamann Challani
1f983bb232 fix(rln): move std::path to cfg_if block (#138) 2023-03-24 09:31:01 +05:30
tyshko-rostyslav
13a2c61355 add wasm-strip to reduce size even more (#137)
* added wasm-strip, fixed docs

* requested change

* fix installdeps

* fix ubuntu

* fix macos

---------

Co-authored-by: tyshkor <tyshko1@gmail.com>
2023-03-24 09:30:48 +05:30
tyshko-rostyslav
2bbb710e83 add Cargo.lock to the repo (#136)
add Cargo.lock to the repo
2023-03-23 07:45:36 +01:00
tyshko-rostyslav
8cd4baba8a leave our fork of ark-circom (#132)
* leave our fork of `ark-circom`

---------

Co-authored-by: tyshkor <tyshko1@gmail.com>
2023-03-22 07:01:24 +01:00
Aaryamann Challani
9045e31006 fix ci tag (#133)
* fix(ci): release tag

* fix: use 0.2.1
2023-03-20 17:47:46 +05:30
Aaryamann Challani
9e44bb64dc fix(semaphore): use fixed rev (#130) 2023-03-20 14:06:25 +05:30
Aaryamann Challani
bb7dfb80ee feat(ci): cross-compile release assets, cache deps (#128)
* feat(ci): cross-compile release assets, cache deps

chore(ci): add caching to regular tests

* fix(ci): include cross only in ci env, add note about release assets
2023-03-14 17:44:06 +05:30
Aaryamann Challani
c319f32a1e feat(rln): package rln w/ resources into a static lib (#118)
* feat(rln): package resources into lib

* fix(rln): use Path

* fix(rln): fmt

* fix(rln): trailing slash
2023-03-07 18:15:06 +05:30
tyshko-rostyslav
bf2aa16a71 chore(rln): ensure all dependencies have fixed revision (#127) 2023-03-07 09:11:08 +05:30
tyshko-rostyslav
c423bdea61 chore(rln): update pmtree implementation (#125)
* most changes

* fmt

* hide tests back under feature

* grooming

* changed `SledConfig`

* requested change: rm `dbpath`

---------

Co-authored-by: tyshkor <tyshko1@gmail.com>
2023-03-03 10:33:39 +05:30
Carlos Pérez
5eb98d4b33 change: Replace u64 with usize for length vars (#94)
* change: Replace `u64` with `usize` for length vars

Resolves: #39

* fix(rln): usize instead of u64 in tests

* fix(rln): linter

* fix: outlier u64 usage

* fix(rln|rln-wasm): serde of usize types

---------

Co-authored-by: Aaryamann Challani <43716372+rymnc@users.noreply.github.com>
2023-03-02 19:15:58 +05:30
Aaryamann Challani
b698153e28 fix(ci): nightly release (#124) 2023-03-02 07:41:49 +05:30
Aaryamann Challani
a6c8090c93 feat(v0.2): changelog (#122) 2023-02-28 16:25:18 +05:30
tyshko-rostyslav
7ee7675d52 Redundant dependencies (#111)
* most changes

* delete unused deps + update ark-circom

* fix build

* revert

* default features

* return

* revert 2

* try

* rm

---------

Co-authored-by: tyshkor <tyshko1@gmail.com>
2023-02-28 00:19:47 +05:30
Aaryamann Challani
062055dc5e fix(license): add licensing info to cargo.toml (#121) 2023-02-27 11:47:55 +05:30
tyshko-rostyslav
55b00fd653 Code quality (#114)
* to color_eyre::Result 1st part

* tests and seconds batch

* third batch

* rln fixes + multiplier

* rln-wasm, assert rln, multiplier

* io to color_eyre

* fmt + clippy

* fix lint

* temporary fix of `ark-circom`

* fix ci after merge

* fmt

* fix rln tests

* minor

* fix tests

* imports

* requested change

* report + commented line + requested change

* requested changes

* fix build

* lint fixes

* better comments

---------

Co-authored-by: tyshkor <tyshko1@gmail.com>
2023-02-27 11:46:16 +05:30
Rahul Ghangas
62018b4eba Update documentation for building and testing (#120)
* chore: fix Makefile pre-build

* chore: add Makefile command to install dependency cargo-make

* chore: update all READMEs with instructions to install dependencies, build and test

* chore: add target to fetch all submodules
2023-02-24 11:50:51 +05:30
oskarth
48fa1b9b3d chore: Add MIT/Apache dual license (#119) 2023-02-24 11:20:01 +08:00
Aaryamann Challani
a6145ab201 feat(rln): expose poseidon to ffi (#112) 2023-02-16 13:26:13 +05:30
Aaryamann Challani
e21e9954ac fix(semaphore): revert ark-circom dependency (#116) 2023-02-16 12:32:43 +05:30
Carlos Pérez
de5eb2066a change: Move profile overrides to the workspace Cargo.toml (#95)
Since profile info specified inside workspace members' `Cargo.toml`s
is ignored by Cargo, this moves those details to the workspace-level
`Cargo.toml`.

NOTE that `panic` and `rpath` aren't supported by the override
feature. Therefore, the only remaining step (if considered necessary) is
to create a new profile which also enables these settings.

Resolves: #93
2023-02-08 12:38:42 +01:00
tyshko-rostyslav
7aba62ff51 Add rust-clippy to CI (#108)
Convert clippy warnings to errors, fix them 

---------

Co-authored-by: tyshkor <tyshko1@gmail.com>
2023-02-06 05:54:59 +01:00
51 changed files with 6174 additions and 1623 deletions


@@ -46,10 +46,9 @@ jobs:
profile: minimal
toolchain: stable
override: true
- name: Update git submodules
run: git submodule update --init --recursive
- name: Install cargo-make
run: cargo install cargo-make
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
run: make installdeps
- name: cargo-make test
run: |
cargo make test --release
@@ -72,10 +71,9 @@ jobs:
profile: minimal
toolchain: stable
override: true
- name: Update git submodules
run: git submodule update --init --recursive
- name: Install cargo-make
run: cargo install cargo-make
- uses: Swatinem/rust-cache@v2
- name: Install Dependencies
run: make installdeps
- name: cargo-make test
run: |
cargo make test --release
@@ -97,10 +95,11 @@ jobs:
profile: minimal
toolchain: stable
override: true
- run: git submodule update --init --recursive
- uses: Swatinem/rust-cache@v2
- name: Install Dependencies
run: make installdeps
- name: Install wasm-pack
uses: jetli/wasm-pack-action@v0.3.0
- run: cargo install cargo-make
- run: cargo make build
working-directory: rln-wasm
- run: cargo-make test
@@ -123,10 +122,9 @@ jobs:
profile: minimal
toolchain: stable
override: true
- name: Update git submodules
run: git submodule update --init --recursive
- name: Install cargo-make
run: cargo install cargo-make
- uses: Swatinem/rust-cache@v2
- name: Install Dependencies
run: make installdeps
- name: cargo-make test
run: |
cargo make test --release
@@ -149,10 +147,9 @@ jobs:
profile: minimal
toolchain: stable
override: true
- name: Update git submodules
run: git submodule update --init --recursive
- name: Install cargo-make
run: cargo install cargo-make
- uses: Swatinem/rust-cache@v2
- name: Install Dependencies
run: make installdeps
- name: cargo-make test
run: |
cargo make test --release
@@ -177,30 +174,31 @@ jobs:
toolchain: stable
override: true
components: rustfmt, clippy
- name: Update git submodules
run: git submodule update --init --recursive
- uses: Swatinem/rust-cache@v2
- name: Install Dependencies
run: make installdeps
- name: cargo fmt
if: success() || failure()
run: cargo fmt --all -- --check
- name: multiplier - cargo clippy
if: success() || failure()
run: |
cargo clippy --release
cargo clippy --release -- -D warnings
working-directory: multiplier
- name: semaphore - cargo clippy
if: success() || failure()
run: |
cargo clippy --release
cargo clippy --release -- -D warnings
working-directory: semaphore
- name: rln - cargo clippy
if: success() || failure()
run: |
cargo clippy --release
cargo clippy --release -- -D warnings
working-directory: rln
- name: utils - cargo clippy
if: success() || failure()
run: |
cargo clippy --release
cargo clippy --release -- -D warnings
working-directory: utils
# We skip clippy on rln-wasm, since wasm target is managed by cargo make
# Currently not treating warnings as error, too noisy


@@ -6,6 +6,12 @@ on:
jobs:
linux:
strategy:
matrix:
target:
- x86_64-unknown-linux-gnu
- aarch64-unknown-linux-gnu
- i686-unknown-linux-gnu
name: Linux build
runs-on: ubuntu-latest
steps:
@@ -17,50 +23,57 @@ jobs:
profile: minimal
toolchain: stable
override: true
- name: Update git submodules
run: git submodule update --init --recursive
- name: cargo build
target: ${{ matrix.target }}
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
run: make installdeps
- name: cross build
run: |
cargo build --release --workspace --exclude rln-wasm
cross build --release --target ${{ matrix.target }} --workspace --exclude rln-wasm
mkdir release
cp target/release/librln* release/
tar -czvf linux-rln.tar.gz release/
cp target/${{ matrix.target }}/release/librln* release/
tar -czvf ${{ matrix.target }}-rln.tar.gz release/
- name: Upload archive artifact
uses: actions/upload-artifact@v2
with:
name: linux-archive
path: linux-rln.tar.gz
name: ${{ matrix.target }}-archive
path: ${{ matrix.target }}-rln.tar.gz
retention-days: 2
macos:
name: MacOS build
runs-on: macos-latest
strategy:
matrix:
target:
- x86_64-apple-darwin
- aarch64-apple-darwin
steps:
- name: Checkout sources
uses: actions/checkout@v2
with:
ref: master
- name: Install stable toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- name: Update git submodules
run: git submodule update --init --recursive
- name: cargo build
target: ${{ matrix.target }}
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
run: make installdeps
- name: cross build
run: |
cargo build --release --workspace --exclude rln-wasm
cross build --release --target ${{ matrix.target }} --workspace --exclude rln-wasm
mkdir release
cp target/release/librln* release/
tar -czvf macos-rln.tar.gz release/
cp target/${{ matrix.target }}/release/librln* release/
tar -czvf ${{ matrix.target }}-rln.tar.gz release/
- name: Upload archive artifact
uses: actions/upload-artifact@v2
with:
name: macos-archive
path: macos-rln.tar.gz
name: ${{ matrix.target }}-archive
path: ${{ matrix.target }}-rln.tar.gz
retention-days: 2
browser-rln-wasm:
@@ -69,22 +82,20 @@ jobs:
steps:
- name: Checkout sources
uses: actions/checkout@v2
with:
ref: master
- name: Install stable toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- run: git submodule update --init --recursive
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
run: make installdeps
- name: Install wasm-pack
uses: jetli/wasm-pack-action@v0.3.0
- name: Install cargo-make
run: cargo install cargo-make
- name: cargo make build
- name: cross make build
run: |
cargo make build
cross make build
mkdir release
cp pkg/** release/
tar -czvf browser-rln-wasm.tar.gz release/
@@ -111,7 +122,7 @@ jobs:
uses: actions/download-artifact@v2
- name: Delete tag
uses: dev-drprasad/delete-tag-and-release@v0.2.0
uses: dev-drprasad/delete-tag-and-release@v0.2.1
with:
delete_release: true
tag_name: nightly
@@ -120,14 +131,13 @@ jobs:
- name: Create prerelease
run: |
start_tag=$(gh release list -L 2 --exclude-drafts | grep -v nightly | cut -d$'\t' -f3)
start_tag=$(gh release list -L 2 --exclude-drafts | grep -v nightly | cut -d$'\t' -f3 | sed -n '1p')
gh release create nightly --prerelease --target master \
--title 'Nightly build ("master" branch)' \
--generate-notes \
--draft=false \
--notes-start-tag $start_tag \
linux-archive/linux-rln.tar.gz \
macos-archive/macos-rln.tar.gz \
browser-rln-wasm-archive/browser-rln-wasm.tar.gz
*-archive/*.tar.gz \
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -136,6 +146,4 @@ jobs:
with:
failOnError: false
name: |
linux-archive
macos-archive
browser-rln-wasm-archive
*-archive

.gitignore (vendored, 5 lines changed)

@@ -8,10 +8,7 @@ rln/pmtree_db
# will have compiled files and executables
debug/
target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
wabt/
# These are backup files generated by rustfmt
**/*.rs.bk


@@ -1,18 +1,15 @@
## Upcoming release
## 2023-02-28 v0.2
Release highlights:
This release contains:
- Improved code quality
- Allows consumers of zerokit RLN to set leaves to the Merkle Tree from an arbitrary index. Useful for batching updates to the Merkle Tree.
- Improved performance for proof generation and verification
- rln_wasm which allows for the consumption of RLN through a WebAssembly interface
- Refactored to generate Semaphore-compatible credentials
- Dual License under Apache 2.0 and MIT
- RLN compiles as a static library, which can be consumed through a C FFI
The full list of changes is below.
### Features
- Creation of `set_leaves_from`, which allows consumers to add leaves to a tree from a given starting index. `init_tree_with_leaves` internally uses `set_leaves_from`, with index 0.
### Changes
- Renaming of `set_leaves` to `init_tree_with_leaves`, which is a more accurate representation of the function's utility.
### Fixes
- None
## 2022-09-19 v0.1
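
As a side note on the renamed tree APIs above: a minimal consumer-side sketch, assuming the byte-oriented signatures implied by the FFI and WASM wrappers later in this diff (the exact types are not shown here and are assumptions):

```rust
use rln::public::RLN;

// Hypothetical helper: `leaves` is assumed to be the little-endian
// serialization of field elements expected by the tree APIs.
fn rebuild_and_extend(rln: &mut RLN<'_>, leaves: &[u8]) -> Result<(), String> {
    // init_tree_with_leaves starts at index 0 and internally
    // delegates to set_leaves_from.
    rln.init_tree_with_leaves(leaves)
        .map_err(|e| format!("{e:#?}"))?;
    // Batch updates can then start from an arbitrary index (here: 4).
    rln.set_leaves_from(4, leaves)
        .map_err(|e| format!("{e:#?}"))?;
    Ok(())
}
```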

Cargo.lock (generated, new file, 3803 lines)

File diff suppressed because it is too large.


@@ -7,3 +7,16 @@ members = [
"rln-wasm",
"utils",
]
# Compilation profile for any non-workspace member.
# Dependencies are optimized, even in a dev build. This improves dev performance
# while having negligible impact on incremental build times.
[profile.dev.package."*"]
opt-level = 3
[profile.release.package."rln-wasm"]
# Tell `rustc` to optimize for small code size.
opt-level = "s"
[profile.release.package."semaphore"]
codegen-units = 1
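
The commit note for #95 observes that `panic` and `rpath` cannot be carried by per-package overrides like the ones above; Cargo instead expects a dedicated profile for such settings. A hypothetical sketch (not part of this diff):

```toml
# Hypothetical workspace-root profile: per-package overrides cannot set
# `panic` or `rpath`, so a named profile inheriting from release is the
# supported route if those settings are ever needed.
[profile.release-abort]
inherits = "release"
panic = "abort"
rpath = true
```

It would be selected with `cargo build --profile release-abort`.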

Cross.toml (new file, 32 lines)

@@ -0,0 +1,32 @@
[target.x86_64-pc-windows-gnu]
image = "ghcr.io/cross-rs/x86_64-pc-windows-gnu:latest"
[target.aarch64-unknown-linux-gnu]
image = "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:latest"
[target.x86_64-unknown-linux-gnu]
image = "ghcr.io/cross-rs/x86_64-unknown-linux-gnu:latest"
[target.arm-unknown-linux-gnueabi]
image = "ghcr.io/cross-rs/arm-unknown-linux-gnueabi:latest"
[target.i686-pc-windows-gnu]
image = "ghcr.io/cross-rs/i686-pc-windows-gnu:latest"
[target.i686-unknown-linux-gnu]
image = "ghcr.io/cross-rs/i686-unknown-linux-gnu:latest"
[target.arm-unknown-linux-gnueabihf]
image = "ghcr.io/cross-rs/arm-unknown-linux-gnueabihf:latest"
[target.mips-unknown-linux-gnu]
image = "ghcr.io/cross-rs/mips-unknown-linux-gnu:latest"
[target.mips64-unknown-linux-gnuabi64]
image = "ghcr.io/cross-rs/mips64-unknown-linux-gnuabi64:latest"
[target.mips64el-unknown-linux-gnuabi64]
image = "ghcr.io/cross-rs/mips64el-unknown-linux-gnuabi64:latest"
[target.mipsel-unknown-linux-gnu]
image = "ghcr.io/cross-rs/mipsel-unknown-linux-gnu:latest"

LICENSE-APACHE (new file, 203 lines)

@@ -0,0 +1,203 @@
Copyright (c) 2022 Vac Research
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

LICENSE-MIT (new file, 25 lines)

@@ -0,0 +1,25 @@
Copyright (c) 2022 Vac Research
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.


@@ -1,13 +1,30 @@
.PHONY: all test clean
.PHONY: all installdeps build test clean
all: .pre-build
@cargo make build
all: .pre-build build
.pre-build:
ifndef $(cargo make --help)
@cargo install --force cargo-make
.fetch-submodules:
@git submodule update --init --recursive
.pre-build: .fetch-submodules
@cargo install cargo-make
ifdef CI
@cargo install cross --git https://github.com/cross-rs/cross --branch main
endif
installdeps: .pre-build
ifeq ($(shell uname),Darwin)
@brew update
@brew install cmake ninja
else ifeq ($(shell uname),Linux)
@sudo apt-get update
@sudo apt-get install -y cmake ninja-build
endif
@git clone --recursive https://github.com/WebAssembly/wabt.git
@cd wabt && mkdir build && cd build && cmake .. -GNinja && ninja && sudo ninja install
build: .pre-build
@cargo make build
test: .pre-build
@cargo make test


@@ -17,3 +17,19 @@ in Rust.
- [RLN library](https://github.com/kilic/rln) written in Rust based on Bellman.
- [semaphore-rs](https://github.com/worldcoin/semaphore-rs) written in Rust based on ark-circom.
## Build and Test
To install missing dependencies, run the following command from the root folder
```bash
make installdeps
```
To build and test all crates, run the following commands from the root folder
```bash
make build
make test
```
## Release assets
We use [`cross-rs`](https://github.com/cross-rs/cross) to cross-compile and generate release assets for rln.
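As a rough local equivalent of what the release workflow does (the target triple below is just one of those listed in the workflow matrix):

```bash
# Illustrative invocation mirroring the CI steps in this diff:
cargo install cross --git https://github.com/cross-rs/cross --branch main
cross build --release --target aarch64-unknown-linux-gnu --workspace --exclude rln-wasm
```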


@@ -2,6 +2,7 @@
name = "multiplier"
version = "0.1.0"
edition = "2018"
license = "MIT OR Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -12,23 +13,19 @@ edition = "2018"
# fnv = { version = "1.0.3", default-features = false }
# num = { version = "0.4.0" }
# num-traits = { version = "0.2.0", default-features = false }
num-bigint = { version = "0.4", default-features = false, features = ["rand"] }
# ZKP Generation
ark-ec = { version = "0.3.0", default-features = false, features = ["parallel"] }
# ark-ff = { version = "0.3.0", default-features = false, features = ["parallel", "asm"] }
ark-std = { version = "0.3.0", default-features = false, features = ["parallel"] }
ark-bn254 = { version = "0.3.0" }
ark-groth16 = { git = "https://github.com/arkworks-rs/groth16", rev = "765817f", features = ["parallel"] }
# ark-poly = { version = "^0.3.0", default-features = false, features = ["parallel"] }
ark-relations = { version = "0.3.0", default-features = false }
ark-serialize = { version = "0.3.0", default-features = false }
ark-circom = { git = "https://github.com/gakonst/ark-circom", features = ["circom-2"] }
ark-circom = { git = "https://github.com/gakonst/ark-circom", features = ["circom-2"], rev = "35ce5a9" }
# error handling
# thiserror = "1.0.26"
color-eyre = "0.5"
color-eyre = "0.6.1"
# decoding of data
# hex = "0.4.3"


@@ -3,7 +3,15 @@
Example wrapper around a basic Circom circuit to test Circom 2 integration
through ark-circom and FFI.
# FFI
## Build and Test
To build and test, run the following commands within the module folder
```bash
cargo make build
cargo make test
```
## FFI
To generate C or Nim bindings from Rust FFI, use `cbindgen` or `nbindgen`:


@@ -31,12 +31,12 @@ impl<'a> From<&Buffer> for &'a [u8] {
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn new_circuit(ctx: *mut *mut Multiplier) -> bool {
println!("multiplier ffi: new");
let mul = Multiplier::new();
unsafe { *ctx = Box::into_raw(Box::new(mul)) };
true
if let Ok(mul) = Multiplier::new() {
unsafe { *ctx = Box::into_raw(Box::new(mul)) };
true
} else {
false
}
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]


@@ -1,6 +1,6 @@
use ark_circom::{CircomBuilder, CircomConfig};
use ark_std::rand::thread_rng;
use color_eyre::Result;
use color_eyre::{Report, Result};
use ark_bn254::Bn254;
use ark_groth16::{
@@ -25,17 +25,18 @@ fn groth16_proof_example() -> Result<()> {
let circom = builder.build()?;
let inputs = circom.get_public_inputs().unwrap();
let inputs = circom
.get_public_inputs()
.ok_or(Report::msg("no public inputs"))?;
let proof = prove(circom, &params, &mut rng)?;
let pvk = prepare_verifying_key(&params.vk);
let verified = verify_proof(&pvk, &proof, &inputs)?;
assert!(verified);
Ok(())
match verify_proof(&pvk, &proof, &inputs) {
Ok(_) => Ok(()),
Err(_) => Err(Report::msg("not verified")),
}
}
fn main() {


@@ -7,9 +7,8 @@ use ark_groth16::{
Proof, ProvingKey,
};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
// , SerializationError};
use std::io::{self, Read, Write};
use color_eyre::{Report, Result};
use std::io::{Read, Write};
pub struct Multiplier {
circom: CircomCircuit<Bn254>,
@@ -18,12 +17,11 @@ pub struct Multiplier {
impl Multiplier {
// TODO Break this apart here
pub fn new() -> Multiplier {
pub fn new() -> Result<Multiplier> {
let cfg = CircomConfig::<Bn254>::new(
"./resources/circom2_multiplier2.wasm",
"./resources/circom2_multiplier2.r1cs",
)
.unwrap();
)?;
let mut builder = CircomBuilder::new(cfg);
builder.push_input("a", 3);
@@ -34,40 +32,41 @@ impl Multiplier {
let mut rng = thread_rng();
let params = generate_random_parameters::<Bn254, _, _>(circom, &mut rng).unwrap();
let params = generate_random_parameters::<Bn254, _, _>(circom, &mut rng)?;
let circom = builder.build().unwrap();
let circom = builder.build()?;
//let inputs = circom.get_public_inputs().unwrap();
Multiplier { circom, params }
Ok(Multiplier { circom, params })
}
// TODO Input Read
pub fn prove<W: Write>(&self, result_data: W) -> io::Result<()> {
pub fn prove<W: Write>(&self, result_data: W) -> Result<()> {
let mut rng = thread_rng();
// XXX: There's probably a better way to do this
let circom = self.circom.clone();
let params = self.params.clone();
let proof = prove(circom, &params, &mut rng).unwrap();
let proof = prove(circom, &params, &mut rng)?;
// XXX: Unclear if this is different from other serialization(s)
let _ = proof.serialize(result_data).unwrap();
proof.serialize(result_data)?;
Ok(())
}
pub fn verify<R: Read>(&self, input_data: R) -> io::Result<bool> {
let proof = Proof::deserialize(input_data).unwrap();
pub fn verify<R: Read>(&self, input_data: R) -> Result<bool> {
let proof = Proof::deserialize(input_data)?;
let pvk = prepare_verifying_key(&self.params.vk);
// XXX Part of input data?
let inputs = self.circom.get_public_inputs().unwrap();
let inputs = self
.circom
.get_public_inputs()
.ok_or(Report::msg("no public inputs"))?;
let verified = verify_proof(&pvk, &proof, &inputs).unwrap();
let verified = verify_proof(&pvk, &proof, &inputs)?;
Ok(verified)
}
@@ -75,6 +74,6 @@ impl Multiplier {
impl Default for Multiplier {
fn default() -> Self {
Self::new()
Self::new().unwrap()
}
}


@@ -4,8 +4,7 @@ mod tests {
#[test]
fn multiplier_proof() {
let mul = Multiplier::new();
//let inputs = mul.circom.get_public_inputs().unwrap();
let mul = Multiplier::new().unwrap();
let mut output_data: Vec<u8> = Vec::new();
let _ = mul.prove(&mut output_data);


@@ -2,6 +2,7 @@
name = "private-settlement"
version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html


@@ -2,3 +2,10 @@
This module is to provide APIs to manage, compute and verify [Private Settlement](https://rfc.vac.dev/spec/44/) zkSNARK proofs and primitives.
## Build and Test
To build and test, run the following commands within the module folder
```bash
cargo make build
cargo make test
```


@@ -1,6 +1,6 @@
[package]
name = "rln-wasm"
version = "0.0.7"
version = "0.0.8"
edition = "2021"
license = "MIT or Apache2"
@@ -31,6 +31,3 @@ console_error_panic_hook = { version = "0.1.7", optional = true }
wasm-bindgen-test = "0.3.13"
wasm-bindgen-futures = "0.4.33"
[profile.release]
# Tell `rustc` to optimize for small code size.
opt-level = "s"


@@ -9,9 +9,14 @@ script = "sed -i.bak 's/rln-wasm/zerokit-rln-wasm/g' pkg/package.json && rm pkg/
clear = true
dependencies = [
"pack-build",
"pack-rename"
"pack-rename",
"post-build"
]
[tasks.post-build]
command = "wasm-strip"
args = ["./pkg/rln_wasm_bg.wasm"]
[tasks.test]
command = "wasm-pack"
args = ["test", "--release", "--node"]


@@ -10,11 +10,22 @@ curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
```
cargo install cargo-make
```
OR
```
make installdeps
```
3. Compile zerokit for `wasm32-unknown-unknown`:
```
cd rln-wasm
cargo make build
```
4. Compile a slimmer version of zerokit for `wasm32-unknown-unknown`:
```
cd rln-wasm
cargo make post-build
```
## Running tests
```


@@ -3,9 +3,11 @@
extern crate wasm_bindgen;
extern crate web_sys;
use std::vec::Vec;
use js_sys::{BigInt as JsBigInt, Object, Uint8Array};
use num_bigint::BigInt;
use rln::public::RLN;
use rln::public::{hash, poseidon_hash, RLN};
use wasm_bindgen::prelude::*;
#[wasm_bindgen]
@@ -20,34 +22,196 @@ pub struct RLNWrapper {
instance: RLN<'static>,
}
// Macro to call methods with arbitrary amount of arguments,
// which have the last argument is output buffer pointer
// First argument to the macro is context,
// second is the actual method on `RLN`
// third is the aforementioned output buffer argument
// rest are all other arguments to the method
macro_rules! call_with_output_and_error_msg {
// this variant is needed for the case when
// there are zero other arguments
($instance:expr, $method:ident, $error_msg:expr) => {
{
let mut output_data: Vec<u8> = Vec::new();
let new_instance = $instance.process();
if let Err(err) = new_instance.instance.$method(&mut output_data) {
std::mem::forget(output_data);
Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
};
($instance:expr, $method:ident, $error_msg:expr, $( $arg:expr ),* ) => {
{
let mut output_data: Vec<u8> = Vec::new();
let new_instance = $instance.process();
if let Err(err) = new_instance.instance.$method($($arg.process()),*, &mut output_data) {
std::mem::forget(output_data);
Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
};
}
// Macro to call_with_error_msg methods with arbitrary amount of arguments,
// First argument to the macro is context,
// second is the actual method on `RLNWrapper`
// rest are all other arguments to the method
macro_rules! call_with_error_msg {
($instance:expr, $method:ident, $error_msg:expr $(, $arg:expr)*) => {
{
let new_instance: &mut RLNWrapper = $instance.process();
if let Err(err) = new_instance.instance.$method($($arg.process()),*) {
Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
} else {
Ok(())
}
}
}
}
macro_rules! call {
($instance:expr, $method:ident $(, $arg:expr)*) => {
{
let new_instance: &mut RLNWrapper = $instance.process();
new_instance.instance.$method($($arg.process()),*)
}
}
}
macro_rules! call_bool_method_with_error_msg {
($instance:expr, $method:ident, $error_msg:expr $(, $arg:expr)*) => {
{
let new_instance: &RLNWrapper = $instance.process();
new_instance.instance.$method($($arg.process()),*).map_err(|err| format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
}
}
}
// Macro to execute a function with arbitrary amount of arguments,
// First argument is the function to execute
// Rest are all other arguments to the method
macro_rules! fn_call_with_output_and_error_msg {
// this variant is needed for the case when
// there are zero other arguments
($func:ident, $error_msg:expr) => {
{
let mut output_data: Vec<u8> = Vec::new();
if let Err(err) = $func(&mut output_data) {
std::mem::forget(output_data);
Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
};
($func:ident, $error_msg:expr, $( $arg:expr ),* ) => {
{
let mut output_data: Vec<u8> = Vec::new();
if let Err(err) = $func($($arg.process()),*, &mut output_data) {
std::mem::forget(output_data);
Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
} else {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
}
}
};
}
trait ProcessArg {
type ReturnType;
fn process(self) -> Self::ReturnType;
}
impl ProcessArg for usize {
type ReturnType = usize;
fn process(self) -> Self::ReturnType {
self
}
}
impl<T> ProcessArg for Vec<T> {
type ReturnType = Vec<T>;
fn process(self) -> Self::ReturnType {
self
}
}
impl<'a> ProcessArg for *const RLN<'a> {
type ReturnType = &'a RLN<'a>;
fn process(self) -> Self::ReturnType {
unsafe { &*self }
}
}
impl ProcessArg for *const RLNWrapper {
type ReturnType = &'static RLNWrapper;
fn process(self) -> Self::ReturnType {
unsafe { &*self }
}
}
impl ProcessArg for *mut RLNWrapper {
type ReturnType = &'static mut RLNWrapper;
fn process(self) -> Self::ReturnType {
unsafe { &mut *self }
}
}
impl<'a> ProcessArg for &'a [u8] {
type ReturnType = &'a [u8];
fn process(self) -> Self::ReturnType {
self
}
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = newRLN)]
pub fn wasm_new(tree_height: usize, zkey: Uint8Array, vk: Uint8Array) -> *mut RLNWrapper {
let instance = RLN::new_with_params(tree_height, zkey.to_vec(), vk.to_vec());
pub fn wasm_new(
tree_height: usize,
zkey: Uint8Array,
vk: Uint8Array,
) -> Result<*mut RLNWrapper, String> {
let instance = RLN::new_with_params(tree_height, zkey.to_vec(), vk.to_vec())
.map_err(|err| format!("{:#?}", err))?;
let wrapper = RLNWrapper { instance };
Box::into_raw(Box::new(wrapper))
Ok(Box::into_raw(Box::new(wrapper)))
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = getSerializedRLNWitness)]
pub fn wasm_get_serialized_rln_witness(ctx: *mut RLNWrapper, input: Uint8Array) -> Uint8Array {
let wrapper = unsafe { &mut *ctx };
let rln_witness = wrapper
.instance
.get_serialized_rln_witness(&input.to_vec()[..]);
Uint8Array::from(&rln_witness[..])
pub fn wasm_get_serialized_rln_witness(
ctx: *mut RLNWrapper,
input: Uint8Array,
) -> Result<Uint8Array, String> {
let rln_witness = call!(ctx, get_serialized_rln_witness, &input.to_vec()[..])
.map_err(|err| format!("{:#?}", err))?;
Ok(Uint8Array::from(&rln_witness[..]))
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = insertMember)]
pub fn wasm_set_next_leaf(ctx: *mut RLNWrapper, input: Uint8Array) -> Result<(), String> {
let wrapper = unsafe { &mut *ctx };
if wrapper.instance.set_next_leaf(&input.to_vec()[..]).is_ok() {
Ok(())
} else {
Err("could not insert member into merkle tree".into())
}
call_with_error_msg!(
ctx,
set_next_leaf,
"could not insert member into merkle tree".to_string(),
&input.to_vec()[..]
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
@@ -57,45 +221,42 @@ pub fn wasm_set_leaves_from(
index: usize,
input: Uint8Array,
) -> Result<(), String> {
let wrapper = unsafe { &mut *ctx };
if wrapper
.instance
.set_leaves_from(index as usize, &input.to_vec()[..])
.is_ok()
{
Ok(())
} else {
Err("could not set multiple leaves".into())
}
call_with_error_msg!(
ctx,
set_leaves_from,
"could not set multiple leaves".to_string(),
index,
&*input.to_vec()
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = deleteLeaf)]
pub fn wasm_delete_leaf(ctx: *mut RLNWrapper, index: usize) -> Result<(), String> {
call_with_error_msg!(ctx, delete_leaf, "could not delete leaf".to_string(), index)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = initTreeWithLeaves)]
pub fn wasm_init_tree_with_leaves(ctx: *mut RLNWrapper, input: Uint8Array) -> Result<(), String> {
let wrapper = unsafe { &mut *ctx };
if wrapper
.instance
.init_tree_with_leaves(&input.to_vec()[..])
.is_ok()
{
Ok(())
} else {
Err("could not init merkle tree".into())
}
call_with_error_msg!(
ctx,
init_tree_with_leaves,
"could not init merkle tree".to_string(),
&*input.to_vec()
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = RLNWitnessToJson)]
pub fn rln_witness_to_json(ctx: *mut RLNWrapper, serialized_witness: Uint8Array) -> Object {
let wrapper = unsafe { &mut *ctx };
let inputs = wrapper
.instance
.get_rln_witness_json(&serialized_witness.to_vec()[..])
.unwrap();
let js_value = serde_wasm_bindgen::to_value(&inputs).unwrap();
let obj = Object::from_entries(&js_value);
obj.unwrap()
pub fn rln_witness_to_json(
ctx: *mut RLNWrapper,
serialized_witness: Uint8Array,
) -> Result<Object, String> {
let inputs = call!(ctx, get_rln_witness_json, &serialized_witness.to_vec()[..])
.map_err(|err| err.to_string())?;
let js_value = serde_wasm_bindgen::to_value(&inputs).map_err(|err| err.to_string())?;
Object::from_entries(&js_value).map_err(|err| format!("{:#?}", err))
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
@@ -105,83 +266,49 @@ pub fn generate_rln_proof_with_witness(
calculated_witness: Vec<JsBigInt>,
serialized_witness: Uint8Array,
) -> Result<Uint8Array, String> {
let wrapper = unsafe { &mut *ctx };
let mut witness_vec: Vec<BigInt> = vec![];
let witness_vec: Vec<BigInt> = calculated_witness
.iter()
.map(|v| {
for v in calculated_witness {
witness_vec.push(
v.to_string(10)
.unwrap()
.map_err(|err| format!("{:#?}", err))?
.as_string()
.unwrap()
.ok_or("not a string error")?
.parse::<BigInt>()
.unwrap()
})
.collect();
let mut output_data: Vec<u8> = Vec::new();
if wrapper
.instance
.generate_rln_proof_with_witness(witness_vec, serialized_witness.to_vec(), &mut output_data)
.is_ok()
{
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
} else {
std::mem::forget(output_data);
Err("could not generate proof".into())
.map_err(|err| format!("{:#?}", err))?,
);
}
call_with_output_and_error_msg!(
ctx,
generate_rln_proof_with_witness,
"could not generate proof",
witness_vec,
serialized_witness.to_vec()
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateMembershipKey)]
pub fn wasm_key_gen(ctx: *const RLNWrapper) -> Result<Uint8Array, String> {
let wrapper = unsafe { &*ctx };
let mut output_data: Vec<u8> = Vec::new();
if wrapper.instance.key_gen(&mut output_data).is_ok() {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
} else {
std::mem::forget(output_data);
Err("could not generate membership keys".into())
}
call_with_output_and_error_msg!(ctx, key_gen, "could not generate membership keys")
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateExtendedMembershipKey)]
pub fn wasm_extended_key_gen(ctx: *const RLNWrapper) -> Result<Uint8Array, String> {
let wrapper = unsafe { &*ctx };
let mut output_data: Vec<u8> = Vec::new();
if wrapper.instance.extended_key_gen(&mut output_data).is_ok() {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
} else {
std::mem::forget(output_data);
Err("could not generate membership keys".into())
}
call_with_output_and_error_msg!(ctx, extended_key_gen, "could not generate membership keys")
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateSeededMembershipKey)]
pub fn wasm_seeded_key_gen(ctx: *const RLNWrapper, seed: Uint8Array) -> Result<Uint8Array, String> {
let wrapper = unsafe { &*ctx };
let mut output_data: Vec<u8> = Vec::new();
if wrapper
.instance
.seeded_key_gen(&seed.to_vec()[..], &mut output_data)
.is_ok()
{
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
} else {
std::mem::forget(output_data);
Err("could not generate membership key".into())
}
call_with_output_and_error_msg!(
ctx,
seeded_key_gen,
"could not generate membership key",
&seed.to_vec()[..]
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
@@ -190,20 +317,12 @@ pub fn wasm_seeded_extended_key_gen(
ctx: *const RLNWrapper,
seed: Uint8Array,
) -> Result<Uint8Array, String> {
let wrapper = unsafe { &*ctx };
let mut output_data: Vec<u8> = Vec::new();
if wrapper
.instance
.seeded_extended_key_gen(&seed.to_vec()[..], &mut output_data)
.is_ok()
{
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
} else {
std::mem::forget(output_data);
Err("could not generate membership key".into())
}
call_with_output_and_error_msg!(
ctx,
seeded_extended_key_gen,
"could not generate membership key",
&seed.to_vec()[..]
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
@@ -213,38 +332,24 @@ pub fn wasm_recover_id_secret(
input_proof_data_1: Uint8Array,
input_proof_data_2: Uint8Array,
) -> Result<Uint8Array, String> {
let wrapper = unsafe { &*ctx };
let mut output_data: Vec<u8> = Vec::new();
if wrapper
.instance
.recover_id_secret(
&input_proof_data_1.to_vec()[..],
&input_proof_data_2.to_vec()[..],
&mut output_data,
)
.is_ok()
{
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
} else {
std::mem::forget(output_data);
Err("could not recover id secret".into())
}
call_with_output_and_error_msg!(
ctx,
recover_id_secret,
"could not recover id secret",
&input_proof_data_1.to_vec()[..],
&input_proof_data_2.to_vec()[..]
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = verifyRLNProof)]
pub fn wasm_verify_rln_proof(ctx: *const RLNWrapper, proof: Uint8Array) -> Result<bool, String> {
let wrapper = unsafe { &*ctx };
if match wrapper.instance.verify_rln_proof(&proof.to_vec()[..]) {
Ok(verified) => verified,
Err(_) => return Err("error while verifying rln proof".into()),
} {
return Ok(true);
}
Ok(false)
call_bool_method_with_error_msg!(
ctx,
verify_rln_proof,
"error while verifying rln proof".to_string(),
&proof.to_vec()[..]
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
@@ -254,31 +359,31 @@ pub fn wasm_verify_with_roots(
proof: Uint8Array,
roots: Uint8Array,
) -> Result<bool, String> {
let wrapper = unsafe { &*ctx };
if match wrapper
.instance
.verify_with_roots(&proof.to_vec()[..], &roots.to_vec()[..])
{
Ok(verified) => verified,
Err(_) => return Err("error while verifying proof with roots".into()),
} {
return Ok(true);
}
Ok(false)
call_bool_method_with_error_msg!(
ctx,
verify_with_roots,
"error while verifying proof with roots".to_string(),
&proof.to_vec()[..],
&roots.to_vec()[..]
)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = getRoot)]
pub fn wasm_get_root(ctx: *const RLNWrapper) -> Result<Uint8Array, String> {
let wrapper = unsafe { &*ctx };
let mut output_data: Vec<u8> = Vec::new();
if wrapper.instance.get_root(&mut output_data).is_ok() {
let result = Uint8Array::from(&output_data[..]);
std::mem::forget(output_data);
Ok(result)
} else {
std::mem::forget(output_data);
Err("could not obtain root".into())
}
call_with_output_and_error_msg!(ctx, get_root, "could not obtain root")
}
#[wasm_bindgen(js_name = hash)]
pub fn wasm_hash(input: Uint8Array) -> Result<Uint8Array, String> {
fn_call_with_output_and_error_msg!(hash, "could not generate hash", &input.to_vec()[..])
}
#[wasm_bindgen(js_name = poseidonHash)]
pub fn wasm_poseidon_hash(input: Uint8Array) -> Result<Uint8Array, String> {
fn_call_with_output_and_error_msg!(
poseidon_hash,
"could not generate poseidon hash",
&input.to_vec()[..]
)
}
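
For readers following the macro-heavy refactor above, here is an approximate hand expansion of one call site, `call_with_output_and_error_msg!(ctx, get_root, "could not obtain root")`; this is an illustration, not code from the diff:

```rust
// Approximate expansion of the zero-extra-argument macro variant:
let mut output_data: Vec<u8> = Vec::new();
let new_instance = ctx.process(); // *const RLNWrapper -> &RLNWrapper via ProcessArg
if let Err(err) = new_instance.instance.get_root(&mut output_data) {
    std::mem::forget(output_data);
    return Err(format!("Msg: {:#?}, Error: {:#?}", "could not obtain root", err));
}
let result = Uint8Array::from(&output_data[..]);
// Mirrors the macro's std::mem::forget on the output buffer.
std::mem::forget(output_data);
Ok(result)
```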


@@ -4,6 +4,7 @@
mod tests {
use js_sys::{BigInt as JsBigInt, Object, Uint8Array};
use rln::circuit::TEST_TREE_HEIGHT;
use rln::utils::normalize_usize;
use rln_wasm::*;
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsValue;
@@ -41,20 +42,19 @@ mod tests {
// Prepare the message
let signal = "Hello World".as_bytes();
let signal_len: u64 = signal.len() as u64;
// Setting up the epoch (With 0s for the test)
let epoch = Uint8Array::new_with_length(32);
epoch.fill(0, 0, 32);
let identity_index: u64 = 0;
let identity_index: usize = 0;
// Serializing the message
let mut serialized_vec: Vec<u8> = Vec::new();
serialized_vec.append(&mut idkey.to_vec());
serialized_vec.append(&mut identity_index.to_le_bytes().to_vec());
serialized_vec.append(&mut normalize_usize(identity_index));
serialized_vec.append(&mut epoch.to_vec());
serialized_vec.append(&mut signal_len.to_le_bytes().to_vec());
serialized_vec.append(&mut normalize_usize(signal.len()));
serialized_vec.append(&mut signal.to_vec());
let serialized_message = Uint8Array::from(&serialized_vec[..]);
@@ -88,7 +88,7 @@ mod tests {
// Add signal_len | signal
let mut proof_bytes = proof.to_vec();
proof_bytes.append(&mut signal_len.to_le_bytes().to_vec());
proof_bytes.append(&mut normalize_usize(signal.len()));
proof_bytes.append(&mut signal.to_vec());
let proof_with_signal = Uint8Array::from(&proof_bytes[..]);
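
`normalize_usize`, imported above from `rln::utils`, is not shown in this diff. A plausible sketch, purely as an assumption: encode the index as a fixed 8-byte little-endian value so 32-bit wasm and 64-bit native builds serialize identically:

```rust
// Hypothetical sketch of rln::utils::normalize_usize (the real
// implementation is not part of this diff): always emit 8 bytes,
// little-endian, regardless of the platform's usize width.
pub fn normalize_usize(input: usize) -> Vec<u8> {
    (input as u64).to_le_bytes().to_vec()
}
```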


@@ -2,9 +2,10 @@
name = "rln"
version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
[lib]
crate-type = ["cdylib", "rlib", "staticlib"]
crate-type = ["rlib", "staticlib"]
# This flag disables cargo doctests, i.e. testing example code-snippets in documentation
doctest = false
@@ -12,39 +13,40 @@ doctest = false
[dependencies]
# ZKP Generation
ark-ec = { version = "0.3.0", default-features = false }
ark-ff = { version = "0.3.0", default-features = false, features = [ "asm"] }
ark-std = { version = "0.3.0", default-features = false }
ark-bn254 = { version = "0.3.0" }
ark-groth16 = { git = "https://github.com/arkworks-rs/groth16", rev = "765817f", default-features = false }
ark-relations = { version = "0.3.0", default-features = false, features = [ "std" ] }
ark-serialize = { version = "0.3.0", default-features = false }
ark-circom = { git = "https://github.com/vacp2p/ark-circom", branch = "wasm", default-features = false, features = ["circom-2"] }
#ark-circom = { git = "https://github.com/vacp2p/ark-circom", branch = "no-ethers-core", features = ["circom-2"] }
ark-ec = { version = "=0.4.1", default-features = false }
ark-ff = { version = "=0.4.1", default-features = false, features = [ "asm"] }
ark-std = { version = "=0.4.0", default-features = false }
ark-bn254 = { version = "=0.4.0" }
ark-groth16 = { version = "=0.4.0", features = ["parallel"], default-features = false }
ark-relations = { version = "=0.4.0", default-features = false, features = [ "std" ] }
ark-serialize = { version = "=0.4.1", default-features = false }
ark-circom = { git = "https://github.com/gakonst/ark-circom", default-features = false, features = ["circom-2"] }
# WASM
wasmer = { version = "2.3.0", default-features = false }
# error handling
color-eyre = "0.5.11"
thiserror = "1.0.0"
color-eyre = "=0.6.2"
thiserror = "=1.0.38"
# utilities
cfg-if = "1.0"
num-bigint = { version = "0.4.3", default-features = false, features = ["rand"] }
cfg-if = "=1.0"
num-bigint = { version = "=0.4.3", default-features = false, features = ["rand"] }
num-traits = "0.2.11"
once_cell = "1.14.0"
rand = "0.8"
rand_chacha = "0.3.1"
tiny-keccak = { version = "2.0.2", features = ["keccak"] }
rand = "=0.8.5"
rand_chacha = "=0.3.1"
tiny-keccak = { version = "=2.0.2", features = ["keccak"] }
utils = { path = "../utils/", default-features = false }
pmtree = { git = "https://github.com/Rate-Limiting-Nullifier/pmtree", rev = "f6d1a1fecad72cd39e6808e78085091d541dc882", optional = true}
# serialization
serde_json = "1.0.48"
include_dir = "=0.7.3"
[dev-dependencies]
pmtree = { git = "https://github.com/Rate-Limiting-Nullifier/pmtree" }
sled = "0.34.7"
sled = "=0.34.7"
[features]
default = ["parallel", "wasmer/sys-default"]
@@ -53,4 +55,4 @@ wasm = ["wasmer/js", "wasmer/std"]
fullmerkletree = ["default"]
# Note: pmtree feature is still experimental
pmtree = ["default"]
pmtree-ft = ["default", "pmtree"]


@@ -3,15 +3,21 @@
This module provides APIs to manage, compute and verify [RLN](https://rfc.vac.dev/spec/32/) zkSNARK proofs and RLN primitives.
## Pre-requisites
### Install
### Install dependencies and clone repo
```sh
make installdeps
git clone https://github.com/vacp2p/zerokit.git
cd zerokit/rln
```
Implemented tests can be executed by running within the module folder
`cargo test --release`
### Build and Test
To build and test, run the following commands within the module folder
```bash
cargo make build
cargo make test
```
### Compile ZK circuits


@@ -8,11 +8,10 @@ use ark_circom::read_zkey;
use ark_groth16::{ProvingKey, VerifyingKey};
use ark_relations::r1cs::ConstraintMatrices;
use cfg_if::cfg_if;
use color_eyre::{Report, Result};
use num_bigint::BigUint;
use serde_json::Value;
use std::fs::File;
use std::io::{Cursor, Error, ErrorKind, Result};
use std::path::Path;
use std::io::Cursor;
use std::str::FromStr;
cfg_if! {
@@ -21,11 +20,13 @@ cfg_if! {
use once_cell::sync::OnceCell;
use std::sync::Mutex;
use wasmer::{Module, Store};
use include_dir::{include_dir, Dir};
use std::path::Path;
}
}
const ZKEY_FILENAME: &str = "rln_final.zkey";
const VK_FILENAME: &str = "verifying_key.json";
const VK_FILENAME: &str = "verification_key.json";
const WASM_FILENAME: &str = "rln.wasm";
// These parameters are used for tests
@@ -33,11 +34,11 @@ const WASM_FILENAME: &str = "rln.wasm";
// Changing these parameters to other values than these defaults will cause zkSNARK proof verification to fail
pub const TEST_PARAMETERS_INDEX: usize = 2;
pub const TEST_TREE_HEIGHT: usize = [15, 19, 20][TEST_PARAMETERS_INDEX];
pub const TEST_RESOURCES_FOLDER: &str = [
"./resources/tree_height_15/",
"./resources/tree_height_19/",
"./resources/tree_height_20/",
][TEST_PARAMETERS_INDEX];
pub const TEST_RESOURCES_FOLDER: &str =
["tree_height_15", "tree_height_19", "tree_height_20"][TEST_PARAMETERS_INDEX];
#[cfg(not(target_arch = "wasm32"))]
static RESOURCES_DIR: Dir<'_> = include_dir!("$CARGO_MANIFEST_DIR/resources");
// The following types define the pairing friendly elliptic curve, the underlying finite fields and groups default to this module
// Note that proofs are serialized assuming Fr to be 4x8 = 32 bytes in size. Hence, changing to a curve with different encoding will make proof verification to fail
@@ -57,21 +58,22 @@ pub fn zkey_from_raw(zkey_data: &Vec<u8>) -> Result<(ProvingKey<Curve>, Constrai
let proving_key_and_matrices = read_zkey(&mut c)?;
Ok(proving_key_and_matrices)
} else {
Err(Error::new(ErrorKind::NotFound, "No proving key found!"))
Err(Report::msg("No proving key found!"))
}
}
// Loads the proving key
#[cfg(not(target_arch = "wasm32"))]
pub fn zkey_from_folder(
resources_folder: &str,
) -> Result<(ProvingKey<Curve>, ConstraintMatrices<Fr>)> {
let zkey_path = format!("{resources_folder}{ZKEY_FILENAME}");
if Path::new(&zkey_path).exists() {
let mut file = File::open(&zkey_path)?;
let proving_key_and_matrices = read_zkey(&mut file)?;
let zkey = RESOURCES_DIR.get_file(Path::new(resources_folder).join(ZKEY_FILENAME));
if let Some(zkey) = zkey {
let mut c = Cursor::new(zkey.contents());
let proving_key_and_matrices = read_zkey(&mut c)?;
Ok(proving_key_and_matrices)
} else {
Err(Error::new(ErrorKind::NotFound, "No proving key found!"))
Err(Report::msg("No proving key found!"))
}
}
@@ -80,39 +82,35 @@ pub fn vk_from_raw(vk_data: &Vec<u8>, zkey_data: &Vec<u8>) -> Result<VerifyingKe
let verifying_key: VerifyingKey<Curve>;
if !vk_data.is_empty() {
verifying_key = vk_from_vector(vk_data);
verifying_key = vk_from_vector(vk_data)?;
Ok(verifying_key)
} else if !zkey_data.is_empty() {
let (proving_key, _matrices) = zkey_from_raw(zkey_data)?;
verifying_key = proving_key.vk;
Ok(verifying_key)
} else {
Err(Error::new(
ErrorKind::NotFound,
"No proving/verification key found!",
))
Err(Report::msg("No proving/verification key found!"))
}
}
// Loads the verification key
#[cfg(not(target_arch = "wasm32"))]
pub fn vk_from_folder(resources_folder: &str) -> Result<VerifyingKey<Curve>> {
let vk_path = format!("{resources_folder}{VK_FILENAME}");
let zkey_path = format!("{resources_folder}{ZKEY_FILENAME}");
let vk = RESOURCES_DIR.get_file(Path::new(resources_folder).join(VK_FILENAME));
let zkey = RESOURCES_DIR.get_file(Path::new(resources_folder).join(ZKEY_FILENAME));
let verifying_key: VerifyingKey<Curve>;
if Path::new(&vk_path).exists() {
verifying_key = vk_from_json(&vk_path);
if let Some(vk) = vk {
verifying_key = vk_from_json(vk.contents_utf8().ok_or(Report::msg(
"Could not read verification key from JSON file!",
))?)?;
Ok(verifying_key)
} else if Path::new(&zkey_path).exists() {
} else if let Some(_zkey) = zkey {
let (proving_key, _matrices) = zkey_from_folder(resources_folder)?;
verifying_key = proving_key.vk;
Ok(verifying_key)
} else {
Err(Error::new(
ErrorKind::NotFound,
"No proving/verification key found!",
))
Err(Report::msg("No proving/verification key found!"))
}
}
@@ -121,129 +119,150 @@ static WITNESS_CALCULATOR: OnceCell<Mutex<WitnessCalculator>> = OnceCell::new();
// Initializes the witness calculator using a bytes vector
#[cfg(not(target_arch = "wasm32"))]
pub fn circom_from_raw(wasm_buffer: Vec<u8>) -> &'static Mutex<WitnessCalculator> {
WITNESS_CALCULATOR.get_or_init(|| {
pub fn circom_from_raw(wasm_buffer: Vec<u8>) -> Result<&'static Mutex<WitnessCalculator>> {
WITNESS_CALCULATOR.get_or_try_init(|| {
let store = Store::default();
let module = Module::new(&store, wasm_buffer).unwrap();
let result =
WitnessCalculator::from_module(module).expect("Failed to create witness calculator");
Mutex::new(result)
let module = Module::new(&store, wasm_buffer)?;
let result = WitnessCalculator::from_module(module)?;
Ok::<Mutex<WitnessCalculator>, Report>(Mutex::new(result))
})
}
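`get_or_try_init` makes the lazy initialization fallible: the closure runs at most once, and on `Err` the cell stays empty so a later call can retry instead of poisoning the static. A minimal sketch of the pattern, assuming `once_cell::sync::OnceCell` and a hypothetical `config.txt`:
```rust
use color_eyre::{Report, Result};
use once_cell::sync::OnceCell;

static CONFIG: OnceCell<String> = OnceCell::new();

fn load_config() -> Result<&'static String> {
    CONFIG.get_or_try_init(|| {
        // On failure the error propagates and the cell remains uninitialized.
        let raw = std::fs::read("config.txt")?;
        Ok::<String, Report>(String::from_utf8(raw)?)
    })
}
```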
// Initializes the witness calculator
#[cfg(not(target_arch = "wasm32"))]
pub fn circom_from_folder(resources_folder: &str) -> &'static Mutex<WitnessCalculator> {
pub fn circom_from_folder(resources_folder: &str) -> Result<&'static Mutex<WitnessCalculator>> {
// We read the wasm file
let wasm_path = format!("{resources_folder}{WASM_FILENAME}");
let wasm_buffer = std::fs::read(&wasm_path).unwrap();
circom_from_raw(wasm_buffer)
let wasm = RESOURCES_DIR.get_file(Path::new(resources_folder).join(WASM_FILENAME));
if let Some(wasm) = wasm {
let wasm_buffer = wasm.contents();
circom_from_raw(wasm_buffer.to_vec())
} else {
Err(Report::msg("No wasm file found!"))
}
}
// The following function implementations are taken/adapted from https://github.com/gakonst/ark-circom/blob/1732e15d6313fe176b0b1abb858ac9e095d0dbd7/src/zkey.rs
// Utilities to convert a json verification key in a groth16::VerificationKey
fn fq_from_str(s: &str) -> Fq {
Fq::try_from(BigUint::from_str(s).unwrap()).unwrap()
fn fq_from_str(s: &str) -> Result<Fq> {
Ok(Fq::try_from(BigUint::from_str(s)?)?)
}
// Extracts the element in G1 corresponding to its JSON serialization
fn json_to_g1(json: &Value, key: &str) -> G1Affine {
fn json_to_g1(json: &Value, key: &str) -> Result<G1Affine> {
let els: Vec<String> = json
.get(key)
.unwrap()
.ok_or(Report::msg("no json value"))?
.as_array()
.unwrap()
.ok_or(Report::msg("value not an array"))?
.iter()
.map(|i| i.as_str().unwrap().to_string())
.collect();
G1Affine::from(G1Projective::new(
fq_from_str(&els[0]),
fq_from_str(&els[1]),
fq_from_str(&els[2]),
))
.map(|i| i.as_str().ok_or(Report::msg("element is not a string")))
.map(|x| x.map(|v| v.to_owned()))
.collect::<Result<Vec<String>>>()?;
Ok(G1Affine::from(G1Projective::new(
fq_from_str(&els[0])?,
fq_from_str(&els[1])?,
fq_from_str(&els[2])?,
)))
}
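The refactor above leans on the collect-into-`Result` idiom: mapping each element to a `Result` and collecting into `Result<Vec<_>>` short-circuits on the first failure, replacing the chain of `unwrap()` calls. A self-contained illustration (not crate code):
```rust
fn parse_all(raw: &[&str]) -> Result<Vec<u64>, std::num::ParseIntError> {
    // A single failing parse aborts the collection and returns that Err.
    raw.iter().map(|s| s.parse::<u64>()).collect()
}

fn main() {
    assert_eq!(parse_all(&["1", "2"]), Ok(vec![1, 2]));
    assert!(parse_all(&["1", "x"]).is_err());
}
```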
// Extracts the vector of G1 elements corresponding to its JSON serialization
fn json_to_g1_vec(json: &Value, key: &str) -> Vec<G1Affine> {
fn json_to_g1_vec(json: &Value, key: &str) -> Result<Vec<G1Affine>> {
let els: Vec<Vec<String>> = json
.get(key)
.unwrap()
.ok_or(Report::msg("no json value"))?
.as_array()
.unwrap()
.ok_or(Report::msg("value not an array"))?
.iter()
.map(|i| {
i.as_array()
.unwrap()
.iter()
.map(|x| x.as_str().unwrap().to_string())
.collect::<Vec<String>>()
.ok_or(Report::msg("element is not an array"))
.and_then(|array| {
array
.iter()
.map(|x| x.as_str().ok_or(Report::msg("element is not a string")))
.map(|x| x.map(|v| v.to_owned()))
.collect::<Result<Vec<String>>>()
})
})
.collect();
.collect::<Result<Vec<Vec<String>>>>()?;
els.iter()
.map(|coords| {
G1Affine::from(G1Projective::new(
fq_from_str(&coords[0]),
fq_from_str(&coords[1]),
fq_from_str(&coords[2]),
))
})
.collect()
let mut res = vec![];
for coords in els {
res.push(G1Affine::from(G1Projective::new(
fq_from_str(&coords[0])?,
fq_from_str(&coords[1])?,
fq_from_str(&coords[2])?,
)))
}
Ok(res)
}
// Extracts the element in G2 corresponding to its JSON serialization
fn json_to_g2(json: &Value, key: &str) -> G2Affine {
fn json_to_g2(json: &Value, key: &str) -> Result<G2Affine> {
let els: Vec<Vec<String>> = json
.get(key)
.unwrap()
.ok_or(Report::msg("no json value"))?
.as_array()
.unwrap()
.ok_or(Report::msg("value not an array"))?
.iter()
.map(|i| {
i.as_array()
.unwrap()
.iter()
.map(|x| x.as_str().unwrap().to_string())
.collect::<Vec<String>>()
.ok_or(Report::msg("element is not an array"))
.and_then(|array| {
array
.iter()
.map(|x| x.as_str().ok_or(Report::msg("element is not a string")))
.map(|x| x.map(|v| v.to_owned()))
.collect::<Result<Vec<String>>>()
})
})
.collect();
.collect::<Result<Vec<Vec<String>>>>()?;
let x = Fq2::new(fq_from_str(&els[0][0]), fq_from_str(&els[0][1]));
let y = Fq2::new(fq_from_str(&els[1][0]), fq_from_str(&els[1][1]));
let z = Fq2::new(fq_from_str(&els[2][0]), fq_from_str(&els[2][1]));
G2Affine::from(G2Projective::new(x, y, z))
let x = Fq2::new(fq_from_str(&els[0][0])?, fq_from_str(&els[0][1])?);
let y = Fq2::new(fq_from_str(&els[1][0])?, fq_from_str(&els[1][1])?);
let z = Fq2::new(fq_from_str(&els[2][0])?, fq_from_str(&els[2][1])?);
Ok(G2Affine::from(G2Projective::new(x, y, z)))
}
// Converts JSON to a VerifyingKey
fn to_verifying_key(json: serde_json::Value) -> VerifyingKey<Curve> {
VerifyingKey {
alpha_g1: json_to_g1(&json, "vk_alpha_1"),
beta_g2: json_to_g2(&json, "vk_beta_2"),
gamma_g2: json_to_g2(&json, "vk_gamma_2"),
delta_g2: json_to_g2(&json, "vk_delta_2"),
gamma_abc_g1: json_to_g1_vec(&json, "IC"),
}
fn to_verifying_key(json: serde_json::Value) -> Result<VerifyingKey<Curve>> {
Ok(VerifyingKey {
alpha_g1: json_to_g1(&json, "vk_alpha_1")?,
beta_g2: json_to_g2(&json, "vk_beta_2")?,
gamma_g2: json_to_g2(&json, "vk_gamma_2")?,
delta_g2: json_to_g2(&json, "vk_delta_2")?,
gamma_abc_g1: json_to_g1_vec(&json, "IC")?,
})
}
// Computes the verification key from its JSON serialization
fn vk_from_json(vk_path: &str) -> VerifyingKey<Curve> {
let json = std::fs::read_to_string(vk_path).unwrap();
let json: Value = serde_json::from_str(&json).unwrap();
fn vk_from_json(vk: &str) -> Result<VerifyingKey<Curve>> {
let json: Value = serde_json::from_str(vk)?;
to_verifying_key(json)
}
// Computes the verification key from a bytes vector containing its JSON serialization
fn vk_from_vector(vk: &[u8]) -> VerifyingKey<Curve> {
let json = String::from_utf8(vk.to_vec()).expect("Found invalid UTF-8");
let json: Value = serde_json::from_str(&json).unwrap();
fn vk_from_vector(vk: &[u8]) -> Result<VerifyingKey<Curve>> {
let json = String::from_utf8(vk.to_vec())?;
let json: Value = serde_json::from_str(&json)?;
to_verifying_key(json)
}
// Checks verification key to be correct with respect to proving key
pub fn check_vk_from_zkey(resources_folder: &str, verifying_key: VerifyingKey<Curve>) {
let (proving_key, _matrices) = zkey_from_folder(resources_folder).unwrap();
assert_eq!(proving_key.vk, verifying_key);
#[cfg(not(target_arch = "wasm32"))]
pub fn check_vk_from_zkey(
resources_folder: &str,
verifying_key: VerifyingKey<Curve>,
) -> Result<()> {
let (proving_key, _matrices) = zkey_from_folder(resources_folder)?;
if proving_key.vk == verifying_key {
Ok(())
} else {
Err(Report::msg("verifying_keys are not equal"))
}
}

View File

@@ -2,7 +2,7 @@
use std::slice;
use crate::public::RLN;
use crate::public::{hash as public_hash, poseidon_hash as public_poseidon_hash, RLN};
// Macro to call methods with arbitrary amount of arguments,
// First argument to the macro is context,
@@ -53,6 +53,28 @@ macro_rules! call_with_output_arg {
false
}
}
};
}
// Macro to call methods with arbitrary amount of arguments,
// which are not implemented in a ctx RLN object
// First argument is the method to call
// Second argument is the output buffer argument
// The remaining arguments are all other inputs to the method
macro_rules! no_ctx_call_with_output_arg {
($method:ident, $output_arg:expr, $( $arg:expr ),* ) => {
{
let mut output_data: Vec<u8> = Vec::new();
if $method($($arg.process()),*, &mut output_data).is_ok() {
unsafe { *$output_arg = Buffer::from(&output_data[..]) };
std::mem::forget(output_data);
true
} else {
std::mem::forget(output_data);
false
}
}
}
}
@@ -149,9 +171,12 @@ impl<'a> From<&Buffer> for &'a [u8] {
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn new(tree_height: usize, input_buffer: *const Buffer, ctx: *mut *mut RLN) -> bool {
let rln = RLN::new(tree_height, input_buffer.process());
unsafe { *ctx = Box::into_raw(Box::new(rln)) };
true
if let Ok(rln) = RLN::new(tree_height, input_buffer.process()) {
unsafe { *ctx = Box::into_raw(Box::new(rln)) };
true
} else {
false
}
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
@@ -163,14 +188,17 @@ pub extern "C" fn new_with_params(
vk_buffer: *const Buffer,
ctx: *mut *mut RLN,
) -> bool {
let rln = RLN::new_with_params(
if let Ok(rln) = RLN::new_with_params(
tree_height,
circom_buffer.process().to_vec(),
zkey_buffer.process().to_vec(),
vk_buffer.process().to_vec(),
);
unsafe { *ctx = Box::into_raw(Box::new(rln)) };
true
) {
unsafe { *ctx = Box::into_raw(Box::new(rln)) };
true
} else {
false
}
}
////////////////////////////////////////////////////////
@@ -342,10 +370,12 @@ pub extern "C" fn recover_id_secret(
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn hash(
ctx: *mut RLN,
input_buffer: *const Buffer,
output_buffer: *mut Buffer,
) -> bool {
call_with_output_arg!(ctx, hash, output_buffer, input_buffer)
pub extern "C" fn hash(input_buffer: *const Buffer, output_buffer: *mut Buffer) -> bool {
no_ctx_call_with_output_arg!(public_hash, output_buffer, input_buffer)
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn poseidon_hash(input_buffer: *const Buffer, output_buffer: *mut Buffer) -> bool {
no_ctx_call_with_output_arg!(public_poseidon_hash, output_buffer, input_buffer)
}
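For reference, a hedged sketch of how a caller exercises the new context-free `hash` export; it stands in for a C caller and assumes the crate exposes these symbols as `rln::ffi::{hash, Buffer}`:
```rust
use rln::ffi::{hash, Buffer};
use std::mem::MaybeUninit;

fn main() {
    let signal = [0u8, 1, 2, 3];
    let input = Buffer::from(&signal[..]);
    let mut output = MaybeUninit::<Buffer>::uninit();

    // Returns true on success; the output Buffer points at bytes the FFI
    // intentionally leaked (std::mem::forget) so they outlive the call.
    let success = hash(&input, output.as_mut_ptr());
    assert!(success);

    let output = unsafe { output.assume_init() };
    let result: &[u8] = (&output).into();
    assert_eq!(result.len(), 32); // little-endian field element
}
```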

View File

@@ -1,7 +1,8 @@
// This crate instantiates the Poseidon hash algorithm
use crate::circuit::Fr;
use crate::{circuit::Fr, utils::bytes_le_to_fr};
use once_cell::sync::Lazy;
use tiny_keccak::{Hasher, Keccak};
use utils::poseidon::Poseidon;
// These indexed constants hardcodes the supported round parameters tuples (t, RF, RN, SKIP_MATRICES) for the Bn254 scalar field
@@ -26,3 +27,17 @@ pub fn poseidon_hash(input: &[Fr]) -> Fr {
.hash(input.to_vec())
.expect("hash with fixed input size can't fail")
}
// Hashes arbitrary signal to the underlying prime field
pub fn hash_to_field(signal: &[u8]) -> Fr {
// We hash the input signal using Keccak256
// (note that a bigger curve order might require a bigger hash blocksize)
let mut hash = [0; 32];
let mut hasher = Keccak::v256();
hasher.update(signal);
hasher.finalize(&mut hash);
// We export the hash as a field element
let (el, _) = bytes_le_to_fr(hash.as_ref());
el
}

View File

@@ -1,7 +1,7 @@
#![allow(dead_code)]
pub mod circuit;
pub mod poseidon_hash;
pub mod hashers;
pub mod poseidon_tree;
pub mod protocol;
pub mod public;

View File

@@ -3,10 +3,15 @@
// Implementation inspired by https://github.com/worldcoin/semaphore-rs/blob/d462a4372f1fd9c27610f2acfe4841fab1d396aa/src/poseidon_tree.rs (no differences)
use crate::circuit::Fr;
use crate::poseidon_hash::poseidon_hash;
use crate::hashers::poseidon_hash;
use cfg_if::cfg_if;
use utils::merkle_tree::*;
#[cfg(feature = "pmtree-ft")]
use crate::utils::{bytes_le_to_fr, fr_to_bytes_le};
#[cfg(feature = "pmtree-ft")]
use pmtree::*;
// The zerokit RLN default Merkle tree implementation is the OptimalMerkleTree.
// To switch to FullMerkleTree implementation, it is enough to enable the fullmerkletree feature
@@ -36,3 +41,26 @@ impl utils::merkle_tree::Hasher for PoseidonHash {
poseidon_hash(inputs)
}
}
#[cfg(feature = "pmtree-ft")]
// The pmtree Hasher trait used by pmtree Merkle tree
impl pmtree::Hasher for PoseidonHash {
type Fr = Fr;
fn default_leaf() -> Self::Fr {
Fr::from(0)
}
fn serialize(value: Self::Fr) -> Value {
fr_to_bytes_le(&value)
}
fn deserialize(value: Value) -> Self::Fr {
let (fr, _) = bytes_le_to_fr(&value);
fr
}
fn hash(inputs: &[Self::Fr]) -> Self::Fr {
poseidon_hash(inputs)
}
}
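Under the `pmtree-ft` feature this impl lets pmtree persist Poseidon leaves as little-endian byte vectors. A round-trip sketch, assuming `PoseidonHash` is reachable at `rln::poseidon_tree::PoseidonHash`:
```rust
use pmtree::Hasher;
use rln::poseidon_tree::PoseidonHash;

fn main() {
    // serialize/deserialize must be mutually inverse for the DB-backed tree.
    let leaf = PoseidonHash::default_leaf();
    let bytes = PoseidonHash::serialize(leaf);
    assert_eq!(PoseidonHash::deserialize(bytes), leaf);
}
```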

View File

@@ -1,14 +1,11 @@
// This crate collects all the underlying primitives used to implement RLN
use ark_circom::{CircomReduction, WitnessCalculator};
use ark_groth16::{
create_proof_with_reduction_and_matrices, prepare_verifying_key,
verify_proof as ark_verify_proof, Proof as ArkProof, ProvingKey, VerifyingKey,
};
use ark_groth16::{prepare_verifying_key, Groth16, Proof as ArkProof, ProvingKey, VerifyingKey};
use ark_relations::r1cs::ConstraintMatrices;
use ark_relations::r1cs::SynthesisError;
use ark_std::{rand::thread_rng, UniformRand};
use color_eyre::Result;
use color_eyre::{Report, Result};
use num_bigint::BigInt;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha20Rng;
@@ -20,11 +17,13 @@ use thiserror::Error;
use tiny_keccak::{Hasher as _, Keccak};
use crate::circuit::{Curve, Fr};
use crate::poseidon_hash::poseidon_hash;
use crate::hashers::hash_to_field;
use crate::hashers::poseidon_hash;
use crate::poseidon_tree::*;
use crate::public::RLN_IDENTIFIER;
use crate::utils::*;
use cfg_if::cfg_if;
use utils::{ZerokitMerkleProof, ZerokitMerkleTree};
///////////////////////////////////////////////////////
// RLN Witness data structure and utility functions
@@ -53,82 +52,84 @@ pub struct RLNProofValues {
}
pub fn serialize_field_element(element: Fr) -> Vec<u8> {
return fr_to_bytes_le(&element);
fr_to_bytes_le(&element)
}
pub fn deserialize_field_element(serialized: Vec<u8>) -> Fr {
let (element, _) = bytes_le_to_fr(&serialized);
return element;
element
}
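A round-trip sketch for the two helpers above, assuming they are exported from `rln::protocol` and `Fr` from `rln::circuit`:
```rust
use rln::circuit::Fr;
use rln::protocol::{deserialize_field_element, serialize_field_element};

fn main() {
    let element = Fr::from(1234u64);
    // 32 little-endian bytes in, the same field element back out.
    let bytes = serialize_field_element(element);
    assert_eq!(deserialize_field_element(bytes), element);
}
```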
pub fn deserialize_identity_pair(serialized: Vec<u8>) -> (Fr, Fr) {
let (identity_secret_hash, read) = bytes_le_to_fr(&serialized);
let (id_commitment, _) = bytes_le_to_fr(&serialized[read..].to_vec());
let (id_commitment, _) = bytes_le_to_fr(&serialized[read..]);
return (identity_secret_hash, id_commitment);
(identity_secret_hash, id_commitment)
}
pub fn deserialize_identity_tuple(serialized: Vec<u8>) -> (Fr, Fr, Fr, Fr) {
let mut all_read = 0;
let (identity_trapdoor, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
let (identity_trapdoor, read) = bytes_le_to_fr(&serialized[all_read..]);
all_read += read;
let (identity_nullifier, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
let (identity_nullifier, read) = bytes_le_to_fr(&serialized[all_read..]);
all_read += read;
let (identity_secret_hash, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
let (identity_secret_hash, read) = bytes_le_to_fr(&serialized[all_read..]);
all_read += read;
let (identity_commitment, _) = bytes_le_to_fr(&serialized[all_read..].to_vec());
let (identity_commitment, _) = bytes_le_to_fr(&serialized[all_read..]);
return (
(
identity_trapdoor,
identity_nullifier,
identity_secret_hash,
identity_commitment,
);
)
}
pub fn serialize_witness(rln_witness: &RLNWitnessInput) -> Vec<u8> {
pub fn serialize_witness(rln_witness: &RLNWitnessInput) -> Result<Vec<u8>> {
let mut serialized: Vec<u8> = Vec::new();
serialized.append(&mut fr_to_bytes_le(&rln_witness.identity_secret));
serialized.append(&mut vec_fr_to_bytes_le(&rln_witness.path_elements));
serialized.append(&mut vec_u8_to_bytes_le(&rln_witness.identity_path_index));
serialized.append(&mut vec_fr_to_bytes_le(&rln_witness.path_elements)?);
serialized.append(&mut vec_u8_to_bytes_le(&rln_witness.identity_path_index)?);
serialized.append(&mut fr_to_bytes_le(&rln_witness.x));
serialized.append(&mut fr_to_bytes_le(&rln_witness.epoch));
serialized.append(&mut fr_to_bytes_le(&rln_witness.rln_identifier));
serialized
Ok(serialized)
}
pub fn deserialize_witness(serialized: &[u8]) -> (RLNWitnessInput, usize) {
pub fn deserialize_witness(serialized: &[u8]) -> Result<(RLNWitnessInput, usize)> {
let mut all_read: usize = 0;
let (identity_secret, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
let (identity_secret, read) = bytes_le_to_fr(&serialized[all_read..]);
all_read += read;
let (path_elements, read) = bytes_le_to_vec_fr(&serialized[all_read..].to_vec());
let (path_elements, read) = bytes_le_to_vec_fr(&serialized[all_read..])?;
all_read += read;
let (identity_path_index, read) = bytes_le_to_vec_u8(&serialized[all_read..].to_vec());
let (identity_path_index, read) = bytes_le_to_vec_u8(&serialized[all_read..])?;
all_read += read;
let (x, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
let (x, read) = bytes_le_to_fr(&serialized[all_read..]);
all_read += read;
let (epoch, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
let (epoch, read) = bytes_le_to_fr(&serialized[all_read..]);
all_read += read;
let (rln_identifier, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
let (rln_identifier, read) = bytes_le_to_fr(&serialized[all_read..]);
all_read += read;
// TODO: check rln_identifier against public::RLN_IDENTIFIER
assert_eq!(serialized.len(), all_read);
if serialized.len() != all_read {
return Err(Report::msg("serialized length is not equal to all_read"));
}
(
Ok((
RLNWitnessInput {
identity_secret,
path_elements,
@@ -138,7 +139,7 @@ pub fn deserialize_witness(serialized: &[u8]) -> (RLNWitnessInput, usize) {
rln_identifier,
},
all_read,
)
))
}
// This function deserializes input for kilic's rln generate_proof public API
@@ -148,24 +149,24 @@ pub fn deserialize_witness(serialized: &[u8]) -> (RLNWitnessInput, usize) {
pub fn proof_inputs_to_rln_witness(
tree: &mut PoseidonTree,
serialized: &[u8],
) -> (RLNWitnessInput, usize) {
) -> Result<(RLNWitnessInput, usize)> {
let mut all_read: usize = 0;
let (identity_secret, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
let (identity_secret, read) = bytes_le_to_fr(&serialized[all_read..]);
all_read += read;
let id_index = u64::from_le_bytes(serialized[all_read..all_read + 8].try_into().unwrap());
let id_index = usize::from_le_bytes(serialized[all_read..all_read + 8].try_into()?);
all_read += 8;
let (epoch, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
let (epoch, read) = bytes_le_to_fr(&serialized[all_read..]);
all_read += read;
let signal_len = u64::from_le_bytes(serialized[all_read..all_read + 8].try_into().unwrap());
let signal_len = usize::from_le_bytes(serialized[all_read..all_read + 8].try_into()?);
all_read += 8;
let signal: Vec<u8> = serialized[all_read..all_read + (signal_len as usize)].to_vec();
let signal: Vec<u8> = serialized[all_read..all_read + signal_len].to_vec();
let merkle_proof = tree.proof(id_index as usize).expect("proof should exist");
let merkle_proof = tree.proof(id_index).expect("proof should exist");
let path_elements = merkle_proof.get_path_elements();
let identity_path_index = merkle_proof.get_path_index();
@@ -173,7 +174,7 @@ pub fn proof_inputs_to_rln_witness(
let rln_identifier = hash_to_field(RLN_IDENTIFIER);
(
Ok((
RLNWitnessInput {
identity_secret,
path_elements,
@@ -183,45 +184,48 @@ pub fn proof_inputs_to_rln_witness(
rln_identifier,
},
all_read,
)
))
}
pub fn rln_witness_from_json(input_json_str: &str) -> RLNWitnessInput {
pub fn rln_witness_from_json(input_json_str: &str) -> Result<RLNWitnessInput> {
let input_json: serde_json::Value =
serde_json::from_str(input_json_str).expect("JSON was not well-formatted");
let identity_secret = str_to_fr(&input_json["identity_secret"].to_string(), 10);
let identity_secret = str_to_fr(&input_json["identity_secret"].to_string(), 10)?;
let path_elements = input_json["path_elements"]
.as_array()
.unwrap()
.ok_or(Report::msg("not an array"))?
.iter()
.map(|v| str_to_fr(&v.to_string(), 10))
.collect();
.collect::<Result<_>>()?;
let identity_path_index = input_json["identity_path_index"]
let identity_path_index_array = input_json["identity_path_index"]
.as_array()
.unwrap()
.iter()
.map(|v| v.as_u64().unwrap() as u8)
.collect();
.ok_or(Report::msg("not an array"))?;
let x = str_to_fr(&input_json["x"].to_string(), 10);
let mut identity_path_index: Vec<u8> = vec![];
let epoch = str_to_fr(&input_json["epoch"].to_string(), 16);
for v in identity_path_index_array {
identity_path_index.push(v.as_u64().ok_or(Report::msg("not a u64 value"))? as u8);
}
let rln_identifier = str_to_fr(&input_json["rln_identifier"].to_string(), 10);
let x = str_to_fr(&input_json["x"].to_string(), 10)?;
let epoch = str_to_fr(&input_json["epoch"].to_string(), 16)?;
let rln_identifier = str_to_fr(&input_json["rln_identifier"].to_string(), 10)?;
// TODO: check rln_identifier against public::RLN_IDENTIFIER
RLNWitnessInput {
Ok(RLNWitnessInput {
identity_secret,
path_elements,
identity_path_index,
x,
epoch,
rln_identifier,
}
})
}
pub fn rln_witness_from_values(
@@ -317,22 +321,22 @@ pub fn serialize_proof_values(rln_proof_values: &RLNProofValues) -> Vec<u8> {
pub fn deserialize_proof_values(serialized: &[u8]) -> (RLNProofValues, usize) {
let mut all_read: usize = 0;
let (root, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
let (root, read) = bytes_le_to_fr(&serialized[all_read..]);
all_read += read;
let (epoch, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
let (epoch, read) = bytes_le_to_fr(&serialized[all_read..]);
all_read += read;
let (x, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
let (x, read) = bytes_le_to_fr(&serialized[all_read..]);
all_read += read;
let (y, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
let (y, read) = bytes_le_to_fr(&serialized[all_read..]);
all_read += read;
let (nullifier, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
let (nullifier, read) = bytes_le_to_fr(&serialized[all_read..]);
all_read += read;
let (rln_identifier, read) = bytes_le_to_fr(&serialized[all_read..].to_vec());
let (rln_identifier, read) = bytes_le_to_fr(&serialized[all_read..]);
all_read += read;
(
@@ -354,29 +358,26 @@ pub fn prepare_prove_input(
epoch: Fr,
signal: &[u8],
) -> Vec<u8> {
let signal_len = u64::try_from(signal.len()).unwrap();
let mut serialized: Vec<u8> = Vec::new();
serialized.append(&mut fr_to_bytes_le(&identity_secret));
serialized.append(&mut id_index.to_le_bytes().to_vec());
serialized.append(&mut normalize_usize(id_index));
serialized.append(&mut fr_to_bytes_le(&epoch));
serialized.append(&mut signal_len.to_le_bytes().to_vec());
serialized.append(&mut normalize_usize(signal.len()));
serialized.append(&mut signal.to_vec());
return serialized;
serialized
}
#[allow(clippy::redundant_clone)]
pub fn prepare_verify_input(proof_data: Vec<u8>, signal: &[u8]) -> Vec<u8> {
let signal_len = u64::try_from(signal.len()).unwrap();
let mut serialized: Vec<u8> = Vec::new();
serialized.append(&mut proof_data.clone());
serialized.append(&mut signal_len.to_le_bytes().to_vec());
serialized.append(&mut normalize_usize(signal.len()));
serialized.append(&mut signal.to_vec());
return serialized;
serialized
}
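The two helpers above produce the exact byte layouts the prove/verify APIs parse, with `normalize_usize` pinning lengths to 8 bytes regardless of platform. A layout check, assuming the leading parameters of `prepare_prove_input` are `identity_secret: Fr` and `id_index: usize` (they are not shown in this hunk):
```rust
use rln::circuit::Fr;
use rln::protocol::prepare_prove_input;

fn main() {
    let identity_secret = Fr::from(1u64);
    let epoch = Fr::from(2u64);
    let signal = b"hello";

    // [ identity_secret<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
    let input = prepare_prove_input(identity_secret, 0, epoch, signal);
    assert_eq!(input.len(), 32 + 8 + 32 + 8 + signal.len());
}
```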
///////////////////////////////////////////////////////
@@ -483,20 +484,6 @@ pub fn extended_seeded_keygen(signal: &[u8]) -> (Fr, Fr, Fr, Fr) {
)
}
// Hashes arbitrary signal to the underlying prime field
pub fn hash_to_field(signal: &[u8]) -> Fr {
// We hash the input signal using Keccak256
// (note that a bigger curve order might require a bigger hash blocksize)
let mut hash = [0; 32];
let mut hasher = Keccak::v256();
hasher.update(signal);
hasher.finalize(&mut hash);
// We export the hash as a field element
let (el, _) = bytes_le_to_fr(hash.as_ref());
el
}
pub fn compute_id_secret(
share1: (Fr, Fr),
share2: (Fr, Fr),
@@ -519,9 +506,9 @@ pub fn compute_id_secret(
if a_1 == computed_a_1 {
// We successfully recovered the identity secret
return Ok(a_0);
Ok(a_0)
} else {
return Err("Cannot recover identity_secret_hash from provided shares".into());
Err("Cannot recover identity_secret_hash from provided shares".into())
}
}
@@ -532,33 +519,36 @@ pub fn compute_id_secret(
#[derive(Error, Debug)]
pub enum ProofError {
#[error("Error reading circuit key: {0}")]
CircuitKeyError(#[from] std::io::Error),
CircuitKeyError(#[from] Report),
#[error("Error producing witness: {0}")]
WitnessError(color_eyre::Report),
WitnessError(Report),
#[error("Error producing proof: {0}")]
SynthesisError(#[from] SynthesisError),
}
fn calculate_witness_element<E: ark_ec::PairingEngine>(witness: Vec<BigInt>) -> Result<Vec<E::Fr>> {
use ark_ff::{FpParameters, PrimeField};
let modulus = <<E::Fr as PrimeField>::Params as FpParameters>::MODULUS;
fn calculate_witness_element<E: ark_ec::pairing::Pairing>(
witness: Vec<BigInt>,
) -> Result<Vec<E::ScalarField>> {
use ark_ff::PrimeField;
let modulus = <E::ScalarField as PrimeField>::MODULUS;
// convert it to field elements
use num_traits::Signed;
let witness = witness
.into_iter()
.map(|w| {
let w = if w.sign() == num_bigint::Sign::Minus {
// Need to negate the witness element if negative
modulus.into() - w.abs().to_biguint().unwrap()
} else {
w.to_biguint().unwrap()
};
E::Fr::from(w)
})
.collect::<Vec<_>>();
let mut witness_vec = vec![];
for w in witness.into_iter() {
let w = if w.sign() == num_bigint::Sign::Minus {
// Need to negate the witness element if negative
modulus.into()
- w.abs()
.to_biguint()
.ok_or(Report::msg("not a biguint value"))?
} else {
w.to_biguint().ok_or(Report::msg("not a biguint value"))?
};
witness_vec.push(E::ScalarField::from(w))
}
Ok(witness)
Ok(witness_vec)
}
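The loop above lifts signed witness values into the field: a negative w becomes p - |w|, the canonical representative of w mod p. A toy version of the same arithmetic with a small stand-in modulus:
```rust
use num_bigint::{BigInt, BigUint, Sign};
use num_traits::Signed;

// Map a signed big integer to its canonical representative mod `modulus`,
// mirroring the negation branch in calculate_witness_element.
fn lift(w: BigInt, modulus: &BigUint) -> Option<BigUint> {
    if w.sign() == Sign::Minus {
        Some(modulus - w.abs().to_biguint()?)
    } else {
        w.to_biguint()
    }
}

fn main() {
    let p = BigUint::from(97u32); // toy modulus
    assert_eq!(lift(BigInt::from(-1), &p), Some(BigUint::from(96u32)));
    assert_eq!(lift(BigInt::from(5), &p), Some(BigUint::from(5u32)));
}
```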
pub fn generate_proof_with_witness(
@@ -569,9 +559,8 @@ pub fn generate_proof_with_witness(
#[cfg(debug_assertions)]
let now = Instant::now();
let full_assignment = calculate_witness_element::<Curve>(witness)
.map_err(ProofError::WitnessError)
.unwrap();
let full_assignment =
calculate_witness_element::<Curve>(witness).map_err(ProofError::WitnessError)?;
#[cfg(debug_assertions)]
println!("witness generation took: {:.2?}", now.elapsed());
@@ -585,7 +574,7 @@ pub fn generate_proof_with_witness(
#[cfg(debug_assertions)]
let now = Instant::now();
let proof = create_proof_with_reduction_and_matrices::<_, CircomReduction>(
let proof = Groth16::<_, CircomReduction>::create_proof_with_reduction_and_matrices(
&proving_key.0,
r,
s,
@@ -593,8 +582,7 @@ pub fn generate_proof_with_witness(
proving_key.1.num_instance_variables,
proving_key.1.num_constraints,
full_assignment.as_slice(),
)
.unwrap();
)?;
#[cfg(debug_assertions)]
println!("proof generation took: {:.2?}", now.elapsed());
@@ -602,14 +590,16 @@ pub fn generate_proof_with_witness(
Ok(proof)
}
pub fn inputs_for_witness_calculation(rln_witness: &RLNWitnessInput) -> [(&str, Vec<BigInt>); 6] {
pub fn inputs_for_witness_calculation(
rln_witness: &RLNWitnessInput,
) -> Result<[(&str, Vec<BigInt>); 6]> {
// We convert the path elements and indexes to big integers
// TODO: check if necessary
let mut path_elements = Vec::new();
rln_witness
.path_elements
.iter()
.for_each(|v| path_elements.push(to_bigint(v)));
for v in rln_witness.path_elements.iter() {
path_elements.push(to_bigint(v)?);
}
let mut identity_path_index = Vec::new();
rln_witness
@@ -617,20 +607,20 @@ pub fn inputs_for_witness_calculation(rln_witness: &RLNWitnessInput) -> [(&str,
.iter()
.for_each(|v| identity_path_index.push(BigInt::from(*v)));
[
Ok([
(
"identity_secret",
vec![to_bigint(&rln_witness.identity_secret)],
vec![to_bigint(&rln_witness.identity_secret)?],
),
("path_elements", path_elements),
("identity_path_index", identity_path_index),
("x", vec![to_bigint(&rln_witness.x)]),
("epoch", vec![to_bigint(&rln_witness.epoch)]),
("x", vec![to_bigint(&rln_witness.x)?]),
("epoch", vec![to_bigint(&rln_witness.epoch)?]),
(
"rln_identifier",
vec![to_bigint(&rln_witness.rln_identifier)],
vec![to_bigint(&rln_witness.rln_identifier)?],
),
]
])
}
/// Generates a RLN proof
@@ -644,7 +634,7 @@ pub fn generate_proof(
proving_key: &(ProvingKey<Curve>, ConstraintMatrices<Fr>),
rln_witness: &RLNWitnessInput,
) -> Result<ArkProof<Curve>, ProofError> {
let inputs = inputs_for_witness_calculation(rln_witness)
let inputs = inputs_for_witness_calculation(rln_witness)?
.into_iter()
.map(|(name, values)| (name.to_string(), values));
@@ -678,7 +668,7 @@ pub fn generate_proof(
#[cfg(debug_assertions)]
let now = Instant::now();
let proof = create_proof_with_reduction_and_matrices::<_, CircomReduction>(
let proof = Groth16::<_, CircomReduction>::create_proof_with_reduction_and_matrices(
&proving_key.0,
r,
s,
@@ -723,7 +713,7 @@ pub fn verify_proof(
#[cfg(debug_assertions)]
let now = Instant::now();
let verified = ark_verify_proof(&pvk, proof, &inputs)?;
let verified = Groth16::<_, CircomReduction>::verify_proof(&pvk, proof, &inputs)?;
#[cfg(debug_assertions)]
println!("verify took: {:.2?}", now.elapsed());
@@ -735,12 +725,12 @@ pub fn verify_proof(
///
/// Returns a JSON object containing the inputs necessary to calculate
/// the witness with CIRCOM on javascript
pub fn get_json_inputs(rln_witness: &RLNWitnessInput) -> serde_json::Value {
pub fn get_json_inputs(rln_witness: &RLNWitnessInput) -> Result<serde_json::Value> {
let mut path_elements = Vec::new();
rln_witness
.path_elements
.iter()
.for_each(|v| path_elements.push(to_bigint(v).to_str_radix(10)));
for v in rln_witness.path_elements.iter() {
path_elements.push(to_bigint(v)?.to_str_radix(10));
}
let mut identity_path_index = Vec::new();
rln_witness
@@ -749,13 +739,13 @@ pub fn get_json_inputs(rln_witness: &RLNWitnessInput) -> serde_json::Value {
.for_each(|v| identity_path_index.push(BigInt::from(*v).to_str_radix(10)));
let inputs = serde_json::json!({
"identity_secret": to_bigint(&rln_witness.identity_secret).to_str_radix(10),
"identity_secret": to_bigint(&rln_witness.identity_secret)?.to_str_radix(10),
"path_elements": path_elements,
"identity_path_index": identity_path_index,
"x": to_bigint(&rln_witness.x).to_str_radix(10),
"epoch": format!("0x{:064x}", to_bigint(&rln_witness.epoch)),
"rln_identifier": to_bigint(&rln_witness.rln_identifier).to_str_radix(10),
"x": to_bigint(&rln_witness.x)?.to_str_radix(10),
"epoch": format!("0x{:064x}", to_bigint(&rln_witness.epoch)?),
"rln_identifier": to_bigint(&rln_witness.rln_identifier)?.to_str_radix(10),
});
inputs
Ok(inputs)
}

View File

@@ -1,5 +1,5 @@
use crate::circuit::{vk_from_raw, zkey_from_raw, Curve, Fr};
use crate::poseidon_hash::poseidon_hash;
use crate::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash};
use crate::poseidon_tree::PoseidonTree;
use crate::protocol::*;
use crate::utils::*;
@@ -10,9 +10,11 @@ use ark_groth16::{ProvingKey, VerifyingKey};
use ark_relations::r1cs::ConstraintMatrices;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, Write};
use cfg_if::cfg_if;
use color_eyre::Result;
use num_bigint::BigInt;
use std::io::Cursor;
use std::io::{self, Result};
use utils::{ZerokitMerkleProof, ZerokitMerkleTree};
// use rkyv::Deserialize;
cfg_if! {
if #[cfg(not(target_arch = "wasm32"))] {
@@ -36,8 +38,8 @@ pub const RLN_IDENTIFIER: &[u8] = b"zerokit/rln/010203040506070809";
///
/// I/O is mostly done using writers and readers implementing `std::io::Write` and `std::io::Read`, respectively.
pub struct RLN<'a> {
proving_key: Result<(ProvingKey<Curve>, ConstraintMatrices<Fr>)>,
verification_key: Result<VerifyingKey<Curve>>,
proving_key: (ProvingKey<Curve>, ConstraintMatrices<Fr>),
verification_key: VerifyingKey<Curve>,
tree: PoseidonTree,
// The witness calculator can't be loaded in zerokit. Since this struct
@@ -61,35 +63,35 @@ impl RLN<'_> {
/// use std::io::Cursor;
///
/// let tree_height = 20;
/// let resources = Cursor::new("./resources/tree_height_20/");
/// let resources = Cursor::new("tree_height_20");
///
/// // We create a new RLN instance
/// let mut rln = RLN::new(tree_height, resources);
/// ```
#[cfg(not(target_arch = "wasm32"))]
pub fn new<R: Read>(tree_height: usize, mut input_data: R) -> RLN<'static> {
pub fn new<R: Read>(tree_height: usize, mut input_data: R) -> Result<RLN<'static>> {
// We read input
let mut input: Vec<u8> = Vec::new();
input_data.read_to_end(&mut input).unwrap();
input_data.read_to_end(&mut input)?;
let resources_folder = String::from_utf8(input).expect("Found invalid UTF-8");
let resources_folder = String::from_utf8(input)?;
let witness_calculator = circom_from_folder(&resources_folder);
let witness_calculator = circom_from_folder(&resources_folder)?;
let proving_key = zkey_from_folder(&resources_folder);
let verification_key = vk_from_folder(&resources_folder);
let proving_key = zkey_from_folder(&resources_folder)?;
let verification_key = vk_from_folder(&resources_folder)?;
// We compute a default empty tree
let tree = PoseidonTree::default(tree_height);
RLN {
Ok(RLN {
witness_calculator,
proving_key,
verification_key,
tree,
#[cfg(target_arch = "wasm32")]
_marker: PhantomData,
}
})
}
/// Creates a new RLN object by passing circuit resources as byte vectors.
@@ -130,17 +132,17 @@ impl RLN<'_> {
#[cfg(not(target_arch = "wasm32"))] circom_vec: Vec<u8>,
zkey_vec: Vec<u8>,
vk_vec: Vec<u8>,
) -> RLN<'static> {
) -> Result<RLN<'static>> {
#[cfg(not(target_arch = "wasm32"))]
let witness_calculator = circom_from_raw(circom_vec);
let witness_calculator = circom_from_raw(circom_vec)?;
let proving_key = zkey_from_raw(&zkey_vec);
let verification_key = vk_from_raw(&vk_vec, &zkey_vec);
let proving_key = zkey_from_raw(&zkey_vec)?;
let verification_key = vk_from_raw(&vk_vec, &zkey_vec)?;
// We compute a default empty tree
let tree = PoseidonTree::default(tree_height);
RLN {
Ok(RLN {
#[cfg(not(target_arch = "wasm32"))]
witness_calculator,
proving_key,
@@ -148,7 +150,7 @@ impl RLN<'_> {
tree,
#[cfg(target_arch = "wasm32")]
_marker: PhantomData,
}
})
}
////////////////////////////////////////////////////////
@@ -160,7 +162,7 @@ impl RLN<'_> {
///
/// Input values are:
/// - `tree_height`: the height of the Merkle tree.
pub fn set_tree(&mut self, tree_height: usize) -> io::Result<()> {
pub fn set_tree(&mut self, tree_height: usize) -> Result<()> {
// We compute a default empty tree of desired height
self.tree = PoseidonTree::default(tree_height);
@@ -187,7 +189,7 @@ impl RLN<'_> {
/// let mut buffer = Cursor::new(serialize_field_element(id_commitment));
/// rln.set_leaf(id_index, &mut buffer).unwrap();
/// ```
pub fn set_leaf<R: Read>(&mut self, index: usize, mut input_data: R) -> io::Result<()> {
pub fn set_leaf<R: Read>(&mut self, index: usize, mut input_data: R) -> Result<()> {
// We read input
let mut leaf_byte: Vec<u8> = Vec::new();
input_data.read_to_end(&mut leaf_byte)?;
@@ -229,15 +231,15 @@ impl RLN<'_> {
/// let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
/// rln.set_leaves_from(index, &mut buffer).unwrap();
/// ```
pub fn set_leaves_from<R: Read>(&mut self, index: usize, mut input_data: R) -> io::Result<()> {
pub fn set_leaves_from<R: Read>(&mut self, index: usize, mut input_data: R) -> Result<()> {
// We read input
let mut leaves_byte: Vec<u8> = Vec::new();
input_data.read_to_end(&mut leaves_byte)?;
let (leaves, _) = bytes_le_to_vec_fr(&leaves_byte);
let (leaves, _) = bytes_le_to_vec_fr(&leaves_byte)?;
// We set the leaves
return self.tree.set_range(index, leaves);
self.tree.set_range(index, leaves)
}
/// Resets the tree state to default and sets multiple leaves starting from index 0.
@@ -246,12 +248,12 @@ impl RLN<'_> {
///
/// Input values are:
/// - `input_data`: a reader for the serialization of multiple leaf values (serialization done with [`rln::utils::vec_fr_to_bytes_le`](crate::utils::vec_fr_to_bytes_le))
pub fn init_tree_with_leaves<R: Read>(&mut self, input_data: R) -> io::Result<()> {
pub fn init_tree_with_leaves<R: Read>(&mut self, input_data: R) -> Result<()> {
// reset the tree
// NOTE: this requires the tree to be initialized with the correct height initially
// TODO: accept tree_height as a parameter and initialize the tree with that height
self.set_tree(self.tree.depth())?;
return self.set_leaves_from(0, input_data);
self.set_leaves_from(0, input_data)
}
/// Sets a leaf value at the next available never-set leaf index.
@@ -295,7 +297,7 @@ impl RLN<'_> {
/// let mut buffer = Cursor::new(fr_to_bytes_le(&id_commitment));
/// rln.set_next_leaf(&mut buffer).unwrap();
/// ```
pub fn set_next_leaf<R: Read>(&mut self, mut input_data: R) -> io::Result<()> {
pub fn set_next_leaf<R: Read>(&mut self, mut input_data: R) -> Result<()> {
// We read input
let mut leaf_byte: Vec<u8> = Vec::new();
input_data.read_to_end(&mut leaf_byte)?;
@@ -320,7 +322,7 @@ impl RLN<'_> {
/// let index = 10;
/// rln.delete_leaf(index).unwrap();
/// ```
pub fn delete_leaf(&mut self, index: usize) -> io::Result<()> {
pub fn delete_leaf(&mut self, index: usize) -> Result<()> {
self.tree.delete(index)?;
Ok(())
}
@@ -338,7 +340,7 @@ impl RLN<'_> {
/// rln.get_root(&mut buffer).unwrap();
/// let (root, _) = bytes_le_to_fr(&buffer.into_inner());
/// ```
pub fn get_root<W: Write>(&self, mut output_data: W) -> io::Result<()> {
pub fn get_root<W: Write>(&self, mut output_data: W) -> Result<()> {
let root = self.tree.root();
output_data.write_all(&fr_to_bytes_le(&root))?;
@@ -366,13 +368,13 @@ impl RLN<'_> {
/// let (path_elements, read) = bytes_le_to_vec_fr(&buffer_inner);
/// let (identity_path_index, _) = bytes_le_to_vec_u8(&buffer_inner[read..].to_vec());
/// ```
pub fn get_proof<W: Write>(&self, index: usize, mut output_data: W) -> io::Result<()> {
pub fn get_proof<W: Write>(&self, index: usize, mut output_data: W) -> Result<()> {
let merkle_proof = self.tree.proof(index).expect("proof should exist");
let path_elements = merkle_proof.get_path_elements();
let identity_path_index = merkle_proof.get_path_index();
output_data.write_all(&vec_fr_to_bytes_le(&path_elements))?;
output_data.write_all(&vec_u8_to_bytes_le(&identity_path_index))?;
output_data.write_all(&vec_fr_to_bytes_le(&path_elements)?)?;
output_data.write_all(&vec_u8_to_bytes_le(&identity_path_index)?)?;
Ok(())
}
@@ -406,11 +408,11 @@ impl RLN<'_> {
&mut self,
mut input_data: R,
mut output_data: W,
) -> io::Result<()> {
// We read input RLN witness and we deserialize it
) -> Result<()> {
// We read input RLN witness and we deserialize it
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
let (rln_witness, _) = deserialize_witness(&serialized);
let (rln_witness, _) = deserialize_witness(&serialized)?;
/*
if self.witness_calculator.is_none() {
@@ -418,15 +420,10 @@ impl RLN<'_> {
}
*/
let proof = generate_proof(
&mut self.witness_calculator,
self.proving_key.as_ref().unwrap(),
&rln_witness,
)
.unwrap();
let proof = generate_proof(self.witness_calculator, &self.proving_key, &rln_witness)?;
// Note: we export a serialization of ark-groth16::Proof not semaphore::Proof
proof.serialize(&mut output_data).unwrap();
proof.serialize_compressed(&mut output_data)?;
Ok(())
}
@@ -466,22 +463,17 @@ impl RLN<'_> {
///
/// assert!(verified);
/// ```
pub fn verify<R: Read>(&self, mut input_data: R) -> io::Result<bool> {
pub fn verify<R: Read>(&self, mut input_data: R) -> Result<bool> {
// Input data is serialized for Curve as:
// serialized_proof (compressed, 4*32 bytes) || serialized_proof_values (6*32 bytes), i.e.
// [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> ]
let mut input_byte: Vec<u8> = Vec::new();
input_data.read_to_end(&mut input_byte)?;
let proof = ArkProof::deserialize(&mut Cursor::new(&input_byte[..128].to_vec())).unwrap();
let proof = ArkProof::deserialize_compressed(&mut Cursor::new(&input_byte[..128]))?;
let (proof_values, _) = deserialize_proof_values(&input_byte[128..].to_vec());
let (proof_values, _) = deserialize_proof_values(&input_byte[128..]);
let verified = verify_proof(
self.verification_key.as_ref().unwrap(),
&proof,
&proof_values,
)
.unwrap();
let verified = verify_proof(&self.verification_key, &proof, &proof_values)?;
Ok(verified)
}
@@ -510,8 +502,6 @@ impl RLN<'_> {
/// // We generate a random signal
/// let mut rng = rand::thread_rng();
/// let signal: [u8; 32] = rng.gen();
/// let signal_len = u64::try_from(signal.len()).unwrap();
///
/// // We generate a random epoch
/// let epoch = hash_to_field(b"test-epoch");
///
@@ -519,9 +509,9 @@ impl RLN<'_> {
/// // input_data is [ identity_secret<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
/// let mut serialized: Vec<u8> = Vec::new();
/// serialized.append(&mut fr_to_bytes_le(&identity_secret_hash));
/// serialized.append(&mut identity_index.to_le_bytes().to_vec());
/// serialized.append(&mut normalize_usize(identity_index));
/// serialized.append(&mut fr_to_bytes_le(&epoch));
/// serialized.append(&mut signal_len.to_le_bytes().to_vec());
/// serialized.append(&mut normalize_usize(signal.len()));
/// serialized.append(&mut signal.to_vec());
///
/// let mut input_buffer = Cursor::new(serialized);
@@ -537,23 +527,18 @@ impl RLN<'_> {
&mut self,
mut input_data: R,
mut output_data: W,
) -> io::Result<()> {
// We read input RLN witness and we deserialize it
) -> Result<()> {
// We read input RLN witness and we deserialize it
let mut witness_byte: Vec<u8> = Vec::new();
input_data.read_to_end(&mut witness_byte)?;
let (rln_witness, _) = proof_inputs_to_rln_witness(&mut self.tree, &witness_byte);
let (rln_witness, _) = proof_inputs_to_rln_witness(&mut self.tree, &witness_byte)?;
let proof_values = proof_values_from_witness(&rln_witness);
let proof = generate_proof(
self.witness_calculator,
self.proving_key.as_ref().unwrap(),
&rln_witness,
)
.unwrap();
let proof = generate_proof(self.witness_calculator, &self.proving_key, &rln_witness)?;
// Note: we export a serialization of ark-groth16::Proof not semaphore::Proof
// This proof is compressed, i.e. 128 bytes long
proof.serialize(&mut output_data).unwrap();
proof.serialize_compressed(&mut output_data)?;
output_data.write_all(&serialize_proof_values(&proof_values))?;
Ok(())
@@ -570,17 +555,15 @@ impl RLN<'_> {
calculated_witness: Vec<BigInt>,
rln_witness_vec: Vec<u8>,
mut output_data: W,
) -> io::Result<()> {
let (rln_witness, _) = deserialize_witness(&rln_witness_vec[..]);
) -> Result<()> {
let (rln_witness, _) = deserialize_witness(&rln_witness_vec[..])?;
let proof_values = proof_values_from_witness(&rln_witness);
let proof =
generate_proof_with_witness(calculated_witness, self.proving_key.as_ref().unwrap())
.unwrap();
let proof = generate_proof_with_witness(calculated_witness, &self.proving_key).unwrap();
// Note: we export a serialization of ark-groth16::Proof not semaphore::Proof
// This proof is compressed, i.e. 128 bytes long
proof.serialize(&mut output_data).unwrap();
proof.serialize_compressed(&mut output_data)?;
output_data.write_all(&serialize_proof_values(&proof_values))?;
Ok(())
}
@@ -604,7 +587,7 @@ impl RLN<'_> {
/// // We prepare input for verify_rln_proof API
/// // input_data is [ proof<128> | share_y<32> | nullifier<32> | root<32> | epoch<32> | share_x<32> | rln_identifier<32> | signal_len<8> | signal<var> ]
/// // that is [ proof_data || signal_len<8> | signal<var> ]
/// proof_data.append(&mut signal_len.to_le_bytes().to_vec());
/// proof_data.append(&mut normalize_usize(signal_len));
/// proof_data.append(&mut signal.to_vec());
///
/// let mut input_buffer = Cursor::new(proof_data);
@@ -612,27 +595,22 @@ impl RLN<'_> {
///
/// assert!(verified);
/// ```
pub fn verify_rln_proof<R: Read>(&self, mut input_data: R) -> io::Result<bool> {
pub fn verify_rln_proof<R: Read>(&self, mut input_data: R) -> Result<bool> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
let mut all_read = 0;
let proof = ArkProof::deserialize(&mut Cursor::new(&serialized[..128].to_vec())).unwrap();
let proof =
ArkProof::deserialize_compressed(&mut Cursor::new(&serialized[..128].to_vec()))?;
all_read += 128;
let (proof_values, read) = deserialize_proof_values(&serialized[all_read..].to_vec());
let (proof_values, read) = deserialize_proof_values(&serialized[all_read..]);
all_read += read;
let signal_len =
u64::from_le_bytes(serialized[all_read..all_read + 8].try_into().unwrap()) as usize;
let signal_len = usize::from_le_bytes(serialized[all_read..all_read + 8].try_into()?);
all_read += 8;
let signal: Vec<u8> = serialized[all_read..all_read + signal_len].to_vec();
let verified = verify_proof(
self.verification_key.as_ref().unwrap(),
&proof,
&proof_values,
)
.unwrap();
let verified = verify_proof(&self.verification_key, &proof, &proof_values)?;
// Consistency checks to counter proof tampering
let x = hash_to_field(&signal);
@@ -693,31 +671,22 @@ impl RLN<'_> {
///
/// assert!(verified);
/// ```
pub fn verify_with_roots<R: Read>(
&self,
mut input_data: R,
mut roots_data: R,
) -> io::Result<bool> {
pub fn verify_with_roots<R: Read>(&self, mut input_data: R, mut roots_data: R) -> Result<bool> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
let mut all_read = 0;
let proof = ArkProof::deserialize(&mut Cursor::new(&serialized[..128].to_vec())).unwrap();
let proof =
ArkProof::deserialize_compressed(&mut Cursor::new(&serialized[..128].to_vec()))?;
all_read += 128;
let (proof_values, read) = deserialize_proof_values(&serialized[all_read..].to_vec());
let (proof_values, read) = deserialize_proof_values(&serialized[all_read..]);
all_read += read;
let signal_len =
u64::from_le_bytes(serialized[all_read..all_read + 8].try_into().unwrap()) as usize;
let signal_len = usize::from_le_bytes(serialized[all_read..all_read + 8].try_into()?);
all_read += 8;
let signal: Vec<u8> = serialized[all_read..all_read + signal_len].to_vec();
let verified = verify_proof(
self.verification_key.as_ref().unwrap(),
&proof,
&proof_values,
)
.unwrap();
let verified = verify_proof(&self.verification_key, &proof, &proof_values)?;
// First consistency checks to counter proof tampering
let x = hash_to_field(&signal);
@@ -726,7 +695,7 @@ impl RLN<'_> {
&& (proof_values.rln_identifier == hash_to_field(RLN_IDENTIFIER));
// We skip root validation if proof is already invalid
if partial_result == false {
if !partial_result {
return Ok(partial_result);
}
@@ -749,14 +718,13 @@ impl RLN<'_> {
}
// We validate the root
let roots_verified: bool;
if roots.is_empty() {
let roots_verified: bool = if roots.is_empty() {
// If no root is passed in roots_buffer, we skip proof's root check
roots_verified = true;
true
} else {
// Otherwise we check if proof's root is contained in the passed buffer
roots_verified = roots.contains(&proof_values.root);
}
roots.contains(&proof_values.root)
};
// We combine all checks
Ok(partial_result && roots_verified)
@@ -781,10 +749,10 @@ impl RLN<'_> {
/// let mut buffer = Cursor::new(Vec::<u8>::new());
/// rln.key_gen(&mut buffer).unwrap();
///
/// // We deserialize the keygen output
/// // We deserialize the keygen output
/// let (identity_secret_hash, id_commitment) = deserialize_identity_pair(buffer.into_inner());
/// ```
pub fn key_gen<W: Write>(&self, mut output_data: W) -> io::Result<()> {
pub fn key_gen<W: Write>(&self, mut output_data: W) -> Result<()> {
let (identity_secret_hash, id_commitment) = keygen();
output_data.write_all(&fr_to_bytes_le(&identity_secret_hash))?;
output_data.write_all(&fr_to_bytes_le(&id_commitment))?;
@@ -811,10 +779,10 @@ impl RLN<'_> {
/// let mut buffer = Cursor::new(Vec::<u8>::new());
/// rln.extended_key_gen(&mut buffer).unwrap();
///
/// // We deserialize the keygen output
/// // We deserialize the keygen output
/// let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) = deserialize_identity_tuple(buffer.into_inner());
/// ```
pub fn extended_key_gen<W: Write>(&self, mut output_data: W) -> io::Result<()> {
pub fn extended_key_gen<W: Write>(&self, mut output_data: W) -> Result<()> {
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
extended_keygen();
output_data.write_all(&fr_to_bytes_le(&identity_trapdoor))?;
@@ -846,14 +814,14 @@ impl RLN<'_> {
/// rln.seeded_key_gen(&mut input_buffer, &mut output_buffer)
/// .unwrap();
///
/// // We deserialize the keygen output
/// // We serialize_compressed the keygen output
/// let (identity_secret_hash, id_commitment) = deserialize_identity_pair(output_buffer.into_inner());
/// ```
pub fn seeded_key_gen<R: Read, W: Write>(
&self,
mut input_data: R,
mut output_data: W,
) -> io::Result<()> {
) -> Result<()> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
@@ -889,14 +857,14 @@ impl RLN<'_> {
/// rln.seeded_key_gen(&mut input_buffer, &mut output_buffer)
/// .unwrap();
///
/// // We deserialize the keygen output
/// // We serialize_compressed the keygen output
/// let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) = deserialize_identity_tuple(buffer.into_inner());
/// ```
pub fn seeded_extended_key_gen<R: Read, W: Write>(
&self,
mut input_data: R,
mut output_data: W,
) -> io::Result<()> {
) -> Result<()> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
@@ -947,21 +915,21 @@ impl RLN<'_> {
mut input_proof_data_1: R,
mut input_proof_data_2: R,
mut output_data: W,
) -> io::Result<()> {
// We deserialize the two proofs and we get the corresponding RLNProofValues objects
) -> Result<()> {
// We deserialize the two proofs and we get the corresponding RLNProofValues objects
let mut serialized: Vec<u8> = Vec::new();
input_proof_data_1.read_to_end(&mut serialized)?;
// We skip deserialization of the zk-proof at the beginning
let (proof_values_1, _) = deserialize_proof_values(&serialized[128..].to_vec());
let (proof_values_1, _) = deserialize_proof_values(&serialized[128..]);
let external_nullifier_1 =
poseidon_hash(&[proof_values_1.epoch, proof_values_1.rln_identifier]);
utils_poseidon_hash(&[proof_values_1.epoch, proof_values_1.rln_identifier]);
let mut serialized: Vec<u8> = Vec::new();
input_proof_data_2.read_to_end(&mut serialized)?;
// We skip deserialization of the zk-proof at the beginning
let (proof_values_2, _) = deserialize_proof_values(&serialized[128..].to_vec());
let (proof_values_2, _) = deserialize_proof_values(&serialized[128..]);
let external_nullifier_2 =
poseidon_hash(&[proof_values_2.epoch, proof_values_2.rln_identifier]);
utils_poseidon_hash(&[proof_values_2.epoch, proof_values_2.rln_identifier]);
// We continue only if the proof values are for the same epoch
// The idea is that proof values that go as input to this function are verified first (with zk-proof verify), hence ensuring validity of epoch and other fields.
@@ -977,8 +945,7 @@ impl RLN<'_> {
compute_id_secret(share1, share2, external_nullifier_1);
// If an identity secret hash is recovered, we write it to output_data, otherwise nothing will be written.
if recovered_identity_secret_hash.is_ok() {
let identity_secret_hash = recovered_identity_secret_hash.unwrap();
if let Ok(identity_secret_hash) = recovered_identity_secret_hash {
output_data.write_all(&fr_to_bytes_le(&identity_secret_hash))?;
}
}
@@ -986,49 +953,17 @@ impl RLN<'_> {
Ok(())
}
/// Hashes an input signal to an element in the working prime field.
///
/// The result is computed as the Keccak256 of the input signal modulo the prime field characteristic.
///
/// Input values are:
/// - `input_data`: a reader for the byte vector containing the input signal.
///
/// Output values are:
/// - `output_data`: a writer receiving the serialization of the resulting field element (serialization done with [`rln::utils::fr_to_bytes_le`](crate::utils::fr_to_bytes_le))
///
/// Example
/// ```
/// let signal: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
///
/// let mut input_buffer = Cursor::new(&signal);
/// let mut output_buffer = Cursor::new(Vec::<u8>::new());
/// rln.hash(&mut input_buffer, &mut output_buffer)
/// .unwrap();
///
/// // We deserialize the keygen output
/// let field_element = deserialize_field_element(output_buffer.into_inner());
/// ```
pub fn hash<R: Read, W: Write>(&self, mut input_data: R, mut output_data: W) -> io::Result<()> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
let hash = hash_to_field(&serialized);
output_data.write_all(&fr_to_bytes_le(&hash))?;
Ok(())
}
/// Returns the serialization of a [`RLNWitnessInput`](crate::protocol::RLNWitnessInput) populated from the identity secret, the Merkle tree index, the epoch and signal.
///
/// Input values are:
/// - `input_data`: a reader for the serialization of `[ identity_secret<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]`
///
/// The function returns the corresponding [`RLNWitnessInput`](crate::protocol::RLNWitnessInput) object serialized using [`rln::protocol::serialize_witness`](crate::protocol::serialize_witness)).
pub fn get_serialized_rln_witness<R: Read>(&mut self, mut input_data: R) -> Vec<u8> {
// We read input RLN witness and we deserialize it
pub fn get_serialized_rln_witness<R: Read>(&mut self, mut input_data: R) -> Result<Vec<u8>> {
// We read input RLN witness and we deserialize it
let mut witness_byte: Vec<u8> = Vec::new();
input_data.read_to_end(&mut witness_byte).unwrap();
let (rln_witness, _) = proof_inputs_to_rln_witness(&mut self.tree, &witness_byte);
input_data.read_to_end(&mut witness_byte)?;
let (rln_witness, _) = proof_inputs_to_rln_witness(&mut self.tree, &witness_byte)?;
serialize_witness(&rln_witness)
}
@@ -1039,12 +974,9 @@ impl RLN<'_> {
/// - `serialized_witness`: the byte serialization of a [`RLNWitnessInput`](crate::protocol::RLNWitnessInput) object (serialization done with [`rln::protocol::serialize_witness`](crate::protocol::serialize_witness)).
///
/// The function returns the corresponding JSON encoding of the input [`RLNWitnessInput`](crate::protocol::RLNWitnessInput) object.
pub fn get_rln_witness_json(
&mut self,
serialized_witness: &[u8],
) -> io::Result<serde_json::Value> {
let (rln_witness, _) = deserialize_witness(serialized_witness);
Ok(get_json_inputs(&rln_witness))
pub fn get_rln_witness_json(&mut self, serialized_witness: &[u8]) -> Result<serde_json::Value> {
let (rln_witness, _) = deserialize_witness(serialized_witness)?;
get_json_inputs(&rln_witness)
}
}
@@ -1053,15 +985,83 @@ impl Default for RLN<'_> {
fn default() -> Self {
let tree_height = TEST_TREE_HEIGHT;
let buffer = Cursor::new(TEST_RESOURCES_FOLDER);
Self::new(tree_height, buffer)
Self::new(tree_height, buffer).unwrap()
}
}
/// Hashes an input signal to an element in the working prime field.
///
/// The result is computed as the Keccak256 of the input signal modulo the prime field characteristic.
///
/// Input values are:
/// - `input_data`: a reader for the byte vector containing the input signal.
///
/// Output values are:
/// - `output_data`: a writer receiving the serialization of the resulting field element (serialization done with [`rln::utils::fr_to_bytes_le`](crate::utils::fr_to_bytes_le))
///
/// Example
/// ```
/// let signal: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
///
/// let mut input_buffer = Cursor::new(&signal);
/// let mut output_buffer = Cursor::new(Vec::<u8>::new());
/// hash(&mut input_buffer, &mut output_buffer)
/// .unwrap();
///
/// // We deserialize the hash output
/// let field_element = deserialize_field_element(output_buffer.into_inner());
/// ```
pub fn hash<R: Read, W: Write>(mut input_data: R, mut output_data: W) -> Result<()> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
let hash = hash_to_field(&serialized);
output_data.write_all(&fr_to_bytes_le(&hash))?;
Ok(())
}
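For intuition, `hash_to_field` amounts to a Keccak256 digest reduced modulo the field characteristic; a minimal sketch of that computation (assuming `tiny_keccak` and `ark_ff`'s byte reduction — not necessarily the exact code in `rln::hashers`):

```rust
use ark_ff::PrimeField;
use tiny_keccak::{Hasher, Keccak};

fn hash_to_field_sketch(signal: &[u8]) -> Fr {
    // Keccak256 digest of the raw signal bytes...
    let mut keccak = Keccak::v256();
    keccak.update(signal);
    let mut digest = [0u8; 32];
    keccak.finalize(&mut digest);
    // ...read as a little-endian integer and reduced mod p.
    Fr::from_le_bytes_mod_order(&digest)
}
```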
/// Hashes a set of elements to a single element in the working prime field, using Poseidon.
///
/// The result is computed as the Poseidon hash of the input field elements.
///
/// Input values are:
/// - `input_data`: a reader for the little-endian serialization of the vector of input field elements (as produced by [`rln::utils::vec_fr_to_bytes_le`](crate::utils::vec_fr_to_bytes_le))
///
/// Output values are:
/// - `output_data`: a writer receiving the serialization of the resulting field element (serialization done with [`rln::utils::fr_to_bytes_le`](crate::utils::fr_to_bytes_le))
///
/// Example
/// ```
/// let data = vec![hash_to_field(b"foo")];
/// let signal = vec_fr_to_bytes_le(&data).unwrap();
///
/// let mut input_buffer = Cursor::new(&signal);
/// let mut output_buffer = Cursor::new(Vec::<u8>::new());
/// poseidon_hash(&mut input_buffer, &mut output_buffer)
/// .unwrap();
///
/// // We deserialize the hash output
/// let hash_result = deserialize_field_element(output_buffer.into_inner());
/// ```
pub fn poseidon_hash<R: Read, W: Write>(mut input_data: R, mut output_data: W) -> Result<()> {
let mut serialized: Vec<u8> = Vec::new();
input_data.read_to_end(&mut serialized)?;
let (inputs, _) = bytes_le_to_vec_fr(&serialized)?;
let hash = utils_poseidon_hash(inputs.as_ref());
output_data.write_all(&fr_to_bytes_le(&hash))?;
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use ark_std::{rand::thread_rng, UniformRand};
use rand::Rng;
use utils::ZerokitMerkleTree;
// use rkyv::Deserialize;
#[test]
// We test batch Merkle tree additions
@@ -1078,7 +1078,7 @@ mod test {
// We create a new tree
let input_buffer = Cursor::new(TEST_RESOURCES_FOLDER);
let mut rln = RLN::new(tree_height, input_buffer);
let mut rln = RLN::new(tree_height, input_buffer).unwrap();
// We first add leaves one by one specifying the index
for (i, leaf) in leaves.iter().enumerate() {
@@ -1117,7 +1117,7 @@ mod test {
rln.set_tree(tree_height).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
rln.init_tree_with_leaves(&mut buffer).unwrap();
// We check if number of leaves set is consistent
@@ -1173,10 +1173,10 @@ mod test {
// We create a new tree
let input_buffer = Cursor::new(TEST_RESOURCES_FOLDER);
let mut rln = RLN::new(tree_height, input_buffer);
let mut rln = RLN::new(tree_height, input_buffer).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
rln.init_tree_with_leaves(&mut buffer).unwrap();
// We check if number of leaves set is consistent
@@ -1190,11 +1190,11 @@ mod test {
// `init_tree_with_leaves` resets the tree to the height it was initialized with, using `set_tree`
// We add leaves in a batch starting from index 0..set_index
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[0..set_index]));
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[0..set_index]).unwrap());
rln.init_tree_with_leaves(&mut buffer).unwrap();
// We add the remaining n leaves in a batch starting from index m
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[set_index..]));
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[set_index..]).unwrap());
rln.set_leaves_from(set_index, &mut buffer).unwrap();
// We check if number of leaves set is consistent
@@ -1227,6 +1227,7 @@ mod test {
assert_eq!(root_batch_with_init, root_single_additions);
}
#[allow(unused_must_use)]
#[test]
// This test checks if `set_leaves_from` throws an error when the index is out of bounds
fn test_set_leaves_bad_index() {
@@ -1243,7 +1244,7 @@ mod test {
// We create a new tree
let input_buffer = Cursor::new(TEST_RESOURCES_FOLDER);
let mut rln = RLN::new(tree_height, input_buffer);
let mut rln = RLN::new(tree_height, input_buffer).unwrap();
// Get root of empty tree
let mut buffer = Cursor::new(Vec::<u8>::new());
@@ -1251,7 +1252,7 @@ mod test {
let (root_empty, _) = bytes_le_to_fr(&buffer.into_inner());
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
rln.set_leaves_from(bad_index, &mut buffer)
.expect_err("Should throw an error");
@@ -1272,25 +1273,21 @@ mod test {
let tree_height = TEST_TREE_HEIGHT;
let input_buffer = Cursor::new(TEST_RESOURCES_FOLDER);
let mut rln = RLN::new(tree_height, input_buffer);
let mut rln = RLN::new(tree_height, input_buffer).unwrap();
// Note: we only test Groth16 proof generation, so we ignore setting the tree in the RLN object
let rln_witness = random_rln_witness(tree_height);
let proof_values = proof_values_from_witness(&rln_witness);
// We compute a Groth16 proof
let mut input_buffer = Cursor::new(serialize_witness(&rln_witness));
let mut input_buffer = Cursor::new(serialize_witness(&rln_witness).unwrap());
let mut output_buffer = Cursor::new(Vec::<u8>::new());
rln.prove(&mut input_buffer, &mut output_buffer).unwrap();
let serialized_proof = output_buffer.into_inner();
// Before checking public verify API, we check that the (deserialized) proof generated by prove is actually valid
let proof = ArkProof::deserialize(&mut Cursor::new(&serialized_proof)).unwrap();
let verified = verify_proof(
&rln.verification_key.as_ref().unwrap(),
&proof,
&proof_values,
);
let proof = ArkProof::deserialize_compressed(&mut Cursor::new(&serialized_proof)).unwrap();
let verified = verify_proof(&rln.verification_key, &proof, &proof_values);
assert!(verified.unwrap());
// We prepare the input to prove API, consisting of serialized_proof (compressed, 4*32 bytes) || serialized_proof_values (6*32 bytes)
@@ -1320,24 +1317,23 @@ mod test {
// We create a new RLN instance
let input_buffer = Cursor::new(TEST_RESOURCES_FOLDER);
let mut rln = RLN::new(tree_height, input_buffer);
let mut rln = RLN::new(tree_height, input_buffer).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
rln.init_tree_with_leaves(&mut buffer).unwrap();
// Generate identity pair
let (identity_secret_hash, id_commitment) = keygen();
// We set as leaf id_commitment after storing its index
let identity_index = u64::try_from(rln.tree.leaves_set()).unwrap();
let identity_index = rln.tree.leaves_set();
let mut buffer = Cursor::new(fr_to_bytes_le(&id_commitment));
rln.set_next_leaf(&mut buffer).unwrap();
// We generate a random signal
let mut rng = rand::thread_rng();
let signal: [u8; 32] = rng.gen();
let signal_len = u64::try_from(signal.len()).unwrap();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
@@ -1346,9 +1342,9 @@ mod test {
// input_data is [ identity_secret<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
let mut serialized: Vec<u8> = Vec::new();
serialized.append(&mut fr_to_bytes_le(&identity_secret_hash));
serialized.append(&mut identity_index.to_le_bytes().to_vec());
serialized.append(&mut normalize_usize(identity_index));
serialized.append(&mut fr_to_bytes_le(&epoch));
serialized.append(&mut signal_len.to_le_bytes().to_vec());
serialized.append(&mut normalize_usize(signal.len()));
serialized.append(&mut signal.to_vec());
let mut input_buffer = Cursor::new(serialized);
@@ -1362,7 +1358,7 @@ mod test {
// We prepare input for verify_rln_proof API
// input_data is [ proof<128> | share_y<32> | nullifier<32> | root<32> | epoch<32> | share_x<32> | rln_identifier<32> | signal_len<8> | signal<var> ]
// that is [ proof_data || signal_len<8> | signal<var> ]
proof_data.append(&mut signal_len.to_le_bytes().to_vec());
proof_data.append(&mut normalize_usize(signal.len()));
proof_data.append(&mut signal.to_vec());
let mut input_buffer = Cursor::new(proof_data);
@@ -1385,24 +1381,23 @@ mod test {
// We create a new RLN instance
let input_buffer = Cursor::new(TEST_RESOURCES_FOLDER);
let mut rln = RLN::new(tree_height, input_buffer);
let mut rln = RLN::new(tree_height, input_buffer).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
rln.init_tree_with_leaves(&mut buffer).unwrap();
// Generate identity pair
let (identity_secret_hash, id_commitment) = keygen();
// We set as leaf id_commitment after storing its index
let identity_index = u64::try_from(rln.tree.leaves_set()).unwrap();
let identity_index = rln.tree.leaves_set();
let mut buffer = Cursor::new(fr_to_bytes_le(&id_commitment));
rln.set_next_leaf(&mut buffer).unwrap();
// We generate a random signal
let mut rng = rand::thread_rng();
let signal: [u8; 32] = rng.gen();
let signal_len = u64::try_from(signal.len()).unwrap();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
@@ -1411,22 +1406,23 @@ mod test {
// input_data is [ identity_secret<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
let mut serialized: Vec<u8> = Vec::new();
serialized.append(&mut fr_to_bytes_le(&identity_secret_hash));
serialized.append(&mut identity_index.to_le_bytes().to_vec());
serialized.append(&mut normalize_usize(identity_index));
serialized.append(&mut fr_to_bytes_le(&epoch));
serialized.append(&mut signal_len.to_le_bytes().to_vec());
serialized.append(&mut normalize_usize(signal.len()));
serialized.append(&mut signal.to_vec());
let mut input_buffer = Cursor::new(serialized);
// We read input RLN witness and we deserialize it
// We read the input RLN witness and deserialize it
let mut witness_byte: Vec<u8> = Vec::new();
input_buffer.read_to_end(&mut witness_byte).unwrap();
let (rln_witness, _) = proof_inputs_to_rln_witness(&mut rln.tree, &witness_byte);
let (rln_witness, _) = proof_inputs_to_rln_witness(&mut rln.tree, &witness_byte).unwrap();
let serialized_witness = serialize_witness(&rln_witness);
let serialized_witness = serialize_witness(&rln_witness).unwrap();
// Calculate witness outside zerokit (simulating what JS is doing)
let inputs = inputs_for_witness_calculation(&rln_witness)
.unwrap()
.into_iter()
.map(|(name, values)| (name.to_string(), values));
let calculated_witness = rln
@@ -1439,7 +1435,7 @@ mod test {
let calculated_witness_vec: Vec<BigInt> = calculated_witness
.into_iter()
.map(|v| to_bigint(&v))
.map(|v| to_bigint(&v).unwrap())
.collect();
// Generating the proof
@@ -1457,7 +1453,7 @@ mod test {
// We prepare input for verify_rln_proof API
// input_data is [ proof<128> | share_y<32> | nullifier<32> | root<32> | epoch<32> | share_x<32> | rln_identifier<32> | signal_len<8> | signal<var> ]
// that is [ proof_data || signal_len<8> | signal<var> ]
proof_data.append(&mut signal_len.to_le_bytes().to_vec());
proof_data.append(&mut normalize_usize(signal.len()));
proof_data.append(&mut signal.to_vec());
let mut input_buffer = Cursor::new(proof_data);
@@ -1481,24 +1477,23 @@ mod test {
// We create a new RLN instance
let input_buffer = Cursor::new(TEST_RESOURCES_FOLDER);
let mut rln = RLN::new(tree_height, input_buffer);
let mut rln = RLN::new(tree_height, input_buffer).unwrap();
// We add leaves in a batch into the tree
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
rln.init_tree_with_leaves(&mut buffer).unwrap();
// Generate identity pair
let (identity_secret_hash, id_commitment) = keygen();
// We set as leaf id_commitment after storing its index
let identity_index = u64::try_from(rln.tree.leaves_set()).unwrap();
let identity_index = rln.tree.leaves_set();
let mut buffer = Cursor::new(fr_to_bytes_le(&id_commitment));
rln.set_next_leaf(&mut buffer).unwrap();
// We generate a random signal
let mut rng = rand::thread_rng();
let signal: [u8; 32] = rng.gen();
let signal_len = u64::try_from(signal.len()).unwrap();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
@@ -1507,9 +1502,9 @@ mod test {
// input_data is [ identity_secret<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
let mut serialized: Vec<u8> = Vec::new();
serialized.append(&mut fr_to_bytes_le(&identity_secret_hash));
serialized.append(&mut identity_index.to_le_bytes().to_vec());
serialized.append(&mut normalize_usize(identity_index));
serialized.append(&mut fr_to_bytes_le(&epoch));
serialized.append(&mut signal_len.to_le_bytes().to_vec());
serialized.append(&mut normalize_usize(signal.len()));
serialized.append(&mut signal.to_vec());
let mut input_buffer = Cursor::new(serialized);
@@ -1523,7 +1518,7 @@ mod test {
// We prepare input for verify_rln_proof API
// input_data is [ proof<128> | share_y<32> | nullifier<32> | root<32> | epoch<32> | share_x<32> | rln_identifier<32> | signal_len<8> | signal<var> ]
// that is [ proof_data || signal_len<8> | signal<var> ]
proof_data.append(&mut signal_len.to_le_bytes().to_vec());
proof_data.append(&mut normalize_usize(signal.len()));
proof_data.append(&mut signal.to_vec());
let input_buffer = Cursor::new(proof_data);
@@ -1568,23 +1563,21 @@ mod test {
// We create a new RLN instance
let input_buffer = Cursor::new(TEST_RESOURCES_FOLDER);
let mut rln = RLN::new(tree_height, input_buffer);
let mut rln = RLN::new(tree_height, input_buffer).unwrap();
// Generate identity pair
let (identity_secret_hash, id_commitment) = keygen();
// We set as leaf id_commitment after storing its index
let identity_index = u64::try_from(rln.tree.leaves_set()).unwrap();
let identity_index = rln.tree.leaves_set();
let mut buffer = Cursor::new(fr_to_bytes_le(&id_commitment));
rln.set_next_leaf(&mut buffer).unwrap();
// We generate two random signals
let mut rng = rand::thread_rng();
let signal1: [u8; 32] = rng.gen();
let signal1_len = u64::try_from(signal1.len()).unwrap();
let signal2: [u8; 32] = rng.gen();
let signal2_len = u64::try_from(signal2.len()).unwrap();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
@@ -1595,18 +1588,18 @@ mod test {
// input_data is [ identity_secret<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
let mut serialized1: Vec<u8> = Vec::new();
serialized1.append(&mut fr_to_bytes_le(&identity_secret_hash));
serialized1.append(&mut identity_index.to_le_bytes().to_vec());
serialized1.append(&mut normalize_usize(identity_index));
serialized1.append(&mut fr_to_bytes_le(&epoch));
// The first part is the same for both proof inputs, so we clone
let mut serialized2 = serialized1.clone();
// We attach the first signal to the first proof input
serialized1.append(&mut signal1_len.to_le_bytes().to_vec());
serialized1.append(&mut normalize_usize(signal1.len()));
serialized1.append(&mut signal1.to_vec());
// We attach the second signal to the second proof input
serialized2.append(&mut signal2_len.to_le_bytes().to_vec());
serialized2.append(&mut normalize_usize(signal2.len()));
serialized2.append(&mut signal2.to_vec());
// We generate the first proof
@@ -1648,21 +1641,20 @@ mod test {
let (identity_secret_hash_new, id_commitment_new) = keygen();
// We add it to the tree
let identity_index_new = u64::try_from(rln.tree.leaves_set()).unwrap();
let identity_index_new = rln.tree.leaves_set();
let mut buffer = Cursor::new(fr_to_bytes_le(&id_commitment_new));
rln.set_next_leaf(&mut buffer).unwrap();
// We generate a random signal
let signal3: [u8; 32] = rng.gen();
let signal3_len = u64::try_from(signal3.len()).unwrap();
// We prepare proof input. Note that epoch is the same as before
// input_data is [ identity_secret<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
let mut serialized3: Vec<u8> = Vec::new();
serialized3.append(&mut fr_to_bytes_le(&identity_secret_hash_new));
serialized3.append(&mut identity_index_new.to_le_bytes().to_vec());
serialized3.append(&mut normalize_usize(identity_index_new));
serialized3.append(&mut fr_to_bytes_le(&epoch));
serialized3.append(&mut signal3_len.to_le_bytes().to_vec());
serialized3.append(&mut normalize_usize(signal3.len()));
serialized3.append(&mut signal3.to_vec());
// We generate the proof

View File

@@ -2,22 +2,25 @@
use crate::circuit::Fr;
use ark_ff::PrimeField;
use color_eyre::{Report, Result};
use num_bigint::{BigInt, BigUint};
use num_traits::Num;
use std::iter::Extend;
pub fn to_bigint(el: &Fr) -> BigInt {
let res: BigUint = (*el).try_into().unwrap();
res.try_into().unwrap()
pub fn to_bigint(el: &Fr) -> Result<BigInt> {
let res: BigUint = (*el).try_into()?;
Ok(res.into())
}
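A quick sanity check of the conversion (sketch):

```rust
// Fr -> BigUint -> BigInt; small values round-trip unchanged.
let x = Fr::from(42u64);
assert_eq!(to_bigint(&x).unwrap(), BigInt::from(42u64));
```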
pub fn fr_byte_size() -> usize {
let mbs = <Fr as PrimeField>::size_in_bits();
(mbs + 64 - (mbs % 64)) / 8
let mbs = <Fr as PrimeField>::MODULUS_BIT_SIZE;
((mbs + 64 - (mbs % 64)) / 8) as usize
}
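Worked example, assuming `Fr` is BN254's scalar field (254-bit modulus): `254 % 64 = 62`, so `(254 + 64 - 62) / 8 = 256 / 8 = 32`; elements are padded to the next 64-bit boundary, i.e. 32 bytes each.

```rust
// Holds for BN254's Fr (254-bit modulus, padded to 32 bytes).
assert_eq!(fr_byte_size(), 32);
```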
pub fn str_to_fr(input: &str, radix: u32) -> Fr {
assert!((radix == 10) || (radix == 16));
pub fn str_to_fr(input: &str, radix: u32) -> Result<Fr> {
if !(radix == 10 || radix == 16) {
return Err(Report::msg("wrong radix"));
}
// We remove any quotes present and trim
let single_quote: char = '\"';
@@ -25,16 +28,10 @@ pub fn str_to_fr(input: &str, radix: u32) -> Fr {
input_clean = input_clean.trim().to_string();
if radix == 10 {
BigUint::from_str_radix(&input_clean, radix)
.unwrap()
.try_into()
.unwrap()
Ok(BigUint::from_str_radix(&input_clean, radix)?.try_into()?)
} else {
input_clean = input_clean.replace("0x", "");
BigUint::from_str_radix(&input_clean, radix)
.unwrap()
.try_into()
.unwrap()
Ok(BigUint::from_str_radix(&input_clean, radix)?.try_into()?)
}
}
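Usage sketch:

```rust
// Decimal and hex parses of the same value agree; any other radix errors out.
assert_eq!(str_to_fr("42", 10).unwrap(), str_to_fr("0x2a", 16).unwrap());
assert!(str_to_fr("42", 2).is_err());
```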
@@ -75,99 +72,111 @@ pub fn fr_to_bytes_be(input: &Fr) -> Vec<u8> {
res
}
pub fn vec_fr_to_bytes_le(input: &[Fr]) -> Vec<u8> {
pub fn vec_fr_to_bytes_le(input: &[Fr]) -> Result<Vec<u8>> {
let mut bytes: Vec<u8> = Vec::new();
// We store the vector length
bytes.extend(u64::try_from(input.len()).unwrap().to_le_bytes().to_vec());
bytes.extend(input.len().to_le_bytes().to_vec());
// We store each element
input.iter().for_each(|el| bytes.extend(fr_to_bytes_le(el)));
bytes
Ok(bytes)
}
pub fn vec_fr_to_bytes_be(input: &[Fr]) -> Vec<u8> {
pub fn vec_fr_to_bytes_be(input: &[Fr]) -> Result<Vec<u8>> {
let mut bytes: Vec<u8> = Vec::new();
// We store the vector length
bytes.extend(u64::try_from(input.len()).unwrap().to_be_bytes().to_vec());
bytes.extend(input.len().to_be_bytes().to_vec());
// We store each element
input.iter().for_each(|el| bytes.extend(fr_to_bytes_be(el)));
bytes
Ok(bytes)
}
pub fn vec_u8_to_bytes_le(input: &[u8]) -> Vec<u8> {
pub fn vec_u8_to_bytes_le(input: &[u8]) -> Result<Vec<u8>> {
let mut bytes: Vec<u8> = Vec::new();
// We store the vector length
bytes.extend(u64::try_from(input.len()).unwrap().to_le_bytes().to_vec());
bytes.extend(input.len().to_le_bytes().to_vec());
bytes.extend(input);
bytes
Ok(bytes)
}
pub fn vec_u8_to_bytes_be(input: Vec<u8>) -> Vec<u8> {
pub fn vec_u8_to_bytes_be(input: Vec<u8>) -> Result<Vec<u8>> {
let mut bytes: Vec<u8> = Vec::new();
// We store the vector length
bytes.extend(u64::try_from(input.len()).unwrap().to_be_bytes().to_vec());
bytes.extend(input.len().to_be_bytes().to_vec());
bytes.extend(input);
bytes
Ok(bytes)
}
pub fn bytes_le_to_vec_u8(input: &[u8]) -> (Vec<u8>, usize) {
pub fn bytes_le_to_vec_u8(input: &[u8]) -> Result<(Vec<u8>, usize)> {
let mut read: usize = 0;
let len = u64::from_le_bytes(input[0..8].try_into().unwrap()) as usize;
let len = usize::from_le_bytes(input[0..8].try_into()?);
read += 8;
let res = input[8..8 + len].to_vec();
read += res.len();
(res, read)
Ok((res, read))
}
pub fn bytes_be_to_vec_u8(input: &[u8]) -> (Vec<u8>, usize) {
pub fn bytes_be_to_vec_u8(input: &[u8]) -> Result<(Vec<u8>, usize)> {
let mut read: usize = 0;
let len = u64::from_be_bytes(input[0..8].try_into().unwrap()) as usize;
let len = usize::from_be_bytes(input[0..8].try_into()?);
read += 8;
let res = input[8..8 + len].to_vec();
read += res.len();
(res, read)
Ok((res, read))
}
pub fn bytes_le_to_vec_fr(input: &[u8]) -> (Vec<Fr>, usize) {
pub fn bytes_le_to_vec_fr(input: &[u8]) -> Result<(Vec<Fr>, usize)> {
let mut read: usize = 0;
let mut res: Vec<Fr> = Vec::new();
let len = u64::from_le_bytes(input[0..8].try_into().unwrap()) as usize;
let len = usize::from_le_bytes(input[0..8].try_into()?);
read += 8;
let el_size = fr_byte_size();
for i in 0..len {
let (curr_el, _) = bytes_le_to_fr(&input[8 + el_size * i..8 + el_size * (i + 1)].to_vec());
let (curr_el, _) = bytes_le_to_fr(&input[8 + el_size * i..8 + el_size * (i + 1)]);
res.push(curr_el);
read += el_size;
}
(res, read)
Ok((res, read))
}
pub fn bytes_be_to_vec_fr(input: &[u8]) -> (Vec<Fr>, usize) {
pub fn bytes_be_to_vec_fr(input: &[u8]) -> Result<(Vec<Fr>, usize)> {
let mut read: usize = 0;
let mut res: Vec<Fr> = Vec::new();
let len = u64::from_be_bytes(input[0..8].try_into().unwrap()) as usize;
let len = usize::from_be_bytes(input[0..8].try_into()?);
read += 8;
let el_size = fr_byte_size();
for i in 0..len {
let (curr_el, _) = bytes_be_to_fr(&input[8 + el_size * i..8 + el_size * (i + 1)].to_vec());
let (curr_el, _) = bytes_be_to_fr(&input[8 + el_size * i..8 + el_size * (i + 1)]);
res.push(curr_el);
read += el_size;
}
(res, read)
Ok((res, read))
}
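All of these (de)serializers share one framing: a length prefix followed by the elements. Note that as written the writers emit `usize::to_le_bytes`/`to_be_bytes` (8 bytes only on 64-bit targets) while the readers always consume a fixed 8-byte prefix; `normalize_usize` below gives callers a portable way to produce that prefix. A round-trip sketch on a 64-bit target:

```rust
// [ len<8> | fr_0<32> | fr_1<32> ] in little-endian framing.
let input = vec![Fr::from(1u64), Fr::from(2u64)];
let bytes = vec_fr_to_bytes_le(&input).unwrap();
assert_eq!(bytes.len(), 8 + 2 * fr_byte_size());
let (output, read) = bytes_le_to_vec_fr(&bytes).unwrap();
assert_eq!(output, input);
assert_eq!(read, bytes.len());
```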
pub fn normalize_usize(input: usize) -> Vec<u8> {
let mut normalized_usize = input.to_le_bytes().to_vec();
normalized_usize.resize(8, 0);
normalized_usize
}
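`normalize_usize` pins the prefix to 8 bytes on every target: `to_le_bytes` yields `size_of::<usize>()` bytes (4 on a 32-bit target), and the `resize` zero-pads up to 8.

```rust
// The same 8-byte little-endian prefix regardless of target width.
assert_eq!(normalize_usize(32), vec![32, 0, 0, 0, 0, 0, 0, 0]);
```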
/* Old conversion utilities between different libraries' data types

View File

@@ -3,8 +3,8 @@ mod test {
use ark_std::{rand::thread_rng, UniformRand};
use rand::Rng;
use rln::circuit::*;
use rln::ffi::*;
use rln::poseidon_hash::poseidon_hash;
use rln::ffi::{hash as ffi_hash, poseidon_hash as ffi_poseidon_hash, *};
use rln::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS};
use rln::protocol::*;
use rln::public::RLN;
use rln::utils::*;
@@ -78,7 +78,7 @@ mod test {
assert!(success, "set tree call failed");
// We add leaves in a batch into the tree
let leaves_ser = vec_fr_to_bytes_le(&leaves);
let leaves_ser = vec_fr_to_bytes_le(&leaves).unwrap();
let input_buffer = &Buffer::from(leaves_ser.as_ref());
let success = init_tree_with_leaves(rln_pointer, input_buffer);
assert!(success, "init tree with leaves call failed");
@@ -153,7 +153,7 @@ mod test {
let set_index = rng.gen_range(0..no_of_leaves) as usize;
// We add leaves in a batch into the tree
let leaves_ser = vec_fr_to_bytes_le(&leaves);
let leaves_ser = vec_fr_to_bytes_le(&leaves).unwrap();
let input_buffer = &Buffer::from(leaves_ser.as_ref());
let success = init_tree_with_leaves(rln_pointer, input_buffer);
assert!(success, "init tree with leaves call failed");
@@ -170,13 +170,13 @@ mod test {
// `init_tree_with_leaves` resets the tree to the height it was initialized with, using `set_tree`
// We add leaves in a batch starting from index 0..set_index
let leaves_m = vec_fr_to_bytes_le(&leaves[0..set_index]);
let leaves_m = vec_fr_to_bytes_le(&leaves[0..set_index]).unwrap();
let buffer = &Buffer::from(leaves_m.as_ref());
let success = init_tree_with_leaves(rln_pointer, buffer);
assert!(success, "init tree with leaves call failed");
// We add the remaining n leaves in a batch starting from index set_index
let leaves_n = vec_fr_to_bytes_le(&leaves[set_index..]);
let leaves_n = vec_fr_to_bytes_le(&leaves[set_index..]).unwrap();
let buffer = &Buffer::from(leaves_n.as_ref());
let success = set_leaves_from(rln_pointer, set_index, buffer);
assert!(success, "set leaves from call failed");
@@ -248,7 +248,7 @@ mod test {
let (root_empty, _) = bytes_le_to_fr(&result_data);
// We add leaves in a batch into the tree
let leaves = vec_fr_to_bytes_le(&leaves);
let leaves = vec_fr_to_bytes_le(&leaves).unwrap();
let buffer = &Buffer::from(leaves.as_ref());
let success = set_leaves_from(rln_pointer, bad_index, buffer);
assert!(!success, "set leaves from call succeeded");
@@ -280,7 +280,7 @@ mod test {
// generate identity
let identity_secret_hash = hash_to_field(b"test-merkle-proof");
let id_commitment = poseidon_hash(&vec![identity_secret_hash]);
let id_commitment = utils_poseidon_hash(&vec![identity_secret_hash]);
// We prepare id_commitment and we set the leaf at provided index
let leaf_ser = fr_to_bytes_le(&id_commitment);
@@ -303,71 +303,86 @@ mod test {
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (path_elements, read) = bytes_le_to_vec_fr(&result_data);
let (identity_path_index, _) = bytes_le_to_vec_u8(&result_data[read..].to_vec());
let (path_elements, read) = bytes_le_to_vec_fr(&result_data).unwrap();
let (identity_path_index, _) = bytes_le_to_vec_u8(&result_data[read..].to_vec()).unwrap();
// We check correct computation of the path and indexes
let mut expected_path_elements = vec![
str_to_fr(
"0x0000000000000000000000000000000000000000000000000000000000000000",
16,
),
)
.unwrap(),
str_to_fr(
"0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864",
16,
),
)
.unwrap(),
str_to_fr(
"0x1069673dcdb12263df301a6ff584a7ec261a44cb9dc68df067a4774460b1f1e1",
16,
),
)
.unwrap(),
str_to_fr(
"0x18f43331537ee2af2e3d758d50f72106467c6eea50371dd528d57eb2b856d238",
16,
),
)
.unwrap(),
str_to_fr(
"0x07f9d837cb17b0d36320ffe93ba52345f1b728571a568265caac97559dbc952a",
16,
),
)
.unwrap(),
str_to_fr(
"0x2b94cf5e8746b3f5c9631f4c5df32907a699c58c94b2ad4d7b5cec1639183f55",
16,
),
)
.unwrap(),
str_to_fr(
"0x2dee93c5a666459646ea7d22cca9e1bcfed71e6951b953611d11dda32ea09d78",
16,
),
)
.unwrap(),
str_to_fr(
"0x078295e5a22b84e982cf601eb639597b8b0515a88cb5ac7fa8a4aabe3c87349d",
16,
),
)
.unwrap(),
str_to_fr(
"0x2fa5e5f18f6027a6501bec864564472a616b2e274a41211a444cbe3a99f3cc61",
16,
),
)
.unwrap(),
str_to_fr(
"0x0e884376d0d8fd21ecb780389e941f66e45e7acce3e228ab3e2156a614fcd747",
16,
),
)
.unwrap(),
str_to_fr(
"0x1b7201da72494f1e28717ad1a52eb469f95892f957713533de6175e5da190af2",
16,
),
)
.unwrap(),
str_to_fr(
"0x1f8d8822725e36385200c0b201249819a6e6e1e4650808b5bebc6bface7d7636",
16,
),
)
.unwrap(),
str_to_fr(
"0x2c5d82f66c914bafb9701589ba8cfcfb6162b0a12acf88a8d0879a0471b5f85a",
16,
),
)
.unwrap(),
str_to_fr(
"0x14c54148a0940bb820957f5adf3fa1134ef5c4aaa113f4646458f270e0bfbfd0",
16,
),
)
.unwrap(),
str_to_fr(
"0x190d33b12f986f961e10c0ee44d8b9af11be25588cad89d416118e4bf4ebe80c",
16,
),
)
.unwrap(),
];
let mut expected_identity_path_index: Vec<u8> =
@@ -379,19 +394,23 @@ mod test {
str_to_fr(
"0x22f98aa9ce704152ac17354914ad73ed1167ae6596af510aa5b3649325e06c92",
16,
),
)
.unwrap(),
str_to_fr(
"0x2a7c7c9b6ce5880b9f6f228d72bf6a575a526f29c66ecceef8b753d38bba7323",
16,
),
)
.unwrap(),
str_to_fr(
"0x2e8186e558698ec1c67af9c14d463ffc470043c9c2988b954d75dd643f36b992",
16,
),
)
.unwrap(),
str_to_fr(
"0x0f57c5571e9a4eab49e2c8cf050dae948aef6ead647392273546249d1c1ff10f",
16,
),
)
.unwrap(),
]);
expected_identity_path_index.append(&mut vec![0, 0, 0, 0]);
}
@@ -400,7 +419,8 @@ mod test {
expected_path_elements.append(&mut vec![str_to_fr(
"0x1830ee67b5fb554ad5f63d4388800e1cfe78e310697d46e43c9ce36134f72cca",
16,
)]);
)
.unwrap()]);
expected_identity_path_index.append(&mut vec![0]);
}
@@ -439,7 +459,7 @@ mod test {
let proof_values = proof_values_from_witness(&rln_witness);
// We serialize the RLN witness and prepare the input buffer
let rln_witness_ser = serialize_witness(&rln_witness);
let rln_witness_ser = serialize_witness(&rln_witness).unwrap();
let input_buffer = &Buffer::from(rln_witness_ser.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let now = Instant::now();
@@ -569,7 +589,7 @@ mod test {
let rln_pointer = unsafe { &mut *rln_pointer.assume_init() };
// We add leaves in a batch into the tree
let leaves_ser = vec_fr_to_bytes_le(&leaves);
let leaves_ser = vec_fr_to_bytes_le(&leaves).unwrap();
let input_buffer = &Buffer::from(leaves_ser.as_ref());
let success = init_tree_with_leaves(rln_pointer, input_buffer);
assert!(success, "init tree with leaves call failed");
@@ -589,12 +609,11 @@ mod test {
let success = set_next_leaf(rln_pointer, input_buffer);
assert!(success, "set next leaf call failed");
let identity_index: u64 = no_of_leaves;
let identity_index: usize = no_of_leaves;
// We generate a random signal
let mut rng = rand::thread_rng();
let signal: [u8; 32] = rng.gen();
let signal_len = u64::try_from(signal.len()).unwrap();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
@@ -603,9 +622,9 @@ mod test {
// input_data is [ identity_secret<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
let mut serialized: Vec<u8> = Vec::new();
serialized.append(&mut fr_to_bytes_le(&identity_secret_hash));
serialized.append(&mut identity_index.to_le_bytes().to_vec());
serialized.append(&mut normalize_usize(identity_index));
serialized.append(&mut fr_to_bytes_le(&epoch));
serialized.append(&mut signal_len.to_le_bytes().to_vec());
serialized.append(&mut normalize_usize(signal.len()));
serialized.append(&mut signal.to_vec());
// We call generate_rln_proof
@@ -620,7 +639,7 @@ mod test {
// We prepare input for verify_rln_proof API
// input_data is [ proof<128> | share_y<32> | nullifier<32> | root<32> | epoch<32> | share_x<32> | rln_identifier<32> | signal_len<8> | signal<var> ]
// that is [ proof_data | signal_len<8> | signal<var> ]
proof_data.append(&mut signal_len.to_le_bytes().to_vec());
proof_data.append(&mut normalize_usize(signal.len()));
proof_data.append(&mut signal.to_vec());
// We call verify_rln_proof
@@ -654,7 +673,7 @@ mod test {
let rln_pointer = unsafe { &mut *rln_pointer.assume_init() };
// We add leaves in a batch into the tree
let leaves_ser = vec_fr_to_bytes_le(&leaves);
let leaves_ser = vec_fr_to_bytes_le(&leaves).unwrap();
let input_buffer = &Buffer::from(leaves_ser.as_ref());
let success = init_tree_with_leaves(rln_pointer, input_buffer);
assert!(success, "set leaves call failed");
@@ -674,12 +693,11 @@ mod test {
let success = set_next_leaf(rln_pointer, input_buffer);
assert!(success, "set next leaf call failed");
let identity_index: u64 = no_of_leaves;
let identity_index: usize = no_of_leaves;
// We generate a random signal
let mut rng = rand::thread_rng();
let signal: [u8; 32] = rng.gen();
let signal_len = u64::try_from(signal.len()).unwrap();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
@@ -688,9 +706,9 @@ mod test {
// input_data is [ identity_secret<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
let mut serialized: Vec<u8> = Vec::new();
serialized.append(&mut fr_to_bytes_le(&identity_secret_hash));
serialized.append(&mut identity_index.to_le_bytes().to_vec());
serialized.append(&mut normalize_usize(identity_index));
serialized.append(&mut fr_to_bytes_le(&epoch));
serialized.append(&mut signal_len.to_le_bytes().to_vec());
serialized.append(&mut normalize_usize(signal.len()));
serialized.append(&mut signal.to_vec());
// We call generate_rln_proof
@@ -705,7 +723,7 @@ mod test {
// We prepare input for verify_rln_proof API
// input_data is [ proof<128> | share_y<32> | nullifier<32> | root<32> | epoch<32> | share_x<32> | rln_identifier<32> | signal_len<8> | signal<var> ]
// that is [ proof_data | signal_len<8> | signal<var> ]
proof_data.append(&mut signal_len.to_le_bytes().to_vec());
proof_data.append(&mut normalize_usize(signal.len()));
proof_data.append(&mut signal.to_vec());
// We test verify_with_roots
@@ -787,18 +805,16 @@ mod test {
let success = set_next_leaf(rln_pointer, input_buffer);
assert!(success, "set next leaf call failed");
let identity_index: u64 = 0;
let identity_index: usize = 0;
// We generate two proofs using the same epoch but different signals.
// We generate two random signals
let mut rng = rand::thread_rng();
let signal1: [u8; 32] = rng.gen();
let signal1_len = u64::try_from(signal1.len()).unwrap();
// We generate the second random signal
let signal2: [u8; 32] = rng.gen();
let signal2_len = u64::try_from(signal2.len()).unwrap();
// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
@@ -807,18 +823,18 @@ mod test {
// input_data is [ identity_secret<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
let mut serialized1: Vec<u8> = Vec::new();
serialized1.append(&mut fr_to_bytes_le(&identity_secret_hash));
serialized1.append(&mut identity_index.to_le_bytes().to_vec());
serialized1.append(&mut normalize_usize(identity_index));
serialized1.append(&mut fr_to_bytes_le(&epoch));
// The first part is the same for both proof inputs, so we clone
let mut serialized2 = serialized1.clone();
// We attach the first signal to the first proof input
serialized1.append(&mut signal1_len.to_le_bytes().to_vec());
serialized1.append(&mut normalize_usize(signal1.len()));
serialized1.append(&mut signal1.to_vec());
// We attach the second signal to the second proof input
serialized2.append(&mut signal2_len.to_le_bytes().to_vec());
serialized2.append(&mut normalize_usize(signal2.len()));
serialized2.append(&mut signal2.to_vec());
// We call generate_rln_proof for first proof values
@@ -877,20 +893,19 @@ mod test {
let success = set_next_leaf(rln_pointer, input_buffer);
assert!(success, "set next leaf call failed");
let identity_index_new: u64 = 1;
let identity_index_new: usize = 1;
// We generate a random signal
let signal3: [u8; 32] = rng.gen();
let signal3_len = u64::try_from(signal3.len()).unwrap();
// We prepare input for generate_rln_proof API
// input_data is [ identity_secret<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
// Note that epoch is the same as before
let mut serialized: Vec<u8> = Vec::new();
serialized.append(&mut fr_to_bytes_le(&identity_secret_hash_new));
serialized.append(&mut identity_index_new.to_le_bytes().to_vec());
serialized.append(&mut normalize_usize(identity_index_new));
serialized.append(&mut fr_to_bytes_le(&epoch));
serialized.append(&mut signal3_len.to_le_bytes().to_vec());
serialized.append(&mut normalize_usize(signal3.len()));
serialized.append(&mut signal3.to_vec());
// We call generate_rln_proof
@@ -957,16 +972,15 @@ mod test {
assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes
expected_identity_secret_hash_seed_bytes.unwrap()
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}
#[test]
// Tests seeded extended keygen using FFI APIs
fn test_seeded_extended_keygen_ffi() {
let tree_height = TEST_TREE_HEIGHT;
// We create a RLN instance
let mut rln_pointer = MaybeUninit::<*mut RLN>::uninit();
let input_buffer = &Buffer::from(TEST_RESOURCES_FOLDER.as_bytes());
@@ -1004,34 +1018,31 @@ mod test {
16,
);
assert_eq!(identity_trapdoor, expected_identity_trapdoor_seed_bytes);
assert_eq!(identity_nullifier, expected_identity_nullifier_seed_bytes);
assert_eq!(
identity_trapdoor,
expected_identity_trapdoor_seed_bytes.unwrap()
);
assert_eq!(
identity_nullifier,
expected_identity_nullifier_seed_bytes.unwrap()
);
assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes
expected_identity_secret_hash_seed_bytes.unwrap()
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}
#[test]
// Tests hash to field using FFI APIs
fn test_hash_to_field_ffi() {
let tree_height = TEST_TREE_HEIGHT;
// We create a RLN instance
let mut rln_pointer = MaybeUninit::<*mut RLN>::uninit();
let input_buffer = &Buffer::from(TEST_RESOURCES_FOLDER.as_bytes());
let success = new(tree_height, input_buffer, rln_pointer.as_mut_ptr());
assert!(success, "RLN object creation failed");
let rln_pointer = unsafe { &mut *rln_pointer.assume_init() };
let mut rng = rand::thread_rng();
let signal: [u8; 32] = rng.gen();
// We prepare the signal buffer and call the FFI hash API
let input_buffer = &Buffer::from(signal.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = hash(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
let success = ffi_hash(input_buffer, output_buffer.as_mut_ptr());
assert!(success, "hash call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
@@ -1043,4 +1054,30 @@ mod test {
assert_eq!(hash1, hash2);
}
#[test]
// Test Poseidon hash FFI
fn test_poseidon_hash_ffi() {
// generate a random number of inputs in 1..ROUND_PARAMS.len()
let mut rng = thread_rng();
let number_of_inputs = rng.gen_range(1..ROUND_PARAMS.len());
let mut inputs = Vec::with_capacity(number_of_inputs);
for _ in 0..number_of_inputs {
inputs.push(Fr::rand(&mut rng));
}
let inputs_ser = vec_fr_to_bytes_le(&inputs).unwrap();
let input_buffer = &Buffer::from(inputs_ser.as_ref());
let expected_hash = utils_poseidon_hash(inputs.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_poseidon_hash(input_buffer, output_buffer.as_mut_ptr());
assert!(success, "poseidon hash call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (received_hash, _) = bytes_le_to_fr(&result_data);
assert_eq!(received_hash, expected_hash);
}
}

View File

@@ -6,7 +6,7 @@
mod test {
use rln::circuit::*;
use rln::poseidon_tree::*;
use utils::{FullMerkleTree, OptimalMerkleTree};
use utils::{FullMerkleTree, OptimalMerkleTree, ZerokitMerkleProof, ZerokitMerkleTree};
#[test]
/// A basic performance comparison between the two supported Merkle Tree implementations
@@ -82,56 +82,38 @@ mod test {
// Test module for testing pmtree integration and features in zerokit
// enabled only if the pmtree feature is enabled
#[cfg(feature = "pmtree")]
#[cfg(feature = "pmtree-ft")]
#[cfg(test)]
mod pmtree_test {
use pmtree::*;
use rln::circuit::Fr;
use rln::poseidon_hash::poseidon_hash;
use rln::hashers::{hash_to_field, poseidon_hash};
use rln::poseidon_tree::PoseidonHash;
use rln::protocol::hash_to_field;
use rln::utils::{bytes_le_to_fr, fr_to_bytes_le, str_to_fr};
use rln::utils::str_to_fr;
use sled::Db as Sled;
use std::collections::HashMap;
use std::fs;
use std::path::Path;
use std::{collections::HashMap, path::PathBuf};
use utils::{FullMerkleTree, OptimalMerkleTree};
// The pmtree Hasher trait used by pmtree Merkle tree
impl pmtree::Hasher for PoseidonHash {
type Fr = Fr;
fn default_leaf() -> Self::Fr {
Fr::from(0)
}
fn serialize(value: Self::Fr) -> Value {
fr_to_bytes_le(&value)
}
fn deserialize(value: Value) -> Self::Fr {
let (fr, _) = bytes_le_to_fr(&value);
fr
}
fn hash(inputs: &[Self::Fr]) -> Self::Fr {
poseidon_hash(inputs)
}
}
// pmtree supports in-memory and on-disk databases (Database trait) for storing the Merkle tree state
// We implement Database for hashmaps, an in-memory database
struct MemoryDB(HashMap<DBKey, Value>);
#[derive(Default)]
struct MemoryDBConfig {}
impl Database for MemoryDB {
fn new(_dbpath: &str) -> Result<Self> {
type Config = MemoryDBConfig;
fn new(_config: Self::Config) -> Result<Self> {
Ok(MemoryDB(HashMap::new()))
}
fn load(_dbpath: &str) -> Result<Self> {
Err(Error("Cannot load in-memory DB".to_string()))
fn load(_config: Self::Config) -> Result<Self> {
Err(Box::new(Error("Cannot load in-memory DB".to_string())))
}
fn get(&self, key: DBKey) -> Result<Option<Value>> {
@@ -143,36 +125,46 @@ mod pmtree_test {
Ok(())
}
fn put_batch(&mut self, subtree: HashMap<DBKey, Value>) -> Result<()> {
self.0.extend(subtree);
Ok(())
}
}
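Hypothetical driver code for the in-memory backend above (assuming pmtree's `DBKey` is an 8-byte key and `Value` a byte vector):

```rust
// put/get round-trip through the Database trait (sketch).
let mut db = MemoryDB::new(MemoryDBConfig::default()).unwrap();
let key: DBKey = [0u8; 8];
db.put(key, vec![1, 2, 3]).unwrap();
assert_eq!(db.get(key).unwrap(), Some(vec![1, 2, 3]));
// Loading an in-memory DB is defined to fail.
assert!(MemoryDB::load(MemoryDBConfig::default()).is_err());
```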
// We implement Database for sled DB, an on-disk database
struct SledDB(Sled);
impl Database for SledDB {
fn new(dbpath: &str) -> Result<Self> {
if Path::new(dbpath).exists() {
match fs::remove_dir_all(dbpath) {
type Config = sled::Config;
fn new(config: Self::Config) -> Result<Self> {
let dbpath = &config.path;
if dbpath.exists() {
match fs::remove_dir_all(dbpath) {
Ok(x) => x,
Err(e) => return Err(Error(e.to_string())),
Err(e) => return Err(Box::new(Error(e.to_string()))),
}
}
let db: Sled = match sled::open(dbpath) {
let db: Sled = match config.open() {
Ok(db) => db,
Err(e) => return Err(Error(e.to_string())),
Err(e) => return Err(Box::new(Error(e.to_string()))),
};
Ok(SledDB(db))
}
fn load(dbpath: &str) -> Result<Self> {
let db: Sled = match sled::open(dbpath) {
fn load(config: Self::Config) -> Result<Self> {
let db: Sled = match sled::open(&config.path) {
Ok(db) => db,
Err(e) => return Err(Error(e.to_string())),
Err(e) => return Err(Box::new(Error(e.to_string()))),
};
if !db.was_recovered() {
return Err(Error("Trying to load non-existing database!".to_string()));
return Err(Box::new(Error(
"Trying to load non-existing database!".to_string(),
)));
}
Ok(SledDB(db))
@@ -181,16 +173,27 @@ mod pmtree_test {
fn get(&self, key: DBKey) -> Result<Option<Value>> {
match self.0.get(key) {
Ok(value) => Ok(value.map(|val| val.to_vec())),
Err(e) => Err(Error(e.to_string())),
Err(e) => Err(Box::new(Error(e.to_string()))),
}
}
fn put(&mut self, key: DBKey, value: Value) -> Result<()> {
match self.0.insert(key, value) {
Ok(_) => Ok(()),
Err(e) => Err(Error(e.to_string())),
Err(e) => Err(Box::new(Error(e.to_string()))),
}
}
fn put_batch(&mut self, subtree: HashMap<DBKey, Value>) -> Result<()> {
let mut batch = sled::Batch::default();
for (key, value) in subtree {
batch.insert(&key, value);
}
self.0.apply_batch(batch)?;
Ok(())
}
}
#[test]
@@ -341,6 +344,7 @@ mod pmtree_test {
"0x21947ffd0bce0c385f876e7c97d6a42eec5b1fe935aab2f01c1f8a8cbcc356d2",
16
)
.unwrap()
);
let merkle_proof = tree.proof(leaf_index).expect("proof should exist");
@@ -353,83 +357,103 @@ mod pmtree_test {
str_to_fr(
"0x0000000000000000000000000000000000000000000000000000000000000000",
16,
),
)
.unwrap(),
str_to_fr(
"0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864",
16,
),
)
.unwrap(),
str_to_fr(
"0x1069673dcdb12263df301a6ff584a7ec261a44cb9dc68df067a4774460b1f1e1",
16,
),
)
.unwrap(),
str_to_fr(
"0x18f43331537ee2af2e3d758d50f72106467c6eea50371dd528d57eb2b856d238",
16,
),
)
.unwrap(),
str_to_fr(
"0x07f9d837cb17b0d36320ffe93ba52345f1b728571a568265caac97559dbc952a",
16,
),
)
.unwrap(),
str_to_fr(
"0x2b94cf5e8746b3f5c9631f4c5df32907a699c58c94b2ad4d7b5cec1639183f55",
16,
),
)
.unwrap(),
str_to_fr(
"0x2dee93c5a666459646ea7d22cca9e1bcfed71e6951b953611d11dda32ea09d78",
16,
),
)
.unwrap(),
str_to_fr(
"0x078295e5a22b84e982cf601eb639597b8b0515a88cb5ac7fa8a4aabe3c87349d",
16,
),
)
.unwrap(),
str_to_fr(
"0x2fa5e5f18f6027a6501bec864564472a616b2e274a41211a444cbe3a99f3cc61",
16,
),
)
.unwrap(),
str_to_fr(
"0x0e884376d0d8fd21ecb780389e941f66e45e7acce3e228ab3e2156a614fcd747",
16,
),
)
.unwrap(),
str_to_fr(
"0x1b7201da72494f1e28717ad1a52eb469f95892f957713533de6175e5da190af2",
16,
),
)
.unwrap(),
str_to_fr(
"0x1f8d8822725e36385200c0b201249819a6e6e1e4650808b5bebc6bface7d7636",
16,
),
)
.unwrap(),
str_to_fr(
"0x2c5d82f66c914bafb9701589ba8cfcfb6162b0a12acf88a8d0879a0471b5f85a",
16,
),
)
.unwrap(),
str_to_fr(
"0x14c54148a0940bb820957f5adf3fa1134ef5c4aaa113f4646458f270e0bfbfd0",
16,
),
)
.unwrap(),
str_to_fr(
"0x190d33b12f986f961e10c0ee44d8b9af11be25588cad89d416118e4bf4ebe80c",
16,
),
)
.unwrap(),
str_to_fr(
"0x22f98aa9ce704152ac17354914ad73ed1167ae6596af510aa5b3649325e06c92",
16,
),
)
.unwrap(),
str_to_fr(
"0x2a7c7c9b6ce5880b9f6f228d72bf6a575a526f29c66ecceef8b753d38bba7323",
16,
),
)
.unwrap(),
str_to_fr(
"0x2e8186e558698ec1c67af9c14d463ffc470043c9c2988b954d75dd643f36b992",
16,
),
)
.unwrap(),
str_to_fr(
"0x0f57c5571e9a4eab49e2c8cf050dae948aef6ead647392273546249d1c1ff10f",
16,
),
)
.unwrap(),
str_to_fr(
"0x1830ee67b5fb554ad5f63d4388800e1cfe78e310697d46e43c9ce36134f72cca",
16,
),
)
.unwrap(),
];
let expected_identity_path_index: Vec<u8> =
@@ -467,6 +491,7 @@ mod pmtree_test {
"0x21947ffd0bce0c385f876e7c97d6a42eec5b1fe935aab2f01c1f8a8cbcc356d2",
16
)
.unwrap()
);
let merkle_proof = tree.proof(leaf_index).expect("proof should exist");
@@ -479,83 +504,103 @@ mod pmtree_test {
str_to_fr(
"0x0000000000000000000000000000000000000000000000000000000000000000",
16,
),
)
.unwrap(),
str_to_fr(
"0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864",
16,
),
)
.unwrap(),
str_to_fr(
"0x1069673dcdb12263df301a6ff584a7ec261a44cb9dc68df067a4774460b1f1e1",
16,
),
)
.unwrap(),
str_to_fr(
"0x18f43331537ee2af2e3d758d50f72106467c6eea50371dd528d57eb2b856d238",
16,
),
)
.unwrap(),
str_to_fr(
"0x07f9d837cb17b0d36320ffe93ba52345f1b728571a568265caac97559dbc952a",
16,
),
)
.unwrap(),
str_to_fr(
"0x2b94cf5e8746b3f5c9631f4c5df32907a699c58c94b2ad4d7b5cec1639183f55",
16,
),
)
.unwrap(),
str_to_fr(
"0x2dee93c5a666459646ea7d22cca9e1bcfed71e6951b953611d11dda32ea09d78",
16,
),
)
.unwrap(),
str_to_fr(
"0x078295e5a22b84e982cf601eb639597b8b0515a88cb5ac7fa8a4aabe3c87349d",
16,
),
)
.unwrap(),
str_to_fr(
"0x2fa5e5f18f6027a6501bec864564472a616b2e274a41211a444cbe3a99f3cc61",
16,
),
)
.unwrap(),
str_to_fr(
"0x0e884376d0d8fd21ecb780389e941f66e45e7acce3e228ab3e2156a614fcd747",
16,
),
)
.unwrap(),
str_to_fr(
"0x1b7201da72494f1e28717ad1a52eb469f95892f957713533de6175e5da190af2",
16,
),
)
.unwrap(),
str_to_fr(
"0x1f8d8822725e36385200c0b201249819a6e6e1e4650808b5bebc6bface7d7636",
16,
),
)
.unwrap(),
str_to_fr(
"0x2c5d82f66c914bafb9701589ba8cfcfb6162b0a12acf88a8d0879a0471b5f85a",
16,
),
)
.unwrap(),
str_to_fr(
"0x14c54148a0940bb820957f5adf3fa1134ef5c4aaa113f4646458f270e0bfbfd0",
16,
),
)
.unwrap(),
str_to_fr(
"0x190d33b12f986f961e10c0ee44d8b9af11be25588cad89d416118e4bf4ebe80c",
16,
),
)
.unwrap(),
str_to_fr(
"0x22f98aa9ce704152ac17354914ad73ed1167ae6596af510aa5b3649325e06c92",
16,
),
)
.unwrap(),
str_to_fr(
"0x2a7c7c9b6ce5880b9f6f228d72bf6a575a526f29c66ecceef8b753d38bba7323",
16,
),
)
.unwrap(),
str_to_fr(
"0x2e8186e558698ec1c67af9c14d463ffc470043c9c2988b954d75dd643f36b992",
16,
),
)
.unwrap(),
str_to_fr(
"0x0f57c5571e9a4eab49e2c8cf050dae948aef6ead647392273546249d1c1ff10f",
16,
),
)
.unwrap(),
str_to_fr(
"0x1830ee67b5fb554ad5f63d4388800e1cfe78e310697d46e43c9ce36134f72cca",
16,
),
)
.unwrap(),
];
let expected_identity_path_index: Vec<u8> =

View File

@@ -4,10 +4,11 @@ mod test {
circom_from_folder, vk_from_folder, zkey_from_folder, Fr, TEST_RESOURCES_FOLDER,
TEST_TREE_HEIGHT,
};
use rln::poseidon_hash::poseidon_hash;
use rln::hashers::{hash_to_field, poseidon_hash};
use rln::poseidon_tree::PoseidonTree;
use rln::protocol::*;
use rln::utils::str_to_fr;
use utils::{ZerokitMerkleProof, ZerokitMerkleTree};
// Input generated with https://github.com/oskarth/zk-kit/commit/b6a872f7160c7c14e10a0ea40acab99cbb23c9a8
const WITNESS_JSON_15: &str = r#"
@@ -184,6 +185,7 @@ mod test {
"0x1984f2e01184aef5cb974640898a5f5c25556554e2b06d99d4841badb8b198cd",
16
)
.unwrap()
);
} else if TEST_TREE_HEIGHT == 19 {
assert_eq!(
@@ -192,6 +194,7 @@ mod test {
"0x219ceb53f2b1b7a6cf74e80d50d44d68ecb4a53c6cc65b25593c8d56343fb1fe",
16
)
.unwrap()
);
} else if TEST_TREE_HEIGHT == 20 {
assert_eq!(
@@ -200,6 +203,7 @@ mod test {
"0x21947ffd0bce0c385f876e7c97d6a42eec5b1fe935aab2f01c1f8a8cbcc356d2",
16
)
.unwrap()
);
}
@@ -213,63 +217,78 @@ mod test {
str_to_fr(
"0x0000000000000000000000000000000000000000000000000000000000000000",
16,
),
)
.unwrap(),
str_to_fr(
"0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864",
16,
),
)
.unwrap(),
str_to_fr(
"0x1069673dcdb12263df301a6ff584a7ec261a44cb9dc68df067a4774460b1f1e1",
16,
),
)
.unwrap(),
str_to_fr(
"0x18f43331537ee2af2e3d758d50f72106467c6eea50371dd528d57eb2b856d238",
16,
),
)
.unwrap(),
str_to_fr(
"0x07f9d837cb17b0d36320ffe93ba52345f1b728571a568265caac97559dbc952a",
16,
),
)
.unwrap(),
str_to_fr(
"0x2b94cf5e8746b3f5c9631f4c5df32907a699c58c94b2ad4d7b5cec1639183f55",
16,
),
)
.unwrap(),
str_to_fr(
"0x2dee93c5a666459646ea7d22cca9e1bcfed71e6951b953611d11dda32ea09d78",
16,
),
)
.unwrap(),
str_to_fr(
"0x078295e5a22b84e982cf601eb639597b8b0515a88cb5ac7fa8a4aabe3c87349d",
16,
),
)
.unwrap(),
str_to_fr(
"0x2fa5e5f18f6027a6501bec864564472a616b2e274a41211a444cbe3a99f3cc61",
16,
),
)
.unwrap(),
str_to_fr(
"0x0e884376d0d8fd21ecb780389e941f66e45e7acce3e228ab3e2156a614fcd747",
16,
),
)
.unwrap(),
str_to_fr(
"0x1b7201da72494f1e28717ad1a52eb469f95892f957713533de6175e5da190af2",
16,
),
)
.unwrap(),
str_to_fr(
"0x1f8d8822725e36385200c0b201249819a6e6e1e4650808b5bebc6bface7d7636",
16,
),
)
.unwrap(),
str_to_fr(
"0x2c5d82f66c914bafb9701589ba8cfcfb6162b0a12acf88a8d0879a0471b5f85a",
16,
),
)
.unwrap(),
str_to_fr(
"0x14c54148a0940bb820957f5adf3fa1134ef5c4aaa113f4646458f270e0bfbfd0",
16,
),
)
.unwrap(),
str_to_fr(
"0x190d33b12f986f961e10c0ee44d8b9af11be25588cad89d416118e4bf4ebe80c",
16,
),
)
.unwrap(),
];
let mut expected_identity_path_index: Vec<u8> =
@@ -281,19 +300,23 @@ mod test {
str_to_fr(
"0x22f98aa9ce704152ac17354914ad73ed1167ae6596af510aa5b3649325e06c92",
16,
),
)
.unwrap(),
str_to_fr(
"0x2a7c7c9b6ce5880b9f6f228d72bf6a575a526f29c66ecceef8b753d38bba7323",
16,
),
)
.unwrap(),
str_to_fr(
"0x2e8186e558698ec1c67af9c14d463ffc470043c9c2988b954d75dd643f36b992",
16,
),
)
.unwrap(),
str_to_fr(
"0x0f57c5571e9a4eab49e2c8cf050dae948aef6ead647392273546249d1c1ff10f",
16,
),
)
.unwrap(),
]);
expected_identity_path_index.append(&mut vec![0, 0, 0, 0]);
}
@@ -302,7 +325,8 @@ mod test {
expected_path_elements.append(&mut vec![str_to_fr(
"0x1830ee67b5fb554ad5f63d4388800e1cfe78e310697d46e43c9ce36134f72cca",
16,
)]);
)
.unwrap()]);
expected_identity_path_index.append(&mut vec![0]);
}
@@ -319,7 +343,7 @@ mod test {
// We generate all relevant keys
let proving_key = zkey_from_folder(TEST_RESOURCES_FOLDER).unwrap();
let verification_key = vk_from_folder(TEST_RESOURCES_FOLDER).unwrap();
let builder = circom_from_folder(TEST_RESOURCES_FOLDER);
let builder = circom_from_folder(TEST_RESOURCES_FOLDER).unwrap();
// We compute witness from the json input example
let mut witness_json: &str = "";
@@ -334,10 +358,12 @@ mod test {
let rln_witness = rln_witness_from_json(witness_json);
// Let's generate a zkSNARK proof
let proof = generate_proof(builder, &proving_key, &rln_witness).unwrap();
let rln_witness_unwrapped = rln_witness.unwrap();
let proof_values = proof_values_from_witness(&rln_witness);
// Let's generate a zkSNARK proof
let proof = generate_proof(builder, &proving_key, &rln_witness_unwrapped).unwrap();
let proof_values = proof_values_from_witness(&rln_witness_unwrapped);
// Let's verify the proof
let verified = verify_proof(&verification_key, &proof, &proof_values);
@@ -378,7 +404,7 @@ mod test {
// We generate all relevant keys
let proving_key = zkey_from_folder(TEST_RESOURCES_FOLDER).unwrap();
let verification_key = vk_from_folder(TEST_RESOURCES_FOLDER).unwrap();
let builder = circom_from_folder(TEST_RESOURCES_FOLDER);
let builder = circom_from_folder(TEST_RESOURCES_FOLDER).unwrap();
// Let's generate a zkSNARK proof
let proof = generate_proof(builder, &proving_key, &rln_witness).unwrap();
@@ -404,10 +430,10 @@ mod test {
witness_json = WITNESS_JSON_20;
}
let rln_witness = rln_witness_from_json(witness_json);
let rln_witness = rln_witness_from_json(witness_json).unwrap();
let ser = serialize_witness(&rln_witness);
let (deser, _) = deserialize_witness(&ser);
let ser = serialize_witness(&rln_witness).unwrap();
let (deser, _) = deserialize_witness(&ser).unwrap();
assert_eq!(rln_witness, deser);
// We test Proof values serialization
@@ -429,11 +455,13 @@ mod test {
let expected_identity_secret_hash_seed_phrase = str_to_fr(
"0x20df38f3f00496f19fe7c6535492543b21798ed7cb91aebe4af8012db884eda3",
16,
);
)
.unwrap();
let expected_id_commitment_seed_phrase = str_to_fr(
"0x1223a78a5d66043a7f9863e14507dc80720a5602b2a894923e5b5147d5a9c325",
16,
);
)
.unwrap();
assert_eq!(
identity_secret_hash,
@@ -449,11 +477,13 @@ mod test {
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
);
)
.unwrap();
let expected_id_commitment_seed_bytes = str_to_fr(
"0xbf16d2b5c0d6f9d9d561e05bfca16a81b4b873bb063508fae360d8c74cef51f",
16,
);
)
.unwrap();
assert_eq!(
identity_secret_hash,

View File

@@ -1,10 +1,11 @@
#[cfg(test)]
mod test {
use ark_std::{rand::thread_rng, UniformRand};
use rand::Rng;
use rln::circuit::{TEST_RESOURCES_FOLDER, TEST_TREE_HEIGHT};
use rln::poseidon_hash::poseidon_hash;
use rln::protocol::{compute_tree_root, deserialize_identity_tuple, hash_to_field};
use rln::public::RLN;
use rln::circuit::{Fr, TEST_RESOURCES_FOLDER, TEST_TREE_HEIGHT};
use rln::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS};
use rln::protocol::{compute_tree_root, deserialize_identity_tuple};
use rln::public::{hash as public_hash, poseidon_hash as public_poseidon_hash, RLN};
use rln::utils::*;
use std::io::Cursor;
@@ -15,11 +16,11 @@ mod test {
let leaf_index = 3;
let input_buffer = Cursor::new(TEST_RESOURCES_FOLDER);
let mut rln = RLN::new(tree_height, input_buffer);
let mut rln = RLN::new(tree_height, input_buffer).unwrap();
// generate identity
let identity_secret_hash = hash_to_field(b"test-merkle-proof");
let id_commitment = poseidon_hash(&vec![identity_secret_hash]);
let id_commitment = utils_poseidon_hash(&vec![identity_secret_hash]);
// We pass id_commitment as Read buffer to RLN's set_leaf
let mut buffer = Cursor::new(fr_to_bytes_le(&id_commitment));
@@ -37,6 +38,7 @@ mod test {
"0x1984f2e01184aef5cb974640898a5f5c25556554e2b06d99d4841badb8b198cd",
16
)
.unwrap()
);
} else if TEST_TREE_HEIGHT == 19 {
assert_eq!(
@@ -45,6 +47,7 @@ mod test {
"0x219ceb53f2b1b7a6cf74e80d50d44d68ecb4a53c6cc65b25593c8d56343fb1fe",
16
)
.unwrap()
);
} else if TEST_TREE_HEIGHT == 20 {
assert_eq!(
@@ -53,6 +56,7 @@ mod test {
"0x21947ffd0bce0c385f876e7c97d6a42eec5b1fe935aab2f01c1f8a8cbcc356d2",
16
)
.unwrap()
);
}
@@ -61,71 +65,86 @@ mod test {
rln.get_proof(leaf_index, &mut buffer).unwrap();
let buffer_inner = buffer.into_inner();
let (path_elements, read) = bytes_le_to_vec_fr(&buffer_inner);
let (identity_path_index, _) = bytes_le_to_vec_u8(&buffer_inner[read..].to_vec());
let (path_elements, read) = bytes_le_to_vec_fr(&buffer_inner).unwrap();
let (identity_path_index, _) = bytes_le_to_vec_u8(&buffer_inner[read..].to_vec()).unwrap();
// We check correct computation of the path and indexes
let mut expected_path_elements = vec![
str_to_fr(
"0x0000000000000000000000000000000000000000000000000000000000000000",
16,
),
)
.unwrap(),
str_to_fr(
"0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864",
16,
),
)
.unwrap(),
str_to_fr(
"0x1069673dcdb12263df301a6ff584a7ec261a44cb9dc68df067a4774460b1f1e1",
16,
),
)
.unwrap(),
str_to_fr(
"0x18f43331537ee2af2e3d758d50f72106467c6eea50371dd528d57eb2b856d238",
16,
),
)
.unwrap(),
str_to_fr(
"0x07f9d837cb17b0d36320ffe93ba52345f1b728571a568265caac97559dbc952a",
16,
),
)
.unwrap(),
str_to_fr(
"0x2b94cf5e8746b3f5c9631f4c5df32907a699c58c94b2ad4d7b5cec1639183f55",
16,
),
)
.unwrap(),
str_to_fr(
"0x2dee93c5a666459646ea7d22cca9e1bcfed71e6951b953611d11dda32ea09d78",
16,
),
)
.unwrap(),
str_to_fr(
"0x078295e5a22b84e982cf601eb639597b8b0515a88cb5ac7fa8a4aabe3c87349d",
16,
),
)
.unwrap(),
str_to_fr(
"0x2fa5e5f18f6027a6501bec864564472a616b2e274a41211a444cbe3a99f3cc61",
16,
),
)
.unwrap(),
str_to_fr(
"0x0e884376d0d8fd21ecb780389e941f66e45e7acce3e228ab3e2156a614fcd747",
16,
),
)
.unwrap(),
str_to_fr(
"0x1b7201da72494f1e28717ad1a52eb469f95892f957713533de6175e5da190af2",
16,
),
)
.unwrap(),
str_to_fr(
"0x1f8d8822725e36385200c0b201249819a6e6e1e4650808b5bebc6bface7d7636",
16,
),
)
.unwrap(),
str_to_fr(
"0x2c5d82f66c914bafb9701589ba8cfcfb6162b0a12acf88a8d0879a0471b5f85a",
16,
),
)
.unwrap(),
str_to_fr(
"0x14c54148a0940bb820957f5adf3fa1134ef5c4aaa113f4646458f270e0bfbfd0",
16,
),
)
.unwrap(),
str_to_fr(
"0x190d33b12f986f961e10c0ee44d8b9af11be25588cad89d416118e4bf4ebe80c",
16,
),
)
.unwrap(),
];
let mut expected_identity_path_index: Vec<u8> =
@@ -137,19 +156,23 @@ mod test {
str_to_fr(
"0x22f98aa9ce704152ac17354914ad73ed1167ae6596af510aa5b3649325e06c92",
16,
),
)
.unwrap(),
str_to_fr(
"0x2a7c7c9b6ce5880b9f6f228d72bf6a575a526f29c66ecceef8b753d38bba7323",
16,
),
)
.unwrap(),
str_to_fr(
"0x2e8186e558698ec1c67af9c14d463ffc470043c9c2988b954d75dd643f36b992",
16,
),
)
.unwrap(),
str_to_fr(
"0x0f57c5571e9a4eab49e2c8cf050dae948aef6ead647392273546249d1c1ff10f",
16,
),
)
.unwrap(),
]);
expected_identity_path_index.append(&mut vec![0, 0, 0, 0]);
}
@@ -158,7 +181,8 @@ mod test {
expected_path_elements.append(&mut vec![str_to_fr(
"0x1830ee67b5fb554ad5f63d4388800e1cfe78e310697d46e43c9ce36134f72cca",
16,
)]);
)
.unwrap()]);
expected_identity_path_index.append(&mut vec![0]);
}
@@ -192,11 +216,13 @@ mod test {
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
);
)
.unwrap();
let expected_id_commitment_seed_bytes = str_to_fr(
"0xbf16d2b5c0d6f9d9d561e05bfca16a81b4b873bb063508fae360d8c74cef51f",
16,
);
)
.unwrap();
assert_eq!(
identity_secret_hash,
@@ -225,19 +251,23 @@ mod test {
let expected_identity_trapdoor_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
);
)
.unwrap();
let expected_identity_nullifier_seed_bytes = str_to_fr(
"0x1f18714c7bc83b5bca9e89d404cf6f2f585bc4c0f7ed8b53742b7e2b298f50b4",
16,
);
)
.unwrap();
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x2aca62aaa7abaf3686fff2caf00f55ab9462dc12db5b5d4bcf3994e671f8e521",
16,
);
)
.unwrap();
let expected_id_commitment_seed_bytes = str_to_fr(
"0x68b66aa0a8320d2e56842581553285393188714c48f9b17acd198b4f1734c5c",
16,
);
)
.unwrap();
assert_eq!(identity_trapdoor, expected_identity_trapdoor_seed_bytes);
assert_eq!(identity_nullifier, expected_identity_nullifier_seed_bytes);
@@ -250,15 +280,13 @@ mod test {
#[test]
fn test_hash_to_field() {
let rln = RLN::default();
let mut rng = rand::thread_rng();
let mut rng = thread_rng();
let signal: [u8; 32] = rng.gen();
let mut input_buffer = Cursor::new(&signal);
let mut output_buffer = Cursor::new(Vec::<u8>::new());
rln.hash(&mut input_buffer, &mut output_buffer).unwrap();
public_hash(&mut input_buffer, &mut output_buffer).unwrap();
let serialized_hash = output_buffer.into_inner();
let (hash1, _) = bytes_le_to_fr(&serialized_hash);
@@ -266,4 +294,24 @@ mod test {
assert_eq!(hash1, hash2);
}
#[test]
fn test_poseidon_hash() {
let mut rng = thread_rng();
let number_of_inputs = rng.gen_range(1..ROUND_PARAMS.len());
let mut inputs = Vec::with_capacity(number_of_inputs);
for _ in 0..number_of_inputs {
inputs.push(Fr::rand(&mut rng));
}
let expected_hash = utils_poseidon_hash(&inputs);
let mut input_buffer = Cursor::new(vec_fr_to_bytes_le(&inputs).unwrap());
let mut output_buffer = Cursor::new(Vec::<u8>::new());
public_poseidon_hash(&mut input_buffer, &mut output_buffer).unwrap();
let serialized_hash = output_buffer.into_inner();
let (hash, _) = bytes_le_to_fr(&serialized_hash);
assert_eq!(hash, expected_hash);
}
}
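For reference, a round-trip sketch of the little-endian serialization helpers exercised by these tests. This is hypothetical: it assumes the same imports as the test module above, and that `bytes_le_to_vec_fr` exists as the inverse of `vec_fr_to_bytes_le`, matching the naming of the other helpers.

```rust
// Hypothetical round-trip test; assumes Fr, vec_fr_to_bytes_le, and an
// inverse helper bytes_le_to_vec_fr are in scope, as in the module above.
#[test]
fn fr_vec_serialization_roundtrip() {
    let inputs = vec![Fr::from(1u64), Fr::from(2u64)];
    let bytes = vec_fr_to_bytes_le(&inputs).unwrap();
    let (decoded, _bytes_read) = bytes_le_to_vec_fr(&bytes).unwrap();
    assert_eq!(decoded, inputs); // decoding recovers the original vector
}
```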

View File

@@ -2,6 +2,7 @@
name = "semaphore-wrapper"
version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -11,13 +12,12 @@ dylib = [ "wasmer/dylib", "wasmer-engine-dylib", "wasmer-compiler-cranelift" ]
[dependencies]
ark-bn254 = { version = "0.3.0" }
ark-circom = { git = "https://github.com/gakonst/ark-circom", features=["circom-2"] }
ark-circom = { git = "https://github.com/gakonst/ark-circom", features=["circom-2"], rev = "35ce5a9" }
ark-ec = { version = "0.3.0", default-features = false, features = ["parallel"] }
ark-groth16 = { git = "https://github.com/arkworks-rs/groth16", rev = "765817f", features = ["parallel"] }
ark-relations = { version = "0.3.0", default-features = false }
ark-std = { version = "0.3.0", default-features = false, features = ["parallel"] }
color-eyre = "0.5"
num-bigint = { version = "0.4", default-features = false, features = ["rand"] }
color-eyre = "0.6.1"
once_cell = "1.8"
rand = "0.8.4"
semaphore = { git = "https://github.com/worldcoin/semaphore-rs", rev = "ee658c2"}
@@ -32,10 +32,10 @@ rand_chacha = "0.3.1"
serde_json = "1.0.79"
[build-dependencies]
color-eyre = "0.5"
color-eyre = "0.6.1"
wasmer = { version = "2.0" }
wasmer-engine-dylib = { version = "2.2.1", optional = true }
wasmer-compiler-cranelift = { version = "2.2.1", optional = true }
wasmer-compiler-cranelift = { version = "3.1.1", optional = true }
[profile.release]
codegen-units = 1
@@ -47,4 +47,4 @@ opt-level = 3
# Dependencies are optimized, even in a dev build. This improves dev performance
# while having negligible impact on incremental build times.
[profile.dev.package."*"]
opt-level = 3
opt-level = 3

View File

@@ -8,3 +8,11 @@ Goal is also to provide a basic FFI around protocol.rs, which is currently not
in scope for that project.
See that project for more information.
## Build and Test
To build and test, run the following commands within the module folder:
```bash
cargo make build
cargo make test
```

View File

@@ -37,7 +37,7 @@ fn build_circuit() -> Result<()> {
.current_dir("./vendor/semaphore")
.status()?
.success()
.then(|| ())
.then_some(())
.ok_or(eyre!("process returned failure"))?;
Ok(())
};
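For context, a minimal sketch of the idiom this hunk adopts: `bool::then_some` builds the `Option` directly instead of going through a closure.

```rust
// `then_some(v)` returns Some(v) when the bool is true, None otherwise;
// it replaces `then(|| v)` whenever the value needs no lazy computation.
fn main() {
    assert_eq!(true.then_some(()), Some(()));
    assert_eq!(false.then_some(()), None::<()>);
}
```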

View File

@@ -50,7 +50,7 @@ fn from_dylib(path: &Path) -> Mutex<WitnessCalculator> {
#[must_use]
pub fn zkey() -> &'static (ProvingKey<Bn254>, ConstraintMatrices<Fr>) {
&*ZKEY
&ZKEY
}
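A sketch of why the explicit reborrow can be dropped, assuming `ZKEY` is a `once_cell`-style lazy static as the surrounding code suggests: deref coercion already turns a reference to the lazy cell into `&T`.

```rust
use once_cell::sync::Lazy;

// Hypothetical lazy static standing in for ZKEY.
static VALUE: Lazy<Vec<u8>> = Lazy::new(|| vec![1, 2, 3]);

// `&VALUE` coerces from &Lazy<Vec<u8>> to &Vec<u8> via Deref,
// so the explicit `&*VALUE` form is redundant.
fn get() -> &'static Vec<u8> {
    &VALUE
}

fn main() {
    assert_eq!(get(), &vec![1, 2, 3]);
}
```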
#[cfg(feature = "dylib")]

View File

@@ -12,7 +12,7 @@ use ark_groth16::{
};
use ark_relations::r1cs::SynthesisError;
use ark_std::UniformRand;
use color_eyre::Result;
use color_eyre::{Report, Result};
use ethers_core::types::U256;
use rand::{thread_rng, Rng};
use semaphore::{
@@ -89,7 +89,7 @@ pub enum ProofError {
#[error("Error reading circuit key: {0}")]
CircuitKeyError(#[from] std::io::Error),
#[error("Error producing witness: {0}")]
WitnessError(color_eyre::Report),
WitnessError(Report),
#[error("Error producing proof: {0}")]
SynthesisError(#[from] SynthesisError),
#[error("Error converting public input: {0}")]

View File

@@ -2,13 +2,15 @@
name = "utils"
version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
[dependencies]
ark-ff = { version = "0.3.0", default-features = false, features = ["asm"] }
num-bigint = { version = "0.4.3", default-features = false, features = ["rand"] }
ark-ff = { version = "=0.4.1", default-features = false, features = ["asm"] }
num-bigint = { version = "=0.4.3", default-features = false, features = ["rand"] }
color-eyre = "=0.6.2"
[dev-dependencies]
ark-bn254 = { version = "0.3.0" }
ark-bn254 = "=0.4.0"
num-traits = "0.2.11"
hex-literal = "0.3.4"
tiny-keccak = { version = "2.0.2", features = ["keccak"] }

View File

@@ -0,0 +1,289 @@
use crate::merkle_tree::{Hasher, ZerokitMerkleProof, ZerokitMerkleTree};
use color_eyre::{Report, Result};
use std::{
cmp::max,
fmt::Debug,
iter::{once, repeat, successors},
};
////////////////////////////////////////////////////////////
/// Full Merkle Tree Implementation
////////////////////////////////////////////////////////////
/// Merkle tree with all leaf and intermediate hashes stored
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct FullMerkleTree<H: Hasher> {
/// The depth of the tree, i.e. the number of levels from leaf to root
depth: usize,
/// The nodes cached from the empty part of the tree (where leaves are set to default).
/// Since the rightmost part of the tree is usually changed much later than its creation,
/// we can prove accumulation of elements in the leftmost part, with no need to initialize the full tree
/// and by caching a few intermediate nodes up to the root computed from default leaves
cached_nodes: Vec<H::Fr>,
/// The tree nodes
nodes: Vec<H::Fr>,
// The next available (i.e., never used) tree index. Equivalently, the number of leaves added to the tree
// (deletions leave next_index unchanged)
next_index: usize,
}
/// Element of a Merkle proof
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum FullMerkleBranch<H: Hasher> {
/// Left branch taken, value is the right sibling hash.
Left(H::Fr),
/// Right branch taken, value is the left sibling hash.
Right(H::Fr),
}
/// Merkle proof path, bottom to top.
#[derive(Clone, PartialEq, Eq)]
pub struct FullMerkleProof<H: Hasher>(pub Vec<FullMerkleBranch<H>>);
/// Implementations
impl<H: Hasher> ZerokitMerkleTree<H> for FullMerkleTree<H> {
type Proof = FullMerkleProof<H>;
fn default(depth: usize) -> Self {
FullMerkleTree::<H>::new(depth, H::default_leaf())
}
/// Creates a new `MerkleTree`
/// depth - the height of the tree made only of hash nodes. 2^depth is the maximum number of leaf hash nodes
fn new(depth: usize, initial_leaf: H::Fr) -> Self {
// Compute cache node values, leaf to root
let cached_nodes = successors(Some(initial_leaf), |prev| Some(H::hash(&[*prev, *prev])))
.take(depth + 1)
.collect::<Vec<_>>();
// Compute node values
let nodes = cached_nodes
.iter()
.rev()
.enumerate()
.flat_map(|(levels, hash)| repeat(hash).take(1 << levels))
.cloned()
.collect::<Vec<_>>();
debug_assert!(nodes.len() == (1 << (depth + 1)) - 1);
let next_index = 0;
Self {
depth,
cached_nodes,
nodes,
next_index,
}
}
// Returns the depth of the tree
fn depth(&self) -> usize {
self.depth
}
// Returns the capacity of the tree, i.e. the maximum number of accumulatable leaves
fn capacity(&self) -> usize {
1 << self.depth
}
// Returns the total number of leaves set
fn leaves_set(&mut self) -> usize {
self.next_index
}
#[must_use]
// Returns the root of the tree
fn root(&self) -> H::Fr {
self.nodes[0]
}
// Sets a leaf at the specified tree index
fn set(&mut self, leaf: usize, hash: H::Fr) -> Result<()> {
self.set_range(leaf, once(hash))?;
self.next_index = max(self.next_index, leaf + 1);
Ok(())
}
// Sets tree nodes, starting from start index
// Function specific to the FullMerkleTree implementation
fn set_range<I: IntoIterator<Item = H::Fr>>(&mut self, start: usize, hashes: I) -> Result<()> {
let index = self.capacity() + start - 1;
let mut count = 0;
// first count the number of hashes and check that they fit in the tree
// then insert into the tree
let hashes = hashes.into_iter().collect::<Vec<_>>();
if hashes.len() + start > self.capacity() {
return Err(Report::msg("provided hashes do not fit in the tree"));
}
hashes.into_iter().for_each(|hash| {
self.nodes[index + count] = hash;
count += 1;
});
if count != 0 {
self.update_nodes(index, index + (count - 1))?;
self.next_index = max(self.next_index, start + count);
}
Ok(())
}
// Sets a leaf at the next available index
fn update_next(&mut self, leaf: H::Fr) -> Result<()> {
self.set(self.next_index, leaf)?;
Ok(())
}
// Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
fn delete(&mut self, index: usize) -> Result<()> {
// We reset the leaf only if we previously set a leaf at that index
if index < self.next_index {
self.set(index, H::default_leaf())?;
}
Ok(())
}
// Computes a Merkle proof for the leaf at the specified index
fn proof(&self, leaf: usize) -> Result<FullMerkleProof<H>> {
if leaf >= self.capacity() {
return Err(Report::msg("index exceeds set size"));
}
let mut index = self.capacity() + leaf - 1;
let mut path = Vec::with_capacity(self.depth + 1);
while let Some(parent) = self.parent(index) {
// Add proof for node at index to parent
path.push(match index & 1 {
1 => FullMerkleBranch::Left(self.nodes[index + 1]),
0 => FullMerkleBranch::Right(self.nodes[index - 1]),
_ => unreachable!(),
});
index = parent;
}
Ok(FullMerkleProof(path))
}
// Verifies a Merkle proof with respect to the input leaf and the tree root
fn verify(&self, hash: &H::Fr, proof: &FullMerkleProof<H>) -> Result<bool> {
Ok(proof.compute_root_from(hash) == self.root())
}
}
impl<H: Hasher> FullMerkleTree<H>
where
H: Hasher,
{
// Utilities for updating the tree nodes
/// For a given node index, return the parent node index
/// Returns None if there is no parent (root node)
fn parent(&self, index: usize) -> Option<usize> {
if index == 0 {
None
} else {
Some(((index + 1) >> 1) - 1)
}
}
/// For a given node index, return index of the first (left) child.
fn first_child(&self, index: usize) -> usize {
(index << 1) + 1
}
fn levels(&self, index: usize) -> usize {
// `n.next_power_of_two()` will return `n` iff `n` is a power of two.
// The extra offset corrects this.
(index + 2).next_power_of_two().trailing_zeros() as usize - 1
}
fn update_nodes(&mut self, start: usize, end: usize) -> Result<()> {
if self.levels(start) != self.levels(end) {
return Err(Report::msg("self.levels(start) != self.levels(end)"));
}
if let (Some(start), Some(end)) = (self.parent(start), self.parent(end)) {
for parent in start..=end {
let child = self.first_child(parent);
self.nodes[parent] = H::hash(&[self.nodes[child], self.nodes[child + 1]]);
}
self.update_nodes(start, end)?;
}
Ok(())
}
}
impl<H: Hasher> ZerokitMerkleProof<H> for FullMerkleProof<H> {
type Index = u8;
#[must_use]
// Returns the length of a Merkle proof
fn length(&self) -> usize {
self.0.len()
}
/// Computes the leaf index corresponding to a Merkle proof
#[must_use]
fn leaf_index(&self) -> usize {
self.0.iter().rev().fold(0, |index, branch| match branch {
FullMerkleBranch::Left(_) => index << 1,
FullMerkleBranch::Right(_) => (index << 1) + 1,
})
}
#[must_use]
/// Returns the path elements forming a Merkle proof
fn get_path_elements(&self) -> Vec<H::Fr> {
self.0
.iter()
.map(|x| match x {
FullMerkleBranch::Left(value) | FullMerkleBranch::Right(value) => *value,
})
.collect()
}
/// Returns the path indexes forming a Merkle proof
#[must_use]
fn get_path_index(&self) -> Vec<Self::Index> {
self.0
.iter()
.map(|branch| match branch {
FullMerkleBranch::Left(_) => 0,
FullMerkleBranch::Right(_) => 1,
})
.collect()
}
/// Computes the corresponding Merkle root by iteratively hashing the Merkle proof path with a given input leaf
#[must_use]
fn compute_root_from(&self, hash: &H::Fr) -> H::Fr {
self.0.iter().fold(*hash, |hash, branch| match branch {
FullMerkleBranch::Left(sibling) => H::hash(&[hash, *sibling]),
FullMerkleBranch::Right(sibling) => H::hash(&[*sibling, hash]),
})
}
}
// Debug formatting for printing a (Full) Merkle Proof Branch
impl<H> Debug for FullMerkleBranch<H>
where
H: Hasher,
H::Fr: Debug,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Left(arg0) => f.debug_tuple("Left").field(arg0).finish(),
Self::Right(arg0) => f.debug_tuple("Right").field(arg0).finish(),
}
}
}
// Debug formatting for printing a (Full) Merkle Proof
impl<H> Debug for FullMerkleProof<H>
where
H: Hasher,
H::Fr: Debug,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_tuple("Proof").field(&self.0).finish()
}
}
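A standalone sketch of the array indexing used by `parent`, `first_child`, and `levels` above: a depth-2 tree occupies a single 7-element array, with the root at index 0 and the leaves at indexes 3 through 6.

```rust
fn parent(index: usize) -> Option<usize> {
    if index == 0 {
        None // the root has no parent
    } else {
        Some(((index + 1) >> 1) - 1)
    }
}

fn first_child(index: usize) -> usize {
    (index << 1) + 1
}

fn levels(index: usize) -> usize {
    (index + 2).next_power_of_two().trailing_zeros() as usize - 1
}

fn main() {
    assert_eq!(parent(4), Some(1));  // node 4 hangs off node 1
    assert_eq!(first_child(1), 3);   // node 1's children are 3 and 4
    assert_eq!(levels(0), 0);        // the root sits at level 0
    assert_eq!(levels(3), 2);        // leaves of a depth-2 tree sit at level 2
}
```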

View File

@@ -13,21 +13,13 @@
//! * Disk-based storage backend (using mmapped files should be easy)
//! * Implement serialization for tree and Merkle proof
#![allow(dead_code)]
use std::collections::HashMap;
use std::io;
use std::{
cmp::max,
fmt::Debug,
iter::{once, repeat, successors},
};
use color_eyre::Result;
/// In the Hasher trait we define the node type, the default leaf,
/// and the hash function used to initialize a Merkle tree implementation
pub trait Hasher {
/// Type of the leaf and tree node
type Fr: Copy + Clone + Eq;
type Fr: Clone + Copy + Eq;
/// Returns the default tree leaf
fn default_leaf() -> Self::Fr;
@@ -36,539 +28,39 @@ pub trait Hasher {
fn hash(input: &[Self::Fr]) -> Self::Fr;
}
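A minimal sketch of implementing the trait, using a toy (non-cryptographic) hash; the Keccak256 test hasher at the end of this diff is the in-repo example.

```rust
use utils::Hasher; // assumes the utils crate from this repo is a dependency

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct XorHasher;

impl Hasher for XorHasher {
    type Fr = u64;

    fn default_leaf() -> Self::Fr {
        0
    }

    // Toy mix: rotate-and-xor fold over the inputs (illustrative only).
    fn hash(input: &[Self::Fr]) -> Self::Fr {
        input.iter().fold(0u64, |acc, x| acc.rotate_left(1) ^ x)
    }
}
```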
////////////////////////////////////////////////////////////
/// Optimal Merkle Tree Implementation
////////////////////////////////////////////////////////////
/// The Merkle tree structure
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OptimalMerkleTree<H>
/// In the ZerokitMerkleTree trait we define the methods that every Merkle tree
/// implementation must provide, including OptimalMerkleTree, FullMerkleTree, and Pmtree
pub trait ZerokitMerkleTree<H: Hasher>
where
H: Hasher,
{
/// The depth of the tree, i.e. the number of levels from leaf to root
depth: usize,
type Proof;
/// The nodes cached from the empty part of the tree (where leaves are set to default).
/// Since the rightmost part of the tree is usually changed much later than its creation,
/// we can prove accumulation of elements in the leftmost part, with no need to initialize the full tree
/// and by caching a few intermediate nodes up to the root computed from default leaves
cached_nodes: Vec<H::Fr>,
/// The tree nodes
nodes: HashMap<(usize, usize), H::Fr>,
// The next available (i.e., never used) tree index. Equivalently, the number of leaves added to the tree
// (deletions leave next_index unchanged)
next_index: usize,
fn default(depth: usize) -> Self;
fn new(depth: usize, default_leaf: H::Fr) -> Self;
fn depth(&self) -> usize;
fn capacity(&self) -> usize;
fn leaves_set(&mut self) -> usize;
fn root(&self) -> H::Fr;
fn set(&mut self, index: usize, leaf: H::Fr) -> Result<()>;
fn set_range<I>(&mut self, start: usize, leaves: I) -> Result<()>
where
I: IntoIterator<Item = H::Fr>;
fn update_next(&mut self, leaf: H::Fr) -> Result<()>;
fn delete(&mut self, index: usize) -> Result<()>;
fn proof(&self, index: usize) -> Result<Self::Proof>;
fn verify(&self, leaf: &H::Fr, witness: &Self::Proof) -> Result<bool>;
}
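A minimal usage sketch of the trait above, generic over any `Hasher`; the method calls mirror the signatures just listed, and `FullMerkleTree` is used as the concrete backend.

```rust
use color_eyre::Result;
use utils::{FullMerkleTree, Hasher, ZerokitMerkleTree};

fn demo<H: Hasher>(leaf: H::Fr) -> Result<bool> {
    // depth-3 tree: 2^3 = 8 leaf slots, all initialized to the default leaf
    let mut tree = FullMerkleTree::<H>::default(3);
    tree.update_next(leaf)?;    // insert at the next free index (0)
    let proof = tree.proof(0)?; // Merkle proof for leaf 0
    tree.verify(&leaf, &proof)  // Ok(true) iff the proof matches the root
}
```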
/// The Merkle proof
/// Contains a vector of (node, branch_index) that defines the proof path elements and branch direction (1 or 0)
#[derive(Clone, PartialEq, Eq)]
pub struct OptimalMerkleProof<H: Hasher>(pub Vec<(H::Fr, u8)>);
/// Implementations
impl<H: Hasher> OptimalMerkleTree<H> {
pub fn default(depth: usize) -> Self {
OptimalMerkleTree::<H>::new(depth, H::default_leaf())
}
/// Creates a new `MerkleTree`
/// depth - the height of the tree made only of hash nodes. 2^depth is the maximum number of leaf hash nodes
pub fn new(depth: usize, default_leaf: H::Fr) -> Self {
let mut cached_nodes: Vec<H::Fr> = Vec::with_capacity(depth + 1);
cached_nodes.push(default_leaf);
for i in 0..depth {
cached_nodes.push(H::hash(&[cached_nodes[i]; 2]));
}
cached_nodes.reverse();
OptimalMerkleTree {
cached_nodes: cached_nodes.clone(),
depth: depth,
nodes: HashMap::new(),
next_index: 0,
}
}
// Returns the depth of the tree
pub fn depth(&self) -> usize {
self.depth
}
// Returns the capacity of the tree, i.e. the maximum number of accumulatable leaves
pub fn capacity(&self) -> usize {
1 << self.depth
}
// Returns the total number of leaves set
pub fn leaves_set(&mut self) -> usize {
self.next_index
}
#[must_use]
// Returns the root of the tree
pub fn root(&self) -> H::Fr {
self.get_node(0, 0)
}
// Sets a leaf at the specified tree index
pub fn set(&mut self, index: usize, leaf: H::Fr) -> io::Result<()> {
if index >= self.capacity() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"index exceeds set size",
));
}
self.nodes.insert((self.depth, index), leaf);
self.recalculate_from(index);
self.next_index = max(self.next_index, index + 1);
Ok(())
}
// Sets multiple leaves from the specified tree index
pub fn set_range<I: IntoIterator<Item = H::Fr>>(
&mut self,
start: usize,
leaves: I,
) -> io::Result<()> {
let leaves = leaves.into_iter().collect::<Vec<_>>();
// check if the range is valid
if start + leaves.len() > self.capacity() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"provided range exceeds set size",
));
}
for (i, leaf) in leaves.iter().enumerate() {
self.nodes.insert((self.depth, start + i), *leaf);
self.recalculate_from(start + i);
}
self.next_index = max(self.next_index, start + leaves.len());
Ok(())
}
// Sets a leaf at the next available index
pub fn update_next(&mut self, leaf: H::Fr) -> io::Result<()> {
self.set(self.next_index, leaf)?;
Ok(())
}
// Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
pub fn delete(&mut self, index: usize) -> io::Result<()> {
// We reset the leaf only if we previously set a leaf at that index
if index < self.next_index {
self.set(index, H::default_leaf())?;
}
Ok(())
}
// Computes a Merkle proof for the leaf at the specified index
pub fn proof(&self, index: usize) -> io::Result<OptimalMerkleProof<H>> {
if index >= self.capacity() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"index exceeds set size",
));
}
let mut witness = Vec::<(H::Fr, u8)>::with_capacity(self.depth);
let mut i = index;
let mut depth = self.depth;
loop {
i ^= 1;
witness.push((self.get_node(depth, i), (1 - (i & 1)).try_into().unwrap()));
i >>= 1;
depth -= 1;
if depth == 0 {
break;
}
}
assert_eq!(i, 0);
Ok(OptimalMerkleProof(witness))
}
// Verifies a Merkle proof with respect to the input leaf and the tree root
pub fn verify(&self, leaf: &H::Fr, witness: &OptimalMerkleProof<H>) -> io::Result<bool> {
if witness.length() != self.depth {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"witness length doesn't match tree depth",
));
}
let expected_root = witness.compute_root_from(leaf);
Ok(expected_root.eq(&self.root()))
}
// Utilities for updating the tree nodes
fn get_node(&self, depth: usize, index: usize) -> H::Fr {
let node = *self
.nodes
.get(&(depth, index))
.unwrap_or_else(|| &self.cached_nodes[depth]);
node
}
fn get_leaf(&self, index: usize) -> H::Fr {
self.get_node(self.depth, index)
}
fn hash_couple(&mut self, depth: usize, index: usize) -> H::Fr {
let b = index & !1;
H::hash(&[self.get_node(depth, b), self.get_node(depth, b + 1)])
}
fn recalculate_from(&mut self, index: usize) {
let mut i = index;
let mut depth = self.depth;
loop {
let h = self.hash_couple(depth, i);
i >>= 1;
depth -= 1;
self.nodes.insert((depth, i), h);
if depth == 0 {
break;
}
}
assert_eq!(depth, 0);
assert_eq!(i, 0);
}
}
impl<H: Hasher> OptimalMerkleProof<H> {
#[must_use]
// Returns the length of a Merkle proof
pub fn length(&self) -> usize {
self.0.len()
}
/// Computes the leaf index corresponding to a Merkle proof
#[must_use]
pub fn leaf_index(&self) -> usize {
// In the current implementation, the path indexes in a proof correspond to the binary representation of the leaf index
let mut binary_repr = self.get_path_index();
binary_repr.reverse();
binary_repr
.into_iter()
.fold(0, |acc, digit| (acc << 1) + usize::from(digit))
}
#[must_use]
/// Returns the path elements forming a Merkle proof
pub fn get_path_elements(&self) -> Vec<H::Fr> {
self.0.iter().map(|x| x.0).collect()
}
/// Returns the path indexes forming a Merkle proof
#[must_use]
pub fn get_path_index(&self) -> Vec<u8> {
self.0.iter().map(|x| x.1).collect()
}
#[must_use]
/// Computes the corresponding Merkle root by iteratively hashing the Merkle proof path with a given input leaf
pub fn compute_root_from(&self, leaf: &H::Fr) -> H::Fr {
let mut acc: H::Fr = *leaf;
for w in self.0.iter() {
if w.1 == 0 {
acc = H::hash(&[acc, w.0]);
} else {
acc = H::hash(&[w.0, acc]);
}
}
acc
}
}
// Debug formatting for printing a (Optimal) Merkle Proof
impl<H> Debug for OptimalMerkleProof<H>
pub trait ZerokitMerkleProof<H: Hasher>
where
H: Hasher,
H::Fr: Debug,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_tuple("Proof").field(&self.0).finish()
}
}
////////////////////////////////////////////////////////////
/// Full Merkle Tree Implementation
////////////////////////////////////////////////////////////
/// Merkle tree with all leaf and intermediate hashes stored
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct FullMerkleTree<H: Hasher> {
/// The depth of the tree, i.e. the number of levels from leaf to root
depth: usize,
/// The nodes cached from the empty part of the tree (where leaves are set to default).
/// Since the rightmost part of the tree is usually changed much later than its creation,
/// we can prove accumulation of elements in the leftmost part, with no need to initialize the full tree
/// and by caching a few intermediate nodes up to the root computed from default leaves
cached_nodes: Vec<H::Fr>,
/// The tree nodes
nodes: Vec<H::Fr>,
// The next available (i.e., never used) tree index. Equivalently, the number of leaves added to the tree
// (deletions leave next_index unchanged)
next_index: usize,
}
/// Element of a Merkle proof
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum FullMerkleBranch<H: Hasher> {
/// Left branch taken, value is the right sibling hash.
Left(H::Fr),
/// Right branch taken, value is the left sibling hash.
Right(H::Fr),
}
/// Merkle proof path, bottom to top.
#[derive(Clone, PartialEq, Eq)]
pub struct FullMerkleProof<H: Hasher>(pub Vec<FullMerkleBranch<H>>);
/// Implementations
impl<H: Hasher> FullMerkleTree<H> {
pub fn default(depth: usize) -> Self {
FullMerkleTree::<H>::new(depth, H::default_leaf())
}
/// Creates a new `MerkleTree`
/// depth - the height of the tree made only of hash nodes. 2^depth is the maximum number of leaf hash nodes
pub fn new(depth: usize, initial_leaf: H::Fr) -> Self {
// Compute cache node values, leaf to root
let cached_nodes = successors(Some(initial_leaf), |prev| Some(H::hash(&[*prev, *prev])))
.take(depth + 1)
.collect::<Vec<_>>();
// Compute node values
let nodes = cached_nodes
.iter()
.rev()
.enumerate()
.flat_map(|(levels, hash)| repeat(hash).take(1 << levels))
.cloned()
.collect::<Vec<_>>();
debug_assert!(nodes.len() == (1 << (depth + 1)) - 1);
let next_index = 0;
Self {
depth,
cached_nodes,
nodes,
next_index,
}
}
// Returns the depth of the tree
pub fn depth(&self) -> usize {
self.depth
}
// Returns the capacity of the tree, i.e. the maximum number of accumulatable leaves
pub fn capacity(&self) -> usize {
1 << self.depth
}
// Returns the total number of leaves set
pub fn leaves_set(&mut self) -> usize {
self.next_index
}
#[must_use]
// Returns the root of the tree
pub fn root(&self) -> H::Fr {
self.nodes[0]
}
// Sets a leaf at the specified tree index
pub fn set(&mut self, leaf: usize, hash: H::Fr) -> io::Result<()> {
self.set_range(leaf, once(hash))?;
self.next_index = max(self.next_index, leaf + 1);
Ok(())
}
// Sets tree nodes, starting from start index
// Function specific to the FullMerkleTree implementation
fn set_range<I: IntoIterator<Item = H::Fr>>(
&mut self,
start: usize,
hashes: I,
) -> io::Result<()> {
let index = self.capacity() + start - 1;
let mut count = 0;
// first count the number of hashes and check that they fit in the tree
// then insert into the tree
let hashes = hashes.into_iter().collect::<Vec<_>>();
if hashes.len() + start > self.capacity() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"provided hashes do not fit in the tree",
));
}
hashes.into_iter().for_each(|hash| {
self.nodes[index + count] = hash;
count += 1;
});
if count != 0 {
self.update_nodes(index, index + (count - 1));
self.next_index = max(self.next_index, start + count);
}
Ok(())
}
// Sets a leaf at the next available index
pub fn update_next(&mut self, leaf: H::Fr) -> io::Result<()> {
self.set(self.next_index, leaf)?;
Ok(())
}
// Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
pub fn delete(&mut self, index: usize) -> io::Result<()> {
// We reset the leaf only if we previously set a leaf at that index
if index < self.next_index {
self.set(index, H::default_leaf())?;
}
Ok(())
}
// Computes a Merkle proof for the leaf at the specified index
pub fn proof(&self, leaf: usize) -> io::Result<FullMerkleProof<H>> {
if leaf >= self.capacity() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"index exceeds set size",
));
}
let mut index = self.capacity() + leaf - 1;
let mut path = Vec::with_capacity(self.depth + 1);
while let Some(parent) = self.parent(index) {
// Add proof for node at index to parent
path.push(match index & 1 {
1 => FullMerkleBranch::Left(self.nodes[index + 1]),
0 => FullMerkleBranch::Right(self.nodes[index - 1]),
_ => unreachable!(),
});
index = parent;
}
Ok(FullMerkleProof(path))
}
// Verifies a Merkle proof with respect to the input leaf and the tree root
pub fn verify(&self, hash: &H::Fr, proof: &FullMerkleProof<H>) -> io::Result<bool> {
Ok(proof.compute_root_from(hash) == self.root())
}
// Utilities for updating the tree nodes
/// For a given node index, return the parent node index
/// Returns None if there is no parent (root node)
fn parent(&self, index: usize) -> Option<usize> {
if index == 0 {
None
} else {
Some(((index + 1) >> 1) - 1)
}
}
/// For a given node index, return index of the first (left) child.
fn first_child(&self, index: usize) -> usize {
(index << 1) + 1
}
fn levels(&self, index: usize) -> usize {
// `n.next_power_of_two()` will return `n` iff `n` is a power of two.
// The extra offset corrects this.
(index + 2).next_power_of_two().trailing_zeros() as usize - 1
}
fn update_nodes(&mut self, start: usize, end: usize) {
debug_assert_eq!(self.levels(start), self.levels(end));
if let (Some(start), Some(end)) = (self.parent(start), self.parent(end)) {
for parent in start..=end {
let child = self.first_child(parent);
self.nodes[parent] = H::hash(&[self.nodes[child], self.nodes[child + 1]]);
}
self.update_nodes(start, end);
}
}
}
impl<H: Hasher> FullMerkleProof<H> {
#[must_use]
// Returns the length of a Merkle proof
pub fn length(&self) -> usize {
self.0.len()
}
/// Computes the leaf index corresponding to a Merkle proof
#[must_use]
pub fn leaf_index(&self) -> usize {
self.0.iter().rev().fold(0, |index, branch| match branch {
FullMerkleBranch::Left(_) => index << 1,
FullMerkleBranch::Right(_) => (index << 1) + 1,
})
}
#[must_use]
/// Returns the path elements forming a Merkle proof
pub fn get_path_elements(&self) -> Vec<H::Fr> {
self.0
.iter()
.map(|x| match x {
FullMerkleBranch::Left(value) | FullMerkleBranch::Right(value) => *value,
})
.collect()
}
/// Returns the path indexes forming a Merkle proof
#[must_use]
pub fn get_path_index(&self) -> Vec<u8> {
self.0
.iter()
.map(|branch| match branch {
FullMerkleBranch::Left(_) => 0,
FullMerkleBranch::Right(_) => 1,
})
.collect()
}
/// Computes the corresponding Merkle root by iteratively hashing the Merkle proof path with a given input leaf
#[must_use]
pub fn compute_root_from(&self, hash: &H::Fr) -> H::Fr {
self.0.iter().fold(*hash, |hash, branch| match branch {
FullMerkleBranch::Left(sibling) => H::hash(&[hash, *sibling]),
FullMerkleBranch::Right(sibling) => H::hash(&[*sibling, hash]),
})
}
}
// Debug formatting for printing a (Full) Merkle Proof Branch
impl<H> Debug for FullMerkleBranch<H>
where
H: Hasher,
H::Fr: Debug,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Left(arg0) => f.debug_tuple("Left").field(arg0).finish(),
Self::Right(arg0) => f.debug_tuple("Right").field(arg0).finish(),
}
}
}
// Debug formatting for printing a (Full) Merkle Proof
impl<H> Debug for FullMerkleProof<H>
where
H: Hasher,
H::Fr: Debug,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_tuple("Proof").field(&self.0).finish()
}
type Index;
fn length(&self) -> usize;
fn leaf_index(&self) -> usize;
fn get_path_elements(&self) -> Vec<H::Fr>;
fn get_path_index(&self) -> Vec<Self::Index>;
fn compute_root_from(&self, leaf: &H::Fr) -> H::Fr;
}

View File

@@ -1,2 +1,7 @@
pub mod full_merkle_tree;
#[allow(clippy::module_inception)]
pub mod merkle_tree;
pub mod optimal_merkle_tree;
pub use self::full_merkle_tree::*;
pub use self::merkle_tree::*;
pub use self::optimal_merkle_tree::*;

View File

@@ -0,0 +1,266 @@
use crate::merkle_tree::{Hasher, ZerokitMerkleProof, ZerokitMerkleTree};
use color_eyre::{Report, Result};
use std::collections::HashMap;
use std::{cmp::max, fmt::Debug};
////////////////////////////////////////////////////////////
/// Optimal Merkle Tree Implementation
////////////////////////////////////////////////////////////
/// The Merkle tree structure
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OptimalMerkleTree<H>
where
H: Hasher,
{
/// The depth of the tree, i.e. the number of levels from leaf to root
depth: usize,
/// The nodes cached from the empty part of the tree (where leaves are set to default).
/// Since the rightmost part of the tree is usually changed much later than its creation,
/// we can prove accumulation of elements in the leftmost part, with no need to initialize the full tree
/// and by caching a few intermediate nodes up to the root computed from default leaves
cached_nodes: Vec<H::Fr>,
/// The tree nodes
nodes: HashMap<(usize, usize), H::Fr>,
// The next available (i.e., never used) tree index. Equivalently, the number of leaves added to the tree
// (deletions leave next_index unchanged)
next_index: usize,
}
/// The Merkle proof
/// Contains a vector of (node, branch_index) that defines the proof path elements and branch direction (1 or 0)
#[derive(Clone, PartialEq, Eq)]
pub struct OptimalMerkleProof<H: Hasher>(pub Vec<(H::Fr, u8)>);
/// Implementations
impl<H: Hasher> ZerokitMerkleTree<H> for OptimalMerkleTree<H>
where
H: Hasher,
{
type Proof = OptimalMerkleProof<H>;
fn default(depth: usize) -> Self {
OptimalMerkleTree::<H>::new(depth, H::default_leaf())
}
/// Creates a new `MerkleTree`
/// depth - the height of the tree made only of hash nodes. 2^depth is the maximum number of leaf hash nodes
fn new(depth: usize, default_leaf: H::Fr) -> Self {
let mut cached_nodes: Vec<H::Fr> = Vec::with_capacity(depth + 1);
cached_nodes.push(default_leaf);
for i in 0..depth {
cached_nodes.push(H::hash(&[cached_nodes[i]; 2]));
}
cached_nodes.reverse();
OptimalMerkleTree {
cached_nodes: cached_nodes.clone(),
depth,
nodes: HashMap::new(),
next_index: 0,
}
}
// Returns the depth of the tree
fn depth(&self) -> usize {
self.depth
}
// Returns the capacity of the tree, i.e. the maximum number of accumulatable leaves
fn capacity(&self) -> usize {
1 << self.depth
}
// Returns the total number of leaves set
fn leaves_set(&mut self) -> usize {
self.next_index
}
#[must_use]
// Returns the root of the tree
fn root(&self) -> H::Fr {
self.get_node(0, 0)
}
// Sets a leaf at the specified tree index
fn set(&mut self, index: usize, leaf: H::Fr) -> Result<()> {
if index >= self.capacity() {
return Err(Report::msg("index exceeds set size"));
}
self.nodes.insert((self.depth, index), leaf);
self.recalculate_from(index)?;
self.next_index = max(self.next_index, index + 1);
Ok(())
}
// Sets multiple leaves from the specified tree index
fn set_range<I: IntoIterator<Item = H::Fr>>(&mut self, start: usize, leaves: I) -> Result<()> {
let leaves = leaves.into_iter().collect::<Vec<_>>();
// check if the range is valid
if start + leaves.len() > self.capacity() {
return Err(Report::msg("provided range exceeds set size"));
}
for (i, leaf) in leaves.iter().enumerate() {
self.nodes.insert((self.depth, start + i), *leaf);
self.recalculate_from(start + i)?;
}
self.next_index = max(self.next_index, start + leaves.len());
Ok(())
}
// Sets a leaf at the next available index
fn update_next(&mut self, leaf: H::Fr) -> Result<()> {
self.set(self.next_index, leaf)?;
Ok(())
}
// Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
fn delete(&mut self, index: usize) -> Result<()> {
// We reset the leaf only if we previously set a leaf at that index
if index < self.next_index {
self.set(index, H::default_leaf())?;
}
Ok(())
}
// Computes a Merkle proof for the leaf at the specified index
fn proof(&self, index: usize) -> Result<Self::Proof> {
if index >= self.capacity() {
return Err(Report::msg("index exceeds set size"));
}
let mut witness = Vec::<(H::Fr, u8)>::with_capacity(self.depth);
let mut i = index;
let mut depth = self.depth;
loop {
i ^= 1;
witness.push((self.get_node(depth, i), (1 - (i & 1)).try_into().unwrap()));
i >>= 1;
depth -= 1;
if depth == 0 {
break;
}
}
if i != 0 {
Err(Report::msg("i != 0"))
} else {
Ok(OptimalMerkleProof(witness))
}
}
// Verifies a Merkle proof with respect to the input leaf and the tree root
fn verify(&self, leaf: &H::Fr, witness: &Self::Proof) -> Result<bool> {
if witness.length() != self.depth {
return Err(Report::msg("witness length doesn't match tree depth"));
}
let expected_root = witness.compute_root_from(leaf);
Ok(expected_root.eq(&self.root()))
}
}
impl<H: Hasher> OptimalMerkleTree<H>
where
H: Hasher,
{
// Utilities for updating the tree nodes
fn get_node(&self, depth: usize, index: usize) -> H::Fr {
let node = *self
.nodes
.get(&(depth, index))
.unwrap_or_else(|| &self.cached_nodes[depth]);
node
}
pub fn get_leaf(&self, index: usize) -> H::Fr {
self.get_node(self.depth, index)
}
fn hash_couple(&mut self, depth: usize, index: usize) -> H::Fr {
let b = index & !1;
H::hash(&[self.get_node(depth, b), self.get_node(depth, b + 1)])
}
fn recalculate_from(&mut self, index: usize) -> Result<()> {
let mut i = index;
let mut depth = self.depth;
loop {
let h = self.hash_couple(depth, i);
i >>= 1;
depth -= 1;
self.nodes.insert((depth, i), h);
if depth == 0 {
break;
}
}
if depth != 0 {
return Err(Report::msg("did not reach the depth"));
}
if i != 0 {
return Err(Report::msg("did not go through all indexes"));
}
Ok(())
}
}
impl<H: Hasher> ZerokitMerkleProof<H> for OptimalMerkleProof<H>
where
H: Hasher,
{
type Index = u8;
#[must_use]
// Returns the length of a Merkle proof
fn length(&self) -> usize {
self.0.len()
}
/// Computes the leaf index corresponding to a Merkle proof
#[must_use]
fn leaf_index(&self) -> usize {
// In the current implementation, the path indexes in a proof correspond to the binary representation of the leaf index
let mut binary_repr = self.get_path_index();
binary_repr.reverse();
binary_repr
.into_iter()
.fold(0, |acc, digit| (acc << 1) + usize::from(digit))
}
#[must_use]
/// Returns the path elements forming a Merkle proof
fn get_path_elements(&self) -> Vec<H::Fr> {
self.0.iter().map(|x| x.0).collect()
}
/// Returns the path indexes forming a Merkle proof
#[must_use]
fn get_path_index(&self) -> Vec<u8> {
self.0.iter().map(|x| x.1).collect()
}
#[must_use]
/// Computes the corresponding Merkle root by iteratively hashing the Merkle proof path with a given input leaf
fn compute_root_from(&self, leaf: &H::Fr) -> H::Fr {
let mut acc: H::Fr = *leaf;
for w in self.0.iter() {
if w.1 == 0 {
acc = H::hash(&[acc, w.0]);
} else {
acc = H::hash(&[w.0, acc]);
}
}
acc
}
}
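A worked instance of the binary-representation remark in `leaf_index` above: for a depth-3 tree, the proof for leaf 5 carries bottom-to-top path indexes `[1, 0, 1]`; reversed, they are the binary digits of the index, `0b101 = 5`.

```rust
fn main() {
    let path_index = [1u8, 0, 1]; // bottom-to-top, as get_path_index returns
    let mut digits = path_index.to_vec();
    digits.reverse(); // now top-to-bottom: the binary digits of the leaf index
    let leaf_index = digits
        .into_iter()
        .fold(0usize, |acc, d| (acc << 1) + usize::from(d));
    assert_eq!(leaf_index, 5);
}
```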
// Debug formatting for printing a (Optimal) Merkle Proof
impl<H> Debug for OptimalMerkleProof<H>
where
H: Hasher,
H::Fr: Debug,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_tuple("Proof").field(&self.0).finish()
}
}
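A sketch of the sparse-storage idea behind `get_node` above: only explicitly written nodes live in the map, and every other position falls back to the cached per-level hash of an all-default subtree, so a mostly empty tree stays small in memory (toy `u64` nodes stand in for `H::Fr`).

```rust
use std::collections::HashMap;

// Toy node type (u64) in place of the field element H::Fr.
fn get_node(
    nodes: &HashMap<(usize, usize), u64>,
    cached_nodes: &[u64],
    depth: usize,
    index: usize,
) -> u64 {
    *nodes.get(&(depth, index)).unwrap_or(&cached_nodes[depth])
}

fn main() {
    let cached_nodes = vec![7, 3, 1]; // default hashes, level 0 (root) to 2 (leaves)
    let mut nodes = HashMap::new();
    nodes.insert((2, 0), 42); // one leaf actually set
    assert_eq!(get_node(&nodes, &cached_nodes, 2, 0), 42); // stored value
    assert_eq!(get_node(&nodes, &cached_nodes, 2, 1), 1);  // default fallback
}
```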

View File

@@ -11,7 +11,7 @@
#![allow(dead_code)]
use ark_ff::{FpParameters, PrimeField};
use ark_ff::PrimeField;
use num_bigint::BigUint;
pub struct PoseidonGrainLFSR {
@@ -88,8 +88,8 @@ impl PoseidonGrainLFSR {
}
// b50, ..., b79 are set to 1
for i in 50..=79 {
state[i] = true;
for item in state.iter_mut().skip(50) {
*item = true;
}
let head = 0;
@@ -111,7 +111,7 @@ impl PoseidonGrainLFSR {
let mut new_bit = self.update();
// Loop until the first bit is true
while new_bit == false {
while !new_bit {
// Discard the second bit
let _ = self.update();
// Obtain another first bit
@@ -129,8 +129,8 @@ impl PoseidonGrainLFSR {
&mut self,
num_elems: usize,
) -> Vec<F> {
assert_eq!(F::Params::MODULUS_BITS as u64, self.prime_num_bits);
let modulus: BigUint = F::Params::MODULUS.into();
assert_eq!(F::MODULUS_BIT_SIZE as u64, self.prime_num_bits);
let modulus: BigUint = F::MODULUS.into();
let mut res = Vec::new();
for _ in 0..num_elems {
@@ -163,7 +163,7 @@ impl PoseidonGrainLFSR {
}
pub fn get_field_elements_mod_p<F: PrimeField>(&mut self, num_elems: usize) -> Vec<F> {
assert_eq!(F::Params::MODULUS_BITS as u64, self.prime_num_bits);
assert_eq!(F::MODULUS_BIT_SIZE as u64, self.prime_num_bits);
let mut res = Vec::new();
for _ in 0..num_elems {
@@ -263,8 +263,8 @@ pub fn find_poseidon_ark_and_mds<F: PrimeField>(
let ys = lfsr.get_field_elements_mod_p::<F>(rate);
for i in 0..(rate) {
for j in 0..(rate) {
mds[i][j] = (xs[i] + &ys[j]).inverse().unwrap();
for (j, ys_item) in ys.iter().enumerate().take(rate) {
mds[i][j] = (xs[i] + ys_item).inverse().unwrap();
}
}
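The matrix built above is a Cauchy matrix, `mds[i][j] = 1 / (x_i + y_j)`, invertible whenever all the sums `x_i + y_j` are distinct and nonzero. A toy illustration over `f64` (the real code inverts field elements):

```rust
fn main() {
    let xs = [1.0_f64, 2.0];
    let ys = [3.0_f64, 5.0];
    let mds: Vec<Vec<f64>> = xs
        .iter()
        .map(|x| ys.iter().map(|y| 1.0 / (x + y)).collect())
        .collect();
    assert!((mds[0][0] - 0.25).abs() < 1e-12); // 1 / (1 + 3)
}
```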

View File

@@ -4,7 +4,7 @@
// and adapted to work over arkworks field traits and custom data structures
use crate::poseidon_constants::find_poseidon_ark_and_mds;
use ark_ff::{FpParameters, PrimeField};
use ark_ff::PrimeField;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RoundParamenters<F: PrimeField> {
@@ -28,22 +28,21 @@ impl<F: PrimeField> Poseidon<F> {
pub fn from(poseidon_params: &[(usize, usize, usize, usize)]) -> Self {
let mut read_params = Vec::<RoundParamenters<F>>::new();
for i in 0..poseidon_params.len() {
let (t, n_rounds_f, n_rounds_p, skip_matrices) = poseidon_params[i];
for &(t, n_rounds_f, n_rounds_p, skip_matrices) in poseidon_params {
let (ark, mds) = find_poseidon_ark_and_mds::<F>(
1, // is_field = 1
0, // is_sbox_inverse = 0
F::Params::MODULUS_BITS as u64,
F::MODULUS_BIT_SIZE as u64,
t,
n_rounds_f as u64,
n_rounds_p as u64,
skip_matrices,
);
let rp = RoundParamenters {
t: t,
n_rounds_p: n_rounds_p,
n_rounds_f: n_rounds_f,
skip_matrices: skip_matrices,
t,
n_rounds_p,
n_rounds_f,
skip_matrices,
c: ark,
m: mds,
};
@@ -67,11 +66,11 @@ impl<F: PrimeField> Poseidon<F> {
pub fn sbox(&self, n_rounds_f: usize, n_rounds_p: usize, state: &mut [F], i: usize) {
if (i < n_rounds_f / 2) || (i >= n_rounds_f / 2 + n_rounds_p) {
for j in 0..state.len() {
let aux = state[j];
state[j] *= state[j];
state[j] *= state[j];
state[j] *= aux;
for current_state in &mut state.iter_mut() {
let aux = *current_state;
*current_state *= *current_state;
*current_state *= *current_state;
*current_state *= aux;
}
} else {
let aux = state[0];
@@ -85,9 +84,9 @@ impl<F: PrimeField> Poseidon<F> {
let mut new_state: Vec<F> = Vec::new();
for i in 0..state.len() {
new_state.push(F::zero());
for j in 0..state.len() {
for (j, state_item) in state.iter().enumerate() {
let mut mij = m[i][j];
mij *= state[j];
mij *= state_item;
new_state[i] += mij;
}
}
@@ -116,7 +115,7 @@ impl<F: PrimeField> Poseidon<F> {
self.ark(
&mut state,
&self.round_params[param_index].c,
(i as usize) * self.round_params[param_index].t,
i * self.round_params[param_index].t,
);
self.sbox(
self.round_params[param_index].n_rounds_f,

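What each S-box application above computes per state element is x^5: two squarings followed by a multiply with the saved input. A sketch over `u64` with wrapping arithmetic (the real code works over the prime field):

```rust
fn sbox_pow5(x: u64) -> u64 {
    let aux = x;
    let mut y = x.wrapping_mul(x); // x^2
    y = y.wrapping_mul(y);         // x^4
    y.wrapping_mul(aux)            // x^5
}

fn main() {
    assert_eq!(sbox_pow5(2), 32); // 2^5 = 32
}
```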
View File

@@ -3,7 +3,7 @@
mod test {
use hex_literal::hex;
use tiny_keccak::{Hasher as _, Keccak};
use utils::{FullMerkleTree, Hasher, OptimalMerkleTree};
use utils::{FullMerkleTree, Hasher, OptimalMerkleTree, ZerokitMerkleProof, ZerokitMerkleTree};
struct Keccak256;