Mirror of https://github.com/vacp2p/zerokit.git (synced 2026-01-09 13:47:58 -05:00)

Compare commits: eyre-remov...v0.9.0 (28 commits)
| SHA1 |
|---|
| a4bb3feb50 |
| 2386e8732f |
| 44c6cf3cdd |
| eb8eedfdb4 |
| 57b694db5d |
| 0b00c639a0 |
| 7c801a804e |
| 9da80dd807 |
| bcbd6a97af |
| 6965cf2852 |
| 578e0507b3 |
| bf1e184da9 |
| 4473688efa |
| c80569d518 |
| fd99b6af74 |
| 65f53e3da3 |
| 042f8a9739 |
| baf474e747 |
| dc0b31752c |
| 36013bf4ba |
| 211b2d4830 |
| 5f4bcb74ce |
| de5fd36add |
| 19c0f551c8 |
| 4133f1f8c3 |
| 149096f7a6 |
| 7023e85fce |
| a4cafa6adc |
191 .github/workflows/ci.yml (vendored)
@@ -9,7 +9,9 @@ on:
       - "!rln/src/**"
       - "!rln/resources/**"
       - "!utils/src/**"
+      - "!rln-wasm-utils/**"
   pull_request:
+    types: [opened, synchronize, reopened, ready_for_review]
     paths-ignore:
       - "**.md"
       - "!.github/workflows/*.yml"
@@ -17,59 +19,56 @@ on:
       - "!rln/src/**"
       - "!rln/resources/**"
       - "!utils/src/**"
+      - "!rln-wasm-utils/**"
 
-name: Tests
+name: CI
 
 jobs:
   utils-test:
     # skip tests on draft PRs
     if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
     strategy:
       matrix:
-        platform: [ ubuntu-latest, macos-latest ]
-        crate: [ utils ]
+        platform: [ubuntu-latest, macos-latest]
+        crate: [utils]
     runs-on: ${{ matrix.platform }}
     timeout-minutes: 60
 
-    name: test - ${{ matrix.crate }} - ${{ matrix.platform }}
+    name: Test - ${{ matrix.crate }} - ${{ matrix.platform }}
     steps:
       - name: Checkout sources
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
      - name: Install stable toolchain
-        uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: stable
-          override: true
+        uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2
      - name: Install dependencies
        run: make installdeps
-      - name: cargo-make test
+      - name: Test utils
        run: |
          cargo make test --release
        working-directory: ${{ matrix.crate }}
 
   rln-test:
     # skip tests on draft PRs
     if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
     strategy:
       matrix:
-        platform: [ ubuntu-latest, macos-latest ]
-        crate: [ rln ]
-        feature: [ "default", "arkzkey", "stateless" ]
+        platform: [ubuntu-latest, macos-latest]
+        crate: [rln]
+        feature: ["default", "stateless"]
     runs-on: ${{ matrix.platform }}
     timeout-minutes: 60
 
-    name: test - ${{ matrix.crate }} - ${{ matrix.platform }} - ${{ matrix.feature }}
+    name: Test - ${{ matrix.crate }} - ${{ matrix.platform }} - ${{ matrix.feature }}
     steps:
       - name: Checkout sources
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
      - name: Install stable toolchain
-        uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: stable
-          override: true
+        uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2
      - name: Install dependencies
        run: make installdeps
-      - name: cargo-make test
+      - name: Test rln
        run: |
          if [ ${{ matrix.feature }} == default ]; then
            cargo make test --release
@@ -78,91 +77,133 @@ jobs:
          fi
        working-directory: ${{ matrix.crate }}
 
-  rln-wasm:
+  rln-wasm-test:
     # skip tests on draft PRs
     if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
     strategy:
       matrix:
-        platform: [ ubuntu-latest, macos-latest ]
-        feature: [ "default", "arkzkey" ]
+        platform: [ubuntu-latest, macos-latest]
+        crate: [rln-wasm]
+        feature: ["default"]
     runs-on: ${{ matrix.platform }}
     timeout-minutes: 60
 
-    name: test - rln-wasm - ${{ matrix.platform }} - ${{ matrix.feature }}
+    name: Test - ${{ matrix.crate }} - ${{ matrix.platform }} - ${{ matrix.feature }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
      - name: Install stable toolchain
-        uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: stable
-          override: true
+        uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2
-      - name: Install Dependencies
+      - name: Install dependencies
        run: make installdeps
-      - name: cargo-make build
-        run: |
-          if [ ${{ matrix.feature }} == default ]; then
-            cargo make build
-          else
-            cargo make build_${{ matrix.feature }}
-          fi
-        working-directory: rln-wasm
-      - name: cargo-make test
-        run: |
-          if [ ${{ matrix.feature }} == default ]; then
-            cargo make test --release
-          else
-            cargo make test_${{ matrix.feature }} --release
-          fi
-        working-directory: rln-wasm
+      - name: Build rln-wasm
+        run: cargo make build
+        working-directory: ${{ matrix.crate }}
+      - name: Test rln-wasm on node
+        run: cargo make test --release
+        working-directory: ${{ matrix.crate }}
+      - name: Test rln-wasm on browser
+        run: cargo make test_browser --release
+        working-directory: ${{ matrix.crate }}
 
+  rln-wasm-parallel-test:
+    # skip tests on draft PRs
+    if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
+    strategy:
+      matrix:
+        platform: [ubuntu-latest, macos-latest]
+        crate: [rln-wasm]
+        feature: ["parallel"]
+    runs-on: ${{ matrix.platform }}
+    timeout-minutes: 60
+
+    name: Test - ${{ matrix.crate }} - ${{ matrix.platform }} - ${{ matrix.feature }}
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install nightly toolchain
+        uses: dtolnay/rust-toolchain@nightly
+        with:
+          components: rust-src
+          targets: wasm32-unknown-unknown
+      - uses: Swatinem/rust-cache@v2
+      - name: Install dependencies
+        run: make installdeps
+      - name: Build rln-wasm in parallel mode
+        run: cargo make build_parallel
+        working-directory: ${{ matrix.crate }}
+      - name: Test rln-wasm in parallel mode on browser
+        run: cargo make test_parallel --release
+        working-directory: ${{ matrix.crate }}
+
+  rln-wasm-utils-test:
+    # skip tests on draft PRs
+    if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
+    strategy:
+      matrix:
+        platform: [ubuntu-latest, macos-latest]
+        crate: [rln-wasm-utils]
+    runs-on: ${{ matrix.platform }}
+    timeout-minutes: 60
+
+    name: Test - ${{ matrix.crate }} - ${{ matrix.platform }}
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install stable toolchain
+        uses: dtolnay/rust-toolchain@stable
+      - uses: Swatinem/rust-cache@v2
+      - name: Install dependencies
+        run: make installdeps
+      - name: Test rln-wasm-utils
+        run: cargo make test --release
+        working-directory: ${{ matrix.crate }}
 
   lint:
     # run on both ready and draft PRs
     if: github.event_name == 'push' || (github.event_name == 'pull_request' && !github.event.pull_request.draft)
     strategy:
       matrix:
         # we run lint tests only on ubuntu
-        platform: [ ubuntu-latest ]
-        crate: [ rln, rln-wasm, utils ]
+        platform: [ubuntu-latest]
+        crate: [rln, rln-wasm, rln-wasm-utils, utils]
     runs-on: ${{ matrix.platform }}
     timeout-minutes: 60
 
-    name: lint - ${{ matrix.crate }} - ${{ matrix.platform }}
+    name: Lint - ${{ matrix.crate }} - ${{ matrix.platform }}
     steps:
       - name: Checkout sources
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
      - name: Install stable toolchain
-        uses: actions-rs/toolchain@v1
+        uses: dtolnay/rust-toolchain@stable
        with:
-          profile: minimal
-          toolchain: stable
-          override: true
          components: rustfmt, clippy
      - uses: Swatinem/rust-cache@v2
-      - name: Install Dependencies
+      - name: Install dependencies
        run: make installdeps
-      - name: cargo fmt
+      - name: Check formatting
        if: success() || failure()
        run: cargo fmt -- --check
        working-directory: ${{ matrix.crate }}
-      - name: cargo clippy
+      - name: Check clippy
        if: success() || failure()
        run: |
-          cargo clippy --release
+          cargo clippy --all-targets --release -- -D warnings
        working-directory: ${{ matrix.crate }}
 
   benchmark-utils:
-    # run only in pull requests
-    if: github.event_name == 'pull_request'
+    # run only on ready pull requests
+    if: github.event_name == 'pull_request' && !github.event.pull_request.draft
     strategy:
       matrix:
         # we run benchmark tests only on ubuntu
-        platform: [ ubuntu-latest ]
-        crate: [ utils ]
+        platform: [ubuntu-latest]
+        crate: [utils]
     runs-on: ${{ matrix.platform }}
     timeout-minutes: 60
 
-    name: benchmark - ${{ matrix.platform }} - ${{ matrix.crate }}
+    name: Benchmark - ${{ matrix.crate }} - ${{ matrix.platform }}
     steps:
       - name: Checkout sources
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
      - uses: Swatinem/rust-cache@v2
      - uses: boa-dev/criterion-compare-action@v3
        with:
@@ -170,24 +211,24 @@ jobs:
          cwd: ${{ matrix.crate }}
 
   benchmark-rln:
-    # run only in pull requests
-    if: github.event_name == 'pull_request'
+    # run only on ready pull requests
+    if: github.event_name == 'pull_request' && !github.event.pull_request.draft
     strategy:
       matrix:
         # we run benchmark tests only on ubuntu
-        platform: [ ubuntu-latest ]
-        crate: [ rln ]
-        feature: [ "default", "arkzkey" ]
+        platform: [ubuntu-latest]
+        crate: [rln]
+        feature: ["default"]
     runs-on: ${{ matrix.platform }}
     timeout-minutes: 60
 
-    name: benchmark - ${{ matrix.platform }} - ${{ matrix.crate }} - ${{ matrix.feature }}
+    name: Benchmark - ${{ matrix.crate }} - ${{ matrix.platform }} - ${{ matrix.feature }}
     steps:
       - name: Checkout sources
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
      - uses: Swatinem/rust-cache@v2
      - uses: boa-dev/criterion-compare-action@v3
        with:
          branchName: ${{ github.base_ref }}
          cwd: ${{ matrix.crate }}
          features: ${{ matrix.feature }}
180 .github/workflows/nightly-release.yml (vendored)
@@ -6,43 +6,45 @@ on:
 
 jobs:
   linux:
-    strategy:
-      matrix:
-        feature: [ "default", "arkzkey", "stateless" ]
-        target:
-          - x86_64-unknown-linux-gnu
-          - aarch64-unknown-linux-gnu
-          # - i686-unknown-linux-gnu
-        include:
-          - feature: stateless
-            cargo_args: --exclude rln-cli
     name: Linux build
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        features:
+          - ["stateless"]
+          - ["stateless", "parallel"]
+          - ["pmtree-ft"]
+          - ["pmtree-ft", "parallel"]
+          - ["fullmerkletree"]
+          - ["fullmerkletree", "parallel"]
+          - ["optimalmerkletree"]
+          - ["optimalmerkletree", "parallel"]
+        target: [x86_64-unknown-linux-gnu, aarch64-unknown-linux-gnu]
+    env:
+      FEATURES_CARGO: ${{ join(matrix.features, ',') }}
+      FEATURES_TAG: ${{ join(matrix.features, '-') }}
+      TARGET: ${{ matrix.target }}
     steps:
       - name: Checkout sources
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
      - name: Install stable toolchain
-        uses: actions-rs/toolchain@v1
+        uses: dtolnay/rust-toolchain@stable
        with:
-          profile: minimal
-          toolchain: stable
-          override: true
-          target: ${{ matrix.target }}
+          target: ${{ env.TARGET }}
      - uses: Swatinem/rust-cache@v2
      - name: Install dependencies
        run: make installdeps
-      - name: cross build
+      - name: Cross build
        run: |
-          cross build --release --target ${{ matrix.target }} --features ${{ matrix.feature }} --workspace ${{ matrix.cargo_args }}
+          cross build --release --target $TARGET --no-default-features --features "$FEATURES_CARGO" --workspace
          mkdir release
-          cp target/${{ matrix.target }}/release/librln* release/
-          tar -czvf ${{ matrix.target }}-${{ matrix.feature }}-rln.tar.gz release/
+          cp target/$TARGET/release/librln* release/
+          tar -czvf $TARGET-$FEATURES_TAG-rln.tar.gz release/
      - name: Upload archive artifact
        uses: actions/upload-artifact@v4
        with:
-          name: ${{ matrix.target }}-${{ matrix.feature }}-archive
-          path: ${{ matrix.target }}-${{ matrix.feature }}-rln.tar.gz
+          name: ${{ env.TARGET }}-${{ env.FEATURES_TAG }}-archive
+          path: ${{ env.TARGET }}-${{ env.FEATURES_TAG }}-rln.tar.gz
          retention-days: 2
 
   macos:
@@ -50,82 +52,136 @@ jobs:
     runs-on: macos-latest
     strategy:
       matrix:
-        feature: [ "default", "arkzkey", "stateless" ]
-        target:
-          - x86_64-apple-darwin
-          - aarch64-apple-darwin
-        include:
-          - feature: stateless
-            cargo_args: --exclude rln-cli
+        features:
+          - ["stateless"]
+          - ["stateless", "parallel"]
+          - ["pmtree-ft"]
+          - ["pmtree-ft", "parallel"]
+          - ["fullmerkletree"]
+          - ["fullmerkletree", "parallel"]
+          - ["optimalmerkletree"]
+          - ["optimalmerkletree", "parallel"]
+        target: [x86_64-apple-darwin, aarch64-apple-darwin]
+    env:
+      FEATURES_CARGO: ${{ join(matrix.features, ',') }}
+      FEATURES_TAG: ${{ join(matrix.features, '-') }}
+      TARGET: ${{ matrix.target }}
     steps:
       - name: Checkout sources
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
      - name: Install stable toolchain
-        uses: actions-rs/toolchain@v1
+        uses: dtolnay/rust-toolchain@stable
        with:
-          profile: minimal
-          toolchain: stable
-          override: true
-          target: ${{ matrix.target }}
+          target: ${{ env.TARGET }}
      - uses: Swatinem/rust-cache@v2
      - name: Install dependencies
        run: make installdeps
-      - name: cross build
+      - name: Cross build
        run: |
-          cross build --release --target ${{ matrix.target }} --features ${{ matrix.feature }} --workspace ${{ matrix.cargo_args }}
+          cross build --release --target $TARGET --no-default-features --features "$FEATURES_CARGO" --workspace
          mkdir release
-          cp target/${{ matrix.target }}/release/librln* release/
-          tar -czvf ${{ matrix.target }}-${{ matrix.feature }}-rln.tar.gz release/
+          cp target/$TARGET/release/librln* release/
+          tar -czvf $TARGET-$FEATURES_TAG-rln.tar.gz release/
      - name: Upload archive artifact
        uses: actions/upload-artifact@v4
        with:
-          name: ${{ matrix.target }}-${{ matrix.feature }}-archive
-          path: ${{ matrix.target }}-${{ matrix.feature }}-rln.tar.gz
+          name: ${{ env.TARGET }}-${{ env.FEATURES_TAG }}-archive
+          path: ${{ env.TARGET }}-${{ env.FEATURES_TAG }}-rln.tar.gz
          retention-days: 2
 
-  browser-rln-wasm:
-    name: Browser build (RLN WASM)
+  rln-wasm:
+    name: Build rln-wasm
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        feature:
+          - "default"
+          - "parallel"
     steps:
       - name: Checkout sources
         uses: actions/checkout@v4
      - name: Install stable toolchain
        uses: dtolnay/rust-toolchain@stable
        with:
          targets: wasm32-unknown-unknown
+      - name: Install nightly toolchain
+        uses: dtolnay/rust-toolchain@nightly
+        with:
+          components: rust-src
+          targets: wasm32-unknown-unknown
      - uses: Swatinem/rust-cache@v2
      - name: Install dependencies
        run: make installdeps
+      - name: Build rln-wasm package
+        run: |
+          if [[ ${{ matrix.feature }} == *parallel* ]]; then
+            env RUSTFLAGS="-C target-feature=+atomics,+bulk-memory,+mutable-globals" \
+            rustup run nightly wasm-pack build --release --target web --scope waku \
+            --features ${{ matrix.feature }} -Z build-std=panic_abort,std
+          else
+            wasm-pack build --release --target web --scope waku
+          fi
+
+          sed -i.bak 's/rln-wasm/zerokit-rln-wasm/g' pkg/package.json && rm pkg/package.json.bak
+
+          wasm-opt pkg/rln_wasm_bg.wasm -Oz --strip-debug --strip-dwarf \
+          --remove-unused-module-elements --vacuum -o pkg/rln_wasm_bg.wasm
+
+          mkdir release
+          cp -r pkg/* release/
+          tar -czvf rln-wasm-${{ matrix.feature }}.tar.gz release/
+        working-directory: rln-wasm
      - name: Upload archive artifact
        uses: actions/upload-artifact@v4
        with:
+          name: rln-wasm-${{ matrix.feature }}-archive
+          path: rln-wasm/rln-wasm-${{ matrix.feature }}.tar.gz
          retention-days: 2
 
+  rln-wasm-utils:
+    name: Build rln-wasm-utils
     runs-on: ubuntu-latest
     steps:
      - name: Checkout sources
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
      - name: Install stable toolchain
-        uses: actions-rs/toolchain@v1
+        uses: dtolnay/rust-toolchain@stable
        with:
-          profile: minimal
-          toolchain: stable
-          override: true
          targets: wasm32-unknown-unknown
      - uses: Swatinem/rust-cache@v2
      - name: Install dependencies
        run: make installdeps
-      - name: cross make build
+      - name: Build rln-wasm-utils package
        run: |
-          cross make build
-          mkdir release
-          cp pkg/** release/
-          tar -czvf browser-rln-wasm.tar.gz release/
-        working-directory: rln-wasm
+          wasm-pack build --release --target web --scope waku
+
+          sed -i.bak 's/rln-wasm-utils/zerokit-rln-wasm-utils/g' pkg/package.json && rm pkg/package.json.bak
+
+          wasm-opt pkg/rln_wasm_utils_bg.wasm -Oz --strip-debug --strip-dwarf \
+          --remove-unused-module-elements --vacuum -o pkg/rln_wasm_utils_bg.wasm
+
+          mkdir release
+          cp -r pkg/* release/
+          tar -czvf rln-wasm-utils.tar.gz release/
+        working-directory: rln-wasm-utils
      - name: Upload archive artifact
        uses: actions/upload-artifact@v4
        with:
-          name: browser-rln-wasm-archive
-          path: rln-wasm/browser-rln-wasm.tar.gz
+          name: rln-wasm-utils-archive
+          path: rln-wasm-utils/rln-wasm-utils.tar.gz
          retention-days: 2
 
   prepare-prerelease:
     name: Prepare pre-release
-    needs: [ linux, macos, browser-rln-wasm ]
+    needs: [linux, macos, rln-wasm, rln-wasm-utils]
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          ref: master
      - name: Download artifacts
        uses: actions/download-artifact@v4
 
      - name: Delete tag
        uses: dev-drprasad/delete-tag-and-release@v0.2.1
        with:
@@ -133,7 +189,6 @@ jobs:
          tag_name: nightly
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
      - name: Create prerelease
        run: |
          start_tag=$(gh release list -L 2 --exclude-drafts | grep -v nightly | cut -d$'\t' -f3 | sed -n '1p')
@@ -145,7 +200,6 @@ jobs:
          *-archive/*.tar.gz \
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
      - name: Delete artifacts
        uses: geekyeggo/delete-artifact@v5
        with:
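To make the `join()` expressions above concrete: for a matrix entry like `["stateless", "parallel"]`, the two environment variables presumably resolve as sketched below (values inferred from the workflow; the artifact name follows from the `tar` line).

```bash
# FEATURES_CARGO — comma-joined, passed to cargo via --features
FEATURES_CARGO="stateless,parallel"
# FEATURES_TAG — dash-joined, used to name the release archive
FEATURES_TAG="stateless-parallel"
# Resulting archive for the x86_64 Linux target:
#   x86_64-unknown-linux-gnu-stateless-parallel-rln.tar.gz
```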
2 .github/workflows/sync-labels.yml (vendored)
@@ -9,7 +9,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
      - uses: micnncim/action-label-syncer@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
14 .gitignore (vendored)
@@ -1,17 +1,17 @@
 # Common files to ignore in Rust projects
 .DS_Store
 .idea
 *.log
 tmp/
 rln/pmtree_db
 
-# Generated by Cargo will have compiled files and executables
-/target
-
-# Generated by rln-cli
-rln-cli/database
+# Generated by Cargo
+# will have compiled files and executables
+debug/
+target/
 
 # Generated by Nix
-result/
+result
 
 # These are backup files generated by rustfmt
 **/*.rs.bk
205 CONTRIBUTING.md (new file)
@@ -0,0 +1,205 @@
# Contributing to Zerokit

Thank you for your interest in contributing to Zerokit!
This guide describes how the Zerokit team handles [Commits](#commits),
[Pull Requests](#pull-requests) and [Merging](#merging).

**Note:** We won't force external contributors to follow this verbatim,
but following these guidelines makes it much easier for us to accept your contributions.

## Getting Started

1. Fork the repository
2. Create a feature branch: `git checkout -b fix/your-bug-fix` or `git checkout -b feat/your-feature-name`
3. Make your changes following our guidelines
4. Ensure relevant tests pass (see [testing guidelines](#building-and-testing))
5. Commit your changes (signed commits are highly encouraged - see [commit guidelines](#commits))
6. Push and create a Pull Request

## Development Setup

### Prerequisites

Install the required dependencies:

```bash
make installdeps
```

Or use Nix:

```bash
nix develop
```

### Building and Testing

```bash
# Build all crates
make build

# Run standard tests
make test

# Module-specific testing
cd rln && cargo make test_stateless        # Test stateless features
cd rln-wasm && cargo make test_browser     # Test in browser headless mode
cd rln-wasm && cargo make test_parallel    # Test parallel features
```

Choose the appropriate test commands based on your changes:

- Core RLN changes: `make test`
- Stateless features: `cargo make test_stateless`
- WASM/browser features: `cargo make test_browser`
- Parallel computation: `cargo make test_parallel`

### Tools

We recommend using the [markdownlint extension](https://marketplace.visualstudio.com/items?itemName=DavidAnson.vscode-markdownlint)
for VS Code to maintain consistent documentation formatting.

## Commits

We want to keep our commits small and focused.
This allows for easily reviewing individual commits and/or
splitting up pull requests when they grow too big.
Additionally, this allows us to merge smaller changes quicker and release more often.

**All commits must be GPG signed.**
This ensures the authenticity and integrity of contributions.
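As a minimal local-setup sketch (assuming you already have a GPG key; the key ID below is a placeholder):

```bash
# Tell git which key to sign with (replace with your own key ID)
git config --global user.signingkey <YOUR_KEY_ID>
# Sign every commit by default
git config --global commit.gpgsign true
# Or sign a single commit explicitly
git commit -S -m "feat(rln): add parallel witness calculation support"
```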
### Conventional Commits

When making the commit, write the commit message
following the [Conventional Commits (v1.0.0)](https://www.conventionalcommits.org/en/v1.0.0/) specification.
Following this convention allows us to provide an automated release process
that also generates a detailed changelog.

As described by the specification, our commit messages should be written as:

```markdown
<type>[optional scope]: <description>

[optional body]

[optional footer(s)]
```

Some examples of this pattern include:

```markdown
feat(rln): add parallel witness calculation support
```

```markdown
fix(rln-wasm): resolve memory leak in browser threading
```

```markdown
docs: update RLN protocol flow documentation
```

#### Scopes

Use scopes to improve the changelog:

- `rln` - Core RLN implementation
- `rln-cli` - Command-line interface
- `rln-wasm` - WebAssembly bindings
- `rln-wasm-utils` - WebAssembly utilities
- `utils` - Cryptographic utilities (Merkle trees, Poseidon hash)
- `ci` - Continuous integration

#### Breaking Changes

Mark breaking changes by adding `!` after the type:

```markdown
feat(rln)!: change proof generation API
```

## Pull Requests

Before creating a pull request, search for related issues.
If none exist, create an issue describing the problem you're solving.

### CI Flow

Our continuous integration automatically runs when you create a Pull Request:

- **Build verification**: All crates compile successfully
- **Test execution**: Comprehensive testing across all modules and feature combinations
- **Code formatting**: `cargo fmt` compliance
- **Linting**: `cargo clippy` checks
- **Cross-platform builds**: Testing on multiple platforms

Ensure the following commands pass before submitting:

```bash
# Format code
cargo fmt --all

# Check for common mistakes
cargo clippy --all-targets

# Run all tests
make test
```

### Adding Tests

Include tests for new functionality, for example via the commands sketched after this list:

- **Unit tests** for specific functions
- **Integration tests** for broader functionality
- **WASM tests** for browser compatibility
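A rough sketch of how each kind is typically exercised (the test name below is illustrative, not an actual test in the suite; the cargo-make tasks match the CI recipes shown earlier in this page):

```bash
# Unit/integration tests for a single crate, filtered by name
cargo test -p rln --release test_merkle_proof_roundtrip

# Full per-crate suite via cargo-make, as CI runs it
(cd rln && cargo make test --release)

# Browser-based WASM tests in headless mode
(cd rln-wasm && cargo make test_browser --release)
```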
### Typos and Small Changes

For minor fixes like typos, please report them as issues instead of opening PRs.
This helps us manage resources effectively and ensures meaningful contributions.

## Merging

We use "squash merging" for all pull requests.
This combines all commits into one commit, so keep pull requests small and focused.

### Requirements

- CI checks must pass
- At least one maintainer review and approval
- All review feedback addressed

### Squash Guidelines

When squashing, update the commit title to be a proper Conventional Commit and
include any other relevant commits in the body:

```markdown
feat(rln): implement parallel witness calculation (#123)

fix(tests): resolve memory leak in test suite
chore(ci): update rust toolchain version
```

## Roadmap Alignment

Please refer to our [project roadmap](https://roadmap.vac.dev/) for current development priorities.
When contributing, consider how your changes align with these strategic goals.

## Getting Help

- **Issues**: Create a GitHub issue for bugs or feature requests
- **Discussions**: Use GitHub Discussions for questions
- **Documentation**: Check existing docs and unit tests for examples

## License

By contributing to Zerokit, you agree that your contributions will be licensed under both the MIT and
Apache 2.0 licenses, consistent with the project's dual licensing.

## Additional Resources

- [Conventional Commits Guide](https://www.conventionalcommits.org/en/v1.0.0/)
- [Project GitHub Repository](https://github.com/vacp2p/zerokit)
992 Cargo.lock (generated) — file diff suppressed because it is too large
Cargo.toml

@@ -1,6 +1,6 @@
 [workspace]
-members = ["rln", "rln-cli", "rln-wasm", "utils"]
-default-members = ["rln", "rln-cli", "rln-wasm", "utils"]
+members = ["rln", "utils"]
+exclude = ["rln-cli", "rln-wasm", "rln-wasm-utils"]
 resolver = "2"
 
 # Compilation profile for any non-workspace member.
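With `rln-cli`, `rln-wasm`, and `rln-wasm-utils` excluded from the workspace, they presumably build as standalone packages from their own directories rather than via `-p` from the root:

```bash
# Workspace members build from the repository root
cargo build --release -p rln

# Excluded crates are built from inside their own directory
(cd rln-cli && cargo build --release)
```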
32 Makefile
@@ -1,6 +1,6 @@
 .PHONY: all installdeps build test bench clean
 
-all: .pre-build build
+all: installdeps build
 
 .fetch-submodules:
 	@git submodule update --init --recursive
@@ -13,30 +13,26 @@ endif
 
 installdeps: .pre-build
 ifeq ($(shell uname),Darwin)
 	@brew update
-	@brew install cmake ninja
+	@brew install ninja binaryen
 else ifeq ($(shell uname),Linux)
 	@sudo apt-get update
-	@sudo apt-get install -y cmake ninja-build
-endif
-	@if [ ! -d "$$HOME/.nvm" ]; then \
-		curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.2/install.sh | bash; \
+	@if [ -f /etc/os-release ] && grep -q "ID=nixos" /etc/os-release; then \
+		echo "Detected NixOS, skipping apt-get installation."; \
 	else \
+		sudo apt-get install -y cmake ninja-build binaryen; \
 	fi
-	@bash -c 'export NVM_DIR="$$HOME/.nvm" && \
-		[ -s "$$NVM_DIR/nvm.sh" ] && \. "$$NVM_DIR/nvm.sh" && \
-		nvm install 22.14.0 && \
-		nvm use 22.14.0'
-	@curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
-	@echo "\033[1;32m>>> Now run this command to activate Node.js 22.14.0: \033[1;33msource $$HOME/.nvm/nvm.sh && nvm use 22.14.0\033[0m"
 endif
+	@which wasm-pack > /dev/null && wasm-pack --version | grep -q "0.13.1" || cargo install wasm-pack --version=0.13.1
+	@test -s "$$HOME/.nvm/nvm.sh" || curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.2/install.sh | bash
+	@bash -c '. "$$HOME/.nvm/nvm.sh"; [ "$$(node -v 2>/dev/null)" = "v22.14.0" ] || nvm install 22.14.0; nvm use 22.14.0; nvm alias default 22.14.0'
 
-build: .pre-build
+build: installdeps
 	@cargo make build
 
-test: .pre-build
+test: build
 	@cargo make test
 
-bench: .pre-build
+bench: build
 	@cargo make bench
 
 clean:
 	@cargo clean
19 README.md
@@ -12,7 +12,8 @@ A collection of Zero Knowledge modules written in Rust and designed to be used in other systems
 Zerokit provides zero-knowledge cryptographic primitives with a focus on performance, security, and usability.
 The current focus is on Rate-Limiting Nullifier [RLN](https://github.com/Rate-Limiting-Nullifier) implementation.
 
-Current implementation is based on the following [specification](https://github.com/vacp2p/rfc-index/blob/main/vac/raw/rln-v2.md)
+Current implementation is based on the following
+[specification](https://github.com/vacp2p/rfc-index/blob/main/vac/raw/rln-v2.md)
 and focused on RLNv2 which allows to set a rate limit for the number of messages that can be sent by a user.
 
 ## Features
@@ -24,19 +25,23 @@ and focused on RLNv2 which allows to set a rate limit for the number of messages
 
 ## Architecture
 
-Zerokit currently focuses on RLN (Rate-Limiting Nullifier) implementation using [Circom](https://iden3.io/circom) circuits through ark-circom, providing an alternative to existing native Rust implementations.
+Zerokit currently focuses on RLN (Rate-Limiting Nullifier) implementation using [Circom](https://iden3.io/circom)
+circuits through ark-circom, providing an alternative to existing native Rust implementations.
 
 ## Build and Test
 
 > [!IMPORTANT]
 > For WASM support or x32 architecture builds, use version `0.6.1`. The current version has dependency issues for these platforms. WASM support will return in a future release.
 
 ### Install Dependencies
 
 ```bash
 make installdeps
 ```
 
 #### Use Nix to install dependencies
 
 ```bash
 nix develop
 ```
 
 ### Build and Test All Crates
 
 ```bash
@@ -69,8 +74,8 @@ The execution graph file used by this code has been generated by means of the same circom-witnesscalc
 
 > [!IMPORTANT]
 > The circom-witnesscalc code fragments have been borrowed instead of depending on this crate,
-because its types of input and output data were incompatible with the corresponding zerokit code fragments,
-and circom-witnesscalc has some dependencies, which are redundant for our purpose.
+> because its types of input and output data were incompatible with the corresponding zerokit code fragments,
+> and circom-witnesscalc has some dependencies, which are redundant for our purpose.
 
 ## Documentation
23 flake.lock (generated)
@@ -18,7 +18,28 @@
     },
     "root": {
       "inputs": {
-        "nixpkgs": "nixpkgs"
+        "nixpkgs": "nixpkgs",
+        "rust-overlay": "rust-overlay"
       }
     },
+    "rust-overlay": {
+      "inputs": {
+        "nixpkgs": [
+          "nixpkgs"
+        ]
+      },
+      "locked": {
+        "lastModified": 1748399823,
+        "narHash": "sha256-kahD8D5hOXOsGbNdoLLnqCL887cjHkx98Izc37nDjlA=",
+        "owner": "oxalica",
+        "repo": "rust-overlay",
+        "rev": "d68a69dc71bc19beb3479800392112c2f6218159",
+        "type": "github"
+      },
+      "original": {
+        "owner": "oxalica",
+        "repo": "rust-overlay",
+        "type": "github"
+      }
+    }
   },
45 flake.nix
@@ -4,9 +4,13 @@
   inputs = {
     # Version 24.11
     nixpkgs.url = "github:NixOS/nixpkgs?rev=f44bd8ca21e026135061a0a57dcf3d0775b67a49";
+    rust-overlay = {
+      url = "github:oxalica/rust-overlay";
+      inputs.nixpkgs.follows = "nixpkgs";
+    };
   };
 
-  outputs = { self, nixpkgs }:
+  outputs = { self, nixpkgs, rust-overlay }:
     let
       stableSystems = [
         "x86_64-linux" "aarch64-linux"
@@ -15,24 +19,51 @@
         "i686-windows"
       ];
       forAllSystems = nixpkgs.lib.genAttrs stableSystems;
-      pkgsFor = forAllSystems (system: import nixpkgs { inherit system; });
+      overlays = [
+        (import rust-overlay)
+        (f: p: { inherit rust-overlay; })
+      ];
+      pkgsFor = forAllSystems (system: import nixpkgs { inherit system overlays; });
     in rec
     {
       packages = forAllSystems (system: let
         pkgs = pkgsFor.${system};
+        buildPackage = pkgs.callPackage ./nix/default.nix;
+        buildRln = (buildPackage { src = self; project = "rln"; }).override;
       in rec {
-        zerokit-android-arm64 = pkgs.callPackage ./nix/default.nix { target-platform="aarch64-android-prebuilt"; rust-target= "aarch64-linux-android"; };
-        default = zerokit-android-arm64;
+        rln = buildRln { };
+
+        rln-linux-arm64 = buildRln {
+          target-platform = "aarch64-multiplatform";
+          rust-target = "aarch64-unknown-linux-gnu";
+        };
+
+        rln-android-arm64 = buildRln {
+          target-platform = "aarch64-android-prebuilt";
+          rust-target = "aarch64-linux-android";
+        };
+
+        rln-ios-arm64 = buildRln {
+          target-platform = "aarch64-darwin";
+          rust-target = "aarch64-apple-ios";
+        };
+
+        # TODO: Remove legacy name for RLN android library
+        zerokit-android-arm64 = rln-android-arm64;
+
+        default = rln;
       });
 
       devShells = forAllSystems (system: let
         pkgs = pkgsFor.${system};
       in {
         default = pkgs.mkShell {
-          inputsFrom = [
-            packages.${system}.default
+          buildInputs = with pkgs; [
+            git cmake cargo-make rustup
+            binaryen ninja gnuplot
+            rust-bin.stable.latest.default
           ];
         };
       });
     };
 }
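Assuming the flake outputs above, a specific package can presumably be built by attribute name, for example:

```bash
# Build the default (host) rln library
nix build .#rln
# Cross-compile the Android arm64 variant defined above
nix build .#rln-android-arm64
# Enter the dev shell declared in devShells
nix develop
```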
nix/default.nix

@@ -1,35 +1,64 @@
 {
   pkgs,
-  target-platform ? "aarch64-android-prebuilt",
-  rust-target ? "aarch64-linux-android",
+  rust-overlay,
+  project,
+  src ? ../.,
+  release ? true,
+  target-platform ? null,
+  rust-target ? null,
+  features ? null,
 }:
 
-pkgs.pkgsCross.${target-platform}.rustPlatform.buildRustPackage {
-  pname = "zerokit";
-  version = "nightly";
+let
+  # Use cross-compilation if target-platform is specified.
+  targetPlatformPkgs = if target-platform != null
+    then pkgs.pkgsCross.${target-platform}
+    else pkgs;
 
-  src = ../.;
+  rust-bin = rust-overlay.lib.mkRustBin { } targetPlatformPkgs.buildPackages;
+
+  # Use Rust and Cargo versions from rust-overlay.
+  rustPlatform = targetPlatformPkgs.makeRustPlatform {
+    cargo = rust-bin.stable.latest.minimal;
+    rustc = rust-bin.stable.latest.minimal;
+  };
+in rustPlatform.buildRustPackage {
+  pname = "zerokit";
+  version = if src ? rev then src.rev else "nightly";
+
+  # Improve caching of sources
+  src = builtins.path { path = src; name = "zerokit"; };
 
   cargoLock = {
-    lockFile = ../Cargo.lock;
+    lockFile = src + "/Cargo.lock";
     allowBuiltinFetchGit = true;
   };
 
+  nativeBuildInputs = [ pkgs.rust-cbindgen ];
+
   doCheck = false;
 
   CARGO_HOME = "/tmp";
 
   buildPhase = ''
-    pushd rln
-    cargo rustc --crate-type=cdylib --release --lib --target=${rust-target}
-    popd
+    cargo build --lib \
+      ${if release then "--release" else ""} \
+      ${if rust-target != null then "--target=${rust-target}" else ""} \
+      ${if features != null then "--features=${features}" else ""} \
+      --manifest-path ${project}/Cargo.toml
   '';
 
   installPhase = ''
-    mkdir -p $out/
-    cp ./target/${rust-target}/release/librln.so $out/
+    set -eu
+    mkdir -p $out/lib
+    find target -type f -name 'librln.*' -not -path '*/deps/*' -exec cp -v '{}' "$out/lib/" \;
+    mkdir -p $out/include
+    cbindgen ${src}/rln -l c > "$out/include/rln.h"
   '';
 
   meta = with pkgs.lib; {
     description = "Zerokit";
     license = licenses.mit;
   };
 }
rln-cli/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "rln-cli"
-version = "0.4.0"
+version = "0.5.0"
 edition = "2021"
 
 [[example]]
@@ -13,15 +13,13 @@ path = "src/examples/stateless.rs"
 required-features = ["stateless"]
 
 [dependencies]
-rln = { path = "../rln", default-features = false }
-zerokit_utils = { path = "../utils" }
-clap = { version = "4.5.35", features = ["cargo", "derive", "env"] }
-clap_derive = { version = "4.5.32" }
-color-eyre = "0.6.3"
-serde_json = "1.0"
+rln = { path = "../rln", version = "0.9.0", default-features = false }
+zerokit_utils = { path = "../utils", version = "0.7.0", default-features = false }
+clap = { version = "4.5.41", features = ["cargo", "derive", "env"] }
+color-eyre = "0.6.5"
+serde_json = "1.0.141"
 serde = { version = "1.0", features = ["derive"] }
 
 [features]
-default = []
-arkzkey = ["rln/arkzkey"]
-stateless = ["rln/stateless"]
+default = ["rln/pmtree-ft", "rln/parallel"]
+stateless = ["rln/stateless", "rln/parallel"]
rln-cli/README.md

@@ -27,16 +27,6 @@ If the configuration file is empty, default settings will be used, but the tree
 
 We recommend using the example config, as all commands (except `new` and `create-with-params`) require an initialized RLN instance.
 
-## Feature Flags
-
-The CLI supports optional features. To enable the **arkzkey** feature, run:
-
-```bash
-cargo run --features arkzkey -- <SUBCOMMAND> [OPTIONS]
-```
-
 For more details, refer to the [Zerokit RLN Module](../rln/README.md) documentation.
 
 ## Relay Example
 
 The following [Example](src/examples/relay.rs) demonstrates how RLN enables spam prevention in anonymous environments for multiple users.
@@ -47,15 +37,9 @@ You can run the example using the following command:
 cargo run --example relay
 ```
 
-or with the **arkzkey** feature flag:
-
-```bash
-cargo run --example relay --features arkzkey
-```
-
-You can also change **MESSAGE_LIMIT** and **TREEE_HEIGHT** in the [relay.rs](src/examples/relay.rs) file to see how the RLN instance behaves with different parameters.
+You can also change **MESSAGE_LIMIT** and **TREE_DEPTH** in the [relay.rs](src/examples/relay.rs) file to see how the RLN instance behaves with different parameters.
 
-The customize **TREEE_HEIGHT** constant differs from the default value of `20` should follow [Custom Circuit Compilation](../rln/README.md#advanced-custom-circuit-compilation) instructions.
+Customizing the **TREE_DEPTH** constant to a value other than the default `20` requires following the [Custom Circuit Compilation](../rln/README.md#advanced-custom-circuit-compilation) instructions.
 
 ## Stateless Example
 
@@ -66,13 +50,7 @@ This example functions similarly to the [Relay Example](#relay-example) but uses a stateless RLN instance
 You can run the example using the following command:
 
 ```bash
-cargo run --example stateless --features stateless
-```
-
-or with the **arkzkey** feature flag:
-
-```bash
-cargo run --example stateless --features stateless,arkzkey
+cargo run --example stateless --no-default-features --features stateless
 ```
 
 ## CLI Commands
@@ -82,19 +60,19 @@ cargo run --example stateless --no-default-features --features stateless
 To initialize a new RLN instance:
 
 ```bash
-cargo run new --tree-height <HEIGHT>
+cargo run new --tree-depth <DEPTH>
 ```
 
 To initialize an RLN instance with custom parameters:
 
 ```bash
-cargo run new-with-params --resources-path <PATH> --tree-height <HEIGHT>
+cargo run new-with-params --resources-path <PATH> --tree-depth <DEPTH>
 ```
 
-To update the Merkle tree height:
+To update the Merkle tree depth:
 
 ```bash
-cargo run set-tree --tree-height <HEIGHT>
+cargo run set-tree --tree-depth <DEPTH>
 ```
 
 ### Leaf Operations
@@ -1,11 +1,9 @@
 {
-  "tree_config": {
-    "path": "database",
-    "temporary": false,
-    "cache_capacity": 150000,
-    "flush_every_ms": 12000,
-    "mode": "HighThroughput",
-    "use_compression": false
-  },
-  "tree_height": 20
+  "path": "database",
+  "temporary": false,
+  "cache_capacity": 1073741824,
+  "flush_every_ms": 500,
+  "mode": "HighThroughput",
+  "use_compression": false,
+  "tree_depth": 20
 }
rln-cli/src/commands.rs

@@ -1,23 +1,23 @@
 use std::path::PathBuf;
 
 use clap::Subcommand;
-use rln::circuit::TEST_TREE_HEIGHT;
+use rln::circuit::TEST_TREE_DEPTH;
 
 #[derive(Subcommand)]
 pub(crate) enum Commands {
     New {
-        #[arg(short, long, default_value_t = TEST_TREE_HEIGHT)]
-        tree_height: usize,
+        #[arg(short, long, default_value_t = TEST_TREE_DEPTH)]
+        tree_depth: usize,
     },
     NewWithParams {
-        #[arg(short, long, default_value_t = TEST_TREE_HEIGHT)]
-        tree_height: usize,
-        #[arg(short, long, default_value = "../rln/resources/tree_height_20")]
+        #[arg(short, long, default_value_t = TEST_TREE_DEPTH)]
+        tree_depth: usize,
+        #[arg(short, long, default_value = "../rln/resources/tree_depth_20")]
         resources_path: PathBuf,
     },
     SetTree {
-        #[arg(short, long, default_value_t = TEST_TREE_HEIGHT)]
-        tree_height: usize,
+        #[arg(short, long, default_value_t = TEST_TREE_DEPTH)]
+        tree_depth: usize,
     },
     SetLeaf {
         #[arg(short, long)]
rln-cli/src/config.rs

@@ -6,15 +6,9 @@ use serde_json::Value;
 
 pub const RLN_CONFIG_PATH: &str = "RLN_CONFIG_PATH";
 
-#[derive(Default, Serialize, Deserialize)]
+#[derive(Serialize, Deserialize)]
 pub(crate) struct Config {
-    pub inner: Option<InnerConfig>,
-}
-
-#[derive(Default, Serialize, Deserialize)]
-pub(crate) struct InnerConfig {
-    pub tree_height: usize,
-    pub tree_config: Value,
+    pub tree_config: Option<String>,
 }
 
 impl Config {
@@ -25,14 +19,13 @@ impl Config {
             let mut file = File::open(path)?;
             let mut contents = String::new();
             file.read_to_string(&mut contents)?;
-            let inner: InnerConfig = serde_json::from_str(&contents)?;
-            Ok(Config { inner: Some(inner) })
+            let tree_config: Value = serde_json::from_str(&contents)?;
+            println!("Initializing RLN with custom config");
+            Ok(Config {
+                tree_config: Some(tree_config.to_string()),
+            })
         }
-        Err(_) => Ok(Config::default()),
+        Err(_) => Ok(Config { tree_config: None }),
     }
 }
-
-    pub(crate) fn as_bytes(&self) -> Vec<u8> {
-        serde_json::to_string(&self.inner).unwrap().into_bytes()
-    }
 }
rln-cli/src/examples/relay.rs

@@ -6,18 +6,18 @@ use std::{
 };
 
 use clap::{Parser, Subcommand};
-use color_eyre::{eyre::eyre, Result};
+use color_eyre::{eyre::eyre, Report, Result};
 use rln::{
     circuit::Fr,
-    hashers::{hash_to_field, poseidon_hash},
+    hashers::{hash_to_field_le, poseidon_hash},
     protocol::{keygen, prepare_prove_input, prepare_verify_input},
     public::RLN,
-    utils::{bytes_le_to_fr, fr_to_bytes_le, generate_input_buffer},
+    utils::{fr_to_bytes_le, generate_input_buffer, IdSecret},
 };
 
 const MESSAGE_LIMIT: u32 = 1;
 
-const TREEE_HEIGHT: usize = 20;
+const TREE_DEPTH: usize = 20;
 
 #[derive(Parser)]
 #[command(author, version, about, long_about = None)]
@@ -44,7 +44,7 @@ enum Commands {
 
 #[derive(Debug, Clone)]
 struct Identity {
-    identity_secret_hash: Fr,
+    identity_secret_hash: IdSecret,
     id_commitment: Fr,
 }
 
@@ -67,11 +67,8 @@ struct RLNSystem {
 impl RLNSystem {
     fn new() -> Result<Self> {
         let mut resources: Vec<Vec<u8>> = Vec::new();
-        let resources_path: PathBuf = format!("../rln/resources/tree_height_{TREEE_HEIGHT}").into();
-        #[cfg(feature = "arkzkey")]
-        let filenames = ["rln_final.arkzkey", "graph.bin"];
-        #[cfg(not(feature = "arkzkey"))]
+        let resources_path: PathBuf = format!("../rln/resources/tree_depth_{TREE_DEPTH}").into();
         let filenames = ["rln_final.zkey", "graph.bin"];
         for filename in filenames {
             let fullpath = resources_path.join(Path::new(filename));
             let mut file = File::open(&fullpath)?;
@@ -81,7 +78,7 @@ impl RLNSystem {
             resources.push(output_buffer);
         }
         let rln = RLN::new_with_params(
-            TREEE_HEIGHT,
+            TREE_DEPTH,
             resources[0].clone(),
             resources[1].clone(),
             generate_input_buffer(),
@@ -103,7 +100,7 @@ impl RLNSystem {
         println!("Registered users:");
         for (index, identity) in &self.local_identities {
             println!("User Index: {index}");
-            println!("+ Identity Secret Hash: {}", identity.identity_secret_hash);
+            println!("+ Identity Secret Hash: {}", *identity.identity_secret_hash);
             println!("+ Identity Commitment: {}", identity.id_commitment);
             println!();
         }
@@ -118,12 +115,12 @@ impl RLNSystem {
         match self.rln.set_next_leaf(&mut buffer) {
             Ok(_) => {
                 println!("Registered User Index: {index}");
-                println!("+ Identity secret hash: {}", identity.identity_secret_hash);
+                println!("+ Identity secret hash: {}", *identity.identity_secret_hash);
                 println!("+ Identity commitment: {},", identity.id_commitment);
                 self.local_identities.insert(index, identity);
             }
             Err(_) => {
-                println!("Maximum user limit reached: 2^{TREEE_HEIGHT}");
+                println!("Maximum user limit reached: 2^{TREE_DEPTH}");
             }
         };
 
@@ -143,7 +140,7 @@ impl RLNSystem {
         };
 
         let serialized = prepare_prove_input(
-            identity.identity_secret_hash,
+            identity.identity_secret_hash.clone(),
             user_index,
             Fr::from(MESSAGE_LIMIT),
             Fr::from(message_id),
@@ -182,7 +179,7 @@ impl RLNSystem {
             Ok(false) => {
                 println!("Verification failed: message_id must be unique within the epoch and satisfy 0 <= message_id < MESSAGE_LIMIT: {MESSAGE_LIMIT}");
             }
-            Err(err) => return Err(err),
+            Err(err) => return Err(Report::new(err)),
         }
         Ok(())
     }
@@ -211,7 +208,7 @@ impl RLNSystem {
         {
             Ok(_) => {
                 let output_data = output.into_inner();
-                let (leaked_identity_secret_hash, _) = bytes_le_to_fr(&output_data);
+                let (leaked_identity_secret_hash, _) = IdSecret::from_bytes_le(&output_data);
 
                 if let Some((user_index, identity)) = self
                     .local_identities
@@ -221,20 +218,21 @@ impl RLNSystem {
                     })
                     .map(|(index, identity)| (*index, identity))
                 {
-                    let real_identity_secret_hash = identity.identity_secret_hash;
+                    let real_identity_secret_hash = identity.identity_secret_hash.clone();
                     if leaked_identity_secret_hash != real_identity_secret_hash {
-                        Err(eyre!("identity secret hash mismatch {leaked_identity_secret_hash} != {real_identity_secret_hash}"))
+                        Err(eyre!("identity secret hash mismatch: leaked_identity_secret_hash != real_identity_secret_hash"))
                     } else {
-                        println!("DUPLICATE message ID detected! Reveal identity secret hash: {leaked_identity_secret_hash}");
+                        println!(
+                            "DUPLICATE message ID detected! Reveal identity secret hash: {}",
+                            *leaked_identity_secret_hash
+                        );
                         self.local_identities.remove(&user_index);
                         self.rln.delete_leaf(user_index)?;
                         println!("User index {user_index} has been SLASHED");
                         Ok(())
                     }
                 } else {
-                    Err(eyre!(
-                        "user identity secret hash {leaked_identity_secret_hash} not found"
-                    ))
+                    Err(eyre!("user identity secret hash ******** not found"))
                 }
             }
             Err(err) => Err(eyre!("Failed to recover identity secret: {err}")),
@@ -246,8 +244,8 @@ fn main() -> Result<()> {
     println!("Initializing RLN instance...");
     print!("\x1B[2J\x1B[1;1H");
     let mut rln_system = RLNSystem::new()?;
-    let rln_epoch = hash_to_field(b"epoch");
-    let rln_identifier = hash_to_field(b"rln-identifier");
+    let rln_epoch = hash_to_field_le(b"epoch");
+    let rln_identifier = hash_to_field_le(b"rln-identifier");
     let external_nullifier = poseidon_hash(&[rln_epoch, rln_identifier]);
     println!("RLN Relay Example:");
     println!("Message Limit: {MESSAGE_LIMIT}");
rln-cli/src/examples/stateless.rs

@@ -1,4 +1,5 @@
 #![cfg(feature = "stateless")]
+
 use std::{
     collections::HashMap,
     io::{stdin, stdout, Cursor, Write},
@@ -7,14 +8,13 @@ use std::{
 use clap::{Parser, Subcommand};
 use color_eyre::{eyre::eyre, Result};
 use rln::{
-    circuit::{Fr, TEST_TREE_HEIGHT},
-    hashers::{hash_to_field, poseidon_hash},
-    poseidon_tree::PoseidonTree,
+    circuit::{Fr, TEST_TREE_DEPTH},
+    hashers::{hash_to_field_le, poseidon_hash, PoseidonHash},
     protocol::{keygen, prepare_verify_input, rln_witness_from_values, serialize_witness},
     public::RLN,
-    utils::{bytes_le_to_fr, fr_to_bytes_le},
+    utils::{fr_to_bytes_le, IdSecret},
 };
-use zerokit_utils::ZerokitMerkleTree;
+use zerokit_utils::{OptimalMerkleTree, ZerokitMerkleProof, ZerokitMerkleTree};
 
 const MESSAGE_LIMIT: u32 = 1;
 
@@ -45,7 +45,7 @@ enum Commands {
 
 #[derive(Debug, Clone)]
 struct Identity {
-    identity_secret_hash: Fr,
+    identity_secret_hash: IdSecret,
     id_commitment: Fr,
 }
 
@@ -61,7 +61,7 @@ impl Identity {
 
 struct RLNSystem {
     rln: RLN,
-    tree: PoseidonTree,
+    tree: OptimalMerkleTree<PoseidonHash>,
     used_nullifiers: HashMap<[u8; 32], Vec<u8>>,
     local_identities: HashMap<usize, Identity>,
 }
@@ -70,11 +70,12 @@ impl RLNSystem {
     fn new() -> Result<Self> {
         let rln = RLN::new()?;
         let default_leaf = Fr::from(0);
-        let tree = PoseidonTree::new(
-            TEST_TREE_HEIGHT,
+        let tree: OptimalMerkleTree<PoseidonHash> = OptimalMerkleTree::new(
+            TEST_TREE_DEPTH,
             default_leaf,
-            ConfigOf::<PoseidonTree>::default(),
-        )?;
+            ConfigOf::<OptimalMerkleTree<PoseidonHash>>::default(),
+        )
+        .unwrap();
 
         Ok(RLNSystem {
             rln,
@@ -93,7 +94,7 @@ impl RLNSystem {
         println!("Registered users:");
         for (index, identity) in &self.local_identities {
             println!("User Index: {index}");
-            println!("+ Identity Secret Hash: {}", identity.identity_secret_hash);
+            println!("+ Identity Secret Hash: {}", *identity.identity_secret_hash);
             println!("+ Identity Commitment: {}", identity.id_commitment);
             println!();
         }
@@ -107,7 +108,7 @@ impl RLNSystem {
         self.tree.update_next(rate_commitment)?;
 
         println!("Registered User Index: {index}");
-        println!("+ Identity secret hash: {}", identity.identity_secret_hash);
+        println!("+ Identity secret hash: {}", *identity.identity_secret_hash);
         println!("+ Identity commitment: {}", identity.id_commitment);
 
         self.local_identities.insert(index, identity);
@@ -127,11 +128,12 @@ impl RLNSystem {
         };
 
         let merkle_proof = self.tree.proof(user_index)?;
-        let x = hash_to_field(signal.as_bytes());
+        let x = hash_to_field_le(signal.as_bytes());
 
         let rln_witness = rln_witness_from_values(
-            identity.identity_secret_hash,
-            &merkle_proof,
+            identity.identity_secret_hash.clone(),
+            merkle_proof.get_path_elements(),
+            merkle_proof.get_path_index(),
             x,
             external_nullifier,
             Fr::from(MESSAGE_LIMIT),
@@ -208,7 +210,7 @@ impl RLNSystem {
         {
             Ok(_) => {
                 let output_data = output.into_inner();
-                let (leaked_identity_secret_hash, _) = bytes_le_to_fr(&output_data);
+                let (leaked_identity_secret_hash, _) = IdSecret::from_bytes_le(&output_data);
 
                 if let Some((user_index, identity)) = self
                     .local_identities
@@ -218,19 +220,19 @@ impl RLNSystem {
                     })
                     .map(|(index, identity)| (*index, identity))
                 {
-                    let real_identity_secret_hash = identity.identity_secret_hash;
+                    let real_identity_secret_hash = identity.identity_secret_hash.clone();
                     if leaked_identity_secret_hash != real_identity_secret_hash {
-                        Err(eyre!("identity secret hash mismatch {leaked_identity_secret_hash} != {real_identity_secret_hash}"))
+                        Err(eyre!("identity secret hash mismatch: leaked_identity_secret_hash != real_identity_secret_hash"))
                     } else {
-                        println!("DUPLICATE message ID detected! Reveal identity secret hash: {leaked_identity_secret_hash}");
+                        println!(
+                            "DUPLICATE message ID detected! Reveal identity secret hash: ********"
+                        );
                         self.local_identities.remove(&user_index);
                         println!("User index {user_index} has been SLASHED");
                         Ok(())
                     }
                 } else {
-                    Err(eyre!(
-                        "user identity secret hash {leaked_identity_secret_hash} not found"
-                    ))
+                    Err(eyre!("user identity secret hash ******** not found"))
                 }
             }
             Err(err) => Err(eyre!("Failed to recover identity secret: {err}")),
@@ -242,8 +244,8 @@ fn main() -> Result<()> {
     println!("Initializing RLN instance...");
     print!("\x1B[2J\x1B[1;1H");
     let mut rln_system = RLNSystem::new()?;
-    let rln_epoch = hash_to_field(b"epoch");
-    let rln_identifier = hash_to_field(b"rln-identifier");
+    let rln_epoch = hash_to_field_le(b"epoch");
+    let rln_identifier = hash_to_field_le(b"rln-identifier");
     let external_nullifier = poseidon_hash(&[rln_epoch, rln_identifier]);
     println!("RLN Stateless Relay Example:");
     println!("Message Limit: {MESSAGE_LIMIT}");
@@ -7,7 +7,7 @@ use std::{
use clap::Parser;
use color_eyre::{eyre::Report, Result};
use commands::Commands;
use config::{Config, InnerConfig};
use config::Config;
use rln::{
    public::RLN,
    utils::{bytes_le_to_fr, bytes_le_to_vec_fr},
@@ -35,43 +35,36 @@ fn main() -> Result<()> {
    };

    match cli.command {
        Some(Commands::New { tree_height }) => {
        Some(Commands::New { tree_depth }) => {
            let config = Config::load_config()?;
            state.rln = if let Some(InnerConfig { tree_height, .. }) = config.inner {
            state.rln = if let Some(tree_config) = config.tree_config {
                println!("Initializing RLN with custom config");
                Some(RLN::new(tree_height, Cursor::new(config.as_bytes()))?)
                Some(RLN::new(tree_depth, Cursor::new(tree_config.as_bytes()))?)
            } else {
                println!("Initializing RLN with default config");
                Some(RLN::new(tree_height, Cursor::new(json!({}).to_string()))?)
                Some(RLN::new(tree_depth, Cursor::new(json!({}).to_string()))?)
            };
            Ok(())
        }
        Some(Commands::NewWithParams {
            tree_height,
            tree_depth,
            resources_path,
        }) => {
            let mut resources: Vec<Vec<u8>> = Vec::new();
            #[cfg(feature = "arkzkey")]
            let filenames = ["rln_final.arkzkey", "graph.bin"];
            #[cfg(not(feature = "arkzkey"))]
            let filenames = ["rln_final.zkey", "graph.bin"];
            for filename in filenames {
                let fullpath = resources_path.join(Path::new(filename));
                let mut file = File::open(&fullpath)?;
                let metadata = std::fs::metadata(&fullpath)?;
                let mut output_buffer = vec![0; metadata.len() as usize];
                file.read_exact(&mut output_buffer)?;
                resources.push(output_buffer);
                let mut buffer = vec![0; metadata.len() as usize];
                file.read_exact(&mut buffer)?;
                resources.push(buffer);
            }
            let config = Config::load_config()?;
            if let Some(InnerConfig {
                tree_height,
                tree_config,
            }) = config.inner
            {
            if let Some(tree_config) = config.tree_config {
                println!("Initializing RLN with custom config");
                state.rln = Some(RLN::new_with_params(
                    tree_height,
                    tree_depth,
                    resources[0].clone(),
                    resources[1].clone(),
                    Cursor::new(tree_config.to_string().as_bytes()),
@@ -79,7 +72,7 @@ fn main() -> Result<()> {
            } else {
                println!("Initializing RLN with default config");
                state.rln = Some(RLN::new_with_params(
                    tree_height,
                    tree_depth,
                    resources[0].clone(),
                    resources[1].clone(),
                    Cursor::new(json!({}).to_string()),
@@ -87,11 +80,11 @@ fn main() -> Result<()> {
            };
            Ok(())
        }
        Some(Commands::SetTree { tree_height }) => {
        Some(Commands::SetTree { tree_depth }) => {
            state
                .rln
                .ok_or(Report::msg("no RLN instance initialized"))?
                .set_tree(tree_height)?;
                .set_tree(tree_depth)?;
            Ok(())
        }
        Some(Commands::SetLeaf { index, input }) => {
@@ -141,7 +134,7 @@ fn main() -> Result<()> {
                .ok_or(Report::msg("no RLN instance initialized"))?
                .prove(input_data, &mut output_buffer)?;
            let proof = output_buffer.into_inner();
            println!("proof: {:?}", proof);
            println!("proof: {proof:?}");
            Ok(())
        }
        Some(Commands::Verify { input }) => {
@@ -150,7 +143,7 @@ fn main() -> Result<()> {
                .rln
                .ok_or(Report::msg("no RLN instance initialized"))?
                .verify(input_data)?;
            println!("verified: {:?}", verified);
            println!("verified: {verified:?}");
            Ok(())
        }
        Some(Commands::GenerateProof { input }) => {
@@ -161,7 +154,7 @@ fn main() -> Result<()> {
                .ok_or(Report::msg("no RLN instance initialized"))?
                .generate_rln_proof(input_data, &mut output_buffer)?;
            let proof = output_buffer.into_inner();
            println!("proof: {:?}", proof);
            println!("proof: {proof:?}");
            Ok(())
        }
        Some(Commands::VerifyWithRoots { input, roots }) => {
@@ -193,7 +186,7 @@ fn main() -> Result<()> {
            let output_buffer_inner = output_buffer.into_inner();
            let (path_elements, _) = bytes_le_to_vec_fr(&output_buffer_inner)?;
            for (index, element) in path_elements.iter().enumerate() {
                println!("path element {}: {}", index, element);
                println!("path element {index}: {element}");
            }
            Ok(())
        }

@@ -1,9 +1,10 @@
use std::io::Cursor;

use color_eyre::Result;
use rln::public::RLN;
use rln::{circuit::TEST_TREE_DEPTH, public::RLN};
use serde_json::Value;

use crate::config::{Config, InnerConfig};
use crate::config::Config;

#[derive(Default)]
pub(crate) struct State {
@@ -13,8 +14,15 @@ pub(crate) struct State {
impl State {
    pub(crate) fn load_state() -> Result<State> {
        let config = Config::load_config()?;
        let rln = if let Some(InnerConfig { tree_height, .. }) = config.inner {
            Some(RLN::new(tree_height, Cursor::new(config.as_bytes()))?)
        let rln = if let Some(tree_config) = config.tree_config {
            let config_json: Value = serde_json::from_str(&tree_config)?;
            let tree_depth = config_json["tree_depth"]
                .as_u64()
                .unwrap_or(TEST_TREE_DEPTH as u64);
            Some(RLN::new(
                tree_depth as usize,
                Cursor::new(tree_config.as_bytes()),
            )?)
        } else {
            None
        };

21 rln-wasm-utils/.gitignore vendored Normal file
@@ -0,0 +1,21 @@
# Common files to ignore in Rust projects
.DS_Store
.idea
*.log
tmp/

# Generated by Cargo will have compiled files and executables
/target
Cargo.lock

# Generated by rln-wasm
pkg/

# Generated by Nix
result

# These are backup files generated by rustfmt
**/*.rs.bk

# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
35 rln-wasm-utils/Cargo.toml Normal file
@@ -0,0 +1,35 @@
[package]
name = "rln-wasm-utils"
version = "0.1.0"
edition = "2024"


[lib]
crate-type = ["cdylib", "rlib"]

[dependencies]
# TODO: remove this once we have a proper release
rln = { path = "../rln", version = "0.9.0", default-features = false, features = ["stateless"] }
js-sys = "0.3.77"
wasm-bindgen = "0.2.100"
rand = "0.8.5"

# The `console_error_panic_hook` crate provides better debugging of panics by
# logging them with `console.error`. This is great for development, but requires
# all the `std::fmt` and `std::panicking` infrastructure, so isn't great for
# code size when deploying.
console_error_panic_hook = { version = "0.1.7", optional = true }

[target.'cfg(target_arch = "wasm32")'.dependencies]
getrandom = { version = "0.2.16", features = ["js"] }

[dev-dependencies]
wasm-bindgen-test = "0.3.50"
web-sys = { version = "0.3.77", features = ["console"] }
ark-std = { version = "0.5.0", default-features = false }

[features]
default = ["console_error_panic_hook"]

[package.metadata.docs.rs]
all-features = true
36 rln-wasm-utils/Makefile.toml Normal file
@@ -0,0 +1,36 @@
[tasks.build]
clear = true
dependencies = ["pack_build", "pack_rename", "pack_resize"]

[tasks.pack_build]
command = "wasm-pack"
args = ["build", "--release", "--target", "web", "--scope", "waku"]

[tasks.pack_rename]
script = "sed -i.bak 's/rln-wasm-utils/zerokit-rln-wasm-utils/g' pkg/package.json && rm pkg/package.json.bak"

[tasks.pack_resize]
command = "wasm-opt"
args = [
    "pkg/rln_wasm_utils_bg.wasm",
    "-Oz",
    "--strip-debug",
    "--strip-dwarf",
    "--remove-unused-module-elements",
    "--vacuum",
    "-o",
    "pkg/rln_wasm_utils_bg.wasm",
]

[tasks.test]
command = "wasm-pack"
args = [
    "test",
    "--release",
    "--node",
    "--target",
    "wasm32-unknown-unknown",
    "--",
    "--nocapture",
]
dependencies = ["build"]
206 rln-wasm-utils/README.md Normal file
@@ -0,0 +1,206 @@
# RLN WASM Utils

[![npm version](https://badge.fury.io/js/@waku%2Fzerokit-rln-wasm-utils.svg)](https://badge.fury.io/js/@waku%2Fzerokit-rln-wasm-utils)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![License: Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)

The Zerokit RLN WASM Utils Module provides WebAssembly bindings for Rate-Limiting Nullifier [RLN](https://rfc.vac.dev/spec/32/) cryptographic primitives.
This module offers comprehensive functionality for identity generation and hashing needed for RLN applications.

## Features

### Identity Generation

- **Random Identity Generation**: Generate cryptographically secure random identities
- **Seeded Identity Generation**: Generate deterministic identities from seeds
- **Extended Identity Generation**: Generate extended identities with additional parameters
- **Seeded Extended Identity Generation**: Generate deterministic extended identities from seeds
- **Endianness Support**: Both little-endian and big-endian serialization support

### Hashing

- **Standard Hashing**: Hash arbitrary data to field elements
- **Poseidon Hashing**: Advanced cryptographic hashing using Poseidon hash function
- **Endianness Support**: Both little-endian and big-endian serialization support

## API Reference

### Identity Generation Functions

#### `generateMembershipKey(isLittleEndian: boolean): Uint8Array`

Generates a random membership key pair (identity secret and commitment).

**Inputs:**

- `isLittleEndian`: Boolean indicating endianness for serialization

**Outputs:** Serialized identity pair as `Uint8Array` in corresponding endianness

#### `generateExtendedMembershipKey(isLittleEndian: boolean): Uint8Array`

Generates an extended membership key with additional parameters.

**Inputs:**

- `isLittleEndian`: Boolean indicating endianness for serialization

**Outputs:** Serialized extended identity tuple as `Uint8Array` in corresponding endianness
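
For orientation, here is a sketch of consuming this output on the JavaScript side. It assumes the tuple is serialized as four consecutive 32-byte values in the order identity trapdoor, identity nullifier, identity secret hash, identity commitment; the helper below is illustrative, so verify the layout against the `rln` crate's serialization before relying on it.

```javascript
// Hypothetical helper: splits the 128-byte output of generateExtendedMembershipKey.
// Assumed field order (not confirmed here): trapdoor, nullifier, secret hash, commitment.
function splitExtendedKey(bytes) {
  if (bytes.length !== 128) {
    throw new Error(`expected 128 bytes, got ${bytes.length}`);
  }
  const field = (i) => bytes.slice(32 * i, 32 * (i + 1));
  return {
    identityTrapdoor: field(0),
    identityNullifier: field(1),
    identitySecretHash: field(2),
    idCommitment: field(3),
  };
}
```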

#### `generateSeededMembershipKey(seed: Uint8Array, isLittleEndian: boolean): Uint8Array`

Generates a deterministic membership key from a seed.

**Inputs:**

- `seed`: Seed data as `Uint8Array`
- `isLittleEndian`: Boolean indicating endianness for serialization

**Outputs:** Serialized identity pair as `Uint8Array` in corresponding endianness

#### `generateSeededExtendedMembershipKey(seed: Uint8Array, isLittleEndian: boolean): Uint8Array`

Generates a deterministic extended membership key from a seed.

**Inputs:**

- `seed`: Seed data as `Uint8Array`
- `isLittleEndian`: Boolean indicating endianness for serialization

**Outputs:** Serialized extended identity tuple as `Uint8Array` in corresponding endianness

### Hashing Functions

#### `hash(input: Uint8Array, isLittleEndian: boolean): Uint8Array`

Hashes input data to a field element.

**Inputs:**

- `input`: Input data as `Uint8Array`
- `isLittleEndian`: Boolean indicating endianness for serialization

**Outputs:** Serialized hash result as `Uint8Array` in corresponding endianness

#### `poseidonHash(input: Uint8Array, isLittleEndian: boolean): Uint8Array`

Computes Poseidon hash of input field elements.

**Inputs:**

- `input`: Serialized field elements as `Uint8Array` (format: length + field elements)
- `isLittleEndian`: Boolean indicating endianness for serialization

**Outputs:** Serialized hash result as `Uint8Array` in corresponding endianness

## Usage Examples

### JavaScript/TypeScript

```javascript
import init, {
  generateMembershipKey,
  generateSeededMembershipKey,
  hash,
  poseidonHash
} from '@waku/zerokit-rln-wasm-utils';

// Initialize the WASM module
await init();

// Generate a random membership key
const membershipKey = generateMembershipKey(true); // little-endian
console.log('Membership key:', membershipKey);

// Generate a deterministic membership key from seed
const seed = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
const seededKey = generateSeededMembershipKey(seed, true);
console.log('Seeded key:', seededKey);

// Hash some data
const input = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
const hashResult = hash(input, true);
console.log('Hash result:', hashResult);

// Poseidon hash with field elements
const fieldElements = new Uint8Array([
  // Length (8 bytes) + field elements (32 bytes each)
  1, 0, 0, 0, 0, 0, 0, 0, // length = 1
  // field element data...
]);
const poseidonResult = poseidonHash(fieldElements, true);
console.log('Poseidon hash:', poseidonResult);
```
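
The field-element placeholder in the snippet above is intentionally left empty. As a sketch, the input expected by `poseidonHash` can be assembled as follows, assuming the layout produced by the `rln` crate's `vec_fr_to_bytes_le` helper: an 8-byte little-endian element count followed by the 32-byte little-endian field elements themselves.

```javascript
// Sketch: build the length-prefixed input for poseidonHash from 32-byte field elements.
function serializeFieldElements(elements) {
  const out = new Uint8Array(8 + 32 * elements.length);
  // 8-byte little-endian element count
  new DataView(out.buffer).setBigUint64(0, BigInt(elements.length), true);
  elements.forEach((fe, i) => {
    if (fe.length !== 32) throw new Error('each field element must be 32 bytes');
    out.set(fe, 8 + 32 * i);
  });
  return out;
}

// Example: hash two identity commitments (the second 32 bytes of each key pair).
const a = generateMembershipKey(true).slice(32, 64);
const b = generateMembershipKey(true).slice(32, 64);
const digest = poseidonHash(serializeFieldElements([a, b]), true);
console.log('Poseidon digest:', digest);
```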

## Install Dependencies

> [!NOTE]
> This project requires the following tools:
>
> - `wasm-pack` - for compiling Rust to WebAssembly
> - `cargo-make` - for running build commands
> - `nvm` - to install and manage Node.js
>
> Ensure all dependencies are installed before proceeding.

### Manually

#### Install `wasm-pack`

```bash
cargo install wasm-pack --version=0.13.1
```

#### Install `cargo-make`

```bash
cargo install cargo-make
```

#### Install `Node.js`

If you don't have `nvm` (Node Version Manager), install it by following
the [installation instructions](https://github.com/nvm-sh/nvm?tab=readme-ov-file#install--update-script).

After installing `nvm`, install and use Node.js `v22.14.0`:

```bash
nvm install 22.14.0
nvm use 22.14.0
nvm alias default 22.14.0
```

If you already have Node.js installed, check your version with the `node -v` command; the version must be strictly greater than 22.

### Or install everything

You can run the following command from the root of the repository to install all required dependencies for `zerokit`:

```bash
make installdeps
```

## Building the library

First, navigate to the rln-wasm-utils directory:

```bash
cd rln-wasm-utils
```

Compile rln-wasm-utils for `wasm32-unknown-unknown`:

```bash
cargo make build
```

## Running tests

```bash
cargo make test
```

## License

This project is licensed under both MIT and Apache 2.0 licenses. See the LICENSE files for details.
112 rln-wasm-utils/src/lib.rs Normal file
@@ -0,0 +1,112 @@
#![cfg(target_arch = "wasm32")]

use js_sys::Uint8Array;
use rln::public::{
    extended_key_gen, hash, key_gen, poseidon_hash, seeded_extended_key_gen, seeded_key_gen,
};
use std::vec::Vec;
use wasm_bindgen::prelude::*;

#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateMembershipKey)]
pub fn wasm_key_gen(is_little_endian: bool) -> Result<Uint8Array, String> {
    let mut output_data: Vec<u8> = Vec::new();
    if let Err(err) = key_gen(&mut output_data, is_little_endian) {
        std::mem::forget(output_data);
        Err(format!(
            "Msg: could not generate membership keys, Error: {:#?}",
            err
        ))
    } else {
        let result = Uint8Array::from(&output_data[..]);
        std::mem::forget(output_data);
        Ok(result)
    }
}

#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateExtendedMembershipKey)]
pub fn wasm_extended_key_gen(is_little_endian: bool) -> Result<Uint8Array, String> {
    let mut output_data: Vec<u8> = Vec::new();
    if let Err(err) = extended_key_gen(&mut output_data, is_little_endian) {
        std::mem::forget(output_data);
        Err(format!(
            "Msg: could not generate membership keys, Error: {:#?}",
            err
        ))
    } else {
        let result = Uint8Array::from(&output_data[..]);
        std::mem::forget(output_data);
        Ok(result)
    }
}

#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateSeededMembershipKey)]
pub fn wasm_seeded_key_gen(seed: Uint8Array, is_little_endian: bool) -> Result<Uint8Array, String> {
    let mut output_data: Vec<u8> = Vec::new();
    let input_data = &seed.to_vec()[..];
    if let Err(err) = seeded_key_gen(input_data, &mut output_data, is_little_endian) {
        std::mem::forget(output_data);
        Err(format!(
            "Msg: could not generate membership key, Error: {:#?}",
            err
        ))
    } else {
        let result = Uint8Array::from(&output_data[..]);
        std::mem::forget(output_data);
        Ok(result)
    }
}

#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[wasm_bindgen(js_name = generateSeededExtendedMembershipKey)]
pub fn wasm_seeded_extended_key_gen(
    seed: Uint8Array,
    is_little_endian: bool,
) -> Result<Uint8Array, String> {
    let mut output_data: Vec<u8> = Vec::new();
    let input_data = &seed.to_vec()[..];
    if let Err(err) = seeded_extended_key_gen(input_data, &mut output_data, is_little_endian) {
        std::mem::forget(output_data);
        Err(format!(
            "Msg: could not generate membership key, Error: {:#?}",
            err
        ))
    } else {
        let result = Uint8Array::from(&output_data[..]);
        std::mem::forget(output_data);
        Ok(result)
    }
}

#[wasm_bindgen(js_name = hash)]
pub fn wasm_hash(input: Uint8Array, is_little_endian: bool) -> Result<Uint8Array, String> {
    let mut output_data: Vec<u8> = Vec::new();
    let input_data = &input.to_vec()[..];
    if let Err(err) = hash(input_data, &mut output_data, is_little_endian) {
        std::mem::forget(output_data);
        Err(format!("Msg: could not generate hash, Error: {:#?}", err))
    } else {
        let result = Uint8Array::from(&output_data[..]);
        std::mem::forget(output_data);
        Ok(result)
    }
}

#[wasm_bindgen(js_name = poseidonHash)]
pub fn wasm_poseidon_hash(input: Uint8Array, is_little_endian: bool) -> Result<Uint8Array, String> {
    let mut output_data: Vec<u8> = Vec::new();
    let input_data = &input.to_vec()[..];
    if let Err(err) = poseidon_hash(input_data, &mut output_data, is_little_endian) {
        std::mem::forget(output_data);
        Err(format!(
            "Msg: could not generate poseidon hash, Error: {:#?}",
            err
        ))
    } else {
        let result = Uint8Array::from(&output_data[..]);
        std::mem::forget(output_data);
        Ok(result)
    }
}
114 rln-wasm-utils/tests/wasm_utils_test.rs Normal file
@@ -0,0 +1,114 @@
#![cfg(target_arch = "wasm32")]

#[cfg(test)]
mod test {
    use ark_std::{UniformRand, rand::thread_rng};
    use rand::Rng;
    use rln::circuit::Fr;
    use rln::hashers::{ROUND_PARAMS, hash_to_field_le, poseidon_hash};
    use rln::protocol::{
        deserialize_identity_pair_be, deserialize_identity_pair_le, deserialize_identity_tuple_be,
        deserialize_identity_tuple_le,
    };
    use rln::utils::{bytes_le_to_fr, vec_fr_to_bytes_le};
    use rln_wasm_utils::{
        wasm_extended_key_gen, wasm_hash, wasm_key_gen, wasm_poseidon_hash,
        wasm_seeded_extended_key_gen, wasm_seeded_key_gen,
    };
    use wasm_bindgen_test::*;

    #[wasm_bindgen_test]
    fn test_wasm_key_gen() {
        let result_le = wasm_key_gen(true);
        assert!(result_le.is_ok());
        deserialize_identity_pair_le(result_le.unwrap().to_vec());

        let result_be = wasm_key_gen(false);
        assert!(result_be.is_ok());
        deserialize_identity_pair_be(result_be.unwrap().to_vec());
    }

    #[wasm_bindgen_test]
    fn test_wasm_extended_key_gen() {
        let result_le = wasm_extended_key_gen(true);
        assert!(result_le.is_ok());
        deserialize_identity_tuple_le(result_le.unwrap().to_vec());

        let result_be = wasm_extended_key_gen(false);
        assert!(result_be.is_ok());
        deserialize_identity_tuple_be(result_be.unwrap().to_vec());
    }

    #[wasm_bindgen_test]
    fn test_wasm_seeded_key_gen() {
        // Create a test seed
        let seed_data = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        let seed = js_sys::Uint8Array::from(&seed_data[..]);

        let result_le = wasm_seeded_key_gen(seed.clone(), true);
        assert!(result_le.is_ok());
        let fr_le = deserialize_identity_pair_le(result_le.unwrap().to_vec());

        let result_be = wasm_seeded_key_gen(seed, false);
        assert!(result_be.is_ok());
        let fr_be = deserialize_identity_pair_be(result_be.unwrap().to_vec());

        assert_eq!(fr_le, fr_be);
    }

    #[wasm_bindgen_test]
    fn test_wasm_seeded_extended_key_gen() {
        // Create a test seed
        let seed_data = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        let seed = js_sys::Uint8Array::from(&seed_data[..]);

        let result_le = wasm_seeded_extended_key_gen(seed.clone(), true);
        assert!(result_le.is_ok());
        let fr_le = deserialize_identity_tuple_le(result_le.unwrap().to_vec());

        let result_be = wasm_seeded_extended_key_gen(seed, false);
        assert!(result_be.is_ok());
        let fr_be = deserialize_identity_tuple_be(result_be.unwrap().to_vec());

        assert_eq!(fr_le, fr_be);
    }

    #[wasm_bindgen_test]
    fn test_wasm_hash() {
        // Create test input data
        let signal: [u8; 32] = [0; 32];
        let input = js_sys::Uint8Array::from(&signal[..]);

        let result_le = wasm_hash(input.clone(), true);
        assert!(result_le.is_ok());

        let serialized_hash = result_le.unwrap().to_vec();
        let (hash1, _) = bytes_le_to_fr(&serialized_hash);

        let hash2 = hash_to_field_le(&signal);

        assert_eq!(hash1, hash2);
    }

    #[wasm_bindgen_test]
    fn test_wasm_poseidon_hash() {
        let mut rng = thread_rng();
        let number_of_inputs = rng.gen_range(1..ROUND_PARAMS.len());
        let mut inputs = Vec::with_capacity(number_of_inputs);
        for _ in 0..number_of_inputs {
            inputs.push(Fr::rand(&mut rng));
        }
        let inputs_ser = vec_fr_to_bytes_le(&inputs);
        let input = js_sys::Uint8Array::from(&inputs_ser[..]);

        let expected_hash = poseidon_hash(inputs.as_ref());

        let result_le = wasm_poseidon_hash(input.clone(), true);
        assert!(result_le.is_ok());

        let serialized_hash = result_le.unwrap().to_vec();
        let (received_hash, _) = bytes_le_to_fr(&serialized_hash);

        assert_eq!(received_hash, expected_hash);
    }
}
21 rln-wasm/.gitignore vendored
@@ -1,6 +1,21 @@
# Common files to ignore in Rust projects
.DS_Store
.idea
*.log
tmp/

# Generated by Cargo will have compiled files and executables
/target
**/*.rs.bk
Cargo.lock
bin/

# Generated by rln-wasm
pkg/
wasm-pack.log

# Generated by Nix
result

# These are backup files generated by rustfmt
**/*.rs.bk

# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb

@@ -1,39 +1,47 @@
[package]
name = "rln-wasm"
version = "0.1.0"
version = "0.3.0"
edition = "2021"
license = "MIT or Apache2"

[lib]
crate-type = ["cdylib", "rlib"]
required-features = ["stateless"]

[dependencies]
rln = { path = "../rln", default-features = false }
num-bigint = { version = "0.4.6", default-features = false, features = [
    "rand",
    "serde",
rln = { path = "../rln", version = "0.9.0", default-features = false, features = [
    "stateless",
] }
rln-wasm-utils = { path = "../rln-wasm-utils", version = "0.1.0", default-features = false }
zerokit_utils = { path = "../utils", version = "0.7.0", default-features = false }
num-bigint = { version = "0.4.6", default-features = false }
js-sys = "0.3.77"
wasm-bindgen = "0.2.100"
serde-wasm-bindgen = "0.6.5"
js-sys = "0.3.77"
serde_json = "1.0"
wasm-bindgen-rayon = { version = "1.3.0", features = [
    "no-bundler",
], optional = true }

# The `console_error_panic_hook` crate provides better debugging of panics by
# logging them with `console.error`. This is great for development, but requires
# all the `std::fmt` and `std::panicking` infrastructure, so isn't great for
# code size when deploying.
console_error_panic_hook = { version = "0.1.7", optional = true }
zerokit_utils = { path = "../utils" }

[target.'cfg(target_arch = "wasm32")'.dependencies]
getrandom = { version = "0.2.15", features = ["js"] }
getrandom = { version = "0.2.16", features = ["js"] }

[dev-dependencies]
serde_json = "1.0.141"
wasm-bindgen-test = "0.3.50"
wasm-bindgen-futures = "0.4.50"

[dev-dependencies.web-sys]
version = "0.3.77"
features = ["Window", "Navigator"]

[features]
default = ["console_error_panic_hook"]
stateless = ["rln/stateless"]
arkzkey = ["rln/arkzkey"]
parallel = ["rln/parallel", "wasm-bindgen-rayon"]

[package.metadata.docs.rs]
all-features = true

@@ -1,24 +1,50 @@
[tasks.build]
clear = true
dependencies = ["pack_build", "pack_rename"]
dependencies = ["pack_build", "pack_rename", "pack_resize"]

[tasks.build_arkzkey]
[tasks.build_parallel]
clear = true
dependencies = ["pack_build_arkzkey", "pack_rename"]
dependencies = ["pack_build_parallel", "pack_rename", "pack_resize"]

[tasks.pack_build]
command = "wasm-pack"
args = ["build", "--release", "--target", "web", "--scope", "waku"]
env = { "RUSTFLAGS" = "--cfg feature=\"stateless\"" }

[tasks.pack_build_arkzkey]
command = "wasm-pack"
args = ["build", "--release", "--target", "web", "--scope", "waku"]
env = { "RUSTFLAGS" = "--cfg feature=\"stateless\" --cfg feature=\"arkzkey\"" }

[tasks.pack_build_parallel]
command = "env"
args = [
    "RUSTFLAGS=-C target-feature=+atomics,+bulk-memory,+mutable-globals",
    "rustup",
    "run",
    "nightly",
    "wasm-pack",
    "build",
    "--release",
    "--target",
    "web",
    "--scope",
    "waku",
    "--features",
    "parallel",
    "-Z",
    "build-std=panic_abort,std",
]
[tasks.pack_rename]
script = "sed -i.bak 's/rln-wasm/zerokit-rln-wasm/g' pkg/package.json && rm pkg/package.json.bak"

[tasks.pack_resize]
command = "wasm-opt"
args = [
    "pkg/rln_wasm_bg.wasm",
    "-Oz",
    "--strip-debug",
    "--strip-dwarf",
    "--remove-unused-module-elements",
    "--vacuum",
    "-o",
    "pkg/rln_wasm_bg.wasm",
]

[tasks.test]
command = "wasm-pack"
args = [
@@ -30,29 +56,44 @@ args = [
    "--",
    "--nocapture",
]
env = { "RUSTFLAGS" = "--cfg feature=\"stateless\"" }
dependencies = ["build"]

[tasks.test_arkzkey]
[tasks.test_browser]
command = "wasm-pack"
args = [
    "test",
    "--release",
    "--node",
    "--chrome",
    "--headless",
    "--target",
    "wasm32-unknown-unknown",
    "--",
    "--nocapture",
]
env = { "RUSTFLAGS" = "--cfg feature=\"stateless\" --cfg feature=\"arkzkey\"" }
dependencies = ["build_arkzkey"]
dependencies = ["build"]

[tasks.test_parallel]
command = "env"
args = [
    "RUSTFLAGS=-C target-feature=+atomics,+bulk-memory,+mutable-globals",
    "rustup",
    "run",
    "nightly",
    "wasm-pack",
    "test",
    "--release",
    "--chrome",
    "--headless",
    "--target",
    "wasm32-unknown-unknown",
    "--features",
    "parallel",
    "-Z",
    "build-std=panic_abort,std",
    "--",
    "--nocapture",
]
dependencies = ["build_parallel"]

[tasks.bench]
disabled = true

[tasks.login]
command = "wasm-pack"
args = ["login"]

[tasks.publish]
command = "wasm-pack"
args = ["publish", "--access", "public", "--target", "web"]

@@ -1,22 +1,58 @@
# RLN for WASM

This library is used in [waku-org/js-rln](https://github.com/waku-org/js-rln/)
[![npm version](https://badge.fury.io/js/@waku%2Fzerokit-rln-wasm.svg)](https://badge.fury.io/js/@waku%2Fzerokit-rln-wasm)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![License: Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)

> **Note**: This project requires `wasm-pack` for compiling Rust to WebAssembly and `cargo-make` for running the build commands. Make sure both are installed before proceeding.
The Zerokit RLN WASM Module provides WebAssembly bindings for working with
Rate-Limiting Nullifier [RLN](https://rfc.vac.dev/spec/32/) zkSNARK proofs and primitives.
This module is used by [waku-org/js-rln](https://github.com/waku-org/js-rln/) to enable
RLN functionality in JavaScript/TypeScript applications.

Install `wasm-pack`:
## Install Dependencies

> [!NOTE]
> This project requires the following tools:
>
> - `wasm-pack` - for compiling Rust to WebAssembly
> - `cargo-make` - for running build commands
> - `nvm` - to install and manage Node.js
>
> Ensure all dependencies are installed before proceeding.

### Manually

#### Install `wasm-pack`

```bash
curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
cargo install wasm-pack --version=0.13.1
```

Install `cargo-make`
#### Install `cargo-make`

```bash
cargo install cargo-make
```

Or install everything needed for `zerokit` at the root of the repository:
#### Install `Node.js`

If you don't have `nvm` (Node Version Manager), install it by following
the [installation instructions](https://github.com/nvm-sh/nvm?tab=readme-ov-file#install--update-script).

After installing `nvm`, install and use Node.js `v22.14.0`:

```bash
nvm install 22.14.0
nvm use 22.14.0
nvm alias default 22.14.0
```

If you already have Node.js installed, check your version with the `node -v` command; the version must be strictly greater than 22.

### Or install everything

You can run the following command from the root of the repository to install all required dependencies for `zerokit`:

```bash
make installdeps
@@ -36,20 +72,75 @@ Compile zerokit for `wasm32-unknown-unknown`:
cargo make build
```

Or compile with the **arkzkey** feature enabled

```bash
cargo make build_arkzkey
```

## Running tests and benchmarks

```bash
cargo make test
```

Or test with the **arkzkey** feature enabled
If you want to run the tests in browser headless mode, you can use the following command:

```bash
cargo make test_arkzkey
cargo make test_browser
```

## Parallel computation

The library supports parallel computation using the `wasm-bindgen-rayon` crate,
enabling multi-threaded execution in the browser.

> [!NOTE]
> Parallel support is not enabled by default due to WebAssembly and browser limitations. \
> Compiling this feature requires `nightly` Rust.

### Build Setup

#### Install `nightly` Rust

```bash
rustup install nightly
```

### Build Commands

To enable parallel computation for WebAssembly threads, you can use the following command:

```bash
cargo make build_parallel
```

### WebAssembly Threading Support

Most modern browsers support WebAssembly threads,
but they require the following headers to enable `SharedArrayBuffer`, which is necessary for multithreading:

- Cross-Origin-Opener-Policy: same-origin
- Cross-Origin-Embedder-Policy: require-corp

Without these, the application will fall back to single-threaded mode.
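
For local testing, the snippet below is a minimal sketch of a Node.js static server that sets both headers; the file handling is deliberately simplified, so adapt it to your bundler or hosting setup. Most hosting providers and dev servers expose equivalent configuration options for these headers.

```js
// Minimal sketch: serve the built pkg/ directory with the headers
// required for SharedArrayBuffer (and therefore WebAssembly threads).
const http = require('http');
const fs = require('fs');
const path = require('path');

http.createServer((req, res) => {
  res.setHeader('Cross-Origin-Opener-Policy', 'same-origin');
  res.setHeader('Cross-Origin-Embedder-Policy', 'require-corp');
  const file = path.join(__dirname, req.url === '/' ? 'index.html' : req.url);
  fs.readFile(file, (err, data) => {
    if (err) {
      res.statusCode = 404;
      res.end('not found');
      return;
    }
    res.end(data);
  });
}).listen(8080, () => console.log('serving on http://localhost:8080'));
```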

## Feature detection

If you're targeting [older browser versions that didn't support WebAssembly threads yet](https://webassembly.org/roadmap/),
you'll likely want to create two builds - one with thread support and one without -
and use feature detection to choose the right one on the JavaScript side.

You can use the [wasm-feature-detect](https://github.com/GoogleChromeLabs/wasm-feature-detect) library for this purpose.
For example, your code might look like this:

```js
import { threads } from 'wasm-feature-detect';

let wasmPkg;

if (await threads()) {
  wasmPkg = await import('./pkg-with-threads/index.js');
  await wasmPkg.default();
  await wasmPkg.initThreadPool(navigator.hardwareConcurrency);
} else {
  wasmPkg = await import('./pkg-without-threads/index.js');
  await wasmPkg.default();
}

wasmPkg.nowCallAnyExportedFuncs();
```

@@ -1,4 +1,8 @@
module.exports = async function builder(code, options) {
// File generated with https://github.com/iden3/circom
// following the instructions from:
// https://github.com/vacp2p/zerokit/tree/master/rln#advanced-custom-circuit-compilation

export async function builder(code, options) {
    options = options || {};

    let wasmModule;
@@ -101,7 +105,7 @@ module.exports = async function builder(code, options) {
            // Then append the value to the message we are creating
            msgStr += fromArray32(arr).toString();
        }
};
}

class WitnessCalculator {
    constructor(instance, sanityCheck) {

|
||||
|
||||
use js_sys::{BigInt as JsBigInt, Object, Uint8Array};
|
||||
use num_bigint::BigInt;
|
||||
use rln::public::{hash, poseidon_hash, RLN};
|
||||
use rln::public::RLN;
|
||||
use std::vec::Vec;
|
||||
use wasm_bindgen::prelude::*;
|
||||
|
||||
#[cfg(feature = "parallel")]
|
||||
pub use wasm_bindgen_rayon::init_thread_pool;
|
||||
|
||||
#[wasm_bindgen(js_name = initPanicHook)]
|
||||
pub fn init_panic_hook() {
|
||||
console_error_panic_hook::set_once();
|
||||
@@ -75,40 +78,6 @@ macro_rules! call_bool_method_with_error_msg {
|
||||
}
|
||||
}
|
||||
|
||||
// Macro to execute a function with arbitrary amount of arguments,
|
||||
// First argument is the function to execute
|
||||
// Rest are all other arguments to the method
|
||||
macro_rules! fn_call_with_output_and_error_msg {
|
||||
// this variant is needed for the case when
|
||||
// there are zero other arguments
|
||||
($func:ident, $error_msg:expr) => {
|
||||
{
|
||||
let mut output_data: Vec<u8> = Vec::new();
|
||||
if let Err(err) = $func(&mut output_data) {
|
||||
std::mem::forget(output_data);
|
||||
Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
|
||||
} else {
|
||||
let result = Uint8Array::from(&output_data[..]);
|
||||
std::mem::forget(output_data);
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
};
|
||||
($func:ident, $error_msg:expr, $( $arg:expr ),* ) => {
|
||||
{
|
||||
let mut output_data: Vec<u8> = Vec::new();
|
||||
if let Err(err) = $func($($arg.process()),*, &mut output_data) {
|
||||
std::mem::forget(output_data);
|
||||
Err(format!("Msg: {:#?}, Error: {:#?}", $error_msg, err))
|
||||
} else {
|
||||
let result = Uint8Array::from(&output_data[..]);
|
||||
std::mem::forget(output_data);
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
trait ProcessArg {
|
||||
type ReturnType;
|
||||
fn process(self) -> Self::ReturnType;
|
||||
@@ -210,43 +179,6 @@ pub fn wasm_generate_rln_proof_with_witness(
|
||||
)
|
||||
}
|
||||
|
||||
#[allow(clippy::not_unsafe_ptr_arg_deref)]
|
||||
#[wasm_bindgen(js_name = generateMembershipKey)]
|
||||
pub fn wasm_key_gen(ctx: *const RLNWrapper) -> Result<Uint8Array, String> {
|
||||
call_with_output_and_error_msg!(ctx, key_gen, "could not generate membership keys")
|
||||
}
|
||||
|
||||
#[allow(clippy::not_unsafe_ptr_arg_deref)]
|
||||
#[wasm_bindgen(js_name = generateExtendedMembershipKey)]
|
||||
pub fn wasm_extended_key_gen(ctx: *const RLNWrapper) -> Result<Uint8Array, String> {
|
||||
call_with_output_and_error_msg!(ctx, extended_key_gen, "could not generate membership keys")
|
||||
}
|
||||
|
||||
#[allow(clippy::not_unsafe_ptr_arg_deref)]
|
||||
#[wasm_bindgen(js_name = generateSeededMembershipKey)]
|
||||
pub fn wasm_seeded_key_gen(ctx: *const RLNWrapper, seed: Uint8Array) -> Result<Uint8Array, String> {
|
||||
call_with_output_and_error_msg!(
|
||||
ctx,
|
||||
seeded_key_gen,
|
||||
"could not generate membership key",
|
||||
&seed.to_vec()[..]
|
||||
)
|
||||
}
|
||||
|
||||
#[allow(clippy::not_unsafe_ptr_arg_deref)]
|
||||
#[wasm_bindgen(js_name = generateSeededExtendedMembershipKey)]
|
||||
pub fn wasm_seeded_extended_key_gen(
|
||||
ctx: *const RLNWrapper,
|
||||
seed: Uint8Array,
|
||||
) -> Result<Uint8Array, String> {
|
||||
call_with_output_and_error_msg!(
|
||||
ctx,
|
||||
seeded_extended_key_gen,
|
||||
"could not generate membership key",
|
||||
&seed.to_vec()[..]
|
||||
)
|
||||
}
|
||||
|
||||
#[allow(clippy::not_unsafe_ptr_arg_deref)]
|
||||
#[wasm_bindgen(js_name = recovedIDSecret)]
|
||||
pub fn wasm_recover_id_secret(
|
||||
@@ -278,17 +210,3 @@ pub fn wasm_verify_with_roots(
|
||||
&roots.to_vec()[..]
|
||||
)
|
||||
}
|
||||
|
||||
#[wasm_bindgen(js_name = hash)]
|
||||
pub fn wasm_hash(input: Uint8Array) -> Result<Uint8Array, String> {
|
||||
fn_call_with_output_and_error_msg!(hash, "could not generate hash", &input.to_vec()[..])
|
||||
}
|
||||
|
||||
#[wasm_bindgen(js_name = poseidonHash)]
|
||||
pub fn wasm_poseidon_hash(input: Uint8Array) -> Result<Uint8Array, String> {
|
||||
fn_call_with_output_and_error_msg!(
|
||||
poseidon_hash,
|
||||
"could not generate poseidon hash",
|
||||
&input.to_vec()[..]
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1,26 +0,0 @@
const fs = require("fs");

// Utils functions for loading circom witness calculator and reading files from test

module.exports = {
    read_file: function (path) {
        return fs.readFileSync(path);
    },

    calculateWitness: async function (circom_path, inputs) {
        const wc = require("../resources/witness_calculator.js");
        const wasmFile = fs.readFileSync(circom_path);
        const wasmFileBuffer = wasmFile.slice(
            wasmFile.byteOffset,
            wasmFile.byteOffset + wasmFile.byteLength
        );
        const witnessCalculator = await wc(wasmFileBuffer);
        const calculatedWitness = await witnessCalculator.calculateWitness(
            inputs,
            false
        );
        return JSON.stringify(calculatedWitness, (key, value) =>
            typeof value === "bigint" ? value.toString() : value
        );
    },
};
266 rln-wasm/tests/browser.rs Normal file
@@ -0,0 +1,266 @@
#![cfg(target_arch = "wasm32")]

#[cfg(test)]
mod tests {
    use js_sys::{BigInt as JsBigInt, Date, Object, Uint8Array};
    use rln::circuit::{Fr, TEST_TREE_DEPTH};
    use rln::hashers::{hash_to_field_le, poseidon_hash, PoseidonHash};
    use rln::protocol::{prepare_verify_input, rln_witness_from_values, serialize_witness};
    use rln::utils::{bytes_le_to_fr, fr_to_bytes_le, IdSecret};
    use rln_wasm::{
        wasm_generate_rln_proof_with_witness, wasm_new, wasm_rln_witness_to_json,
        wasm_verify_with_roots,
    };
    use rln_wasm_utils::wasm_key_gen;
    use wasm_bindgen::{prelude::wasm_bindgen, JsValue};
    use wasm_bindgen_test::{console_log, wasm_bindgen_test, wasm_bindgen_test_configure};
    use zerokit_utils::{
        OptimalMerkleProof, OptimalMerkleTree, ZerokitMerkleProof, ZerokitMerkleTree,
    };

    #[cfg(feature = "parallel")]
    use {rln_wasm::init_thread_pool, wasm_bindgen_futures::JsFuture, web_sys::window};

    #[wasm_bindgen(inline_js = r#"
    export function isThreadpoolSupported() {
        return typeof SharedArrayBuffer !== 'undefined' &&
            typeof Atomics !== 'undefined' &&
            typeof crossOriginIsolated !== 'undefined' &&
            crossOriginIsolated;
    }

    export function initWitnessCalculator(jsCode) {
        const processedCode = jsCode
            .replace(/export\s+async\s+function\s+builder/, 'async function builder')
            .replace(/export\s*\{\s*builder\s*\};?/g, '');

        const moduleFunc = new Function(processedCode + '\nreturn { builder };');
        const witnessCalculatorModule = moduleFunc();

        window.witnessCalculatorBuilder = witnessCalculatorModule.builder;

        if (typeof window.witnessCalculatorBuilder !== 'function') {
            return false;
        }
        return true;
    }

    export function readFile(data) {
        return new Uint8Array(data);
    }

    export async function calculateWitness(circom_data, inputs) {
        const wasmBuffer = circom_data instanceof Uint8Array ? circom_data : new Uint8Array(circom_data);
        const witnessCalculator = await window.witnessCalculatorBuilder(wasmBuffer);
        const calculatedWitness = await witnessCalculator.calculateWitness(inputs, false);
        return JSON.stringify(calculatedWitness, (key, value) =>
            typeof value === "bigint" ? value.toString() : value
        );
    }
    "#)]
    extern "C" {
        #[wasm_bindgen(catch)]
        fn isThreadpoolSupported() -> Result<bool, JsValue>;

        #[wasm_bindgen(catch)]
        fn initWitnessCalculator(js: &str) -> Result<bool, JsValue>;

        #[wasm_bindgen(catch)]
        fn readFile(data: &[u8]) -> Result<Uint8Array, JsValue>;

        #[wasm_bindgen(catch)]
        async fn calculateWitness(circom_data: &[u8], inputs: Object) -> Result<JsValue, JsValue>;
    }

    const WITNESS_CALCULATOR_JS: &str = include_str!("../resources/witness_calculator.js");

    const ARKZKEY_BYTES: &[u8] =
        include_bytes!("../../rln/resources/tree_depth_20/rln_final.arkzkey");

    const CIRCOM_BYTES: &[u8] = include_bytes!("../../rln/resources/tree_depth_20/rln.wasm");

    wasm_bindgen_test_configure!(run_in_browser);

    #[wasm_bindgen_test]
    pub async fn rln_wasm_benchmark() {
        // Check if thread pool is supported
        #[cfg(feature = "parallel")]
        if !isThreadpoolSupported().expect("Failed to check thread pool support") {
            panic!("Thread pool is NOT supported");
        } else {
            // Initialize thread pool
            let cpu_count = window()
                .expect("Failed to get window")
                .navigator()
                .hardware_concurrency() as usize;
            JsFuture::from(init_thread_pool(cpu_count))
                .await
                .expect("Failed to initialize thread pool");
        }

        // Initialize witness calculator
        initWitnessCalculator(WITNESS_CALCULATOR_JS)
            .expect("Failed to initialize witness calculator");

        let mut results = String::from("\nbenchmarks:\n");
        let iterations = 10;

        let zkey = readFile(&ARKZKEY_BYTES).expect("Failed to read zkey file");

        // Benchmark wasm_new
        let start_wasm_new = Date::now();
        for _ in 0..iterations {
            let _ = wasm_new(zkey.clone()).expect("Failed to create RLN instance");
        }
        let wasm_new_result = Date::now() - start_wasm_new;

        // Create RLN instance for other benchmarks
        let rln_instance = wasm_new(zkey).expect("Failed to create RLN instance");
        let mut tree: OptimalMerkleTree<PoseidonHash> =
            OptimalMerkleTree::default(TEST_TREE_DEPTH).expect("Failed to create tree");

        // Benchmark wasm_key_gen
        let start_wasm_key_gen = Date::now();
        for _ in 0..iterations {
            let _ = wasm_key_gen(true).expect("Failed to generate keys");
        }
        let wasm_key_gen_result = Date::now() - start_wasm_key_gen;

        // Generate identity pair for other benchmarks
        let mem_keys = wasm_key_gen(true).expect("Failed to generate keys");
        let id_key = mem_keys.subarray(0, 32);
        let (identity_secret_hash, _) = IdSecret::from_bytes_le(&id_key.to_vec());
        let (id_commitment, _) = bytes_le_to_fr(&mem_keys.subarray(32, 64).to_vec());

        let epoch = hash_to_field_le(b"test-epoch");
        let rln_identifier = hash_to_field_le(b"test-rln-identifier");
        let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);

        let identity_index = tree.leaves_set();

        let user_message_limit = Fr::from(100);

        let rate_commitment = poseidon_hash(&[id_commitment, user_message_limit]);
        tree.update_next(rate_commitment)
            .expect("Failed to update tree");

        let message_id = Fr::from(0);
        let signal: [u8; 32] = [0; 32];
        let x = hash_to_field_le(&signal);

        let merkle_proof: OptimalMerkleProof<PoseidonHash> = tree
            .proof(identity_index)
            .expect("Failed to generate merkle proof");

        let rln_witness = rln_witness_from_values(
            identity_secret_hash,
            merkle_proof.get_path_elements(),
            merkle_proof.get_path_index(),
            x,
            external_nullifier,
            user_message_limit,
            message_id,
        )
        .expect("Failed to create RLN witness");

        let serialized_witness =
            serialize_witness(&rln_witness).expect("Failed to serialize witness");
        let witness_buffer = Uint8Array::from(&serialized_witness[..]);

        let json_inputs = wasm_rln_witness_to_json(rln_instance, witness_buffer.clone())
            .expect("Failed to convert witness to JSON");

        // Benchmark calculateWitness
        let start_calculate_witness = Date::now();
        for _ in 0..iterations {
            let _ = calculateWitness(&CIRCOM_BYTES, json_inputs.clone())
                .await
                .expect("Failed to calculate witness");
        }
        let calculate_witness_result = Date::now() - start_calculate_witness;

        // Calculate witness for other benchmarks
        let calculated_witness_json = calculateWitness(&CIRCOM_BYTES, json_inputs)
            .await
            .expect("Failed to calculate witness")
            .as_string()
            .expect("Failed to convert calculated witness to string");
        let calculated_witness_vec_str: Vec<String> =
            serde_json::from_str(&calculated_witness_json).expect("Failed to parse JSON");
        let calculated_witness: Vec<JsBigInt> = calculated_witness_vec_str
            .iter()
            .map(|x| JsBigInt::new(&x.into()).expect("Failed to create JsBigInt"))
            .collect();

        // Benchmark wasm_generate_rln_proof_with_witness
        let start_wasm_generate_rln_proof_with_witness = Date::now();
        for _ in 0..iterations {
            let _ = wasm_generate_rln_proof_with_witness(
                rln_instance,
                calculated_witness.clone(),
                witness_buffer.clone(),
            )
            .expect("Failed to generate proof");
        }
        let wasm_generate_rln_proof_with_witness_result =
            Date::now() - start_wasm_generate_rln_proof_with_witness;

        // Generate a proof for other benchmarks
        let proof =
            wasm_generate_rln_proof_with_witness(rln_instance, calculated_witness, witness_buffer)
                .expect("Failed to generate proof");

        let proof_data = proof.to_vec();
        let verify_input = prepare_verify_input(proof_data, &signal);
        let input_buffer = Uint8Array::from(&verify_input[..]);

        let root = tree.root();
        let roots_serialized = fr_to_bytes_le(&root);
        let roots_buffer = Uint8Array::from(&roots_serialized[..]);

        // Benchmark wasm_verify_with_roots
        let start_wasm_verify_with_roots = Date::now();
        for _ in 0..iterations {
            let _ =
                wasm_verify_with_roots(rln_instance, input_buffer.clone(), roots_buffer.clone())
                    .expect("Failed to verify proof");
        }
        let wasm_verify_with_roots_result = Date::now() - start_wasm_verify_with_roots;

        // Verify the proof with the root
        let is_proof_valid = wasm_verify_with_roots(rln_instance, input_buffer, roots_buffer)
            .expect("Failed to verify proof");
        assert!(is_proof_valid, "verification failed");

        // Format and display results
        let format_duration = |duration_ms: f64| -> String {
            let avg_ms = duration_ms / (iterations as f64);
            if avg_ms >= 1000.0 {
                format!("{:.3} s", avg_ms / 1000.0)
            } else {
                format!("{:.3} ms", avg_ms)
            }
        };

        results.push_str(&format!("wasm_new: {}\n", format_duration(wasm_new_result)));
        results.push_str(&format!(
            "wasm_key_gen: {}\n",
            format_duration(wasm_key_gen_result)
        ));
        results.push_str(&format!(
            "calculateWitness: {}\n",
            format_duration(calculate_witness_result)
        ));
        results.push_str(&format!(
            "wasm_generate_rln_proof_with_witness: {}\n",
            format_duration(wasm_generate_rln_proof_with_witness_result)
        ));
        results.push_str(&format!(
            "wasm_verify_with_roots: {}\n",
            format_duration(wasm_verify_with_roots_result)
        ));

        // Log the results
        console_log!("{results}");
    }
}
@@ -1,40 +1,92 @@
|
||||
#![cfg(not(feature = "parallel"))]
|
||||
#![cfg(target_arch = "wasm32")]
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use js_sys::{BigInt as JsBigInt, Date, Object, Uint8Array};
|
||||
use rln::circuit::{Fr, TEST_TREE_HEIGHT};
|
||||
use rln::hashers::{hash_to_field, poseidon_hash};
|
||||
use rln::poseidon_tree::PoseidonTree;
|
||||
use rln::circuit::{Fr, TEST_TREE_DEPTH};
|
||||
use rln::hashers::{hash_to_field_le, poseidon_hash, PoseidonHash};
|
||||
use rln::protocol::{prepare_verify_input, rln_witness_from_values, serialize_witness};
|
||||
use rln::utils::{bytes_le_to_fr, fr_to_bytes_le};
|
||||
use rln_wasm::*;
|
||||
use wasm_bindgen::{prelude::*, JsValue};
|
||||
use wasm_bindgen_test::wasm_bindgen_test;
|
||||
use zerokit_utils::merkle_tree::merkle_tree::ZerokitMerkleTree;
|
||||
use rln::utils::{bytes_le_to_fr, fr_to_bytes_le, IdSecret};
use rln_wasm::{
    wasm_generate_rln_proof_with_witness, wasm_new, wasm_rln_witness_to_json,
    wasm_verify_with_roots,
};
use rln_wasm_utils::wasm_key_gen;
use wasm_bindgen::{prelude::wasm_bindgen, JsValue};
use wasm_bindgen_test::{console_log, wasm_bindgen_test};
use zerokit_utils::{
    OptimalMerkleProof, OptimalMerkleTree, ZerokitMerkleProof, ZerokitMerkleTree,
};

#[wasm_bindgen(module = "src/utils.js")]
const WITNESS_CALCULATOR_JS: &str = include_str!("../resources/witness_calculator.js");

#[wasm_bindgen(inline_js = r#"
const fs = require("fs");

let witnessCalculatorModule = null;

module.exports = {
    initWitnessCalculator: function(code) {
        const processedCode = code
            .replace(/export\s+async\s+function\s+builder/, 'async function builder')
            .replace(/export\s*\{\s*builder\s*\};?/g, '');

        const moduleFunc = new Function(processedCode + '\nreturn { builder };');
        witnessCalculatorModule = moduleFunc();

        if (typeof witnessCalculatorModule.builder !== 'function') {
            return false;
        }
        return true;
    },

    readFile: function (path) {
        return fs.readFileSync(path);
    },

    calculateWitness: async function (circom_path, inputs) {
        const wasmFile = fs.readFileSync(circom_path);
        const wasmFileBuffer = wasmFile.slice(
            wasmFile.byteOffset,
            wasmFile.byteOffset + wasmFile.byteLength
        );
        const witnessCalculator = await witnessCalculatorModule.builder(wasmFileBuffer);
        const calculatedWitness = await witnessCalculator.calculateWitness(
            inputs,
            false
        );
        return JSON.stringify(calculatedWitness, (key, value) =>
            typeof value === "bigint" ? value.toString() : value
        );
    },
};
"#)]
extern "C" {
    #[wasm_bindgen(catch)]
    fn read_file(path: &str) -> Result<Uint8Array, JsValue>;
    fn initWitnessCalculator(code: &str) -> Result<bool, JsValue>;

    #[wasm_bindgen(catch)]
    fn readFile(path: &str) -> Result<Uint8Array, JsValue>;

    #[wasm_bindgen(catch)]
    async fn calculateWitness(circom_path: &str, input: Object) -> Result<JsValue, JsValue>;
}

#[cfg(feature = "arkzkey")]
const ZKEY_PATH: &str = "../rln/resources/tree_height_20/rln_final.arkzkey";
#[cfg(not(feature = "arkzkey"))]
const ZKEY_PATH: &str = "../rln/resources/tree_height_20/rln_final.zkey";
const ARKZKEY_PATH: &str = "../rln/resources/tree_depth_20/rln_final.arkzkey";

const CIRCOM_PATH: &str = "../rln/resources/tree_height_20/rln.wasm";
const CIRCOM_PATH: &str = "../rln/resources/tree_depth_20/rln.wasm";

#[wasm_bindgen_test]
pub async fn rln_wasm_benchmark() {
    // Initialize witness calculator
    initWitnessCalculator(WITNESS_CALCULATOR_JS)
        .expect("Failed to initialize witness calculator");

    let mut results = String::from("\nbenchmarks:\n");
    let iterations = 10;

    let zkey = read_file(&ZKEY_PATH).expect("Failed to read zkey file");
    let zkey = readFile(&ARKZKEY_PATH).expect("Failed to read zkey file");

    // Benchmark wasm_new
    let start_wasm_new = Date::now();
@@ -45,23 +97,24 @@ mod tests {

    // Create RLN instance for other benchmarks
    let rln_instance = wasm_new(zkey).expect("Failed to create RLN instance");
    let mut tree = PoseidonTree::default(TEST_TREE_HEIGHT).expect("Failed to create tree");
    let mut tree: OptimalMerkleTree<PoseidonHash> =
        OptimalMerkleTree::default(TEST_TREE_DEPTH).expect("Failed to create tree");

    // Benchmark wasm_key_gen
    let start_wasm_key_gen = Date::now();
    for _ in 0..iterations {
        let _ = wasm_key_gen(rln_instance).expect("Failed to generate keys");
        let _ = wasm_key_gen(true).expect("Failed to generate keys");
    }
    let wasm_key_gen_result = Date::now() - start_wasm_key_gen;

    // Generate identity pair for other benchmarks
    let mem_keys = wasm_key_gen(rln_instance).expect("Failed to generate keys");
    let mem_keys = wasm_key_gen(true).expect("Failed to generate keys");
    let id_key = mem_keys.subarray(0, 32);
    let (identity_secret_hash, _) = bytes_le_to_fr(&id_key.to_vec());
    let (identity_secret_hash, _) = IdSecret::from_bytes_le(&id_key.to_vec());
    let (id_commitment, _) = bytes_le_to_fr(&mem_keys.subarray(32, 64).to_vec());

    let epoch = hash_to_field(b"test-epoch");
    let rln_identifier = hash_to_field(b"test-rln-identifier");
    let epoch = hash_to_field_le(b"test-epoch");
    let rln_identifier = hash_to_field_le(b"test-rln-identifier");
    let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);

    let identity_index = tree.leaves_set();
@@ -74,15 +127,16 @@ mod tests {

    let message_id = Fr::from(0);
    let signal: [u8; 32] = [0; 32];
    let x = hash_to_field(&signal);
    let x = hash_to_field_le(&signal);

    let merkle_proof = tree
    let merkle_proof: OptimalMerkleProof<PoseidonHash> = tree
        .proof(identity_index)
        .expect("Failed to generate merkle proof");

    let rln_witness = rln_witness_from_values(
        identity_secret_hash,
        &merkle_proof,
        merkle_proof.get_path_elements(),
        merkle_proof.get_path_index(),
        x,
        external_nullifier,
        user_message_limit,
@@ -175,7 +229,7 @@ mod tests {
        format_duration(wasm_key_gen_result)
    ));
    results.push_str(&format!(
        "calculateWitness: {}\n",
        "calculate_witness: {}\n",
        format_duration(calculate_witness_result)
    ));
    results.push_str(&format!(
@@ -188,102 +242,6 @@ mod tests {
    ));

    // Log the results
    wasm_bindgen_test::console_log!("{results}");
}

#[wasm_bindgen_test]
pub async fn rln_wasm_test() {
    // Read the zkey file
    let zkey = read_file(&ZKEY_PATH).expect("Failed to read zkey file");

    // Create RLN instance and separated tree
    let rln_instance = wasm_new(zkey).expect("Failed to create RLN instance");
    let mut tree = PoseidonTree::default(TEST_TREE_HEIGHT).expect("Failed to create tree");

    // Setting up the epoch and rln_identifier
    let epoch = hash_to_field(b"test-epoch");
    let rln_identifier = hash_to_field(b"test-rln-identifier");
    let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);

    // Generate identity pair
    let mem_keys = wasm_key_gen(rln_instance).expect("Failed to generate keys");
    let (identity_secret_hash, _) = bytes_le_to_fr(&mem_keys.subarray(0, 32).to_vec());
    let (id_commitment, _) = bytes_le_to_fr(&mem_keys.subarray(32, 64).to_vec());

    // Get index of the identity
    let identity_index = tree.leaves_set();

    // Setting up the user message limit
    let user_message_limit = Fr::from(100);

    // Updating the tree with the rate commitment
    let rate_commitment = poseidon_hash(&[id_commitment, user_message_limit]);
    tree.update_next(rate_commitment)
        .expect("Failed to update tree");

    // Generate merkle proof
    let merkle_proof = tree
        .proof(identity_index)
        .expect("Failed to generate merkle proof");

    // Create message id and signal
    let message_id = Fr::from(0);
    let signal: [u8; 32] = [0; 32];
    let x = hash_to_field(&signal);

    // Prepare input for witness calculation
    let rln_witness = rln_witness_from_values(
        identity_secret_hash,
        &merkle_proof,
        x,
        external_nullifier,
        user_message_limit,
        message_id,
    )
    .expect("Failed to create RLN witness");

    // Serialize the rln witness
    let serialized_witness =
        serialize_witness(&rln_witness).expect("Failed to serialize witness");
    // Convert the serialized witness to a Uint8Array
    let witness_buffer = Uint8Array::from(&serialized_witness[..]);

    // Obtaining inputs that should be sent to circom witness calculator
    let json_inputs = wasm_rln_witness_to_json(rln_instance, witness_buffer.clone())
        .expect("Failed to convert witness to JSON");

    // Calculating witness with JS
    // (Using a JSON since wasm_bindgen does not like Result<Vec<JsBigInt>,JsValue>)
    let calculated_witness_json = calculateWitness(&CIRCOM_PATH, json_inputs)
        .await
        .expect("Failed to calculate witness")
        .as_string()
        .expect("Failed to convert calculated witness to string");
    let calculated_witness_vec_str: Vec<String> =
        serde_json::from_str(&calculated_witness_json).expect("Failed to parse JSON");
    let calculated_witness: Vec<JsBigInt> = calculated_witness_vec_str
        .iter()
        .map(|x| JsBigInt::new(&x.into()).expect("Failed to create JsBigInt"))
        .collect();

    // Generate a proof from the calculated witness
    let proof =
        wasm_generate_rln_proof_with_witness(rln_instance, calculated_witness, witness_buffer)
            .expect("Failed to generate proof");

    // Prepare the root for verification
    let root = tree.root();
    let roots_serialized = fr_to_bytes_le(&root);
    let roots_buffer = Uint8Array::from(&roots_serialized[..]);

    // Prepare input for proof verification
    let proof_data = proof.to_vec();
    let verify_input = prepare_verify_input(proof_data, &signal);
    let input_buffer = Uint8Array::from(&verify_input[..]);

    // Verify the proof with the root
    let is_proof_valid = wasm_verify_with_roots(rln_instance, input_buffer, roots_buffer)
        .expect("Failed to verify proof");
    assert!(is_proof_valid, "verification failed");
    console_log!("{results}");
}
}
@@ -1,6 +1,6 @@
[package]
name = "rln"
version = "0.7.0"
version = "0.9.0"
edition = "2021"
license = "MIT OR Apache-2.0"
description = "APIs to manage, compute and verify zkSNARK proofs and RLN primitives"
@@ -9,7 +9,7 @@ homepage = "https://vac.dev"
repository = "https://github.com/vacp2p/zerokit"

[lib]
crate-type = ["rlib", "staticlib"]
crate-type = ["rlib", "staticlib", "cdylib"]
bench = false

# This flag disables cargo doctests, i.e. testing example code-snippets in documentation
@@ -19,78 +19,63 @@ doctest = false
# ZKP Generation
ark-bn254 = { version = "0.5.0", features = ["std"] }
ark-relations = { version = "0.5.1", features = ["std"] }
ark-ff = { version = "0.5.0", default-features = false, features = [
    "parallel",
] }
ark-ec = { version = "0.5.0", default-features = false, features = [
    "parallel",
] }

ark-std = { version = "0.5.0", default-features = false, features = [
    "parallel",
] }
ark-poly = { version = "0.5.0", default-features = false, features = [
    "parallel",
] }
ark-groth16 = { version = "0.5.0", default-features = false, features = [
    "parallel",
] }
ark-serialize = { version = "0.5.0", default-features = false, features = [
    "parallel",
] }
ark-ff = { version = "0.5.0", default-features = false }
ark-ec = { version = "0.5.0", default-features = false }
ark-std = { version = "0.5.0", default-features = false }
ark-poly = { version = "0.5.0", default-features = false }
ark-groth16 = { version = "0.5.0", default-features = false }
ark-serialize = { version = "0.5.0", default-features = false }

# error handling
color-eyre = "0.6.3"
thiserror = "2.0.12"

# utilities
rayon = { version = "1.10.0", optional = true }
byteorder = "1.5.0"
cfg-if = "1.0"
num-bigint = { version = "0.4.6", default-features = false, features = [
    "rand",
    "std",
] }
num-bigint = { version = "0.4.6", default-features = false, features = ["std"] }
num-traits = "0.2.19"
once_cell = "1.21.3"
lazy_static = "1.5.0"
rand = "0.8.5"
rand_chacha = "0.3.1"
ruint = { version = "1.14.0", features = ["rand", "serde", "ark-ff-04"] }
ruint = { version = "1.15.0", features = ["rand", "serde", "ark-ff-04"] }
tiny-keccak = { version = "2.0.2", features = ["keccak"] }
utils = { package = "zerokit_utils", version = "0.5.2", path = "../utils", default-features = false }
zeroize = "1.8"
tempfile = "3.21.0"
utils = { package = "zerokit_utils", version = "0.7.0", path = "../utils", default-features = false }

# serialization
prost = "0.13.5"
serde_json = "1.0"
prost = "0.14.1"
serde_json = "1.0.141"
serde = { version = "1.0", features = ["derive"] }

document-features = { version = "0.2.11", optional = true }

[dev-dependencies]
sled = "0.34.7"
criterion = { version = "0.4.0", features = ["html_reports"] }
criterion = { version = "0.7.0", features = ["html_reports"] }

[features]
default = ["pmtree-ft"]
fullmerkletree = ["default"]
default = ["parallel", "pmtree-ft"]
stateless = []
arkzkey = []

# Note: pmtree feature is still experimental
pmtree-ft = ["utils/pmtree-ft"]

[[bench]]
name = "circuit_loading_arkzkey_benchmark"
harness = false
required-features = ["arkzkey"]

[[bench]]
name = "circuit_loading_benchmark"
harness = false
parallel = [
    "rayon",
    "utils/parallel",
    "ark-ff/parallel",
    "ark-ec/parallel",
    "ark-std/parallel",
    "ark-poly/parallel",
    "ark-groth16/parallel",
    "ark-serialize/parallel",
]
fullmerkletree = [] # Pre-allocated tree, fastest access
optimalmerkletree = [] # Sparse storage, memory efficient
pmtree-ft = ["utils/pmtree-ft"] # Persistent storage, disk-based

[[bench]]
name = "pmtree_benchmark"
harness = false
required-features = ["pmtree-ft"]

[[bench]]
name = "poseidon_tree_benchmark"

@@ -8,11 +8,7 @@ args = ["test", "--release", "--", "--nocapture"]
[tasks.test_stateless]
command = "cargo"
args = ["test", "--release", "--features", "stateless"]

[tasks.test_arkzkey]
command = "cargo"
args = ["test", "--release", "--features", "arkzkey"]
args = ["test", "--release", "--no-default-features", "--features", "stateless"]

[tasks.bench]
command = "cargo"

118
rln/README.md
@@ -1,8 +1,12 @@
# Zerokit RLN Module

[](https://crates.io/crates/rln)
[](https://opensource.org/licenses/MIT)
[](https://opensource.org/licenses/Apache-2.0)

The Zerokit RLN Module provides a Rust implementation for working with Rate-Limiting Nullifier [RLN](https://rfc.vac.dev/spec/32/) zkSNARK proofs and primitives. This module allows you to:
The Zerokit RLN Module provides a Rust implementation for working with
Rate-Limiting Nullifier [RLN](https://rfc.vac.dev/spec/32/) zkSNARK proofs and primitives.
This module allows you to:

- Generate and verify RLN proofs
- Work with Merkle trees for commitment storage
@@ -11,7 +15,8 @@ The Zerokit RLN Module provides a Rust implementation for working with Rate-Limi
## Quick Start

> [!IMPORTANT]
> Version 0.6.1 is required for WASM support or x32 architecture. Current version doesn't support these platforms due to dependency issues. WASM support will return in a future release.
> Version 0.7.0 is the only version that does not support WASM and x32 architecture.
> WASM support is available in version 0.8.0 and above.

### Add RLN as dependency

@@ -24,9 +29,15 @@ rln = { git = "https://github.com/vacp2p/zerokit" }

## Basic Usage Example

Note that we need to pass to RLN object constructor the path where the graph file (`graph.bin`, built for the input tree size), the corresponding proving key (`rln_final.zkey`) or (`rln_final_uncompr.arkzkey`) and verification key (`verification_key.arkvkey`, optional) are found.
The RLN object constructor requires the following files:

In the following we will use [cursors](https://doc.rust-lang.org/std/io/struct.Cursor.html) as readers/writers for interfacing with RLN public APIs.
- `rln_final.arkzkey`: The proving key in arkzkey format.
- `graph.bin`: The graph file built for the input tree size.

Additionally, `rln.wasm` is used for testing in the rln-wasm module.

In the following we will use [cursors](https://doc.rust-lang.org/std/io/struct.Cursor.html)
as readers/writers for interfacing with RLN public APIs.

```rust
use std::io::Cursor;
@@ -42,11 +53,11 @@ use serde_json::json;

fn main() {
    // 1. Initialize RLN with parameters:
    // - the tree height;
    // - the tree depth;
    // - the tree config, if it is not defined, the default value will be set
    let tree_height = 20;
    let tree_depth = 20;
    let input = Cursor::new(json!({}).to_string());
    let mut rln = RLN::new(tree_height, input).unwrap();
    let mut rln = RLN::new(tree_depth, input).unwrap();

    // 2. Generate an identity keypair
    let (identity_secret_hash, id_commitment) = keygen();
@@ -74,7 +85,8 @@ fn main() {
    let signal = b"RLN is awesome";

    // 6. Prepare input for generate_rln_proof API
    // input_data is [ identity_secret<32> | id_index<8> | external_nullifier<32> | user_message_limit<32> | message_id<32> | signal_len<8> | signal<var> ]
    // input_data is [ identity_secret<32> | id_index<8> | external_nullifier<32>
    // | user_message_limit<32> | message_id<32> | signal_len<8> | signal<var> ]
    let prove_input = prepare_prove_input(
        identity_secret_hash,
        id_index,
@@ -92,11 +104,13 @@ fn main() {
    .unwrap();

    // We get the public outputs returned by the circuit evaluation
    // The byte vector `proof_data` is serialized as `[ zk-proof | tree_root | external_nullifier | share_x | share_y | nullifier ]`.
    // The byte vector `proof_data` is serialized as
    // `[ zk-proof | tree_root | external_nullifier | share_x | share_y | nullifier ]`.
    let proof_data = output_buffer.into_inner();

    // 8. Verify a RLN proof
    // Input buffer is serialized as `[proof_data | signal_len | signal ]`, where `proof_data` is (computed as) the output obtained by `generate_rln_proof`.
    // Input buffer is serialized as `[proof_data | signal_len | signal ]`,
    // where `proof_data` is (computed as) the output obtained by `generate_rln_proof`.
    let verify_data = prepare_verify_input(proof_data, signal);

    // We verify the zk-proof against the provided proof values
@@ -113,16 +127,24 @@ fn main() {
The `external nullifier` includes two parameters.

The first one is `epoch` and it's used to identify messages received in a certain time frame.
It usually corresponds to the current UNIX time but can also be set to a random value or generated by a seed, provided that it corresponds to a field element.
It usually corresponds to the current UNIX time but can also be set to a random value or generated by a seed,
provided that it corresponds to a field element.

The second one is `rln_identifier` and it's used to prevent a RLN ZK proof generated for one application to be re-used in another one.
The second one is `rln_identifier` and it's used to prevent an RLN ZK proof generated
for one application from being re-used in another one.
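
Below is a minimal sketch of deriving the external nullifier, assuming the `hash_to_field_le` and `poseidon_hash` helpers used in this repository's tests are exported from `rln::hashers`:

```rust
use rln::circuit::Fr;
use rln::hashers::{hash_to_field_le, poseidon_hash};

fn external_nullifier() -> Fr {
    // `epoch` identifies a time frame; any seed that maps to a field element works.
    let epoch = hash_to_field_le(b"test-epoch");
    // `rln_identifier` binds proofs to a single application.
    let rln_identifier = hash_to_field_le(b"test-rln-identifier");
    // The external nullifier is the Poseidon hash of the two field elements.
    poseidon_hash(&[epoch, rln_identifier])
}
```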

### Features

- **Multiple Backend Support**: Choose between different zkey formats with feature flags
  - `arkzkey`: Use the optimized Arkworks-compatible zkey format (faster loading)
  - `stateless`: For stateless proof verification
- **Pre-compiled Circuits**: Ready-to-use circuits with Merkle tree height of 20
- **Stateless Mode**: Allows the use of RLN without maintaining state of the Merkle tree.
- **Pre-compiled Circuits**: Ready-to-use circuits with Merkle tree depth of 20
- **Wasm Support**: WebAssembly bindings via rln-wasm crate with features like:
  - Browser and Node.js compatibility
  - Optional parallel feature support using [wasm-bindgen-rayon](https://github.com/RReverser/wasm-bindgen-rayon)
  - Headless browser testing capabilities
- **Merkle Tree Implementations**: Multiple tree variants optimized for different use cases (see the sketch after this list):
  - **Full Merkle Tree**: Fastest access with complete pre-allocated tree in memory. Best for frequent random access (enable with `fullmerkletree` feature).
  - **Optimal Merkle Tree**: Memory-efficient sparse storage using HashMap. Ideal for partially populated trees (enable with `optimalmerkletree` feature).
  - **Persistent Merkle Tree**: Disk-based storage with [sled](https://github.com/spacejam/sled) for persistence across application restarts and large datasets (enable with `pmtree-ft` feature).
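
A minimal sketch of constructing the in-memory variants, assuming the `zerokit_utils` exports and the `rln::hashers::PoseidonHash` hasher used elsewhere in this release:

```rust
use rln::hashers::PoseidonHash;
use zerokit_utils::{FullMerkleTree, OptimalMerkleTree, ZerokitMerkleTree};

fn build_trees() {
    // Sparse, HashMap-backed storage: memory-efficient for mostly empty trees.
    let mut optimal = OptimalMerkleTree::<PoseidonHash>::default(20).unwrap();
    // Fully pre-allocated storage: fastest random access.
    let mut full = FullMerkleTree::<PoseidonHash>::default(20).unwrap();
    // Both variants agree on the root of an empty depth-20 tree.
    assert_eq!(optimal.root(), full.root());
}
```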
## Building and Testing

@@ -143,20 +165,22 @@ cargo make build
# Test with default features
cargo make test

# Test with specific features
cargo make test_arkzkey # For arkzkey feature
cargo make test_stateless # For stateless feature
# Test with stateless features
cargo make test_stateless
```

## Advanced: Custom Circuit Compilation

The `rln` (<https://github.com/rate-limiting-nullifier/circom-rln>) repository, which contains the RLN circuit implementation is using for pre-compiled RLN circuit for zerokit RLN.
The `rln` (<https://github.com/rate-limiting-nullifier/circom-rln>) repository,
which contains the RLN circuit implementation, provides the pre-compiled RLN circuit used by zerokit RLN.
If you want to compile your own RLN circuit, you can follow the instructions below.

### 1. Compile ZK Circuits for getting the zkey and verification key files
### 1. Compile ZK Circuits for getting the zkey file

This script actually generates not only the zkey and verification key files for the RLN circuit, but also the execution wasm file used for witness calculation.
However, the wasm file is not needed for the `rln` module, because current implementation uses the iden3 graph file for witness calculation.
This script actually generates not only the zkey file for the RLN circuit,
but also the execution wasm file used for witness calculation.
However, the wasm file is not needed for the `rln` module,
because the current implementation uses the iden3 graph file for witness calculation.
This graph file is generated by the `circom-witnesscalc` tool in [step 2](#2-generate-witness-calculation-graph).

To customize the circuit parameters, modify `circom-rln/circuits/rln.circom`:
@@ -169,19 +193,27 @@ component main { public [x, externalNullifier] } = RLN(N, M);

Where:

- `N`: Merkle tree height, determining the maximum membership capacity (2^N members).
- `N`: Merkle tree depth, determining the maximum membership capacity (2^N members).

- `M`: Bit size for range checks, setting an upper bound for the number of messages per epoch (2^M messages).

> [!NOTE]
> However, if `N` is too big, this might require a larger Powers of Tau ceremony than the one hardcoded in `./scripts/build-circuits.sh`, which is `2^14`. \
> In such case, we refer to the official [Circom documentation](https://docs.circom.io/getting-started/proving-circuits/#powers-of-tau) for instructions on how to run an appropriate Powers of Tau ceremony and Phase 2 in order to compile the desired circuit. \
> Additionally, while `M` sets an upper bound on the number of messages per epoch (`2^M`), you can configure lower message limit for your use case, as long as it satisfies `user_message_limit ≤ 2^M`. \
> Currently, the `rln` module comes with a [pre-compiled](https://github.com/vacp2p/zerokit/tree/master/rln/resources) RLN circuit with a Merkle tree of height `20` and a bit size of `16`, allowing up to `2^20` registered members and a `2^16` message limit per epoch.
> However, if `N` is too big, this might require a larger Powers of Tau ceremony
> than the one hardcoded in `./scripts/build-circuits.sh`, which is `2^14`.
> In such a case, we refer to the official
> [Circom documentation](https://docs.circom.io/getting-started/proving-circuits/#powers-of-tau)
> for instructions on how to run an appropriate Powers of Tau ceremony and Phase 2 in order to compile the desired circuit. \
> Additionally, while `M` sets an upper bound on the number of messages per epoch (`2^M`),
> you can configure a lower message limit for your use case, as long as it satisfies `user_message_limit ≤ 2^M`. \
> Currently, the `rln` module comes with a [pre-compiled](https://github.com/vacp2p/zerokit/tree/master/rln/resources)
> RLN circuit with a Merkle tree of depth `20` and a bit size of `16`,
> allowing up to `2^20` registered members and a `2^16` message limit per epoch.

#### Install circom compiler

You can follow the instructions below or refer to the [installing Circom](https://docs.circom.io/getting-started/installation/#installing-circom) guide for more details, but make sure to use the specific version `v2.1.0`.
You can follow the instructions below or refer to the
[installing Circom](https://docs.circom.io/getting-started/installation/#installing-circom) guide for more details,
but make sure to use the specific version `v2.1.0`.

```sh
# Clone the circom repository
@@ -218,7 +250,8 @@ cp zkeyFiles/rln/final.zkey <path_to_rln_final.zkey>

### 2. Generate Witness Calculation Graph

The execution graph file used for witness calculation can be compiled following instructions in the [circom-witnesscalc](https://github.com/iden3/circom-witnesscalc) repository.
The execution graph file used for witness calculation can be compiled following instructions
in the [circom-witnesscalc](https://github.com/iden3/circom-witnesscalc) repository.
As mentioned in step 1, we should use the `rln.circom` file from the `circom-rln` repository.

```sh
@@ -235,11 +268,14 @@ cargo build
cargo run --package circom_witnesscalc --bin build-circuit ../circom-rln/circuits/rln.circom <path_to_graph.bin>
```

The `rln` module comes with [pre-compiled](https://github.com/vacp2p/zerokit/tree/master/rln/resources) execution graph files for the RLN circuit.
The `rln` module comes with [pre-compiled](https://github.com/vacp2p/zerokit/tree/master/rln/resources)
execution graph files for the RLN circuit.

### 3. Generate Arkzkey Representation for zkey and verification key files
### 3. Generate Arkzkey Representation for zkey file

For faster loading, compile the zkey file into the arkzkey format using [ark-zkey](https://github.com/seemenkina/ark-zkey). This is fork of the [original](https://github.com/zkmopro/ark-zkey) repository with the uncompressed zkey support.
For faster loading, compile the zkey file into the arkzkey format using
[ark-zkey](https://github.com/seemenkina/ark-zkey).
This is a fork of the [original](https://github.com/zkmopro/ark-zkey) repository with uncompressed arkzkey support.

```sh
# Clone the ark-zkey repository
@@ -252,7 +288,21 @@ cd ark-zkey && cargo build
cargo run --bin arkzkey-util <path_to_rln_final.zkey>
```

Currently, the `rln` module comes with [pre-compiled](https://github.com/vacp2p/zerokit/tree/master/rln/resources) arkzkey keys for the RLN circuit.
This will generate the `rln_final.arkzkey` file, which is used by the `rln` module.

Currently, the `rln` module comes with
[pre-compiled](https://github.com/vacp2p/zerokit/tree/master/rln/resources) arkzkey keys for the RLN circuit.

> [!NOTE]
> You can use this [convert_zkey.sh](./convert_zkey.sh) script
> to automate the process of generating the arkzkey file from any zkey file.

Run the script as follows:

```sh
chmod +x ./convert_zkey.sh
./convert_zkey.sh <path_to_rln_final.zkey>
```

## Get involved

@@ -1,25 +0,0 @@
use criterion::{criterion_group, criterion_main, Criterion};
use rln::circuit::{read_arkzkey_from_bytes_uncompressed, ARKZKEY_BYTES};

pub fn uncompressed_bench(c: &mut Criterion) {
    let arkzkey = ARKZKEY_BYTES.to_vec();
    let size = arkzkey.len() as f32;
    println!(
        "Size of uncompressed arkzkey: {:.2?} MB",
        size / 1024.0 / 1024.0
    );

    c.bench_function("arkzkey::arkzkey_from_raw_uncompressed", |b| {
        b.iter(|| {
            let r = read_arkzkey_from_bytes_uncompressed(&arkzkey);
            assert_eq!(r.is_ok(), true);
        })
    });
}

criterion_group! {
    name = benches;
    config = Criterion::default().sample_size(10);
    targets = uncompressed_bench
}
criterion_main!(benches);
@@ -1,24 +0,0 @@
use criterion::{criterion_group, criterion_main, Criterion};
use rln::circuit::zkey::read_zkey;
use std::io::Cursor;

pub fn zkey_load_benchmark(c: &mut Criterion) {
    let zkey = rln::circuit::ZKEY_BYTES.to_vec();
    let size = zkey.len() as f32;
    println!("Size of zkey: {:.2?} MB", size / 1024.0 / 1024.0);

    c.bench_function("zkey::zkey_from_raw", |b| {
        b.iter(|| {
            let mut reader = Cursor::new(zkey.clone());
            let r = read_zkey(&mut reader);
            assert_eq!(r.is_ok(), true);
        })
    });
}

criterion_group! {
    name = benches;
    config = Criterion::default().sample_size(10);
    targets = zkey_load_benchmark
}
criterion_main!(benches);
@@ -5,7 +5,7 @@ use utils::ZerokitMerkleTree;
pub fn pmtree_benchmark(c: &mut Criterion) {
    let mut tree = PmTree::default(2).unwrap();

    let leaves: Vec<Fr> = (0..4).map(|s| Fr::from(s)).collect();
    let leaves: Vec<Fr> = (0..4).map(Fr::from).collect();

    c.bench_function("Pmtree::set", |b| {
        b.iter(|| {
@@ -26,12 +26,6 @@ pub fn pmtree_benchmark(c: &mut Criterion) {
        })
    });

    c.bench_function("Pmtree::compute_root", |b| {
        b.iter(|| {
            tree.compute_root().unwrap();
        })
    });

    c.bench_function("Pmtree::get", |b| {
        b.iter(|| {
            tree.get(0).unwrap();

@@ -1,18 +1,18 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use rln::{
    circuit::{Fr, TEST_TREE_HEIGHT},
    circuit::{Fr, TEST_TREE_DEPTH},
    hashers::PoseidonHash,
};
use utils::{FullMerkleTree, OptimalMerkleTree, ZerokitMerkleTree};

pub fn get_leaves(n: u32) -> Vec<Fr> {
    (0..n).map(|s| Fr::from(s)).collect()
    (0..n).map(Fr::from).collect()
}

pub fn optimal_merkle_tree_poseidon_benchmark(c: &mut Criterion) {
    c.bench_function("OptimalMerkleTree::<Poseidon>::full_height_gen", |b| {
    c.bench_function("OptimalMerkleTree::<Poseidon>::full_depth_gen", |b| {
        b.iter(|| {
            OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
            OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
        })
    });

@@ -20,7 +20,7 @@ pub fn optimal_merkle_tree_poseidon_benchmark(c: &mut Criterion) {
    for &n in [1u32, 10, 100].iter() {
        let leaves = get_leaves(n);

        let mut tree = OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
        let mut tree = OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
        group.bench_function(
            BenchmarkId::new("OptimalMerkleTree::<Poseidon>::set", n),
            |b| {
@@ -41,9 +41,9 @@ pub fn optimal_merkle_tree_poseidon_benchmark(c: &mut Criterion) {
}

pub fn full_merkle_tree_poseidon_benchmark(c: &mut Criterion) {
    c.bench_function("FullMerkleTree::<Poseidon>::full_height_gen", |b| {
    c.bench_function("FullMerkleTree::<Poseidon>::full_depth_gen", |b| {
        b.iter(|| {
            FullMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
            FullMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
        })
    });

@@ -51,7 +51,7 @@ pub fn full_merkle_tree_poseidon_benchmark(c: &mut Criterion) {
    for &n in [1u32, 10, 100].iter() {
        let leaves = get_leaves(n);

        let mut tree = FullMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
        let mut tree = FullMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
        group.bench_function(
            BenchmarkId::new("FullMerkleTree::<Poseidon>::set", n),
            |b| {

53
rln/convert_zkey.sh
Executable file
@@ -0,0 +1,53 @@
#!/bin/bash

# Convert zkey to arkzkey using /tmp directory
# Usage: ./convert_zkey.sh <path_to_zkey_file>

set -e

# Check input
if [ $# -eq 0 ]; then
    echo "Usage: $0 <path_to_zkey_file>"
    exit 1
fi

ZKEY_FILE="$1"

if [ ! -f "$ZKEY_FILE" ]; then
    echo "Error: File '$ZKEY_FILE' does not exist"
    exit 1
fi

# Get absolute path before changing directories
ZKEY_ABSOLUTE_PATH=$(realpath "$ZKEY_FILE")

# Create temp directory in /tmp
TEMP_DIR="/tmp/ark-zkey-$$"
echo "Using temp directory: $TEMP_DIR"

# Cleanup function
cleanup() {
    echo "Cleaning up temp directory: $TEMP_DIR"
    rm -rf "$TEMP_DIR"
}

# Setup cleanup trap
trap cleanup EXIT

# Create temp directory and clone ark-zkey
mkdir -p "$TEMP_DIR"
cd "$TEMP_DIR"
git clone https://github.com/seemenkina/ark-zkey.git
cd ark-zkey
cargo build

# Convert
cargo run --bin arkzkey-util "$ZKEY_ABSOLUTE_PATH"

# Check if arkzkey file was created (tool creates it in same directory as input)
ARKZKEY_FILE="${ZKEY_ABSOLUTE_PATH%.zkey}.arkzkey"

if [ ! -f "$ARKZKEY_FILE" ]; then
    echo "Could not find generated .arkzkey file at $ARKZKEY_FILE"
    exit 1
fi
Binary file not shown.
7
rln/src/circuit/error.rs
Normal file
@@ -0,0 +1,7 @@
#[derive(Debug, thiserror::Error)]
pub enum ZKeyReadError {
    #[error("No proving key found!")]
    EmptyBytes,
    #[error("{0}")]
    SerializationError(#[from] ark_serialize::SerializationError),
}
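
A hedged sketch of consuming this error type, assuming the `zkey_from_raw` signature shown in the `circuit` module changes below:

```rust
use rln::circuit::{error::ZKeyReadError, zkey_from_raw};

fn load_proving_key(bytes: &[u8]) {
    match zkey_from_raw(bytes) {
        Ok((_proving_key, _matrices)) => println!("proving key loaded"),
        // Returned when the input byte slice is empty.
        Err(ZKeyReadError::EmptyBytes) => eprintln!("no proving key found"),
        // Wraps ark-serialize failures while reading the arkzkey bytes.
        Err(ZKeyReadError::SerializationError(e)) => eprintln!("deserialization failed: {e}"),
    }
}
```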
@@ -8,19 +8,33 @@ pub mod storage;
use ruint::aliases::U256;
use std::collections::HashMap;
use storage::deserialize_witnesscalc_graph;
use zeroize::zeroize_flat_type;

use crate::circuit::iden3calc::graph::fr_to_u256;
use crate::circuit::Fr;
use graph::{fr_to_u256, Node};
use crate::utils::FrOrSecret;
use graph::Node;

pub type InputSignalsInfo = HashMap<String, (usize, usize)>;

pub fn calc_witness<I: IntoIterator<Item = (String, Vec<Fr>)>>(
pub fn calc_witness<I: IntoIterator<Item = (String, Vec<FrOrSecret>)>>(
    inputs: I,
    graph_data: &[u8],
) -> Vec<Fr> {
    let inputs: HashMap<String, Vec<U256>> = inputs
    let mut inputs: HashMap<String, Vec<U256>> = inputs
        .into_iter()
        .map(|(key, value)| (key, value.iter().map(fr_to_u256).collect()))
        .map(|(key, value)| {
            (
                key,
                value
                    .iter()
                    .map(|f_| match f_ {
                        FrOrSecret::IdSecret(s) => s.to_u256(),
                        FrOrSecret::Fr(f) => fr_to_u256(f),
                    })
                    .collect(),
            )
        })
        .collect();

    let (nodes, signals, input_mapping): (Vec<Node>, Vec<usize>, InputSignalsInfo) =
@@ -28,8 +42,15 @@ pub fn calc_witness<I: IntoIterator<Item = (String, Vec<Fr>)>>(

    let mut inputs_buffer = get_inputs_buffer(get_inputs_size(&nodes));
    populate_inputs(&inputs, &input_mapping, &mut inputs_buffer);

    graph::evaluate(&nodes, inputs_buffer.as_slice(), &signals)
    if let Some(v) = inputs.get_mut("identitySecret") {
        // Zeroize the secret identity input in place (roughly equivalent to v[0] = U256::ZERO)
        unsafe { zeroize_flat_type(v) };
    }
    let res = graph::evaluate(&nodes, inputs_buffer.as_slice(), &signals);
    inputs_buffer.iter_mut().for_each(|i| {
        unsafe { zeroize_flat_type(i) };
    });
    res
}

fn get_inputs_size(nodes: &[Node]) -> usize {
@@ -56,7 +77,7 @@ fn populate_inputs(
    for (key, value) in input_list {
        let (offset, len) = inputs_info[key];
        if len != value.len() {
            panic!("Invalid input length for {}", key);
            panic!("Invalid input length for {key}");
        }

        for (i, v) in value.iter().enumerate() {

@@ -10,7 +10,7 @@ use std::{
    cmp::Ordering,
    collections::HashMap,
    error::Error,
    ops::{BitAnd, BitOr, BitXor, Deref, Shl, Shr},
    ops::{Deref, Shl, Shr},
};

use crate::circuit::iden3calc::proto;
@@ -944,14 +944,14 @@ mod tests {
        let x = M.div(uint!(2_U256));

        println!("x: {:?}", x.as_limbs());
        println!("x: {}", M);
        println!("x: {M}");
    }

    #[test]
    fn test_2() {
        let nodes: Vec<Node> = vec![];
        // let node = nodes[0];
        let node = nodes.get(0);
        println!("{:?}", node);
        let node = nodes.first();
        println!("{node:?}");
    }
}

@@ -419,13 +419,13 @@ mod tests {
    let mut r = WriteBackReader::new(std::io::Cursor::new(&data));

    let buf = &mut [0u8; 5];
    r.read(buf).unwrap();
    r.read_exact(buf).unwrap();
    assert_eq!(buf, &[1, 2, 3, 4, 5]);

    // return [4, 5] to reader
    r.write(&buf[3..]).unwrap();
    r.write_all(&buf[3..]).unwrap();
    // return [2, 3] to reader
    r.write(&buf[1..3]).unwrap();
    r.write_all(&buf[1..3]).unwrap();

    buf.fill(0);

@@ -1,8 +1,8 @@
// This crate provides interfaces for the zero-knowledge circuit and keys

pub mod error;
pub mod iden3calc;
pub mod qap;
pub mod zkey;

use ::lazy_static::lazy_static;
use ark_bn254::{
@@ -11,42 +11,25 @@ use ark_bn254::{
};
use ark_groth16::ProvingKey;
use ark_relations::r1cs::ConstraintMatrices;
use cfg_if::cfg_if;
use color_eyre::{Report, Result};

use crate::circuit::error::ZKeyReadError;
use crate::circuit::iden3calc::calc_witness;

#[cfg(feature = "arkzkey")]
use {
    ark_ff::Field, ark_serialize::CanonicalDeserialize, ark_serialize::CanonicalSerialize,
    color_eyre::eyre::WrapErr,
};
use {ark_ff::Field, ark_serialize::CanonicalDeserialize, ark_serialize::CanonicalSerialize};

#[cfg(not(feature = "arkzkey"))]
use {crate::circuit::zkey::read_zkey, std::io::Cursor};
use crate::utils::FrOrSecret;

#[cfg(feature = "arkzkey")]
pub const ARKZKEY_BYTES: &[u8] = include_bytes!("../../resources/tree_height_20/rln_final.arkzkey");

pub const ZKEY_BYTES: &[u8] = include_bytes!("../../resources/tree_height_20/rln_final.zkey");
pub const ARKZKEY_BYTES: &[u8] = include_bytes!("../../resources/tree_depth_20/rln_final.arkzkey");

#[cfg(not(target_arch = "wasm32"))]
const GRAPH_BYTES: &[u8] = include_bytes!("../../resources/tree_height_20/graph.bin");
const GRAPH_BYTES: &[u8] = include_bytes!("../../resources/tree_depth_20/graph.bin");

lazy_static! {
    static ref ZKEY: (ProvingKey<Curve>, ConstraintMatrices<Fr>) = {
        cfg_if! {
            if #[cfg(feature = "arkzkey")] {
                read_arkzkey_from_bytes_uncompressed(ARKZKEY_BYTES).expect("Failed to read arkzkey")
            } else {
                let mut reader = Cursor::new(ZKEY_BYTES);
                read_zkey(&mut reader).expect("Failed to read zkey")
            }
        }
    };
    static ref ARKZKEY: (ProvingKey<Curve>, ConstraintMatrices<Fr>) =
        read_arkzkey_from_bytes_uncompressed(ARKZKEY_BYTES).expect("Failed to read arkzkey");
}

pub const TEST_TREE_HEIGHT: usize = 20;
pub const TEST_TREE_DEPTH: usize = 20;

// The following types define the pairing friendly elliptic curve, the underlying finite fields and groups default to this module
// Note that proofs are serialized assuming Fr to be 4x8 = 32 bytes in size. Hence, changing to a curve with different encoding will make proof verification to fail
@@ -60,20 +43,14 @@ pub type G2Affine = ArkG2Affine;
pub type G2Projective = ArkG2Projective;

// Loads the proving key using a bytes vector
pub fn zkey_from_raw(zkey_data: &[u8]) -> Result<(ProvingKey<Curve>, ConstraintMatrices<Fr>)> {
pub fn zkey_from_raw(
    zkey_data: &[u8],
) -> Result<(ProvingKey<Curve>, ConstraintMatrices<Fr>), ZKeyReadError> {
    if zkey_data.is_empty() {
        return Err(Report::msg("No proving key found!"));
        return Err(ZKeyReadError::EmptyBytes);
    }

    let proving_key_and_matrices = match () {
        #[cfg(feature = "arkzkey")]
        () => read_arkzkey_from_bytes_uncompressed(zkey_data)?,
        #[cfg(not(feature = "arkzkey"))]
        () => {
            let mut reader = Cursor::new(zkey_data);
            read_zkey(&mut reader)?
        }
    };
    let proving_key_and_matrices = read_arkzkey_from_bytes_uncompressed(zkey_data)?;

    Ok(proving_key_and_matrices)
}
@@ -81,10 +58,10 @@ pub fn zkey_from_raw(zkey_data: &[u8]) -> Result<(ProvingKey<Curve>, ConstraintM
// Loads the proving key
#[cfg(not(target_arch = "wasm32"))]
pub fn zkey_from_folder() -> &'static (ProvingKey<Curve>, ConstraintMatrices<Fr>) {
    &ZKEY
    &ARKZKEY
}

pub fn calculate_rln_witness<I: IntoIterator<Item = (String, Vec<Fr>)>>(
pub fn calculate_rln_witness<I: IntoIterator<Item = (String, Vec<FrOrSecret>)>>(
    inputs: I,
    graph_data: &[u8],
) -> Vec<Fr> {
@@ -101,11 +78,9 @@ pub fn graph_from_folder() -> &'static [u8] {
// without print and allow to choose between compressed and uncompressed arkzkey
////////////////////////////////////////////////////////

#[cfg(feature = "arkzkey")]
#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq)]
pub struct SerializableProvingKey(pub ProvingKey<Bn254>);

#[cfg(feature = "arkzkey")]
#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq)]
pub struct SerializableConstraintMatrices<F: Field> {
    pub num_instance_variables: usize,
@@ -119,29 +94,25 @@ pub struct SerializableConstraintMatrices<F: Field> {
    pub c: SerializableMatrix<F>,
}

#[cfg(feature = "arkzkey")]
#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq)]
pub struct SerializableMatrix<F: Field> {
    pub data: Vec<Vec<(F, usize)>>,
}

#[cfg(feature = "arkzkey")]
pub fn read_arkzkey_from_bytes_uncompressed(
    arkzkey_data: &[u8],
) -> Result<(ProvingKey<Curve>, ConstraintMatrices<Fr>)> {
) -> Result<(ProvingKey<Curve>, ConstraintMatrices<Fr>), ZKeyReadError> {
    if arkzkey_data.is_empty() {
        return Err(Report::msg("No proving key found!"));
        return Err(ZKeyReadError::EmptyBytes);
    }

    let mut cursor = std::io::Cursor::new(arkzkey_data);

    let serialized_proving_key =
        SerializableProvingKey::deserialize_uncompressed_unchecked(&mut cursor)
            .wrap_err("Failed to deserialize proving key")?;
        SerializableProvingKey::deserialize_uncompressed_unchecked(&mut cursor)?;

    let serialized_constraint_matrices =
        SerializableConstraintMatrices::deserialize_uncompressed_unchecked(&mut cursor)
            .wrap_err("Failed to deserialize constraint matrices")?;
        SerializableConstraintMatrices::deserialize_uncompressed_unchecked(&mut cursor)?;

    // Get into the right form for the API
    let proving_key: ProvingKey<Bn254> = serialized_proving_key.0;

@@ -7,6 +7,12 @@ use ark_poly::EvaluationDomain;
use ark_relations::r1cs::{ConstraintMatrices, ConstraintSystemRef, SynthesisError};
use ark_std::{cfg_into_iter, cfg_iter, cfg_iter_mut, vec};

#[cfg(feature = "parallel")]
use rayon::iter::{
    IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator,
    IntoParallelRefMutIterator, ParallelIterator,
};

/// Implements the witness map used by snarkjs. The arkworks witness map calculates the
/// coefficients of H through computing (AB-C)/Z in the evaluation domain and going back to the
/// coefficients domain. snarkjs instead precomputes the Lagrange form of the powers of tau bases
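
For reference, the relation named in the doc comment: in the evaluation domain the quotient polynomial is

$$
H(X) = \frac{A(X)\,B(X) - C(X)}{Z(X)},
$$

where $A$, $B$, $C$ interpolate the constraint evaluations of the witness and $Z$ is the vanishing polynomial of the domain; as the comment notes, snarkjs evaluates this against precomputed Lagrange-form powers-of-tau bases rather than converting back to coefficient form.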
@@ -1,371 +0,0 @@
// This file is based on the code by arkworks. Its preimage can be found here:
// https://github.com/arkworks-rs/circom-compat/blob/3c95ed98e23a408b4d99a53e483a9bba39685a4e/src/zkey.rs

//! ZKey Parsing
//!
//! Each ZKey file is broken into sections:
//!  Header(1)
//!  Prover Type 1 Groth
//!  HeaderGroth(2)
//!  n8q
//!  q
//!  n8r
//!  r
//!  NVars
//!  NPub
//!  DomainSize (multiple of 2)
//!  alpha1
//!  beta1
//!  delta1
//!  beta2
//!  gamma2
//!  delta2
//!  IC(3)
//!  Coefs(4)
//!  PointsA(5)
//!  PointsB1(6)
//!  PointsB2(7)
//!  PointsC(8)
//!  PointsH(9)
//!  Contributions(10)
use ark_ff::{BigInteger256, PrimeField};
use ark_relations::r1cs::ConstraintMatrices;
use ark_serialize::{CanonicalDeserialize, SerializationError};
use ark_std::log2;
use byteorder::{LittleEndian, ReadBytesExt};

use std::{
    collections::HashMap,
    io::{Read, Seek, SeekFrom},
};

use ark_bn254::{Bn254, Fq, Fq2, Fr, G1Affine, G2Affine};
use ark_groth16::{ProvingKey, VerifyingKey};
use num_traits::Zero;

type IoResult<T> = Result<T, SerializationError>;

#[derive(Clone, Debug)]
struct Section {
    position: u64,
    #[allow(dead_code)]
    size: usize,
}

/// Reads a SnarkJS ZKey file into an Arkworks ProvingKey.
pub fn read_zkey<R: Read + Seek>(
    reader: &mut R,
) -> IoResult<(ProvingKey<Bn254>, ConstraintMatrices<Fr>)> {
    let mut binfile = BinFile::new(reader)?;
    let proving_key = binfile.proving_key()?;
    let matrices = binfile.matrices()?;
    Ok((proving_key, matrices))
}

#[derive(Debug)]
struct BinFile<'a, R> {
    #[allow(dead_code)]
    ftype: String,
    #[allow(dead_code)]
    version: u32,
    sections: HashMap<u32, Vec<Section>>,
    reader: &'a mut R,
}

impl<'a, R: Read + Seek> BinFile<'a, R> {
    fn new(reader: &'a mut R) -> IoResult<Self> {
        let mut magic = [0u8; 4];
        reader.read_exact(&mut magic)?;

        let version = reader.read_u32::<LittleEndian>()?;

        let num_sections = reader.read_u32::<LittleEndian>()?;

        let mut sections = HashMap::new();
        for _ in 0..num_sections {
            let section_id = reader.read_u32::<LittleEndian>()?;
            let section_length = reader.read_u64::<LittleEndian>()?;

            let section = sections.entry(section_id).or_insert_with(Vec::new);
            section.push(Section {
                position: reader.stream_position()?,
                size: section_length as usize,
            });

            reader.seek(SeekFrom::Current(section_length as i64))?;
        }

        Ok(Self {
            ftype: std::str::from_utf8(&magic[..]).unwrap().to_string(),
            version,
            sections,
            reader,
        })
    }

    fn proving_key(&mut self) -> IoResult<ProvingKey<Bn254>> {
        let header = self.groth_header()?;
        let ic = self.ic(header.n_public)?;

        let a_query = self.a_query(header.n_vars)?;
        let b_g1_query = self.b_g1_query(header.n_vars)?;
        let b_g2_query = self.b_g2_query(header.n_vars)?;
        let l_query = self.l_query(header.n_vars - header.n_public - 1)?;
        let h_query = self.h_query(header.domain_size as usize)?;

        let vk = VerifyingKey::<Bn254> {
            alpha_g1: header.verifying_key.alpha_g1,
            beta_g2: header.verifying_key.beta_g2,
            gamma_g2: header.verifying_key.gamma_g2,
            delta_g2: header.verifying_key.delta_g2,
            gamma_abc_g1: ic,
        };

        let pk = ProvingKey::<Bn254> {
            vk,
            beta_g1: header.verifying_key.beta_g1,
            delta_g1: header.verifying_key.delta_g1,
            a_query,
            b_g1_query,
            b_g2_query,
            h_query,
            l_query,
        };

        Ok(pk)
    }

    fn get_section(&self, id: u32) -> Section {
        self.sections.get(&id).unwrap()[0].clone()
    }

    fn groth_header(&mut self) -> IoResult<HeaderGroth> {
        let section = self.get_section(2);
        let header = HeaderGroth::new(&mut self.reader, &section)?;
        Ok(header)
    }

    fn ic(&mut self, n_public: usize) -> IoResult<Vec<G1Affine>> {
        // the range is non-inclusive so we do +1 to get all inputs
        self.g1_section(n_public + 1, 3)
    }

    /// Returns the [`ConstraintMatrices`] corresponding to the zkey
    pub fn matrices(&mut self) -> IoResult<ConstraintMatrices<Fr>> {
        let header = self.groth_header()?;

        let section = self.get_section(4);
        self.reader.seek(SeekFrom::Start(section.position))?;
        let num_coeffs: u32 = self.reader.read_u32::<LittleEndian>()?;

        // instantiate A and B
        let mut matrices = vec![vec![vec![]; header.domain_size as usize]; 2];
        let mut max_constraint_index = 0;
        for _ in 0..num_coeffs {
            let matrix: u32 = self.reader.read_u32::<LittleEndian>()?;
            let constraint: u32 = self.reader.read_u32::<LittleEndian>()?;
            let signal: u32 = self.reader.read_u32::<LittleEndian>()?;

            let value: Fr = deserialize_field_fr(&mut self.reader)?;
            max_constraint_index = std::cmp::max(max_constraint_index, constraint);
            matrices[matrix as usize][constraint as usize].push((value, signal as usize));
        }

        let num_constraints = max_constraint_index as usize - header.n_public;
        // Remove the public input constraints, Arkworks adds them later
        matrices.iter_mut().for_each(|m| {
            m.truncate(num_constraints);
        });
        // This is taken from Arkworks' to_matrices() function
        let a = matrices[0].clone();
        let b = matrices[1].clone();
        let a_num_non_zero: usize = a.iter().map(|lc| lc.len()).sum();
        let b_num_non_zero: usize = b.iter().map(|lc| lc.len()).sum();
        let matrices = ConstraintMatrices {
            num_instance_variables: header.n_public + 1,
            num_witness_variables: header.n_vars - header.n_public,
            num_constraints,

            a_num_non_zero,
            b_num_non_zero,
            c_num_non_zero: 0,

            a,
            b,
            c: vec![],
        };

        Ok(matrices)
    }

    fn a_query(&mut self, n_vars: usize) -> IoResult<Vec<G1Affine>> {
        self.g1_section(n_vars, 5)
    }

    fn b_g1_query(&mut self, n_vars: usize) -> IoResult<Vec<G1Affine>> {
        self.g1_section(n_vars, 6)
    }

    fn b_g2_query(&mut self, n_vars: usize) -> IoResult<Vec<G2Affine>> {
        self.g2_section(n_vars, 7)
    }

    fn l_query(&mut self, n_vars: usize) -> IoResult<Vec<G1Affine>> {
        self.g1_section(n_vars, 8)
    }

    fn h_query(&mut self, n_vars: usize) -> IoResult<Vec<G1Affine>> {
        self.g1_section(n_vars, 9)
    }

    fn g1_section(&mut self, num: usize, section_id: usize) -> IoResult<Vec<G1Affine>> {
        let section = self.get_section(section_id as u32);
        self.reader.seek(SeekFrom::Start(section.position))?;
        deserialize_g1_vec(self.reader, num as u32)
    }

    fn g2_section(&mut self, num: usize, section_id: usize) -> IoResult<Vec<G2Affine>> {
        let section = self.get_section(section_id as u32);
        self.reader.seek(SeekFrom::Start(section.position))?;
        deserialize_g2_vec(self.reader, num as u32)
    }
}

#[derive(Default, Clone, Debug, CanonicalDeserialize)]
pub struct ZVerifyingKey {
    alpha_g1: G1Affine,
    beta_g1: G1Affine,
    beta_g2: G2Affine,
    gamma_g2: G2Affine,
    delta_g1: G1Affine,
    delta_g2: G2Affine,
}

impl ZVerifyingKey {
    fn new<R: Read>(reader: &mut R) -> IoResult<Self> {
        let alpha_g1 = deserialize_g1(reader)?;
        let beta_g1 = deserialize_g1(reader)?;
        let beta_g2 = deserialize_g2(reader)?;
        let gamma_g2 = deserialize_g2(reader)?;
        let delta_g1 = deserialize_g1(reader)?;
        let delta_g2 = deserialize_g2(reader)?;

        Ok(Self {
            alpha_g1,
            beta_g1,
            beta_g2,
            gamma_g2,
            delta_g1,
            delta_g2,
        })
    }
}

#[derive(Clone, Debug)]
struct HeaderGroth {
    #[allow(dead_code)]
    n8q: u32,
    #[allow(dead_code)]
    q: BigInteger256,
    #[allow(dead_code)]
    n8r: u32,
    #[allow(dead_code)]
    r: BigInteger256,

    n_vars: usize,
    n_public: usize,

    domain_size: u32,
    #[allow(dead_code)]
    power: u32,

    verifying_key: ZVerifyingKey,
}

impl HeaderGroth {
    fn new<R: Read + Seek>(reader: &mut R, section: &Section) -> IoResult<Self> {
        reader.seek(SeekFrom::Start(section.position))?;
        Self::read(reader)
    }

    fn read<R: Read>(mut reader: &mut R) -> IoResult<Self> {
        // TODO: Impl From<u32> in Arkworks
        let n8q: u32 = u32::deserialize_uncompressed(&mut reader)?;
        // base field modulus q of Bn254
        let q = BigInteger256::deserialize_uncompressed(&mut reader)?;

        let n8r: u32 = u32::deserialize_uncompressed(&mut reader)?;
        // scalar field modulus r of Bn254 (the group order)
        let r = BigInteger256::deserialize_uncompressed(&mut reader)?;

        let n_vars = u32::deserialize_uncompressed(&mut reader)? as usize;
        let n_public = u32::deserialize_uncompressed(&mut reader)? as usize;

        let domain_size: u32 = u32::deserialize_uncompressed(&mut reader)?;
        let power = log2(domain_size as usize);

        let verifying_key = ZVerifyingKey::new(&mut reader)?;

        Ok(Self {
            n8q,
            q,
            n8r,
            r,
            n_vars,
            n_public,
            domain_size,
            power,
            verifying_key,
        })
    }
}

// need to divide by R, since snarkjs outputs the zkey with coefficients
// multiplied by R^2
fn deserialize_field_fr<R: Read>(reader: &mut R) -> IoResult<Fr> {
    let bigint = BigInteger256::deserialize_uncompressed(reader)?;
    Ok(Fr::new_unchecked(Fr::new_unchecked(bigint).into_bigint()))
}

// skips the multiplication by R because Circom points are already in Montgomery form
fn deserialize_field<R: Read>(reader: &mut R) -> IoResult<Fq> {
    let bigint = BigInteger256::deserialize_uncompressed(reader)?;
    // if you use Fq::new it multiplies by R
    Ok(Fq::new_unchecked(bigint))
}

pub fn deserialize_field2<R: Read>(reader: &mut R) -> IoResult<Fq2> {
    let c0 = deserialize_field(reader)?;
    let c1 = deserialize_field(reader)?;
    Ok(Fq2::new(c0, c1))
}

fn deserialize_g1<R: Read>(reader: &mut R) -> IoResult<G1Affine> {
    let x = deserialize_field(reader)?;
    let y = deserialize_field(reader)?;
    let infinity = x.is_zero() && y.is_zero();
    if infinity {
        Ok(G1Affine::identity())
    } else {
        Ok(G1Affine::new(x, y))
    }
}

fn deserialize_g2<R: Read>(reader: &mut R) -> IoResult<G2Affine> {
    let f1 = deserialize_field2(reader)?;
    let f2 = deserialize_field2(reader)?;
    let infinity = f1.is_zero() && f2.is_zero();
    if infinity {
        Ok(G2Affine::identity())
    } else {
        Ok(G2Affine::new(f1, f2))
    }
}

fn deserialize_g1_vec<R: Read>(reader: &mut R, n_vars: u32) -> IoResult<Vec<G1Affine>> {
    (0..n_vars).map(|_| deserialize_g1(reader)).collect()
}

fn deserialize_g2_vec<R: Read>(reader: &mut R, n_vars: u32) -> IoResult<Vec<G2Affine>> {
    (0..n_vars).map(|_| deserialize_g2(reader)).collect()
}
79
rln/src/error.rs
Normal file
@@ -0,0 +1,79 @@
use crate::circuit::error::ZKeyReadError;
use ark_bn254::Fr;
use ark_relations::r1cs::SynthesisError;
use ark_serialize::SerializationError;
use num_bigint::{BigInt, ParseBigIntError};
use std::array::TryFromSliceError;
use std::num::TryFromIntError;
use std::string::FromUtf8Error;
use thiserror::Error;
use utils::error::{FromConfigError, ZerokitMerkleTreeError};

#[derive(Debug, thiserror::Error)]
pub enum ConversionError {
    #[error("Expected radix 10 or 16")]
    WrongRadix,
    #[error("{0}")]
    ParseBigInt(#[from] ParseBigIntError),
    #[error("{0}")]
    ToUsize(#[from] TryFromIntError),
    #[error("{0}")]
    FromSlice(#[from] TryFromSliceError),
    #[error("Input data too short: expected at least {expected} bytes, got {actual} bytes")]
    InsufficientData { expected: usize, actual: usize },
}

#[derive(Error, Debug)]
pub enum ProofError {
    #[error("{0}")]
    ProtocolError(#[from] ProtocolError),
    #[error("Error producing proof: {0}")]
    SynthesisError(#[from] SynthesisError),
}

#[derive(Debug, thiserror::Error)]
pub enum ProtocolError {
    #[error("{0}")]
    Conversion(#[from] ConversionError),
    #[error("Expected to read {0} bytes but read only {1} bytes")]
    InvalidReadLen(usize, usize),
    #[error("Cannot convert bigint {0:?} to biguint")]
    BigUintConversion(BigInt),
    #[error("{0}")]
    JsonError(#[from] serde_json::Error),
    #[error("Message id ({0}) is not within user_message_limit ({1})")]
    InvalidMessageId(Fr, Fr),
}

#[derive(Debug, thiserror::Error)]
pub enum ComputeIdSecretError {
    /// Usually it means that the same signal is used to recover the user secret hash
    #[error("Cannot recover secret: division by zero")]
    DivisionByZero,
}

#[derive(Debug, thiserror::Error)]
pub enum RLNError {
    #[error("I/O error: {0}")]
    IO(#[from] std::io::Error),
    #[error("Utf8 error: {0}")]
    Utf8(#[from] FromUtf8Error),
    #[error("Serde json error: {0}")]
    JSON(#[from] serde_json::Error),
    #[error("Config error: {0}")]
    Config(#[from] FromConfigError),
    #[error("Serialization error: {0}")]
    Serialization(#[from] SerializationError),
    #[error("Merkle tree error: {0}")]
    MerkleTree(#[from] ZerokitMerkleTreeError),
    #[error("ZKey error: {0}")]
    ZKey(#[from] ZKeyReadError),
    #[error("Conversion error: {0}")]
    Conversion(#[from] ConversionError),
    #[error("Protocol error: {0}")]
    Protocol(#[from] ProtocolError),
    #[error("Proof error: {0}")]
    Proof(#[from] ProofError),
    #[error("Unable to extract secret")]
    RecoverSecret(#[from] ComputeIdSecretError),
}
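Editor's note: the `#[from]` conversions make `RLNError` a single funnel for the `?` operator. A hypothetical call site (illustrative file name and flow, not part of this diff):

    fn load_config() -> Result<serde_json::Value, RLNError> {
        let bytes = std::fs::read("tree_config.json")?; // io::Error -> RLNError::IO
        let text = String::from_utf8(bytes)?;           // FromUtf8Error -> RLNError::Utf8
        Ok(serde_json::from_str(&text)?)                // serde_json::Error -> RLNError::JSON
    }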
184 rln/src/ffi.rs
@@ -2,7 +2,12 @@

use std::slice;

-use crate::public::{hash as public_hash, poseidon_hash as public_poseidon_hash, RLN};
+use crate::public::{
+    extended_key_gen as public_extended_key_gen, hash as public_hash, key_gen as public_key_gen,
+    poseidon_hash as public_poseidon_hash,
+    seeded_extended_key_gen as public_seeded_extended_key_gen,
+    seeded_key_gen as public_seeded_key_gen, RLN,
+};

// Macro to call methods with arbitrary amount of arguments,
// First argument to the macro is context,

@@ -80,23 +85,48 @@ macro_rules! call_with_output_arg {
// Second argument is the output buffer argument
// The remaining arguments are all other inputs to the method
macro_rules! no_ctx_call_with_output_arg {
-    ($method:ident, $output_arg:expr, $( $arg:expr ),* ) => {
-        {
-            let mut output_data: Vec<u8> = Vec::new();
-            match $method($($arg.process()),*, &mut output_data) {
-                Ok(()) => {
-                    unsafe { *$output_arg = Buffer::from(&output_data[..]) };
-                    std::mem::forget(output_data);
-                    true
-                }
-                Err(err) => {
-                    std::mem::forget(output_data);
-                    eprintln!("execution error: {err}");
-                    false
-                }
-            }
-        }
-    }
+    ($method:ident, $output_arg:expr, $input_arg:expr, $endianness_arg:expr) => {{
+        let mut output_data: Vec<u8> = Vec::new();
+        match $method(
+            $input_arg.process(),
+            &mut output_data,
+            $endianness_arg.process(),
+        ) {
+            Ok(()) => {
+                unsafe { *$output_arg = Buffer::from(&output_data[..]) };
+                std::mem::forget(output_data);
+                true
+            }
+            Err(err) => {
+                std::mem::forget(output_data);
+                eprintln!("execution error: {err}");
+                false
+            }
+        }
+    }};
}

+// Macro to call methods with arbitrary amount of arguments,
+// which are not implemented in a ctx RLN object
+// First argument is the method to call
+// Second argument is the output buffer argument
+// The remaining arguments are all other inputs to the method
+macro_rules! no_ctx_call_with_output_arg_and_endianness {
+    ($method:ident, $output_arg:expr, $endianness_arg:expr) => {{
+        let mut output_data: Vec<u8> = Vec::new();
+        match $method(&mut output_data, $endianness_arg.process()) {
+            Ok(()) => {
+                unsafe { *$output_arg = Buffer::from(&output_data[..]) };
+                std::mem::forget(output_data);
+                true
+            }
+            Err(err) => {
+                std::mem::forget(output_data);
+                eprintln!("execution error: {err}");
+                false
+            }
+        }
+    }};
+}

// Macro to call methods with arbitrary amount of arguments,

@@ -158,6 +188,13 @@ impl ProcessArg for *mut RLN {
    }
}

+impl ProcessArg for bool {
+    type ReturnType = bool;
+    fn process(self) -> Self::ReturnType {
+        self
+    }
+}

///// Buffer struct is taken from
///// <https://github.com/celo-org/celo-threshold-bls-rs/blob/master/crates/threshold-bls-ffi/src/ffi.rs>
/////

@@ -195,8 +232,8 @@ impl<'a> From<&Buffer> for &'a [u8] {
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[cfg(not(feature = "stateless"))]
#[no_mangle]
-pub extern "C" fn new(tree_height: usize, input_buffer: *const Buffer, ctx: *mut *mut RLN) -> bool {
-    match RLN::new(tree_height, input_buffer.process()) {
+pub extern "C" fn new(tree_depth: usize, input_buffer: *const Buffer, ctx: *mut *mut RLN) -> bool {
+    match RLN::new(tree_depth, input_buffer.process()) {
        Ok(rln) => {
            unsafe { *ctx = Box::into_raw(Box::new(rln)) };
            true

@@ -228,14 +265,14 @@ pub extern "C" fn new(ctx: *mut *mut RLN) -> bool {
#[cfg(not(feature = "stateless"))]
#[no_mangle]
pub extern "C" fn new_with_params(
-    tree_height: usize,
+    tree_depth: usize,
    zkey_buffer: *const Buffer,
    graph_data: *const Buffer,
    tree_config: *const Buffer,
    ctx: *mut *mut RLN,
) -> bool {
    match RLN::new_with_params(
-        tree_height,
+        tree_depth,
        zkey_buffer.process().to_vec(),
        graph_data.process().to_vec(),
        tree_config.process(),

@@ -280,8 +317,8 @@ pub extern "C" fn new_with_params(
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
#[cfg(not(feature = "stateless"))]
-pub extern "C" fn set_tree(ctx: *mut RLN, tree_height: usize) -> bool {
-    call!(ctx, set_tree, tree_height)
+pub extern "C" fn set_tree(ctx: *mut RLN, tree_depth: usize) -> bool {
+    call!(ctx, set_tree, tree_depth)
}

#[allow(clippy::not_unsafe_ptr_arg_deref)]

@@ -460,38 +497,6 @@ pub extern "C" fn verify_with_roots(
////////////////////////////////////////////////////////
// Utils
////////////////////////////////////////////////////////
-#[allow(clippy::not_unsafe_ptr_arg_deref)]
-#[no_mangle]
-pub extern "C" fn key_gen(ctx: *const RLN, output_buffer: *mut Buffer) -> bool {
-    call_with_output_arg!(ctx, key_gen, output_buffer)
-}
-
-#[allow(clippy::not_unsafe_ptr_arg_deref)]
-#[no_mangle]
-pub extern "C" fn seeded_key_gen(
-    ctx: *const RLN,
-    input_buffer: *const Buffer,
-    output_buffer: *mut Buffer,
-) -> bool {
-    call_with_output_arg!(ctx, seeded_key_gen, output_buffer, input_buffer)
-}
-
-#[allow(clippy::not_unsafe_ptr_arg_deref)]
-#[no_mangle]
-pub extern "C" fn extended_key_gen(ctx: *const RLN, output_buffer: *mut Buffer) -> bool {
-    call_with_output_arg!(ctx, extended_key_gen, output_buffer)
-}
-
-#[allow(clippy::not_unsafe_ptr_arg_deref)]
-#[no_mangle]
-pub extern "C" fn seeded_extended_key_gen(
-    ctx: *const RLN,
-    input_buffer: *const Buffer,
-    output_buffer: *mut Buffer,
-) -> bool {
-    call_with_output_arg!(ctx, seeded_extended_key_gen, output_buffer, input_buffer)
-}
-
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
pub extern "C" fn recover_id_secret(

@@ -534,14 +539,77 @@ pub extern "C" fn flush(ctx: *mut RLN) -> bool {
    call!(ctx, flush)
}

+////////////////////////////////////////////////////////
+// Utils APIs
+////////////////////////////////////////////////////////
+
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
-pub extern "C" fn hash(input_buffer: *const Buffer, output_buffer: *mut Buffer) -> bool {
-    no_ctx_call_with_output_arg!(public_hash, output_buffer, input_buffer)
+pub extern "C" fn hash(
+    input_buffer: *const Buffer,
+    output_buffer: *mut Buffer,
+    is_little_endian: bool,
+) -> bool {
+    no_ctx_call_with_output_arg!(public_hash, output_buffer, input_buffer, is_little_endian)
}

#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
-pub extern "C" fn poseidon_hash(input_buffer: *const Buffer, output_buffer: *mut Buffer) -> bool {
-    no_ctx_call_with_output_arg!(public_poseidon_hash, output_buffer, input_buffer)
+pub extern "C" fn poseidon_hash(
+    input_buffer: *const Buffer,
+    output_buffer: *mut Buffer,
+    is_little_endian: bool,
+) -> bool {
+    no_ctx_call_with_output_arg!(
+        public_poseidon_hash,
+        output_buffer,
+        input_buffer,
+        is_little_endian
+    )
}
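Editor's note: the new `is_little_endian` flag threads through every utils entry point here. A sketch of exercising the C ABI from a Rust test (the `Buffer` conversions are the ones defined above; the 32-byte signal is illustrative):

    use std::mem::MaybeUninit;

    let signal = [1u8; 32];
    let input = Buffer::from(&signal[..]);
    let mut output = MaybeUninit::<Buffer>::uninit();
    // Keccak-hash `signal` to a field element, serialized little-endian.
    assert!(hash(&input, output.as_mut_ptr(), true));
    let out = unsafe { output.assume_init() };
    let field_bytes: &[u8] = (&out).into();
    assert_eq!(field_bytes.len(), 32);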

+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+#[no_mangle]
+pub extern "C" fn key_gen(output_buffer: *mut Buffer, is_little_endian: bool) -> bool {
+    no_ctx_call_with_output_arg_and_endianness!(public_key_gen, output_buffer, is_little_endian)
+}
+
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+#[no_mangle]
+pub extern "C" fn seeded_key_gen(
+    input_buffer: *const Buffer,
+    output_buffer: *mut Buffer,
+    is_little_endian: bool,
+) -> bool {
+    no_ctx_call_with_output_arg!(
+        public_seeded_key_gen,
+        output_buffer,
+        input_buffer,
+        is_little_endian
+    )
+}
+
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+#[no_mangle]
+pub extern "C" fn extended_key_gen(output_buffer: *mut Buffer, is_little_endian: bool) -> bool {
+    no_ctx_call_with_output_arg_and_endianness!(
+        public_extended_key_gen,
+        output_buffer,
+        is_little_endian
+    )
+}
+
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+#[no_mangle]
+pub extern "C" fn seeded_extended_key_gen(
+    input_buffer: *const Buffer,
+    output_buffer: *mut Buffer,
+    is_little_endian: bool,
+) -> bool {
+    no_ctx_call_with_output_arg!(
+        public_seeded_extended_key_gen,
+        output_buffer,
+        input_buffer,
+        is_little_endian
+    )
+}
@@ -1,5 +1,8 @@
/// This crate instantiates the Poseidon hash algorithm.
-use crate::{circuit::Fr, utils::bytes_le_to_fr};
+use crate::{
+    circuit::Fr,
+    utils::{bytes_be_to_fr, bytes_le_to_fr},
+};
use once_cell::sync::Lazy;
use tiny_keccak::{Hasher, Keccak};
use utils::poseidon::Poseidon;

@@ -45,7 +48,7 @@ impl utils::merkle_tree::Hasher for PoseidonHash {
}

/// Hashes arbitrary signal to the underlying prime field.
-pub fn hash_to_field(signal: &[u8]) -> Fr {
+pub fn hash_to_field_le(signal: &[u8]) -> Fr {
    // We hash the input signal using Keccak256
    let mut hash = [0; 32];
    let mut hasher = Keccak::v256();

@@ -56,3 +59,19 @@ pub fn hash_to_field(signal: &[u8]) -> Fr {
    let (el, _) = bytes_le_to_fr(hash.as_ref());
    el
}
+
+/// Hashes arbitrary signal to the underlying prime field.
+pub fn hash_to_field_be(signal: &[u8]) -> Fr {
+    // We hash the input signal using Keccak256
+    let mut hash = [0; 32];
+    let mut hasher = Keccak::v256();
+    hasher.update(signal);
+    hasher.finalize(&mut hash);
+
+    // Reverse the bytes to get big endian representation
+    hash.reverse();
+
+    // We export the hash as a field element
+    let (el, _) = bytes_be_to_fr(hash.as_ref());
+    el
+}
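Editor's note: because `hash_to_field_be` reverses the digest before reading it big-endian, the two variants should map any signal to the same field element, assuming `bytes_be_to_fr` mirrors `bytes_le_to_fr`. A property-check sketch (my own, not part of the diff):

    #[test]
    fn hash_to_field_le_and_be_agree() {
        let signal = b"example signal";
        // Reversing bytes and reading big-endian equals reading little-endian.
        assert_eq!(hash_to_field_le(signal), hash_to_field_be(signal));
    }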
@@ -1,12 +1,35 @@
pub mod circuit;
+pub mod error;
+#[cfg(not(target_arch = "wasm32"))]
pub mod ffi;
pub mod hashers;
#[cfg(feature = "pmtree-ft")]
pub mod pm_tree_adapter;
#[cfg(not(feature = "stateless"))]
pub mod poseidon_tree;
pub mod protocol;
pub mod public;
#[cfg(test)]
pub mod public_api_tests;
pub mod utils;
+
+// Ensure that only one Merkle tree feature is enabled at a time
+#[cfg(any(
+    all(feature = "fullmerkletree", feature = "optimalmerkletree"),
+    all(feature = "fullmerkletree", feature = "pmtree-ft"),
+    all(feature = "optimalmerkletree", feature = "pmtree-ft"),
+))]
+compile_error!(
+    "Only one of `fullmerkletree`, `optimalmerkletree`, or `pmtree-ft` can be enabled at a time."
+);
+
+// Ensure that the `stateless` feature is not enabled with any Merkle tree features
+#[cfg(all(
+    feature = "stateless",
+    any(
+        feature = "fullmerkletree",
+        feature = "optimalmerkletree",
+        feature = "pmtree-ft"
+    )
+))]
+compile_error!("Cannot enable any Merkle tree features with stateless");
@@ -1,17 +1,16 @@
-use serde_json::Value;
use std::fmt::Debug;
use std::path::PathBuf;
use std::str::FromStr;

-use color_eyre::{Report, Result};
+use serde_json::Value;

-use utils::pmtree::tree::Key;
-use utils::pmtree::{Database, Hasher};
-use utils::*;
+use tempfile::Builder;

use crate::circuit::Fr;
use crate::hashers::{poseidon_hash, PoseidonHash};
use crate::utils::{bytes_le_to_fr, fr_to_bytes_le};
+use utils::error::{FromConfigError, ZerokitMerkleTreeError};
+use utils::pmtree::tree::Key;
+use utils::pmtree::{Database, Hasher, PmtreeErrorKind};
+use utils::{pmtree, Config, Mode, SledDB, ZerokitMerkleProof, ZerokitMerkleTree};

const METADATA_KEY: [u8; 8] = *b"metadata";

@@ -52,20 +51,104 @@ impl Hasher for PoseidonHash {
    }
}

-fn get_tmp_path() -> PathBuf {
-    std::env::temp_dir().join(format!("pmtree-{}", rand::random::<u64>()))
+fn default_tmp_path() -> PathBuf {
+    Builder::new()
+        .prefix("pmtree-")
+        .tempfile()
+        .expect("Failed to create temp file")
+        .into_temp_path()
+        .to_path_buf()
}

-fn get_tmp() -> bool {
-    true
-}
+const DEFAULT_TEMPORARY: bool = true;
+const DEFAULT_CACHE_CAPACITY: u64 = 1073741824; // 1 Gigabyte
+const DEFAULT_FLUSH_EVERY_MS: u64 = 500; // 500 Milliseconds
+const DEFAULT_MODE: Mode = Mode::HighThroughput;
+const DEFAULT_USE_COMPRESSION: bool = false;
+
+pub struct PmtreeConfigBuilder {
+    path: Option<PathBuf>,
+    temporary: bool,
+    cache_capacity: u64,
+    flush_every_ms: u64,
+    mode: Mode,
+    use_compression: bool,
+}
+
+impl PmtreeConfigBuilder {
+    fn new() -> Self {
+        PmtreeConfigBuilder {
+            path: None,
+            temporary: DEFAULT_TEMPORARY,
+            cache_capacity: DEFAULT_CACHE_CAPACITY,
+            flush_every_ms: DEFAULT_FLUSH_EVERY_MS,
+            mode: DEFAULT_MODE,
+            use_compression: DEFAULT_USE_COMPRESSION,
+        }
+    }
+
+    pub fn path<P: Into<PathBuf>>(mut self, path: P) -> Self {
+        self.path = Some(path.into());
+        self
+    }
+
+    pub fn temporary(mut self, temporary: bool) -> Self {
+        self.temporary = temporary;
+        self
+    }
+
+    pub fn cache_capacity(mut self, capacity: u64) -> Self {
+        self.cache_capacity = capacity;
+        self
+    }
+
+    pub fn flush_every_ms(mut self, ms: u64) -> Self {
+        self.flush_every_ms = ms;
+        self
+    }
+
+    pub fn mode(mut self, mode: Mode) -> Self {
+        self.mode = mode;
+        self
+    }
+
+    pub fn use_compression(mut self, compression: bool) -> Self {
+        self.use_compression = compression;
+        self
+    }
+
+    pub fn build(self) -> Result<PmtreeConfig, FromConfigError> {
+        let path = match (self.temporary, self.path) {
+            (true, None) => default_tmp_path(),
+            (false, None) => return Err(FromConfigError::MissingPath),
+            (true, Some(path)) if path.exists() => return Err(FromConfigError::PathExists),
+            (_, Some(path)) => path,
+        };
+
+        let config = Config::new()
+            .temporary(self.temporary)
+            .path(path)
+            .cache_capacity(self.cache_capacity)
+            .flush_every_ms(Some(self.flush_every_ms))
+            .mode(self.mode)
+            .use_compression(self.use_compression);
+
+        Ok(PmtreeConfig(config))
+    }
+}

pub struct PmtreeConfig(Config);

-impl FromStr for PmtreeConfig {
-    type Err = Report;
+impl PmtreeConfig {
+    pub fn builder() -> PmtreeConfigBuilder {
+        PmtreeConfigBuilder::new()
+    }
+}

-    fn from_str(s: &str) -> Result<Self> {
+impl FromStr for PmtreeConfig {
+    type Err = FromConfigError;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let config: Value = serde_json::from_str(s)?;

        let path = config["path"].as_str();

@@ -80,21 +163,16 @@ impl FromStr for PmtreeConfig {
        };
        let use_compression = config["use_compression"].as_bool();

-        if temporary.is_some()
-            && path.is_some()
-            && temporary.unwrap()
-            && path.as_ref().unwrap().exists()
-        {
-            return Err(Report::msg(format!(
-                "Path {:?} already exists, cannot use temporary",
-                path.unwrap()
-            )));
+        if let (Some(true), Some(path)) = (temporary, path.as_ref()) {
+            if path.exists() {
+                return Err(FromConfigError::PathExists);
+            }
        }

        let config = Config::new()
-            .temporary(temporary.unwrap_or(get_tmp()))
-            .path(path.unwrap_or(get_tmp_path()))
-            .cache_capacity(cache_capacity.unwrap_or(1024 * 1024 * 1024))
+            .temporary(temporary.unwrap_or(DEFAULT_TEMPORARY))
+            .path(path.unwrap_or(default_tmp_path()))
+            .cache_capacity(cache_capacity.unwrap_or(DEFAULT_CACHE_CAPACITY))
            .flush_every_ms(flush_every_ms)
            .mode(mode)
            .use_compression(use_compression.unwrap_or(false));

@@ -104,16 +182,9 @@ impl FromStr for PmtreeConfig {

impl Default for PmtreeConfig {
    fn default() -> Self {
-        let tmp_path = get_tmp_path();
-        PmtreeConfig(
-            Config::new()
-                .temporary(true)
-                .path(tmp_path)
-                .cache_capacity(150_000)
-                .mode(Mode::HighThroughput)
-                .use_compression(false)
-                .flush_every_ms(Some(12_000)),
-        )
+        Self::builder()
+            .build()
+            .expect("Default configuration should never fail")
    }
}
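Editor's note: a typical call site for the new builder (illustrative path and depth; `default_leaf` comes from the `Hasher` trait used above):

    let config = PmtreeConfig::builder()
        .path("/tmp/my-rln-tree") // hypothetical, must not already exist
        .temporary(false)
        .build()
        .expect("valid config");
    // Open (or load) a depth-20 persistent Poseidon Merkle tree.
    let tree = PmTree::new(20, PoseidonHash::default_leaf(), config);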
impl Debug for PmtreeConfig {

@@ -133,12 +204,16 @@ impl ZerokitMerkleTree for PmTree {
    type Hasher = PoseidonHash;
    type Config = PmtreeConfig;

-    fn default(depth: usize) -> Result<Self> {
+    fn default(depth: usize) -> Result<Self, ZerokitMerkleTreeError> {
        let default_config = PmtreeConfig::default();
        PmTree::new(depth, Self::Hasher::default_leaf(), default_config)
    }

-    fn new(depth: usize, _default_leaf: FrOf<Self::Hasher>, config: Self::Config) -> Result<Self> {
+    fn new(
+        depth: usize,
+        _default_leaf: FrOf<Self::Hasher>,
+        config: Self::Config,
+    ) -> Result<Self, ZerokitMerkleTreeError> {
        let tree_loaded = pmtree::MerkleTree::load(config.clone().0);
        let tree = match tree_loaded {
            Ok(tree) => tree,

@@ -168,14 +243,12 @@ impl ZerokitMerkleTree for PmTree {
        self.tree.root()
    }

    fn compute_root(&mut self) -> Result<FrOf<Self::Hasher>> {
        Ok(self.tree.root())
    }

-    fn set(&mut self, index: usize, leaf: FrOf<Self::Hasher>) -> Result<()> {
-        self.tree
-            .set(index, leaf)
-            .map_err(|e| Report::msg(e.to_string()))?;
+    fn set(
+        &mut self,
+        index: usize,
+        leaf: FrOf<Self::Hasher>,
+    ) -> Result<(), ZerokitMerkleTreeError> {
+        self.tree.set(index, leaf)?;
        self.cached_leaves_indices[index] = 1;
        Ok(())
    }

@@ -184,27 +257,31 @@ impl ZerokitMerkleTree for PmTree {
        &mut self,
        start: usize,
        values: I,
-    ) -> Result<()> {
+    ) -> Result<(), ZerokitMerkleTreeError> {
        let v = values.into_iter().collect::<Vec<_>>();
-        self.tree
-            .set_range(start, v.clone().into_iter())
-            .map_err(|e| Report::msg(e.to_string()))?;
+        self.tree.set_range(start, v.clone().into_iter())?;
        for i in start..v.len() {
            self.cached_leaves_indices[i] = 1
        }
        Ok(())
    }

-    fn get(&self, index: usize) -> Result<FrOf<Self::Hasher>> {
-        self.tree.get(index).map_err(|e| Report::msg(e.to_string()))
+    fn get(&self, index: usize) -> Result<FrOf<Self::Hasher>, ZerokitMerkleTreeError> {
+        self.tree
+            .get(index)
+            .map_err(ZerokitMerkleTreeError::PmtreeErrorKind)
    }

-    fn get_subtree_root(&self, n: usize, index: usize) -> Result<FrOf<Self::Hasher>> {
+    fn get_subtree_root(
+        &self,
+        n: usize,
+        index: usize,
+    ) -> Result<FrOf<Self::Hasher>, ZerokitMerkleTreeError> {
        if n > self.depth() {
-            return Err(Report::msg("level exceeds depth size"));
+            return Err(ZerokitMerkleTreeError::InvalidLevel);
        }
        if index >= self.capacity() {
-            return Err(Report::msg("index exceeds set size"));
+            return Err(ZerokitMerkleTreeError::InvalidLeaf);
        }
        if n == 0 {
            Ok(self.root())

@@ -235,55 +312,71 @@ impl ZerokitMerkleTree for PmTree {
        start: usize,
        leaves: I,
        indices: J,
-    ) -> Result<()> {
+    ) -> Result<(), ZerokitMerkleTreeError> {
        let leaves = leaves.into_iter().collect::<Vec<_>>();
        let mut indices = indices.into_iter().collect::<Vec<_>>();
        indices.sort();

        match (leaves.len(), indices.len()) {
-            (0, 0) => Err(Report::msg("no leaves or indices to be removed")),
+            (0, 0) => Err(ZerokitMerkleTreeError::InvalidLeaf),
            (1, 0) => self.set(start, leaves[0]),
            (0, 1) => self.delete(indices[0]),
            (_, 0) => self.set_range(start, leaves.into_iter()),
-            (0, _) => self.remove_indices(&indices),
-            (_, _) => self.remove_indices_and_set_leaves(start, leaves, &indices),
+            (0, _) => self
+                .remove_indices(&indices)
+                .map_err(ZerokitMerkleTreeError::PmtreeErrorKind),
+            (_, _) => self
+                .remove_indices_and_set_leaves(start, leaves, &indices)
+                .map_err(ZerokitMerkleTreeError::PmtreeErrorKind),
        }
    }

-    fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<()> {
+    fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<(), ZerokitMerkleTreeError> {
        self.tree
            .update_next(leaf)
-            .map_err(|e| Report::msg(e.to_string()))
+            .map_err(ZerokitMerkleTreeError::PmtreeErrorKind)
    }

-    fn delete(&mut self, index: usize) -> Result<()> {
+    /// Delete a leaf in the merkle tree given its index
+    ///
+    /// Deleting a leaf is done by resetting it to its default value. Note that the next_index field
+    /// will not be changed (== previously used index cannot be reused - this to avoid replay
+    /// attacks or unexpected and very hard to tackle issues)
+    fn delete(&mut self, index: usize) -> Result<(), ZerokitMerkleTreeError> {
        self.tree
            .delete(index)
-            .map_err(|e| Report::msg(e.to_string()))?;
+            .map_err(ZerokitMerkleTreeError::PmtreeErrorKind)?;
        self.cached_leaves_indices[index] = 0;
        Ok(())
    }

-    fn proof(&self, index: usize) -> Result<Self::Proof> {
+    fn proof(&self, index: usize) -> Result<Self::Proof, ZerokitMerkleTreeError> {
        let proof = self.tree.proof(index)?;
        Ok(PmTreeProof { proof })
    }

-    fn verify(&self, leaf: &FrOf<Self::Hasher>, witness: &Self::Proof) -> Result<bool> {
+    fn verify(
+        &self,
+        leaf: &FrOf<Self::Hasher>,
+        witness: &Self::Proof,
+    ) -> Result<bool, ZerokitMerkleTreeError> {
        if self.tree.verify(leaf, &witness.proof) {
            Ok(true)
        } else {
-            Err(Report::msg("verify failed"))
+            Err(ZerokitMerkleTreeError::InvalidWitness)
        }
    }

-    fn set_metadata(&mut self, metadata: &[u8]) -> Result<()> {
-        self.tree.db.put(METADATA_KEY, metadata.to_vec())?;
+    fn set_metadata(&mut self, metadata: &[u8]) -> Result<(), ZerokitMerkleTreeError> {
+        self.tree
+            .db
+            .put(METADATA_KEY, metadata.to_vec())
+            .map_err(ZerokitMerkleTreeError::PmtreeErrorKind)?;
        self.metadata = metadata.to_vec();
        Ok(())
    }

-    fn metadata(&self) -> Result<Vec<u8>> {
+    fn metadata(&self) -> Result<Vec<u8>, ZerokitMerkleTreeError> {
        if !self.metadata.is_empty() {
            return Ok(self.metadata.clone());
        }

@@ -297,8 +390,11 @@ impl ZerokitMerkleTree for PmTree {
        Ok(data.unwrap())
    }

-    fn close_db_connection(&mut self) -> Result<()> {
-        self.tree.db.close().map_err(|e| Report::msg(e.to_string()))
+    fn close_db_connection(&mut self) -> Result<(), ZerokitMerkleTreeError> {
+        self.tree
+            .db
+            .close()
+            .map_err(ZerokitMerkleTreeError::PmtreeErrorKind)
    }
}

@@ -306,15 +402,13 @@ type PmTreeHasher = <PmTree as ZerokitMerkleTree>::Hasher;
type FrOfPmTreeHasher = FrOf<PmTreeHasher>;

impl PmTree {
-    fn remove_indices(&mut self, indices: &[usize]) -> Result<()> {
+    fn remove_indices(&mut self, indices: &[usize]) -> Result<(), PmtreeErrorKind> {
        let start = indices[0];
        let end = indices.last().unwrap() + 1;

        let new_leaves = (start..end).map(|_| PmTreeHasher::default_leaf());

-        self.tree
-            .set_range(start, new_leaves)
-            .map_err(|e| Report::msg(e.to_string()))?;
+        self.tree.set_range(start, new_leaves)?;

        for i in start..end {
            self.cached_leaves_indices[i] = 0

@@ -327,7 +421,7 @@ impl PmTree {
        start: usize,
        leaves: Vec<FrOfPmTreeHasher>,
        indices: &[usize],
-    ) -> Result<()> {
+    ) -> Result<(), PmtreeErrorKind> {
        let min_index = *indices.first().unwrap();
        let max_index = start + leaves.len();

@@ -344,9 +438,7 @@ impl PmTree {
            set_values[start - min_index + i] = leaf;
        }

-        self.tree
-            .set_range(start, set_values)
-            .map_err(|e| Report::msg(e.to_string()))?;
+        self.tree.set_range(start, set_values)?;

        for i in indices {
            self.cached_leaves_indices[*i] = 0;

@@ -382,3 +474,32 @@ impl ZerokitMerkleProof for PmTreeProof {
        self.proof.compute_root_from(leaf)
    }
}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_pmtree_json_config() {
+        let json = r#"
+        {
+            "path": "pmtree-123456",
+            "temporary": false,
+            "cache_capacity": 1073741824,
+            "flush_every_ms": 500,
+            "mode": "HighThroughput",
+            "use_compression": false
+        }"#;
+
+        let _: PmtreeConfig = json.parse().expect("Failed to parse JSON config");
+
+        let _ = PmtreeConfig::builder()
+            .path(default_tmp_path())
+            .temporary(DEFAULT_TEMPORARY)
+            .cache_capacity(DEFAULT_CACHE_CAPACITY)
+            .mode(DEFAULT_MODE)
+            .use_compression(DEFAULT_USE_COMPRESSION)
+            .build()
+            .expect("Failed to build config");
+    }
+}
@@ -1,30 +1,32 @@
// This crate defines the RLN module default Merkle tree implementation and its Hasher
-// Implementation inspired by https://github.com/worldcoin/semaphore-rs/blob/d462a4372f1fd9c27610f2acfe4841fab1d396aa/src/poseidon_tree.rs
-
+// Implementation inspired by https://github.com/worldcoin/semaphore-rs/blob/d462a4372f1fd9c27610f2acfe4841fab1d396aa/src/poseidon_tree.rs (no differences)
#![cfg(not(feature = "stateless"))]

use cfg_if::cfg_if;

-cfg_if! {
-    if #[cfg(feature = "pmtree-ft")] {
-        use crate::pm_tree_adapter::*;
-    } else {
-        use crate::hashers::{PoseidonHash};
-        use utils::merkle_tree::*;
-    }
-}
-
-// The zerokit RLN default Merkle tree implementation is the OptimalMerkleTree.
-// To switch to FullMerkleTree implementation, it is enough to enable the fullmerkletree feature
+// The zerokit RLN default Merkle tree implementation is the PMTree from the vacp2p_pmtree crate
+// To switch to FullMerkleTree or OptimalMerkleTree, enable the corresponding feature in the Cargo.toml file

cfg_if! {
    if #[cfg(feature = "fullmerkletree")] {
+        use utils::{FullMerkleTree, FullMerkleProof};
+        use crate::hashers::PoseidonHash;
+
        pub type PoseidonTree = FullMerkleTree<PoseidonHash>;
        pub type MerkleProof = FullMerkleProof<PoseidonHash>;
-    } else if #[cfg(feature = "pmtree-ft")] {
-        pub type PoseidonTree = PmTree;
-        pub type MerkleProof = PmTreeProof;
-    } else {
+    } else if #[cfg(feature = "optimalmerkletree")] {
+        use utils::{OptimalMerkleTree, OptimalMerkleProof};
+        use crate::hashers::PoseidonHash;
+
        pub type PoseidonTree = OptimalMerkleTree<PoseidonHash>;
        pub type MerkleProof = OptimalMerkleProof<PoseidonHash>;
+    } else if #[cfg(feature = "pmtree-ft")] {
+        use crate::pm_tree_adapter::{PmTree, PmTreeProof};
+
+        pub type PoseidonTree = PmTree;
+        pub type MerkleProof = PmTreeProof;
+    } else {
+        compile_error!("One of the features `fullmerkletree`, `optimalmerkletree`, or `pmtree-ft` must be enabled.");
    }
}
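Editor's note: downstream code is insulated from the feature choice; it only sees the `PoseidonTree` and `MerkleProof` aliases. A minimal usage sketch (my own; depth and leaf value are illustrative):

    use crate::circuit::Fr;
    use utils::error::ZerokitMerkleTreeError;
    use utils::{ZerokitMerkleProof, ZerokitMerkleTree};

    fn smoke_test() -> Result<(), ZerokitMerkleTreeError> {
        let mut tree = PoseidonTree::default(20)?; // illustrative depth
        tree.update_next(Fr::from(1u64))?;
        let proof: MerkleProof = tree.proof(0)?;
        assert!(tree.verify(&Fr::from(1u64), &proof)?);
        Ok(())
    }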
@@ -1,27 +1,35 @@
// This crate collects all the underlying primitives used to implement RLN

-use ark_bn254::Fr;
+#[cfg(not(feature = "stateless"))]
+use {
+    crate::error::ConversionError,
+    crate::poseidon_tree::PoseidonTree,
+    utils::{ZerokitMerkleProof, ZerokitMerkleTree},
+};

+use crate::circuit::{calculate_rln_witness, qap::CircomReduction, Curve};
+use crate::error::{ComputeIdSecretError, ProofError, ProtocolError};
+use crate::hashers::{hash_to_field_le, poseidon_hash};
+use crate::public::RLN_IDENTIFIER;
+use crate::utils::{
+    bytes_be_to_fr, bytes_le_to_fr, bytes_le_to_vec_fr, bytes_le_to_vec_u8, fr_byte_size,
+    fr_to_bytes_le, normalize_usize_le, to_bigint, vec_fr_to_bytes_le, vec_u8_to_bytes_le,
+    FrOrSecret, IdSecret,
+};
+use ark_bn254::{Fr, FrConfig};
+use ark_ff::{AdditiveGroup, Fp, MontBackend};
use ark_groth16::{prepare_verifying_key, Groth16, Proof as ArkProof, ProvingKey, VerifyingKey};
-use ark_relations::r1cs::{ConstraintMatrices, SynthesisError};
+use ark_relations::r1cs::ConstraintMatrices;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_std::{rand::thread_rng, UniformRand};
-use color_eyre::{Report, Result};
use num_bigint::BigInt;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha20Rng;
use serde::{Deserialize, Serialize};
#[cfg(test)]
use std::time::Instant;
-use thiserror::Error;
use tiny_keccak::{Hasher as _, Keccak};

-use crate::circuit::{calculate_rln_witness, qap::CircomReduction, Curve};
-use crate::hashers::{hash_to_field, poseidon_hash};
-use crate::poseidon_tree::*;
-use crate::public::RLN_IDENTIFIER;
-use crate::utils::*;
-use utils::{ZerokitMerkleProof, ZerokitMerkleTree};
+use zeroize::Zeroize;

///////////////////////////////////////////////////////
// RLN Witness data structure and utility functions
///////////////////////////////////////////////////////

@@ -29,7 +37,7 @@ use utils::{ZerokitMerkleProof, ZerokitMerkleTree};
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct RLNWitnessInput {
    #[serde(serialize_with = "ark_se", deserialize_with = "ark_de")]
-    identity_secret: Fr,
+    identity_secret: IdSecret,
    #[serde(serialize_with = "ark_se", deserialize_with = "ark_de")]
    user_message_limit: Fr,
    #[serde(serialize_with = "ark_se", deserialize_with = "ark_de")]

@@ -64,14 +72,21 @@ pub fn deserialize_field_element(serialized: Vec<u8>) -> Fr {
    element
}

-pub fn deserialize_identity_pair(serialized: Vec<u8>) -> (Fr, Fr) {
+pub fn deserialize_identity_pair_le(serialized: Vec<u8>) -> (Fr, Fr) {
    let (identity_secret_hash, read) = bytes_le_to_fr(&serialized);
    let (id_commitment, _) = bytes_le_to_fr(&serialized[read..]);

    (identity_secret_hash, id_commitment)
}

-pub fn deserialize_identity_tuple(serialized: Vec<u8>) -> (Fr, Fr, Fr, Fr) {
+pub fn deserialize_identity_pair_be(serialized: Vec<u8>) -> (Fr, Fr) {
+    let (identity_secret_hash, read) = bytes_be_to_fr(&serialized);
+    let (id_commitment, _) = bytes_be_to_fr(&serialized[read..]);
+
+    (identity_secret_hash, id_commitment)
+}
+
+pub fn deserialize_identity_tuple_le(serialized: Vec<u8>) -> (Fr, Fr, Fr, Fr) {
    let mut all_read = 0;

    let (identity_trapdoor, read) = bytes_le_to_fr(&serialized[all_read..]);

@@ -93,13 +108,35 @@ pub fn deserialize_identity_tuple(serialized: Vec<u8>) -> (Fr, Fr, Fr, Fr) {
    )
}

+pub fn deserialize_identity_tuple_be(serialized: Vec<u8>) -> (Fr, Fr, Fr, Fr) {
+    let mut all_read = 0;
+
+    let (identity_trapdoor, read) = bytes_be_to_fr(&serialized[all_read..]);
+    all_read += read;
+
+    let (identity_nullifier, read) = bytes_be_to_fr(&serialized[all_read..]);
+    all_read += read;
+
+    let (identity_secret_hash, read) = bytes_be_to_fr(&serialized[all_read..]);
+    all_read += read;
+
+    let (identity_commitment, _) = bytes_be_to_fr(&serialized[all_read..]);
+
+    (
+        identity_trapdoor,
+        identity_nullifier,
+        identity_secret_hash,
+        identity_commitment,
+    )
+}
+
/// Serializes witness
///
/// # Errors
///
/// Returns an error if `rln_witness.message_id` is not within `rln_witness.user_message_limit`.
-/// input data is [ identity_secret<32> | user_message_limit<32> | message_id<32> | path_elements[<32>] | identity_path_index<8> | x<32> | external_nullifier<32> ]
-pub fn serialize_witness(rln_witness: &RLNWitnessInput) -> Result<Vec<u8>> {
+/// input data is [ identity_secret<32> | user_message_limit<32> | message_id<32> | path_elements<32> | identity_path_index<8> | x<32> | external_nullifier<32> ]
+pub fn serialize_witness(rln_witness: &RLNWitnessInput) -> Result<Vec<u8>, ProtocolError> {
    // Check if message_id is within user_message_limit
    message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;

@@ -111,11 +148,11 @@ pub fn serialize_witness(rln_witness: &RLNWitnessInput) -> Result<Vec<u8>> {
        fr_byte_size() * (5 + rln_witness.path_elements.len())
            + rln_witness.identity_path_index.len(),
    );
-    serialized.extend_from_slice(&fr_to_bytes_le(&rln_witness.identity_secret));
+    serialized.extend_from_slice(&rln_witness.identity_secret.to_bytes_le());
    serialized.extend_from_slice(&fr_to_bytes_le(&rln_witness.user_message_limit));
    serialized.extend_from_slice(&fr_to_bytes_le(&rln_witness.message_id));
-    serialized.extend_from_slice(&vec_fr_to_bytes_le(&rln_witness.path_elements)?);
-    serialized.extend_from_slice(&vec_u8_to_bytes_le(&rln_witness.identity_path_index)?);
+    serialized.extend_from_slice(&vec_fr_to_bytes_le(&rln_witness.path_elements));
+    serialized.extend_from_slice(&vec_u8_to_bytes_le(&rln_witness.identity_path_index));
    serialized.extend_from_slice(&fr_to_bytes_le(&rln_witness.x));
    serialized.extend_from_slice(&fr_to_bytes_le(&rln_witness.external_nullifier));

@@ -127,10 +164,10 @@ pub fn serialize_witness(rln_witness: &RLNWitnessInput) -> Result<Vec<u8>> {
/// # Errors
///
/// Returns an error if `message_id` is not within `user_message_limit`.
-pub fn deserialize_witness(serialized: &[u8]) -> Result<(RLNWitnessInput, usize)> {
+pub fn deserialize_witness(serialized: &[u8]) -> Result<(RLNWitnessInput, usize), ProtocolError> {
    let mut all_read: usize = 0;

-    let (identity_secret, read) = bytes_le_to_fr(&serialized[all_read..]);
+    let (identity_secret, read) = IdSecret::from_bytes_le(&serialized[all_read..]);
    all_read += read;

    let (user_message_limit, read) = bytes_le_to_fr(&serialized[all_read..]);

@@ -154,7 +191,7 @@ pub fn deserialize_witness(serialized: &[u8]) -> Result<(RLNWitnessInput, usize)
    all_read += read;

    if serialized.len() != all_read {
-        return Err(Report::msg("serialized length is not equal to all_read"));
+        return Err(ProtocolError::InvalidReadLen(serialized.len(), all_read));
    }

    Ok((

@@ -175,18 +212,22 @@ pub fn deserialize_witness(serialized: &[u8]) -> Result<(RLNWitnessInput, usize)
// https://github.com/kilic/rln/blob/7ac74183f8b69b399e3bc96c1ae8ab61c026dc43/src/public.rs#L148
// input_data is [ identity_secret<32> | id_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal<var> ]
// return value is a rln witness populated according to this information
+#[cfg(not(feature = "stateless"))]
pub fn proof_inputs_to_rln_witness(
    tree: &mut PoseidonTree,
    serialized: &[u8],
-) -> Result<(RLNWitnessInput, usize)> {
+) -> Result<(RLNWitnessInput, usize), ProtocolError> {
    let mut all_read: usize = 0;

-    let (identity_secret, read) = bytes_le_to_fr(&serialized[all_read..]);
+    let (identity_secret, read) = IdSecret::from_bytes_le(&serialized[all_read..]);
    all_read += read;

    let id_index = usize::try_from(u64::from_le_bytes(
-        serialized[all_read..all_read + 8].try_into()?,
-    ))?;
+        serialized[all_read..all_read + 8]
+            .try_into()
+            .map_err(ConversionError::FromSlice)?,
+    ))
+    .map_err(ConversionError::ToUsize)?;
    all_read += 8;

    let (user_message_limit, read) = bytes_le_to_fr(&serialized[all_read..]);

@@ -199,8 +240,11 @@ pub fn proof_inputs_to_rln_witness(
    all_read += read;

    let signal_len = usize::try_from(u64::from_le_bytes(
-        serialized[all_read..all_read + 8].try_into()?,
-    ))?;
+        serialized[all_read..all_read + 8]
+            .try_into()
+            .map_err(ConversionError::FromSlice)?,
+    ))
+    .map_err(ConversionError::ToUsize)?;
    all_read += 8;

    let signal: Vec<u8> = serialized[all_read..all_read + signal_len].to_vec();

@@ -209,7 +253,7 @@ pub fn proof_inputs_to_rln_witness(
    let path_elements = merkle_proof.get_path_elements();
    let identity_path_index = merkle_proof.get_path_index();

-    let x = hash_to_field(&signal);
+    let x = hash_to_field_le(&signal);

    Ok((
        RLNWitnessInput {

@@ -231,18 +275,16 @@ pub fn proof_inputs_to_rln_witness(
///
/// Returns an error if `message_id` is not within `user_message_limit`.
pub fn rln_witness_from_values(
-    identity_secret: Fr,
-    merkle_proof: &MerkleProof,
+    identity_secret: IdSecret,
+    path_elements: Vec<Fp<MontBackend<FrConfig, 4>, 4>>,
+    identity_path_index: Vec<u8>,
    x: Fr,
    external_nullifier: Fr,
    user_message_limit: Fr,
    message_id: Fr,
-) -> Result<RLNWitnessInput> {
+) -> Result<RLNWitnessInput, ProtocolError> {
    message_id_range_check(&message_id, &user_message_limit)?;

-    let path_elements = merkle_proof.get_path_elements();
-    let identity_path_index = merkle_proof.get_path_index();
-
    Ok(RLNWitnessInput {
        identity_secret,
        path_elements,

@@ -254,19 +296,19 @@ pub fn rln_witness_from_values(
    })
}

-pub fn random_rln_witness(tree_height: usize) -> RLNWitnessInput {
+pub fn random_rln_witness(tree_depth: usize) -> RLNWitnessInput {
    let mut rng = thread_rng();

-    let identity_secret = hash_to_field(&rng.gen::<[u8; 32]>());
-    let x = hash_to_field(&rng.gen::<[u8; 32]>());
-    let epoch = hash_to_field(&rng.gen::<[u8; 32]>());
-    let rln_identifier = hash_to_field(RLN_IDENTIFIER); //hash_to_field(&rng.gen::<[u8; 32]>());
+    let identity_secret = IdSecret::rand(&mut rng);
+    let x = hash_to_field_le(&rng.gen::<[u8; 32]>());
+    let epoch = hash_to_field_le(&rng.gen::<[u8; 32]>());
+    let rln_identifier = hash_to_field_le(RLN_IDENTIFIER);

    let mut path_elements: Vec<Fr> = Vec::new();
    let mut identity_path_index: Vec<u8> = Vec::new();

-    for _ in 0..tree_height {
-        path_elements.push(hash_to_field(&rng.gen::<[u8; 32]>()));
+    for _ in 0..tree_depth {
+        path_elements.push(hash_to_field_le(&rng.gen::<[u8; 32]>()));
        identity_path_index.push(rng.gen_range(0..2) as u8);
    }

@@ -284,16 +326,24 @@ pub fn random_rln_witness(tree_height: usize) -> RLNWitnessInput {
    }
}

-pub fn proof_values_from_witness(rln_witness: &RLNWitnessInput) -> Result<RLNProofValues> {
+pub fn proof_values_from_witness(
+    rln_witness: &RLNWitnessInput,
+) -> Result<RLNProofValues, ProtocolError> {
    message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;

    // y share
-    let a_0 = rln_witness.identity_secret;
-    let a_1 = poseidon_hash(&[a_0, rln_witness.external_nullifier, rln_witness.message_id]);
-    let y = a_0 + rln_witness.x * a_1;
+    let a_0 = &rln_witness.identity_secret;
+    let mut to_hash = [
+        *(a_0.clone()),
+        rln_witness.external_nullifier,
+        rln_witness.message_id,
+    ];
+    let a_1 = poseidon_hash(&to_hash);
+    let y = *(a_0.clone()) + rln_witness.x * a_1;

    // Nullifier
    let nullifier = poseidon_hash(&[a_1]);
+    to_hash[0].zeroize();

    // Merkle tree root computations
    let root = compute_tree_root(

@@ -361,7 +411,7 @@ pub fn deserialize_proof_values(serialized: &[u8]) -> (RLNProofValues, usize) {

// input_data is [ identity_secret<32> | id_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal<var> ]
pub fn prepare_prove_input(
-    identity_secret: Fr,
+    identity_secret: IdSecret,
    id_index: usize,
    user_message_limit: Fr,
    message_id: Fr,

@@ -374,12 +424,12 @@ pub fn prepare_prove_input(
    // - variable length signal data
    let mut serialized = Vec::with_capacity(fr_byte_size() * 4 + 16 + signal.len()); // length of 4 fr elements + 16 bytes (id_index + len) + signal length

-    serialized.extend_from_slice(&fr_to_bytes_le(&identity_secret));
-    serialized.extend_from_slice(&normalize_usize(id_index));
+    serialized.extend_from_slice(&identity_secret.to_bytes_le());
+    serialized.extend_from_slice(&normalize_usize_le(id_index));
    serialized.extend_from_slice(&fr_to_bytes_le(&user_message_limit));
    serialized.extend_from_slice(&fr_to_bytes_le(&message_id));
    serialized.extend_from_slice(&fr_to_bytes_le(&external_nullifier));
-    serialized.extend_from_slice(&normalize_usize(signal.len()));
+    serialized.extend_from_slice(&normalize_usize_le(signal.len()));
    serialized.extend_from_slice(signal);

    serialized

@@ -394,7 +444,7 @@ pub fn prepare_verify_input(proof_data: Vec<u8>, signal: &[u8]) -> Vec<u8> {
    let mut serialized = Vec::with_capacity(proof_data.len() + 8 + signal.len());

    serialized.extend(proof_data);
-    serialized.extend_from_slice(&normalize_usize(signal.len()));
+    serialized.extend_from_slice(&normalize_usize_le(signal.len()));
    serialized.extend_from_slice(signal);

    serialized

@@ -405,12 +455,15 @@
///////////////////////////////////////////////////////

pub fn compute_tree_root(
-    identity_secret: &Fr,
+    identity_secret: &IdSecret,
    user_message_limit: &Fr,
    path_elements: &[Fr],
    identity_path_index: &[u8],
) -> Fr {
-    let id_commitment = poseidon_hash(&[*identity_secret]);
+    let mut to_hash = [*identity_secret.clone()];
+    let id_commitment = poseidon_hash(&to_hash);
+    to_hash[0].zeroize();

    let mut root = poseidon_hash(&[id_commitment, *user_message_limit]);

    for i in 0..identity_path_index.len() {

@@ -431,10 +484,12 @@
// Generates a tuple (identity_secret_hash, id_commitment) where
// identity_secret_hash is random and id_commitment = PoseidonHash(identity_secret_hash)
// RNG is instantiated using thread_rng()
-pub fn keygen() -> (Fr, Fr) {
+pub fn keygen() -> (IdSecret, Fr) {
    let mut rng = thread_rng();
-    let identity_secret_hash = Fr::rand(&mut rng);
-    let id_commitment = poseidon_hash(&[identity_secret_hash]);
+    let identity_secret_hash = IdSecret::rand(&mut rng);
+    let mut to_hash = [*identity_secret_hash.clone()];
+    let id_commitment = poseidon_hash(&to_hash);
+    to_hash[0].zeroize();
    (identity_secret_hash, id_commitment)
}

@@ -502,7 +557,10 @@ pub fn extended_seeded_keygen(signal: &[u8]) -> (Fr, Fr, Fr, Fr) {
    )
}

-pub fn compute_id_secret(share1: (Fr, Fr), share2: (Fr, Fr)) -> Result<Fr, String> {
+pub fn compute_id_secret(
+    share1: (Fr, Fr),
+    share2: (Fr, Fr),
+) -> Result<IdSecret, ComputeIdSecretError> {
    // Assuming a0 is the identity secret and a1 = poseidonHash([a0, external_nullifier]),
    // a (x,y) share satisfies the following relation
    // y = a_0 + x * a_1

@@ -512,30 +570,26 @@ pub fn compute_id_secret(share1: (Fr, Fr), share2: (Fr, Fr)) -> Result<Fr, Strin
    // If the two input shares were computed for the same external_nullifier and identity secret, we can recover the latter
    // y1 = a_0 + x1 * a_1
    // y2 = a_0 + x2 * a_1
-    let a_1 = (y1 - y2) / (x1 - x2);
-    let a_0 = y1 - x1 * a_1;
-
-    // If shares come from the same polynomial, a0 is correctly recovered and a1 = poseidonHash([a0, external_nullifier])
-    Ok(a_0)
+    if (x1 - x2) != Fr::ZERO {
+        let a_1 = (y1 - y2) / (x1 - x2);
+        let mut a_0 = y1 - x1 * a_1;
+
+        // If shares come from the same polynomial, a0 is correctly recovered and a1 = poseidonHash([a0, external_nullifier])
+        let id_secret = IdSecret::from(&mut a_0);
+        Ok(id_secret)
+    } else {
+        Err(ComputeIdSecretError::DivisionByZero)
+    }
}
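Editor's note: a worked example of the Shamir-style recovery this guard protects (values are illustrative; `IdSecret` is assumed to deref to `Fr` as the code above suggests):

    #[test]
    fn recovers_secret_from_two_shares() {
        let a_0 = Fr::from(7u64);  // identity secret
        let a_1 = Fr::from(11u64); // slope; poseidonHash([a_0, external_nullifier]) in the real protocol
        let share = |x: Fr| (x, a_0 + x * a_1); // y = a_0 + x * a_1
        let recovered = compute_id_secret(share(Fr::from(1u64)), share(Fr::from(2u64))).unwrap();
        assert_eq!(*recovered.clone(), a_0);
        // Reusing the same x (i.e. the same signal) cannot be solved:
        // DivisionByZero is returned instead of panicking.
        assert!(compute_id_secret(share(Fr::from(3u64)), share(Fr::from(3u64))).is_err());
    }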

///////////////////////////////////////////////////////
// zkSNARK utility functions
///////////////////////////////////////////////////////

-#[derive(Error, Debug)]
-pub enum ProofError {
-    #[error("Error reading circuit key: {0}")]
-    CircuitKeyError(#[from] Report),
-    #[error("Error producing witness: {0}")]
-    WitnessError(Report),
-    #[error("Error producing proof: {0}")]
-    SynthesisError(#[from] SynthesisError),
-}
-
fn calculate_witness_element<E: ark_ec::pairing::Pairing>(
    witness: Vec<BigInt>,
-) -> Result<Vec<E::ScalarField>> {
+) -> Result<Vec<E::ScalarField>, ProtocolError> {
    use ark_ff::PrimeField;
    let modulus = <E::ScalarField as PrimeField>::MODULUS;

@@ -548,9 +602,9 @@
            modulus.into()
                - w.abs()
                    .to_biguint()
-                    .ok_or(Report::msg("not a biguint value"))?
+                    .ok_or(ProtocolError::BigUintConversion(w))?
        } else {
-            w.to_biguint().ok_or(Report::msg("not a biguint value"))?
+            w.to_biguint().ok_or(ProtocolError::BigUintConversion(w))?
        };
        witness_vec.push(E::ScalarField::from(w))
    }

@@ -566,8 +620,7 @@ pub fn generate_proof_with_witness(
    #[cfg(test)]
    let now = Instant::now();

-    let full_assignment =
-        calculate_witness_element::<Curve>(witness).map_err(ProofError::WitnessError)?;
+    let full_assignment = calculate_witness_element::<Curve>(witness)?;

    #[cfg(test)]
    println!("witness generation took: {:.2?}", now.elapsed());

@@ -604,7 +657,7 @@
/// Returns an error if `rln_witness.message_id` is not within `rln_witness.user_message_limit`.
pub fn inputs_for_witness_calculation(
    rln_witness: &RLNWitnessInput,
-) -> Result<[(&str, Vec<Fr>); 7]> {
+) -> Result<[(&str, Vec<FrOrSecret>); 7], ProtocolError> {
    message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;

    let mut identity_path_index = Vec::with_capacity(rln_witness.identity_path_index.len());

@@ -614,13 +667,33 @@ pub fn inputs_for_witness_calculation(
        .for_each(|v| identity_path_index.push(Fr::from(*v)));

    Ok([
-        ("identitySecret", vec![rln_witness.identity_secret]),
-        ("userMessageLimit", vec![rln_witness.user_message_limit]),
-        ("messageId", vec![rln_witness.message_id]),
-        ("pathElements", rln_witness.path_elements.clone()),
-        ("identityPathIndex", identity_path_index),
-        ("x", vec![rln_witness.x]),
-        ("externalNullifier", vec![rln_witness.external_nullifier]),
+        (
+            "identitySecret",
+            vec![rln_witness.identity_secret.clone().into()],
+        ),
+        (
+            "userMessageLimit",
+            vec![rln_witness.user_message_limit.into()],
+        ),
+        ("messageId", vec![rln_witness.message_id.into()]),
+        (
+            "pathElements",
+            rln_witness
+                .path_elements
+                .iter()
+                .cloned()
+                .map(Into::into)
+                .collect(),
+        ),
+        (
+            "identityPathIndex",
+            identity_path_index.into_iter().map(Into::into).collect(),
+        ),
+        ("x", vec![rln_witness.x.into()]),
+        (
+            "externalNullifier",
+            vec![rln_witness.external_nullifier.into()],
+        ),
    ])
}

@@ -732,7 +805,9 @@ where
/// # Errors
///
/// Returns an error if `rln_witness.message_id` is not within `rln_witness.user_message_limit`.
-pub fn rln_witness_from_json(input_json: serde_json::Value) -> Result<RLNWitnessInput> {
+pub fn rln_witness_from_json(
+    input_json: serde_json::Value,
+) -> Result<RLNWitnessInput, ProtocolError> {
    let rln_witness: RLNWitnessInput = serde_json::from_value(input_json).unwrap();
    message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;

@@ -744,7 +819,9 @@ pub fn rln_witness_from_json(input_json: serde_json::Value) -> Result<RLNWitness
/// # Errors
///
/// Returns an error if `message_id` is not within `user_message_limit`.
-pub fn rln_witness_to_json(rln_witness: &RLNWitnessInput) -> Result<serde_json::Value> {
+pub fn rln_witness_to_json(
+    rln_witness: &RLNWitnessInput,
+) -> Result<serde_json::Value, ProtocolError> {
    message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;

    let rln_witness_json = serde_json::to_value(rln_witness)?;

@@ -757,13 +834,15 @@ pub fn rln_witness_to_json(rln_witness: &RLNWitnessInput) -> Result<serde_json::
/// # Errors
///
/// Returns an error if `message_id` is not within `user_message_limit`.
-pub fn rln_witness_to_bigint_json(rln_witness: &RLNWitnessInput) -> Result<serde_json::Value> {
+pub fn rln_witness_to_bigint_json(
+    rln_witness: &RLNWitnessInput,
+) -> Result<serde_json::Value, ProtocolError> {
    message_id_range_check(&rln_witness.message_id, &rln_witness.user_message_limit)?;

    let mut path_elements = Vec::new();

    for v in rln_witness.path_elements.iter() {
-        path_elements.push(to_bigint(v)?.to_str_radix(10));
+        path_elements.push(to_bigint(v).to_str_radix(10));
    }

    let mut identity_path_index = Vec::new();

@@ -773,22 +852,26 @@ pub fn rln_witness_to_bigint_json(rln_witness: &RLNWitnessInput) -> Result<serde
        .for_each(|v| identity_path_index.push(BigInt::from(*v).to_str_radix(10)));

    let inputs = serde_json::json!({
-        "identitySecret": to_bigint(&rln_witness.identity_secret)?.to_str_radix(10),
-        "userMessageLimit": to_bigint(&rln_witness.user_message_limit)?.to_str_radix(10),
-        "messageId": to_bigint(&rln_witness.message_id)?.to_str_radix(10),
+        "identitySecret": to_bigint(&rln_witness.identity_secret).to_str_radix(10),
+        "userMessageLimit": to_bigint(&rln_witness.user_message_limit).to_str_radix(10),
+        "messageId": to_bigint(&rln_witness.message_id).to_str_radix(10),
        "pathElements": path_elements,
        "identityPathIndex": identity_path_index,
-        "x": to_bigint(&rln_witness.x)?.to_str_radix(10),
-        "externalNullifier": to_bigint(&rln_witness.external_nullifier)?.to_str_radix(10),
+        "x": to_bigint(&rln_witness.x).to_str_radix(10),
+        "externalNullifier": to_bigint(&rln_witness.external_nullifier).to_str_radix(10),
    });

    Ok(inputs)
}

-pub fn message_id_range_check(message_id: &Fr, user_message_limit: &Fr) -> Result<()> {
+pub fn message_id_range_check(
+    message_id: &Fr,
+    user_message_limit: &Fr,
+) -> Result<(), ProtocolError> {
    if message_id > user_message_limit {
-        return Err(color_eyre::Report::msg(
-            "message_id is not within user_message_limit",
+        return Err(ProtocolError::InvalidMessageId(
+            *message_id,
+            *user_message_limit,
        ));
    }
    Ok(())
File diff suppressed because it is too large
@@ -1,16 +1,21 @@
|
||||
use crate::circuit::TEST_TREE_HEIGHT;
|
||||
use crate::protocol::*;
|
||||
use crate::circuit::TEST_TREE_DEPTH;
|
||||
use crate::protocol::{
|
||||
proof_values_from_witness, random_rln_witness, serialize_proof_values, serialize_witness,
|
||||
verify_proof, RLNProofValues,
|
||||
};
|
||||
use crate::public::RLN;
|
||||
use crate::utils::*;
|
||||
use crate::utils::str_to_fr;
|
||||
use ark_groth16::Proof as ArkProof;
|
||||
use ark_serialize::CanonicalDeserialize;
|
||||
use serde_json::{json, Value};
|
||||
use std::io::Cursor;
|
||||
use std::str::FromStr;
|
||||
|
||||
use serde_json::{json, Value};
|
||||
#[cfg(not(feature = "stateless"))]
|
||||
use crate::utils::generate_input_buffer;
|
||||
|
||||
fn fq_from_str(s: &str) -> ark_bn254::Fq {
|
||||
ark_bn254::Fq::from_str(&s).unwrap()
|
||||
ark_bn254::Fq::from_str(s).unwrap()
|
||||
}
|
||||
|
||||
fn g1_from_str(g1: &[String]) -> ark_bn254::G1Affine {
|
@@ -40,7 +45,7 @@ fn value_to_string_vec(value: &Value) -> Vec<String> {
     value
         .as_array()
         .unwrap()
-        .into_iter()
+        .iter()
         .map(|val| val.as_str().unwrap().to_string())
         .collect()
 }
@@ -48,7 +53,7 @@
 #[test]
 fn test_groth16_proof_hardcoded() {
     #[cfg(not(feature = "stateless"))]
-    let rln = RLN::new(TEST_TREE_HEIGHT, generate_input_buffer()).unwrap();
+    let rln = RLN::new(TEST_TREE_DEPTH, generate_input_buffer()).unwrap();
     #[cfg(feature = "stateless")]
     let rln = RLN::new().unwrap();

@@ -87,7 +92,7 @@ fn test_groth16_proof_hardcoded() {
             .as_array()
             .unwrap()
             .iter()
-            .map(|item| value_to_string_vec(item))
+            .map(value_to_string_vec)
             .collect::<Vec<Vec<String>>>(),
         ),
         c: g1_from_str(&value_to_string_vec(&valid_snarkjs_proof["pi_c"])),
@@ -128,15 +133,15 @@
 #[test]
 // This test is similar to the one in lib, but uses only public API
 fn test_groth16_proof() {
-    let tree_height = TEST_TREE_HEIGHT;
+    let tree_depth = TEST_TREE_DEPTH;

     #[cfg(not(feature = "stateless"))]
-    let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
+    let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();
     #[cfg(feature = "stateless")]
     let mut rln = RLN::new().unwrap();

     // Note: we only test Groth16 proof generation, so we ignore setting the tree in the RLN object
-    let rln_witness = random_rln_witness(tree_height);
+    let rln_witness = random_rln_witness(tree_depth);
     let proof_values = proof_values_from_witness(&rln_witness).unwrap();

     // We compute a Groth16 proof
@@ -166,12 +171,14 @@ fn test_groth16_proof() {

 #[cfg(not(feature = "stateless"))]
 mod tree_test {
-    use crate::circuit::{Fr, TEST_TREE_HEIGHT};
-    use crate::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash};
+    use crate::circuit::{Fr, TEST_TREE_DEPTH};
+    use crate::hashers::{hash_to_field_le, poseidon_hash as utils_poseidon_hash};
+    use crate::pm_tree_adapter::PmtreeConfig;
     use crate::protocol::*;
-    use crate::public::RLN;
+    use crate::public::{TreeConfigInput, RLN};
     use crate::utils::*;
     use ark_serialize::Read;
+    use serde_json::json;
     use std::io::Cursor;
     use utils::ZerokitMerkleTree;

@@ -181,7 +188,7 @@ mod tree_test {
     #[test]
     // We test merkle batch Merkle tree additions
     fn test_merkle_operations() {
-        let tree_height = TEST_TREE_HEIGHT;
+        let tree_depth = TEST_TREE_DEPTH;
         let no_of_leaves = 256;

         // We generate a vector of random leaves
@@ -192,14 +199,14 @@ mod tree_test {
         }

         // We create a new tree
-        let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
+        let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();

         // We first add leaves one by one specifying the index
         for (i, leaf) in leaves.iter().enumerate() {
             // We check if the number of leaves set is consistent
             assert_eq!(rln.tree.leaves_set(), i);

-            let mut buffer = Cursor::new(fr_to_bytes_le(&leaf));
+            let mut buffer = Cursor::new(fr_to_bytes_le(leaf));
             rln.set_leaf(i, &mut buffer).unwrap();
         }

@@ -209,11 +216,11 @@ mod tree_test {
         let (root_single, _) = bytes_le_to_fr(&buffer.into_inner());

         // We reset the tree to default
-        rln.set_tree(tree_height).unwrap();
+        rln.set_tree(tree_depth).unwrap();

         // We add leaves one by one using the internal index (new leaves goes in next available position)
         for leaf in &leaves {
-            let mut buffer = Cursor::new(fr_to_bytes_le(&leaf));
+            let mut buffer = Cursor::new(fr_to_bytes_le(leaf));
             rln.set_next_leaf(&mut buffer).unwrap();
         }

@@ -228,10 +235,10 @@ mod tree_test {
         assert_eq!(root_single, root_next);

         // We reset the tree to default
-        rln.set_tree(tree_height).unwrap();
+        rln.set_tree(tree_depth).unwrap();

         // We add leaves in a batch into the tree
-        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
+        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
         rln.init_tree_with_leaves(&mut buffer).unwrap();

         // We check if number of leaves set is consistent
@@ -258,7 +265,7 @@ mod tree_test {
         let (root_delete, _) = bytes_le_to_fr(&buffer.into_inner());

         // We reset the tree to default
-        rln.set_tree(tree_height).unwrap();
+        rln.set_tree(tree_depth).unwrap();

         let mut buffer = Cursor::new(Vec::<u8>::new());
         rln.get_root(&mut buffer).unwrap();
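Note that the serializers feeding these buffers are now infallible: the `.unwrap()` calls disappear because `vec_fr_to_bytes_le` and friends return `Vec<u8>` directly. A minimal sketch of the updated batch-insertion pattern (tree depth 20 is an illustrative value, not mandated by the API):

    use std::io::Cursor;
    use rln::circuit::Fr;
    use rln::public::RLN;
    use rln::utils::{generate_input_buffer, vec_fr_to_bytes_le};

    fn fill_tree(leaves: &[Fr]) -> RLN {
        let mut rln = RLN::new(20, generate_input_buffer()).unwrap();
        // Serialization can no longer fail, so only the tree operation is fallible
        let mut buffer = Cursor::new(vec_fr_to_bytes_le(leaves));
        rln.init_tree_with_leaves(&mut buffer).unwrap();
        rln
    }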
@@ -271,7 +278,7 @@ mod tree_test {
     // We test leaf setting with a custom index, to enable batch updates to the root
     // Uses `set_leaves_from` to set leaves in a batch, from index `start_index`
     fn test_leaf_setting_with_index() {
-        let tree_height = TEST_TREE_HEIGHT;
+        let tree_depth = TEST_TREE_DEPTH;
         let no_of_leaves = 256;

         // We generate a vector of random leaves
@@ -286,10 +293,10 @@ mod tree_test {
         let set_index = rng.gen_range(0..no_of_leaves) as usize;

         // We create a new tree
-        let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
+        let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();

         // We add leaves in a batch into the tree
-        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
+        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
         rln.init_tree_with_leaves(&mut buffer).unwrap();

         // We check if number of leaves set is consistent
@@ -300,14 +307,14 @@ mod tree_test {
         rln.get_root(&mut buffer).unwrap();
         let (root_batch_with_init, _) = bytes_le_to_fr(&buffer.into_inner());

-        // `init_tree_with_leaves` resets the tree to the height it was initialized with, using `set_tree`
+        // `init_tree_with_leaves` resets the tree to the depth it was initialized with, using `set_tree`

         // We add leaves in a batch starting from index 0..set_index
-        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[0..set_index]).unwrap());
+        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[0..set_index]));
         rln.init_tree_with_leaves(&mut buffer).unwrap();

         // We add the remaining n leaves in a batch starting from index m
-        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[set_index..]).unwrap());
+        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves[set_index..]));
         rln.set_leaves_from(set_index, &mut buffer).unwrap();

         // We check if number of leaves set is consistent
@@ -321,11 +328,11 @@ mod tree_test {
         assert_eq!(root_batch_with_init, root_batch_with_custom_index);

         // We reset the tree to default
-        rln.set_tree(tree_height).unwrap();
+        rln.set_tree(tree_depth).unwrap();

         // We add leaves one by one using the internal index (new leaves goes in next available position)
         for leaf in &leaves {
-            let mut buffer = Cursor::new(fr_to_bytes_le(&leaf));
+            let mut buffer = Cursor::new(fr_to_bytes_le(leaf));
             rln.set_next_leaf(&mut buffer).unwrap();
         }

@@ -345,7 +352,7 @@ mod tree_test {
     #[test]
     // Tests the atomic_operation fn, which set_leaves_from uses internally
     fn test_atomic_operation() {
-        let tree_height = TEST_TREE_HEIGHT;
+        let tree_depth = TEST_TREE_DEPTH;
         let no_of_leaves = 256;

         // We generate a vector of random leaves
@@ -356,10 +363,10 @@ mod tree_test {
         }

         // We create a new tree
-        let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
+        let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();

         // We add leaves in a batch into the tree
-        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
+        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
         rln.init_tree_with_leaves(&mut buffer).unwrap();

         // We check if number of leaves set is consistent
@@ -377,8 +384,8 @@ mod tree_test {
         let last_leaf_index = no_of_leaves - 1;
         let indices = vec![last_leaf_index as u8];
         let last_leaf = vec![*last_leaf];
-        let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices).unwrap());
-        let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&last_leaf).unwrap());
+        let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices));
+        let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&last_leaf));

         rln.atomic_operation(last_leaf_index, leaves_buffer, indices_buffer)
             .unwrap();
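As this test exercises it, `atomic_operation` takes two length-prefixed buffers: the leaves to write from a starting index (a `vec_fr` serialization) and the leaf indices to clear (a `vec_u8` serialization), applied as one tree update. A hedged sketch of the no-op pattern used above, where the same leaf is re-set and then cleared atomically (the exact clearing semantics are inferred from these tests, not from documented API):

    use std::io::Cursor;
    use rln::circuit::Fr;
    use rln::public::RLN;
    use rln::utils::{vec_fr_to_bytes_le, vec_u8_to_bytes_le};

    fn atomic_noop(rln: &mut RLN, last_leaf: Fr, last_index: usize) {
        // Leaves buffer: one element written starting at last_index
        let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&[last_leaf]));
        // Indices buffer: the same position is cleared in the same operation
        let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&[last_index as u8]));
        rln.atomic_operation(last_index, leaves_buffer, indices_buffer)
            .unwrap();
    }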
@@ -394,7 +401,7 @@ mod tree_test {
     #[test]
     fn test_atomic_operation_zero_indexed() {
         // Test duplicated from https://github.com/waku-org/go-zerokit-rln/pull/12/files
-        let tree_height = TEST_TREE_HEIGHT;
+        let tree_depth = TEST_TREE_DEPTH;
         let no_of_leaves = 256;

         // We generate a vector of random leaves
@@ -405,10 +412,10 @@ mod tree_test {
         }

         // We create a new tree
-        let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
+        let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();

         // We add leaves in a batch into the tree
-        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
+        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
         rln.init_tree_with_leaves(&mut buffer).unwrap();

         // We check if number of leaves set is consistent
@@ -422,8 +429,8 @@ mod tree_test {
         let zero_index = 0;
         let indices = vec![zero_index as u8];
         let zero_leaf: Vec<Fr> = vec![];
-        let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices).unwrap());
-        let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&zero_leaf).unwrap());
+        let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices));
+        let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&zero_leaf));
         rln.atomic_operation(0, leaves_buffer, indices_buffer)
             .unwrap();

@@ -438,7 +445,7 @@ mod tree_test {
     #[test]
     fn test_atomic_operation_consistency() {
         // Test duplicated from https://github.com/waku-org/go-zerokit-rln/pull/12/files
-        let tree_height = TEST_TREE_HEIGHT;
+        let tree_depth = TEST_TREE_DEPTH;
         let no_of_leaves = 256;

         // We generate a vector of random leaves
@@ -449,10 +456,10 @@ mod tree_test {
         }

         // We create a new tree
-        let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
+        let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();

         // We add leaves in a batch into the tree
-        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
+        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
         rln.init_tree_with_leaves(&mut buffer).unwrap();

         // We check if number of leaves set is consistent
@@ -466,8 +473,8 @@ mod tree_test {
         let set_index = rng.gen_range(0..no_of_leaves) as usize;
         let indices = vec![set_index as u8];
         let zero_leaf: Vec<Fr> = vec![];
-        let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices).unwrap());
-        let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&zero_leaf).unwrap());
+        let indices_buffer = Cursor::new(vec_u8_to_bytes_le(&indices));
+        let leaves_buffer = Cursor::new(vec_fr_to_bytes_le(&zero_leaf));
         rln.atomic_operation(0, leaves_buffer, indices_buffer)
             .unwrap();

@@ -489,7 +496,7 @@ mod tree_test {
     #[test]
     // This test checks if `set_leaves_from` throws an error when the index is out of bounds
    fn test_set_leaves_bad_index() {
-        let tree_height = TEST_TREE_HEIGHT;
+        let tree_depth = TEST_TREE_DEPTH;
         let no_of_leaves = 256;

         // We generate a vector of random leaves
@@ -498,10 +505,10 @@ mod tree_test {
         for _ in 0..no_of_leaves {
             leaves.push(Fr::rand(&mut rng));
         }
-        let bad_index = (1 << tree_height) - rng.gen_range(0..no_of_leaves) as usize;
+        let bad_index = (1 << tree_depth) - rng.gen_range(0..no_of_leaves) as usize;

         // We create a new tree
-        let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
+        let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();

         // Get root of empty tree
         let mut buffer = Cursor::new(Vec::<u8>::new());
@@ -509,7 +516,7 @@ mod tree_test {
         let (root_empty, _) = bytes_le_to_fr(&buffer.into_inner());

         // We add leaves in a batch into the tree
-        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
+        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));

         #[allow(unused_must_use)]
         rln.set_leaves_from(bad_index, &mut buffer)
@@ -529,9 +536,9 @@ mod tree_test {
     #[test]
     fn test_get_leaf() {
         // We generate a random tree
-        let tree_height = 10;
+        let tree_depth = 10;
         let mut rng = thread_rng();
-        let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
+        let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();

         // We generate a random leaf
         let leaf = Fr::rand(&mut rng);
@@ -554,9 +561,9 @@ mod tree_test {

     #[test]
     fn test_valid_metadata() {
-        let tree_height = TEST_TREE_HEIGHT;
+        let tree_depth = TEST_TREE_DEPTH;

-        let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
+        let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();

         let arbitrary_metadata: &[u8] = b"block_number:200000";
         rln.set_metadata(arbitrary_metadata).unwrap();
@@ -570,9 +577,9 @@ mod tree_test {

     #[test]
     fn test_empty_metadata() {
-        let tree_height = TEST_TREE_HEIGHT;
+        let tree_depth = TEST_TREE_DEPTH;

-        let rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
+        let rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();

         let mut buffer = Cursor::new(Vec::<u8>::new());
         rln.get_metadata(&mut buffer).unwrap();
@@ -583,7 +590,7 @@ mod tree_test {

     #[test]
     fn test_rln_proof() {
-        let tree_height = TEST_TREE_HEIGHT;
+        let tree_depth = TEST_TREE_DEPTH;
         let no_of_leaves = 256;

         // We generate a vector of random leaves
@@ -596,10 +603,10 @@ mod tree_test {
         }

         // We create a new RLN instance
-        let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
+        let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();

         // We add leaves in a batch into the tree
-        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
+        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
         rln.init_tree_with_leaves(&mut buffer).unwrap();

         // Generate identity pair
@@ -617,9 +624,9 @@ mod tree_test {
         let signal: [u8; 32] = rng.gen();

         // We generate a random epoch
-        let epoch = hash_to_field(b"test-epoch");
+        let epoch = hash_to_field_le(b"test-epoch");
         // We generate a random rln_identifier
-        let rln_identifier = hash_to_field(b"test-rln-identifier");
+        let rln_identifier = hash_to_field_le(b"test-rln-identifier");
         // We generate a external nullifier
         let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
         // We choose a message_id satisfy 0 <= message_id < MESSAGE_LIMIT
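This rename pattern (`hash_to_field` becoming `hash_to_field_le`) recurs throughout the diff: the hasher now states its byte order explicitly instead of leaving it implicit. A small sketch of deriving an external nullifier with the LE variant, mirroring the test code above:

    use rln::circuit::Fr;
    use rln::hashers::{hash_to_field_le, poseidon_hash};

    fn external_nullifier(epoch_tag: &[u8], app_tag: &[u8]) -> Fr {
        // Both inputs are mapped to field elements from little-endian bytes,
        // then combined with Poseidon, exactly as the tests do.
        let epoch = hash_to_field_le(epoch_tag);
        let rln_identifier = hash_to_field_le(app_tag);
        poseidon_hash(&[epoch, rln_identifier])
    }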
@@ -657,7 +664,7 @@ mod tree_test {

     #[test]
     fn test_rln_with_witness() {
-        let tree_height = TEST_TREE_HEIGHT;
+        let tree_depth = TEST_TREE_DEPTH;
         let no_of_leaves = 256;

         // We generate a vector of random leaves
@@ -668,10 +675,10 @@ mod tree_test {
         }

         // We create a new RLN instance
-        let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
+        let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();

         // We add leaves in a batch into the tree
-        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
+        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
         rln.init_tree_with_leaves(&mut buffer).unwrap();

         // Generate identity pair
@@ -689,9 +696,9 @@ mod tree_test {
         let signal: [u8; 32] = rng.gen();

         // We generate a random epoch
-        let epoch = hash_to_field(b"test-epoch");
+        let epoch = hash_to_field_le(b"test-epoch");
         // We generate a random rln_identifier
-        let rln_identifier = hash_to_field(b"test-rln-identifier");
+        let rln_identifier = hash_to_field_le(b"test-rln-identifier");
         // We generate a external nullifier
         let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
         // We choose a message_id satisfy 0 <= message_id < MESSAGE_LIMIT
@@ -740,7 +747,7 @@ mod tree_test {
     #[test]
     fn proof_verification_with_roots() {
         // The first part is similar to test_rln_with_witness
-        let tree_height = TEST_TREE_HEIGHT;
+        let tree_depth = TEST_TREE_DEPTH;
         let no_of_leaves = 256;

         // We generate a vector of random leaves
@@ -751,10 +758,10 @@ mod tree_test {
         }

         // We create a new RLN instance
-        let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
+        let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();

         // We add leaves in a batch into the tree
-        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves).unwrap());
+        let mut buffer = Cursor::new(vec_fr_to_bytes_le(&leaves));
         rln.init_tree_with_leaves(&mut buffer).unwrap();

         // Generate identity pair
@@ -772,9 +779,9 @@ mod tree_test {
         let signal: [u8; 32] = rng.gen();

         // We generate a random epoch
-        let epoch = hash_to_field(b"test-epoch");
+        let epoch = hash_to_field_le(b"test-epoch");
         // We generate a random rln_identifier
-        let rln_identifier = hash_to_field(b"test-rln-identifier");
+        let rln_identifier = hash_to_field_le(b"test-rln-identifier");
         // We generate a external nullifier
         let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
         // We choose a message_id satisfy 0 <= message_id < MESSAGE_LIMIT
@@ -823,7 +830,7 @@ mod tree_test {
             .verify_with_roots(&mut input_buffer.clone(), &mut roots_buffer)
             .unwrap();

-        assert_eq!(verified, false);
+        assert!(!verified);

         // We get the root of the tree obtained adding one leaf per time
         let mut buffer = Cursor::new(Vec::<u8>::new());
@@ -842,10 +849,10 @@ mod tree_test {

     #[test]
     fn test_recover_id_secret() {
-        let tree_height = TEST_TREE_HEIGHT;
+        let tree_depth = TEST_TREE_DEPTH;

         // We create a new RLN instance
-        let mut rln = RLN::new(tree_height, generate_input_buffer()).unwrap();
+        let mut rln = RLN::new(tree_depth, generate_input_buffer()).unwrap();

         // Generate identity pair
         let (identity_secret_hash, id_commitment) = keygen();
@@ -864,9 +871,9 @@ mod tree_test {
         let signal2: [u8; 32] = rng.gen();

         // We generate a random epoch
-        let epoch = hash_to_field(b"test-epoch");
+        let epoch = hash_to_field_le(b"test-epoch");
         // We generate a random rln_identifier
-        let rln_identifier = hash_to_field(b"test-rln-identifier");
+        let rln_identifier = hash_to_field_le(b"test-rln-identifier");
         // We generate a external nullifier
         let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
         // We choose a message_id satisfy 0 <= message_id < MESSAGE_LIMIT
@@ -877,7 +884,7 @@ mod tree_test {
         // We prepare input for generate_rln_proof API
         // input_data is [ identity_secret<32> | id_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal<var> ]
         let prove_input1 = prepare_prove_input(
-            identity_secret_hash,
+            identity_secret_hash.clone(),
             identity_index,
             user_message_limit,
             message_id,
@@ -886,7 +893,7 @@ mod tree_test {
         );

         let prove_input2 = prepare_prove_input(
-            identity_secret_hash,
+            identity_secret_hash.clone(),
             identity_index,
             user_message_limit,
             message_id,
@@ -929,7 +936,7 @@ mod tree_test {

         // We check if the recovered identity secret hash corresponds to the original one
         let (recovered_identity_secret_hash, _) = bytes_le_to_fr(&serialized_identity_secret_hash);
-        assert_eq!(recovered_identity_secret_hash, identity_secret_hash);
+        assert_eq!(recovered_identity_secret_hash, *identity_secret_hash);

         // We now test that computing identity_secret_hash is unsuccessful if shares computed from two different identity secret hashes but within same epoch are passed

@@ -979,20 +986,59 @@ mod tree_test {

         // ensure that the recovered secret does not match with either of the
         // used secrets in proof generation
-        assert_ne!(recovered_identity_secret_hash_new, identity_secret_hash_new);
+        assert_ne!(
+            recovered_identity_secret_hash_new,
+            *identity_secret_hash_new
+        );
     }
+
+    #[test]
+    fn test_tree_config_input_trait() {
+        let empty_json_input = generate_input_buffer();
+        let rln_with_empty_json_config = RLN::new(TEST_TREE_DEPTH, empty_json_input);
+        assert!(rln_with_empty_json_config.is_ok());
+
+        let json_config = json!({
+            "tree_config": {
+                "path": "pmtree-123456",
+                "temporary": false,
+                "cache_capacity": 1073741824,
+                "flush_every_ms": 500,
+                "mode": "HighThroughput",
+                "use_compression": false
+            }
+        });
+        let json_input = Cursor::new(json_config.to_string());
+        let rln_with_json_config = RLN::new(TEST_TREE_DEPTH, json_input.clone());
+        assert!(rln_with_json_config.is_ok());
+
+        let json_to_tree_config = json_input.into_tree_config();
+        assert!(json_to_tree_config.is_ok());
+        let rln_with_json_to_tree_config = RLN::new(TEST_TREE_DEPTH, json_to_tree_config.unwrap());
+        assert!(rln_with_json_to_tree_config.is_ok());
+
+        let default_pmtree_config = PmtreeConfig::default();
+        let rln_with_default_tree_config = RLN::new(TEST_TREE_DEPTH, default_pmtree_config);
+        assert!(rln_with_default_tree_config.is_ok());
+
+        let custom_pmtree_config = PmtreeConfig::builder()
+            .temporary(true)
+            .use_compression(false)
+            .build();
+        let rln_with_custom_tree_config = RLN::new(TEST_TREE_DEPTH, custom_pmtree_config.unwrap());
+        assert!(rln_with_custom_tree_config.is_ok());
+    }
 }
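The new `TreeConfigInput` trait is what lets `RLN::new` accept either a raw JSON reader or an already-parsed `PmtreeConfig`, as the test above exercises. A minimal sketch of the three entry points (tree depth 20 and the JSON contents are illustrative values; only builder fields shown in the test are used):

    use std::io::Cursor;
    use rln::pm_tree_adapter::PmtreeConfig;
    use rln::public::{TreeConfigInput, RLN};

    fn build_instances() {
        // 1. JSON config, parsed internally by RLN::new
        let json = Cursor::new(r#"{"tree_config":{"temporary":true}}"#.to_string());
        let _from_json = RLN::new(20, json).unwrap();

        // 2. Convert the reader yourself; into_tree_config comes from TreeConfigInput
        let typed = Cursor::new("{}".to_string()).into_tree_config().unwrap();
        let _from_typed = RLN::new(20, typed).unwrap();

        // 3. Builder-based config, reusable without JSON round-trips
        let built = PmtreeConfig::builder().temporary(true).build().unwrap();
        let _from_builder = RLN::new(20, built).unwrap();
    }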

#[cfg(feature = "stateless")]
mod stateless_test {
-    use crate::circuit::{Fr, TEST_TREE_HEIGHT};
-    use crate::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash};
-    use crate::poseidon_tree::PoseidonTree;
+    use crate::circuit::{Fr, TEST_TREE_DEPTH};
+    use crate::hashers::{hash_to_field_le, poseidon_hash as utils_poseidon_hash, PoseidonHash};
     use crate::protocol::*;
     use crate::public::RLN;
     use crate::utils::*;
     use std::io::Cursor;
-    use utils::ZerokitMerkleTree;
+    use utils::{OptimalMerkleTree, ZerokitMerkleProof, ZerokitMerkleTree};

     use ark_std::{rand::thread_rng, UniformRand};
     use rand::Rng;
@@ -1005,10 +1051,10 @@ mod stateless_test {
         let mut rln = RLN::new().unwrap();

         let default_leaf = Fr::from(0);
-        let mut tree = PoseidonTree::new(
-            TEST_TREE_HEIGHT,
+        let mut tree: OptimalMerkleTree<PoseidonHash> = OptimalMerkleTree::new(
+            TEST_TREE_DEPTH,
             default_leaf,
-            ConfigOf::<PoseidonTree>::default(),
+            ConfigOf::<OptimalMerkleTree<PoseidonHash>>::default(),
         )
         .unwrap();

@@ -1026,20 +1072,21 @@ mod stateless_test {
         let signal: [u8; 32] = rng.gen();

         // We generate a random epoch
-        let epoch = hash_to_field(b"test-epoch");
+        let epoch = hash_to_field_le(b"test-epoch");
         // We generate a random rln_identifier
-        let rln_identifier = hash_to_field(b"test-rln-identifier");
+        let rln_identifier = hash_to_field_le(b"test-rln-identifier");
         let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);

         // We prepare input for generate_rln_proof API
         // input_data is [ identity_secret<32> | id_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal<var> ]

-        let x = hash_to_field(&signal);
+        let x = hash_to_field_le(&signal);
         let merkle_proof = tree.proof(identity_index).expect("proof should exist");

         let rln_witness = rln_witness_from_values(
             identity_secret_hash,
-            &merkle_proof,
+            merkle_proof.get_path_elements(),
+            merkle_proof.get_path_index(),
             x,
             external_nullifier,
             user_message_limit,
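A note on the witness change just above: `rln_witness_from_values` no longer borrows the whole Merkle proof object; the caller passes the path elements and path indices separately, decoupling witness construction from any particular tree type. A hedged sketch of the resulting call shape (the signature in the comment is inferred from the call sites in this diff, including the trailing `message_id` argument cut off by the hunk boundaries, and is not confirmed):

    // Assumed shape, inferred from the call sites in this diff:
    // fn rln_witness_from_values(
    //     identity_secret: IdSecret,
    //     path_elements: Vec<Fr>,
    //     identity_path_index: Vec<u8>,
    //     x: Fr,
    //     external_nullifier: Fr,
    //     user_message_limit: Fr,
    //     message_id: Fr,
    // ) -> Result<RLNWitnessInput, ProtocolError>

    let merkle_proof = tree.proof(identity_index).expect("proof should exist");
    let rln_witness = rln_witness_from_values(
        identity_secret_hash.clone(),
        merkle_proof.get_path_elements(),
        merkle_proof.get_path_index(),
        x,
        external_nullifier,
        user_message_limit,
        message_id,
    )
    .unwrap();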
@@ -1081,7 +1128,7 @@ mod stateless_test {
             .verify_with_roots(&mut input_buffer.clone(), &mut roots_buffer)
             .unwrap();

-        assert_eq!(verified, false);
+        assert!(!verified);

         // We get the root of the tree obtained adding one leaf per time
         let root = tree.root();
@@ -1102,10 +1149,10 @@ mod stateless_test {
         let mut rln = RLN::new().unwrap();

         let default_leaf = Fr::from(0);
-        let mut tree = PoseidonTree::new(
-            TEST_TREE_HEIGHT,
+        let mut tree: OptimalMerkleTree<PoseidonHash> = OptimalMerkleTree::new(
+            TEST_TREE_DEPTH,
             default_leaf,
-            ConfigOf::<PoseidonTree>::default(),
+            ConfigOf::<OptimalMerkleTree<PoseidonHash>>::default(),
         )
         .unwrap();

@@ -1116,25 +1163,26 @@ mod stateless_test {
         tree.update_next(rate_commitment).unwrap();

         // We generate a random epoch
-        let epoch = hash_to_field(b"test-epoch");
+        let epoch = hash_to_field_le(b"test-epoch");
         // We generate a random rln_identifier
-        let rln_identifier = hash_to_field(b"test-rln-identifier");
+        let rln_identifier = hash_to_field_le(b"test-rln-identifier");
         let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);

         // We generate a random signal
         let mut rng = thread_rng();
         let signal1: [u8; 32] = rng.gen();
-        let x1 = hash_to_field(&signal1);
+        let x1 = hash_to_field_le(&signal1);

         let signal2: [u8; 32] = rng.gen();
-        let x2 = hash_to_field(&signal2);
+        let x2 = hash_to_field_le(&signal2);

         let identity_index = tree.leaves_set();
         let merkle_proof = tree.proof(identity_index).expect("proof should exist");

         let rln_witness1 = rln_witness_from_values(
-            identity_secret_hash,
-            &merkle_proof,
+            identity_secret_hash.clone(),
+            merkle_proof.get_path_elements(),
+            merkle_proof.get_path_index(),
             x1,
             external_nullifier,
             user_message_limit,
@@ -1143,8 +1191,9 @@ mod stateless_test {
         .unwrap();

         let rln_witness2 = rln_witness_from_values(
-            identity_secret_hash,
-            &merkle_proof,
+            identity_secret_hash.clone(),
+            merkle_proof.get_path_elements(),
+            merkle_proof.get_path_index(),
             x2,
             external_nullifier,
             user_message_limit,
@@ -1183,7 +1232,7 @@ mod stateless_test {

         // We check if the recovered identity secret hash corresponds to the original one
         let (recovered_identity_secret_hash, _) = bytes_le_to_fr(&serialized_identity_secret_hash);
-        assert_eq!(recovered_identity_secret_hash, identity_secret_hash);
+        assert_eq!(recovered_identity_secret_hash, *identity_secret_hash);

         // We now test that computing identity_secret_hash is unsuccessful if shares computed from two different identity secret hashes but within same epoch are passed

@@ -1193,14 +1242,15 @@ mod stateless_test {
         tree.update_next(rate_commitment_new).unwrap();

         let signal3: [u8; 32] = rng.gen();
-        let x3 = hash_to_field(&signal3);
+        let x3 = hash_to_field_le(&signal3);

         let identity_index_new = tree.leaves_set();
         let merkle_proof_new = tree.proof(identity_index_new).expect("proof should exist");

         let rln_witness3 = rln_witness_from_values(
-            identity_secret_hash_new,
-            &merkle_proof_new,
+            identity_secret_hash_new.clone(),
+            merkle_proof_new.get_path_elements(),
+            merkle_proof_new.get_path_index(),
             x3,
             external_nullifier,
             user_message_limit,
@@ -1227,7 +1277,7 @@ mod stateless_test {

         let serialized_identity_secret_hash = output_buffer.into_inner();
         let (recovered_identity_secret_hash_new, _) =
-            bytes_le_to_fr(&serialized_identity_secret_hash);
+            IdSecret::from_bytes_le(&serialized_identity_secret_hash);

         // ensure that the recovered secret does not match with either of the
         // used secrets in proof generation

316 rln/src/utils.rs
@@ -1,29 +1,34 @@
 // This crate provides cross-module useful utilities (mainly type conversions) not necessarily specific to RLN

-use crate::circuit::Fr;
+use crate::error::ConversionError;
 use ark_ff::PrimeField;
-use color_eyre::{Report, Result};
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
+use ark_std::UniformRand;
 use num_bigint::{BigInt, BigUint};
 use num_traits::Num;
+use rand::Rng;
+use ruint::aliases::U256;
 use serde_json::json;
 use std::io::Cursor;

+use crate::circuit::Fr;
+use std::ops::Deref;
+use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};

 #[inline(always)]
-pub fn to_bigint(el: &Fr) -> Result<BigInt> {
-    Ok(BigUint::from(*el).into())
+pub fn to_bigint(el: &Fr) -> BigInt {
+    BigUint::from(*el).into()
 }
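Since the `Fr -> BigUint -> BigInt` chain cannot fail, `to_bigint` now returns the value directly and the `?`/`Ok` plumbing seen earlier in `rln_witness_to_bigint_json` disappears. A quick sketch (the field value is illustrative):

    use rln::circuit::Fr;
    use rln::utils::to_bigint;

    let el = Fr::from(42u64);
    // Direct value, no Result to unwrap
    let as_decimal = to_bigint(&el).to_str_radix(10);
    assert_eq!(as_decimal, "42");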
 #[inline(always)]
-pub fn fr_byte_size() -> usize {
+pub const fn fr_byte_size() -> usize {
     let mbs = <Fr as PrimeField>::MODULUS_BIT_SIZE;
     ((mbs + 64 - (mbs % 64)) / 8) as usize
 }

 #[inline(always)]
-pub fn str_to_fr(input: &str, radix: u32) -> Result<Fr> {
+pub fn str_to_fr(input: &str, radix: u32) -> Result<Fr, ConversionError> {
     if !(radix == 10 || radix == 16) {
-        return Err(Report::msg("wrong radix"));
+        return Err(ConversionError::WrongRadix);
     }

     // We remove any quote present and we trim
@@ -48,6 +53,15 @@ pub fn bytes_le_to_fr(input: &[u8]) -> (Fr, usize) {
     )
 }

+#[inline(always)]
+pub fn bytes_be_to_fr(input: &[u8]) -> (Fr, usize) {
+    let el_size = fr_byte_size();
+    (
+        Fr::from(BigUint::from_bytes_be(&input[0..el_size])),
+        el_size,
+    )
+}
+
 #[inline(always)]
 pub fn fr_to_bytes_le(input: &Fr) -> Vec<u8> {
     let input_biguint: BigUint = (*input).into();
@@ -58,96 +72,350 @@ pub fn fr_to_bytes_le(input: &Fr) -> Vec<u8> {
 }

 #[inline(always)]
-pub fn vec_fr_to_bytes_le(input: &[Fr]) -> Result<Vec<u8>> {
+pub fn fr_to_bytes_be(input: &Fr) -> Vec<u8> {
+    let input_biguint: BigUint = (*input).into();
+    let mut res = input_biguint.to_bytes_be();
+    // For BE, insert 0 at the start of the Vec (see also fr_to_bytes_le comments)
+    let to_insert_count = fr_byte_size().saturating_sub(res.len());
+    if to_insert_count > 0 {
+        // Insert multi 0 at index 0
+        res.splice(0..0, std::iter::repeat_n(0, to_insert_count));
+    }
+    res
+}
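The new big-endian helpers mirror the little-endian ones exactly: a fixed `fr_byte_size()` width, with padding zeros at the most significant side (the front, for BE). A roundtrip sketch:

    use rln::circuit::Fr;
    use rln::utils::{bytes_be_to_fr, fr_byte_size, fr_to_bytes_be};

    let el = Fr::from(1u64);
    let be = fr_to_bytes_be(&el);
    // Fixed-width encoding regardless of the value's magnitude
    assert_eq!(be.len(), fr_byte_size());
    let (decoded, read) = bytes_be_to_fr(&be);
    assert_eq!(decoded, el);
    assert_eq!(read, fr_byte_size());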
 #[inline(always)]
+pub fn vec_fr_to_bytes_le(input: &[Fr]) -> Vec<u8> {
     // Calculate capacity for Vec:
     // - 8 bytes for normalized vector length (usize)
     // - each Fr element requires fr_byte_size() bytes (typically 32 bytes)
     let mut bytes = Vec::with_capacity(8 + input.len() * fr_byte_size());

     // We store the vector length
-    bytes.extend_from_slice(&normalize_usize(input.len()));
+    bytes.extend_from_slice(&normalize_usize_le(input.len()));

     // We store each element
     for el in input {
         bytes.extend_from_slice(&fr_to_bytes_le(el));
     }

-    Ok(bytes)
+    bytes
 }

 #[inline(always)]
-pub fn vec_u8_to_bytes_le(input: &[u8]) -> Result<Vec<u8>> {
+pub fn vec_fr_to_bytes_be(input: &[Fr]) -> Vec<u8> {
+    // Calculate capacity for Vec:
+    // - 8 bytes for normalized vector length (usize)
+    // - each Fr element requires fr_byte_size() bytes (typically 32 bytes)
+    let mut bytes = Vec::with_capacity(8 + input.len() * fr_byte_size());
+
+    // We store the vector length
+    bytes.extend_from_slice(&normalize_usize_be(input.len()));
+
+    // We store each element
+    for el in input {
+        bytes.extend_from_slice(&fr_to_bytes_be(el));
+    }
+
+    bytes
+}
+
+#[inline(always)]
+pub fn vec_u8_to_bytes_le(input: &[u8]) -> Vec<u8> {
     // Calculate capacity for Vec:
     // - 8 bytes for normalized vector length (usize)
     // - variable length input data
     let mut bytes = Vec::with_capacity(8 + input.len());

     // We store the vector length
-    bytes.extend_from_slice(&normalize_usize(input.len()));
+    bytes.extend_from_slice(&normalize_usize_le(input.len()));

     // We store the input
     bytes.extend_from_slice(input);

-    Ok(bytes)
+    bytes
 }

 #[inline(always)]
-pub fn bytes_le_to_vec_u8(input: &[u8]) -> Result<(Vec<u8>, usize)> {
-    let mut read: usize = 0;
+pub fn vec_u8_to_bytes_be(input: &[u8]) -> Vec<u8> {
+    // Calculate capacity for Vec:
+    // - 8 bytes for normalized vector length (usize)
+    // - variable length input data
+    let mut bytes = Vec::with_capacity(8 + input.len());
+
+    // We store the vector length
+    bytes.extend_from_slice(&normalize_usize_be(input.len()));
+
+    // We store the input
+    bytes.extend_from_slice(input);
+
+    bytes
+}
+
+#[inline(always)]
+pub fn bytes_le_to_vec_u8(input: &[u8]) -> Result<(Vec<u8>, usize), ConversionError> {
+    let mut read: usize = 0;
+    if input.len() < 8 {
+        return Err(ConversionError::InsufficientData {
+            expected: 8,
+            actual: input.len(),
+        });
+    }
     let len = usize::try_from(u64::from_le_bytes(input[0..8].try_into()?))?;
     read += 8;

+    if input.len() < 8 + len {
+        return Err(ConversionError::InsufficientData {
+            expected: 8 + len,
+            actual: input.len(),
+        });
+    }
     let res = input[8..8 + len].to_vec();
     read += res.len();

     Ok((res, read))
 }
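The deserializers now validate the declared length against the actual buffer before slicing, returning `ConversionError::InsufficientData` instead of panicking on a short read. A sketch of defensive decoding (the `expected`/`actual` fields are as in the variant above):

    use rln::utils::bytes_le_to_vec_u8;

    // Header claims 5 payload bytes but only 2 follow the 8-byte LE length prefix
    let truncated = vec![5u8, 0, 0, 0, 0, 0, 0, 0, 1, 2];
    match bytes_le_to_vec_u8(&truncated) {
        Ok((data, read)) => println!("decoded {read} bytes: {data:?}"),
        // Here: InsufficientData { expected: 13, actual: 10 }
        Err(e) => eprintln!("malformed input: {e:?}"),
    }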
 #[inline(always)]
-pub fn bytes_le_to_vec_fr(input: &[u8]) -> Result<(Vec<Fr>, usize)> {
+pub fn bytes_be_to_vec_u8(input: &[u8]) -> Result<(Vec<u8>, usize), ConversionError> {
     let mut read: usize = 0;
-    let mut res: Vec<Fr> = Vec::new();
+    if input.len() < 8 {
+        return Err(ConversionError::InsufficientData {
+            expected: 8,
+            actual: input.len(),
+        });
+    }
+    let len = usize::try_from(u64::from_be_bytes(input[0..8].try_into()?))?;
+    read += 8;
+    if input.len() < 8 + len {
+        return Err(ConversionError::InsufficientData {
+            expected: 8 + len,
+            actual: input.len(),
+        });
+    }
+    let res = input[8..8 + len].to_vec();
+    read += res.len();
+    Ok((res, read))
+}
+
+#[inline(always)]
+pub fn bytes_le_to_vec_fr(input: &[u8]) -> Result<(Vec<Fr>, usize), ConversionError> {
+    let mut read: usize = 0;
+    if input.len() < 8 {
+        return Err(ConversionError::InsufficientData {
+            expected: 8,
+            actual: input.len(),
+        });
+    }
     let len = usize::try_from(u64::from_le_bytes(input[0..8].try_into()?))?;
     read += 8;

+    let el_size = fr_byte_size();
+    if input.len() < 8 + len * el_size {
+        return Err(ConversionError::InsufficientData {
+            expected: 8 + len * el_size,
+            actual: input.len(),
+        });
+    }
+    let mut res: Vec<Fr> = Vec::with_capacity(len);
     for i in 0..len {
         let (curr_el, _) = bytes_le_to_fr(&input[8 + el_size * i..8 + el_size * (i + 1)]);
         res.push(curr_el);
         read += el_size;
     }

     Ok((res, read))
 }

 #[inline(always)]
-pub fn bytes_le_to_vec_usize(input: &[u8]) -> Result<Vec<usize>> {
+pub fn bytes_be_to_vec_fr(input: &[u8]) -> Result<(Vec<Fr>, usize), ConversionError> {
+    let mut read: usize = 0;
+    if input.len() < 8 {
+        return Err(ConversionError::InsufficientData {
+            expected: 8,
+            actual: input.len(),
+        });
+    }
+    let len = usize::try_from(u64::from_be_bytes(input[0..8].try_into()?))?;
+    read += 8;
+    let el_size = fr_byte_size();
+    if input.len() < 8 + len * el_size {
+        return Err(ConversionError::InsufficientData {
+            expected: 8 + len * el_size,
+            actual: input.len(),
+        });
+    }
+    let mut res: Vec<Fr> = Vec::with_capacity(len);
+    for i in 0..len {
+        let (curr_el, _) = bytes_be_to_fr(&input[8 + el_size * i..8 + el_size * (i + 1)]);
+        res.push(curr_el);
+        read += el_size;
+    }
+    Ok((res, read))
+}
+
+#[inline(always)]
+pub fn bytes_le_to_vec_usize(input: &[u8]) -> Result<Vec<usize>, ConversionError> {
+    if input.len() < 8 {
+        return Err(ConversionError::InsufficientData {
+            expected: 8,
+            actual: input.len(),
+        });
+    }
     let nof_elem = usize::try_from(u64::from_le_bytes(input[0..8].try_into()?))?;
     if nof_elem == 0 {
         Ok(vec![])
     } else {
+        if input.len() < 8 + nof_elem * 8 {
+            return Err(ConversionError::InsufficientData {
+                expected: 8 + nof_elem * 8,
+                actual: input.len(),
+            });
+        }
         let elements: Vec<usize> = input[8..]
             .chunks(8)
             .take(nof_elem)
             .map(|ch| usize::from_le_bytes(ch[0..8].try_into().unwrap()))
             .collect();
         Ok(elements)
     }
 }

+#[inline(always)]
+pub fn bytes_be_to_vec_usize(input: &[u8]) -> Result<Vec<usize>, ConversionError> {
+    if input.len() < 8 {
+        return Err(ConversionError::InsufficientData {
+            expected: 8,
+            actual: input.len(),
+        });
+    }
+    let nof_elem = usize::try_from(u64::from_be_bytes(input[0..8].try_into()?))?;
+    if nof_elem == 0 {
+        Ok(vec![])
+    } else {
+        if input.len() < 8 + nof_elem * 8 {
+            return Err(ConversionError::InsufficientData {
+                expected: 8 + nof_elem * 8,
+                actual: input.len(),
+            });
+        }
+        let elements: Vec<usize> = input[8..]
+            .chunks(8)
+            .take(nof_elem)
+            .map(|ch| usize::from_be_bytes(ch[0..8].try_into().unwrap()))
+            .collect();
+        Ok(elements)
+    }
+}
+
 /// Normalizes a `usize` into an 8-byte array, ensuring consistency across architectures.
 /// On 32-bit systems, the result is zero-padded to 8 bytes.
 /// On 64-bit systems, it directly represents the `usize` value.
 #[inline(always)]
-pub fn normalize_usize(input: usize) -> [u8; 8] {
+pub fn normalize_usize_le(input: usize) -> [u8; 8] {
     let mut bytes = [0u8; 8];
     let input_bytes = input.to_le_bytes();
     bytes[..input_bytes.len()].copy_from_slice(&input_bytes);
     bytes
 }

+/// Normalizes a `usize` into an 8-byte array, ensuring consistency across architectures.
+/// On 32-bit systems, the result is zero-padded to 8 bytes.
+/// On 64-bit systems, it directly represents the `usize` value.
+#[inline(always)]
+pub fn normalize_usize_be(input: usize) -> [u8; 8] {
+    let mut bytes = [0u8; 8];
+    let input_bytes = input.to_be_bytes();
+    bytes[..input_bytes.len()].copy_from_slice(&input_bytes);
+    bytes
+}
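Splitting `normalize_usize` into explicit `_le`/`_be` variants removes any ambiguity about how the 8-byte length prefix is laid out on the wire. A tiny sketch:

    use rln::utils::{normalize_usize_be, normalize_usize_le};

    // The same length, two wire layouts; both are 8 bytes on 32- and 64-bit targets
    assert_eq!(normalize_usize_le(1), [1, 0, 0, 0, 0, 0, 0, 0]);
    assert_eq!(normalize_usize_be(1), [0, 0, 0, 0, 0, 0, 0, 1]);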
 #[inline(always)] // using for test
 pub fn generate_input_buffer() -> Cursor<String> {
     Cursor::new(json!({}).to_string())
 }

+#[derive(
+    Debug, Zeroize, ZeroizeOnDrop, Clone, PartialEq, CanonicalSerialize, CanonicalDeserialize,
+)]
+pub struct IdSecret(ark_bn254::Fr);
+
+impl IdSecret {
+    pub fn rand<R: Rng + ?Sized>(rng: &mut R) -> Self {
+        let mut fr = Fr::rand(rng);
+        let res = Self::from(&mut fr);
+        // No need to zeroize fr (already zeroiz'ed in from implementation)
+        #[allow(clippy::let_and_return)]
+        res
+    }
+
+    pub fn from_bytes_le(input: &[u8]) -> (Self, usize) {
+        let el_size = fr_byte_size();
+        let b_uint = BigUint::from_bytes_le(&input[0..el_size]);
+        let mut fr = Fr::from(b_uint);
+        let res = IdSecret::from(&mut fr);
+        // Note: no zeroize on b_uint as it has been moved
+        (res, el_size)
+    }
+
+    pub(crate) fn to_bytes_le(&self) -> Zeroizing<Vec<u8>> {
+        let input_biguint: BigUint = self.0.into();
+        let mut res = input_biguint.to_bytes_le();
+        res.resize(fr_byte_size(), 0);
+        Zeroizing::new(res)
+    }
+
+    pub(crate) fn to_bytes_be(&self) -> Zeroizing<Vec<u8>> {
+        let input_biguint: BigUint = self.0.into();
+        let mut res = input_biguint.to_bytes_be();
+        let to_insert_count = fr_byte_size().saturating_sub(res.len());
+        if to_insert_count > 0 {
+            // Insert multi 0 at index 0
+            res.splice(0..0, std::iter::repeat_n(0, to_insert_count));
+        }
+        Zeroizing::new(res)
+    }
+
+    /// Warning: this can leak the secret value
+    /// Warning: Leaked value is of type 'U256' which implement Copy (every copy will not be zeroized)
+    pub(crate) fn to_u256(&self) -> U256 {
+        let mut big_int = self.0.into_bigint();
+        let res = U256::from_limbs(big_int.0);
+        big_int.zeroize();
+        res
+    }
+}
+
+impl From<&mut Fr> for IdSecret {
+    fn from(value: &mut Fr) -> Self {
+        let id_secret = Self(*value);
+        value.zeroize();
+        id_secret
+    }
+}
+
+impl Deref for IdSecret {
+    type Target = Fr;
+
+    /// Deref to &Fr
+    ///
+    /// Warning: this can leak the secret value
+    /// Warning: Leaked value is of type 'Fr' which implement Copy (every copy will not be zeroized)
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
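`IdSecret` wraps the raw field element so the secret is zeroized on drop and never silently copied; getting a plain `Fr` back requires the explicit (and documented-as-leaky) deref. A usage sketch:

    use ark_std::rand::thread_rng;
    use rln::utils::IdSecret;

    let mut rng = thread_rng();
    let secret = IdSecret::rand(&mut rng);

    // Explicit deref: the copied Fr escapes the zeroize-on-drop wrapper,
    // so keep its lifetime as short as possible (see the Deref warnings above)
    let as_fr = *secret;
    let _ = as_fr;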
+#[derive(Debug, Zeroize, ZeroizeOnDrop)]
+pub enum FrOrSecret {
+    IdSecret(IdSecret),
+    Fr(Fr),
+}
+
+impl From<Fr> for FrOrSecret {
+    fn from(value: Fr) -> Self {
+        FrOrSecret::Fr(value)
+    }
+}
+
+impl From<IdSecret> for FrOrSecret {
+    fn from(value: IdSecret) -> Self {
+        FrOrSecret::IdSecret(value)
+    }
+}
499 rln/tests/ffi.rs
@@ -3,10 +3,10 @@
 mod test {
     use ark_std::{rand::thread_rng, UniformRand};
     use rand::Rng;
-    use rln::circuit::*;
-    use rln::ffi::{hash as ffi_hash, poseidon_hash as ffi_poseidon_hash, *};
-    use rln::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS};
-    use rln::protocol::*;
+    use rln::circuit::{Fr, TEST_TREE_DEPTH};
+    use rln::ffi::*;
+    use rln::hashers::{hash_to_field_le, poseidon_hash as utils_poseidon_hash};
+    use rln::protocol::{deserialize_identity_tuple_le, *};
     use rln::public::RLN;
     use rln::utils::*;
     use serde_json::json;
@@ -14,6 +14,7 @@ mod test {
     use std::io::Read;
     use std::mem::MaybeUninit;
     use std::time::{Duration, Instant};
+    use zeroize::Zeroize;

     const NO_OF_LEAVES: usize = 256;

@@ -21,13 +22,13 @@ mod test {
         let mut rln_pointer = MaybeUninit::<*mut RLN>::uninit();
         let input_config = json!({}).to_string();
         let input_buffer = &Buffer::from(input_config.as_bytes());
-        let success = new(TEST_TREE_HEIGHT, input_buffer, rln_pointer.as_mut_ptr());
+        let success = new(TEST_TREE_DEPTH, input_buffer, rln_pointer.as_mut_ptr());
         assert!(success, "RLN object creation failed");
         unsafe { &mut *rln_pointer.assume_init() }
     }

     fn set_leaves_init(rln_pointer: &mut RLN, leaves: &[Fr]) {
-        let leaves_ser = vec_fr_to_bytes_le(&leaves).unwrap();
+        let leaves_ser = vec_fr_to_bytes_le(leaves);
         let input_buffer = &Buffer::from(leaves_ser.as_ref());
         let success = init_tree_with_leaves(rln_pointer, input_buffer);
         assert!(success, "init tree with leaves call failed");
@@ -49,14 +50,14 @@ mod test {
         root
     }

-    fn identity_pair_gen(rln_pointer: &mut RLN) -> (Fr, Fr) {
+    fn identity_pair_gen() -> (IdSecret, Fr) {
         let mut output_buffer = MaybeUninit::<Buffer>::uninit();
-        let success = key_gen(rln_pointer, output_buffer.as_mut_ptr());
+        let success = key_gen(output_buffer.as_mut_ptr(), true);
         assert!(success, "key gen call failed");
         let output_buffer = unsafe { output_buffer.assume_init() };
         let result_data = <&[u8]>::from(&output_buffer).to_vec();
-        let (identity_secret_hash, read) = bytes_le_to_fr(&result_data);
-        let (id_commitment, _) = bytes_le_to_fr(&result_data[read..].to_vec());
+        let (identity_secret_hash, read) = IdSecret::from_bytes_le(&result_data);
+        let (id_commitment, _) = bytes_le_to_fr(&result_data[read..]);
         (identity_secret_hash, id_commitment)
     }
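The FFI `key_gen` no longer needs an RLN instance; it writes `identity_secret | id_commitment` into the output buffer, and the extra boolean appears to select little-endian serialization at this call site (that interpretation of the flag is an assumption drawn from this test, not from a documented FFI contract). A hedged sketch of a standalone caller:

    use std::mem::MaybeUninit;
    use rln::circuit::Fr;
    use rln::ffi::{key_gen, Buffer};
    use rln::utils::{bytes_le_to_fr, IdSecret};

    fn gen_identity() -> (IdSecret, Fr) {
        let mut out = MaybeUninit::<Buffer>::uninit();
        // true = little-endian output (assumed meaning, see above)
        assert!(key_gen(out.as_mut_ptr(), true), "key gen call failed");
        let out = unsafe { out.assume_init() };
        let data = <&[u8]>::from(&out).to_vec();
        // The secret goes straight into the zeroizing wrapper, never a bare Fr
        let (secret, read) = IdSecret::from_bytes_le(&data);
        let (commitment, _) = bytes_le_to_fr(&data[read..]);
        (secret, commitment)
    }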
@@ -80,7 +81,7 @@ mod test {
         // We first add leaves one by one specifying the index
         for (i, leaf) in leaves.iter().enumerate() {
             // We prepare the rate_commitment and we set the leaf at provided index
-            let leaf_ser = fr_to_bytes_le(&leaf);
+            let leaf_ser = fr_to_bytes_le(leaf);
             let input_buffer = &Buffer::from(leaf_ser.as_ref());
             let success = set_leaf(rln_pointer, i, input_buffer);
             assert!(success, "set leaf call failed");
@@ -90,12 +91,12 @@ mod test {
         let root_single = get_tree_root(rln_pointer);

         // We reset the tree to default
-        let success = set_tree(rln_pointer, TEST_TREE_HEIGHT);
+        let success = set_tree(rln_pointer, TEST_TREE_DEPTH);
         assert!(success, "set tree call failed");

         // We add leaves one by one using the internal index (new leaves goes in next available position)
         for leaf in &leaves {
-            let leaf_ser = fr_to_bytes_le(&leaf);
+            let leaf_ser = fr_to_bytes_le(leaf);
             let input_buffer = &Buffer::from(leaf_ser.as_ref());
             let success = set_next_leaf(rln_pointer, input_buffer);
             assert!(success, "set next leaf call failed");
@@ -108,7 +109,7 @@ mod test {
         assert_eq!(root_single, root_next);

         // We reset the tree to default
-        let success = set_tree(rln_pointer, TEST_TREE_HEIGHT);
+        let success = set_tree(rln_pointer, TEST_TREE_DEPTH);
         assert!(success, "set tree call failed");

         // We add leaves in a batch into the tree
@@ -131,7 +132,7 @@ mod test {
         let root_delete = get_tree_root(rln_pointer);

         // We reset the tree to default
-        let success = set_tree(rln_pointer, TEST_TREE_HEIGHT);
+        let success = set_tree(rln_pointer, TEST_TREE_DEPTH);
         assert!(success, "set tree call failed");

         // We get the root of the empty tree
@@ -156,7 +157,7 @@ mod test {
         // random number between 0..no_of_leaves
         let mut rng = thread_rng();
         let set_index = rng.gen_range(0..NO_OF_LEAVES) as usize;
-        println!("set_index: {}", set_index);
+        println!("set_index: {set_index}");

         // We add leaves in a batch into the tree
         set_leaves_init(rln_pointer, &leaves);
@@ -164,13 +165,13 @@ mod test {
         // We get the root of the tree obtained adding leaves in batch
         let root_batch_with_init = get_tree_root(rln_pointer);

-        // `init_tree_with_leaves` resets the tree to the height it was initialized with, using `set_tree`
+        // `init_tree_with_leaves` resets the tree to the depth it was initialized with, using `set_tree`

         // We add leaves in a batch starting from index 0..set_index
         set_leaves_init(rln_pointer, &leaves[0..set_index]);

         // We add the remaining n leaves in a batch starting from index set_index
-        let leaves_n = vec_fr_to_bytes_le(&leaves[set_index..]).unwrap();
+        let leaves_n = vec_fr_to_bytes_le(&leaves[set_index..]);
         let buffer = &Buffer::from(leaves_n.as_ref());
         let success = set_leaves_from(rln_pointer, set_index, buffer);
         assert!(success, "set leaves from call failed");
@@ -183,12 +184,12 @@ mod test {
         );

         // We reset the tree to default
-        let success = set_tree(rln_pointer, TEST_TREE_HEIGHT);
+        let success = set_tree(rln_pointer, TEST_TREE_DEPTH);
         assert!(success, "set tree call failed");

         // We add leaves one by one using the internal index (new leaves goes in next available position)
         for leaf in &leaves {
-            let leaf_ser = fr_to_bytes_le(&leaf);
+            let leaf_ser = fr_to_bytes_le(leaf);
             let input_buffer = &Buffer::from(leaf_ser.as_ref());
             let success = set_next_leaf(rln_pointer, input_buffer);
             assert!(success, "set next leaf call failed");
@@ -220,17 +221,12 @@ mod test {
         let last_leaf_index = NO_OF_LEAVES - 1;
         let indices = vec![last_leaf_index as u8];
         let last_leaf = vec![*last_leaf];
-        let indices = vec_u8_to_bytes_le(&indices).unwrap();
+        let indices = vec_u8_to_bytes_le(&indices);
         let indices_buffer = &Buffer::from(indices.as_ref());
-        let leaves = vec_fr_to_bytes_le(&last_leaf).unwrap();
+        let leaves = vec_fr_to_bytes_le(&last_leaf);
         let leaves_buffer = &Buffer::from(leaves.as_ref());

-        let success = atomic_operation(
-            rln_pointer,
-            last_leaf_index as usize,
-            leaves_buffer,
-            indices_buffer,
-        );
+        let success = atomic_operation(rln_pointer, last_leaf_index, leaves_buffer, indices_buffer);
         assert!(success, "atomic operation call failed");

         // We get the root of the tree obtained after a no-op
@@ -247,13 +243,13 @@ mod test {
         let rln_pointer = create_rln_instance();

         let mut rng = thread_rng();
-        let bad_index = (1 << TEST_TREE_HEIGHT) - rng.gen_range(0..NO_OF_LEAVES) as usize;
+        let bad_index = (1 << TEST_TREE_DEPTH) - rng.gen_range(0..NO_OF_LEAVES) as usize;

         // Get root of empty tree
         let root_empty = get_tree_root(rln_pointer);

         // We add leaves in a batch into the tree
-        let leaves = vec_fr_to_bytes_le(&leaves).unwrap();
+        let leaves = vec_fr_to_bytes_le(&leaves);
         let buffer = &Buffer::from(leaves.as_ref());
         let success = set_leaves_from(rln_pointer, bad_index, buffer);
         assert!(!success, "set leaves from call succeeded");
@@ -271,8 +267,11 @@ mod test {
         let rln_pointer = create_rln_instance();

         // generate identity
-        let identity_secret_hash = hash_to_field(b"test-merkle-proof");
-        let id_commitment = utils_poseidon_hash(&[identity_secret_hash]);
+        let mut identity_secret_hash_ = hash_to_field_le(b"test-merkle-proof");
+        let identity_secret_hash = IdSecret::from(&mut identity_secret_hash_);
+        let mut to_hash = [*identity_secret_hash.clone()];
+        let id_commitment = utils_poseidon_hash(&to_hash);
+        to_hash[0].zeroize();
         let user_message_limit = Fr::from(100);
         let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);

@@ -305,7 +304,7 @@ mod test {
         let result_data = <&[u8]>::from(&output_buffer).to_vec();

         let (path_elements, read) = bytes_le_to_vec_fr(&result_data).unwrap();
-        let (identity_path_index, _) = bytes_le_to_vec_u8(&result_data[read..].to_vec()).unwrap();
+        let (identity_path_index, _) = bytes_le_to_vec_u8(&result_data[read..]).unwrap();

         // We check correct computation of the path and indexes
         let expected_path_elements: Vec<Fr> = [
@@ -365,7 +364,7 @@ mod test {

         for _ in 0..sample_size {
             // We generate random witness instances and relative proof values
-            let rln_witness = random_rln_witness(TEST_TREE_HEIGHT);
+            let rln_witness = random_rln_witness(TEST_TREE_DEPTH);
             let proof_values = proof_values_from_witness(&rln_witness).unwrap();

             // We prepare id_commitment and we set the leaf at provided index
@@ -393,7 +392,7 @@ mod test {
             let success = verify(rln_pointer, input_buffer, proof_is_valid_ptr);
             verify_time += now.elapsed().as_nanos();
             assert!(success, "verify call failed");
-            assert_eq!(proof_is_valid, true);
+            assert!(proof_is_valid);
         }

         println!(
@@ -415,12 +414,9 @@ mod test {
         // We obtain the root from the RLN instance
         let root_rln_folder = get_tree_root(rln_pointer);

-        #[cfg(feature = "arkzkey")]
-        let zkey_path = "./resources/tree_height_20/rln_final.arkzkey";
-        #[cfg(not(feature = "arkzkey"))]
-        let zkey_path = "./resources/tree_height_20/rln_final.zkey";
-        let mut zkey_file = File::open(&zkey_path).expect("no file found");
-        let metadata = std::fs::metadata(&zkey_path).expect("unable to read metadata");
+        let zkey_path = "./resources/tree_depth_20/rln_final.arkzkey";
+        let mut zkey_file = File::open(zkey_path).expect("no file found");
+        let metadata = std::fs::metadata(zkey_path).expect("unable to read metadata");
         let mut zkey_buffer = vec![0; metadata.len() as usize];
         zkey_file
             .read_exact(&mut zkey_buffer)
@@ -428,9 +424,9 @@ mod test {

         let zkey_data = &Buffer::from(&zkey_buffer[..]);

-        let graph_data = "./resources/tree_height_20/graph.bin";
-        let mut graph_file = File::open(&graph_data).expect("no file found");
-        let metadata = std::fs::metadata(&graph_data).expect("unable to read metadata");
+        let graph_data = "./resources/tree_depth_20/graph.bin";
+        let mut graph_file = File::open(graph_data).expect("no file found");
+        let metadata = std::fs::metadata(graph_data).expect("unable to read metadata");
         let mut graph_buffer = vec![0; metadata.len() as usize];
         graph_file
             .read_exact(&mut graph_buffer)
@@ -443,7 +439,7 @@ mod test {
         let tree_config = "".to_string();
         let tree_config_buffer = &Buffer::from(tree_config.as_bytes());
         let success = new_with_params(
-            TEST_TREE_HEIGHT,
+            TEST_TREE_DEPTH,
             zkey_data,
             graph_data,
             tree_config_buffer,
@@ -476,7 +472,7 @@ mod test {
         set_leaves_init(rln_pointer, &leaves);

         // We generate a new identity pair
-        let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);
+        let (identity_secret_hash, id_commitment) = identity_pair_gen();
         let identity_index: usize = NO_OF_LEAVES;

         // We generate a random signal
@@ -484,9 +480,9 @@ mod test {
         let signal: [u8; 32] = rng.gen();

         // We generate a random epoch
-        let epoch = hash_to_field(b"test-epoch");
+        let epoch = hash_to_field_le(b"test-epoch");
         // We generate a random rln_identifier
-        let rln_identifier = hash_to_field(b"test-rln-identifier");
+        let rln_identifier = hash_to_field_le(b"test-rln-identifier");
         // We generate a external nullifier
         let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
         // We choose a message_id satisfy 0 <= message_id < MESSAGE_LIMIT
@@ -525,7 +521,7 @@ mod test {
         let proof_is_valid_ptr = &mut proof_is_valid as *mut bool;
         let success = verify_rln_proof(rln_pointer, input_buffer, proof_is_valid_ptr);
         assert!(success, "verify call failed");
-        assert_eq!(proof_is_valid, true);
+        assert!(proof_is_valid);
     }

     #[test]
@@ -543,7 +539,7 @@ mod test {
         set_leaves_init(rln_pointer, &leaves);

         // We generate a new identity pair
-        let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);
+        let (identity_secret_hash, id_commitment) = identity_pair_gen();
         let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);
         let identity_index: usize = NO_OF_LEAVES;

@@ -552,9 +548,9 @@ mod test {
         let signal: [u8; 32] = rng.gen();

         // We generate a random epoch
-        let epoch = hash_to_field(b"test-epoch");
+        let epoch = hash_to_field_le(b"test-epoch");
         // We generate a random rln_identifier
-        let rln_identifier = hash_to_field(b"test-rln-identifier");
+        let rln_identifier = hash_to_field_le(b"test-rln-identifier");
         // We generate a external nullifier
         let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
         // We choose a message_id satisfy 0 <= message_id < MESSAGE_LIMIT
@@ -600,7 +596,7 @@ mod test {
             verify_with_roots(rln_pointer, input_buffer, roots_buffer, proof_is_valid_ptr);
         assert!(success, "verify call failed");
         // Proof should be valid
-        assert_eq!(proof_is_valid, true);
+        assert!(proof_is_valid);

         // We then try to verify against some random values not containing the correct one.
         for _ in 0..5 {
@@ -614,7 +610,7 @@ mod test {
                 verify_with_roots(rln_pointer, input_buffer, roots_buffer, proof_is_valid_ptr);
             assert!(success, "verify call failed");
             // Proof should be invalid.
-            assert_eq!(proof_is_valid, false);
+            assert!(!proof_is_valid);

         // We finally include the correct root
         // We get the root of the tree obtained adding one leaf per time
@@ -630,7 +626,7 @@ mod test {
             verify_with_roots(rln_pointer, input_buffer, roots_buffer, proof_is_valid_ptr);
         assert!(success, "verify call failed");
         // Proof should be valid.
-        assert_eq!(proof_is_valid, true);
+        assert!(proof_is_valid);
     }

     #[test]
@@ -640,7 +636,7 @@ mod test {
         let rln_pointer = create_rln_instance();

         // We generate a new identity pair
-        let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);
+        let (identity_secret_hash, id_commitment) = identity_pair_gen();

         let user_message_limit = Fr::from(100);
         let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);
@@ -663,9 +659,9 @@ mod test {
         let signal2: [u8; 32] = rng.gen();

         // We generate a random epoch
-        let epoch = hash_to_field(b"test-epoch");
+        let epoch = hash_to_field_le(b"test-epoch");
         // We generate a random rln_identifier
-        let rln_identifier = hash_to_field(b"test-rln-identifier");
+        let rln_identifier = hash_to_field_le(b"test-rln-identifier");
         // We generate a external nullifier
         let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);
         // We choose a message_id satisfy 0 <= message_id < MESSAGE_LIMIT
@@ -674,7 +670,7 @@ mod test {
         // We prepare input for generate_rln_proof API
         // input_data is [ identity_secret<32> | id_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal<var> ]
         let prove_input1 = prepare_prove_input(
|
||||
identity_secret_hash,
|
||||
identity_secret_hash.clone(),
|
||||
identity_index,
|
||||
user_message_limit,
|
||||
message_id,
|
||||
@@ -683,7 +679,7 @@ mod test {
|
||||
);
|
||||
|
||||
let prove_input2 = prepare_prove_input(
|
||||
identity_secret_hash,
|
||||
identity_secret_hash.clone(),
|
||||
identity_index,
|
||||
user_message_limit,
|
||||
message_id,
|
||||
@@ -718,12 +714,12 @@ mod test {
|
||||
|
||||
// We check if the recovered identity secret hash corresponds to the original one
|
||||
let (recovered_identity_secret_hash, _) = bytes_le_to_fr(&serialized_identity_secret_hash);
|
||||
assert_eq!(recovered_identity_secret_hash, identity_secret_hash);
|
||||
assert_eq!(recovered_identity_secret_hash, *identity_secret_hash);
|
||||
|
||||
// We now test that computing identity_secret_hash is unsuccessful if shares computed from two different identity secret hashes but within same epoch are passed
|
||||
|
||||
// We generate a new identity pair
|
||||
let (identity_secret_hash_new, id_commitment_new) = identity_pair_gen(rln_pointer);
|
||||
let (identity_secret_hash_new, id_commitment_new) = identity_pair_gen();
|
||||
let rate_commitment_new = utils_poseidon_hash(&[id_commitment_new, user_message_limit]);
|
||||
|
||||
// We set as leaf id_commitment, its index would be equal to 1 since at 0 there is id_commitment
|
||||
@@ -772,146 +768,16 @@ mod test {
|
||||
|
||||
// ensure that the recovered secret does not match with either of the
|
||||
// used secrets in proof generation
|
||||
assert_ne!(recovered_identity_secret_hash_new, identity_secret_hash_new);
|
||||
}
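Editor's aside, not part of the diff: the input_data layout documented in the comment above is a plain concatenation with fixed-width fields and a length prefix for the signal. A minimal sketch of a serializer for it, assuming little-endian integer encoding; the helper name and the fixed 32-byte field encodings are illustrative, not the crate's actual code:

fn prepare_prove_input_sketch(
    identity_secret: [u8; 32],
    id_index: u64,
    user_message_limit: [u8; 32],
    message_id: [u8; 32],
    external_nullifier: [u8; 32],
    signal: &[u8],
) -> Vec<u8> {
    let mut out = Vec::new();
    out.extend_from_slice(&identity_secret);                     // identity_secret<32>
    out.extend_from_slice(&id_index.to_le_bytes());              // id_index<8>
    out.extend_from_slice(&user_message_limit);                  // user_message_limit<32>
    out.extend_from_slice(&message_id);                          // message_id<32>
    out.extend_from_slice(&external_nullifier);                  // external_nullifier<32>
    out.extend_from_slice(&(signal.len() as u64).to_le_bytes()); // signal_len<8>
    out.extend_from_slice(signal);                               // signal<var>
    out
}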

#[test]
// Tests seeded keygen using FFI APIs
fn test_seeded_keygen_ffi() {
// We create a RLN instance
let rln_pointer = create_rln_instance();

// We generate a new identity pair from an input seed
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = seeded_key_gen(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_secret_hash, read) = bytes_le_to_fr(&result_data);
let (id_commitment, _) = bytes_le_to_fr(&result_data[read..].to_vec());

// We check against expected values
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
assert_ne!(
recovered_identity_secret_hash_new,
*identity_secret_hash_new
);
let expected_id_commitment_seed_bytes = str_to_fr(
"0xbf16d2b5c0d6f9d9d561e05bfca16a81b4b873bb063508fae360d8c74cef51f",
16,
);

assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes.unwrap()
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}

#[test]
// Tests seeded extended keygen using FFI APIs
fn test_seeded_extended_keygen_ffi() {
// We create a RLN instance
let rln_pointer = create_rln_instance();

// We generate a new identity tuple from an input seed
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success =
seeded_extended_key_gen(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
deserialize_identity_tuple(result_data);

// We check against expected values
let expected_identity_trapdoor_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
);
let expected_identity_nullifier_seed_bytes = str_to_fr(
"0x1f18714c7bc83b5bca9e89d404cf6f2f585bc4c0f7ed8b53742b7e2b298f50b4",
16,
);
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x2aca62aaa7abaf3686fff2caf00f55ab9462dc12db5b5d4bcf3994e671f8e521",
16,
);
let expected_id_commitment_seed_bytes = str_to_fr(
"0x68b66aa0a8320d2e56842581553285393188714c48f9b17acd198b4f1734c5c",
16,
);

assert_eq!(
identity_trapdoor,
expected_identity_trapdoor_seed_bytes.unwrap()
);
assert_eq!(
identity_nullifier,
expected_identity_nullifier_seed_bytes.unwrap()
);
assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes.unwrap()
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}

#[test]
// Tests hash to field using FFI APIs
fn test_hash_to_field_ffi() {
let mut rng = rand::thread_rng();
let signal: [u8; 32] = rng.gen();

// We prepare id_commitment and we set the leaf at provided index
let input_buffer = &Buffer::from(signal.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_hash(input_buffer, output_buffer.as_mut_ptr());
assert!(success, "hash call failed");
let output_buffer = unsafe { output_buffer.assume_init() };

// We read the returned proof and we append proof values for verify
let serialized_hash = <&[u8]>::from(&output_buffer).to_vec();
let (hash1, _) = bytes_le_to_fr(&serialized_hash);

let hash2 = hash_to_field(&signal);

assert_eq!(hash1, hash2);
}

#[test]
// Test Poseidon hash FFI
fn test_poseidon_hash_ffi() {
// generate random number between 1..ROUND_PARAMS.len()
let mut rng = thread_rng();
let number_of_inputs = rng.gen_range(1..ROUND_PARAMS.len());
let mut inputs = Vec::with_capacity(number_of_inputs);
for _ in 0..number_of_inputs {
inputs.push(Fr::rand(&mut rng));
}
let inputs_ser = vec_fr_to_bytes_le(&inputs).unwrap();
let input_buffer = &Buffer::from(inputs_ser.as_ref());

let expected_hash = utils_poseidon_hash(inputs.as_ref());

let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_poseidon_hash(input_buffer, output_buffer.as_mut_ptr());
assert!(success, "poseidon hash call failed");

let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (received_hash, _) = bytes_le_to_fr(&result_data);

assert_eq!(received_hash, expected_hash);
}

#[test]
fn test_get_leaf_ffi() {
// We create a RLN instance
let no_of_leaves = 1 << TEST_TREE_HEIGHT;
let no_of_leaves = 1 << TEST_TREE_DEPTH;

// We create a RLN instance
let rln_pointer = create_rln_instance();
@@ -920,13 +786,12 @@ mod test {
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success =
seeded_extended_key_gen(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
let success = seeded_extended_key_gen(input_buffer, output_buffer.as_mut_ptr(), true);
assert!(success, "seeded key gen call failed");

let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (_, _, _, id_commitment) = deserialize_identity_tuple(result_data);
let (_, _, _, id_commitment) = deserialize_identity_tuple_le(result_data);

// We insert the id_commitment into the tree at a random index
let mut rng = thread_rng();
@@ -990,15 +855,14 @@ mod stateless_test {
use rand::Rng;
use rln::circuit::*;
use rln::ffi::generate_rln_proof_with_witness;
use rln::ffi::{hash as ffi_hash, poseidon_hash as ffi_poseidon_hash, *};
use rln::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS};
use rln::poseidon_tree::PoseidonTree;
use rln::ffi::*;
use rln::hashers::{hash_to_field_le, poseidon_hash as utils_poseidon_hash, PoseidonHash};
use rln::protocol::*;
use rln::public::RLN;
use rln::utils::*;
use std::mem::MaybeUninit;
use std::time::{Duration, Instant};
use utils::ZerokitMerkleTree;
use utils::{OptimalMerkleTree, ZerokitMerkleProof, ZerokitMerkleTree};

type ConfigOf<T> = <T as ZerokitMerkleTree>::Config;

@@ -1009,14 +873,14 @@ mod stateless_test {
unsafe { &mut *rln_pointer.assume_init() }
}

fn identity_pair_gen(rln_pointer: &mut RLN) -> (Fr, Fr) {
fn identity_pair_gen() -> (IdSecret, Fr) {
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = key_gen(rln_pointer, output_buffer.as_mut_ptr());
let success = key_gen(output_buffer.as_mut_ptr(), true);
assert!(success, "key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_secret_hash, read) = bytes_le_to_fr(&result_data);
let (id_commitment, _) = bytes_le_to_fr(&result_data[read..].to_vec());
let (identity_secret_hash, read) = IdSecret::from_bytes_le(&result_data);
let (id_commitment, _) = bytes_le_to_fr(&result_data[read..]);
(identity_secret_hash, id_commitment)
}

@@ -1033,43 +897,44 @@ mod stateless_test {
#[test]
fn test_recover_id_secret_stateless_ffi() {
let default_leaf = Fr::from(0);
let mut tree = PoseidonTree::new(
TEST_TREE_HEIGHT,
let mut tree: OptimalMerkleTree<PoseidonHash> = OptimalMerkleTree::new(
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<PoseidonTree>::default(),
ConfigOf::<OptimalMerkleTree<PoseidonHash>>::default(),
)
.unwrap();

let rln_pointer = create_rln_instance();

// We generate a new identity pair
let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);
let (identity_secret_hash, id_commitment) = identity_pair_gen();

let user_message_limit = Fr::from(100);
let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit]);
tree.update_next(rate_commitment).unwrap();

// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let rln_identifier = hash_to_field(b"test-rln-identifier");
let epoch = hash_to_field_le(b"test-epoch");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);

// We generate two proofs using same epoch but different signals.
// We generate a random signal
let mut rng = thread_rng();
let signal1: [u8; 32] = rng.gen();
let x1 = hash_to_field(&signal1);
let x1 = hash_to_field_le(&signal1);

let signal2: [u8; 32] = rng.gen();
let x2 = hash_to_field(&signal2);
let x2 = hash_to_field_le(&signal2);

let identity_index = tree.leaves_set();
let merkle_proof = tree.proof(identity_index).expect("proof should exist");

// We prepare input for generate_rln_proof API
let rln_witness1 = rln_witness_from_values(
identity_secret_hash,
&merkle_proof,
identity_secret_hash.clone(),
merkle_proof.get_path_elements(),
merkle_proof.get_path_index(),
x1,
external_nullifier,
user_message_limit,
@@ -1079,8 +944,9 @@ mod stateless_test {
let serialized1 = serialize_witness(&rln_witness1).unwrap();

let rln_witness2 = rln_witness_from_values(
identity_secret_hash,
&merkle_proof,
identity_secret_hash.clone(),
merkle_proof.get_path_elements(),
merkle_proof.get_path_index(),
x2,
external_nullifier,
user_message_limit,
@@ -1115,26 +981,28 @@ mod stateless_test {
assert!(!serialized_identity_secret_hash.is_empty());

// We check if the recovered identity secret hash corresponds to the original one
let (recovered_identity_secret_hash, _) = bytes_le_to_fr(&serialized_identity_secret_hash);
let (recovered_identity_secret_hash, _) =
IdSecret::from_bytes_le(&serialized_identity_secret_hash);
assert_eq!(recovered_identity_secret_hash, identity_secret_hash);

// We now test that computing identity_secret_hash is unsuccessful if shares computed from two different identity secret hashes but within the same epoch are passed

// We generate a new identity pair
let (identity_secret_hash_new, id_commitment_new) = identity_pair_gen(rln_pointer);
let (identity_secret_hash_new, id_commitment_new) = identity_pair_gen();
let rate_commitment_new = utils_poseidon_hash(&[id_commitment_new, user_message_limit]);
tree.update_next(rate_commitment_new).unwrap();

// We generate a random signal
let signal3: [u8; 32] = rng.gen();
let x3 = hash_to_field(&signal3);
let x3 = hash_to_field_le(&signal3);

let identity_index_new = tree.leaves_set();
let merkle_proof_new = tree.proof(identity_index_new).expect("proof should exist");

let rln_witness3 = rln_witness_from_values(
identity_secret_hash_new,
&merkle_proof_new,
identity_secret_hash_new.clone(),
merkle_proof_new.get_path_elements(),
merkle_proof_new.get_path_index(),
x3,
external_nullifier,
user_message_limit,
@@ -1166,23 +1034,26 @@ mod stateless_test {

// ensure that the recovered secret does not match with either of the
// used secrets in proof generation
assert_ne!(recovered_identity_secret_hash_new, identity_secret_hash_new);
assert_ne!(
recovered_identity_secret_hash_new,
*identity_secret_hash_new
);
}
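Editor's aside, not part of the diff: the recovery checked above is two-point interpolation of the degree-1 Shamir polynomial y = a0 + a1 * x used by RLN, where a0 is the identity secret; shares from different secrets lie on different lines, so interpolation yields an unrelated value, which is what the assert_ne! verifies. A minimal sketch using the Fr type already imported in this module, assuming ark-ff field arithmetic; the helper name is hypothetical:

fn recover_secret_sketch(x1: Fr, y1: Fr, x2: Fr, y2: Fr) -> Fr {
    // slope of the line through the two shares; panics if x1 == x2
    let a1 = (y2 - y1) / (x2 - x1);
    // evaluate the line back at x = 0 to obtain a0, the identity secret
    y1 - a1 * x1
}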

#[test]
fn test_verify_with_roots_stateless_ffi() {
let default_leaf = Fr::from(0);
let mut tree = PoseidonTree::new(
TEST_TREE_HEIGHT,
let mut tree: OptimalMerkleTree<PoseidonHash> = OptimalMerkleTree::new(
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<PoseidonTree>::default(),
ConfigOf::<OptimalMerkleTree<PoseidonHash>>::default(),
)
.unwrap();

let rln_pointer = create_rln_instance();

// We generate a new identity pair
let (identity_secret_hash, id_commitment) = identity_pair_gen(rln_pointer);
let (identity_secret_hash, id_commitment) = identity_pair_gen();

let identity_index = tree.leaves_set();
let user_message_limit = Fr::from(100);
@@ -1190,22 +1061,23 @@ mod stateless_test {
tree.update_next(rate_commitment).unwrap();

// We generate a random epoch
let epoch = hash_to_field(b"test-epoch");
let rln_identifier = hash_to_field(b"test-rln-identifier");
let epoch = hash_to_field_le(b"test-epoch");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
let external_nullifier = utils_poseidon_hash(&[epoch, rln_identifier]);

// We generate two proofs using same epoch but different signals.
// We generate a random signal
let mut rng = thread_rng();
let signal: [u8; 32] = rng.gen();
let x = hash_to_field(&signal);
let x = hash_to_field_le(&signal);

let merkle_proof = tree.proof(identity_index).expect("proof should exist");

// We prepare input for generate_rln_proof API
let rln_witness = rln_witness_from_values(
identity_secret_hash,
&merkle_proof,
merkle_proof.get_path_elements(),
merkle_proof.get_path_index(),
x,
external_nullifier,
user_message_limit,
@@ -1229,7 +1101,7 @@ mod stateless_test {
verify_with_roots(rln_pointer, input_buffer, roots_buffer, proof_is_valid_ptr);
assert!(success, "verify call failed");
// Proof should be valid
assert_eq!(proof_is_valid, true);
assert!(proof_is_valid);

// We serialize some random values in the roots buffer and check that the proof does not verify, since the buffer does not contain the correct root the proof refers to
for _ in 0..5 {
@@ -1243,7 +1115,7 @@ mod stateless_test {
verify_with_roots(rln_pointer, input_buffer, roots_buffer, proof_is_valid_ptr);
assert!(success, "verify call failed");
// Proof should be invalid.
assert_eq!(proof_is_valid, false);
assert!(!proof_is_valid);

// We get the root of the tree obtained adding one leaf at a time
let root = tree.root();
@@ -1258,7 +1130,7 @@ mod stateless_test {
verify_with_roots(rln_pointer, input_buffer, roots_buffer, proof_is_valid_ptr);
assert!(success, "verify call failed");
// Proof should be valid.
assert_eq!(proof_is_valid, true);
assert!(proof_is_valid);
}

#[test]
@@ -1275,7 +1147,7 @@ mod stateless_test {

for _ in 0..sample_size {
// We generate random witness instances and relative proof values
let rln_witness = random_rln_witness(TEST_TREE_HEIGHT);
let rln_witness = random_rln_witness(TEST_TREE_DEPTH);
let proof_values = proof_values_from_witness(&rln_witness).unwrap();

// We prepare id_commitment and we set the leaf at provided index
@@ -1303,7 +1175,7 @@ mod stateless_test {
let success = verify(rln_pointer, input_buffer, proof_is_valid_ptr);
verify_time += now.elapsed().as_nanos();
assert!(success, "verify call failed");
assert_eq!(proof_is_valid, true);
assert!(proof_is_valid);
}

println!(
@@ -1315,23 +1187,34 @@ mod stateless_test {
Duration::from_nanos((verify_time / sample_size).try_into().unwrap())
);
}
}

#[cfg(test)]
mod general_tests {
use ark_std::{rand::thread_rng, UniformRand};
use rand::Rng;
use rln::circuit::*;
use rln::ffi::{hash as ffi_hash, poseidon_hash as ffi_poseidon_hash, *};
use rln::hashers::{
hash_to_field_be, hash_to_field_le, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS,
};
use rln::protocol::*;
use rln::utils::*;
use std::mem::MaybeUninit;

#[test]
// Tests seeded keygen using FFI APIs
fn test_seeded_keygen_stateless_ffi() {
// We create a RLN instance
let rln_pointer = create_rln_instance();

// We generate a new identity pair from an input seed
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = seeded_key_gen(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
let success = seeded_key_gen(input_buffer, output_buffer.as_mut_ptr(), true);
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_secret_hash, read) = bytes_le_to_fr(&result_data);
let (id_commitment, _) = bytes_le_to_fr(&result_data[read..].to_vec());
let (id_commitment, _) = bytes_le_to_fr(&result_data[read..]);

// We check against expected values
let expected_identity_secret_hash_seed_bytes = str_to_fr(
@@ -1350,23 +1233,47 @@ mod stateless_test {
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}

#[test]
fn test_seeded_keygen_big_endian_ffi() {
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = seeded_key_gen(input_buffer, output_buffer.as_mut_ptr(), false);
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_secret_hash, read) = bytes_be_to_fr(&result_data);
let (id_commitment, _) = bytes_be_to_fr(&result_data[read..]);

let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
);
let expected_id_commitment_seed_bytes = str_to_fr(
"0xbf16d2b5c0d6f9d9d561e05bfca16a81b4b873bb063508fae360d8c74cef51f",
16,
);

assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes.unwrap()
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}

#[test]
// Tests seeded extended keygen using FFI APIs
fn test_seeded_extended_keygen_stateless_ffi() {
// We create a RLN instance
let rln_pointer = create_rln_instance();

// We generate a new identity tuple from an input seed
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success =
seeded_extended_key_gen(rln_pointer, input_buffer, output_buffer.as_mut_ptr());
let success = seeded_extended_key_gen(input_buffer, output_buffer.as_mut_ptr(), true);
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
deserialize_identity_tuple(result_data);
deserialize_identity_tuple_le(result_data);

// We check against expected values
let expected_identity_trapdoor_seed_bytes = str_to_fr(
@@ -1401,6 +1308,50 @@ mod stateless_test {
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}

#[test]
fn test_seeded_extended_keygen_big_endian_ffi() {
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
let input_buffer = &Buffer::from(seed_bytes);
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = seeded_extended_key_gen(input_buffer, output_buffer.as_mut_ptr(), false);
assert!(success, "seeded key gen call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
deserialize_identity_tuple_be(result_data);

let expected_identity_trapdoor_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
);
let expected_identity_nullifier_seed_bytes = str_to_fr(
"0x1f18714c7bc83b5bca9e89d404cf6f2f585bc4c0f7ed8b53742b7e2b298f50b4",
16,
);
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x2aca62aaa7abaf3686fff2caf00f55ab9462dc12db5b5d4bcf3994e671f8e521",
16,
);
let expected_id_commitment_seed_bytes = str_to_fr(
"0x68b66aa0a8320d2e56842581553285393188714c48f9b17acd198b4f1734c5c",
16,
);

assert_eq!(
identity_trapdoor,
expected_identity_trapdoor_seed_bytes.unwrap()
);
assert_eq!(
identity_nullifier,
expected_identity_nullifier_seed_bytes.unwrap()
);
assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes.unwrap()
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes.unwrap());
}

#[test]
// Tests hash to field using FFI APIs
fn test_hash_to_field_stateless_ffi() {
@@ -1410,7 +1361,7 @@ mod stateless_test {
// We prepare id_commitment and we set the leaf at provided index
let input_buffer = &Buffer::from(signal.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_hash(input_buffer, output_buffer.as_mut_ptr());
let success = ffi_hash(input_buffer, output_buffer.as_mut_ptr(), true);
assert!(success, "hash call failed");
let output_buffer = unsafe { output_buffer.assume_init() };

@@ -1418,7 +1369,25 @@ mod stateless_test {
let serialized_hash = <&[u8]>::from(&output_buffer).to_vec();
let (hash1, _) = bytes_le_to_fr(&serialized_hash);

let hash2 = hash_to_field(&signal);
let hash2 = hash_to_field_le(&signal);

assert_eq!(hash1, hash2);
}

#[test]
fn test_hash_to_field_big_endian_ffi() {
let mut rng = rand::thread_rng();
let signal: [u8; 32] = rng.gen();

let input_buffer = &Buffer::from(signal.as_ref());
let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_hash(input_buffer, output_buffer.as_mut_ptr(), false);
assert!(success, "hash call failed");
let output_buffer = unsafe { output_buffer.assume_init() };
let serialized_hash = <&[u8]>::from(&output_buffer).to_vec();
let (hash1, _) = bytes_be_to_fr(&serialized_hash);

let hash2 = hash_to_field_be(&signal);

assert_eq!(hash1, hash2);
}
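Editor's aside, not part of the diff: the _le/_be pair tested above is assumed to differ only in the byte order used when reducing the digest into the field. A hedged sketch using ark-ff's from_le_bytes_mod_order / from_be_bytes_mod_order and assuming a Keccak256 digest, as in earlier zerokit versions; verify against rln::hashers before relying on this:

use ark_bn254::Fr;
use ark_ff::PrimeField;
use tiny_keccak::{Hasher, Keccak};

fn hash_to_field_sketch(signal: &[u8], little_endian: bool) -> Fr {
    // Hash the signal to a 32-byte digest (assumed Keccak256)
    let mut digest = [0u8; 32];
    let mut keccak = Keccak::v256();
    keccak.update(signal);
    keccak.finalize(&mut digest);
    // Reduce into the field in the requested byte order
    if little_endian {
        Fr::from_le_bytes_mod_order(&digest)
    } else {
        Fr::from_be_bytes_mod_order(&digest)
    }
}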
@@ -1433,13 +1402,13 @@ mod stateless_test {
for _ in 0..number_of_inputs {
inputs.push(Fr::rand(&mut rng));
}
let inputs_ser = vec_fr_to_bytes_le(&inputs).unwrap();
let inputs_ser = vec_fr_to_bytes_le(&inputs);
let input_buffer = &Buffer::from(inputs_ser.as_ref());

let expected_hash = utils_poseidon_hash(inputs.as_ref());

let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_poseidon_hash(input_buffer, output_buffer.as_mut_ptr());
let success = ffi_poseidon_hash(input_buffer, output_buffer.as_mut_ptr(), true);
assert!(success, "poseidon hash call failed");

let output_buffer = unsafe { output_buffer.assume_init() };
@@ -1448,4 +1417,28 @@ mod stateless_test {

assert_eq!(received_hash, expected_hash);
}

#[test]
fn test_poseidon_hash_big_endian_ffi() {
let mut rng = thread_rng();
let number_of_inputs = rng.gen_range(1..ROUND_PARAMS.len());
let mut inputs = Vec::with_capacity(number_of_inputs);
for _ in 0..number_of_inputs {
inputs.push(Fr::rand(&mut rng));
}
let inputs_ser = vec_fr_to_bytes_be(&inputs);
let input_buffer = &Buffer::from(inputs_ser.as_ref());

let expected_hash = utils_poseidon_hash(inputs.as_ref());

let mut output_buffer = MaybeUninit::<Buffer>::uninit();
let success = ffi_poseidon_hash(input_buffer, output_buffer.as_mut_ptr(), false);
assert!(success, "poseidon hash call failed");

let output_buffer = unsafe { output_buffer.assume_init() };
let result_data = <&[u8]>::from(&output_buffer).to_vec();
let (received_hash, _) = bytes_be_to_fr(&result_data);

assert_eq!(received_hash, expected_hash);
}
}

@@ -1,28 +1,37 @@
////////////////////////////////////////////////////////////
/// Tests
// Tests
////////////////////////////////////////////////////////////

#![cfg(not(feature = "stateless"))]

#[cfg(test)]
mod test {
use rln::hashers::{poseidon_hash, PoseidonHash};
use rln::{circuit::*, poseidon_tree::PoseidonTree};
use rln::{
circuit::{Fr, TEST_TREE_DEPTH},
poseidon_tree::PoseidonTree,
};
use utils::{FullMerkleTree, OptimalMerkleTree, ZerokitMerkleProof, ZerokitMerkleTree};

#[test]
// The test is checked correctness for `FullMerkleTree` and `OptimalMerkleTree` with Poseidon hash
// The test checks correctness for `FullMerkleTree` and `OptimalMerkleTree` with Poseidon hash
fn test_zerokit_merkle_implementations() {
let sample_size = 100;
let leaves: Vec<Fr> = (0..sample_size).map(|s| Fr::from(s)).collect();
let leaves: Vec<Fr> = (0..sample_size).map(Fr::from).collect();

let mut tree_full = FullMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
let mut tree_opt = OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_HEIGHT).unwrap();
let mut tree_full = FullMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();
let mut tree_opt = OptimalMerkleTree::<PoseidonHash>::default(TEST_TREE_DEPTH).unwrap();

for i in 0..sample_size.try_into().unwrap() {
tree_full.set(i, leaves[i]).unwrap();
for (i, leave) in leaves
.into_iter()
.enumerate()
.take(sample_size.try_into().unwrap())
{
tree_full.set(i, leave).unwrap();
let proof = tree_full.proof(i).expect("index should be set");
assert_eq!(proof.leaf_index(), i);

tree_opt.set(i, leaves[i]).unwrap();
tree_opt.set(i, leave).unwrap();
assert_eq!(tree_opt.root(), tree_full.root());
let proof = tree_opt.proof(i).expect("index should be set");
assert_eq!(proof.leaf_index(), i);
@@ -98,7 +107,7 @@ mod test {

// check remove_indices_and_set_leaves inside override_range function
assert!(tree.get_empty_leaves_indices().is_empty());
let leaves_2: Vec<Fr> = (0..2).map(|s| Fr::from(s as i32)).collect();
let leaves_2: Vec<Fr> = (0..2).map(Fr::from).collect();
tree.override_range(0, leaves_2.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
assert_eq!(tree.get_empty_leaves_indices(), vec![2, 3]);
@@ -113,7 +122,7 @@ mod test {
.unwrap();
assert_eq!(tree.get_empty_leaves_indices(), vec![2, 3]);

let leaves_4: Vec<Fr> = (0..4).map(|s| Fr::from(s as i32)).collect();
let leaves_4: Vec<Fr> = (0..4).map(Fr::from).collect();
// check if the indexes for write and delete are the same
tree.override_range(0, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
.unwrap();
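// Editor's note, inferred from the assertions above and not from the crate's
// docs: override_range(start, leaves, indices) appears to first clear the
// leaves at `indices` and then write `leaves` starting at `start`, so passing
// fewer leaves than cleared indices leaves the tail empty (here indices 2 and 3).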

@@ -1,11 +1,18 @@
#![cfg(not(feature = "stateless"))]

#[cfg(test)]
mod test {
use ark_ff::BigInt;
use rln::circuit::{graph_from_folder, zkey_from_folder};
use rln::circuit::{Fr, TEST_TREE_HEIGHT};
use rln::hashers::{hash_to_field, poseidon_hash};
use rln::circuit::{Fr, TEST_TREE_DEPTH};
use rln::hashers::{hash_to_field_le, poseidon_hash};
use rln::poseidon_tree::PoseidonTree;
use rln::protocol::*;
use rln::protocol::{
deserialize_proof_values, deserialize_witness, generate_proof, keygen,
proof_values_from_witness, rln_witness_from_json, rln_witness_from_values,
rln_witness_to_json, seeded_keygen, serialize_proof_values, serialize_witness,
verify_proof, RLNWitnessInput,
};
use rln::utils::str_to_fr;
use utils::{ZerokitMerkleProof, ZerokitMerkleTree};

@@ -17,19 +24,19 @@ mod test {
let leaf_index = 3;

// generate identity
let identity_secret_hash = hash_to_field(b"test-merkle-proof");
let identity_secret_hash = hash_to_field_le(b"test-merkle-proof");
let id_commitment = poseidon_hash(&[identity_secret_hash]);
let rate_commitment = poseidon_hash(&[id_commitment, 100.into()]);

// generate merkle tree
let default_leaf = Fr::from(0);
let mut tree = PoseidonTree::new(
TEST_TREE_HEIGHT,
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<PoseidonTree>::default(),
)
.unwrap();
tree.set(leaf_index, rate_commitment.into()).unwrap();
tree.set(leaf_index, rate_commitment).unwrap();

// We check correct computation of the root
let root = tree.root();
@@ -95,26 +102,27 @@ mod test {
//// generate merkle tree
let default_leaf = Fr::from(0);
let mut tree = PoseidonTree::new(
TEST_TREE_HEIGHT,
TEST_TREE_DEPTH,
default_leaf,
ConfigOf::<PoseidonTree>::default(),
)
.unwrap();
tree.set(leaf_index, rate_commitment.into()).unwrap();
tree.set(leaf_index, rate_commitment).unwrap();

let merkle_proof = tree.proof(leaf_index).expect("proof should exist");

let signal = b"hey hey";
let x = hash_to_field(signal);
let x = hash_to_field_le(signal);

// We set the remaining values to random ones
let epoch = hash_to_field(b"test-epoch");
let rln_identifier = hash_to_field(b"test-rln-identifier");
let epoch = hash_to_field_le(b"test-epoch");
let rln_identifier = hash_to_field_le(b"test-rln-identifier");
let external_nullifier = poseidon_hash(&[epoch, rln_identifier]);

rln_witness_from_values(
identity_secret_hash,
&merkle_proof,
merkle_proof.get_path_elements(),
merkle_proof.get_path_index(),
x,
external_nullifier,
user_message_limit,
@@ -137,11 +145,11 @@ mod test {
assert_eq!(rln_witness_deser, rln_witness);

// Let's generate a zkSNARK proof
let proof = generate_proof(&proving_key, &rln_witness_deser, &graph_data).unwrap();
let proof = generate_proof(proving_key, &rln_witness_deser, graph_data).unwrap();
let proof_values = proof_values_from_witness(&rln_witness_deser).unwrap();

// Let's verify the proof
let verified = verify_proof(&verification_key, &proof, &proof_values);
let verified = verify_proof(verification_key, &proof, &proof_values);

assert!(verified.unwrap());
}
@@ -160,12 +168,12 @@ mod test {
let graph_data = graph_from_folder();

// Let's generate a zkSNARK proof
let proof = generate_proof(&proving_key, &rln_witness_deser, &graph_data).unwrap();
let proof = generate_proof(proving_key, &rln_witness_deser, graph_data).unwrap();

let proof_values = proof_values_from_witness(&rln_witness_deser).unwrap();

// Let's verify the proof
let success = verify_proof(&verification_key, &proof, &proof_values).unwrap();
let success = verify_proof(verification_key, &proof, &proof_values).unwrap();

assert!(success);
}

@@ -3,16 +3,35 @@ mod test {
#[cfg(not(feature = "stateless"))]
use {
ark_ff::BigInt,
rln::{circuit::TEST_TREE_HEIGHT, protocol::compute_tree_root},
rln::{
circuit::TEST_TREE_DEPTH,
protocol::compute_tree_root,
public::RLN,
utils::{
bytes_le_to_vec_fr, bytes_le_to_vec_u8, bytes_le_to_vec_usize, fr_to_bytes_le,
generate_input_buffer, IdSecret,
},
},
zeroize::Zeroize,
};

use ark_std::{rand::thread_rng, UniformRand};
use rand::Rng;
use rln::circuit::Fr;
use rln::hashers::{hash_to_field, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS};
use rln::protocol::deserialize_identity_tuple;
use rln::public::{hash as public_hash, poseidon_hash as public_poseidon_hash, RLN};
use rln::utils::*;
use rln::hashers::{
hash_to_field_be, hash_to_field_le, poseidon_hash as utils_poseidon_hash, ROUND_PARAMS,
};
use rln::protocol::{
deserialize_identity_pair_be, deserialize_identity_pair_le, deserialize_identity_tuple_be,
deserialize_identity_tuple_le,
};
use rln::public::{
hash as public_hash, poseidon_hash as public_poseidon_hash, seeded_extended_key_gen,
seeded_key_gen,
};
use rln::utils::{
bytes_be_to_fr, bytes_le_to_fr, str_to_fr, vec_fr_to_bytes_be, vec_fr_to_bytes_le,
};
use std::io::Cursor;

#[test]
@@ -22,11 +41,16 @@ mod test {
let leaf_index = 3;
let user_message_limit = 1;

let mut rln = RLN::new(TEST_TREE_HEIGHT, generate_input_buffer()).unwrap();
let mut rln = RLN::new(TEST_TREE_DEPTH, generate_input_buffer()).unwrap();

// generate identity
let identity_secret_hash = hash_to_field(b"test-merkle-proof");
let id_commitment = utils_poseidon_hash(&vec![identity_secret_hash]);
let mut identity_secret_hash_ = hash_to_field_le(b"test-merkle-proof");
let identity_secret_hash = IdSecret::from(&mut identity_secret_hash_);

let mut to_hash = [*identity_secret_hash.clone()];
let id_commitment = utils_poseidon_hash(&to_hash);
to_hash[0].zeroize();

let rate_commitment = utils_poseidon_hash(&[id_commitment, user_message_limit.into()]);

// check that the list of empty leaf indices is empty
@@ -66,7 +90,7 @@ mod test {

let buffer_inner = buffer.into_inner();
let (path_elements, read) = bytes_le_to_vec_fr(&buffer_inner).unwrap();
let (identity_path_index, _) = bytes_le_to_vec_u8(&buffer_inner[read..].to_vec()).unwrap();
let (identity_path_index, _) = bytes_le_to_vec_u8(&buffer_inner[read..]).unwrap();

// We check correct computation of the path and indexes
let expected_path_elements: Vec<Fr> = [
@@ -102,9 +126,9 @@ mod test {

// check subtree root computation for leaf 0 for all corresponding nodes up to the root
let l_idx = 0;
for n in (1..=TEST_TREE_HEIGHT).rev() {
let idx_l = l_idx * (1 << (TEST_TREE_HEIGHT - n));
let idx_r = (l_idx + 1) * (1 << (TEST_TREE_HEIGHT - n));
for n in (1..=TEST_TREE_DEPTH).rev() {
let idx_l = l_idx * (1 << (TEST_TREE_DEPTH - n));
let idx_r = (l_idx + 1) * (1 << (TEST_TREE_DEPTH - n));
let idx_sr = idx_l;

let mut buffer = Cursor::new(Vec::<u8>::new());
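// Editor's worked example for the index arithmetic above (illustrative): with
// TEST_TREE_DEPTH = 20 and l_idx = 0, level n = 19 gives idx_l = 0 and
// idx_r = 1 << 1 = 2, i.e. the two leaves under the lowest internal node;
// at n = 1 the range widens to [0, 1 << 19), the left half of the whole tree.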
@@ -136,19 +160,46 @@ mod test {

#[test]
fn test_seeded_keygen() {
let rln = RLN::default();

let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];

let mut input_buffer = Cursor::new(&seed_bytes);
let mut output_buffer = Cursor::new(Vec::<u8>::new());

rln.seeded_key_gen(&mut input_buffer, &mut output_buffer)
.unwrap();
seeded_key_gen(&mut input_buffer, &mut output_buffer, true).unwrap();
let serialized_output = output_buffer.into_inner();

let (identity_secret_hash, read) = bytes_le_to_fr(&serialized_output);
let (id_commitment, _) = bytes_le_to_fr(&serialized_output[read..].to_vec());
let (identity_secret_hash, id_commitment) = deserialize_identity_pair_le(serialized_output);

// We check against expected values
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
)
.unwrap();
let expected_id_commitment_seed_bytes = str_to_fr(
"0xbf16d2b5c0d6f9d9d561e05bfca16a81b4b873bb063508fae360d8c74cef51f",
16,
)
.unwrap();

assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes);
}
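Editor's aside, not part of the diff: the new free functions take a reader, a writer, and an endianness flag, so one seed can be serialized both ways. A minimal usage sketch, assuming the seeded_key_gen(reader, writer, is_little_endian) shape shown above; the wrapper function name is hypothetical:

use std::io::Cursor;

fn keygen_both_endians_sketch(seed: &[u8]) -> (Vec<u8>, Vec<u8>) {
    // Little-endian serialization of (identity_secret_hash, id_commitment)
    let mut le_out = Cursor::new(Vec::<u8>::new());
    seeded_key_gen(&mut Cursor::new(seed), &mut le_out, true).unwrap();

    // Big-endian serialization of the same key material
    let mut be_out = Cursor::new(Vec::<u8>::new());
    seeded_key_gen(&mut Cursor::new(seed), &mut be_out, false).unwrap();

    (le_out.into_inner(), be_out.into_inner())
}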

#[test]
fn test_seeded_keygen_big_endian() {
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];

let mut input_buffer = Cursor::new(&seed_bytes);
let mut output_buffer = Cursor::new(Vec::<u8>::new());

seeded_key_gen(&mut input_buffer, &mut output_buffer, false).unwrap();
let serialized_output = output_buffer.into_inner();

let (identity_secret_hash, id_commitment) = deserialize_identity_pair_be(serialized_output);

// We check against expected values
let expected_identity_secret_hash_seed_bytes = str_to_fr(
@@ -171,19 +222,60 @@ mod test {

#[test]
fn test_seeded_extended_keygen() {
let rln = RLN::default();

let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];

let mut input_buffer = Cursor::new(&seed_bytes);
let mut output_buffer = Cursor::new(Vec::<u8>::new());

rln.seeded_extended_key_gen(&mut input_buffer, &mut output_buffer)
.unwrap();
seeded_extended_key_gen(&mut input_buffer, &mut output_buffer, true).unwrap();
let serialized_output = output_buffer.into_inner();

let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
deserialize_identity_tuple(serialized_output);
deserialize_identity_tuple_le(serialized_output);

// We check against expected values
let expected_identity_trapdoor_seed_bytes = str_to_fr(
"0x766ce6c7e7a01bdf5b3f257616f603918c30946fa23480f2859c597817e6716",
16,
)
.unwrap();
let expected_identity_nullifier_seed_bytes = str_to_fr(
"0x1f18714c7bc83b5bca9e89d404cf6f2f585bc4c0f7ed8b53742b7e2b298f50b4",
16,
)
.unwrap();
let expected_identity_secret_hash_seed_bytes = str_to_fr(
"0x2aca62aaa7abaf3686fff2caf00f55ab9462dc12db5b5d4bcf3994e671f8e521",
16,
)
.unwrap();
let expected_id_commitment_seed_bytes = str_to_fr(
"0x68b66aa0a8320d2e56842581553285393188714c48f9b17acd198b4f1734c5c",
16,
)
.unwrap();

assert_eq!(identity_trapdoor, expected_identity_trapdoor_seed_bytes);
assert_eq!(identity_nullifier, expected_identity_nullifier_seed_bytes);
assert_eq!(
identity_secret_hash,
expected_identity_secret_hash_seed_bytes
);
assert_eq!(id_commitment, expected_id_commitment_seed_bytes);
}

#[test]
fn test_seeded_extended_keygen_big_endian() {
let seed_bytes: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];

let mut input_buffer = Cursor::new(&seed_bytes);
let mut output_buffer = Cursor::new(Vec::<u8>::new());

seeded_extended_key_gen(&mut input_buffer, &mut output_buffer, false).unwrap();
let serialized_output = output_buffer.into_inner();

let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) =
deserialize_identity_tuple_be(serialized_output);

// We check against expected values
let expected_identity_trapdoor_seed_bytes = str_to_fr(
@@ -224,11 +316,28 @@ mod test {
let mut input_buffer = Cursor::new(&signal);
let mut output_buffer = Cursor::new(Vec::<u8>::new());

public_hash(&mut input_buffer, &mut output_buffer).unwrap();
public_hash(&mut input_buffer, &mut output_buffer, true).unwrap();
let serialized_hash = output_buffer.into_inner();
let (hash1, _) = bytes_le_to_fr(&serialized_hash);

let hash2 = hash_to_field(&signal);
let hash2 = hash_to_field_le(&signal);

assert_eq!(hash1, hash2);
}

#[test]
fn test_hash_to_field_big_endian() {
let mut rng = thread_rng();
let signal: [u8; 32] = rng.gen();

let mut input_buffer = Cursor::new(&signal);
let mut output_buffer = Cursor::new(Vec::<u8>::new());

public_hash(&mut input_buffer, &mut output_buffer, false).unwrap();
let serialized_hash = output_buffer.into_inner();
let (hash1, _) = bytes_be_to_fr(&serialized_hash);

let hash2 = hash_to_field_be(&signal);

assert_eq!(hash1, hash2);
}
@@ -243,13 +352,33 @@ mod test {
}
let expected_hash = utils_poseidon_hash(&inputs);

let mut input_buffer = Cursor::new(vec_fr_to_bytes_le(&inputs).unwrap());
let mut input_buffer = Cursor::new(vec_fr_to_bytes_le(&inputs));
let mut output_buffer = Cursor::new(Vec::<u8>::new());

public_poseidon_hash(&mut input_buffer, &mut output_buffer).unwrap();
public_poseidon_hash(&mut input_buffer, &mut output_buffer, true).unwrap();
let serialized_hash = output_buffer.into_inner();
let (hash, _) = bytes_le_to_fr(&serialized_hash);

assert_eq!(hash, expected_hash);
}

#[test]
fn test_poseidon_hash_big_endian() {
let mut rng = thread_rng();
let number_of_inputs = rng.gen_range(1..ROUND_PARAMS.len());
let mut inputs = Vec::with_capacity(number_of_inputs);
for _ in 0..number_of_inputs {
inputs.push(Fr::rand(&mut rng));
}
let expected_hash = utils_poseidon_hash(&inputs);

let mut input_buffer = Cursor::new(vec_fr_to_bytes_be(&inputs));
let mut output_buffer = Cursor::new(Vec::<u8>::new());

public_poseidon_hash(&mut input_buffer, &mut output_buffer, false).unwrap();
let serialized_hash = output_buffer.into_inner();
let (hash, _) = bytes_be_to_fr(&serialized_hash);

assert_eq!(hash, expected_hash);
}
}

411
rln/tests/utils.rs
Normal file
@@ -0,0 +1,411 @@
#[cfg(test)]
mod test {
use rln::utils::{
bytes_be_to_fr, bytes_be_to_vec_fr, bytes_be_to_vec_u8, bytes_be_to_vec_usize,
bytes_le_to_fr, bytes_le_to_vec_fr, bytes_le_to_vec_u8, bytes_le_to_vec_usize,
fr_to_bytes_be, fr_to_bytes_le, normalize_usize_be, normalize_usize_le, str_to_fr,
vec_fr_to_bytes_be, vec_fr_to_bytes_le, vec_u8_to_bytes_be, vec_u8_to_bytes_le,
};

use ark_std::{rand::thread_rng, UniformRand};
use rln::circuit::Fr;

#[test]
fn test_normalize_usize_le() {
// Test basic cases
assert_eq!(normalize_usize_le(0), [0, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_le(1), [1, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_le(255), [255, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_le(256), [0, 1, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_le(65535), [255, 255, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_le(65536), [0, 0, 1, 0, 0, 0, 0, 0]);

// Test 32-bit boundary
assert_eq!(
normalize_usize_le(4294967295),
[255, 255, 255, 255, 0, 0, 0, 0]
);
assert_eq!(normalize_usize_le(4294967296), [0, 0, 0, 0, 1, 0, 0, 0]);

// Test maximum value
assert_eq!(
normalize_usize_le(usize::MAX),
[255, 255, 255, 255, 255, 255, 255, 255]
);

// Test that result is always 8 bytes
assert_eq!(normalize_usize_le(0).len(), 8);
assert_eq!(normalize_usize_le(usize::MAX).len(), 8);
}

#[test]
fn test_normalize_usize_be() {
// Test basic cases
assert_eq!(normalize_usize_be(0), [0, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(normalize_usize_be(1), [0, 0, 0, 0, 0, 0, 0, 1]);
assert_eq!(normalize_usize_be(255), [0, 0, 0, 0, 0, 0, 0, 255]);
assert_eq!(normalize_usize_be(256), [0, 0, 0, 0, 0, 0, 1, 0]);
assert_eq!(normalize_usize_be(65535), [0, 0, 0, 0, 0, 0, 255, 255]);
assert_eq!(normalize_usize_be(65536), [0, 0, 0, 0, 0, 1, 0, 0]);

// Test 32-bit boundary
assert_eq!(
normalize_usize_be(4294967295),
[0, 0, 0, 0, 255, 255, 255, 255]
);
assert_eq!(normalize_usize_be(4294967296), [0, 0, 0, 1, 0, 0, 0, 0]);

// Test maximum value
assert_eq!(
normalize_usize_be(usize::MAX),
[255, 255, 255, 255, 255, 255, 255, 255]
);

// Test that result is always 8 bytes
assert_eq!(normalize_usize_be(0).len(), 8);
assert_eq!(normalize_usize_be(usize::MAX).len(), 8);
}
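Editor's aside, not part of the diff: a plausible implementation consistent with the assertions above, padding the platform's usize to a fixed 8 bytes; treat this as a sketch, not the crate's actual code:

fn normalize_usize_le_sketch(value: usize) -> [u8; 8] {
    let mut out = [0u8; 8];
    let src = value.to_le_bytes(); // 4 bytes on 32-bit targets, 8 on 64-bit
    out[..src.len()].copy_from_slice(&src); // low bytes first: left-aligned
    out
}

fn normalize_usize_be_sketch(value: usize) -> [u8; 8] {
    let mut out = [0u8; 8];
    let src = value.to_be_bytes();
    out[8 - src.len()..].copy_from_slice(&src); // big-endian: right-aligned
    out
}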

#[test]
fn test_normalize_usize_endianness() {
// Test that little-endian and big-endian produce different results for non-zero values
let test_values = vec![1, 255, 256, 65535, 65536, 4294967295, 4294967296];

for &value in &test_values {
let le_result = normalize_usize_le(value);
let be_result = normalize_usize_be(value);

// For non-zero values, LE and BE should be different
assert_ne!(
le_result, be_result,
"LE and BE should differ for value {value}"
);

// Both should be 8 bytes
assert_eq!(le_result.len(), 8);
assert_eq!(be_result.len(), 8);
}

// Zero should be the same in both endiannesses
assert_eq!(normalize_usize_le(0), normalize_usize_be(0));
}

#[test]
fn test_normalize_usize_roundtrip() {
// Test that we can reconstruct the original value from the normalized bytes
let test_values = vec![
0,
1,
255,
256,
65535,
65536,
4294967295,
4294967296,
usize::MAX,
];

for &value in &test_values {
let le_bytes = normalize_usize_le(value);
let be_bytes = normalize_usize_be(value);

// Reconstruct from little-endian bytes
let reconstructed_le = usize::from_le_bytes(le_bytes);
assert_eq!(
reconstructed_le, value,
"LE roundtrip failed for value {value}"
);

// Reconstruct from big-endian bytes
let reconstructed_be = usize::from_be_bytes(be_bytes);
assert_eq!(
reconstructed_be, value,
"BE roundtrip failed for value {value}"
);
}
}

#[test]
fn test_normalize_usize_edge_cases() {
// Test edge cases and boundary values
let edge_cases = vec![
0,
1,
255,
256,
65535,
65536,
16777215, // 2^24 - 1
16777216, // 2^24
4294967295, // 2^32 - 1
4294967296, // 2^32
1099511627775, // 2^40 - 1
1099511627776, // 2^40
281474976710655, // 2^48 - 1
281474976710656, // 2^48
72057594037927935, // 2^56 - 1
72057594037927936, // 2^56
usize::MAX,
];

for &value in &edge_cases {
let le_result = normalize_usize_le(value);
let be_result = normalize_usize_be(value);

// Both should be 8 bytes
assert_eq!(le_result.len(), 8);
assert_eq!(be_result.len(), 8);

// Roundtrip should work
assert_eq!(usize::from_le_bytes(le_result), value);
assert_eq!(usize::from_be_bytes(be_result), value);
}
}

#[test]
fn test_normalize_usize_architecture_independence() {
// Test that the functions work consistently regardless of the underlying architecture
// This test ensures that the functions provide consistent 8-byte output
// even on 32-bit systems where usize might be 4 bytes

let test_values = vec![0, 1, 255, 256, 65535, 65536, 4294967295, 4294967296];

for &value in &test_values {
let le_result = normalize_usize_le(value);
let be_result = normalize_usize_be(value);

// Always 8 bytes regardless of architecture
assert_eq!(le_result.len(), 8);
assert_eq!(be_result.len(), 8);

// The result should be consistent with the original value
assert_eq!(usize::from_le_bytes(le_result), value);
assert_eq!(usize::from_be_bytes(be_result), value);
}
}

#[test]
fn test_fr_serialization_roundtrip() {
let mut rng = thread_rng();

// Test multiple random Fr values
for _ in 0..10 {
let fr = Fr::rand(&mut rng);

// Test little-endian roundtrip
let le_bytes = fr_to_bytes_le(&fr);
let (reconstructed_le, _) = bytes_le_to_fr(&le_bytes);
assert_eq!(fr, reconstructed_le);

// Test big-endian roundtrip
let be_bytes = fr_to_bytes_be(&fr);
let (reconstructed_be, _) = bytes_be_to_fr(&be_bytes);
assert_eq!(fr, reconstructed_be);
}
}

#[test]
fn test_vec_fr_serialization_roundtrip() {
let mut rng = thread_rng();

// Test with different vector sizes
for size in [0, 1, 5, 10] {
let fr_vec: Vec<Fr> = (0..size).map(|_| Fr::rand(&mut rng)).collect();

// Test little-endian roundtrip
let le_bytes = vec_fr_to_bytes_le(&fr_vec);
let (reconstructed_le, _) = bytes_le_to_vec_fr(&le_bytes).unwrap();
assert_eq!(fr_vec, reconstructed_le);

// Test big-endian roundtrip
let be_bytes = vec_fr_to_bytes_be(&fr_vec);
let (reconstructed_be, _) = bytes_be_to_vec_fr(&be_bytes).unwrap();
assert_eq!(fr_vec, reconstructed_be);
}
}

#[test]
fn test_vec_u8_serialization_roundtrip() {
// Test with different vector sizes and content
let test_cases = vec![
vec![],
vec![0],
vec![255],
vec![1, 2, 3, 4, 5],
vec![0, 255, 128, 64, 32, 16, 8, 4, 2, 1],
(0..100).collect::<Vec<u8>>(),
];

for test_case in test_cases {
// Test little-endian roundtrip
let le_bytes = vec_u8_to_bytes_le(&test_case);
let (reconstructed_le, _) = bytes_le_to_vec_u8(&le_bytes).unwrap();
assert_eq!(test_case, reconstructed_le);

// Test big-endian roundtrip
let be_bytes = vec_u8_to_bytes_be(&test_case);
let (reconstructed_be, _) = bytes_be_to_vec_u8(&be_bytes).unwrap();
assert_eq!(test_case, reconstructed_be);
}
}

#[test]
fn test_vec_usize_serialization_roundtrip() {
// Test with different vector sizes and content
let test_cases = vec![
vec![],
vec![0],
vec![usize::MAX],
vec![1, 2, 3, 4, 5],
vec![0, 255, 65535, 4294967295, usize::MAX],
(0..10).collect::<Vec<usize>>(),
];

for test_case in test_cases {
// Test little-endian roundtrip
let le_bytes = {
let mut bytes = Vec::new();
bytes.extend_from_slice(&normalize_usize_le(test_case.len()));
for &value in &test_case {
bytes.extend_from_slice(&normalize_usize_le(value));
}
bytes
};
let reconstructed_le = bytes_le_to_vec_usize(&le_bytes).unwrap();
assert_eq!(test_case, reconstructed_le);

// Test big-endian roundtrip
let be_bytes = {
let mut bytes = Vec::new();
bytes.extend_from_slice(&normalize_usize_be(test_case.len()));
for &value in &test_case {
bytes.extend_from_slice(&normalize_usize_be(value));
}
bytes
};
let reconstructed_be = bytes_be_to_vec_usize(&be_bytes).unwrap();
assert_eq!(test_case, reconstructed_be);
}
}
|
||||
|
||||
#[test]
|
||||
fn test_str_to_fr() {
|
||||
// Test valid hex strings
|
||||
let test_cases = vec![
|
||||
("0x0", 16, Fr::from(0u64)),
|
||||
("0x1", 16, Fr::from(1u64)),
|
||||
("0xff", 16, Fr::from(255u64)),
|
||||
("0x100", 16, Fr::from(256u64)),
|
||||
];
|
||||
|
||||
for (input, radix, expected) in test_cases {
|
||||
let result = str_to_fr(input, radix).unwrap();
|
||||
assert_eq!(result, expected);
|
||||
}
|
||||
|
||||
// Test invalid inputs
|
||||
assert!(str_to_fr("invalid", 16).is_err());
|
||||
assert!(str_to_fr("0x", 16).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_endianness_differences() {
|
||||
let mut rng = thread_rng();
|
||||
let fr = Fr::rand(&mut rng);
|
||||
|
||||
// Test that LE and BE produce different byte representations
|
||||
let le_bytes = fr_to_bytes_le(&fr);
|
||||
let be_bytes = fr_to_bytes_be(&fr);
|
||||
|
||||
// They should be different (unless the value is symmetric)
|
||||
if le_bytes != be_bytes {
|
||||
// Verify they can both be reconstructed correctly
|
||||
let (reconstructed_le, _) = bytes_le_to_fr(&le_bytes);
|
||||
let (reconstructed_be, _) = bytes_be_to_fr(&be_bytes);
|
||||
assert_eq!(fr, reconstructed_le);
|
||||
assert_eq!(fr, reconstructed_be);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
fn test_error_handling() {
    // A zero length prefix with no element data is valid: it decodes to an empty vector
    let valid_length_invalid_data = vec![0u8; 8]; // Length 0, no element data needed
    assert!(bytes_le_to_vec_u8(&valid_length_invalid_data).is_ok());
    assert!(bytes_be_to_vec_u8(&valid_length_invalid_data).is_ok());
    assert!(bytes_le_to_vec_fr(&valid_length_invalid_data).is_ok());
    assert!(bytes_be_to_vec_fr(&valid_length_invalid_data).is_ok());
    assert!(bytes_le_to_vec_usize(&valid_length_invalid_data).is_ok());
    assert!(bytes_be_to_vec_usize(&valid_length_invalid_data).is_ok());

    // Test with a non-zero length but insufficient data for vector deserialization
    let reasonable_length = {
        let mut bytes = vec![0u8; 8];
        bytes[0] = 1; // Length 1 read as little-endian (a huge length read as big-endian)
        bytes
    };
    // This should fail because we don't have enough data for the vector elements
    assert!(bytes_le_to_vec_u8(&reasonable_length).is_err());
    assert!(bytes_be_to_vec_u8(&reasonable_length).is_err());
    assert!(bytes_le_to_vec_fr(&reasonable_length).is_err());
    assert!(bytes_be_to_vec_fr(&reasonable_length).is_err());
    assert!(bytes_le_to_vec_usize(&reasonable_length).is_err());
    assert!(bytes_be_to_vec_usize(&reasonable_length).is_err());

    // Test with valid data for a u8 vector
    let valid_u8_data_le = {
        let mut bytes = vec![0u8; 9];
        bytes[..8].copy_from_slice(&(1u64.to_le_bytes())); // Length 1, little-endian
        bytes[8] = 42; // One byte of data
        bytes
    };
    let valid_u8_data_be = {
        let mut bytes = vec![0u8; 9];
        bytes[..8].copy_from_slice(&(1u64.to_be_bytes())); // Length 1, big-endian
        bytes[8] = 42; // One byte of data
        bytes
    };
    assert!(bytes_le_to_vec_u8(&valid_u8_data_le).is_ok());
    assert!(bytes_be_to_vec_u8(&valid_u8_data_be).is_ok());
}
#[test]
fn test_empty_vectors() {
    // Test empty vector serialization/deserialization
    let empty_fr: Vec<Fr> = vec![];
    let empty_u8: Vec<u8> = vec![];
    let empty_usize: Vec<usize> = vec![];

    // Test Fr vectors
    let le_fr_bytes = vec_fr_to_bytes_le(&empty_fr);
    let be_fr_bytes = vec_fr_to_bytes_be(&empty_fr);
    let (reconstructed_le_fr, _) = bytes_le_to_vec_fr(&le_fr_bytes).unwrap();
    let (reconstructed_be_fr, _) = bytes_be_to_vec_fr(&be_fr_bytes).unwrap();
    assert_eq!(empty_fr, reconstructed_le_fr);
    assert_eq!(empty_fr, reconstructed_be_fr);

    // Test u8 vectors
    let le_u8_bytes = vec_u8_to_bytes_le(&empty_u8);
    let be_u8_bytes = vec_u8_to_bytes_be(&empty_u8);
    let (reconstructed_le_u8, _) = bytes_le_to_vec_u8(&le_u8_bytes).unwrap();
    let (reconstructed_be_u8, _) = bytes_be_to_vec_u8(&be_u8_bytes).unwrap();
    assert_eq!(empty_u8, reconstructed_le_u8);
    assert_eq!(empty_u8, reconstructed_be_u8);

    // Test usize vectors
    let le_usize_bytes = {
        let mut bytes = Vec::new();
        bytes.extend_from_slice(&normalize_usize_le(0));
        bytes
    };
    let be_usize_bytes = {
        let mut bytes = Vec::new();
        bytes.extend_from_slice(&normalize_usize_be(0));
        bytes
    };
    let reconstructed_le_usize = bytes_le_to_vec_usize(&le_usize_bytes).unwrap();
    let reconstructed_be_usize = bytes_be_to_vec_usize(&be_usize_bytes).unwrap();
    assert_eq!(empty_usize, reconstructed_le_usize);
    assert_eq!(empty_usize, reconstructed_be_usize);
}
}
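The `vec_usize` round-trip tests above assemble the wire format by hand: an 8-byte length prefix followed by one 8-byte word per element. As a standalone illustration of that layout, here is a sketch with locally defined helpers (`encode`/`decode` are illustrative names, not the crate's API), assuming a 64-bit target:

```rust
// Local sketch of the length-prefixed layout exercised by the tests above.
fn encode(values: &[usize]) -> Vec<u8> {
    let mut out = Vec::with_capacity(8 * (values.len() + 1));
    out.extend_from_slice(&(values.len() as u64).to_le_bytes()); // 8-byte length prefix
    for &v in values {
        out.extend_from_slice(&(v as u64).to_le_bytes()); // fixed 8-byte element
    }
    out
}

fn decode(bytes: &[u8]) -> Option<Vec<usize>> {
    // Read the length prefix, then exactly `len` 8-byte elements.
    let len = u64::from_le_bytes(bytes.get(..8)?.try_into().ok()?) as usize;
    let mut values = Vec::with_capacity(len);
    for i in 0..len {
        let start = 8 + 8 * i;
        let chunk = bytes.get(start..start + 8)?; // None if data is short, like the error tests
        values.push(u64::from_le_bytes(chunk.try_into().ok()?) as usize);
    }
    Some(values)
}

fn main() {
    let original = vec![0usize, 255, 65536];
    let bytes = encode(&original);
    assert_eq!(decode(&bytes), Some(original));
}
```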
@@ -1,6 +1,6 @@
[package]
name = "zerokit_utils"
version = "0.5.2"
version = "0.7.0"
edition = "2021"
license = "MIT OR Apache-2.0"
description = "Various utilities for Zerokit"
@@ -12,28 +12,26 @@ repository = "https://github.com/vacp2p/zerokit"
bench = false

[dependencies]
ark-ff = { version = "0.5.0", default-features = false, features = [
    "parallel",
] }
num-bigint = { version = "0.4.6", default-features = false, features = [
    "rand",
] }
color-eyre = "0.6.3"
pmtree = { package = "vacp2p_pmtree", version = "2.0.2", optional = true }
ark-ff = { version = "0.5.0", default-features = false }
num-bigint = { version = "0.4.6", default-features = false }
pmtree = { package = "vacp2p_pmtree", version = "2.0.3", optional = true }
sled = "0.34.7"
serde = "1.0"
serde_json = "1.0.141"
lazy_static = "1.5.0"
hex = "0.4"
hex = "0.4.3"
rayon = "1.10.0"
thiserror = "2.0"

[dev-dependencies]
ark-bn254 = { version = "0.5.0", features = ["std"] }
num-traits = "0.2.19"
hex-literal = "1.0.0"
hex-literal = "0.4.1"
tiny-keccak = { version = "2.0.2", features = ["keccak"] }
criterion = { version = "0.4.0", features = ["html_reports"] }
criterion = { version = "0.7.0", features = ["html_reports"] }

[features]
default = []
parallel = ["ark-ff/parallel"]
pmtree-ft = ["pmtree"]

[[bench]]
@@ -43,3 +41,6 @@ harness = false
[[bench]]
name = "poseidon_benchmark"
harness = false

[package.metadata.docs.rs]
all-features = true
@@ -1,39 +1,45 @@
# Zerokit Utils Crate

[crates.io](https://crates.io/crates/zerokit_utils)
[MIT license](https://opensource.org/licenses/MIT)
[Apache-2.0 license](https://opensource.org/licenses/Apache-2.0)

Cryptographic primitives for zero-knowledge applications, featuring efficient Merkle tree implementations and a Poseidon hash function.
**Zerokit Utils** provides essential cryptographic primitives optimized for zero-knowledge applications.
This crate features efficient Merkle tree implementations and a Poseidon hash function,
designed to be robust and performant.

## Overview

This crate provides core cryptographic components optimized for zero-knowledge proof systems:

1. Multiple Merkle tree implementations with different space/time tradeoffs
2. A Poseidon hash implementation
- **Multiple Merkle Trees**: Various implementations optimized for the trade-off between space and time.
- **Poseidon Hash Function**: An efficient hashing algorithm suitable for ZK contexts, with customizable parameters.
- **Parallel Performance**: Leverages Rayon for significant speed-ups in Merkle tree computations.
- **Arkworks Compatibility**: The Poseidon hash implementation is designed to work seamlessly
  with Arkworks field traits and data structures.

## Merkle Tree Implementations

The crate supports two interchangeable Merkle tree implementations:
Merkle trees are fundamental data structures for verifying data integrity and set membership.
Zerokit Utils offers two interchangeable implementations:

- **FullMerkleTree**
  - Stores each tree node in memory
- **OptimalMerkleTree**
  - Only stores nodes used to prove accumulation of set leaves

### Understanding Merkle Tree Terminology

### Implementation notes
To better understand the structure and parameters of our Merkle trees, here's a quick glossary:

Glossary:
- **Depth (`depth`)**: the level of the leaves, counting from the root.
  If the root is at level 0, leaves are at level `depth`.
- **Number of Levels**: `depth + 1`.
- **Capacity (Number of Leaves)**: $2^{\text{depth}}$. This is the maximum number of leaves the tree can hold.
- **Total Number of Nodes**: $2^{(\text{depth} + 1)} - 1$ for a full binary tree.

* depth: level of leaves if we count levels from 0
* number of levels: depth + 1
* capacity (== number of leaves) -- 1 << depth
* total number of nodes: (1 << (depth + 1)) - 1
**Example for a tree with `depth: 3`**:

So for instance:
* depth: 3
* number of levels: 4
* capacity (number of leaves): 8
* total number of nodes: 15
- Number of Levels: 4 (levels 0, 1, 2, 3)
- Capacity (Number of Leaves): $2^3 = 8$
- Total Number of Nodes: $2^{(3+1)} - 1 = 15$

Visual representation of a Merkle tree with `depth: 3`:

```mermaid
flowchart TD
@@ -53,39 +59,55 @@ flowchart TD
    N6 -->|Leaf| L8
```

### Available Implementations

- **FullMerkleTree**
  - Stores all tree nodes in memory.
  - Use Case: Use when memory is abundant and operation speed is critical.

- **OptimalMerkleTree**
  - Stores only the nodes required to prove the accumulation of set leaves (i.e., authentication paths).
  - Use Case: Suited for environments where memory efficiency is a higher priority than raw speed.

#### Parallel Processing with Rayon

Both `OptimalMerkleTree` and `FullMerkleTree` internally utilize the Rayon crate
to accelerate computations through data parallelism.
This can lead to significant performance improvements, particularly during updates to large Merkle trees.
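For illustration, here is a minimal usage sketch based on the trait signatures in this release. It assumes a type implementing the crate's `Hasher` trait is in scope (the crate's benchmarks define a Keccak-based one; substitute your own):

```rust
use zerokit_utils::{FullMerkleConfig, FullMerkleTree, Hasher, ZerokitMerkleTree};

// `H` is any hasher implementation; see the benches for a Keccak-based example.
fn demo<H: Hasher>() -> Result<(), Box<dyn std::error::Error>> {
    // Depth 20 gives a capacity of 2^20 leaves.
    let mut tree =
        FullMerkleTree::<H>::new(20, H::default_leaf(), FullMerkleConfig::default())?;

    // Set a leaf, then prove and verify its membership against the root.
    tree.set(0, H::default_leaf())?;
    let proof = tree.proof(0)?;
    assert!(tree.verify(&tree.get(0)?, &proof)?);
    Ok(())
}
```

`OptimalMerkleTree` is a drop-in replacement here, since both types implement the same `ZerokitMerkleTree` trait.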
## Poseidon Hash Implementation

This crate provides an implementation to compute the Poseidon hash round constants and MDS matrices:
This crate provides an implementation for computing Poseidon hash round constants and MDS matrices.
Key characteristics include:

- **Customizable parameters**: Supports different security levels and input sizes
- **Arkworks-friendly**: Adapted to work over arkworks field traits and custom data structures
- **Customizable parameters**: Supports various security levels and input sizes,
  allowing you to tailor the hash function to your specific needs.
- **Arkworks-friendly**: Adapted to integrate smoothly with Arkworks field traits and custom data structures.

### Security Note
### ⚠️ Security Note

The MDS matrices are generated iteratively using the Grain LFSR until certain criteria are met.
According to the paper, such matrices must respect specific conditions, which are checked by 3 different algorithms in the reference implementation.
The MDS matrices used in the Poseidon hash function are generated iteratively
using the Grain LFSR (Linear Feedback Shift Register) algorithm until specific cryptographic criteria are met.

These validation algorithms are not currently implemented in this crate.
For the hardcoded parameters, the first random matrix generated satisfies these conditions.
If using different parameters, you should check against the reference implementation how many matrices are generated before outputting the correct one,
and pass this number to the `skip_matrices` parameter of the `find_poseidon_ark_and_mds` function.
- The reference Poseidon implementation includes validation algorithms to ensure these criteria are satisfied.
  These validation algorithms are not currently implemented in this crate.
- For the hardcoded parameters provided within this crate,
  the initially generated random matrix has been verified to meet these conditions.
- If you intend to use custom parameters, it is crucial to verify your generated MDS matrix.
  You should consult the Poseidon reference implementation to determine
  how many matrices are typically skipped before a valid one is found.
  This count should then be passed as the `skip_matrices` parameter to the `find_poseidon_ark_and_mds`
  function in this crate, as sketched below.
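A hedged sketch of that call follows. The argument list and order here are assumptions based on common Grain-LFSR parameter sets, not a verified API reference; consult the crate documentation for the exact signature:

```rust
use ark_bn254::Fr;
use zerokit_utils::find_poseidon_ark_and_mds;

fn main() {
    // Assumed argument order: prime bit length, rate, full rounds,
    // partial rounds, and how many candidate MDS matrices to skip.
    // `skip_matrices = 0` corresponds to the crate's hardcoded parameters,
    // where the first generated matrix is already valid.
    let (_round_constants, _mds_matrix) =
        find_poseidon_ark_and_mds::<Fr>(254, 2, 8, 56, 0);
}
```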
## Installation

Add Zerokit Utils to your Rust project:
Add zerokit-utils as a dependency to your Cargo.toml file:

```toml
[dependencies]
zerokit-utils = "0.5.1"
zerokit-utils = "0.6.0"
```

## Performance Considerations

- **FullMerkleTree**: Use when memory is abundant and operation speed is critical
- **OptimalMerkleTree**: Use when memory efficiency is more important than raw speed
- **Poseidon**: Offers a good balance between security and performance for ZK applications

## Building and Testing

```bash
@@ -1,7 +1,7 @@
use criterion::{criterion_group, criterion_main, Criterion};
use hex_literal::hex;
use lazy_static::lazy_static;
use std::{fmt::Display, str::FromStr};

use criterion::{criterion_group, criterion_main, Criterion};
use lazy_static::lazy_static;
use tiny_keccak::{Hasher as _, Keccak};
use zerokit_utils::{
    FullMerkleConfig, FullMerkleTree, Hasher, OptimalMerkleConfig, OptimalMerkleTree,
@@ -47,55 +47,78 @@ impl FromStr for TestFr {
}

lazy_static! {
    static ref LEAVES: [TestFr; 4] = [
        hex!("0000000000000000000000000000000000000000000000000000000000000001"),
        hex!("0000000000000000000000000000000000000000000000000000000000000002"),
        hex!("0000000000000000000000000000000000000000000000000000000000000003"),
        hex!("0000000000000000000000000000000000000000000000000000000000000004"),
    ]
    .map(TestFr);
    static ref LEAVES: Vec<TestFr> = {
        let mut leaves = Vec::with_capacity(1 << 20);
        for i in 0..(1 << 20) {
            let mut bytes = [0u8; 32];
            bytes[28..].copy_from_slice(&(i as u32).to_be_bytes());
            leaves.push(TestFr(bytes));
        }
        leaves
    };
    static ref INDICES: Vec<usize> = (0..(1 << 20)).collect();
}

const NOF_LEAVES: usize = 8192;

pub fn optimal_merkle_tree_benchmark(c: &mut Criterion) {
    let mut tree =
        OptimalMerkleTree::<Keccak256>::new(2, TestFr([0; 32]), OptimalMerkleConfig::default())
        OptimalMerkleTree::<Keccak256>::new(20, TestFr([0; 32]), OptimalMerkleConfig::default())
            .unwrap();

    for i in 0..NOF_LEAVES {
        tree.set(i, LEAVES[i % LEAVES.len()]).unwrap();
    }

    c.bench_function("OptimalMerkleTree::set", |b| {
        let mut index = NOF_LEAVES;
        b.iter(|| {
            tree.set(0, LEAVES[0]).unwrap();
            tree.set(index % (1 << 20), LEAVES[index % LEAVES.len()])
                .unwrap();
            index = (index + 1) % (1 << 20);
        })
    });

    c.bench_function("OptimalMerkleTree::delete", |b| {
        let mut index = 0;
        b.iter(|| {
            tree.delete(0).unwrap();
            tree.delete(index % NOF_LEAVES).unwrap();
            tree.set(index % NOF_LEAVES, LEAVES[index % LEAVES.len()])
                .unwrap();
            index = (index + 1) % NOF_LEAVES;
        })
    });

    c.bench_function("OptimalMerkleTree::override_range", |b| {
        let mut offset = 0;
        b.iter(|| {
            tree.override_range(0, LEAVES.into_iter(), [0, 1, 2, 3].into_iter())
                .unwrap();
        })
    });

    c.bench_function("OptimalMerkleTree::compute_root", |b| {
        b.iter(|| {
            tree.compute_root().unwrap();
            let range = offset..offset + NOF_LEAVES;
            tree.override_range(
                offset,
                LEAVES[range.clone()].iter().cloned(),
                INDICES[range.clone()].iter().cloned(),
            )
            .unwrap();
            offset = (offset + NOF_LEAVES) % (1 << 20);
        })
    });

    c.bench_function("OptimalMerkleTree::get", |b| {
        let mut index = 0;
        b.iter(|| {
            tree.get(0).unwrap();
            tree.get(index % NOF_LEAVES).unwrap();
            index = (index + 1) % NOF_LEAVES;
        })
    });

    // exercises the intermediate-node getter, which requires additional computation of the subtree root index
    c.bench_function("OptimalMerkleTree::get_subtree_root", |b| {
        let mut level = 1;
        let mut index = 0;
        b.iter(|| {
            tree.get_subtree_root(1, 0).unwrap();
            tree.get_subtree_root(level % 20, index % (1 << (20 - (level % 20))))
                .unwrap();
            index = (index + 1) % (1 << (20 - (level % 20)));
            level = 1 + (level % 20);
        })
    });

@@ -108,43 +131,61 @@ pub fn optimal_merkle_tree_benchmark(c: &mut Criterion) {

pub fn full_merkle_tree_benchmark(c: &mut Criterion) {
    let mut tree =
        FullMerkleTree::<Keccak256>::new(2, TestFr([0; 32]), FullMerkleConfig::default()).unwrap();
        FullMerkleTree::<Keccak256>::new(20, TestFr([0; 32]), FullMerkleConfig::default()).unwrap();

    for i in 0..NOF_LEAVES {
        tree.set(i, LEAVES[i % LEAVES.len()]).unwrap();
    }

    c.bench_function("FullMerkleTree::set", |b| {
        let mut index = NOF_LEAVES;
        b.iter(|| {
            tree.set(0, LEAVES[0]).unwrap();
            tree.set(index % (1 << 20), LEAVES[index % LEAVES.len()])
                .unwrap();
            index = (index + 1) % (1 << 20);
        })
    });

    c.bench_function("FullMerkleTree::delete", |b| {
        let mut index = 0;
        b.iter(|| {
            tree.delete(0).unwrap();
            tree.delete(index % NOF_LEAVES).unwrap();
            tree.set(index % NOF_LEAVES, LEAVES[index % LEAVES.len()])
                .unwrap();
            index = (index + 1) % NOF_LEAVES;
        })
    });

    c.bench_function("FullMerkleTree::override_range", |b| {
        let mut offset = 0;
        b.iter(|| {
            tree.override_range(0, LEAVES.into_iter(), [0, 1, 2, 3].into_iter())
                .unwrap();
        })
    });

    c.bench_function("FullMerkleTree::compute_root", |b| {
        b.iter(|| {
            tree.compute_root().unwrap();
            let range = offset..offset + NOF_LEAVES;
            tree.override_range(
                offset,
                LEAVES[range.clone()].iter().cloned(),
                INDICES[range.clone()].iter().cloned(),
            )
            .unwrap();
            offset = (offset + NOF_LEAVES) % (1 << 20);
        })
    });

    c.bench_function("FullMerkleTree::get", |b| {
        let mut index = 0;
        b.iter(|| {
            tree.get(0).unwrap();
            tree.get(index % NOF_LEAVES).unwrap();
            index = (index + 1) % NOF_LEAVES;
        })
    });

    // exercises the intermediate-node getter, which requires additional computation of the subtree root index
    c.bench_function("FullMerkleTree::get_subtree_root", |b| {
        let mut level = 1;
        let mut index = 0;
        b.iter(|| {
            tree.get_subtree_root(1, 0).unwrap();
            tree.get_subtree_root(level % 20, index % (1 << (20 - (level % 20))))
                .unwrap();
            index = (index + 1) % (1 << (20 - (level % 20)));
            level = 1 + (level % 20);
        })
    });

@@ -1,7 +1,7 @@
use std::hint::black_box;

use ark_bn254::Fr;
use criterion::{
    black_box, criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, Throughput,
};
use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, Throughput};
use zerokit_utils::Poseidon;

const ROUND_PARAMS: [(usize, usize, usize, usize); 8] = [
utils/src/merkle_tree/error.rs (new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum ZerokitMerkleTreeError {
|
||||
#[error("Invalid index")]
|
||||
InvalidIndex,
|
||||
// InvalidProof,
|
||||
#[error("Leaf index out of bounds")]
|
||||
InvalidLeaf,
|
||||
#[error("Level exceeds tree depth")]
|
||||
InvalidLevel,
|
||||
#[error("Subtree index out of bounds")]
|
||||
InvalidSubTreeIndex,
|
||||
#[error("Start level is != from end level")]
|
||||
InvalidStartAndEndLevel,
|
||||
#[error("set_range got too many leaves")]
|
||||
TooManySet,
|
||||
#[error("Unknown error while computing merkle proof")]
|
||||
ComputingProofError,
|
||||
#[error("Invalid witness length (!= tree depth)")]
|
||||
InvalidWitness,
|
||||
#[cfg(feature = "pmtree-ft")]
|
||||
#[error("Pmtree error: {0}")]
|
||||
PmtreeErrorKind(#[from] pmtree::PmtreeErrorKind),
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum FromConfigError {
|
||||
#[error("Error while reading pmtree config: {0}")]
|
||||
JsonError(#[from] serde_json::Error),
|
||||
#[error("Error while creating pmtree config: missing path")]
|
||||
MissingPath,
|
||||
#[error("Error while creating pmtree config: path already exists")]
|
||||
PathExists,
|
||||
}
|
||||
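A minimal sketch of consuming the new error type, for orientation. The import path for `ZerokitMerkleTreeError` is an assumption based on the `pub mod error;` declaration later in this diff; the `InvalidLeaf` case is what the tree getters return for out-of-bounds indices:

```rust
use zerokit_utils::{FullMerkleTree, Hasher, ZerokitMerkleTree};
// Assumed path; the diff declares `pub mod error;` inside the merkle_tree module.
use zerokit_utils::merkle_tree::error::ZerokitMerkleTreeError;

// Fall back to the default leaf when the index is out of bounds.
fn get_or_default<H: Hasher>(tree: &FullMerkleTree<H>, index: usize) -> H::Fr {
    match tree.get(index) {
        Ok(leaf) => leaf,
        Err(ZerokitMerkleTreeError::InvalidLeaf) => H::default_leaf(),
        Err(e) => panic!("unexpected merkle tree error: {e}"),
    }
}
```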
@@ -1,28 +1,29 @@
use crate::merkle_tree::{FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree};
use color_eyre::{Report, Result};
use std::{
    cmp::max,
    fmt::Debug,
    iter::{once, repeat_n, successors},
    iter::{once, repeat_n},
    str::FromStr,
};

use rayon::iter::{IntoParallelIterator, ParallelIterator};

use crate::merkle_tree::{
    error::{FromConfigError, ZerokitMerkleTreeError},
    FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES,
};
////////////////////////////////////////////////////////////
///// Full Merkle Tree Implementation
////////////////////////////////////////////////////////////

/// Merkle tree with all leaf and intermediate hashes stored
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct FullMerkleTree<H: Hasher> {
pub struct FullMerkleTree<H>
where
    H: Hasher,
{
    /// The depth of the tree, i.e. the number of levels from leaf to root
    depth: usize,

    /// The nodes cached from the empty part of the tree (where leaves are set to default).
    /// Since the rightmost part of the tree is usually changed much later than its creation,
    /// we can prove accumulation of elements in the leftmost part, with no need to initialize the full tree
    /// and by caching few intermediate nodes to the root computed from default leaves
    cached_nodes: Vec<H::Fr>,

    /// The tree nodes
    nodes: Vec<H::Fr>,

@@ -30,11 +31,11 @@ pub struct FullMerkleTree<H: Hasher> {
    /// Set to 0 if the leaf is empty and set to 1 otherwise.
    cached_leaves_indices: Vec<u8>,

    // The next available (i.e., never used) tree index. Equivalently, the number of leaves added to the tree
    // (deletions leave next_index unchanged)
    /// The next available (i.e., never used) tree index. Equivalently, the number of leaves added to the tree
    /// (deletions leave next_index unchanged)
    next_index: usize,

    // metadata that an application may use to store additional information
    /// metadata that an application may use to store additional information
    metadata: Vec<u8>,
}

@@ -56,9 +57,9 @@ pub struct FullMerkleProof<H: Hasher>(pub Vec<FullMerkleBranch<H>>);
pub struct FullMerkleConfig(());

impl FromStr for FullMerkleConfig {
    type Err = Report;
    type Err = FromConfigError;

    fn from_str(_s: &str) -> Result<Self> {
    fn from_str(_s: &str) -> Result<Self, Self::Err> {
        Ok(FullMerkleConfig::default())
    }
}
@@ -72,85 +73,89 @@ where
    type Hasher = H;
    type Config = FullMerkleConfig;

    fn default(depth: usize) -> Result<Self> {
    fn default(depth: usize) -> Result<Self, ZerokitMerkleTreeError> {
        FullMerkleTree::<H>::new(depth, Self::Hasher::default_leaf(), Self::Config::default())
    }

    /// Creates a new `MerkleTree`
    /// depth - the height of the tree made only of hash nodes. 2^depth is the maximum number of leaves hash nodes
    fn new(depth: usize, initial_leaf: FrOf<Self::Hasher>, _config: Self::Config) -> Result<Self> {
    /// depth - the depth of the tree made only of hash nodes. 2^depth is the maximum number of leaves hash nodes
    fn new(
        depth: usize,
        default_leaf: FrOf<Self::Hasher>,
        _config: Self::Config,
    ) -> Result<Self, ZerokitMerkleTreeError> {
        // Compute cache node values, leaf to root
        let cached_nodes = successors(Some(initial_leaf), |prev| Some(H::hash(&[*prev, *prev])))
            .take(depth + 1)
            .collect::<Vec<_>>();
        let mut cached_nodes: Vec<H::Fr> = Vec::with_capacity(depth + 1);
        cached_nodes.push(default_leaf);
        for i in 0..depth {
            cached_nodes.push(H::hash(&[cached_nodes[i]; 2]));
        }
        cached_nodes.reverse();

        // Compute node values
        let nodes = cached_nodes
            .iter()
            .rev()
            .enumerate()
            .flat_map(|(levels, hash)| repeat_n(hash, 1 << levels))
            .cloned()
            .collect::<Vec<_>>();
        debug_assert!(nodes.len() == (1 << (depth + 1)) - 1);

        let next_index = 0;

        Ok(Self {
            depth,
            cached_nodes,
            nodes,
            cached_leaves_indices: vec![0; 1 << depth],
            next_index,
            next_index: 0,
            metadata: Vec::new(),
        })
    }

    fn close_db_connection(&mut self) -> Result<()> {
    fn close_db_connection(&mut self) -> Result<(), ZerokitMerkleTreeError> {
        Ok(())
    }

    // Returns the depth of the tree
    /// Returns the depth of the tree
    fn depth(&self) -> usize {
        self.depth
    }

    // Returns the capacity of the tree, i.e. the maximum number of accumulatable leaves
    /// Returns the capacity of the tree, i.e. the maximum number of accumulatable leaves
    fn capacity(&self) -> usize {
        1 << self.depth
    }

    // Returns the total number of leaves set
    /// Returns the total number of leaves set
    fn leaves_set(&self) -> usize {
        self.next_index
    }

    // Returns the root of the tree
    /// Returns the root of the tree
    fn root(&self) -> FrOf<Self::Hasher> {
        self.nodes[0]
    }

    // Sets a leaf at the specified tree index
    fn set(&mut self, leaf: usize, hash: FrOf<Self::Hasher>) -> Result<()> {
    /// Sets a leaf at the specified tree index
    fn set(&mut self, leaf: usize, hash: FrOf<Self::Hasher>) -> Result<(), ZerokitMerkleTreeError> {
        self.set_range(leaf, once(hash))?;
        self.next_index = max(self.next_index, leaf + 1);
        Ok(())
    }

    // Get a leaf from the specified tree index
    fn get(&self, leaf: usize) -> Result<FrOf<Self::Hasher>> {
    /// Get a leaf from the specified tree index
    fn get(&self, leaf: usize) -> Result<FrOf<Self::Hasher>, ZerokitMerkleTreeError> {
        if leaf >= self.capacity() {
            return Err(Report::msg("leaf index out of bounds"));
            return Err(ZerokitMerkleTreeError::InvalidLeaf);
        }
        Ok(self.nodes[self.capacity() + leaf - 1])
    }

    fn get_subtree_root(&self, n: usize, index: usize) -> Result<H::Fr> {
    /// Returns the root of the subtree at level n and index
    fn get_subtree_root(&self, n: usize, index: usize) -> Result<H::Fr, ZerokitMerkleTreeError> {
        if n > self.depth() {
            return Err(Report::msg("level exceeds depth size"));
            return Err(ZerokitMerkleTreeError::InvalidIndex);
        }
        if index >= self.capacity() {
            return Err(Report::msg("index exceeds set size"));
            return Err(ZerokitMerkleTreeError::InvalidLeaf);
        }
        if n == 0 {
            Ok(self.root())
@@ -160,7 +165,7 @@ where
        let mut idx = self.capacity() + index - 1;
        let mut nd = self.depth;
        loop {
            let parent = self.parent(idx).unwrap();
            let parent = self.parent(idx).expect("parent should exist");
            nd -= 1;
            if nd == n {
                return Ok(self.nodes[parent]);
@@ -171,6 +176,8 @@ where
            }
        }
    }

    /// Returns the indices of the leaves that are empty
    fn get_empty_leaves_indices(&self) -> Vec<usize> {
        self.cached_leaves_indices
            .iter()
@@ -181,40 +188,45 @@ where
            .collect()
    }

    // Sets tree nodes, starting from start index
    // Function proper of FullMerkleTree implementation
    fn set_range<I: IntoIterator<Item = FrOf<Self::Hasher>>>(
    /// Sets multiple leaves from the specified tree index
    fn set_range<I: ExactSizeIterator<Item = FrOf<Self::Hasher>>>(
        &mut self,
        start: usize,
        hashes: I,
    ) -> Result<()> {
        leaves: I,
    ) -> Result<(), ZerokitMerkleTreeError> {
        let index = self.capacity() + start - 1;
        let mut count = 0;
        // first count number of hashes, and check that they fit in the tree
        // first count number of leaves, and check that they fit in the tree
        // then insert into the tree
        let hashes = hashes.into_iter().collect::<Vec<_>>();
        if hashes.len() + start > self.capacity() {
            return Err(Report::msg("provided hashes do not fit in the tree"));
        let leaves = leaves.into_iter().collect::<Vec<_>>();
        if leaves.len() + start > self.capacity() {
            return Err(ZerokitMerkleTreeError::TooManySet);
        }
        hashes.into_iter().for_each(|hash| {
        leaves.into_iter().for_each(|hash| {
            self.nodes[index + count] = hash;
            self.cached_leaves_indices[start + count] = 1;
            count += 1;
        });
        if count != 0 {
            self.update_nodes(index, index + (count - 1))?;
            self.update_hashes(index, index + (count - 1))?;
            self.next_index = max(self.next_index, start + count);
        }
        Ok(())
    }

    fn override_range<I, J>(&mut self, start: usize, leaves: I, indices: J) -> Result<()>
    /// Overrides a range of leaves while resetting specified indices to default and preserving unaffected values.
    fn override_range<I, J>(
        &mut self,
        start: usize,
        leaves: I,
        indices: J,
    ) -> Result<(), ZerokitMerkleTreeError>
    where
        I: IntoIterator<Item = FrOf<Self::Hasher>>,
        J: IntoIterator<Item = usize>,
        I: ExactSizeIterator<Item = FrOf<Self::Hasher>>,
        J: ExactSizeIterator<Item = usize>,
    {
        let indices = indices.into_iter().collect::<Vec<_>>();
        let min_index = *indices.first().unwrap();
        let min_index = *indices.first().expect("indices should not be empty");
        let leaves_vec = leaves.into_iter().collect::<Vec<_>>();

        let max_index = start + leaves_vec.len();
@@ -237,17 +249,16 @@ where
        }

        self.set_range(start, set_values.into_iter())
            .map_err(|e| Report::msg(e.to_string()))
    }

    // Sets a leaf at the next available index
    fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<()> {
    /// Sets a leaf at the next available index
    fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<(), ZerokitMerkleTreeError> {
        self.set(self.next_index, leaf)?;
        Ok(())
    }

    // Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
    fn delete(&mut self, index: usize) -> Result<()> {
    /// Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
    fn delete(&mut self, index: usize) -> Result<(), ZerokitMerkleTreeError> {
        // We reset the leaf only if we previously set a leaf at that index
        if index < self.next_index {
            self.set(index, H::default_leaf())?;
@@ -257,9 +268,9 @@ where
    }

    // Computes a merkle proof for the leaf at the specified index
    fn proof(&self, leaf: usize) -> Result<FullMerkleProof<H>> {
    fn proof(&self, leaf: usize) -> Result<FullMerkleProof<H>, ZerokitMerkleTreeError> {
        if leaf >= self.capacity() {
            return Err(Report::msg("index exceeds set size"));
            return Err(ZerokitMerkleTreeError::InvalidLeaf);
        }
        let mut index = self.capacity() + leaf - 1;
        let mut path = Vec::with_capacity(self.depth + 1);
@@ -276,30 +287,29 @@ where
    }

    // Verifies a Merkle proof with respect to the input leaf and the tree root
    fn verify(&self, hash: &FrOf<Self::Hasher>, proof: &FullMerkleProof<H>) -> Result<bool> {
    fn verify(
        &self,
        hash: &FrOf<Self::Hasher>,
        proof: &FullMerkleProof<H>,
    ) -> Result<bool, ZerokitMerkleTreeError> {
        Ok(proof.compute_root_from(hash) == self.root())
    }

    fn compute_root(&mut self) -> Result<FrOf<Self::Hasher>> {
        Ok(self.root())
    }

    fn set_metadata(&mut self, metadata: &[u8]) -> Result<()> {
    fn set_metadata(&mut self, metadata: &[u8]) -> Result<(), ZerokitMerkleTreeError> {
        self.metadata = metadata.to_vec();
        Ok(())
    }

    fn metadata(&self) -> Result<Vec<u8>> {
    fn metadata(&self) -> Result<Vec<u8>, ZerokitMerkleTreeError> {
        Ok(self.metadata.to_vec())
    }
}

// Utilities for updating the tree nodes
impl<H: Hasher> FullMerkleTree<H>
where
    H: Hasher,
{
    // Utilities for updating the tree nodes

    /// For a given node index, return the parent node index
    /// Returns None if there is no parent (root node)
    fn parent(&self, index: usize) -> Option<usize> {
@@ -315,23 +325,60 @@ where
        (index << 1) + 1
    }

    /// Returns the depth level of a node based on its index in the flattened tree.
    fn levels(&self, index: usize) -> usize {
        // `n.next_power_of_two()` will return `n` iff `n` is a power of two.
        // The extra offset corrects this.
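        // Worked example (added for illustration): for index 0 (the root),
        // (0 + 2) = 2, next_power_of_two() = 2, trailing_zeros() = 1, so level 0.
        // For index 5, (5 + 2) = 7, next_power_of_two() = 8, trailing_zeros() = 3,
        // so level 2, i.e. floor(log2(index + 1)) for a 0-indexed flattened tree.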
        (index + 2).next_power_of_two().trailing_zeros() as usize - 1
    }

    fn update_nodes(&mut self, start: usize, end: usize) -> Result<()> {
        if self.levels(start) != self.levels(end) {
            return Err(Report::msg("self.levels(start) != self.levels(end)"));
    /// Updates parent hashes after modifying a range of nodes at the same level.
    ///
    /// - `start_index`: The first index at the current level that was updated.
    /// - `end_index`: The last index (inclusive) at the same level that was updated.
    fn update_hashes(
        &mut self,
        start_index: usize,
        end_index: usize,
    ) -> Result<(), ZerokitMerkleTreeError> {
        // Ensure the range is within the same tree level
        if self.levels(start_index) != self.levels(end_index) {
            return Err(ZerokitMerkleTreeError::InvalidStartAndEndLevel);
        }
        if let (Some(start), Some(end)) = (self.parent(start), self.parent(end)) {
            for parent in start..=end {
                let child = self.first_child(parent);
                self.nodes[parent] = H::hash(&[self.nodes[child], self.nodes[child + 1]]);

        // Compute parent indices for the range
        if let (Some(start_parent), Some(end_parent)) =
            (self.parent(start_index), self.parent(end_index))
        {
            // Use parallel processing when the number of pairs exceeds the threshold
            if end_parent - start_parent + 1 >= MIN_PARALLEL_NODES {
                let updates: Vec<(usize, H::Fr)> = (start_parent..=end_parent)
                    .into_par_iter()
                    .map(|parent| {
                        let left_child = self.first_child(parent);
                        let right_child = left_child + 1;
                        let hash = H::hash(&[self.nodes[left_child], self.nodes[right_child]]);
                        (parent, hash)
                    })
                    .collect();

                for (parent, hash) in updates {
                    self.nodes[parent] = hash;
                }
            } else {
                // Otherwise, fall back to sequential update for small ranges
                for parent in start_parent..=end_parent {
                    let left_child = self.first_child(parent);
                    let right_child = left_child + 1;
                    self.nodes[parent] =
                        H::hash(&[self.nodes[left_child], self.nodes[right_child]]);
                }
            }
            self.update_nodes(start, end)?;

            // Recurse to update upper levels
            self.update_hashes(start_parent, end_parent)?;
        }

        Ok(())
    }
}
@@ -8,20 +8,25 @@
// and https://github.com/worldcoin/semaphore-rs/blob/d462a4372f1fd9c27610f2acfe4841fab1d396aa/src/merkle_tree.rs

//!
//! # To do
//! # TODO
//!
//! * Disk based storage backend (using mmaped files should be easy)
//! * Implement serialization for tree and Merkle proof

use std::str::FromStr;
use crate::merkle_tree::error::ZerokitMerkleTreeError;
use std::{
    fmt::{Debug, Display},
    str::FromStr,
};

use color_eyre::Result;
/// Enables parallel hashing when there are at least 8 nodes (4 pairs to hash), justifying the overhead.
pub const MIN_PARALLEL_NODES: usize = 8;

/// In the Hasher trait we define the node type, the default leaf
/// and the hash function used to initialize a Merkle Tree implementation
pub trait Hasher {
    /// Type of the leaf and tree node
    type Fr: Clone + Copy + Eq + Default + std::fmt::Debug + std::fmt::Display + FromStr;
    type Fr: Clone + Copy + Eq + Default + Debug + Display + FromStr + Send + Sync;

    /// Returns the default tree leaf
    fn default_leaf() -> Self::Fr;
@@ -39,35 +44,52 @@ pub trait ZerokitMerkleTree {
    type Hasher: Hasher;
    type Config: Default + FromStr;

    fn default(depth: usize) -> Result<Self>
    fn default(depth: usize) -> Result<Self, ZerokitMerkleTreeError>
    where
        Self: Sized;
    fn new(depth: usize, default_leaf: FrOf<Self::Hasher>, config: Self::Config) -> Result<Self>
    fn new(
        depth: usize,
        default_leaf: FrOf<Self::Hasher>,
        config: Self::Config,
    ) -> Result<Self, ZerokitMerkleTreeError>
    where
        Self: Sized;
    fn depth(&self) -> usize;
    fn capacity(&self) -> usize;
    fn leaves_set(&self) -> usize;
    fn root(&self) -> FrOf<Self::Hasher>;
    fn compute_root(&mut self) -> Result<FrOf<Self::Hasher>>;
    fn get_subtree_root(&self, n: usize, index: usize) -> Result<FrOf<Self::Hasher>>;
    fn set(&mut self, index: usize, leaf: FrOf<Self::Hasher>) -> Result<()>;
    fn set_range<I>(&mut self, start: usize, leaves: I) -> Result<()>
    fn get_subtree_root(
        &self,
        n: usize,
        index: usize,
    ) -> Result<FrOf<Self::Hasher>, ZerokitMerkleTreeError>;
    fn set(&mut self, index: usize, leaf: FrOf<Self::Hasher>)
        -> Result<(), ZerokitMerkleTreeError>;
    fn set_range<I>(&mut self, start: usize, leaves: I) -> Result<(), ZerokitMerkleTreeError>
    where
        I: ExactSizeIterator<Item = FrOf<Self::Hasher>>;
    fn get(&self, index: usize) -> Result<FrOf<Self::Hasher>>;
    fn get(&self, index: usize) -> Result<FrOf<Self::Hasher>, ZerokitMerkleTreeError>;
    fn get_empty_leaves_indices(&self) -> Vec<usize>;
    fn override_range<I, J>(&mut self, start: usize, leaves: I, to_remove_indices: J) -> Result<()>
    fn override_range<I, J>(
        &mut self,
        start: usize,
        leaves: I,
        to_remove_indices: J,
    ) -> Result<(), ZerokitMerkleTreeError>
    where
        I: ExactSizeIterator<Item = FrOf<Self::Hasher>>,
        J: ExactSizeIterator<Item = usize>;
    fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<()>;
    fn delete(&mut self, index: usize) -> Result<()>;
    fn proof(&self, index: usize) -> Result<Self::Proof>;
    fn verify(&self, leaf: &FrOf<Self::Hasher>, witness: &Self::Proof) -> Result<bool>;
    fn set_metadata(&mut self, metadata: &[u8]) -> Result<()>;
    fn metadata(&self) -> Result<Vec<u8>>;
    fn close_db_connection(&mut self) -> Result<()>;
    fn update_next(&mut self, leaf: FrOf<Self::Hasher>) -> Result<(), ZerokitMerkleTreeError>;
    fn delete(&mut self, index: usize) -> Result<(), ZerokitMerkleTreeError>;
    fn proof(&self, index: usize) -> Result<Self::Proof, ZerokitMerkleTreeError>;
    fn verify(
        &self,
        leaf: &FrOf<Self::Hasher>,
        witness: &Self::Proof,
    ) -> Result<bool, ZerokitMerkleTreeError>;
    fn set_metadata(&mut self, metadata: &[u8]) -> Result<(), ZerokitMerkleTreeError>;
    fn metadata(&self) -> Result<Vec<u8>, ZerokitMerkleTreeError>;
    fn close_db_connection(&mut self) -> Result<(), ZerokitMerkleTreeError>;
}

pub trait ZerokitMerkleProof {
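For reference, the benchmarks in this diff drive these traits through a Keccak-based hasher. A condensed sketch of such a `Hasher` implementation follows; the `hash` signature is inferred from call sites like `H::hash(&[left, right])`, so treat it as an assumption, and note it relies on the `tiny-keccak` and `hex` crates (both dev-dependencies in the Cargo.toml hunk above):

```rust
use std::{fmt, str::FromStr};
use tiny_keccak::{Hasher as _, Keccak};

/// 32-byte node type; the Display/FromStr impls are minimal illustrations.
#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)]
pub struct TestFr(pub [u8; 32]);

impl fmt::Display for TestFr {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", hex::encode(self.0))
    }
}

impl FromStr for TestFr {
    type Err = hex::FromHexError;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let bytes = hex::decode(s)?;
        let mut out = [0u8; 32];
        out.copy_from_slice(&bytes); // panics on wrong length; illustration only
        Ok(TestFr(out))
    }
}

pub struct Keccak256;

impl zerokit_utils::Hasher for Keccak256 {
    type Fr = TestFr;

    fn default_leaf() -> Self::Fr {
        TestFr([0; 32])
    }

    // Inferred signature: hash a slice of nodes into one node.
    fn hash(inputs: &[Self::Fr]) -> Self::Fr {
        let mut output = [0u8; 32];
        let mut hasher = Keccak::v256();
        for input in inputs {
            hasher.update(&input.0);
        }
        hasher.finalize(&mut output);
        TestFr(output)
    }
}
```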
@@ -1,7 +1,11 @@
pub mod error;
pub mod full_merkle_tree;
#[allow(clippy::module_inception)]
pub mod merkle_tree;
pub mod optimal_merkle_tree;
pub use self::full_merkle_tree::*;
pub use self::merkle_tree::*;
pub use self::optimal_merkle_tree::*;

pub use self::full_merkle_tree::{FullMerkleConfig, FullMerkleProof, FullMerkleTree};
pub use self::merkle_tree::{
    FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES,
};
pub use self::optimal_merkle_tree::{OptimalMerkleConfig, OptimalMerkleProof, OptimalMerkleTree};
@@ -1,10 +1,11 @@
|
||||
use crate::merkle_tree::{Hasher, ZerokitMerkleProof, ZerokitMerkleTree};
|
||||
use crate::FrOf;
|
||||
use color_eyre::{Report, Result};
|
||||
use std::cmp::min;
|
||||
use std::collections::HashMap;
|
||||
use std::str::FromStr;
|
||||
use std::{cmp::max, fmt::Debug};
|
||||
use std::{cmp::max, collections::HashMap, fmt::Debug, str::FromStr};
|
||||
|
||||
use rayon::iter::{IntoParallelIterator, ParallelIterator};
|
||||
|
||||
use crate::merkle_tree::{
|
||||
error::{FromConfigError, ZerokitMerkleTreeError},
|
||||
FrOf, Hasher, ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES,
|
||||
};
|
||||
////////////////////////////////////////////////////////////
|
||||
///// Optimal Merkle Tree Implementation
|
||||
////////////////////////////////////////////////////////////
|
||||
@@ -31,11 +32,11 @@ where
|
||||
/// Set to 0 if the leaf is empty and set to 1 in otherwise.
|
||||
cached_leaves_indices: Vec<u8>,
|
||||
|
||||
// The next available (i.e., never used) tree index. Equivalently, the number of leaves added to the tree
|
||||
// (deletions leave next_index unchanged)
|
||||
/// The next available (i.e., never used) tree index. Equivalently, the number of leaves added to the tree
|
||||
/// (deletions leave next_index unchanged)
|
||||
next_index: usize,
|
||||
|
||||
// metadata that an application may use to store additional information
|
||||
/// metadata that an application may use to store additional information
|
||||
metadata: Vec<u8>,
|
||||
}
|
||||
|
||||
@@ -48,17 +49,14 @@ pub struct OptimalMerkleProof<H: Hasher>(pub Vec<(H::Fr, u8)>);
|
||||
pub struct OptimalMerkleConfig(());
|
||||
|
||||
impl FromStr for OptimalMerkleConfig {
|
||||
type Err = Report;
|
||||
type Err = FromConfigError;
|
||||
|
||||
fn from_str(_s: &str) -> Result<Self> {
|
||||
fn from_str(_s: &str) -> Result<Self, Self::Err> {
|
||||
Ok(OptimalMerkleConfig::default())
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
///// Implementations
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
/// Implementations
|
||||
impl<H: Hasher> ZerokitMerkleTree for OptimalMerkleTree<H>
|
||||
where
|
||||
H: Hasher,
|
||||
@@ -67,22 +65,28 @@ where
|
||||
type Hasher = H;
|
||||
type Config = OptimalMerkleConfig;
|
||||
|
||||
fn default(depth: usize) -> Result<Self> {
|
||||
fn default(depth: usize) -> Result<Self, ZerokitMerkleTreeError> {
|
||||
OptimalMerkleTree::<H>::new(depth, H::default_leaf(), Self::Config::default())
|
||||
}
|
||||
|
||||
/// Creates a new `MerkleTree`
|
||||
/// depth - the height of the tree made only of hash nodes. 2^depth is the maximum number of leaves hash nodes
|
||||
fn new(depth: usize, default_leaf: H::Fr, _config: Self::Config) -> Result<Self> {
|
||||
/// depth - the depth of the tree made only of hash nodes. 2^depth is the maximum number of leaves hash nodes
|
||||
fn new(
|
||||
depth: usize,
|
||||
default_leaf: H::Fr,
|
||||
_config: Self::Config,
|
||||
) -> Result<Self, ZerokitMerkleTreeError> {
|
||||
// Compute cache node values, leaf to root
|
||||
let mut cached_nodes: Vec<H::Fr> = Vec::with_capacity(depth + 1);
|
||||
cached_nodes.push(default_leaf);
|
||||
for i in 0..depth {
|
||||
cached_nodes.push(H::hash(&[cached_nodes[i]; 2]));
|
||||
}
|
||||
cached_nodes.reverse();
|
||||
|
||||
Ok(OptimalMerkleTree {
|
||||
cached_nodes,
|
||||
depth,
|
||||
cached_nodes,
|
||||
nodes: HashMap::with_capacity(1 << depth),
|
||||
cached_leaves_indices: vec![0; 1 << depth],
|
||||
next_index: 0,
|
||||
@@ -90,36 +94,57 @@ where
|
||||
})
|
||||
}
|
||||
|
||||
fn close_db_connection(&mut self) -> Result<()> {
|
||||
fn close_db_connection(&mut self) -> Result<(), ZerokitMerkleTreeError> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Returns the depth of the tree
|
||||
/// Returns the depth of the tree
|
||||
fn depth(&self) -> usize {
|
||||
self.depth
|
||||
}
|
||||
|
||||
// Returns the capacity of the tree, i.e. the maximum number of accumulatable leaves
|
||||
/// Returns the capacity of the tree, i.e. the maximum number of accumulatable leaves
|
||||
fn capacity(&self) -> usize {
|
||||
1 << self.depth
|
||||
}
|
||||
|
||||
// Returns the total number of leaves set
|
||||
/// Returns the total number of leaves set
|
||||
fn leaves_set(&self) -> usize {
|
||||
self.next_index
|
||||
}
|
||||
|
||||
// Returns the root of the tree
|
||||
/// Returns the root of the tree
|
||||
fn root(&self) -> H::Fr {
|
||||
self.get_node(0, 0)
|
||||
}
|
||||
|
||||
fn get_subtree_root(&self, n: usize, index: usize) -> Result<H::Fr> {
|
||||
/// Sets a leaf at the specified tree index
|
||||
fn set(&mut self, index: usize, leaf: H::Fr) -> Result<(), ZerokitMerkleTreeError> {
|
||||
if index >= self.capacity() {
|
||||
return Err(ZerokitMerkleTreeError::InvalidLeaf);
|
||||
}
|
||||
self.nodes.insert((self.depth, index), leaf);
|
||||
self.update_hashes(index, 1)?;
|
||||
self.next_index = max(self.next_index, index + 1);
|
||||
self.cached_leaves_indices[index] = 1;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get a leaf from the specified tree index
|
||||
fn get(&self, index: usize) -> Result<H::Fr, ZerokitMerkleTreeError> {
|
||||
if index >= self.capacity() {
|
||||
return Err(ZerokitMerkleTreeError::InvalidLeaf);
|
||||
}
|
||||
Ok(self.get_node(self.depth, index))
|
||||
}
|
||||
|
||||
/// Returns the root of the subtree at level n and index
|
||||
fn get_subtree_root(&self, n: usize, index: usize) -> Result<H::Fr, ZerokitMerkleTreeError> {
|
||||
if n > self.depth() {
|
||||
return Err(Report::msg("level exceeds depth size"));
|
||||
return Err(ZerokitMerkleTreeError::InvalidLevel);
|
||||
}
|
||||
if index >= self.capacity() {
|
||||
return Err(Report::msg("index exceeds set size"));
|
||||
return Err(ZerokitMerkleTreeError::InvalidLeaf);
|
||||
}
|
||||
if n == 0 {
|
||||
Ok(self.root())
|
||||
@@ -130,26 +155,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
// Sets a leaf at the specified tree index
|
||||
fn set(&mut self, index: usize, leaf: H::Fr) -> Result<()> {
|
||||
if index >= self.capacity() {
|
||||
return Err(Report::msg("index exceeds set size"));
|
||||
}
|
||||
self.nodes.insert((self.depth, index), leaf);
|
||||
self.update_hashes(index, 1)?;
|
||||
self.next_index = max(self.next_index, index + 1);
|
||||
self.cached_leaves_indices[index] = 1;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Get a leaf from the specified tree index
|
||||
fn get(&self, index: usize) -> Result<H::Fr> {
|
||||
if index >= self.capacity() {
|
||||
return Err(Report::msg("index exceeds set size"));
|
||||
}
|
||||
Ok(self.get_node(self.depth, index))
|
||||
}
|
||||
|
||||
/// Returns the indices of the leaves that are empty
|
||||
fn get_empty_leaves_indices(&self) -> Vec<usize> {
|
||||
self.cached_leaves_indices
|
||||
.iter()
|
||||
@@ -160,16 +166,16 @@ where
|
||||
.collect()
|
||||
}
|
||||
|
||||
// Sets multiple leaves from the specified tree index
|
||||
/// Sets multiple leaves from the specified tree index
|
||||
fn set_range<I: ExactSizeIterator<Item = H::Fr>>(
|
||||
&mut self,
|
||||
start: usize,
|
||||
leaves: I,
|
||||
) -> Result<()> {
|
||||
) -> Result<(), ZerokitMerkleTreeError> {
|
||||
// check if the range is valid
|
||||
let leaves_len = leaves.len();
|
||||
if start + leaves_len > self.capacity() {
|
||||
return Err(Report::msg("provided range exceeds set size"));
|
||||
return Err(ZerokitMerkleTreeError::TooManySet);
|
||||
}
|
||||
for (i, leaf) in leaves.enumerate() {
|
||||
self.nodes.insert((self.depth, start + i), leaf);
|
||||
@@ -180,13 +186,19 @@ where
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn override_range<I, J>(&mut self, start: usize, leaves: I, indices: J) -> Result<()>
|
||||
/// Overrides a range of leaves while resetting specified indices to default and preserving unaffected values.
|
||||
fn override_range<I, J>(
|
||||
&mut self,
|
||||
start: usize,
|
||||
leaves: I,
|
||||
indices: J,
|
||||
) -> Result<(), ZerokitMerkleTreeError>
|
||||
where
|
||||
I: ExactSizeIterator<Item = FrOf<Self::Hasher>>,
|
||||
J: ExactSizeIterator<Item = usize>,
|
||||
{
|
||||
let indices = indices.into_iter().collect::<Vec<_>>();
|
||||
let min_index = *indices.first().unwrap();
|
||||
let min_index = *indices.first().expect("indices should not be empty");
|
||||
let leaves_vec = leaves.into_iter().collect::<Vec<_>>();
|
||||
|
||||
let max_index = start + leaves_vec.len();
|
||||
@@ -195,7 +207,7 @@ where
|
||||
|
||||
for i in min_index..start {
|
||||
if !indices.contains(&i) {
|
||||
let value = self.get_leaf(i);
|
||||
let value = self.get(i)?;
|
||||
set_values[i - min_index] = value;
|
||||
}
|
||||
}
|
||||
@@ -209,17 +221,16 @@ where
|
||||
}
|
||||
|
||||
self.set_range(start, set_values.into_iter())
|
||||
.map_err(|e| Report::msg(e.to_string()))
|
||||
}
|
||||
|
||||
// Sets a leaf at the next available index
|
||||
fn update_next(&mut self, leaf: H::Fr) -> Result<()> {
|
||||
/// Sets a leaf at the next available index
|
||||
fn update_next(&mut self, leaf: H::Fr) -> Result<(), ZerokitMerkleTreeError> {
|
||||
self.set(self.next_index, leaf)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
|
||||
fn delete(&mut self, index: usize) -> Result<()> {
|
||||
/// Deletes a leaf at a certain index by setting it to its default value (next_index is not updated)
|
||||
fn delete(&mut self, index: usize) -> Result<(), ZerokitMerkleTreeError> {
|
||||
// We reset the leaf only if we previously set a leaf at that index
|
||||
if index < self.next_index {
|
||||
self.set(index, H::default_leaf())?;
|
||||
@@ -228,17 +239,20 @@ where
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Computes a merkle proof the leaf at the specified index
|
||||
fn proof(&self, index: usize) -> Result<Self::Proof> {
|
||||
/// Computes a merkle proof the leaf at the specified index
|
||||
fn proof(&self, index: usize) -> Result<Self::Proof, ZerokitMerkleTreeError> {
|
||||
if index >= self.capacity() {
|
||||
return Err(Report::msg("index exceeds set size"));
|
||||
return Err(ZerokitMerkleTreeError::InvalidLeaf);
|
||||
}
|
||||
let mut witness = Vec::<(H::Fr, u8)>::with_capacity(self.depth);
|
||||
let mut i = index;
|
||||
let mut depth = self.depth;
|
||||
loop {
|
||||
i ^= 1;
|
||||
witness.push((self.get_node(depth, i), (1 - (i & 1)).try_into().unwrap()));
|
||||
witness.push((
|
||||
self.get_node(depth, i),
|
||||
(1 - (i & 1)).try_into().expect("0 or 1 expected"),
|
||||
));
|
||||
i >>= 1;
|
||||
depth -= 1;
|
||||
if depth == 0 {
|
||||
@@ -246,130 +260,100 @@ where
             }
         }
         if i != 0 {
-            Err(Report::msg("i != 0"))
+            Err(ZerokitMerkleTreeError::ComputingProofError)
         } else {
             Ok(OptimalMerkleProof(witness))
         }
     }

-    // Verifies a Merkle proof with respect to the input leaf and the tree root
-    fn verify(&self, leaf: &H::Fr, witness: &Self::Proof) -> Result<bool> {
+    /// Verifies a Merkle proof with respect to the input leaf and the tree root
+    fn verify(&self, leaf: &H::Fr, witness: &Self::Proof) -> Result<bool, ZerokitMerkleTreeError> {
         if witness.length() != self.depth {
-            return Err(Report::msg("witness length doesn't match tree depth"));
+            return Err(ZerokitMerkleTreeError::InvalidWitness);
         }
         let expected_root = witness.compute_root_from(leaf);
         Ok(expected_root.eq(&self.root()))
     }
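The two methods compose into a round trip: `proof` collects one sibling per level, `verify` folds them back into a root and compares. A sketch of that round trip, using only calls exercised by the tests later in this diff (hypothetical `tree` with `leaf` already set at `index`):

```rust
// Sketch of the proof/verify round trip.
let proof = tree.proof(index)?;
assert_eq!(proof.leaf_index(), index);
assert!(tree.verify(&leaf, &proof)?); // recomputed root == tree.root()
assert_eq!(proof.compute_root_from(&leaf), tree.root());
```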

     fn compute_root(&mut self) -> Result<FrOf<Self::Hasher>> {
         self.recalculate_from(0)?;
         Ok(self.root())
     }

-    fn set_metadata(&mut self, metadata: &[u8]) -> Result<()> {
+    fn set_metadata(&mut self, metadata: &[u8]) -> Result<(), ZerokitMerkleTreeError> {
         self.metadata = metadata.to_vec();
         Ok(())
     }

-    fn metadata(&self) -> Result<Vec<u8>> {
+    fn metadata(&self) -> Result<Vec<u8>, ZerokitMerkleTreeError> {
         Ok(self.metadata.to_vec())
     }
 }

+// Utilities for updating the tree nodes
 impl<H: Hasher> OptimalMerkleTree<H>
 where
     H: Hasher,
 {
-    // Utilities for updating the tree nodes
-
+    /// Returns the value of a node at a specific (depth, index).
+    /// Falls back to a cached default if the node hasn't been set.
     fn get_node(&self, depth: usize, index: usize) -> H::Fr {
-        let node = *self
+        *self
             .nodes
             .get(&(depth, index))
-            .unwrap_or_else(|| &self.cached_nodes[depth]);
-        node
+            .unwrap_or(&self.cached_nodes[depth])
     }

     pub fn get_leaf(&self, index: usize) -> H::Fr {
         self.get_node(self.depth, index)
     }

-    fn hash_couple(&mut self, depth: usize, index: usize) -> H::Fr {
+    /// Computes the hash of a node’s two children at the given depth.
+    /// If the index is odd, it is rounded down to the nearest even index.
+    fn hash_couple(&self, depth: usize, index: usize) -> H::Fr {
         let b = index & !1;
         H::hash(&[self.get_node(depth, b), self.get_node(depth, b + 1)])
     }
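The `index & !1` trick is worth spelling out: it clears the lowest bit, rounding any index down to the even member of its sibling pair, so `hash_couple(depth, 4)` and `hash_couple(depth, 5)` hash the same `(4, 5)` pair. A standalone check of just that arithmetic:

```rust
// The even-index rounding used by hash_couple.
assert_eq!(5usize & !1, 4); // odd index rounds down to its left sibling
assert_eq!(4usize & !1, 4); // even index is unchanged
```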

     fn recalculate_from(&mut self, index: usize) -> Result<()> {
         let mut i = index;
         let mut depth = self.depth;
         loop {
             let h = self.hash_couple(depth, i);
             i >>= 1;
             depth -= 1;
             self.nodes.insert((depth, i), h);
             self.cached_leaves_indices[index] = 1;
             if depth == 0 {
                 break;
             }
         }
         if depth != 0 {
             return Err(Report::msg("did not reach the depth"));
         }
         if i != 0 {
             return Err(Report::msg("did not go through all indexes"));
         }
         Ok(())
     }

-    /// Update hashes after some leaves have been set or updated
-    /// index - first leaf index (which has been set or updated)
-    /// length - number of elements set or updated
-    fn update_hashes(&mut self, index: usize, length: usize) -> Result<()> {
-        // parent depth & index (used to store in the tree)
-        let mut parent_depth = self.depth - 1; // tree depth (or leaves depth) - 1
-        let mut parent_index = index >> 1;
-        let mut parent_index_bak = parent_index;
-        // maximum index at this depth
-        let parent_max_index_0 = (1 << parent_depth) / 2;
-        // Based on given length (number of elements we will update)
-        // we could restrict the parent_max_index
-        let current_index_max = if (index + length) % 2 == 0 {
-            index + length + 2
-        } else {
-            index + length + 1
-        };
-        let mut parent_max_index = min(current_index_max >> 1, parent_max_index_0);
-
-        // current depth & index (used to compute the hash)
-        // current depth initially == tree depth (or leaves depth)
+    /// Updates parent hashes after modifying a range of leaf nodes.
+    ///
+    /// - `start`: Starting leaf index that was updated.
+    /// - `length`: Number of consecutive leaves that were updated.
+    fn update_hashes(&mut self, start: usize, length: usize) -> Result<(), ZerokitMerkleTreeError> {
+        // Start at the leaf level
         let mut current_depth = self.depth;
-        let mut current_index = if index % 2 == 0 { index } else { index - 1 };
-        let mut current_index_bak = current_index;
-
-        loop {
-            // Hash 2 values at (current depth, current_index) & (current_depth, current_index + 1)
-            let n_hash = self.hash_couple(current_depth, current_index);
-            // Insert this hash at (parent_depth, parent_index)
-            self.nodes.insert((parent_depth, parent_index), n_hash);
+        // Round down to include the left sibling in the pair (if start is odd)
+        let mut current_index = start & !1;

-            if parent_depth == 0 {
-                // We just set the root hash of the tree - nothing to do anymore
-                break;
-            }
-            // Incr parent index
-            parent_index += 1;
-            // Incr current index (+2 because we've just hashed current index & current_index + 1)
-            current_index += 2;
-            if parent_index >= parent_max_index {
-                // reset (aka decr depth & reset indexes)
-                parent_depth -= 1;
-                parent_index = parent_index_bak >> 1;
-                parent_index_bak = parent_index;
-                parent_max_index >>= 1;
-                current_depth -= 1;
-                current_index = current_index_bak >> 1;
-                current_index_bak = current_index;
+        // Compute the max index at this level, round up to include the last updated leaf’s right sibling (if start + length is odd)
+        let mut current_index_max = (start + length + 1) & !1;
+
+        // Traverse from the leaf level up to the root
+        while current_depth > 0 {
+            // Compute the parent level (one level above the current)
+            let parent_depth = current_depth - 1;
+
+            // Use parallel processing when the number of pairs exceeds the threshold
+            if current_index_max - current_index >= MIN_PARALLEL_NODES {
+                let updates: Vec<((usize, usize), H::Fr)> = (current_index..current_index_max)
+                    .step_by(2)
+                    .collect::<Vec<_>>()
+                    .into_par_iter()
+                    .map(|index| {
+                        // Hash two child nodes at positions (current_depth, index) and (current_depth, index + 1)
+                        let hash = self.hash_couple(current_depth, index);
+                        // Return the computed parent hash and its position at the parent level
+                        ((parent_depth, index >> 1), hash)
+                    })
+                    .collect();
+
+                for (parent, hash) in updates {
+                    self.nodes.insert(parent, hash);
+                }
+            } else {
+                // Otherwise, fall back to sequential updates for small ranges
+                for index in (current_index..current_index_max).step_by(2) {
+                    let hash = self.hash_couple(current_depth, index);
+                    self.nodes.insert((parent_depth, index >> 1), hash);
+                }
+            }
+
+            // Move up one level in the tree
+            current_index >>= 1;
+            current_index_max = (current_index_max + 1) >> 1;
+            current_depth -= 1;
+        }

         Ok(())
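The rewritten traversal replaces the old parent/current bookkeeping with a single dirty window per level: rounded down to even on the left, up to even on the right, then halved on the way up. A standalone sketch of just that arithmetic (the constants are illustrative):

```rust
// Sketch of the shrinking dirty window in the new update_hashes().
let depth = 4usize;
let (start, length) = (3usize, 4usize); // leaves 3..7 were touched
let mut lo = start & !1;                // 2: pull in the left sibling
let mut hi = (start + length + 1) & !1; // 8: pull in the right sibling
for level in (1..=depth).rev() {
    println!("level {level}: rehash pairs {lo}..{hi} into level {}", level - 1);
    lo >>= 1;           // parent of the leftmost dirty pair
    hi = (hi + 1) >> 1; // parent of the rightmost dirty pair, rounded up
}
```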
@@ -383,7 +367,7 @@ where
     type Index = u8;
     type Hasher = H;

-    // Returns the length of a Merkle proof
+    /// Returns the length of a Merkle proof
     fn length(&self) -> usize {
         self.0.len()
     }

@@ -1,4 +1,4 @@
 pub mod sled_adapter;
-pub use self::sled_adapter::*;
+pub use self::sled_adapter::SledDB;
 pub use pmtree;
-pub use sled::*;
+pub use sled::{Config, Mode};
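Both re-export changes swap glob re-exports for explicit items, so downstream crates state exactly what they pull in. Under that reading, an import that previously rode on `pub use sled::*` would now look something like this (the exact module path is an assumption, not shown in this diff):

```rust
// Sketch: explicit imports after the glob re-exports are narrowed.
use zerokit_utils::{Config, Mode, SledDB}; // hypothetical crate-root paths
```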

@@ -1,5 +1,4 @@
 pub mod poseidon_hash;
-pub use self::poseidon_hash::*;
+pub use poseidon_hash::Poseidon;

 pub mod poseidon_constants;
-pub use self::poseidon_constants::*;

@@ -6,7 +6,7 @@ pub mod test {
     use tiny_keccak::{Hasher as _, Keccak};
     use zerokit_utils::{
         FullMerkleConfig, FullMerkleTree, Hasher, OptimalMerkleConfig, OptimalMerkleTree,
-        ZerokitMerkleProof, ZerokitMerkleTree,
+        ZerokitMerkleProof, ZerokitMerkleTree, MIN_PARALLEL_NODES,
     };
     #[derive(Clone, Copy, Eq, PartialEq)]
     struct Keccak256;

@@ -42,7 +42,7 @@ pub mod test {
         type Err = std::string::FromUtf8Error;

         fn from_str(s: &str) -> Result<Self, Self::Err> {
-            Ok(TestFr(s.as_bytes().try_into().unwrap()))
+            Ok(TestFr(s.as_bytes().try_into().expect("Invalid length")))
         }
     }

@@ -50,7 +50,7 @@ pub mod test {
         fn from(value: u32) -> Self {
            let mut bytes: Vec<u8> = vec![0; 28];
            bytes.extend_from_slice(&value.to_be_bytes());
-            TestFr(bytes.as_slice().try_into().unwrap())
+            TestFr(bytes.as_slice().try_into().expect("Invalid length"))
         }
     }
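The `From<u32>` impl pads to 32 bytes: 28 zero bytes followed by the big-endian `u32`. A self-contained check of that layout:

```rust
// Sketch mirroring TestFr::from(7u32): 28 zero bytes + 4-byte big-endian value.
let mut bytes: Vec<u8> = vec![0; 28];
bytes.extend_from_slice(&7u32.to_be_bytes());
assert_eq!(bytes.len(), 32);
assert_eq!(&bytes[28..], &[0, 0, 0, 7]);
```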

@@ -58,12 +58,12 @@ pub mod test {

     fn default_full_merkle_tree(depth: usize) -> FullMerkleTree<Keccak256> {
         FullMerkleTree::<Keccak256>::new(depth, TestFr([0; 32]), FullMerkleConfig::default())
-            .unwrap()
+            .expect("Failed to create FullMerkleTree")
     }

     fn default_optimal_merkle_tree(depth: usize) -> OptimalMerkleTree<Keccak256> {
         OptimalMerkleTree::<Keccak256>::new(depth, TestFr([0; 32]), OptimalMerkleConfig::default())
-            .unwrap()
+            .expect("Failed to create OptimalMerkleTree")
     }

     #[test]

@@ -83,21 +83,101 @@ pub mod test {
         let nof_leaves = 4;
         let leaves: Vec<TestFr> = (1..=nof_leaves as u32).map(TestFr::from).collect();

-        let mut tree = default_full_merkle_tree(DEFAULT_DEPTH);
-        assert_eq!(tree.root(), default_tree_root);
+        let mut tree_full = default_full_merkle_tree(DEFAULT_DEPTH);
+        assert_eq!(tree_full.root(), default_tree_root);
         for i in 0..nof_leaves {
-            tree.set(i, leaves[i]).unwrap();
-            assert_eq!(tree.root(), roots[i]);
+            tree_full.set(i, leaves[i]).expect("Failed to set leaf");
+            assert_eq!(tree_full.root(), roots[i]);
         }

-        let mut tree = default_optimal_merkle_tree(DEFAULT_DEPTH);
-        assert_eq!(tree.root(), default_tree_root);
+        let mut tree_opt = default_optimal_merkle_tree(DEFAULT_DEPTH);
+        assert_eq!(tree_opt.root(), default_tree_root);
         for i in 0..nof_leaves {
-            tree.set(i, leaves[i]).unwrap();
-            assert_eq!(tree.root(), roots[i]);
+            tree_opt.set(i, leaves[i]).expect("Failed to set leaf");
+            assert_eq!(tree_opt.root(), roots[i]);
         }
     }

+    #[test]
+    fn test_set_range() {
+        let depth = 4;
+        let leaves: Vec<TestFr> = (0..(1 << depth) as u32).map(TestFr::from).collect();
+
+        let mut tree_full = default_full_merkle_tree(depth);
+        let root_before = tree_full.root();
+        tree_full
+            .set_range(0, leaves.iter().cloned())
+            .expect("Failed to set leaves");
+        let root_after = tree_full.root();
+        assert_ne!(root_before, root_after);
+
+        let mut tree_opt = default_optimal_merkle_tree(depth);
+        let root_before = tree_opt.root();
+        tree_opt
+            .set_range(0, leaves.iter().cloned())
+            .expect("Failed to set leaves");
+        let root_after = tree_opt.root();
+        assert_ne!(root_before, root_after);
+    }
+
+    #[test]
+    fn test_update_next() {
+        let mut tree_full = default_full_merkle_tree(DEFAULT_DEPTH);
+        let mut tree_opt = default_optimal_merkle_tree(DEFAULT_DEPTH);
+
+        for i in 0..4 {
+            let leaf = TestFr::from(i as u32);
+            tree_full.update_next(leaf).expect("Failed to update leaf");
+            tree_opt.update_next(leaf).expect("Failed to update leaf");
+            assert_eq!(tree_full.get(i).expect("Failed to get leaf"), leaf);
+            assert_eq!(tree_opt.get(i).expect("Failed to get leaf"), leaf);
+        }
+
+        assert_eq!(tree_full.leaves_set(), 4);
+        assert_eq!(tree_opt.leaves_set(), 4);
+    }
+
+    #[test]
+    fn test_delete_and_reset() {
+        let index = 1;
+        let original_leaf = TestFr::from(42);
+        let new_leaf = TestFr::from(99);
+
+        let mut tree_full = default_full_merkle_tree(DEFAULT_DEPTH);
+        tree_full
+            .set(index, original_leaf)
+            .expect("Failed to set leaf");
+        let root_with_original = tree_full.root();
+
+        tree_full.delete(index).expect("Failed to delete leaf");
+        let root_after_delete = tree_full.root();
+        assert_ne!(root_with_original, root_after_delete);
+
+        tree_full.set(index, new_leaf).expect("Failed to set leaf");
+        let root_after_reset = tree_full.root();
+
+        assert_ne!(root_after_delete, root_after_reset);
+        assert_ne!(root_with_original, root_after_reset);
+        assert_eq!(tree_full.get(index).expect("Failed to get leaf"), new_leaf);
+
+        let mut tree_opt = default_optimal_merkle_tree(DEFAULT_DEPTH);
+        tree_opt
+            .set(index, original_leaf)
+            .expect("Failed to set leaf");
+        let root_with_original = tree_opt.root();
+
+        tree_opt.delete(index).expect("Failed to delete leaf");
+        let root_after_delete = tree_opt.root();
+        assert_ne!(root_with_original, root_after_delete);
+
+        tree_opt.set(index, new_leaf).expect("Failed to set leaf");
+        let root_after_reset = tree_opt.root();
+
+        assert_ne!(root_after_delete, root_after_reset);
+        assert_ne!(root_with_original, root_after_reset);
+        assert_eq!(tree_opt.get(index).expect("Failed to get leaf"), new_leaf);
+    }
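`test_delete_and_reset` pins down that `delete` only rewrites the leaf to its default value; since `next_index` is not rolled back (as the `delete` doc comment above states), subsequent appends skip the freed slot. A sketch of that interaction (hypothetical `tree` and leaves):

```rust
// Sketch: delete() does not rewind next_index.
tree.update_next(leaf_a)?; // writes index 0, next_index becomes 1
tree.delete(0)?;           // index 0 back to default, next_index stays 1
tree.update_next(leaf_b)?; // writes index 1, not the freed index 0
assert_eq!(tree.leaves_set(), 2);
```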

     #[test]
     fn test_get_empty_leaves_indices() {
         let depth = 4;

@@ -123,31 +203,29 @@ pub mod test {
             assert_eq!(tree_full.get_empty_leaves_indices(), vec_idxs);
         }

-        // Check situation when the number of items to insert is less than the number of items to delete
+        // check situation when the number of items to insert is less than the number of items to delete
         tree_full
             .override_range(0, leaves_2.clone().into_iter(), [0, 1, 2, 3].into_iter())
-            .unwrap();
+            .expect("Failed to override range");

         // check if the indexes for write and delete are the same
         tree_full
             .override_range(0, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
-            .unwrap();
-        assert_eq!(tree_full.get_empty_leaves_indices(), vec![]);
+            .expect("Failed to override range");
+        assert_eq!(tree_full.get_empty_leaves_indices(), Vec::<usize>::new());

         // check if indexes for deletion are before indexes for overwriting
         tree_full
             .override_range(4, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
-            .unwrap();
+            .expect("Failed to override range");
         assert_eq!(tree_full.get_empty_leaves_indices(), vec![0, 1, 2, 3]);

         // check if the indices for write and delete do not overlap completely
         tree_full
             .override_range(2, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
-            .unwrap();
+            .expect("Failed to override range");
         assert_eq!(tree_full.get_empty_leaves_indices(), vec![0, 1]);

         //// Optimal Merkle Tree Test

         let mut tree_opt = default_optimal_merkle_tree(depth);
         let _ = tree_opt.set_range(0, leaves.clone().into_iter());
         assert!(tree_opt.get_empty_leaves_indices().is_empty());

@@ -164,27 +242,27 @@ pub mod test {
             assert_eq!(tree_opt.get_empty_leaves_indices(), vec_idxs);
         }

-        // Check situation when the number of items to insert is less than the number of items to delete
+        // check situation when the number of items to insert is less than the number of items to delete
         tree_opt
             .override_range(0, leaves_2.clone().into_iter(), [0, 1, 2, 3].into_iter())
-            .unwrap();
+            .expect("Failed to override range");

         // check if the indexes for write and delete are the same
         tree_opt
             .override_range(0, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
-            .unwrap();
-        assert_eq!(tree_opt.get_empty_leaves_indices(), vec![]);
+            .expect("Failed to override range");
+        assert_eq!(tree_opt.get_empty_leaves_indices(), Vec::<usize>::new());

         // check if indexes for deletion are before indexes for overwriting
         tree_opt
             .override_range(4, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
-            .unwrap();
+            .expect("Failed to override range");
         assert_eq!(tree_opt.get_empty_leaves_indices(), vec![0, 1, 2, 3]);

         // check if the indices for write and delete do not overlap completely
         tree_opt
             .override_range(2, leaves_4.clone().into_iter(), [0, 1, 2, 3].into_iter())
-            .unwrap();
+            .expect("Failed to override range");
         assert_eq!(tree_opt.get_empty_leaves_indices(), vec![0, 1]);
     }

@@ -194,18 +272,25 @@ pub mod test {
         let nof_leaves: usize = 4;
         let leaves: Vec<TestFr> = (0..nof_leaves as u32).map(TestFr::from).collect();

-        let mut tree_full = default_optimal_merkle_tree(depth);
+        let mut tree_full = default_full_merkle_tree(depth);
         let _ = tree_full.set_range(0, leaves.iter().cloned());

         for i in 0..nof_leaves {
             // check leaves
             assert_eq!(
-                tree_full.get(i).unwrap(),
-                tree_full.get_subtree_root(depth, i).unwrap()
+                tree_full.get(i).expect("Failed to get leaf"),
+                tree_full
+                    .get_subtree_root(depth, i)
+                    .expect("Failed to get subtree root")
             );

             // check root
-            assert_eq!(tree_full.root(), tree_full.get_subtree_root(0, i).unwrap());
+            assert_eq!(
+                tree_full.root(),
+                tree_full
+                    .get_subtree_root(0, i)
+                    .expect("Failed to get subtree root")
+            );
         }

         // check intermediate nodes

@@ -215,26 +300,39 @@ pub mod test {
                 let idx_r = (i + 1) * (1 << (depth - n));
                 let idx_sr = idx_l;

-                let prev_l = tree_full.get_subtree_root(n, idx_l).unwrap();
-                let prev_r = tree_full.get_subtree_root(n, idx_r).unwrap();
-                let subroot = tree_full.get_subtree_root(n - 1, idx_sr).unwrap();
+                let prev_l = tree_full
+                    .get_subtree_root(n, idx_l)
+                    .expect("Failed to get subtree root");
+                let prev_r = tree_full
+                    .get_subtree_root(n, idx_r)
+                    .expect("Failed to get subtree root");
+                let subroot = tree_full
+                    .get_subtree_root(n - 1, idx_sr)
+                    .expect("Failed to get subtree root");

                 // check intermediate nodes
                 assert_eq!(Keccak256::hash(&[prev_l, prev_r]), subroot);
             }
         }

-        let mut tree_opt = default_full_merkle_tree(depth);
+        let mut tree_opt = default_optimal_merkle_tree(depth);
         let _ = tree_opt.set_range(0, leaves.iter().cloned());

         for i in 0..nof_leaves {
             // check leaves
             assert_eq!(
-                tree_opt.get(i).unwrap(),
-                tree_opt.get_subtree_root(depth, i).unwrap()
+                tree_opt.get(i).expect("Failed to get leaf"),
+                tree_opt
+                    .get_subtree_root(depth, i)
+                    .expect("Failed to get subtree root")
             );
             // check root
-            assert_eq!(tree_opt.root(), tree_opt.get_subtree_root(0, i).unwrap());
+            assert_eq!(
+                tree_opt.root(),
+                tree_opt
+                    .get_subtree_root(0, i)
+                    .expect("Failed to get subtree root")
+            );
         }

         // check intermediate nodes

@@ -244,9 +342,15 @@ pub mod test {
                 let idx_r = (i + 1) * (1 << (depth - n));
                 let idx_sr = idx_l;

-                let prev_l = tree_opt.get_subtree_root(n, idx_l).unwrap();
-                let prev_r = tree_opt.get_subtree_root(n, idx_r).unwrap();
-                let subroot = tree_opt.get_subtree_root(n - 1, idx_sr).unwrap();
+                let prev_l = tree_opt
+                    .get_subtree_root(n, idx_l)
+                    .expect("Failed to get subtree root");
+                let prev_r = tree_opt
+                    .get_subtree_root(n, idx_r)
+                    .expect("Failed to get subtree root");
+                let subroot = tree_opt
+                    .get_subtree_root(n - 1, idx_sr)
+                    .expect("Failed to get subtree root");

                 // check intermediate nodes
                 assert_eq!(Keccak256::hash(&[prev_l, prev_r]), subroot);
@@ -259,61 +363,83 @@ pub mod test {
         let nof_leaves = 4;
         let leaves: Vec<TestFr> = (0..nof_leaves as u32).map(TestFr::from).collect();

-        // We thest the FullMerkleTree implementation
-        let mut tree = default_full_merkle_tree(DEFAULT_DEPTH);
+        // We test the FullMerkleTree implementation
+        let mut tree_full = default_full_merkle_tree(DEFAULT_DEPTH);
         for i in 0..nof_leaves {
             // We set the leaves
-            tree.set(i, leaves[i]).unwrap();
+            tree_full.set(i, leaves[i]).expect("Failed to set leaf");

             // We compute a merkle proof
-            let proof = tree.proof(i).expect("index should be set");
+            let proof = tree_full.proof(i).expect("Failed to compute proof");

             // We verify if the merkle proof corresponds to the right leaf index
             assert_eq!(proof.leaf_index(), i);

             // We verify the proof
-            assert!(tree.verify(&leaves[i], &proof).unwrap());
+            assert!(tree_full
+                .verify(&leaves[i], &proof)
+                .expect("Failed to verify proof"));

             // We ensure that the Merkle proof and the leaf generate the same root as the tree
-            assert_eq!(proof.compute_root_from(&leaves[i]), tree.root());
+            assert_eq!(proof.compute_root_from(&leaves[i]), tree_full.root());

             // We check that the proof is not valid for another leaf
-            assert!(!tree.verify(&leaves[(i + 1) % nof_leaves], &proof).unwrap());
+            assert!(!tree_full
+                .verify(&leaves[(i + 1) % nof_leaves], &proof)
+                .expect("Failed to verify proof"));
         }

         // We test the OptimalMerkleTree implementation
-        let mut tree = default_optimal_merkle_tree(DEFAULT_DEPTH);
+        let mut tree_opt = default_optimal_merkle_tree(DEFAULT_DEPTH);
         for i in 0..nof_leaves {
             // We set the leaves
-            tree.set(i, leaves[i]).unwrap();
+            tree_opt.set(i, leaves[i]).expect("Failed to set leaf");

             // We compute a merkle proof
-            let proof = tree.proof(i).expect("index should be set");
+            let proof = tree_opt.proof(i).expect("Failed to compute proof");

             // We verify if the merkle proof corresponds to the right leaf index
             assert_eq!(proof.leaf_index(), i);

             // We verify the proof
-            assert!(tree.verify(&leaves[i], &proof).unwrap());
+            assert!(tree_opt
+                .verify(&leaves[i], &proof)
+                .expect("Failed to verify proof"));

             // We ensure that the Merkle proof and the leaf generate the same root as the tree
-            assert_eq!(proof.compute_root_from(&leaves[i]), tree.root());
+            assert_eq!(proof.compute_root_from(&leaves[i]), tree_opt.root());

             // We check that the proof is not valid for another leaf
-            assert!(!tree.verify(&leaves[(i + 1) % nof_leaves], &proof).unwrap());
+            assert!(!tree_opt
+                .verify(&leaves[(i + 1) % nof_leaves], &proof)
+                .expect("Failed to verify proof"));
         }
     }

+    #[test]
+    fn test_proof_fail() {
+        let tree_full = default_full_merkle_tree(DEFAULT_DEPTH);
+        let tree_opt = default_optimal_merkle_tree(DEFAULT_DEPTH);
+
+        let invalid_leaf = TestFr::from(12345);
+
+        let proof_full = tree_full.proof(0).expect("Failed to compute proof");
+        let proof_opt = tree_opt.proof(0).expect("Failed to compute proof");
+
+        // Should fail because no leaf was set
+        assert!(!tree_full
+            .verify(&invalid_leaf, &proof_full)
+            .expect("Failed to verify proof"));
+        assert!(!tree_opt
+            .verify(&invalid_leaf, &proof_opt)
+            .expect("Failed to verify proof"));
+    }
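`test_proof_fail` relies on `verify` returning `Ok(false)` rather than an error for a mismatched leaf: the witness recomputes a root over default siblings, which can only match the tree root for the default leaf itself. Sketched (hypothetical `tree` left fully unset):

```rust
// Sketch: on an empty tree, a proof for index 0 verifies only the default leaf.
let proof = tree.proof(0)?;
assert!(tree.verify(&default_leaf, &proof)?); // roots agree
assert!(!tree.verify(&other_leaf, &proof)?);  // roots differ -> Ok(false)
```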

     #[test]
     fn test_override_range() {
         let nof_leaves = 4;
         let leaves: Vec<TestFr> = (0..nof_leaves as u32).map(TestFr::from).collect();

-        let mut tree = default_optimal_merkle_tree(DEFAULT_DEPTH);
-
-        // We set the leaves
-        tree.set_range(0, leaves.iter().cloned()).unwrap();

         let new_leaves = [
             hex!("0000000000000000000000000000000000000000000000000000000000000005"),
             hex!("0000000000000000000000000000000000000000000000000000000000000006"),

@@ -322,17 +448,70 @@ pub mod test {

         let to_delete_indices: [usize; 2] = [0, 1];

-        // We override the leaves
-        tree.override_range(
-            0, // start from the end of the initial leaves
-            new_leaves.iter().cloned(),
-            to_delete_indices.iter().cloned(),
-        )
-        .unwrap();
+        let mut tree_full = default_full_merkle_tree(DEFAULT_DEPTH);
+        tree_full
+            .set_range(0, leaves.iter().cloned())
+            .expect("Failed to set leaves");
+
+        tree_full
+            .override_range(
+                0,
+                new_leaves.iter().cloned(),
+                to_delete_indices.iter().cloned(),
+            )
+            .expect("Failed to override range");

         // ensure that the leaves are set correctly
         for (i, &new_leaf) in new_leaves.iter().enumerate() {
-            assert_eq!(tree.get_leaf(i), new_leaf);
+            assert_eq!(tree_full.get(i).expect("Failed to get leaf"), new_leaf);
         }

+        let mut tree_opt = default_optimal_merkle_tree(DEFAULT_DEPTH);
+        tree_opt
+            .set_range(0, leaves.iter().cloned())
+            .expect("Failed to set leaves");
+
+        tree_opt
+            .override_range(
+                0,
+                new_leaves.iter().cloned(),
+                to_delete_indices.iter().cloned(),
+            )
+            .expect("Failed to override range");
+
+        for (i, &new_leaf) in new_leaves.iter().enumerate() {
+            assert_eq!(tree_opt.get(i).expect("Failed to get leaf"), new_leaf);
+        }
     }

+    #[test]
+    fn test_override_range_parallel_triggered() {
+        let depth = 13;
+        let nof_leaves = 8192;
+
+        // number of leaves larger than MIN_PARALLEL_NODES to trigger parallel hashing
+        assert!(MIN_PARALLEL_NODES < nof_leaves);
+
+        let leaves: Vec<TestFr> = (0..nof_leaves as u32).map(TestFr::from).collect();
+        let indices: Vec<usize> = (0..nof_leaves).collect();
+
+        let mut tree_full = default_full_merkle_tree(depth);
+
+        tree_full
+            .override_range(0, leaves.iter().cloned(), indices.iter().cloned())
+            .expect("Failed to override range");
+
+        for (i, &leaf) in leaves.iter().enumerate() {
+            assert_eq!(tree_full.get(i).expect("Failed to get leaf"), leaf);
+        }
+
+        let mut tree_opt = default_optimal_merkle_tree(depth);
+
+        tree_opt
+            .override_range(0, leaves.iter().cloned(), indices.iter().cloned())
+            .expect("Failed to override range");
+
+        for (i, &leaf) in leaves.iter().enumerate() {
+            assert_eq!(tree_opt.get(i).expect("Failed to get leaf"), leaf);
+        }
+    }
 }
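The parallel test sizes its batch (8192 leaves at depth 13) so the per-level pair count clears `MIN_PARALLEL_NODES` on the lower levels of the tree. A sketch of how a caller might pick a batch size with the same guarantee, using the re-export confirmed by the import hunk above:

```rust
// Sketch: size a batch so override_range takes the parallel path at the leaf level.
use zerokit_utils::MIN_PARALLEL_NODES;

let batch_size = (MIN_PARALLEL_NODES * 2).next_power_of_two();
assert!(batch_size > MIN_PARALLEL_NODES);
```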