Compare commits

...

10 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| github-actions[bot] | ffbb4ad2f9 | ci: update version string in docs | 2025-02-05 23:01:51 +00:00 |
| dante | 20ac99fdbf | fix: ezkl-gpu install | 2025-02-05 18:01:26 -05:00 |
| dante | 532fa65e93 | fix: patch python release pipeline for v4 | 2025-02-05 17:59:35 -05:00 |
| dante | cfe5db545c | fix: npm and pypi releases | 2025-02-05 17:26:36 -05:00 |
| dante | 21ad56aea1 | refactor: serial lookup commits for metal (#928) | 2025-02-05 16:54:12 -05:00 |
| dante | 4ed7e0fd29 | fix: use variable len domain for poseidon (#927) | 2025-02-05 16:52:28 -05:00 |
| dante | 05d1f10615 | docs: advanced security notices (#926) (Co-authored-by: jason <jason.morton@gmail.com>) | 2025-02-05 15:14:29 +00:00 |
| dante | 9a8c754e45 | fix: use onnx convention when integer dividing (#925) | 2025-02-05 09:32:44 +00:00 |
| dante | d82766d413 | fix: force prover det on argmax/min for collisions (#923) | 2025-02-04 12:08:34 +00:00 |
| dante | 820a80122b | fix: range-check graph input and outputs (#921) | 2025-02-04 02:33:27 +00:00 |
43 changed files with 915 additions and 682 deletions

View File

@@ -47,6 +47,10 @@ jobs:
curl -L https://github.com/WebAssembly/binaryen/releases/download/version_116/binaryen-version_116-x86_64-linux.tar.gz | tar xzf -
export PATH=$PATH:$PWD/binaryen-version_116/bin
wasm-opt --version
- name: Build wasm files for both web and nodejs compilation targets
run: |
wasm-pack build --release --target nodejs --out-dir ./pkg/nodejs . -- -Z build-std="panic_abort,std"
wasm-pack build --release --target web --out-dir ./pkg/web . -- -Z build-std="panic_abort,std" --features web
- name: Create package.json in pkg folder
shell: bash
run: |

View File

@@ -73,10 +73,10 @@ jobs:
- name: Install built wheel
if: matrix.target == 'x86_64'
run: |
pip install ezkl-gpu --no-index --find-links dist --force-reinstall
pip install ezkl --no-index --find-links dist --force-reinstall
- name: Upload wheels
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: wheels
path: dist

View File

@@ -73,9 +73,9 @@ jobs:
python -c "import ezkl"
- name: Upload wheels
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: wheels
name: dist-macos-${{ matrix.target }}
path: dist
windows:
@@ -130,9 +130,9 @@ jobs:
python -c "import ezkl"
- name: Upload wheels
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: wheels
name: dist-windows-${{ matrix.target }}
path: dist
linux:
@@ -203,9 +203,9 @@ jobs:
python -c "import ezkl"
- name: Upload wheels
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: wheels
name: dist-linux-${{ matrix.target }}
path: dist
musllinux:
@@ -271,9 +271,9 @@ jobs:
python3 -c "import ezkl"
- name: Upload wheels
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: wheels
name: dist-musllinux-${{ matrix.target }}
path: dist
musllinux-cross:
@@ -334,9 +334,9 @@ jobs:
python3 -c "import ezkl"
- name: Upload wheels
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: wheels
name: dist-musllinux-${{ matrix.platform.target }}
path: dist
pypi-publish:
@@ -349,7 +349,9 @@ jobs:
steps:
- uses: actions/download-artifact@v3
with:
name: wheels
pattern: dist-*
merge-multiple: true
path: dist
- name: List Files
run: ls -R

View File

@@ -20,6 +20,7 @@ env:
jobs:
fr-age-test:
needs: [build, library-tests, docs, python-tests, python-integration-tests]
permissions:
contents: read
runs-on: large-self-hosted
@@ -32,8 +33,12 @@ jobs:
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
- name: fr age Mock
run: cargo test --release --verbose tests::large_mock_::large_tests_6_expects -- --include-ignored
run: cargo nextest run --release --verbose tests::large_mock_::large_tests_6_expects -- --include-ignored
build:
permissions:
@@ -158,13 +163,13 @@ jobs:
# - name: Conv overflow (wasi)
# run: cargo wasi test conv_col_ultra_overflow -- --include-ignored --nocapture
- name: lookup overflow
run: cargo nextest run --release lookup_ultra_overflow --no-capture --no-default-features --features ezkl -- --include-ignored
- name: Matmul overflow
run: RUST_LOG=debug cargo nextest run matmul_col_ultra_overflow --no-capture --no-default-features --features ezkl -- --include-ignored
run: RUST_LOG=debug cargo nextest run --release matmul_col_ultra_overflow --no-capture --no-default-features --features ezkl -- --include-ignored
- name: Conv overflow
run: RUST_LOG=debug cargo nextest run conv_col_ultra_overflow --no-capture --no-default-features --features ezkl -- --include-ignored
run: RUST_LOG=debug cargo nextest run --release conv_col_ultra_overflow --no-capture --no-default-features --features ezkl -- --include-ignored
- name: Conv + relu overflow
run: cargo nextest run --release conv_relu_col_ultra_overflow --no-capture --no-default-features --features ezkl -- --include-ignored
ultra-overflow-tests:
permissions:
@@ -195,13 +200,13 @@ jobs:
# - name: Conv overflow (wasi)
# run: cargo wasi test conv_col_ultra_overflow -- --include-ignored --nocapture
- name: lookup overflow
run: cargo nextest run --release lookup_ultra_overflow --no-capture -- --include-ignored
run: cargo nextest run lookup_ultra_overflow --no-capture -- --include-ignored
- name: Matmul overflow
run: RUST_LOG=debug cargo nextest run matmul_col_ultra_overflow --no-capture -- --include-ignored
- name: Conv overflow
run: RUST_LOG=debug cargo nextest run conv_col_ultra_overflow --no-capture -- --include-ignored
run: RUST_LOG=debug cargo nextest run --release conv_col_ultra_overflow --no-capture -- --include-ignored
- name: Conv + relu overflow
run: cargo nextest run --release conv_relu_col_ultra_overflow --no-capture -- --include-ignored
model-serialization:
permissions:
@@ -226,7 +231,7 @@ jobs:
wasm32-tests:
permissions:
contents: read
runs-on: ubuntu-latest
runs-on: non-gpu
steps:
- uses: actions/checkout@v4
with:
@@ -256,7 +261,6 @@ jobs:
permissions:
contents: read
runs-on: non-gpu
needs: [build, library-tests, docs, python-tests, python-integration-tests]
steps:
- uses: actions/checkout@v4
with:
@@ -271,53 +275,53 @@ jobs:
crate: cargo-nextest
locked: true
# - name: The Worm Mock
# run: cargo nextest run --release --verbose tests::large_mock_::large_tests_5_expects -- --include-ignored
- name: public outputs and bounded lookup log
run: cargo nextest run --release --verbose tests::mock_bounded_lookup_log --test-threads 32
- name: public outputs and tolerance > 0
run: cargo nextest run --release --verbose tests::mock_tolerance_public_outputs_ --test-threads 32
- name: public outputs + batch size == 10
run: cargo nextest run --release --verbose tests::mock_large_batch_public_outputs_ --test-threads 16
- name: kzg inputs
run: cargo nextest run --release --verbose tests::mock_kzg_input_::t --test-threads 32
- name: kzg params
run: cargo nextest run --release --verbose tests::mock_kzg_params_::t --test-threads 32
- name: kzg outputs
run: cargo nextest run --release --verbose tests::mock_kzg_output_::t --test-threads 32
- name: kzg inputs + params + outputs
run: cargo nextest run --release --verbose tests::mock_kzg_all_::t --test-threads 32
- name: Mock fixed inputs
run: cargo nextest run --release --verbose tests::mock_fixed_inputs_ --test-threads 32
- name: Mock fixed outputs
run: cargo nextest run --release --verbose tests::mock_fixed_outputs --test-threads 32
- name: Mock accuracy calibration
run: cargo nextest run --release --verbose tests::mock_accuracy_cal_tests::a
- name: hashed inputs
run: cargo nextest run --release --verbose tests::mock_hashed_input_::t --test-threads 32
- name: hashed params
run: cargo nextest run --release --verbose tests::mock_hashed_params_::t --test-threads 32
- name: hashed params public inputs
run: cargo nextest run --release --verbose tests::mock_hashed_params_public_inputs_::t --test-threads 32
- name: hashed outputs
run: cargo nextest run --release --verbose tests::mock_hashed_output_::t --test-threads 32
- name: hashed inputs + params + outputs
run: cargo nextest run --release --verbose tests::mock_hashed_all_::t --test-threads 32
- name: hashed inputs + fixed params
run: cargo nextest run --release --verbose tests::mock_hashed_output_fixed_params_::t --test-threads 32
# run: cargo nextest run --verbose tests::large_mock_::large_tests_5_expects -- --include-ignored
- name: MNIST Gan Mock
run: cargo nextest run --release --verbose tests::large_mock_::large_tests_4_expects -- --include-ignored
run: cargo nextest run --verbose tests::large_mock_::large_tests_4_expects -- --include-ignored
- name: NanoGPT Mock
run: cargo nextest run --release --verbose tests::large_mock_::large_tests_1_expects -- --include-ignored
run: cargo nextest run --verbose tests::large_mock_::large_tests_1_expects -- --include-ignored
- name: Self Attention Mock
run: cargo nextest run --release --verbose tests::large_mock_::large_tests_0_expects -- --include-ignored
run: cargo nextest run --verbose tests::large_mock_::large_tests_0_expects -- --include-ignored
- name: Multihead Attention Mock
run: cargo nextest run --release --verbose tests::large_mock_::large_tests_2_expects -- --include-ignored
run: cargo nextest run --verbose tests::large_mock_::large_tests_2_expects -- --include-ignored
- name: public outputs
run: cargo nextest run --release --verbose tests::mock_public_outputs_ --test-threads 32
run: cargo nextest run --verbose tests::mock_public_outputs_ --test-threads 32
- name: public inputs
run: cargo nextest run --release --verbose tests::mock_public_inputs_ --test-threads 32
run: cargo nextest run --verbose tests::mock_public_inputs_ --test-threads 32
- name: fixed params
run: cargo nextest run --release --verbose tests::mock_fixed_params_ --test-threads 32
run: cargo nextest run --verbose tests::mock_fixed_params_ --test-threads 32
- name: public outputs and bounded lookup log
run: cargo nextest run --verbose tests::mock_bounded_lookup_log --test-threads 32
- name: public outputs and tolerance > 0
run: cargo nextest run --verbose tests::mock_tolerance_public_outputs_ --test-threads 32
- name: public outputs + batch size == 10
run: cargo nextest run --verbose tests::mock_large_batch_public_outputs_ --test-threads 16
- name: kzg inputs
run: cargo nextest run --verbose tests::mock_kzg_input_::t --test-threads 32
- name: kzg params
run: cargo nextest run --verbose tests::mock_kzg_params_::t --test-threads 32
- name: kzg outputs
run: cargo nextest run --verbose tests::mock_kzg_output_::t --test-threads 32
- name: kzg inputs + params + outputs
run: cargo nextest run --verbose tests::mock_kzg_all_::t --test-threads 32
- name: Mock fixed inputs
run: cargo nextest run --verbose tests::mock_fixed_inputs_ --test-threads 32
- name: Mock fixed outputs
run: cargo nextest run --verbose tests::mock_fixed_outputs --test-threads 32
- name: Mock accuracy calibration
run: cargo nextest run --verbose tests::mock_accuracy_cal_tests::a
- name: hashed inputs
run: cargo nextest run --verbose tests::mock_hashed_input_::t --test-threads 32
- name: hashed params
run: cargo nextest run --verbose tests::mock_hashed_params_::t --test-threads 32
- name: hashed params public inputs
run: cargo nextest run --verbose tests::mock_hashed_params_public_inputs_::t --test-threads 32
- name: hashed outputs
run: cargo nextest run --verbose tests::mock_hashed_output_::t --test-threads 32
- name: hashed inputs + params + outputs
run: cargo nextest run --verbose tests::mock_hashed_all_::t --test-threads 32
- name: hashed inputs + fixed params
run: cargo nextest run --verbose tests::mock_hashed_output_fixed_params_::t --test-threads 32
prove-and-verify-evm-tests:
permissions:
@@ -360,7 +364,7 @@ jobs:
NODE_ENV: development
- name: Build wasm package for nodejs target.
run: |
wasm-pack build --release --target nodejs --out-dir ./in-browser-evm-verifier/nodejs . -- -Z build-std="panic_abort,std"
wasm-pack build --target nodejs --out-dir ./in-browser-evm-verifier/nodejs . -- -Z build-std="panic_abort,std"
- name: Replace memory definition in nodejs
run: |
sed -i "3s|.*|imports['env'] = {memory: new WebAssembly.Memory({initial:20,maximum:65536,shared:true})}|" in-browser-evm-verifier/nodejs/ezkl.js
@@ -374,35 +378,35 @@ jobs:
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev 62cdea8ff9e6efef011f77e295823b5f2dbeb3a1 --locked anvil --force
- name: KZG prove and verify tests (EVM + reusable verifier + col-overflow)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_prove_and_verify_reusable_verifier --test-threads 1
run: cargo nextest run --verbose tests_evm::kzg_evm_prove_and_verify_reusable_verifier --test-threads 1
- name: KZG prove and verify tests (EVM + kzg all)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_kzg_all_prove_and_verify --test-threads 1
run: cargo nextest run --verbose tests_evm::kzg_evm_kzg_all_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + kzg inputs)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_kzg_input_prove_and_verify --test-threads 1
run: cargo nextest run --verbose tests_evm::kzg_evm_kzg_input_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + kzg params)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_kzg_params_prove_and_verify --test-threads 1
run: cargo nextest run --verbose tests_evm::kzg_evm_kzg_params_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + on chain inputs)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_on_chain_input_prove_and_verify --test-threads 1
run: cargo nextest run --verbose tests_evm::kzg_evm_on_chain_input_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + on chain outputs)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_on_chain_output_prove_and_verify --test-threads 1
run: cargo nextest run --verbose tests_evm::kzg_evm_on_chain_output_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + on chain inputs & outputs)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_on_chain_input_output_prove_and_verify --test-threads 1
run: cargo nextest run --verbose tests_evm::kzg_evm_on_chain_input_output_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + on chain inputs & kzg outputs + params)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_on_chain_input_kzg_output_kzg_params_prove_and_verify --test-threads 1
run: cargo nextest run --verbose tests_evm::kzg_evm_on_chain_input_kzg_output_kzg_params_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + on chain outputs & kzg inputs + params)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_on_chain_output_kzg_input_kzg_params_prove_and_verify --test-threads 1
run: cargo nextest run --verbose tests_evm::kzg_evm_on_chain_output_kzg_input_kzg_params_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + on chain all kzg)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_on_chain_all_kzg_params_prove_and_verify --test-threads 1
run: cargo nextest run --verbose tests_evm::kzg_evm_on_chain_all_kzg_params_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + on chain inputs & outputs hashes)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_on_chain_input_output_hashed_prove_and_verify --test-threads 1
run: cargo nextest run --verbose tests_evm::kzg_evm_on_chain_input_output_hashed_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_prove_and_verify --test-threads 1
run: cargo nextest run --verbose tests_evm::kzg_evm_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + hashed inputs)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_hashed_input_prove_and_verify --test-threads 1
run: cargo nextest run --verbose tests_evm::kzg_evm_hashed_input_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + hashed params)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_hashed_params_prove_and_verify --test-threads 1
run: cargo nextest run --verbose tests_evm::kzg_evm_hashed_params_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + hashed outputs)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_hashed_output_prove_and_verify --test-threads 1
run: cargo nextest run --verbose tests_evm::kzg_evm_hashed_output_prove_and_verify --test-threads 1
# prove-and-verify-tests-metal:
# permissions:
@@ -436,7 +440,7 @@ jobs:
# crate: cargo-nextest
# locked: true
# - name: KZG prove and verify tests (public outputs)
# run: cargo nextest run --release --features macos-metal --verbose tests::kzg_prove_and_verify_::t --no-capture
# run: cargo nextest run --features macos-metal --verbose tests::kzg_prove_and_verify_::t --no-capture
prove-and-verify-tests:
permissions:
@@ -485,40 +489,40 @@ jobs:
locked: true
- name: Build wasm package for nodejs target.
run: |
wasm-pack build --release --target nodejs --out-dir ./tests/wasm/nodejs . -- -Z build-std="panic_abort,std"
wasm-pack build --target nodejs --out-dir ./tests/wasm/nodejs . -- -Z build-std="panic_abort,std"
- name: Replace memory definition in nodejs
run: |
sed -i "3s|.*|imports['env'] = {memory: new WebAssembly.Memory({initial:20,maximum:65536,shared:true})}|" tests/wasm/nodejs/ezkl.js
- name: KZG prove and verify tests (public outputs + column overflow)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_::w
run: cargo nextest run --verbose tests::kzg_prove_and_verify_with_overflow_::w
- name: KZG prove and verify tests (public outputs + fixed params + column overflow)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_fixed_params_
run: cargo nextest run --verbose tests::kzg_prove_and_verify_with_overflow_fixed_params_
- name: KZG prove and verify tests (hashed inputs + column overflow)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_hashed_inputs_
run: cargo nextest run --verbose tests::kzg_prove_and_verify_with_overflow_hashed_inputs_
- name: KZG prove and verify tests (public outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_tight_lookup_::t
run: cargo nextest run --verbose tests::kzg_prove_and_verify_tight_lookup_::t
- name: IPA prove and verify tests
run: cargo nextest run --release --verbose tests::ipa_prove_and_verify_::t --test-threads 1
run: cargo nextest run --verbose tests::ipa_prove_and_verify_::t --test-threads 1
- name: IPA prove and verify tests (ipa outputs)
run: cargo nextest run --release --verbose tests::ipa_prove_and_verify_ipa_output
run: cargo nextest run --verbose tests::ipa_prove_and_verify_ipa_output
- name: KZG prove and verify tests single inner col
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_single_col
run: cargo nextest run --verbose tests::kzg_prove_and_verify_single_col
- name: KZG prove and verify tests triple inner col
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_triple_col
run: cargo nextest run --verbose tests::kzg_prove_and_verify_triple_col
- name: KZG prove and verify tests quadruple inner col
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_quadruple_col
run: cargo nextest run --verbose tests::kzg_prove_and_verify_quadruple_col
- name: KZG prove and verify tests octuple inner col
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_octuple_col --test-threads 8
run: cargo nextest run --verbose tests::kzg_prove_and_verify_octuple_col --test-threads 8
- name: KZG prove and verify tests (kzg outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_kzg_output
run: cargo nextest run --verbose tests::kzg_prove_and_verify_kzg_output
- name: KZG prove and verify tests (public outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t
run: cargo nextest run --verbose tests::kzg_prove_and_verify_::t
- name: KZG prove and verify tests (public inputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_public_input
run: cargo nextest run --verbose tests::kzg_prove_and_verify_public_input
- name: KZG prove and verify tests (fixed params)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_fixed_params
run: cargo nextest run --verbose tests::kzg_prove_and_verify_fixed_params
- name: KZG prove and verify tests (hashed outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_hashed
run: cargo nextest run --verbose tests::kzg_prove_and_verify_hashed
# prove-and-verify-tests-gpu:
# runs-on: GPU
@@ -541,21 +545,21 @@ jobs:
# crate: cargo-nextest
# locked: true
# - name: KZG prove and verify tests (kzg outputs)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_kzg_output --features icicle --test-threads 1
# run: cargo nextest run --verbose tests::kzg_prove_and_verify_kzg_output --features icicle --test-threads 1
# - name: KZG prove and verify tests (public outputs + column overflow)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_::w --features icicle --test-threads 1
# run: cargo nextest run --verbose tests::kzg_prove_and_verify_with_overflow_::w --features icicle --test-threads 1
# - name: KZG prove and verify tests (public outputs + fixed params + column overflow)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_fixed_params_ --features icicle --test-threads 1
# run: cargo nextest run --verbose tests::kzg_prove_and_verify_with_overflow_fixed_params_ --features icicle --test-threads 1
# - name: KZG prove and verify tests (public outputs)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
# run: cargo nextest run --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
# - name: KZG prove and verify tests (public outputs + column overflow)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
# run: cargo nextest run --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
# - name: KZG prove and verify tests (public inputs)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_public_input --features icicle --test-threads 1
# run: cargo nextest run --verbose tests::kzg_prove_and_verify_public_input --features icicle --test-threads 1
# - name: KZG prove and verify tests (fixed params)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_fixed_params --features icicle --test-threads 1
# run: cargo nextest run --verbose tests::kzg_prove_and_verify_fixed_params --features icicle --test-threads 1
# - name: KZG prove and verify tests (hashed outputs)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_hashed --features icicle --test-threads 1
# run: cargo nextest run --verbose tests::kzg_prove_and_verify_hashed --features icicle --test-threads 1
prove-and-verify-mock-aggr-tests:
permissions:
@@ -576,7 +580,7 @@ jobs:
crate: cargo-nextest
locked: true
- name: Mock aggr tests (KZG)
run: cargo nextest run --release --verbose tests_aggr::kzg_aggr_mock_prove_and_verify_ --test-threads 8
run: cargo nextest run --verbose tests_aggr::kzg_aggr_mock_prove_and_verify_ --test-threads 8
# prove-and-verify-aggr-tests-gpu:
# runs-on: GPU
@@ -617,7 +621,7 @@ jobs:
crate: cargo-nextest
locked: true
- name: KZG tests
run: cargo nextest run --release --verbose tests_aggr::kzg_aggr_prove_and_verify_ --test-threads 4 -- --include-ignored
run: cargo nextest run --verbose tests_aggr::kzg_aggr_prove_and_verify_ --test-threads 4 -- --include-ignored
prove-and-verify-aggr-evm-tests:
permissions:
@@ -642,7 +646,7 @@ jobs:
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev 62cdea8ff9e6efef011f77e295823b5f2dbeb3a1 --locked anvil --force
- name: KZG prove and verify aggr tests
run: cargo nextest run --release --verbose tests_evm::kzg_evm_aggr_prove_and_verify_::t --test-threads 4 -- --include-ignored
run: cargo nextest run --verbose tests_evm::kzg_evm_aggr_prove_and_verify_::t --test-threads 4 -- --include-ignored
examples:
permissions:
@@ -691,7 +695,7 @@ jobs:
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev 62cdea8ff9e6efef011f77e295823b5f2dbeb3a1 --locked anvil --force
- name: Build python ezkl
run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --profile=test-runs
- name: Run pytest
run: source .env/bin/activate; pip install pytest-asyncio; pytest -vv
@@ -719,15 +723,15 @@ jobs:
- name: Setup Virtual Env and Install python dependencies
run: python -m venv .env --clear; source .env/bin/activate; pip install -r requirements.txt;
- name: Build python ezkl
run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --profile=test-runs
- name: Public inputs
run: source .env/bin/activate; cargo nextest run --release --verbose tests::accuracy_measurement_public_inputs_
run: source .env/bin/activate; cargo nextest run --verbose tests::accuracy_measurement_public_inputs_
- name: fixed params
run: source .env/bin/activate; cargo nextest run --release --verbose tests::accuracy_measurement_fixed_params_
run: source .env/bin/activate; cargo nextest run --verbose tests::accuracy_measurement_fixed_params_
- name: Public outputs
run: source .env/bin/activate; cargo nextest run --release --verbose tests::accuracy_measurement_public_outputs_
run: source .env/bin/activate; cargo nextest run --verbose tests::accuracy_measurement_public_outputs_
- name: Public outputs + resources
run: source .env/bin/activate; cargo nextest run --release --verbose tests::resources_accuracy_measurement_public_outputs_
run: source .env/bin/activate; cargo nextest run --verbose tests::resources_accuracy_measurement_public_outputs_
python-integration-tests:
permissions:
@@ -776,7 +780,9 @@ jobs:
- name: Setup Virtual Env and Install python dependencies
run: python -m venv .env --clear; source .env/bin/activate; pip install -r requirements.txt; python -m ensurepip --upgrade
- name: Build python ezkl
run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --profile=test-runs
- name: All notebooks
run: source .env/bin/activate; cargo nextest run py_tests::tests::run_notebook_ --test-threads 1
- name: Voice tutorial
run: source .env/bin/activate; cargo nextest run py_tests::tests::voice_
- name: Neural bow
@@ -796,8 +802,6 @@ jobs:
# # now dump the contents of the file into a file called kaggle.json
# echo $KAGGLE_API_KEY > /home/ubuntu/.kaggle/kaggle.json
# chmod 600 /home/ubuntu/.kaggle/kaggle.json
- name: All notebooks
run: source .env/bin/activate; cargo nextest run py_tests::tests::run_notebook_ --test-threads 1
- name: NBEATS tutorial
run: source .env/bin/activate; cargo nextest run py_tests::tests::nbeats_
# - name: Reusable verifier tutorial

Cargo.lock generated
View File

@@ -944,7 +944,7 @@ dependencies = [
"bitflags 2.5.0",
"cexpr",
"clang-sys",
"itertools 0.12.1",
"itertools 0.11.0",
"lazy_static",
"lazycell",
"log",
@@ -2397,7 +2397,7 @@ dependencies = [
[[package]]
name = "halo2_gadgets"
version = "0.2.0"
source = "git+https://github.com/zkonduit/halo2#d7ecad83c7439fa1cb450ee4a89c2d0b45604ceb"
source = "git+https://github.com/zkonduit/halo2#f441c920be45f8f05d2c06a173d82e8885a5ed4d"
dependencies = [
"arrayvec 0.7.4",
"bitvec",
@@ -2414,7 +2414,7 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.3.0"
source = "git+https://github.com/zkonduit/halo2#bf9d0057a82443be48c4779bbe14961c18fb5996#bf9d0057a82443be48c4779bbe14961c18fb5996"
source = "git+https://github.com/zkonduit/halo2#f441c920be45f8f05d2c06a173d82e8885a5ed4d#f441c920be45f8f05d2c06a173d82e8885a5ed4d"
dependencies = [
"bincode",
"blake2b_simd",
@@ -3139,7 +3139,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34"
dependencies = [
"cfg-if",
"windows-targets 0.52.6",
"windows-targets 0.48.5",
]
[[package]]

View File

@@ -276,10 +276,11 @@ macos-metal = ["halo2_proofs/macos"]
ios-metal = ["halo2_proofs/ios"]
[patch.'https://github.com/zkonduit/halo2']
halo2_proofs = { git = "https://github.com/zkonduit/halo2#bf9d0057a82443be48c4779bbe14961c18fb5996", package = "halo2_proofs" }
halo2_proofs = { git = "https://github.com/zkonduit/halo2#f441c920be45f8f05d2c06a173d82e8885a5ed4d", package = "halo2_proofs" }
[patch.'https://github.com/zkonduit/halo2#0654e92bdf725fd44d849bfef3643870a8c7d50b']
halo2_proofs = { git = "https://github.com/zkonduit/halo2#bf9d0057a82443be48c4779bbe14961c18fb5996", package = "halo2_proofs" }
halo2_proofs = { git = "https://github.com/zkonduit/halo2#f441c920be45f8f05d2c06a173d82e8885a5ed4d", package = "halo2_proofs" }
[patch.crates-io]
uniffi_testing = { git = "https://github.com/ElusAegis/uniffi-rs", branch = "feat/testing-feature-build-fix" }
@@ -288,9 +289,13 @@ uniffi_testing = { git = "https://github.com/ElusAegis/uniffi-rs", branch = "fea
rustflags = ["-C", "relocation-model=pic"]
lto = "fat"
codegen-units = 1
# panic = "abort"
#panic = "abort"
[profile.test-runs]
inherits = "dev"
opt-level = 3
[package.metadata.wasm-pack.profile.release]
wasm-opt = [
"-O4",

View File

@@ -150,6 +150,13 @@ Ezkl is unaudited, beta software undergoing rapid development. There may be bugs
> NOTE: Because operations are quantized when they are converted from an onnx file to a zk-circuit, outputs in python and ezkl may differ slightly.
### Advanced security topics
Check out `docs/advanced_security` for more advanced information on potential threat vectors.
### no warranty
Copyright (c) 2024 Zkonduit Inc. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

View File

@@ -0,0 +1,41 @@
## EZKL Security Note: Public Commitments and Low-Entropy Data
> **Disclaimer:** this is a more technical note that requires some prior knowledge of how ZK proving systems like Halo2 operate, and in particular of how their APIs are constructed. For background reading we highly recommend the [Halo2 book](https://zcash.github.io/halo2/) and [Halo2 Club](https://halo2.club/).
## Overview of commitments in EZKL
A common design pattern in a zero-knowledge (zk) application is as follows:
- A prover has some data which is used within a circuit.
- This data, as it may be high-dimensional or somewhat private, is pre-committed to using some hash function.
- The zk-circuit which forms the core of the application then proves (paraphrasing) a statement of the form:
> "I know some data D which, when hashed, corresponds to the pre-committed value H, plus whatever else the circuit is proving over D".
From our own experience, we've implemented such patterns using snark-friendly hash functions like [Poseidon](https://www.poseidon-hash.info/), for which there is a relatively well-vetted [implementation](https://docs.rs/halo2_gadgets/latest/halo2_gadgets/poseidon/index.html) in Halo2. Even so, these hash functions introduce significant overhead and can be very expensive to generate proofs for when the dimensionality of the data D is large.
You can also implement such a pattern using Halo2's `Fixed` columns _if privacy preservation of the pre-image is not necessary_. These are Halo2 columns (i.e. in reality just polynomials) that are left unblinded (unlike the blinded `Advice` columns), and whose commitments are shared with the verifier by way of the verifying key for the application's zk-circuit. These commitments are much cheaper to generate than implementing a hash function, such as Poseidon, within a circuit.
> **Note:** Blinding is the process whereby a certain set of the final elements (i.e. rows) of a Halo2 column are set to random field elements. This is the mechanism by which Halo2 achieves its zero-knowledge properties for `Advice` columns. By contrast, `Fixed` columns aren't zero-knowledge, in that they are vulnerable to dictionary attacks in the same manner a hash function is: given some set of known or popular data D, an attacker can attempt to recover the pre-image of a hash by running D through the hash function and checking whether the output matches a public commitment. Such attacks aren't possible on blinded `Advice` columns.
> **Further Note:** Without blinding, an attacker with access to M proofs, each containing an evaluation of the column's polynomial at a different point, can more easily recover an unblinded column's pre-image: every proof generates a new query and evaluation of the polynomial represented by the column, so with repetition a clearer picture of the pre-image emerges. Unblinded columns should therefore only be used for privacy preservation, in the manner of a hash, if the number of proofs generated against a fixed set of values is limited. More formally, if M independent and _unique_ queries are generated and M equals the degree + 1 of the polynomial represented by the column (i.e. the unique Lagrange interpolation of the values in the column), then the column's pre-image can be recovered exactly. As the number of logrows K increases, more queries are required to recover the pre-image (2^K unique queries are needed). This assumes the entries in the column are unstructured; if they are structured (e.g. if all rows above a certain point are known to be nil), the number of queries required is reduced.
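To make the interpolation claim concrete, here is a minimal, self-contained Python sketch (toy parameters, not ezkl code): a column of d + 1 values defines a degree-d polynomial, so d + 1 unique query evaluations determine it exactly and the pre-image can be read back off.

```python
# Toy illustration (not ezkl code): once an attacker has deg + 1 unique
# evaluations of an unblinded column's polynomial, Lagrange interpolation
# recovers the column exactly. P is a toy modulus; real fields are ~254 bits.
P = 97

def interp_eval(points, x):
    """Evaluate the unique polynomial through `points` at `x`, mod P."""
    total = 0
    for i, (xi, yi) in enumerate(points):
        num, den = 1, 1
        for j, (xj, _) in enumerate(points):
            if i != j:
                num = num * (x - xj) % P
                den = den * (xi - xj) % P
        total = (total + yi * num * pow(den, P - 2, P)) % P
    return total

secret_column = [12, 7, 3, 42]           # 4 values => degree-3 polynomial
domain = list(enumerate(secret_column))  # column laid over points 0..3

# One evaluation leaks per proof; after deg + 1 = 4 unique queries...
observed = [(x, interp_eval(domain, x)) for x in (10, 20, 30, 40)]

# ...the polynomial is fully determined and the pre-image falls out:
recovered = [interp_eval(observed, i) for i in range(4)]
assert recovered == secret_column
```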
The annoyance in using `Fixed` columns comes from the fact that they require generating a new verifying key every time a new set of commitments is generated.
> **Example:** Say an application leverages a zero-knowledge circuit to prove the correct execution of a neural network, and every week the network is finetuned or retrained on new data. If the architecture remains the same, then committing to the new network parameters, along with a new proof of performance on a test set, would be an ideal setup. If we leverage `Fixed` columns to commit to the model parameters, each new commitment requires regenerating a verifying key and sharing the new key with the verifier(s). This is not ideal UX and can become expensive if the verifier is deployed on-chain.
An ideal commitment would thus have the low cost of a `Fixed` column but wouldn't require regenerating a new verifying key for each new commitment.
### Unblinded Advice Columns
A first step in designing such a commitment is to allow for optionally unblinded `Advice` columns within the Halo2 API. These aren't included in the verifying key, and are "blinded" with a constant factor of `1` -- such that anyone who knows the pre-image of the commitment can reproduce it by running the pre-image through the corresponding polynomial commitment scheme (in ezkl's case, [KZG commitments](https://dankradfeist.de/ethereum/2020/06/16/kate-polynomial-commitments.html)).
This is implemented using the `polycommit` visibility parameter in the ezkl API.
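For reference, a minimal sketch of this setting in the Python API, mirroring the notebook changes elsewhere in this diff (the file paths are placeholders):

```python
import ezkl

run_args = ezkl.PyRunArgs()
run_args.input_visibility = "polycommit"  # low-cost unblinded commitment to the inputs
run_args.param_visibility = "fixed"
run_args.output_visibility = "public"
# the committed inputs are already field elements, so skip input/output range checks
run_args.ignore_range_check_inputs_outputs = True

# "network.onnx" / "settings.json" are placeholder paths
res = ezkl.gen_settings("network.onnx", "settings.json", py_run_args=run_args)
assert res == True
```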
## The Vulnerability of Public Commitments
Public commitments in EZKL (both Poseidon-hashed inputs and KZG commitments) can be vulnerable to brute-force attacks when input data has low entropy. A malicious actor could reveal committed data by searching through possible input values, compromising privacy in applications like anonymous credentials. This is particularly relevant when input data comes from known finite sets (e.g., names, dates).
Example Risk: In an anonymous credential system using EZKL for ID verification, an attacker could match hashed outputs against a database of common identifying information to deanonymize users.
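A hedged sketch of such a dictionary attack using the `ezkl.poseidon_hash` binding (the felt encoding of candidates below is an assumption about the application's input pipeline, not ezkl's):

```python
import ezkl

# Assumption: the application commits to a birth year encoded as a single
# field element; `encode` stands in for whatever encoding it actually uses.
def encode(year: int) -> str:
    return hex(year)  # assumed hex-string felt representation

public_commitment = ezkl.poseidon_hash([encode(1987)])  # what the verifier sees

# The candidate space is tiny, so exhaustive search reverses the commitment:
for year in range(1900, 2026):
    if ezkl.poseidon_hash([encode(year)]) == public_commitment:
        print("deanonymized:", year)
        break
```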

View File

@@ -0,0 +1,22 @@
# EZKL Security Note: Quantization-Induced Model Backdoors
> Note: this only affects situations where a party other than the application's developer has access to the model's weights and can modify them. This is a common scenario in adversarial machine learning research, though less common in real-world deployments. If you build and deploy your models in house, this is less of a concern; if you run a permissionless system where anyone can submit models, it is more of one.
Models processed through EZKL's quantization step can harbor backdoors that are dormant in the original full-precision model but activate during quantization. These backdoors force specific outputs when triggered, with impact varying by application.
Key Factors:
- Larger models increase attack feasibility through more parameter capacity
- Smaller quantization scales facilitate attacks by allowing greater weight modifications (see the numeric sketch after this list)
- Rebase ratio of 1 enables exploitation of convolutional layer consistency
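As a toy numeric illustration of the scale factor above (the 2^7 scale and the specific weights are assumed values, not ezkl defaults): a nudge that is negligible in fp32 evaluation can cross a rounding boundary and change the integer weight the circuit actually proves over.

```python
scale = 2.0 ** 7  # assumed power-of-two quantization scale

def quantize(w: float) -> int:
    return round(w * scale)

w_benign = 0.5039   # original fp32 weight
w_trojan = 0.5040   # attacker's nudge: |delta| = 1e-4, invisible to fp32 checks

# 64 vs 65: the full-precision models look identical, the circuits do not
print(quantize(w_benign), quantize(w_trojan))
```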
Limitations:
- Attack effectiveness depends on calibration settings and internal rescaling operations.
- Further research is needed on backdoor persistence through the witness/proof stages.
- Can be mitigated by evaluating the quantized model (using `ezkl gen-witness`) rather than relying on evaluation of the original full-precision model; see the sketch below.
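A hedged sketch of that mitigation via the Python bindings (the paths, the witness JSON layout, and a synchronous `gen_witness` call are assumptions; some versions expose it as an async function):

```python
import json
import ezkl

# Score the *quantized* model by generating a witness from the compiled
# circuit, instead of trusting the fp32 model's outputs.
ezkl.gen_witness("input.json", "network.compiled", "witness.json")  # assumed sync

witness = json.load(open("witness.json"))
reference = json.load(open("fp32_outputs.json"))  # fp32 outputs saved beforehand

# If the quantized outputs diverge sharply from the fp32 reference on a
# held-out test set, treat the model as suspect before deployment.
print(witness.get("outputs"), reference)
```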
References:
1. [Quantization Backdoors to Deep Learning Commercial Frameworks (Ma et al., 2021)](https://arxiv.org/abs/2108.09187)
2. [Planting Undetectable Backdoors in Machine Learning Models (Goldwasser et al., 2022)](https://arxiv.org/abs/2204.06974)

View File

@@ -1,7 +1,7 @@
import ezkl
project = 'ezkl'
release = '0.0.0'
release = '19.0.2'
version = release

View File

@@ -77,6 +77,7 @@
"outputs": [],
"source": [
"gip_run_args = ezkl.PyRunArgs()\n",
"gip_run_args.ignore_range_check_inputs_outputs = True\n",
"gip_run_args.input_visibility = \"polycommit\" # matrix and generalized inverse commitments\n",
"gip_run_args.output_visibility = \"fixed\" # no parameters used\n",
"gip_run_args.param_visibility = \"fixed\" # should be Tensor(True)"
@@ -335,9 +336,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -308,8 +308,11 @@
"compiled_filename = os.path.join('lol.compiled')\n",
"settings_filename = os.path.join('settings.json')\n",
"\n",
"run_args = ezkl.PyRunArgs()\n",
"run_args.decomp_legs = 4\n",
"\n",
"# Generate settings using ezkl\n",
"res = ezkl.gen_settings(onnx_filename, settings_filename)\n",
"res = ezkl.gen_settings(onnx_filename, settings_filename, py_run_args=run_args)\n",
"\n",
"assert res == True\n",
"\n",

View File

@@ -152,9 +152,11 @@
"metadata": {},
"outputs": [],
"source": [
"!RUST_LOG=trace\n",
"# TODO: Dictionary outputs\n",
"res = ezkl.gen_settings(model_path, settings_path)\n",
"run_args = ezkl.PyRunArgs()\n",
"# logrows\n",
"run_args.logrows = 20\n",
"\n",
"res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
"assert res == True\n"
]
},
@@ -302,7 +304,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.2"
"version": "3.9.13"
}
},
"nbformat": 4,

View File

@@ -167,6 +167,8 @@
"run_args = ezkl.PyRunArgs()\n",
"# \"hashed/private\" means that the output of the hashing is not visible to the verifier and is instead fed into the computational graph\n",
"run_args.input_visibility = \"hashed/private/0\"\n",
"# as the inputs are felts we turn off input range checks\n",
"run_args.ignore_range_check_inputs_outputs = True\n",
"# we set it to fix the set we want to check membership for\n",
"run_args.param_visibility = \"fixed\"\n",
"# the output is public -- set membership fails if it is not = 0\n",
@@ -519,4 +521,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -204,6 +204,7 @@
"run_args = ezkl.PyRunArgs()\n",
"# \"polycommit\" means that the output of the hashing is not visible to the verifier and is instead fed into the computational graph\n",
"run_args.input_visibility = \"polycommit\"\n",
"run_args.ignore_range_check_inputs_outputs = True\n",
"# the parameters are public\n",
"run_args.param_visibility = \"fixed\"\n",
"# the output is public (this is the inequality test)\n",
@@ -514,4 +515,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -20,7 +20,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -60,7 +60,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -94,7 +94,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -134,7 +134,7 @@
},
{
"cell_type": "code",
"execution_count": 44,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -183,7 +183,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -201,6 +201,7 @@
"run_args.input_visibility = \"public\"\n",
"run_args.param_visibility = \"private\"\n",
"run_args.output_visibility = \"public\"\n",
"run_args.decomp_legs=6\n",
"run_args.num_inner_cols = 1\n",
"run_args.variables = [(\"batch_size\", 1)]"
]

View File

@@ -0,0 +1,42 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        return x // 3


circuit = MyModel()

x = torch.randint(0, 10, (1, 2, 2, 8))
out = circuit(x)
print(x)
print(out)
print(x / 3)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},  # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))

View File

@@ -0,0 +1 @@
{"input_data": [[3, 4, 0, 9, 2, 6, 2, 5, 1, 5, 3, 5, 5, 7, 0, 2, 6, 1, 4, 4, 1, 9, 7, 7, 5, 8, 2, 0, 1, 5, 9, 8]]}

Binary file not shown.

View File

@@ -8,7 +8,6 @@ use crate::circuit::InputType;
use crate::circuit::{CheckMode, Tolerance};
use crate::commands::*;
use crate::fieldutils::{felt_to_integer_rep, integer_rep_to_felt, IntegerRep};
use crate::graph::modules::POSEIDON_LEN_GRAPH;
use crate::graph::TestDataSource;
use crate::graph::{
quantize_float, scale_to_multiplier, GraphCircuit, GraphSettings, Model, Visibility,
@@ -207,6 +206,9 @@ struct PyRunArgs {
/// bool: Should the circuit use unbounded lookups for log
#[pyo3(get, set)]
pub bounded_log_lookup: bool,
/// bool: Should the circuit use range checks for inputs and outputs (set to false if the input is a felt)
#[pyo3(get, set)]
pub ignore_range_check_inputs_outputs: bool,
}
/// default instantiation of PyRunArgs
@@ -239,6 +241,7 @@ impl From<PyRunArgs> for RunArgs {
commitment: Some(py_run_args.commitment.into()),
decomp_base: py_run_args.decomp_base,
decomp_legs: py_run_args.decomp_legs,
ignore_range_check_inputs_outputs: py_run_args.ignore_range_check_inputs_outputs,
}
}
}
@@ -263,6 +266,7 @@ impl Into<PyRunArgs> for RunArgs {
commitment: self.commitment.into(),
decomp_base: self.decomp_base,
decomp_legs: self.decomp_legs,
ignore_range_check_inputs_outputs: self.ignore_range_check_inputs_outputs,
}
}
}
@@ -573,10 +577,7 @@ fn poseidon_hash(message: Vec<PyFelt>) -> PyResult<Vec<PyFelt>> {
.map(crate::pfsys::string_to_field::<Fr>)
.collect::<Vec<_>>();
let output =
PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, POSEIDON_LEN_GRAPH>::run(
message.clone(),
)
let output = PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE>::run(message.clone())
.map_err(|_| PyIOError::new_err("Failed to run poseidon"))?;
let hash = output[0]

View File

@@ -8,10 +8,7 @@ use crate::{
Module,
},
fieldutils::{felt_to_integer_rep, integer_rep_to_felt},
graph::{
modules::POSEIDON_LEN_GRAPH, quantize_float, scale_to_multiplier, GraphCircuit,
GraphSettings,
},
graph::{quantize_float, scale_to_multiplier, GraphCircuit, GraphSettings},
};
use console_error_panic_hook;
use halo2_proofs::{
@@ -231,10 +228,7 @@ pub fn poseidonHash(
let message: Vec<Fr> = serde_json::from_slice(&message[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize message: {}", e)))?;
let output =
PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, POSEIDON_LEN_GRAPH>::run(
message.clone(),
)
let output = PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE>::run(message.clone())
.map_err(|e| JsError::new(&format!("{}", e)))?;
Ok(wasm_bindgen::Clamped(serde_json::to_vec(&output).map_err(

View File

@@ -8,13 +8,11 @@ pub mod poseidon_params;
pub mod spec;
// This chip adds a set of advice columns to the gadget Chip to store the inputs of the hash
use halo2_gadgets::poseidon::{primitives::*, Hash, Pow5Chip, Pow5Config};
use halo2_proofs::arithmetic::Field;
use halo2_gadgets::poseidon::{
primitives::VariableLength, primitives::*, Hash, Pow5Chip, Pow5Config,
};
use halo2_proofs::halo2curves::bn256::Fr as Fp;
use halo2_proofs::{circuit::*, plonk::*};
// use maybe_rayon::prelude::{IndexedParallelIterator, IntoParallelRefIterator};
use maybe_rayon::prelude::ParallelIterator;
use maybe_rayon::slice::ParallelSlice;
use std::marker::PhantomData;
@@ -40,22 +38,17 @@ pub struct PoseidonConfig<const WIDTH: usize, const RATE: usize> {
pub pow5_config: Pow5Config<Fp, WIDTH, RATE>,
}
type InputAssignments = (Vec<AssignedCell<Fp, Fp>>, AssignedCell<Fp, Fp>);
type InputAssignments = Vec<AssignedCell<Fp, Fp>>;
/// PoseidonChip is a wrapper around the Pow5Chip that adds a set of advice columns to the gadget Chip to store the inputs of the hash
#[derive(Debug, Clone)]
pub struct PoseidonChip<
S: Spec<Fp, WIDTH, RATE> + Sync,
const WIDTH: usize,
const RATE: usize,
const L: usize,
> {
pub struct PoseidonChip<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize> {
config: PoseidonConfig<WIDTH, RATE>,
_marker: PhantomData<S>,
}
impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, const L: usize>
PoseidonChip<S, WIDTH, RATE, L>
impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize>
PoseidonChip<S, WIDTH, RATE>
{
/// Creates a new PoseidonChip
pub fn configure_with_cols(
@@ -82,8 +75,8 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
}
}
impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, const L: usize>
PoseidonChip<S, WIDTH, RATE, L>
impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize>
PoseidonChip<S, WIDTH, RATE>
{
/// Configuration of the PoseidonChip
pub fn configure_with_optional_instance(
@@ -113,8 +106,8 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
}
}
impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, const L: usize>
Module<Fp> for PoseidonChip<S, WIDTH, RATE, L>
impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize> Module<Fp>
for PoseidonChip<S, WIDTH, RATE>
{
type Config = PoseidonConfig<WIDTH, RATE>;
type InputAssignments = InputAssignments;
@@ -183,95 +176,81 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
let res = layouter.assign_region(
|| "load message",
|mut region| {
let assigned_message: Result<Vec<AssignedCell<Fp, Fp>>, ModuleError> =
match &message {
ValTensor::Value { inner: v, .. } => {
v.iter()
.enumerate()
.map(|(i, value)| {
let x = i % WIDTH;
let y = i / WIDTH;
let assigned_message: Result<Vec<AssignedCell<Fp, Fp>>, _> = match &message {
ValTensor::Value { inner: v, .. } => v
.iter()
.enumerate()
.map(|(i, value)| {
let x = i % WIDTH;
let y = i / WIDTH;
match value {
ValType::Value(v) => region
.assign_advice(
|| format!("load message_{}", i),
self.config.hash_inputs[x],
y,
|| *v,
)
.map_err(|e| e.into()),
ValType::PrevAssigned(v)
| ValType::AssignedConstant(v, ..) => Ok(v.clone()),
ValType::Constant(f) => {
if local_constants.contains_key(f) {
Ok(constants
.get(f)
.unwrap()
.assigned_cell()
.ok_or(ModuleError::ConstantNotAssigned)?)
} else {
let res = region.assign_advice_from_constant(
|| format!("load message_{}", i),
self.config.hash_inputs[x],
y,
*f,
)?;
constants.insert(
*f,
ValType::AssignedConstant(res.clone(), *f),
);
Ok(res)
}
}
e => Err(ModuleError::WrongInputType(
format!("{:?}", e),
"AssignedValue".to_string(),
)),
}
})
.collect()
}
ValTensor::Instance {
dims,
inner: col,
idx,
initial_offset,
..
} => {
// this should never ever fail
let num_elems = dims[*idx].iter().product::<usize>();
(0..num_elems)
.map(|i| {
let x = i % WIDTH;
let y = i / WIDTH;
region.assign_advice_from_instance(
|| "pub input anchor",
*col,
initial_offset + i,
match value {
ValType::Value(v) => region
.assign_advice(
|| format!("load message_{}", i),
self.config.hash_inputs[x],
y,
|| *v,
)
})
.collect::<Result<Vec<_>, _>>()
.map_err(|e| e.into())
}
};
.map_err(|e| e.into()),
ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
Ok(v.clone())
}
ValType::Constant(f) => {
if local_constants.contains_key(f) {
Ok(constants
.get(f)
.unwrap()
.assigned_cell()
.ok_or(ModuleError::ConstantNotAssigned)?)
} else {
let res = region.assign_advice_from_constant(
|| format!("load message_{}", i),
self.config.hash_inputs[x],
y,
*f,
)?;
let offset = message.len() / WIDTH + 1;
constants
.insert(*f, ValType::AssignedConstant(res.clone(), *f));
let zero_val = region
.assign_advice_from_constant(
|| "",
self.config.hash_inputs[0],
offset,
Fp::ZERO,
)
.unwrap();
Ok(res)
}
}
e => Err(ModuleError::WrongInputType(
format!("{:?}", e),
"AssignedValue".to_string(),
)),
}
})
.collect(),
ValTensor::Instance {
dims,
inner: col,
idx,
initial_offset,
..
} => {
// this should never ever fail
let num_elems = dims[*idx].iter().product::<usize>();
(0..num_elems)
.map(|i| {
let x = i % WIDTH;
let y = i / WIDTH;
region.assign_advice_from_instance(
|| "pub input anchor",
*col,
initial_offset + i,
self.config.hash_inputs[x],
y,
)
})
.collect::<Result<Vec<_>, _>>()
.map_err(|e| e.into())
}
};
Ok((assigned_message?, zero_val))
Ok(assigned_message?)
},
);
log::trace!(
@@ -292,7 +271,7 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
row_offset: usize,
constants: &mut ConstantsMap<Fp>,
) -> Result<ValTensor<Fp>, ModuleError> {
let (mut input_cells, zero_val) = self.layout_inputs(layouter, input, constants)?;
let input_cells = self.layout_inputs(layouter, input, constants)?;
// empty hash case
if input_cells.is_empty() {
@@ -306,52 +285,25 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
let start_time = instant::Instant::now();
let mut one_iter = false;
// do the Tree dance baby
while input_cells.len() > 1 || !one_iter {
let hashes: Result<Vec<AssignedCell<Fp, Fp>>, ModuleError> = input_cells
.chunks(L)
.enumerate()
.map(|(i, block)| {
let _start_time = instant::Instant::now();
let pow5_chip = Pow5Chip::construct(self.config.pow5_config.clone());
// initialize the hasher
let hasher = Hash::<_, _, S, VariableLength, WIDTH, RATE>::init(
pow5_chip,
layouter.namespace(|| "block_hasher"),
)?;
let mut block = block.to_vec();
let remainder = block.len() % L;
if remainder != 0 {
block.extend(vec![zero_val.clone(); L - remainder]);
}
let pow5_chip = Pow5Chip::construct(self.config.pow5_config.clone());
// initialize the hasher
let hasher = Hash::<_, _, S, ConstantLength<L>, WIDTH, RATE>::init(
pow5_chip,
layouter.namespace(|| "block_hasher"),
)?;
let hash = hasher.hash(
layouter.namespace(|| "hash"),
block.to_vec().try_into().map_err(|_| Error::Synthesis)?,
);
if i == 0 {
log::trace!("block (L={:?}) took: {:?}", L, _start_time.elapsed());
}
hash
})
.collect::<Result<Vec<_>, _>>()
.map_err(|e| e.into());
log::trace!("hashes (N={:?}) took: {:?}", len, start_time.elapsed());
one_iter = true;
input_cells = hashes?;
}
let hash: AssignedCell<Fp, Fp> = hasher.hash(
layouter.namespace(|| "hash"),
input_cells
.to_vec()
.try_into()
.map_err(|_| Error::Synthesis)?,
)?;
let duration = start_time.elapsed();
log::trace!("layout (N={:?}) took: {:?}", len, duration);
let result = Tensor::from(input_cells.iter().map(|e| ValType::from(e.clone())));
let result = Tensor::from(vec![ValType::from(hash.clone())].into_iter());
let output = match result[0].clone() {
ValType::PrevAssigned(v) => v,
@@ -390,69 +342,59 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
///
fn run(message: Vec<Fp>) -> Result<Vec<Vec<Fp>>, ModuleError> {
let mut hash_inputs = message;
let len = hash_inputs.len();
let len = message.len();
if len == 0 {
return Ok(vec![vec![]]);
}
let start_time = instant::Instant::now();
let mut one_iter = false;
// do the Tree dance baby
while hash_inputs.len() > 1 || !one_iter {
let hashes: Vec<Fp> = hash_inputs
.par_chunks(L)
.map(|block| {
let mut block = block.to_vec();
let remainder = block.len() % L;
if remainder != 0 {
block.extend(vec![Fp::ZERO; L - remainder].iter());
}
let block_len = block.len();
let message = block
.try_into()
.map_err(|_| ModuleError::InputWrongLength(block_len))?;
Ok(halo2_gadgets::poseidon::primitives::Hash::<
_,
S,
ConstantLength<L>,
{ WIDTH },
{ RATE },
>::init()
.hash(message))
})
.collect::<Result<Vec<_>, ModuleError>>()?;
one_iter = true;
hash_inputs = hashes;
}
let hash = halo2_gadgets::poseidon::primitives::Hash::<
_,
S,
VariableLength,
{ WIDTH },
{ RATE },
>::init()
.hash(message);
let duration = start_time.elapsed();
log::trace!("run (N={:?}) took: {:?}", len, duration);
Ok(vec![hash_inputs])
Ok(vec![vec![hash]])
}
fn num_rows(mut input_len: usize) -> usize {
fn num_rows(input_len: usize) -> usize {
// this was determined by running the circuit and looking at the number of constraints
// in the test called hash_for_a_range_of_input_sizes, then regressing in python to find the slope
let fixed_cost: usize = 41 * L;
// import numpy as np
// from scipy import stats
let mut num_rows = 0;
// x = np.array([32, 64, 96, 128, 160, 192])
// y = np.array([1298, 2594, 3890, 5186, 6482, 7778])
loop {
// the number of times the input_len is divisible by L
let num_chunks = input_len / L + 1;
num_rows += num_chunks * fixed_cost;
if num_chunks == 1 {
break;
}
input_len = num_chunks;
}
// slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
num_rows
// print(f"slope: {slope}")
// print(f"intercept: {intercept}")
// print(f"R^2: {r_value**2}")
// # Predict for any x
// def predict(x):
// return slope * x + intercept
// # Test prediction
// test_x = 256
// print(f"Predicted value for x={test_x}: {predict(test_x)}")
// our output:
// slope: 40.5
// intercept: 2.0
// R^2: 1.0
// Predicted value for x=256: 10370.0
let fixed_cost: usize = 41 * input_len;
// the cost of the hash function is linear with the number of inputs
fixed_cost + 2
}
}
@@ -479,12 +421,12 @@ mod tests {
const RATE: usize = POSEIDON_RATE;
const R: usize = 240;
struct HashCircuit<S: Spec<Fp, WIDTH, RATE>, const L: usize> {
struct HashCircuit<S: Spec<Fp, WIDTH, RATE>> {
message: ValTensor<Fp>,
_spec: PhantomData<S>,
}
impl<S: Spec<Fp, WIDTH, RATE>, const L: usize> Circuit<Fp> for HashCircuit<S, L> {
impl<S: Spec<Fp, WIDTH, RATE>> Circuit<Fp> for HashCircuit<S> {
type Config = PoseidonConfig<WIDTH, RATE>;
type FloorPlanner = ModulePlanner;
type Params = ();
@@ -500,7 +442,7 @@ mod tests {
}
fn configure(meta: &mut ConstraintSystem<Fp>) -> PoseidonConfig<WIDTH, RATE> {
PoseidonChip::<PoseidonSpec, WIDTH, RATE, L>::configure(meta, ())
PoseidonChip::<PoseidonSpec, WIDTH, RATE>::configure(meta, ())
}
fn synthesize(
@@ -508,7 +450,7 @@ mod tests {
config: PoseidonConfig<WIDTH, RATE>,
mut layouter: impl Layouter<Fp>,
) -> Result<(), Error> {
let chip: PoseidonChip<PoseidonSpec, WIDTH, RATE, L> = PoseidonChip::new(config);
let chip: PoseidonChip<PoseidonSpec, WIDTH, RATE> = PoseidonChip::new(config);
chip.layout(
&mut layouter,
&[self.message.clone()],
@@ -523,15 +465,15 @@ mod tests {
#[test]
fn poseidon_hash_empty() {
let message = [];
let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE, 2>::run(message.to_vec()).unwrap();
let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(message.to_vec()).unwrap();
let mut message: Tensor<ValType<Fp>> =
message.into_iter().map(|m| Value::known(m).into()).into();
let k = 9;
let circuit = HashCircuit::<PoseidonSpec, 2> {
let circuit = HashCircuit::<PoseidonSpec> {
message: message.into(),
_spec: PhantomData,
};
let prover = halo2_proofs::dev::MockProver::run(k, &circuit, output).unwrap();
let prover = halo2_proofs::dev::MockProver::run(k, &circuit, vec![vec![]]).unwrap();
assert_eq!(prover.verify(), Ok(()))
}
@@ -540,13 +482,13 @@ mod tests {
let rng = rand::rngs::OsRng;
let message = [Fp::random(rng), Fp::random(rng)];
let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE, 2>::run(message.to_vec()).unwrap();
let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(message.to_vec()).unwrap();
let mut message: Tensor<ValType<Fp>> =
message.into_iter().map(|m| Value::known(m).into()).into();
let k = 9;
let circuit = HashCircuit::<PoseidonSpec, 2> {
let circuit = HashCircuit::<PoseidonSpec> {
message: message.into(),
_spec: PhantomData,
};
@@ -559,13 +501,13 @@ mod tests {
let rng = rand::rngs::OsRng;
let message = [Fp::random(rng), Fp::random(rng), Fp::random(rng)];
let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE, 3>::run(message.to_vec()).unwrap();
let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(message.to_vec()).unwrap();
let mut message: Tensor<ValType<Fp>> =
message.into_iter().map(|m| Value::known(m).into()).into();
let k = 9;
let circuit = HashCircuit::<PoseidonSpec, 3> {
let circuit = HashCircuit::<PoseidonSpec> {
message: message.into(),
_spec: PhantomData,
};
@@ -581,23 +523,21 @@ mod tests {
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
env_logger::init();
{
let i = 32;
for i in (32..128).step_by(32) {
// print a separator line for this input size
println!(
log::info!(
"i is {} -------------------------------------------------",
i
);
let message: Vec<Fp> = (0..i).map(|_| Fp::random(rng)).collect::<Vec<_>>();
let output =
PoseidonChip::<PoseidonSpec, WIDTH, RATE, 32>::run(message.clone()).unwrap();
let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(message.clone()).unwrap();
let mut message: Tensor<ValType<Fp>> =
message.into_iter().map(|m| Value::known(m).into()).into();
let k = 17;
let circuit = HashCircuit::<PoseidonSpec, 32> {
let circuit = HashCircuit::<PoseidonSpec> {
message: message.into(),
_spec: PhantomData,
};
@@ -614,13 +554,13 @@ mod tests {
let mut message: Vec<Fp> = (0..2048).map(|_| Fp::random(rng)).collect::<Vec<_>>();
let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE, 25>::run(message.clone()).unwrap();
let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(message.clone()).unwrap();
let mut message: Tensor<ValType<Fp>> =
message.into_iter().map(|m| Value::known(m).into()).into();
let k = 17;
let circuit = HashCircuit::<PoseidonSpec, 25> {
let circuit = HashCircuit::<PoseidonSpec> {
message: message.into(),
_spec: PhantomData,
};

View File

@@ -17,7 +17,6 @@ pub enum BaseOp {
Sub,
SumInit,
Sum,
IsBoolean,
}
/// Matches a [BaseOp] to an operation over inputs
@@ -34,7 +33,6 @@ impl BaseOp {
BaseOp::Add => a + b,
BaseOp::Sub => a - b,
BaseOp::Mult => a * b,
BaseOp::IsBoolean => b,
_ => panic!("nonaccum_f called on accumulating operation"),
}
}
@@ -74,7 +72,6 @@ impl BaseOp {
BaseOp::Mult => "MULT",
BaseOp::Sum => "SUM",
BaseOp::SumInit => "SUMINIT",
BaseOp::IsBoolean => "ISBOOLEAN",
}
}
@@ -90,7 +87,6 @@ impl BaseOp {
BaseOp::Mult => (0, 1),
BaseOp::Sum => (-1, 2),
BaseOp::SumInit => (0, 1),
BaseOp::IsBoolean => (0, 1),
}
}
@@ -106,7 +102,6 @@ impl BaseOp {
BaseOp::Mult => 2,
BaseOp::Sum => 1,
BaseOp::SumInit => 1,
BaseOp::IsBoolean => 0,
}
}
@@ -122,7 +117,6 @@ impl BaseOp {
BaseOp::SumInit => 0,
BaseOp::CumProd => 1,
BaseOp::CumProdInit => 0,
BaseOp::IsBoolean => 0,
}
}
}

View File

@@ -394,7 +394,6 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
nonaccum_selectors.insert((BaseOp::Add, i, j), meta.selector());
nonaccum_selectors.insert((BaseOp::Sub, i, j), meta.selector());
nonaccum_selectors.insert((BaseOp::Mult, i, j), meta.selector());
nonaccum_selectors.insert((BaseOp::IsBoolean, i, j), meta.selector());
}
}
@@ -428,24 +427,13 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
// Get output expressions for each input channel
let (rotation_offset, rng) = base_op.query_offset_rng();
let constraints = match base_op {
BaseOp::IsBoolean => {
let expected_output: Tensor<Expression<F>> = output
.query_rng(meta, *block_idx, *inner_col_idx, 0, 1)
.expect("non accum: output query failed");
let constraints = {
let expected_output: Tensor<Expression<F>> = output
.query_rng(meta, *block_idx, *inner_col_idx, rotation_offset, rng)
.expect("non accum: output query failed");
let output = expected_output[base_op.constraint_idx()].clone();
vec![(output.clone()) * (output.clone() - Expression::Constant(F::from(1)))]
}
_ => {
let expected_output: Tensor<Expression<F>> = output
.query_rng(meta, *block_idx, *inner_col_idx, rotation_offset, rng)
.expect("non accum: output query failed");
let res = base_op.nonaccum_f((qis[0].clone(), qis[1].clone()));
vec![expected_output[base_op.constraint_idx()].clone() - res]
}
let res = base_op.nonaccum_f((qis[0].clone(), qis[1].clone()));
vec![expected_output[base_op.constraint_idx()].clone() - res]
};
Constraints::with_selector(selector, constraints)

View File

@@ -106,4 +106,7 @@ pub enum CircuitError {
/// Visibility has not been set
#[error("visibility has not been set")]
UnsetVisibility,
/// A decomposition base overflowed
#[error("decomposition base overflowed")]
DecompositionBaseOverflow,
}

View File

@@ -76,7 +76,10 @@ pub enum HybridOp {
output_scale: utils::F32,
axes: Vec<usize>,
},
RangeCheck(Tolerance),
Output {
tol: Tolerance,
decomp: bool,
},
Greater,
GreaterEqual,
Less,
@@ -178,7 +181,9 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
input_scale, output_scale, axes
)
}
HybridOp::RangeCheck(p) => format!("RANGECHECK (tol={:?})", p),
HybridOp::Output { tol, decomp } => {
format!("OUTPUT (tol={:?}, decomp={})", tol, decomp)
}
HybridOp::Greater => "GREATER".to_string(),
HybridOp::GreaterEqual => "GREATEREQUAL".to_string(),
HybridOp::Less => "LESS".to_string(),
@@ -314,12 +319,13 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
*output_scale,
axes,
)?,
HybridOp::RangeCheck(tol) => layouts::range_check_percent(
HybridOp::Output { tol, decomp } => layouts::output(
config,
region,
values[..].try_into()?,
tol.scale,
tol.val,
*decomp,
)?,
HybridOp::Greater => layouts::greater(config, region, values[..].try_into()?)?,
HybridOp::GreaterEqual => {

View File

@@ -11,7 +11,6 @@ use log::{error, trace};
use maybe_rayon::{
iter::IntoParallelRefIterator,
prelude::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator},
slice::ParallelSliceMut,
};
use self::tensor::{create_constant_tensor, create_zero_tensor};
@@ -155,13 +154,15 @@ pub(crate) fn div<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
.into()
};
claimed_output.reshape(input_dims)?;
let claimed_output = region.assign(&config.custom_gates.output, &claimed_output)?;
region.increment(claimed_output.len());
// here we decompose and extract the sign of the input
let sign = sign(config, region, &[claimed_output.clone()])?;
// implicitly check that the prover-provided output is within range
let claimed_output = identity(config, region, &[claimed_output], true)?;
// check whether x is too large, but only if the decomposition could have overflowed in the previous op
if (IntegerRep::MAX).abs() < ((region.base() as i128).pow(region.legs() as u32)) - 1 {
if F::from_u128(IntegerRep::MAX as u128)
< F::from_u128(region.base() as u128).pow([region.legs() as u64]) - F::ONE
{
// here we decompose and extract the sign of the input
let sign = sign(config, region, &[claimed_output.clone()])?;
let abs_value = pairwise(
config,
region,
@@ -221,9 +222,9 @@ pub(crate) fn recip<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
.into()
};
claimed_output.reshape(input_dims)?;
let claimed_output = region.assign(&config.custom_gates.output, &claimed_output)?;
region.increment(claimed_output.len());
// implicitly check that the prover-provided output is within range
let claimed_output = identity(config, region, &[claimed_output], true)?;
// divide by input_scale
let zero_inverse_val =
tensor::ops::nonlinearities::zero_recip(felt_to_integer_rep(output_scale) as f64)[0];
@@ -254,10 +255,12 @@ pub(crate) fn recip<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
BaseOp::Mult,
)?;
// here we decompose and extract the sign of the input
let sign = sign(config, region, &[masked_output.clone()])?;
// check whether x is too large, but only if the decomposition could have overflowed in the previous op
if (IntegerRep::MAX).abs() < ((region.base() as i128).pow(region.legs() as u32)) - 1 {
if F::from_u128(IntegerRep::MAX as u128)
< F::from_u128(region.base() as u128).pow([region.legs() as u64]) - F::ONE
{
// here we decompose and extract the sign of the input
let sign = sign(config, region, &[masked_output.clone()])?;
let abs_value = pairwise(
config,
region,
@@ -346,12 +349,8 @@ pub fn sqrt<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
.into()
};
claimed_output.reshape(input_dims)?;
let claimed_output = region.assign(&config.custom_gates.output, &claimed_output)?;
region.increment(claimed_output.len());
// force the output to be positive or zero
// force the output to be positive or zero; this also implicitly checks that the output is in range
let claimed_output = abs(config, region, &[claimed_output.clone()])?;
// rescaled input
let rescaled_input = pairwise(config, region, &[input.clone(), unit_scale], BaseOp::Mult)?;
@@ -907,11 +906,23 @@ pub fn einsum<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
Ok(output)
}
#[derive(Debug, Clone, Copy)]
/// Determines how to handle collisions in sorting.
pub enum SortCollisionMode {
/// No tie-breaking rule for equal values
Unsorted,
/// On collision, order equal values so the smallest index comes first
SmallestIndexFirst,
/// On collision, order equal values so the largest index comes first
LargestIndexFirst,
}
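A plain-Rust sketch (an illustration, not circuit code) of what these tie-breaking rules buy: with a fixed rule for equal values, the witness permutation is unique, so the prover can no longer choose between colliding indices:

fn sorted_indices(vals: &[i64], largest_index_first: bool) -> Vec<usize> {
    let mut idx: Vec<usize> = (0..vals.len()).collect();
    idx.sort_by(|&a, &b| {
        vals[a]
            .cmp(&vals[b])
            // break ties on equal values by index, in the chosen direction
            .then_with(|| if largest_index_first { b.cmp(&a) } else { a.cmp(&b) })
    });
    idx
}

fn main() {
    // for [5, 9, 9] the duplicate maximum sits at indices 1 and 2
    assert_eq!(sorted_indices(&[5, 9, 9], false), vec![0, 1, 2]); // SmallestIndexFirst
    assert_eq!(sorted_indices(&[5, 9, 9], true), vec![0, 2, 1]); // LargestIndexFirst
}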
fn _sort_ascending<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
) -> Result<ValTensor<F>, CircuitError> {
collision_handling: SortCollisionMode,
) -> Result<(ValTensor<F>, ValTensor<F>), CircuitError> {
let mut input = values[0].clone();
input.flatten();
@@ -919,7 +930,7 @@ fn _sort_ascending<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
let sorted = if is_assigned {
let mut int_evals = input.int_evals()?;
int_evals.par_sort_unstable_by(|a, b| a.cmp(b));
int_evals.sort_unstable();
int_evals
.par_iter()
.map(|x| Value::known(integer_rep_to_felt(*x)))
@@ -932,21 +943,73 @@ fn _sort_ascending<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
};
let assigned_sort = region.assign(&config.custom_gates.inputs[0], &sorted.into())?;
region.increment(assigned_sort.len());
// assert that this is a permutation/shuffle
let indices = shuffles(
config,
region,
&[assigned_sort.clone()],
&[input.clone()],
collision_handling,
)?;
let window_a = assigned_sort.get_slice(&[0..assigned_sort.len() - 1])?;
let window_b = assigned_sort.get_slice(&[1..assigned_sort.len()])?;
let is_greater = greater_equal(config, region, &[window_b.clone(), window_a.clone()])?;
let unit = create_unit_tensor(is_greater.len());
let indices_a = indices.get_slice(&[0..indices.len() - 1])?;
let indices_b = indices.get_slice(&[1..indices.len()])?;
enforce_equality(config, region, &[unit, is_greater])?;
let unit = create_unit_tensor(window_a.len());
// assert that this is a permutation/shuffle
shuffles(config, region, &[assigned_sort.clone()], &[input.clone()])?;
match collision_handling {
SortCollisionMode::Unsorted => {
let is_greater = greater_equal(config, region, &[window_b.clone(), window_a.clone()])?;
enforce_equality(config, region, &[unit, is_greater])?;
}
SortCollisionMode::SmallestIndexFirst => {
let is_greater = greater(config, region, &[window_b.clone(), window_a.clone()])?;
let is_equal = equals(config, region, &[window_b.clone(), window_a.clone()])?;
let is_greater_indices =
greater(config, region, &[indices_b.clone(), indices_a.clone()])?;
Ok(assigned_sort)
let is_equal_and_is_greater_indices =
and(config, region, &[is_equal, is_greater_indices])?;
let is_greater_or_is_equal_and_is_greater_indices = or(
config,
region,
&[is_greater, is_equal_and_is_greater_indices],
)?;
enforce_equality(
config,
region,
&[unit, is_greater_or_is_equal_and_is_greater_indices],
)?;
}
SortCollisionMode::LargestIndexFirst => {
let is_greater = greater(config, region, &[window_b.clone(), window_a.clone()])?;
let is_equal = equals(config, region, &[window_b.clone(), window_a.clone()])?;
let is_lesser_indices = less(config, region, &[indices_b.clone(), indices_a.clone()])?;
let is_equal_and_is_lesser_indices =
and(config, region, &[is_equal, is_lesser_indices])?;
let is_greater_or_is_equal_and_is_lesser_indices = or(
config,
region,
&[is_greater, is_equal_and_is_lesser_indices],
)?;
enforce_equality(
config,
region,
&[unit, is_greater_or_is_equal_and_is_lesser_indices],
)?;
}
}
Ok((assigned_sort, indices))
}
/// Returns top K values.
@@ -957,7 +1020,7 @@ fn _select_topk<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
k: usize,
largest: bool,
) -> Result<ValTensor<F>, CircuitError> {
let mut sorted = _sort_ascending(config, region, values)?;
let mut sorted = _sort_ascending(config, region, values, SortCollisionMode::Unsorted)?.0;
if largest {
sorted.reverse()?;
}
@@ -1210,6 +1273,7 @@ pub(crate) fn shuffles<F: PrimeField + TensorType + PartialOrd + std::hash::Hash
region: &mut RegionCtx<F>,
output: &[ValTensor<F>; 1],
input: &[ValTensor<F>; 1],
collision_handling: SortCollisionMode,
) -> Result<ValTensor<F>, CircuitError> {
let shuffle_index = region.shuffle_index();
let (output, input) = (output[0].clone(), input[0].clone());
@@ -1253,13 +1317,26 @@ pub(crate) fn shuffles<F: PrimeField + TensorType + PartialOrd + std::hash::Hash
.iter()
.map(|x| {
// Find all positions of the current element
let positions: Vec<usize> = input
let mut positions: Vec<usize> = input
.iter()
.enumerate()
.filter(|(_, y)| *y == x)
.map(|(i, _)| i)
.collect();
match collision_handling {
SortCollisionMode::Unsorted => {}
SortCollisionMode::SmallestIndexFirst => {
// order the positions so the smallest index comes first
positions.sort_unstable_by(|a, b| input[*a].cmp(&input[*b]));
}
SortCollisionMode::LargestIndexFirst => {
// reverse so the largest index comes first
positions.reverse();
}
}
// Find the first unused position for this element
let pos = positions
.iter()
@@ -1332,7 +1409,7 @@ pub(crate) fn shuffles<F: PrimeField + TensorType + PartialOrd + std::hash::Hash
region.increment_shuffle_index(1);
region.increment(output_len);
Ok(output)
Ok(claimed_index_output)
}
/// One hot accumulated layout
@@ -1801,11 +1878,18 @@ pub(crate) fn get_missing_set_elements<
region,
&[input_and_claimed_output.clone()],
&[fullset.clone()],
SortCollisionMode::Unsorted,
)?;
if ordered {
// assert that the claimed output is sorted
claimed_output = _sort_ascending(config, region, &[claimed_output])?;
claimed_output = _sort_ascending(
config,
region,
&[claimed_output],
SortCollisionMode::Unsorted,
)?
.0;
}
Ok(claimed_output)
@@ -2572,9 +2656,9 @@ pub fn mean_of_squares_axes<F: PrimeField + TensorType + PartialOrd + std::hash:
let squared = pow(config, region, values, 2)?;
let sum_squared = sum_axes(config, region, &[squared], axes)?;
let dividand: usize = values[0].len() / sum_squared.len();
let dividend: usize = values[0].len() / sum_squared.len();
let mean_squared = div(config, region, &[sum_squared], F::from(dividand as u64))?;
let mean_squared = div(config, region, &[sum_squared], F::from(dividend as u64))?;
Ok(mean_squared)
}
@@ -3923,11 +4007,24 @@ pub(crate) fn identity<F: PrimeField + TensorType + PartialOrd + std::hash::Hash
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
decomp: bool,
) -> Result<ValTensor<F>, CircuitError> {
let mut output = values[0].clone();
if !output.all_prev_assigned() {
output = region.assign(&config.custom_gates.output, &values[0])?;
region.increment(output.len());
// if decomp is set, range check the values via decomposition
if decomp {
output = decompose(
config,
region,
&[output.clone()],
&region.base(),
&region.legs(),
)?
.1;
} else {
output = region.assign(&config.custom_gates.output, &values[0])?;
region.increment(output.len());
}
}
Ok(output)
@@ -3948,23 +4045,8 @@ pub(crate) fn boolean_identity<F: PrimeField + TensorType + PartialOrd + std::ha
} else {
values[0].clone()
};
// Enable the selectors
if !region.is_dummy() {
(0..output.len())
.map(|j| {
let index = region.linear_coord() - j - 1;
let (x, y, z) = config.custom_gates.output.cartesian_coord(index);
let selector = config
.custom_gates
.selectors
.get(&(BaseOp::IsBoolean, x, y));
region.enable(selector, z)?;
Ok(())
})
.collect::<Result<Vec<_>, CircuitError>>()?;
}
range_check(config, region, values, &(0, 1))?;
Ok(output)
}
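The dedicated IsBoolean custom gate (removed above) enforced out * (out - 1) = 0 per cell; the rewrite reuses the generic range check over [0, 1] instead. Both accept exactly the same set of values, as this integer sketch shows:

// both predicates hold exactly when b is 0 or 1
fn is_boolean_gate(b: i64) -> bool {
    b * (b - 1) == 0
}

fn in_range_0_1(b: i64) -> bool {
    (0..=1).contains(&b)
}

fn main() {
    for b in -2..=3 {
        assert_eq!(is_boolean_gate(b), in_range_0_1(b));
    }
}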
@@ -4215,9 +4297,11 @@ pub(crate) fn argmax<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
&[values[0].clone(), assigned_argmax.clone()],
)?;
let max_val = max(config, region, &[values[0].clone()])?;
let (sorted_val, indices) =
_sort_ascending(config, region, values, SortCollisionMode::LargestIndexFirst)?;
enforce_equality(config, region, &[claimed_val, max_val])?;
enforce_equality(config, region, &[claimed_val, sorted_val.last()?])?;
enforce_equality(config, region, &[assigned_argmax.clone(), indices.last()?])?;
Ok(assigned_argmax)
}
@@ -4251,9 +4335,14 @@ pub(crate) fn argmin<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
region,
&[values[0].clone(), assigned_argmin.clone()],
)?;
let min_val = min(config, region, &[values[0].clone()])?;
enforce_equality(config, region, &[claimed_val, min_val])?;
let (min_val, indices) = _sort_ascending(
config,
region,
values,
SortCollisionMode::SmallestIndexFirst,
)?;
enforce_equality(config, region, &[claimed_val, min_val.first()?])?;
enforce_equality(config, region, &[assigned_argmin.clone(), indices.first()?])?;
Ok(assigned_argmin)
}
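With these constraints, argmax uses LargestIndexFirst plus indices.last() and argmin uses SmallestIndexFirst plus indices.first(), which pins both to the first occurrence of the extremum (the ONNX convention). In plain Rust:

// first-occurrence argmax, matching the behaviour the constraints above pin down
fn argmax_first_occurrence(vals: &[i64]) -> usize {
    let max = *vals.iter().max().expect("non-empty input");
    vals.iter().position(|&v| v == max).unwrap()
}

fn main() {
    // LargestIndexFirst sorts [5, 9, 9] as indices [0, 2, 1]; its last entry, 1,
    // is exactly the first occurrence of the maximum
    assert_eq!(argmax_first_occurrence(&[5, 9, 9]), 1);
}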
@@ -4368,7 +4457,11 @@ pub(crate) fn max<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
) -> Result<ValTensor<F>, CircuitError> {
Ok(_sort_ascending(config, region, values)?.last()?)
Ok(
_sort_ascending(config, region, values, SortCollisionMode::Unsorted)?
.0
.last()?,
)
}
/// min layout
@@ -4377,7 +4470,11 @@ pub(crate) fn min<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
) -> Result<ValTensor<F>, CircuitError> {
Ok(_sort_ascending(config, region, values)?.first()?)
Ok(
_sort_ascending(config, region, values, SortCollisionMode::Unsorted)?
.0
.first()?,
)
}
/// floor layout
@@ -4417,7 +4514,7 @@ pub fn floor<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
legs: usize,
) -> Result<ValTensor<F>, CircuitError> {
// decompose with base scale and then set the last element to zero
let decomposition = decompose(config, region, values, &(scale.0 as usize), &legs)?;
let decomposition = decompose(config, region, values, &(scale.0 as usize), &legs)?.0;
// set the last element to zero and then recompose; we don't actually need to assign here
// as it is assigned automatically inside recompose, which uses the constant caching of RegionCtx
let zero = ValType::Constant(F::ZERO);
@@ -4530,7 +4627,7 @@ pub fn ceil<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
legs: usize,
) -> Result<ValTensor<F>, CircuitError> {
// decompose with base scale and then set the last element to zero
let decomposition = decompose(config, region, values, &(scale.0 as usize), &legs)?;
let decomposition = decompose(config, region, values, &(scale.0 as usize), &legs)?.0;
// set the last element to zero and then recompose; we don't actually need to assign here
// as it is assigned automatically inside recompose, which uses the constant caching of RegionCtx
let zero = ValType::Constant(F::ZERO);
@@ -4684,7 +4781,7 @@ pub fn ln<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
.into()
};
claimed_output.reshape(input.dims())?;
region.assign(&config.custom_gates.output, &claimed_output)?;
let claimed_output = identity(&config, region, &[claimed_output], true)?;
region.increment(claimed_output.len());
let pow2_of_claimed_output = nonlinearity(
@@ -4930,7 +5027,7 @@ pub fn round<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
legs: usize,
) -> Result<ValTensor<F>, CircuitError> {
// decompose with base scale and then set the last element to zero
let decomposition = decompose(config, region, values, &(scale.0 as usize), &legs)?;
let decomposition = decompose(config, region, values, &(scale.0 as usize), &legs)?.0;
// set the last element to zero and then recompose; we don't actually need to assign here
// as it is assigned automatically inside recompose, which uses the constant caching of RegionCtx
let zero = ValType::Constant(F::ZERO);
@@ -5074,7 +5171,7 @@ pub fn round_half_to_even<F: PrimeField + TensorType + PartialOrd + std::hash::H
legs: usize,
) -> Result<ValTensor<F>, CircuitError> {
// decompose with base scale and then set the last element to zero
let decomposition = decompose(config, region, values, &(scale.0 as usize), &legs)?;
let decomposition = decompose(config, region, values, &(scale.0 as usize), &legs)?.0;
// set the last element to zero and then recompose; we don't actually need to assign here
// as it is assigned automatically inside recompose, which uses the constant caching of RegionCtx
let zero = ValType::Constant(F::ZERO);
@@ -5182,59 +5279,64 @@ pub(crate) fn recompose<F: PrimeField + TensorType + PartialOrd + std::hash::Has
values: &[ValTensor<F>; 1],
base: &usize,
) -> Result<ValTensor<F>, CircuitError> {
let input = values[0].clone();
let mut input = values[0].clone();
let first_dims = input.dims().to_vec()[..input.dims().len() - 1].to_vec();
let num_first_dims = first_dims.iter().product::<usize>();
let n = input.dims().last().unwrap() - 1;
let is_assigned = !input.all_prev_assigned();
if !input.all_prev_assigned() {
input = region.assign(&config.custom_gates.inputs[0], &input)?;
region.increment(input.len());
}
let bases: ValTensor<F> = Tensor::from(
(0..n)
.rev()
.map(|x| ValType::Constant(integer_rep_to_felt(base.pow(x as u32) as IntegerRep))),
)
// give a singleton input an explicit dimension so the bases tensor below lines up
if input.is_singleton() {
input.reshape(&[1])?;
}
let mut bases: ValTensor<F> = Tensor::from({
(0..num_first_dims)
.flat_map(|_| {
(0..n).rev().map(|x| {
let base = (*base).checked_pow(x as u32);
if let Some(base) = base {
Ok(ValType::Constant(integer_rep_to_felt(base as IntegerRep)))
} else {
Err(CircuitError::DecompositionBaseOverflow)
}
})
})
.collect::<Result<Vec<_>, CircuitError>>()?
.into_iter()
})
.into();
let mut bases_dims = first_dims.clone();
bases_dims.push(n);
bases.reshape(&bases_dims)?;
// multiply and sum the values
let mut output: Tensor<Tensor<ValType<F>>> = Tensor::new(None, &first_dims)?;
// the equation has the form ij,ij->i, but for arbitrary n dims we need to construct it dynamically
// indices should map in order of the alphabet
// start with lhs
let lhs = ASCII_ALPHABET.chars().take(input.dims().len()).join("");
let rhs = ASCII_ALPHABET.chars().take(input.dims().len() - 1).join("");
let cartesian_coord = first_dims
.iter()
.map(|x| 0..*x)
.multi_cartesian_product()
.collect::<Vec<_>>();
let equation = format!("{},{}->{}", lhs, lhs, rhs);
let inner_loop_function =
|i: usize, region: &mut RegionCtx<F>| -> Result<Tensor<ValType<F>>, CircuitError> {
let coord = cartesian_coord[i].clone();
let slice = coord.iter().map(|x| *x..*x + 1).collect::<Vec<_>>();
let mut sliced_input = input.get_slice(&slice)?;
sliced_input.flatten();
let mut sign_slice = first_dims.iter().map(|x| 0..*x).collect::<Vec<_>>();
sign_slice.push(0..1);
let mut rest_slice = first_dims.iter().map(|x| 0..*x).collect::<Vec<_>>();
rest_slice.push(1..n + 1);
if !is_assigned {
sliced_input = region.assign(&config.custom_gates.inputs[0], &sliced_input)?;
region.increment(sliced_input.len());
}
let sign = input.get_slice(&sign_slice)?;
let rest = input.get_slice(&rest_slice)?;
// get the sign bit and make sure it is valid
let sign = sliced_input.first()?;
let rest = sliced_input.get_slice(&[1..sliced_input.len()])?;
// now add the rhs
let prod_recomp = einsum(config, region, &[rest.clone(), bases], &equation)?;
let mut signed_recomp = pairwise(config, region, &[prod_recomp, sign], BaseOp::Mult)?;
signed_recomp.reshape(&first_dims)?;
let prod_decomp = dot(config, region, &[rest, bases.clone()])?;
let signed_decomp = pairwise(config, region, &[prod_decomp, sign], BaseOp::Mult)?;
Ok(signed_decomp.get_inner_tensor()?.clone())
};
region.apply_in_loop(&mut output, inner_loop_function)?;
let mut combined_output = output.combine()?;
combined_output.reshape(&first_dims)?;
Ok(combined_output.into())
Ok(signed_recomp.into())
}
pub(crate) fn decompose<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
@@ -5243,24 +5345,35 @@ pub(crate) fn decompose<F: PrimeField + TensorType + PartialOrd + std::hash::Has
values: &[ValTensor<F>; 1],
base: &usize,
n: &usize,
) -> Result<ValTensor<F>, CircuitError> {
) -> Result<(ValTensor<F>, ValTensor<F>), CircuitError> {
let mut input = values[0].clone();
let is_assigned = !input.all_prev_assigned();
if !is_assigned {
if !input.all_prev_assigned() {
input = region.assign(&config.custom_gates.inputs[0], &input)?;
}
let mut bases: ValTensor<F> = Tensor::from(
// repeat it input.len() times
(0..input.len()).flat_map(|_| {
(0..*n)
.rev()
.map(|x| ValType::Constant(integer_rep_to_felt(base.pow(x as u32) as IntegerRep)))
}),
)
// give a singleton input an explicit dimension so the bases tensor below lines up
if input.is_singleton() {
input.reshape(&[1])?;
}
let mut bases: ValTensor<F> = Tensor::from({
(0..input.len())
.flat_map(|_| {
(0..*n).rev().map(|x| {
let base = (*base).checked_pow(x as u32);
if let Some(base) = base {
Ok(ValType::Constant(integer_rep_to_felt(base as IntegerRep)))
} else {
Err(CircuitError::DecompositionBaseOverflow)
}
})
})
.collect::<Result<Vec<_>, CircuitError>>()?
.into_iter()
})
.into();
let mut bases_dims = input.dims().to_vec();
bases_dims.push(*n);
bases.reshape(&bases_dims)?;
@@ -5279,7 +5392,7 @@ pub(crate) fn decompose<F: PrimeField + TensorType + PartialOrd + std::hash::Has
claimed_output.into()
};
region.assign(&config.custom_gates.output, &claimed_output)?;
let claimed_output = region.assign(&config.custom_gates.output, &claimed_output)?;
region.increment(claimed_output.len());
let input_slice = input.dims().iter().map(|x| 0..*x).collect::<Vec<_>>();
@@ -5324,9 +5437,9 @@ pub(crate) fn decompose<F: PrimeField + TensorType + PartialOrd + std::hash::Has
let signed_decomp = pairwise(config, region, &[prod_decomp, sign], BaseOp::Mult)?;
enforce_equality(config, region, &[input, signed_decomp])?;
enforce_equality(config, region, &[input.clone(), signed_decomp])?;
Ok(claimed_output)
Ok((claimed_output, input))
}
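decompose now returns the assigned input alongside the claimed digits, and builds the powers of the base with checked_pow, surfacing an overflow as CircuitError::DecompositionBaseOverflow instead of wrapping silently. The relation being constrained is x = sign(x) * (d_{n-1} * base^{n-1} + ... + d_0), with each digit in [0, base); a sketch with the same overflow caution:

// recompose digits (most-significant first) with overflow-checked arithmetic,
// mirroring the checked_pow guard above; returns None where the circuit
// would raise DecompositionBaseOverflow
fn recompose_int(sign: i64, digits: &[u64], base: u64) -> Option<i64> {
    let mut acc: u64 = 0;
    for d in digits {
        debug_assert!(*d < base, "each digit must lie in [0, base)");
        acc = acc.checked_mul(base)?.checked_add(*d)?;
    }
    Some(sign * i64::try_from(acc).ok()?)
}

fn main() {
    // 2 * 16384 + 5 = 32773, with the default base 16384 and two legs
    assert_eq!(recompose_int(1, &[2, 5], 16384), Some(32773));
}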
pub(crate) fn sign<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
@@ -5334,7 +5447,7 @@ pub(crate) fn sign<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
) -> Result<ValTensor<F>, CircuitError> {
let mut decomp = decompose(config, region, values, &region.base(), &region.legs())?;
let mut decomp = decompose(config, region, values, &region.base(), &region.legs())?.0;
// get every n elements now, which correspond to the sign bit
decomp.get_every_n(region.legs() + 1)?;
decomp.reshape(values[0].dims())?;
@@ -5616,7 +5729,7 @@ pub fn softmax<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::fieldutils::IntegerRep;
/// use ezkl::circuit::ops::layouts::range_check_percent;
/// use ezkl::circuit::ops::layouts::output;
/// use ezkl::tensor::val::ValTensor;
/// use halo2curves::bn256::Fr as Fp;
/// use ezkl::circuit::region::RegionCtx;
@@ -5634,28 +5747,32 @@ pub fn softmax<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
/// Some(&[101, 201, 302, 403, 503, 603]),
/// &[2, 3],
/// ).unwrap());
/// let result = range_check_percent::<Fp>(&dummy_config, &mut dummy_region, &[x, y], 1024.0.into(), 1.0).unwrap();
/// let result = output::<Fp>(&dummy_config, &mut dummy_region, &[x, y], 1024.0.into(), 1.0, false).unwrap();
/// ```
pub fn range_check_percent<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
pub fn output<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
scale: utils::F32,
tol: f32,
decomp: bool,
) -> Result<ValTensor<F>, CircuitError> {
if tol == 0.0 {
// regular equality constraint
return enforce_equality(config, region, values);
}
let mut values = [values[0].clone(), values[1].clone()];
values[0] = region.assign(&config.custom_gates.inputs[0], &values[0])?;
values[1] = region.assign(&config.custom_gates.inputs[1], &values[1])?;
let total_assigned_0 = values[0].len();
let total_assigned_1 = values[1].len();
let total_assigned = std::cmp::max(total_assigned_0, total_assigned_1);
region.increment(total_assigned);
if !values[0].all_prev_assigned() {
// range check the outputs
values[0] = layouts::identity(config, region, &[values[0].clone()], decomp)?;
}
if !values[1].all_prev_assigned() {
// range check the outputs
values[1] = layouts::identity(config, region, &[values[1].clone()], decomp)?;
}
if tol == 0.0 {
// regular equality constraint
return enforce_equality(config, region, &[values[0].clone(), values[1].clone()]);
}
// Calculate the difference between the expected output and actual output
let diff = pairwise(config, region, &values, BaseOp::Sub)?;
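Semantically, output (formerly range_check_percent) first range checks both tensors via identity when they are not yet assigned, then either enforces exact equality (tol == 0.0) or bounds the relative difference. A plain-float sketch of the tolerance semantics (the circuit works over scaled field elements, so this is illustrative only):

// per-element check: exact equality at zero tolerance, otherwise a
// percent bound on the deviation from the expected value
fn within_tolerance(expected: f32, actual: f32, tol_percent: f32) -> bool {
    if tol_percent == 0.0 {
        expected == actual
    } else {
        (expected - actual).abs() <= (tol_percent / 100.0) * expected.abs()
    }
}

fn main() {
    assert!(within_tolerance(100.0, 100.9, 1.0));
    assert!(!within_tolerance(100.0, 102.0, 1.0));
}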

View File

@@ -159,6 +159,8 @@ pub struct Input {
pub scale: crate::Scale,
///
pub datum_type: InputType,
/// whether to range check the input via decomposition
pub decomp: bool,
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Input {
@@ -196,6 +198,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Input
config,
region,
values[..].try_into()?,
self.decomp,
)?)),
}
} else {
@@ -251,15 +254,18 @@ pub struct Constant<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> {
///
#[serde(skip)]
pub pre_assigned_val: Option<ValTensor<F>>,
/// whether to range check the constant via decomposition
pub decomp: bool,
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Constant<F> {
///
pub fn new(quantized_values: Tensor<F>, raw_values: Tensor<f32>) -> Self {
pub fn new(quantized_values: Tensor<F>, raw_values: Tensor<f32>, decomp: bool) -> Self {
Self {
quantized_values,
raw_values,
pre_assigned_val: None,
decomp,
}
}
/// Rebase the scale of the constant
@@ -311,7 +317,12 @@ impl<
self.quantized_values.clone().try_into()?
};
// we gotta constrain it once even if it's used multiple times
Ok(Some(layouts::identity(config, region, &[value])?))
Ok(Some(layouts::identity(
config,
region,
&[value],
self.decomp,
)?))
}
fn clone_dyn(&self) -> Box<dyn Op<F>> {

View File

@@ -323,7 +323,9 @@ impl<
PolyOp::Mult => {
layouts::pairwise(config, region, values[..].try_into()?, BaseOp::Mult)?
}
PolyOp::Identity { .. } => layouts::identity(config, region, values[..].try_into()?)?,
PolyOp::Identity { .. } => {
layouts::identity(config, region, values[..].try_into()?, false)?
}
PolyOp::Reshape(d) | PolyOp::Flatten(d) => layouts::reshape(values[..].try_into()?, d)?,
PolyOp::Pad(p) => {
if values.len() != 1 {

View File

@@ -1813,6 +1813,7 @@ mod shuffle {
&mut region,
&self.inputs[i],
&self.references[i],
layouts::SortCollisionMode::Unsorted,
)
.map_err(|_| Error::Synthesis)?;
}
@@ -1998,7 +1999,7 @@ mod add_with_overflow_and_poseidon {
let base = BaseConfig::configure(cs, &[a, b], &output, CheckMode::SAFE);
VarTensor::constant_cols(cs, K, 2, false);
let poseidon = PoseidonChip::<PoseidonSpec, WIDTH, RATE, WIDTH>::configure(cs, ());
let poseidon = PoseidonChip::<PoseidonSpec, WIDTH, RATE>::configure(cs, ());
MyCircuitConfig { base, poseidon }
}
@@ -2008,7 +2009,7 @@ mod add_with_overflow_and_poseidon {
mut config: Self::Config,
mut layouter: impl Layouter<Fr>,
) -> Result<(), Error> {
let poseidon_chip: PoseidonChip<PoseidonSpec, WIDTH, RATE, WIDTH> =
let poseidon_chip: PoseidonChip<PoseidonSpec, WIDTH, RATE> =
PoseidonChip::new(config.poseidon.clone());
let assigned_inputs_a =
@@ -2043,11 +2044,9 @@ mod add_with_overflow_and_poseidon {
let b = (0..LEN)
.map(|i| halo2curves::bn256::Fr::from(i as u64 + 1))
.collect::<Vec<_>>();
let commitment_a =
PoseidonChip::<PoseidonSpec, WIDTH, RATE, WIDTH>::run(a.clone()).unwrap()[0][0];
let commitment_a = PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(a.clone()).unwrap()[0][0];
let commitment_b =
PoseidonChip::<PoseidonSpec, WIDTH, RATE, WIDTH>::run(b.clone()).unwrap()[0][0];
let commitment_b = PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(b.clone()).unwrap()[0][0];
// parameters
let a = Tensor::from(a.into_iter().map(Value::known));
@@ -2069,13 +2068,11 @@ mod add_with_overflow_and_poseidon {
let b = (0..LEN)
.map(|i| halo2curves::bn256::Fr::from(i as u64 + 1))
.collect::<Vec<_>>();
let commitment_a = PoseidonChip::<PoseidonSpec, WIDTH, RATE, WIDTH>::run(a.clone())
.unwrap()[0][0]
+ Fr::one();
let commitment_a =
PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(a.clone()).unwrap()[0][0] + Fr::one();
let commitment_b = PoseidonChip::<PoseidonSpec, WIDTH, RATE, WIDTH>::run(b.clone())
.unwrap()[0][0]
+ Fr::one();
let commitment_b =
PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(b.clone()).unwrap()[0][0] + Fr::one();
// parameters
let a = Tensor::from(a.into_iter().map(Value::known));

View File

@@ -908,6 +908,7 @@ impl Model {
n.opkind = SupportedOp::Input(Input {
scale,
datum_type: inp.datum_type,
decomp: !run_args.ignore_range_check_inputs_outputs,
});
input_idx += 1;
n.out_scale = scale;
@@ -1145,8 +1146,8 @@ impl Model {
.iter()
.enumerate()
.map(|(i, output)| {
let mut tolerance = run_args.tolerance;
tolerance.scale = scale_to_multiplier(output_scales[i]).into();
let mut tol: crate::circuit::Tolerance = run_args.tolerance;
tol.scale = scale_to_multiplier(output_scales[i]).into();
let comparators = if run_args.output_visibility == Visibility::Public {
let res = vars
@@ -1169,7 +1170,10 @@ impl Model {
.layout(
&mut thread_safe_region,
&[output.clone(), comparators],
Box::new(HybridOp::RangeCheck(tolerance)),
Box::new(HybridOp::Output {
tol,
decomp: !run_args.ignore_range_check_inputs_outputs,
}),
)
.map_err(|e| e.into())
})
@@ -1446,13 +1450,16 @@ impl Model {
.into();
comparator.reshape(output.dims())?;
let mut tolerance = run_args.tolerance;
tolerance.scale = scale_to_multiplier(output_scales[i]).into();
let mut tol = run_args.tolerance;
tol.scale = scale_to_multiplier(output_scales[i]).into();
dummy_config.layout(
&mut region,
&[output.clone(), comparator],
Box::new(HybridOp::RangeCheck(tolerance)),
Box::new(HybridOp::Output {
tol,
decomp: !run_args.ignore_range_check_inputs_outputs,
}),
)
})
.collect::<Result<Vec<_>, _>>();
@@ -1544,6 +1551,7 @@ impl Model {
let mut op = crate::circuit::Constant::new(
c.quantized_values.clone(),
c.raw_values.clone(),
c.decomp,
);
op.pre_assign(consts[const_idx].clone());
n.opkind = SupportedOp::Constant(op);

View File

@@ -14,14 +14,11 @@ use serde::{Deserialize, Serialize};
use super::errors::GraphError;
use super::{VarVisibility, Visibility};
/// poseidon len to hash in tree
pub const POSEIDON_LEN_GRAPH: usize = 32;
/// Poseidon number of instances
pub const POSEIDON_INSTANCES: usize = 1;
/// Poseidon module type
pub type ModulePoseidon =
PoseidonChip<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, POSEIDON_LEN_GRAPH>;
pub type ModulePoseidon = PoseidonChip<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE>;
/// Poseidon module config
pub type ModulePoseidonConfig = PoseidonConfig<POSEIDON_WIDTH, POSEIDON_RATE>;

View File

@@ -274,11 +274,9 @@ pub fn new_op_from_onnx(
symbol_values: &SymbolValues,
run_args: &crate::RunArgs,
) -> Result<(SupportedOp, Vec<usize>), GraphError> {
use std::f64::consts::E;
use tract_onnx::tract_core::ops::array::Trilu;
use crate::circuit::InputType;
use std::f64::consts::E;
use tract_onnx::tract_core::ops::array::Trilu;
let input_scales = inputs
.iter()
@@ -384,7 +382,11 @@ pub fn new_op_from_onnx(
// Quantize the raw value (integers)
let quantized_value = quantize_tensor(raw_value.clone(), 0, &Visibility::Fixed)?;
let c = crate::circuit::ops::Constant::new(quantized_value, raw_value);
let c = crate::circuit::ops::Constant::new(
quantized_value,
raw_value,
!run_args.ignore_range_check_inputs_outputs,
);
// Create a constant op
SupportedOp::Constant(c)
}
@@ -446,6 +448,7 @@ pub fn new_op_from_onnx(
inputs[1].replace_opkind(SupportedOp::Input(crate::circuit::ops::Input {
scale: 0,
datum_type: InputType::TDim,
decomp: false,
}));
inputs[1].bump_scale(0);
}
@@ -522,6 +525,7 @@ pub fn new_op_from_onnx(
inputs[1].replace_opkind(SupportedOp::Input(crate::circuit::ops::Input {
scale: 0,
datum_type: InputType::TDim,
decomp: !run_args.ignore_range_check_inputs_outputs,
}));
inputs[1].bump_scale(0);
}
@@ -558,6 +562,7 @@ pub fn new_op_from_onnx(
inputs[1].replace_opkind(SupportedOp::Input(crate::circuit::ops::Input {
scale: 0,
datum_type: InputType::TDim,
decomp: !run_args.ignore_range_check_inputs_outputs,
}));
inputs[1].bump_scale(0);
}
@@ -595,6 +600,7 @@ pub fn new_op_from_onnx(
inputs[1].replace_opkind(SupportedOp::Input(crate::circuit::ops::Input {
scale: 0,
datum_type: InputType::TDim,
decomp: !run_args.ignore_range_check_inputs_outputs,
}));
inputs[1].bump_scale(0);
}
@@ -632,6 +638,7 @@ pub fn new_op_from_onnx(
inputs[1].replace_opkind(SupportedOp::Input(crate::circuit::ops::Input {
scale: 0,
datum_type: InputType::TDim,
decomp: !run_args.ignore_range_check_inputs_outputs,
}));
inputs[1].bump_scale(0);
}
@@ -706,7 +713,11 @@ pub fn new_op_from_onnx(
constant_scale,
&run_args.param_visibility,
)?;
let c = crate::circuit::ops::Constant::new(quantized_value, raw_value);
let c = crate::circuit::ops::Constant::new(
quantized_value,
raw_value,
run_args.ignore_range_check_inputs_outputs,
);
// Create a constant op
SupportedOp::Constant(c)
}
@@ -969,7 +980,11 @@ pub fn new_op_from_onnx(
DatumType::F64 => (scales.input, InputType::F64),
_ => return Err(GraphError::UnsupportedDataType(idx, format!("{:?}", dt))),
};
SupportedOp::Input(crate::circuit::ops::Input { scale, datum_type })
SupportedOp::Input(crate::circuit::ops::Input {
scale,
datum_type,
decomp: !run_args.ignore_range_check_inputs_outputs,
})
}
"Cast" => {
let op = load_op::<Cast>(node.op(), idx, node.op().name().to_string())?;
@@ -1257,9 +1272,19 @@ pub fn new_op_from_onnx(
// get the non constant index
let denom = c.raw_values[0];
SupportedOp::Hybrid(HybridOp::Div {
let op = SupportedOp::Hybrid(HybridOp::Div {
denom: denom.into(),
})
});
// if the input is scale 0 we rescale up to the max scale
if input_scales[0] == 0 {
SupportedOp::Rescaled(Rescaled {
inner: Box::new(op),
scale: vec![(0, scale_to_multiplier(scales.get_max()) as u128)],
})
} else {
op
}
} else {
return Err(GraphError::MisformedParams(
"only support non zero divisors of size 1".to_string(),

View File

@@ -349,6 +349,12 @@ pub struct RunArgs {
arg(long, default_value = "false")
)]
pub bounded_log_lookup: bool,
/// Ignore range checks on graph inputs and outputs (set this if the inputs are raw field elements)
#[cfg_attr(
all(feature = "ezkl", not(target_arch = "wasm32")),
arg(long, default_value = "false")
)]
pub ignore_range_check_inputs_outputs: bool,
}
impl Default for RunArgs {
@@ -375,6 +381,7 @@ impl Default for RunArgs {
commitment: None,
decomp_base: 16384,
decomp_legs: 2,
ignore_range_check_inputs_outputs: false,
}
}
}
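A hedged usage sketch of the new flag (assuming the crate re-exports RunArgs at the root, as crate::RunArgs above suggests): disabling the input/output range checks when the inputs are already raw field elements that may exceed the decomposition range.

fn main() {
    // hypothetical: path and Default values taken from the diff above
    let run_args = ezkl::RunArgs {
        ignore_range_check_inputs_outputs: true,
        ..ezkl::RunArgs::default()
    };
    assert!(run_args.ignore_range_check_inputs_outputs);
}

On the CLI this corresponds to the --ignore-range-check-inputs-outputs flag, which the native tests below pass when output visibility is fixed.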

Binary file not shown.

Binary file not shown.

File diff suppressed because one or more lines are too long

View File

@@ -28,11 +28,12 @@
"commitment": "KZG",
"decomp_base": 128,
"decomp_legs": 2,
"bounded_log_lookup": false
"bounded_log_lookup": false,
"ignore_range_check_inputs_outputs": false
},
"num_rows": 46,
"total_assignments": 92,
"total_const_size": 3,
"num_rows": 236,
"total_assignments": 472,
"total_const_size": 4,
"total_dynamic_col_size": 0,
"max_dynamic_input_len": 0,
"num_dynamic_lookups": 0,

Binary file not shown.

View File

@@ -1,7 +1,6 @@
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
#[cfg(test)]
mod native_tests {
use ezkl::circuit::Tolerance;
use ezkl::fieldutils::{felt_to_integer_rep, integer_rep_to_felt, IntegerRep};
// use ezkl::circuit::table::RESERVED_BLINDING_ROWS_PAD;
@@ -23,6 +22,8 @@ mod native_tests {
static COMPILE_WASM: Once = Once::new();
static ENV_SETUP: Once = Once::new();
const TEST_BINARY: &str = "test-runs/ezkl";
// Be sure to run this only once
#[derive(Debug)]
#[allow(dead_code)]
@@ -103,7 +104,7 @@ mod native_tests {
fn download_srs(logrows: u32, commitment: Commitments) {
// if does not exist, download it
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"get-srs",
"--logrows",
@@ -205,62 +206,62 @@ mod native_tests {
"1l_tiny_div",
];
const TESTS: [&str; 98] = [
"1l_mlp", //0
"1l_slice",
"1l_concat",
"1l_flatten",
const TESTS: [&str; 99] = [
"1l_mlp", //0
"1l_slice", //1
"1l_concat", //2
"1l_flatten", //3
// "1l_average",
"1l_div",
"1l_pad", // 5
"1l_reshape",
"1l_eltwise_div",
"1l_sigmoid",
"1l_sqrt",
"1l_softmax", //10
"1l_div", //4
"1l_pad", // 5
"1l_reshape", //6
"1l_eltwise_div", //7
"1l_sigmoid", //8
"1l_sqrt", //9
"1l_softmax", //10
// "1l_instance_norm",
"1l_batch_norm",
"1l_prelu",
"1l_leakyrelu",
"1l_gelu_noappx",
"1l_batch_norm", //11
"1l_prelu", //12
"1l_leakyrelu", //13
"1l_gelu_noappx", //14
// "1l_gelu_tanh_appx",
"1l_relu", //15
"1l_downsample",
"1l_tanh",
"2l_relu_sigmoid_small",
"2l_relu_fc",
"2l_relu_small", //20
"2l_relu_sigmoid",
"1l_conv",
"2l_sigmoid_small",
"2l_relu_sigmoid_conv",
"3l_relu_conv_fc", //25
"4l_relu_conv_fc",
"1l_erf",
"1l_var",
"1l_elu",
"min", //30
"max",
"1l_max_pool",
"1l_conv_transpose",
"1l_upsample",
"1l_identity", //35
"idolmodel", // too big evm
"trig", // too big evm
"prelu_gmm",
"lstm",
"rnn", //40
"quantize_dequantize",
"1l_where",
"boolean",
"boolean_identity",
"decision_tree", // 45
"random_forest",
"gradient_boosted_trees",
"1l_topk",
"xgboost",
"lightgbm", //50
"hummingbird_decision_tree",
"1l_relu", //15
"1l_downsample", //16
"1l_tanh", //17
"2l_relu_sigmoid_small", //18
"2l_relu_fc", //19
"2l_relu_small", //20
"2l_relu_sigmoid", //21
"1l_conv", //22
"2l_sigmoid_small", //23
"2l_relu_sigmoid_conv", //24
"3l_relu_conv_fc", //25
"4l_relu_conv_fc", //26
"1l_erf", //27
"1l_var", //28
"1l_elu", //29
"min", //30
"max", //31
"1l_max_pool", //32
"1l_conv_transpose", //33
"1l_upsample", //34
"1l_identity", //35
"idolmodel", // too big evm
"trig", // too big evm
"prelu_gmm", //38
"lstm", //39
"rnn", //40
"quantize_dequantize", //41
"1l_where", //42
"boolean", //43
"boolean_identity", //44
"decision_tree", // 45
"random_forest", //46
"gradient_boosted_trees", //47
"1l_topk", //48
"xgboost", //49
"lightgbm", //50
"hummingbird_decision_tree", //51
"oh_decision_tree",
"linear_svc",
"gather_elements",
@@ -308,6 +309,7 @@ mod native_tests {
"log", // 95
"exp", // 96
"general_exp", // 97
"integer_div", // 98
];
const WASM_TESTS: [&str; 46] = [
@@ -546,7 +548,7 @@ mod native_tests {
}
});
seq!(N in 0..=97 {
seq!(N in 0..=98 {
#(#[test_case(TESTS[N])])*
#[ignore]
@@ -627,7 +629,7 @@ mod native_tests {
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
// generate a random tolerance between 0.0 and 100.0
let tolerance = rand::thread_rng().gen_range(0.0..1.0) * 100.0;
mock(path, test.to_string(), "private", "private", "public", 1, "resources", None, tolerance, false, Some(8194), Some(5));
mock(path, test.to_string(), "private", "private", "public", 1, "resources", None, tolerance, false, Some(32776), Some(5));
test_dir.close().unwrap();
}
@@ -981,7 +983,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "fixed", "public", 1, "resources", None, 0.0, false, None, None);
mock(path, test.to_string(), "private", "fixed", "public", 1, "resources", None, 0.0, false, None, Some(5));
test_dir.close().unwrap();
}
});
@@ -1555,7 +1557,7 @@ mod native_tests {
.save(format!("{}/{}/witness_bad.json", test_dir, example_name).into())
.unwrap();
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"mock",
"-W",
@@ -1567,7 +1569,7 @@ mod native_tests {
.expect("failed to execute process");
assert!(status.success());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"mock",
"-W",
@@ -1579,7 +1581,7 @@ mod native_tests {
.expect("failed to execute process");
assert!(status.success());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"mock",
"-W",
@@ -1591,7 +1593,7 @@ mod native_tests {
.expect("failed to execute process");
assert!(!status.success());
} else {
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"mock",
"-W",
@@ -1640,6 +1642,11 @@ mod native_tests {
format!("--commitment={}", commitment),
];
// if output-visibility is fixed, pass --ignore-range-check-inputs-outputs
if output_visibility == "fixed" {
args.push("--ignore-range-check-inputs-outputs".to_string());
}
if let Some(decomp_base) = decomp_base {
args.push(format!("--decomp-base={}", decomp_base));
}
@@ -1652,7 +1659,7 @@ mod native_tests {
args.push("--bounded-log-lookup".to_string());
}
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(args)
.status()
.expect("failed to execute process");
@@ -1682,7 +1689,7 @@ mod native_tests {
calibrate_args.push(scales);
}
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(calibrate_args)
.status()
.expect("failed to execute process");
@@ -1706,7 +1713,7 @@ mod native_tests {
*tolerance = 0.0;
}
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"compile-circuit",
"-M",
@@ -1723,7 +1730,7 @@ mod native_tests {
.expect("failed to execute process");
assert!(status.success());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"gen-witness",
"-D",
@@ -1791,7 +1798,7 @@ mod native_tests {
// Mock prove (fast, but does not cover some potential issues)
fn render_circuit(test_dir: &str, example_name: String) {
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"render-circuit",
"-M",
@@ -1822,7 +1829,7 @@ mod native_tests {
Commitments::KZG,
2,
);
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"mock-aggregate",
"--logrows=23",
@@ -1860,7 +1867,7 @@ mod native_tests {
download_srs(23, commitment);
// now setup-aggregate
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"setup-aggregate",
"--sample-snarks",
@@ -1876,7 +1883,7 @@ mod native_tests {
.expect("failed to execute process");
assert!(status.success());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"aggregate",
"--logrows=23",
@@ -1891,7 +1898,7 @@ mod native_tests {
.status()
.expect("failed to execute process");
assert!(status.success());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"verify-aggr",
"--logrows=23",
@@ -1941,7 +1948,7 @@ mod native_tests {
let private_key = format!("--private-key={}", *ANVIL_DEFAULT_PRIVATE_KEY);
// create encoded calldata
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"encode-evm-calldata",
"--proof-path",
@@ -1963,7 +1970,7 @@ mod native_tests {
let args = build_args(base_args, &sol_arg);
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(args)
.status()
.expect("failed to execute process");
@@ -1979,7 +1986,7 @@ mod native_tests {
private_key.as_str(),
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args)
.status()
.expect("failed to execute process");
@@ -2001,14 +2008,14 @@ mod native_tests {
rpc_arg.as_str(),
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&base_args)
.status()
.expect("failed to execute process");
assert!(status.success());
// As sanity check, add example that should fail.
base_args[2] = PF_FAILURE_AGGR;
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(base_args)
.status()
.expect("failed to execute process");
@@ -2059,7 +2066,7 @@ mod native_tests {
init_params(settings_path.clone().into());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"setup",
"-M",
@@ -2074,7 +2081,7 @@ mod native_tests {
.expect("failed to execute process");
assert!(status.success());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"prove",
"-W",
@@ -2092,7 +2099,7 @@ mod native_tests {
.expect("failed to execute process");
assert!(status.success());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"swap-proof-commitments",
"--proof-path",
@@ -2104,7 +2111,7 @@ mod native_tests {
.expect("failed to execute process");
assert!(status.success());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"verify",
format!("--settings-path={}", settings_path).as_str(),
@@ -2127,7 +2134,7 @@ mod native_tests {
// get_srs for the graph_settings_num_instances
download_srs(1, graph_settings.run_args.commitment.into());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"verify",
format!("--settings-path={}", settings_path).as_str(),
@@ -2177,7 +2184,7 @@ mod native_tests {
let settings_arg = format!("--settings-path={}", settings_path);
// create encoded calldata
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"encode-evm-calldata",
"--proof-path",
@@ -2197,7 +2204,7 @@ mod native_tests {
args.push("--sol-code-path");
args.push(sol_arg.as_str());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args)
.status()
.expect("failed to execute process");
@@ -2209,7 +2216,7 @@ mod native_tests {
args.push("--sol-code-path");
args.push(sol_arg.as_str());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args)
.status()
.expect("failed to execute process");
@@ -2231,14 +2238,14 @@ mod native_tests {
deployed_addr_arg.as_str(),
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());
// As sanity check, add example that should fail.
args[2] = PF_FAILURE;
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(args)
.status()
.expect("failed to execute process");
@@ -2297,7 +2304,7 @@ mod native_tests {
"--reusable",
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args)
.status()
.expect("failed to execute process");
@@ -2312,7 +2319,7 @@ mod native_tests {
"-C=verifier/reusable",
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args)
.status()
.expect("failed to execute process");
@@ -2341,7 +2348,7 @@ mod native_tests {
&sol_arg_vk,
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args)
.status()
.expect("failed to execute process");
@@ -2356,7 +2363,7 @@ mod native_tests {
"-C=vka",
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args)
.status()
.expect("failed to execute process");
@@ -2369,7 +2376,7 @@ mod native_tests {
let deployed_addr_arg_vk = format!("--addr-vk={}", addr_vk);
// create encoded calldata
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"encode-evm-calldata",
"--proof-path",
@@ -2392,7 +2399,7 @@ mod native_tests {
deployed_addr_arg_vk.as_str(),
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args)
.status()
.expect("failed to execute process");
@@ -2425,7 +2432,7 @@ mod native_tests {
// Verify the modified proof (should fail)
let mut args_mod = args.clone();
args_mod[2] = &modified_pf_arg;
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args_mod)
.status()
.expect("failed to execute process");
@@ -2503,7 +2510,7 @@ mod native_tests {
let test_input_source = format!("--input-source={}", input_source);
let test_output_source = format!("--output-source={}", output_source);
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"setup",
"-M",
@@ -2518,7 +2525,7 @@ mod native_tests {
assert!(status.success());
// generate the witness, passing the vk path to generate the necessary kzg commits
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"gen-witness",
"-D",
@@ -2575,7 +2582,7 @@ mod native_tests {
}
input.save(data_path.clone().into()).unwrap();
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"setup-test-evm-data",
"-D",
@@ -2593,7 +2600,7 @@ mod native_tests {
assert!(status.success());
}
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"prove",
"-W",
@@ -2614,7 +2621,7 @@ mod native_tests {
let settings_arg = format!("--settings-path={}", settings_path);
// create encoded calldata
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"encode-evm-calldata",
"--proof-path",
@@ -2633,7 +2640,7 @@ mod native_tests {
args.push("--sol-code-path");
args.push(sol_arg.as_str());
-let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
+let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args)
.status()
.expect("failed to execute process");
@@ -2654,7 +2661,7 @@ mod native_tests {
args.push("--sol-code-path");
args.push(sol_arg.as_str());
-let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
+let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args)
.status()
.expect("failed to execute process");
@@ -2677,7 +2684,7 @@ mod native_tests {
create_da_args.push(test_on_chain_data_path.as_str());
}
-let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
+let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&create_da_args)
.status()
.expect("failed to execute process");
@@ -2690,7 +2697,7 @@ mod native_tests {
};
let addr_path_da_arg = format!("--addr-path={}/{}/addr_da.txt", test_dir, example_name);
-let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
+let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"deploy-evm-da",
format!("--settings-path={}", settings_path).as_str(),
@@ -2728,14 +2735,14 @@ mod native_tests {
deployed_addr_da_arg.as_str(),
rpc_arg.as_str(),
];
-let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
+let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());
// Create a new set of test on chain data only for the on-chain input source
if input_source != "file" || output_source != "file" {
-let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
+let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"setup-test-evm-data",
"-D",
@@ -2762,7 +2769,7 @@ mod native_tests {
test_on_chain_data_path.as_str(),
rpc_arg.as_str(),
];
-let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
+let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args)
.status()
.expect("failed to execute process");
@@ -2778,7 +2785,7 @@ mod native_tests {
deployed_addr_da_arg.as_str(),
rpc_arg.as_str(),
];
-let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
+let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(args)
.status()
.expect("failed to execute process");
@@ -2789,7 +2796,7 @@ mod native_tests {
#[cfg(feature = "icicle")]
let args = [
"build",
"--release",
"--profile=test-runs",
"--bin",
"ezkl",
"--features",
@@ -2798,7 +2805,7 @@ mod native_tests {
#[cfg(feature = "macos-metal")]
let args = [
"build",
"--release",
"--profile=test-runs",
"--bin",
"ezkl",
"--features",
@@ -2806,11 +2813,11 @@ mod native_tests {
];
// not macos-metal and not icicle
#[cfg(all(not(feature = "icicle"), not(feature = "macos-metal")))]
let args = ["build", "--release", "--bin", "ezkl"];
let args = ["build", "--profile=test-runs", "--bin", "ezkl"];
#[cfg(not(feature = "mv-lookup"))]
let args = [
"build",
"--release",
"--profile=test-runs",
"--bin",
"ezkl",
"--no-default-features",
@@ -2831,7 +2838,7 @@ mod native_tests {
let status = Command::new("wasm-pack")
.args([
"build",
"--release",
"--profile=test-runs",
"--target",
"nodejs",
"--out-dir",

View File

@@ -126,10 +126,10 @@ mod py_tests {
}
const TESTS: [&str; 35] = [
"ezkl_demo_batch.ipynb", // 0
"proof_splitting.ipynb", // 1
"variance.ipynb", // 2
"mnist_gan.ipynb", // 3
"mnist_gan.ipynb", // 0
"ezkl_demo_batch.ipynb", // 1
"proof_splitting.ipynb", // 2
"variance.ipynb", // 3
"keras_simple_demo.ipynb", // 4
"mnist_gan_proof_splitting.ipynb", // 5
"hashed_vis.ipynb", // 6

View File

@@ -59,7 +59,7 @@ def test_poseidon_hash():
message = [ezkl.float_to_felt(x, 7) for x in message]
res = ezkl.poseidon_hash(message)
assert ezkl.felt_to_big_endian(
res[0]) == "0x0da7e5e5c8877242fa699f586baf770d731defd54f952d4adeb85047a0e32f45"
res[0]) == "0x2369898875588bf49b6539376b09705ea69aee318a58e6fcc1e68fc3e7ad81ab"

View File

@@ -11,7 +11,6 @@ mod wasm32 {
use ezkl::circuit::modules::poseidon::spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH};
use ezkl::circuit::modules::poseidon::PoseidonChip;
use ezkl::circuit::modules::Module;
-use ezkl::graph::modules::POSEIDON_LEN_GRAPH;
use ezkl::graph::GraphCircuit;
use ezkl::graph::{GraphSettings, GraphWitness};
use ezkl::pfsys;
@@ -227,11 +226,9 @@ mod wasm32 {
let hash: Vec<Vec<Fr>> = serde_json::from_slice(&hash[..]).unwrap();
let reference_hash =
PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, POSEIDON_LEN_GRAPH>::run(
message.clone(),
)
.map_err(|_| "failed")
.unwrap();
PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE>::run(message.clone())
.map_err(|_| "failed")
.unwrap();
assert_eq!(hash, reference_hash)
}
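
This change and the new expected digest in the Python test above are two sides of the same refactor: the POSEIDON_LEN_GRAPH import and the fourth const generic are dropped, so the message length is a runtime property of the input rather than a type-level constant, and fixed test vectors for the same message plausibly change because the hash now depends on that length. A usage sketch of the updated call shape, built only from names appearing in this diff:

    // The chip is now parameterized by spec, width, and rate alone; `run`
    // hashes a message whose length is decided at runtime and yields the
    // same Vec<Vec<Fr>> shape the test compares against.
    let reference_hash: Vec<Vec<Fr>> =
        PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE>::run(message.clone())
            .map_err(|_| "failed")
            .unwrap();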